Diffstat (limited to 'net/ipv4/inetpeer.c')

 -rw-r--r--  net/ipv4/inetpeer.c | 364
 1 file changed, 245 insertions(+), 119 deletions(-)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9ffa24b9a804..ce616d92cc54 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -63,7 +63,7 @@
  * refcnt: atomically against modifications on other CPU;
  *	   usually under some other lock to prevent node disappearing
  * dtime: unused node list lock
- * v4daddr: unchangeable
+ * daddr: unchangeable
  * ip_id_count: atomic value (no lock needed)
  */
 
@@ -72,21 +72,31 @@ static struct kmem_cache *peer_cachep __read_mostly;
 #define node_height(x) x->avl_height
 
 #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
 static const struct inet_peer peer_fake_node = {
-	.avl_left	= peer_avl_empty,
-	.avl_right	= peer_avl_empty,
+	.avl_left	= peer_avl_empty_rcu,
+	.avl_right	= peer_avl_empty_rcu,
 	.avl_height	= 0
 };
 
-static struct {
-	struct inet_peer *root;
-	spinlock_t	lock;
+struct inet_peer_base {
+	struct inet_peer __rcu *root;
+	seqlock_t	lock;
 	int		total;
-} peers = {
-	.root		= peer_avl_empty,
-	.lock		= __SPIN_LOCK_UNLOCKED(peers.lock),
+};
+
+static struct inet_peer_base v4_peers = {
+	.root		= peer_avl_empty_rcu,
+	.lock		= __SEQLOCK_UNLOCKED(v4_peers.lock),
+	.total		= 0,
+};
+
+static struct inet_peer_base v6_peers = {
+	.root		= peer_avl_empty_rcu,
+	.lock		= __SEQLOCK_UNLOCKED(v6_peers.lock),
 	.total		= 0,
 };
+
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
 /* Exported for sysctl_net_ipv4. */
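Two things change in the hunk above: the single global `peers` pool is split into per-family `inet_peer_base` instances (`v4_peers`/`v6_peers`), and the pool spinlock becomes a seqlock, so a lockless reader can detect that a writer mutated the tree during its walk and fall back to the locked path. As a reminder of the seqlock retry protocol this enables, here is a minimal standalone sketch using C11 atomics (simplified to a single writer; the `_sketch` names are made up here, this is not the kernel's `seqlock_t`):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* even = tree stable, odd = writer in the middle of an update */
static atomic_uint seq_sketch;
static int tree_sketch;            /* stands in for the AVL tree */

static unsigned read_begin_sketch(void)
{
	unsigned s;
	while ((s = atomic_load(&seq_sketch)) & 1)
		;                  /* writer active: wait for a stable view */
	return s;
}

static bool read_retry_sketch(unsigned start)
{
	return atomic_load(&seq_sketch) != start;  /* raced with a writer? */
}

static void write_begin_sketch(void) { atomic_fetch_add(&seq_sketch, 1); }
static void write_end_sketch(void)   { atomic_fetch_add(&seq_sketch, 1); }

int main(void)
{
	/* writer side, like write_seqlock_bh()/write_sequnlock_bh() */
	write_begin_sketch();
	tree_sketch = 42;          /* link_to_pool()/rebalance happens here */
	write_end_sketch();

	/* reader side, like the lockless fast path in inet_getpeer() */
	unsigned s = read_begin_sketch();
	int v = tree_sketch;       /* lookup_rcu() happens here */
	if (!read_retry_sketch(s))
		printf("stable read: %d\n", v);
	else
		printf("invalidated: fall back to the write lock\n");
	return 0;
}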
@@ -144,62 +154,99 @@ void __init inet_initpeers(void)
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-	if (!list_empty(&p->unused)) {
-		spin_lock_bh(&unused_peers.lock);
-		list_del_init(&p->unused);
-		spin_unlock_bh(&unused_peers.lock);
+	spin_lock_bh(&unused_peers.lock);
+	list_del_init(&p->unused);
+	spin_unlock_bh(&unused_peers.lock);
+}
+
+static int addr_compare(const struct inetpeer_addr *a,
+			const struct inetpeer_addr *b)
+{
+	int i, n = (a->family == AF_INET ? 1 : 4);
+
+	for (i = 0; i < n; i++) {
+		if (a->addr.a6[i] == b->addr.a6[i])
+			continue;
+		if (a->addr.a6[i] < b->addr.a6[i])
+			return -1;
+		return 1;
 	}
+
+	return 0;
 }
 
+#define rcu_deref_locked(X, BASE)				\
+	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
+
 /*
  * Called with local BH disabled and the pool lock held.
  */
-#define lookup(_daddr, _stack)					\
+#define lookup(_daddr, _stack, _base)				\
 ({								\
-	struct inet_peer *u, **v;				\
+	struct inet_peer *u;					\
+	struct inet_peer __rcu **v;				\
 								\
 	stackptr = _stack;					\
-	*stackptr++ = &peers.root;				\
-	for (u = peers.root; u != peer_avl_empty; ) {		\
-		if (_daddr == u->v4daddr)			\
+	*stackptr++ = &_base->root;				\
+	for (u = rcu_deref_locked(_base->root, _base);		\
+	     u != peer_avl_empty; ) {				\
+		int cmp = addr_compare(_daddr, &u->daddr);	\
+		if (cmp == 0)					\
 			break;					\
-		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
+		if (cmp == -1)					\
 			v = &u->avl_left;			\
 		else						\
 			v = &u->avl_right;			\
 		*stackptr++ = v;				\
-		u = *v;						\
+		u = rcu_deref_locked(*v, _base);		\
 	}							\
 	u;							\
 })
 
+static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
+{
+	int cur, old = atomic_read(ptr);
+
+	while (old != u) {
+		*newv = old + a;
+		cur = atomic_cmpxchg(ptr, old, *newv);
+		if (cur == old)
+			return true;
+		old = cur;
+	}
+	return false;
+}
+
 /*
- * Called with rcu_read_lock_bh()
+ * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
  * in an endless loop.
  * But every pointer we follow is guaranteed to be valid thanks to RCU.
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
-static struct inet_peer *lookup_rcu_bh(__be32 daddr)
+static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
+				    struct inet_peer_base *base,
+				    int *newrefcnt)
 {
-	struct inet_peer *u = rcu_dereference_bh(peers.root);
+	struct inet_peer *u = rcu_dereference(base->root);
 	int count = 0;
 
 	while (u != peer_avl_empty) {
-		if (daddr == u->v4daddr) {
+		int cmp = addr_compare(daddr, &u->daddr);
+		if (cmp == 0) {
 			/* Before taking a reference, check if this entry was
 			 * deleted, unlink_from_pool() sets refcnt=-1 to make
 			 * distinction between an unused entry (refcnt=0) and
 			 * a freed one.
 			 */
-			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+			if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
 				u = NULL;
 			return u;
 		}
-		if ((__force __u32)daddr < (__force __u32)u->v4daddr)
-			u = rcu_dereference_bh(u->avl_left);
+		if (cmp == -1)
+			u = rcu_dereference(u->avl_left);
 		else
-			u = rcu_dereference_bh(u->avl_right);
+			u = rcu_dereference(u->avl_right);
 		if (unlikely(++count == PEER_MAXDEPTH))
 			break;
 	}
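Two helpers in this hunk carry the interesting semantics. `addr_compare()` gives the tree one total order over both families by comparing a single 32-bit word of the key for IPv4 and all four for IPv6, and `atomic_add_unless_return()` is the usual cmpxchg loop of `atomic_add_unless()` extended to report the post-add value, so the caller can tell whether it took the very first reference. A standalone C11 model of the latter (hypothetical `_sketch` names; the kernel version using `atomic_cmpxchg()` is shown above):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add `a` to *ptr unless it currently equals `u`; report the new value
 * through *newv.  Returning false means *ptr == u, i.e. the entry is
 * marked deleted (refcnt == -1 in the inetpeer code). */
static bool add_unless_return_sketch(atomic_int *ptr, int a, int u, int *newv)
{
	int old = atomic_load(ptr);

	while (old != u) {
		*newv = old + a;
		/* on failure, compare_exchange reloads `old` and we retry */
		if (atomic_compare_exchange_weak(ptr, &old, *newv))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int refcnt = 0;
	int newref;

	/* refcnt 0 -> 1: the entry sat on the unused list; inet_getpeer()
	 * uses exactly this newrefcnt == 1 signal to call unlink_from_unused(). */
	if (add_unless_return_sketch(&refcnt, 1, -1, &newref))
		printf("took reference, refcnt now %d\n", newref);

	/* refcnt == -1: unlink_from_pool() already claimed the node. */
	atomic_store(&refcnt, -1);
	if (!add_unless_return_sketch(&refcnt, 1, -1, &newref))
		printf("entry deleted, lookup returns NULL\n");
	return 0;
}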
@@ -207,15 +254,17 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
 }
 
 /* Called with local BH disabled and the pool lock held. */
-#define lookup_rightempty(start)				\
+#define lookup_rightempty(start, base)				\
 ({								\
-	struct inet_peer *u, **v;				\
+	struct inet_peer *u;					\
+	struct inet_peer __rcu **v;				\
 	*stackptr++ = &start->avl_left;				\
 	v = &start->avl_left;					\
-	for (u = *v; u->avl_right != peer_avl_empty; ) {	\
+	for (u = rcu_deref_locked(*v, base);			\
+	     u->avl_right != peer_avl_empty_rcu; ) {		\
 		v = &u->avl_right;				\
 		*stackptr++ = v;				\
-		u = *v;						\
+		u = rcu_deref_locked(*v, base);			\
 	}							\
 	u;							\
 })
@@ -224,74 +273,76 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
  * Variable names are the proof of operation correctness.
  * Look into mm/map_avl.c for more detail description of the ideas.
  */
-static void peer_avl_rebalance(struct inet_peer **stack[],
-		struct inet_peer ***stackend)
+static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
+		struct inet_peer __rcu ***stackend,
+		struct inet_peer_base *base)
 {
-	struct inet_peer **nodep, *node, *l, *r;
+	struct inet_peer __rcu **nodep;
+	struct inet_peer *node, *l, *r;
 	int lh, rh;
 
 	while (stackend > stack) {
 		nodep = *--stackend;
-		node = *nodep;
-		l = node->avl_left;
-		r = node->avl_right;
+		node = rcu_deref_locked(*nodep, base);
+		l = rcu_deref_locked(node->avl_left, base);
+		r = rcu_deref_locked(node->avl_right, base);
 		lh = node_height(l);
 		rh = node_height(r);
 		if (lh > rh + 1) { /* l: RH+2 */
 			struct inet_peer *ll, *lr, *lrl, *lrr;
 			int lrh;
-			ll = l->avl_left;
-			lr = l->avl_right;
+			ll = rcu_deref_locked(l->avl_left, base);
+			lr = rcu_deref_locked(l->avl_right, base);
 			lrh = node_height(lr);
 			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
-				node->avl_left = lr;	/* lr: RH or RH+1 */
-				node->avl_right = r;	/* r: RH */
+				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
+				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
 				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
-				l->avl_left = ll;	/* ll: RH+1 */
-				l->avl_right = node;	/* node: RH+1 or RH+2 */
+				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
+				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
 				l->avl_height = node->avl_height + 1;
-				*nodep = l;
+				RCU_INIT_POINTER(*nodep, l);
 			} else { /* ll: RH, lr: RH+1 */
-				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
-				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
-				node->avl_left = lrr;	/* lrr: RH or RH-1 */
-				node->avl_right = r;	/* r: RH */
+				lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
+				lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
+				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
+				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
 				node->avl_height = rh + 1; /* node: RH+1 */
-				l->avl_left = ll;	/* ll: RH */
-				l->avl_right = lrl;	/* lrl: RH or RH-1 */
+				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
+				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
 				l->avl_height = rh + 1;	/* l: RH+1 */
-				lr->avl_left = l;	/* l: RH+1 */
-				lr->avl_right = node;	/* node: RH+1 */
+				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
+				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
 				lr->avl_height = rh + 2;
-				*nodep = lr;
+				RCU_INIT_POINTER(*nodep, lr);
 			}
 		} else if (rh > lh + 1) { /* r: LH+2 */
 			struct inet_peer *rr, *rl, *rlr, *rll;
 			int rlh;
-			rr = r->avl_right;
-			rl = r->avl_left;
+			rr = rcu_deref_locked(r->avl_right, base);
+			rl = rcu_deref_locked(r->avl_left, base);
 			rlh = node_height(rl);
 			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
-				node->avl_right = rl;	/* rl: LH or LH+1 */
-				node->avl_left = l;	/* l: LH */
+				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
+				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
 				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
-				r->avl_right = rr;	/* rr: LH+1 */
-				r->avl_left = node;	/* node: LH+1 or LH+2 */
+				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
+				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
 				r->avl_height = node->avl_height + 1;
-				*nodep = r;
+				RCU_INIT_POINTER(*nodep, r);
 			} else { /* rr: RH, rl: RH+1 */
-				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
-				rll = rl->avl_left;	/* rll: LH or LH-1 */
-				node->avl_right = rll;	/* rll: LH or LH-1 */
-				node->avl_left = l;	/* l: LH */
+				rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
+				rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
+				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
+				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
 				node->avl_height = lh + 1; /* node: LH+1 */
-				r->avl_right = rr;	/* rr: LH */
-				r->avl_left = rlr;	/* rlr: LH or LH-1 */
+				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
+				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
 				r->avl_height = lh + 1;	/* r: LH+1 */
-				rl->avl_right = r;	/* r: LH+1 */
-				rl->avl_left = node;	/* node: LH+1 */
+				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
+				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
 				rl->avl_height = lh + 2;
-				*nodep = rl;
+				RCU_INIT_POINTER(*nodep, rl);
 			}
 		} else {
 			node->avl_height = (lh > rh ? lh : rh) + 1;
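The rotations themselves are unchanged; what this hunk changes is that every pointer store goes through `RCU_INIT_POINTER()` (a plain store, no barrier) rather than `rcu_assign_pointer()`, since rebalancing only rewires nodes that are already visible. A reader that catches the tree mid-rotation merely walks a momentarily inconsistent shape, which the PEER_MAXDEPTH bound in `lookup_rcu()` and the seqlock recheck in `inet_getpeer()` make safe. For reference, the single-rotation branch (`lrh <= node_height(ll)`) reduces to this plain-C sketch, with NULL children standing in for `peer_avl_empty` (made-up `_sketch` names, not kernel code):

#include <stdio.h>

struct node_sketch {
	struct node_sketch *avl_left, *avl_right;
	int avl_height;
	int key;
};

static int height(struct node_sketch *n) { return n ? n->avl_height : 0; }

/* Right rotation of a left-heavy subtree rooted at *nodep; the stack
 * slot *nodep plays the role the kernel rewrites with RCU_INIT_POINTER(). */
static void rotate_right_sketch(struct node_sketch **nodep)
{
	struct node_sketch *node = *nodep;
	struct node_sketch *l = node->avl_left;
	struct node_sketch *lr = l->avl_right;

	node->avl_left = lr;               /* lr: RH or RH+1 */
	node->avl_height = height(lr) + 1; /* mirrors node->avl_height = lrh + 1 */
	l->avl_right = node;
	l->avl_height = node->avl_height + 1;
	*nodep = l;                        /* new subtree root */
}

int main(void)
{
	/* left chain 3 <- 2 <- 1, heights 3/2/1: left-heavy at the root */
	struct node_sketch a = { NULL, NULL, 1, 1 };
	struct node_sketch b = { &a, NULL, 2, 2 };
	struct node_sketch c = { &b, NULL, 3, 3 };
	struct node_sketch *root = &c;

	rotate_right_sketch(&root);
	printf("root=%d left=%d right=%d\n",	/* root=2 left=1 right=3 */
	       root->key, root->avl_left->key, root->avl_right->key);
	return 0;
}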
@@ -300,14 +351,14 @@ static void peer_avl_rebalance(struct inet_peer **stack[],
 }
 
 /* Called with local BH disabled and the pool lock held. */
-#define link_to_pool(n)						\
+#define link_to_pool(n, base)					\
 do {								\
 	n->avl_height = 1;					\
-	n->avl_left = peer_avl_empty;				\
-	n->avl_right = peer_avl_empty;				\
-	smp_wmb(); /* lockless readers can catch us now */	\
-	**--stackptr = n;					\
-	peer_avl_rebalance(stack, stackptr);			\
+	n->avl_left = peer_avl_empty_rcu;			\
+	n->avl_right = peer_avl_empty_rcu;			\
+	/* lockless readers can catch us now */			\
+	rcu_assign_pointer(**--stackptr, n);			\
+	peer_avl_rebalance(stack, stackptr, base);		\
 } while (0)
 
 static void inetpeer_free_rcu(struct rcu_head *head)
@@ -316,13 +367,14 @@ static void inetpeer_free_rcu(struct rcu_head *head)
 }
 
 /* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
+			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
 	int do_free;
 
 	do_free = 0;
 
-	spin_lock_bh(&peers.lock);
+	write_seqlock_bh(&base->lock);
 	/* Check the reference counter. It was artificially incremented by 1
 	 * in cleanup() function to prevent sudden disappearing. If we can
 	 * atomically (because of lockless readers) take this last reference,
@@ -330,38 +382,37 @@ static void unlink_from_pool(struct inet_peer *p)
 	 * We use refcnt=-1 to alert lockless readers this entry is deleted.
 	 */
 	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
-		struct inet_peer **stack[PEER_MAXDEPTH];
-		struct inet_peer ***stackptr, ***delp;
-		if (lookup(p->v4daddr, stack) != p)
+		struct inet_peer __rcu ***stackptr, ***delp;
+		if (lookup(&p->daddr, stack, base) != p)
 			BUG();
 		delp = stackptr - 1; /* *delp[0] == p */
-		if (p->avl_left == peer_avl_empty) {
+		if (p->avl_left == peer_avl_empty_rcu) {
 			*delp[0] = p->avl_right;
 			--stackptr;
 		} else {
 			/* look for a node to insert instead of p */
 			struct inet_peer *t;
-			t = lookup_rightempty(p);
-			BUG_ON(*stackptr[-1] != t);
+			t = lookup_rightempty(p, base);
+			BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
 			**--stackptr = t->avl_left;
-			/* t is removed, t->v4daddr > x->v4daddr for any
+			/* t is removed, t->daddr > x->daddr for any
 			 * x in p->avl_left subtree.
 			 * Put t in the old place of p. */
-			*delp[0] = t;
+			RCU_INIT_POINTER(*delp[0], t);
 			t->avl_left = p->avl_left;
 			t->avl_right = p->avl_right;
 			t->avl_height = p->avl_height;
 			BUG_ON(delp[1] != &p->avl_left);
 			delp[1] = &t->avl_left; /* was &p->avl_left */
 		}
-		peer_avl_rebalance(stack, stackptr);
-		peers.total--;
+		peer_avl_rebalance(stack, stackptr, base);
+		base->total--;
 		do_free = 1;
 	}
-	spin_unlock_bh(&peers.lock);
+	write_sequnlock_bh(&base->lock);
 
 	if (do_free)
-		call_rcu_bh(&p->rcu, inetpeer_free_rcu);
+		call_rcu(&p->rcu, inetpeer_free_rcu);
 	else
 		/* The node is used again. Decrease the reference counter
 		 * back. The loop "cleanup -> unlink_from_unused
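The deletion gate above hinges on a single cmpxchg: cleanup already holds an artificial reference, so observing `refcnt == 1` proves no one else does, and swinging it to -1 simultaneously claims the node for freeing and warns lockless readers off (that is the value `atomic_add_unless_return()` refuses to add to). Modelled standalone in C11 (hypothetical `_sketch` names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* refcnt == 1 means "only cleanup's artificial reference remains";
 * 1 -> -1 both claims the node and marks it dead for lookup_rcu(). */
static bool try_unlink_sketch(atomic_int *refcnt)
{
	int expected = 1;
	return atomic_compare_exchange_strong(refcnt, &expected, -1);
}

int main(void)
{
	atomic_int quiet = 1;   /* only cleanup's reference */
	atomic_int busy  = 2;   /* some other CPU still holds it */

	printf("quiet node freed: %s\n", try_unlink_sketch(&quiet) ? "yes" : "no");
	printf("busy  node freed: %s\n", try_unlink_sketch(&busy)  ? "yes" : "no");
	return 0;
}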
@@ -373,8 +424,18 @@ static void unlink_from_pool(struct inet_peer *p)
 	inet_putpeer(p);
 }
 
+static struct inet_peer_base *family_to_base(int family)
+{
+	return (family == AF_INET ? &v4_peers : &v6_peers);
+}
+
+static struct inet_peer_base *peer_to_base(struct inet_peer *p)
+{
+	return family_to_base(p->daddr.family);
+}
+
 /* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
 	struct inet_peer *p = NULL;
 
@@ -406,79 +467,101 @@ static int cleanup_once(unsigned long ttl)
 	 * happen because of entry limits in route cache. */
 		return -1;
 
-	unlink_from_pool(p);
+	unlink_from_pool(p, peer_to_base(p), stack);
 	return 0;
 }
 
 /* Called with or without local BH being disabled. */
-struct inet_peer *inet_getpeer(__be32 daddr, int create)
+struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 {
+	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
+	struct inet_peer_base *base = family_to_base(daddr->family);
 	struct inet_peer *p;
-	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
+	unsigned int sequence;
+	int invalidated, newrefcnt = 0;
 
 	/* Look up for the address quickly, lockless.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
-	rcu_read_lock_bh();
-	p = lookup_rcu_bh(daddr);
-	rcu_read_unlock_bh();
+	rcu_read_lock();
+	sequence = read_seqbegin(&base->lock);
+	p = lookup_rcu(daddr, base, &newrefcnt);
+	invalidated = read_seqretry(&base->lock, sequence);
+	rcu_read_unlock();
 
 	if (p) {
-		/* The existing node has been found.
+found:		/* The existing node has been found.
 		 * Remove the entry from unused list if it was there.
 		 */
-		unlink_from_unused(p);
+		if (newrefcnt == 1)
+			unlink_from_unused(p);
 		return p;
 	}
 
+	/* If no writer did a change during our lookup, we can return early. */
+	if (!create && !invalidated)
+		return NULL;
+
 	/* retry an exact lookup, taking the lock before.
 	 * At least, nodes should be hot in our cache.
 	 */
-	spin_lock_bh(&peers.lock);
-	p = lookup(daddr, stack);
+	write_seqlock_bh(&base->lock);
+	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		atomic_inc(&p->refcnt);
-		spin_unlock_bh(&peers.lock);
-		/* Remove the entry from unused list if it was there. */
-		unlink_from_unused(p);
-		return p;
+		newrefcnt = atomic_inc_return(&p->refcnt);
+		write_sequnlock_bh(&base->lock);
+		goto found;
 	}
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
-		p->v4daddr = daddr;
+		p->daddr = *daddr;
 		atomic_set(&p->refcnt, 1);
 		atomic_set(&p->rid, 0);
-		atomic_set(&p->ip_id_count, secure_ip_id(daddr));
+		atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
 		p->tcp_ts_stamp = 0;
+		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+		p->rate_tokens = 0;
+		p->rate_last = 0;
+		p->pmtu_expires = 0;
+		p->pmtu_orig = 0;
+		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
 		INIT_LIST_HEAD(&p->unused);
 
 
 		/* Link the node. */
-		link_to_pool(p);
-		peers.total++;
+		link_to_pool(p, base);
+		base->total++;
 	}
-	spin_unlock_bh(&peers.lock);
+	write_sequnlock_bh(&base->lock);
 
-	if (peers.total >= inet_peer_threshold)
+	if (base->total >= inet_peer_threshold)
 		/* Remove one less-recently-used entry. */
-		cleanup_once(0);
+		cleanup_once(0, stack);
 
 	return p;
 }
 
+static int compute_total(void)
+{
+	return v4_peers.total + v6_peers.total;
+}
+EXPORT_SYMBOL_GPL(inet_getpeer);
+
 /* Called with local BH disabled. */
 static void peer_check_expire(unsigned long dummy)
 {
 	unsigned long now = jiffies;
-	int ttl;
+	int ttl, total;
+	struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 
-	if (peers.total >= inet_peer_threshold)
+	total = compute_total();
+	if (total >= inet_peer_threshold)
 		ttl = inet_peer_minttl;
 	else
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
-					peers.total / inet_peer_threshold * HZ;
-	while (!cleanup_once(ttl)) {
+					total / inet_peer_threshold * HZ;
+	while (!cleanup_once(ttl, stack)) {
 		if (jiffies != now)
 			break;
 	}
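With two pools, the expiry heuristics now key off `compute_total()`. The ttl expression is worth unpacking: it interpolates linearly from `inet_peer_maxttl` when the pools are empty down toward `inet_peer_minttl` as the total approaches the threshold, dividing by HZ before multiplying, presumably to keep the intermediate product small. A runnable model of just that arithmetic (the threshold value here is made up for the demo; 120 s and 600 s are the long-standing min/max ttl defaults):

#include <stdio.h>

#define HZ 1000

/* The ttl interpolation from peer_check_expire(): falls linearly from
 * maxttl (empty pool) toward minttl as total approaches the threshold. */
static long compute_ttl(int total, int threshold, long minttl, long maxttl)
{
	if (total >= threshold)
		return minttl;
	return maxttl - (maxttl - minttl) / HZ * total / threshold * HZ;
}

int main(void)
{
	long minttl = 120 * HZ;        /* 120 s */
	long maxttl = 600 * HZ;        /* 10 min */
	int threshold = 65536;         /* made-up demo value */
	int total;

	for (total = 0; total <= threshold; total += threshold / 4)
		printf("total=%6d -> ttl=%ld jiffies\n", total,
		       compute_ttl(total, threshold, minttl, maxttl));
	return 0;
}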
@@ -486,13 +569,14 @@ static void peer_check_expire(unsigned long dummy)
 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
 	 * interval depending on the total number of entries (more entries,
 	 * less interval). */
-	if (peers.total >= inet_peer_threshold)
+	total = compute_total();
+	if (total >= inet_peer_threshold)
 		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
 	else
 		peer_periodic_timer.expires = jiffies
 			+ inet_peer_gc_maxtime
 			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
-				peers.total / inet_peer_threshold * HZ;
+				total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }
 
@@ -508,3 +592,45 @@ void inet_putpeer(struct inet_peer *p)
 
 	local_bh_enable();
 }
+EXPORT_SYMBOL_GPL(inet_putpeer);
+
+/*
+ * Check transmit rate limitation for given message.
+ * The rate information is held in the inet_peer entries now.
+ * This function is generic and could be used for other purposes
+ * too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
+ *
+ * Note that the same inet_peer fields are modified by functions in
+ * route.c too, but these work for packet destinations while xrlim_allow
+ * works for icmp destinations. This means the rate limiting information
+ * for one "ip object" is shared - and these ICMPs are twice limited:
+ * by source and by destination.
+ *
+ * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
+ *		     SHOULD allow setting of rate limits
+ *
+ * Shared between ICMPv4 and ICMPv6.
+ */
+#define XRLIM_BURST_FACTOR 6
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
+{
+	unsigned long now, token;
+	bool rc = false;
+
+	if (!peer)
+		return true;
+
+	token = peer->rate_tokens;
+	now = jiffies;
+	token += now - peer->rate_last;
+	peer->rate_last = now;
+	if (token > XRLIM_BURST_FACTOR * timeout)
+		token = XRLIM_BURST_FACTOR * timeout;
+	if (token >= timeout) {
+		token -= timeout;
+		rc = true;
+	}
+	peer->rate_tokens = token;
+	return rc;
+}
+EXPORT_SYMBOL(inet_peer_xrlim_allow);
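The new `inet_peer_xrlim_allow()` moves ICMP rate limiting onto the peer entry itself: elapsed jiffies are the tokens, the bucket is capped at `XRLIM_BURST_FACTOR` timeouts, and each transmitted message costs one `timeout`, giving a burst of six followed by a steady one message per timeout interval. The same arithmetic in a self-contained userspace sketch (made-up `_sketch` names; `now` is passed in instead of read from jiffies):

#include <stdbool.h>
#include <stdio.h>

#define XRLIM_BURST_FACTOR 6

struct bucket_sketch {
	unsigned long rate_tokens;
	unsigned long rate_last;
};

/* Same token-bucket logic as inet_peer_xrlim_allow() above. */
static bool xrlim_allow_sketch(struct bucket_sketch *b, unsigned long now,
			       int timeout)
{
	unsigned long token = b->rate_tokens + (now - b->rate_last);
	bool rc = false;

	b->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * (unsigned long)timeout)
		token = XRLIM_BURST_FACTOR * (unsigned long)timeout;
	if (token >= (unsigned long)timeout) {
		token -= timeout;
		rc = true;
	}
	b->rate_tokens = token;
	return rc;
}

int main(void)
{
	struct bucket_sketch b = { .rate_tokens = 6 * 100, .rate_last = 0 };
	unsigned long now = 0;
	int i, sent = 0;

	/* back-to-back attempts: the full bucket allows 6, then throttles */
	for (i = 0; i < 10; i++)
		sent += xrlim_allow_sketch(&b, now, 100);
	printf("burst: %d of 10 sent\n", sent);       /* prints 6 */

	now += 100;                                   /* one timeout later */
	printf("after 100 jiffies: %d\n", xrlim_allow_sketch(&b, now, 100));
	return 0;
}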