author     Eric Dumazet <eric.dumazet@gmail.com>    2010-06-14 15:35:21 -0400
committer  David S. Miller <davem@davemloft.net>    2010-06-15 02:13:08 -0400
commit     d6cc1d642de9284cb26488ea390d915b50ee2504
tree       7652471f541c11f63f54e282a04a9cba614e8d5e /net/ipv4/inetpeer.c
parent     6b10de38f0ef4e921a1f6e5cba2b6c92d6b46ecd
inetpeer: various changes
Try to reduce cache line contention in peer management, in order to lower IP
defragmentation overhead.
- peer_fake_node is marked 'const' to make sure it is not modified.
  (tested with CONFIG_DEBUG_RODATA=y)
- Group variables into two structures to reduce the number of dirtied cache
  lines. One, named "peers", holds the AVL tree root, the number of entries,
  and the associated lock. (candidate for RCU conversion)
- A second one, named "unused_peers", holds the unused list and its lock.
- Add a !list_empty() test in unlink_from_unused() to avoid taking the lock
  when the entry is not on the unused list.
- Use atomic_dec_and_lock() in inet_putpeer(), so the unused-list lock is
  only taken when the reference count actually drops to zero (illustrated
  in the sketch below).
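
As a reading aid, here is a minimal, self-contained sketch (not part of the
patch) combining two of the ideas above: grouping the unused list with its
lock in one structure, and using atomic_dec_and_lock() so the lock is only
touched when the refcount really hits zero. The "obj", "pool_unused" and
"pool_put" names are invented for illustration only.

    #include <linux/types.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/jiffies.h>
    #include <asm/atomic.h>

    struct obj {                            /* hypothetical object, not in the patch */
            atomic_t                refcnt;
            struct list_head        unused;
            __u32                   dtime;
    };

    /* Hot data grouped in one structure, as done for "unused_peers" above,
     * so the list head and its lock share as few cache lines as possible. */
    static struct {
            struct list_head        list;
            spinlock_t              lock;
    } pool_unused = {
            .list = LIST_HEAD_INIT(pool_unused.list),
            .lock = __SPIN_LOCK_UNLOCKED(pool_unused.lock),
    };

    void pool_put(struct obj *o)
    {
            local_bh_disable();     /* the lock is also taken with BH disabled */

            if (atomic_dec_and_lock(&o->refcnt, &pool_unused.lock)) {
                    /* Last reference: park the object on the unused list.
                     * atomic_dec_and_lock() acquired the spinlock for us;
                     * on the common path (refcnt still > 0) it was never taken. */
                    list_add_tail(&o->unused, &pool_unused.list);
                    o->dtime = (__u32)jiffies;
                    spin_unlock(&pool_unused.lock);
            }

            local_bh_enable();
    }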
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/inetpeer.c')
-rw-r--r--   net/ipv4/inetpeer.c   94
1 file changed, 56 insertions(+), 38 deletions(-)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 6bcfe52a9c87..035673fd42d4 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -70,17 +70,25 @@
 static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
-static struct inet_peer peer_fake_node = {
-	.avl_left	= &peer_fake_node,
-	.avl_right	= &peer_fake_node,
+
+#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+static const struct inet_peer peer_fake_node = {
+	.avl_left	= peer_avl_empty,
+	.avl_right	= peer_avl_empty,
 	.avl_height	= 0
 };
-#define peer_avl_empty (&peer_fake_node)
-static struct inet_peer *peer_root = peer_avl_empty;
-static DEFINE_RWLOCK(peer_pool_lock);
+
+static struct {
+	struct inet_peer *root;
+	rwlock_t	lock;
+	int		total;
+} peers = {
+	.root		= peer_avl_empty,
+	.lock		= __RW_LOCK_UNLOCKED(peers.lock),
+	.total		= 0,
+};
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
-static int peer_total;
 /* Exported for sysctl_net_ipv4.  */
 int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
 					 * aggressively at this stage */
@@ -89,8 +97,13 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min
 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
-static LIST_HEAD(unused_peers);
-static DEFINE_SPINLOCK(inet_peer_unused_lock);
+static struct {
+	struct list_head	list;
+	spinlock_t		lock;
+} unused_peers = {
+	.list		= LIST_HEAD_INIT(unused_peers.list),
+	.lock		= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
+};
 
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -131,9 +144,11 @@ void __init inet_initpeers(void)
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-	spin_lock_bh(&inet_peer_unused_lock);
-	list_del_init(&p->unused);
-	spin_unlock_bh(&inet_peer_unused_lock);
+	if (!list_empty(&p->unused)) {
+		spin_lock_bh(&unused_peers.lock);
+		list_del_init(&p->unused);
+		spin_unlock_bh(&unused_peers.lock);
+	}
 }
 
 /*
@@ -146,9 +161,9 @@ static void unlink_from_unused(struct inet_peer *p)
 	struct inet_peer *u, **v;				\
 	if (_stack != NULL) {					\
 		stackptr = _stack;				\
-		*stackptr++ = &peer_root;			\
+		*stackptr++ = &peers.root;			\
 	}							\
-	for (u = peer_root; u != peer_avl_empty; ) {		\
+	for (u = peers.root; u != peer_avl_empty; ) {		\
 		if (_daddr == u->v4daddr)			\
 			break;					\
 		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
@@ -262,7 +277,7 @@ do { \
 	n->avl_right = peer_avl_empty;				\
 	**--stackptr = n;					\
 	peer_avl_rebalance(stack, stackptr);			\
-} while(0)
+} while (0)
 
 /* May be called with local BH enabled. */
 static void unlink_from_pool(struct inet_peer *p)
@@ -271,7 +286,7 @@ static void unlink_from_pool(struct inet_peer *p)
 
 	do_free = 0;
 
-	write_lock_bh(&peer_pool_lock);
+	write_lock_bh(&peers.lock);
 	/* Check the reference counter.  It was artificially incremented by 1
 	 * in cleanup() function to prevent sudden disappearing.  If the
 	 * reference count is still 1 then the node is referenced only as `p'
@@ -303,10 +318,10 @@ static void unlink_from_pool(struct inet_peer *p)
 			delp[1] = &t->avl_left; /* was &p->avl_left */
 		}
 		peer_avl_rebalance(stack, stackptr);
-		peer_total--;
+		peers.total--;
 		do_free = 1;
 	}
-	write_unlock_bh(&peer_pool_lock);
+	write_unlock_bh(&peers.lock);
 
 	if (do_free)
 		kmem_cache_free(peer_cachep, p);
@@ -326,16 +341,16 @@ static int cleanup_once(unsigned long ttl)
 	struct inet_peer *p = NULL;
 
 	/* Remove the first entry from the list of unused nodes. */
-	spin_lock_bh(&inet_peer_unused_lock);
-	if (!list_empty(&unused_peers)) {
+	spin_lock_bh(&unused_peers.lock);
+	if (!list_empty(&unused_peers.list)) {
 		__u32 delta;
 
-		p = list_first_entry(&unused_peers, struct inet_peer, unused);
+		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
 		delta = (__u32)jiffies - p->dtime;
 
 		if (delta < ttl) {
 			/* Do not prune fresh entries. */
-			spin_unlock_bh(&inet_peer_unused_lock);
+			spin_unlock_bh(&unused_peers.lock);
 			return -1;
 		}
 
@@ -345,7 +360,7 @@ static int cleanup_once(unsigned long ttl)
 		 * before unlink_from_pool() call. */
 		atomic_inc(&p->refcnt);
 	}
-	spin_unlock_bh(&inet_peer_unused_lock);
+	spin_unlock_bh(&unused_peers.lock);
 
 	if (p == NULL)
 		/* It means that the total number of USED entries has
@@ -364,11 +379,11 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
 
 	/* Look up for the address quickly. */
-	read_lock_bh(&peer_pool_lock);
+	read_lock_bh(&peers.lock);
 	p = lookup(daddr, NULL);
 	if (p != peer_avl_empty)
 		atomic_inc(&p->refcnt);
-	read_unlock_bh(&peer_pool_lock);
+	read_unlock_bh(&peers.lock);
 
 	if (p != peer_avl_empty) {
 		/* The existing node has been found. */
@@ -390,7 +405,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 	atomic_set(&n->ip_id_count, secure_ip_id(daddr));
 	n->tcp_ts_stamp = 0;
 
-	write_lock_bh(&peer_pool_lock);
+	write_lock_bh(&peers.lock);
 	/* Check if an entry has suddenly appeared. */
 	p = lookup(daddr, stack);
 	if (p != peer_avl_empty)
@@ -399,10 +414,10 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 	/* Link the node. */
 	link_to_pool(n);
 	INIT_LIST_HEAD(&n->unused);
-	peer_total++;
-	write_unlock_bh(&peer_pool_lock);
+	peers.total++;
+	write_unlock_bh(&peers.lock);
 
-	if (peer_total >= inet_peer_threshold)
+	if (peers.total >= inet_peer_threshold)
 		/* Remove one less-recently-used entry. */
 		cleanup_once(0);
 
@@ -411,7 +426,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 out_free:
 	/* The appropriate node is already in the pool. */
 	atomic_inc(&p->refcnt);
-	write_unlock_bh(&peer_pool_lock);
+	write_unlock_bh(&peers.lock);
 	/* Remove the entry from unused list if it was there. */
 	unlink_from_unused(p);
 	/* Free preallocated the preallocated node. */
@@ -425,12 +440,12 @@ static void peer_check_expire(unsigned long dummy)
 	unsigned long now = jiffies;
 	int ttl;
 
-	if (peer_total >= inet_peer_threshold)
+	if (peers.total >= inet_peer_threshold)
 		ttl = inet_peer_minttl;
 	else
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
-					peer_total / inet_peer_threshold * HZ;
+					peers.total / inet_peer_threshold * HZ;
 	while (!cleanup_once(ttl)) {
 		if (jiffies != now)
 			break;
@@ -439,22 +454,25 @@ static void peer_check_expire(unsigned long dummy)
 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
 	 * interval depending on the total number of entries (more entries,
 	 * less interval). */
-	if (peer_total >= inet_peer_threshold)
+	if (peers.total >= inet_peer_threshold)
 		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
 	else
 		peer_periodic_timer.expires = jiffies
 			+ inet_peer_gc_maxtime
 			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
-				peer_total / inet_peer_threshold * HZ;
+				peers.total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }
 
 void inet_putpeer(struct inet_peer *p)
 {
-	spin_lock_bh(&inet_peer_unused_lock);
-	if (atomic_dec_and_test(&p->refcnt)) {
-		list_add_tail(&p->unused, &unused_peers);
+	local_bh_disable();
+
+	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
+		list_add_tail(&p->unused, &unused_peers.list);
 		p->dtime = (__u32)jiffies;
+		spin_unlock(&unused_peers.lock);
 	}
-	spin_unlock_bh(&inet_peer_unused_lock);
+
+	local_bh_enable();
 }