author	Eric Dumazet <dada1@cosmosbay.com>	2007-03-06 23:23:10 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:23:49 -0400
commit	243bbcaa09e8482aa28065cbc2eb99f0ca2fc8d6 (patch)
tree	e542a4ed151de034779fa311f8d89a55ac90f685 /net/ipv4/inetpeer.c
parent	43e683926f808cec9802466c27cee7499eda3d11 (diff)
[IPV4]: Optimize inet_getpeer()
1) Some sysctl vars are declared __read_mostly.

2) We can avoid updating stack[] when doing an AVL lookup only.

   The lookup() macro is extended to receive a second parameter, which
   may be NULL in the case of a pure lookup (no need to save the AVL
   path). This removes unnecessary instructions, because the compiler
   knows whether this _stack parameter is NULL or not.

   Text size of net/ipv4/inetpeer.o is 2063 bytes instead of 2107 on x86_64.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
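The trick behind point 2 is worth spelling out: because NULL is a
compile-time constant, the compiler evaluates if (_stack) while expanding
each use of the macro and deletes the dead branch entirely. Below is a
minimal user-space sketch of the same idiom (toy names, not the kernel
code; it relies on GNU C statement expressions, just as lookup() does):

	#include <stddef.h>
	#include <stdio.h>

	/*
	 * Toy lookup: scans an int array; if _stack is non-NULL it also
	 * records the index of every probe.  When the caller passes a
	 * constant NULL, the compiler sees "if (NULL)" and drops the
	 * bookkeeping code from that expansion.
	 */
	#define toy_lookup(_key, _arr, _len, _stack)		\
	({							\
		int _i, _found = -1;				\
		for (_i = 0; _i < (_len); _i++) {		\
			if (_stack)				\
				(_stack)[_i] = _i;		\
			if ((_arr)[_i] == (_key)) {		\
				_found = _i;			\
				break;				\
			}					\
		}						\
		_found;						\
	})

	int main(void)
	{
		int arr[] = { 3, 1, 4, 1, 5 };
		int path[5];

		/* Pure lookup: no path bookkeeping is compiled in. */
		printf("found at %d\n", toy_lookup(4, arr, 5, (int *)NULL));

		/* Lookup that also records the probe path. */
		printf("found at %d\n", toy_lookup(5, arr, 5, path));
		return 0;
	}

Built with -O1 or higher, GCC can be expected to compile the NULL call
down to the bare scan loop; in the patch, that is where the 44-byte text
shrink reported above comes from.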
Diffstat (limited to 'net/ipv4/inetpeer.c')
-rw-r--r--	net/ipv4/inetpeer.c	38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index db3ef96bdfd9..2f44e6128068 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -87,10 +87,12 @@ static DEFINE_RWLOCK(peer_pool_lock);
 
 static int peer_total;
 /* Exported for sysctl_net_ipv4. */
-int inet_peer_threshold = 65536 + 128;	/* start to throw entries more
-					 * aggressively at this stage */
-int inet_peer_minttl = 120 * HZ;	/* TTL under high load: 120 sec */
-int inet_peer_maxttl = 10 * 60 * HZ;	/* usual time to live: 10 min */
+int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
+							 * aggressively at this stage */
+int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
+int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
+int inet_peer_gc_mintime __read_mostly = 10 * HZ;
+int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
 static struct inet_peer *inet_peer_unused_head;
 static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
@@ -99,9 +101,6 @@ static DEFINE_SPINLOCK(inet_peer_unused_lock);
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
 
-/* Exported for sysctl_net_ipv4. */
-int inet_peer_gc_mintime = 10 * HZ,
-	inet_peer_gc_maxtime = 120 * HZ;
 
 /* Called from ip_output.c:ip_init */
 void __init inet_initpeers(void)
@@ -151,20 +150,27 @@ static void unlink_from_unused(struct inet_peer *p)
 	spin_unlock_bh(&inet_peer_unused_lock);
 }
 
-/* Called with local BH disabled and the pool lock held. */
-#define lookup(daddr)						\
+/*
+ * Called with local BH disabled and the pool lock held.
+ * _stack is known to be NULL or not at compile time,
+ * so compiler will optimize the if (_stack) tests.
+ */
+#define lookup(_daddr,_stack)					\
 ({								\
 	struct inet_peer *u, **v;				\
-	stackptr = stack;					\
-	*stackptr++ = &peer_root;				\
+	if (_stack) {						\
+		stackptr = _stack;				\
+		*stackptr++ = &peer_root;			\
+	}							\
 	for (u = peer_root; u != peer_avl_empty; ) {		\
-		if (daddr == u->v4daddr)			\
+		if (_daddr == u->v4daddr)			\
 			break;					\
-		if ((__force __u32)daddr < (__force __u32)u->v4daddr)	\
+		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
 			v = &u->avl_left;			\
 		else						\
 			v = &u->avl_right;			\
-		*stackptr++ = v;				\
+		if (_stack)					\
+			*stackptr++ = v;			\
 		u = *v;						\
 	}							\
 	u;							\
@@ -288,7 +294,7 @@ static void unlink_from_pool(struct inet_peer *p)
 	if (atomic_read(&p->refcnt) == 1) {
 		struct inet_peer **stack[PEER_MAXDEPTH];
 		struct inet_peer ***stackptr, ***delp;
-		if (lookup(p->v4daddr) != p)
+		if (lookup(p->v4daddr, stack) != p)
 			BUG();
 		delp = stackptr - 1; /* *delp[0] == p */
 		if (p->avl_left == peer_avl_empty) {
@@ -373,7 +379,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 
 	/* Look up for the address quickly. */
 	read_lock_bh(&peer_pool_lock);
-	p = lookup(daddr);
+	p = lookup(daddr, NULL);
 	if (p != peer_avl_empty)
 		atomic_inc(&p->refcnt);
 	read_unlock_bh(&peer_pool_lock);
@@ -400,7 +406,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 
 	write_lock_bh(&peer_pool_lock);
 	/* Check if an entry has suddenly appeared. */
-	p = lookup(daddr);
+	p = lookup(daddr, stack);
 	if (p != peer_avl_empty)
 		goto out_free;
 
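A closing note on point 1 of the changelog: __read_mostly (from
<linux/cache.h>) places a variable in a dedicated data section so the
linker can group rarely written variables together, keeping them off
cache lines that frequently written data would otherwise keep bouncing.
A rough user-space analogue of that section-placement idea, assuming a
GCC/ELF toolchain (the names and section string here are illustrative,
not the kernel's):

	#include <stdio.h>

	/*
	 * User-space stand-in for the kernel's __read_mostly annotation:
	 * place the variable in its own ELF section so the linker can
	 * pack read-mostly data together, away from write-hot data.
	 */
	#define my_read_mostly __attribute__((__section__(".data.read_mostly")))

	/* Mirrors the patch: read on every lookup, written only via sysctl. */
	int tunable_threshold my_read_mostly = 65536 + 128;

	int main(void)
	{
		/* Readers see an ordinary int; only its placement in the
		 * object file changes (visible with objdump -t). */
		printf("threshold = %d\n", tunable_threshold);
		return 0;
	}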