author	Eric Dumazet <eric.dumazet@gmail.com>	2009-11-12 04:33:09 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-13 23:46:58 -0500
commit	2c1409a0a2b88585ec0c03f1de0aafa178c56313 (patch)
tree	e9fc25badfb716e5368079535f548cc8e4b5576a /net
parent	234b27c3fd58fc0e15c04dd0fbf4337fac9c2a06 (diff)
inetpeer: Optimize inet_getid()
While investigating network latencies, I found inet_getid() was a contention point for some workloads, as inet_peer_idlock is shared by all inet_getid() users regardless of peers.

One way to fix this is to make ip_id_count an atomic_t instead of __u16, and use atomic_add_return().

In order to keep sizeof(struct inet_peer) = 64 on 64-bit arches, tcp_ts_stamp is also converted to __u32 instead of "unsigned long".

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
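The counterpart header change falls outside this net/-limited view, but for context, the lockless inet_getid() that results in include/net/inetpeer.h looks roughly like this (a sketch, assuming the atomic_t conversion described above):

	static inline __u16 inet_getid(struct inet_peer *p, int more)
	{
		more++;
		/* Reserve 1 + more IDs in a single atomic op; return the first. */
		return atomic_add_return(more, &p->ip_id_count) - more;
	}

Each caller claims its block of IDs with one atomic_add_return(), so no spinlock is shared across peers.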
Diffstat (limited to 'net')
 net/ipv4/inetpeer.c |  5 +----
 net/ipv4/route.c    |  2 +-
 net/ipv4/tcp_ipv4.c | 16 ++++++++--------
 3 files changed, 10 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index b1fbe18feb5a..6bcfe52a9c87 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -67,9 +67,6 @@
  *		ip_id_count: idlock
  */
 
-/* Exported for inet_getid inline function.  */
-DEFINE_SPINLOCK(inet_peer_idlock);
-
 static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
@@ -390,7 +387,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 	n->v4daddr = daddr;
 	atomic_set(&n->refcnt, 1);
 	atomic_set(&n->rid, 0);
-	n->ip_id_count = secure_ip_id(daddr);
+	atomic_set(&n->ip_id_count, secure_ip_id(daddr));
 	n->tcp_ts_stamp = 0;
 
 	write_lock_bh(&peer_pool_lock);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ff258b57680b..4284ceef7945 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2852,7 +2852,7 @@ static int rt_fill_info(struct net *net,
 	error = rt->u.dst.error;
 	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
 	if (rt->peer) {
-		id = rt->peer->ip_id_count;
+		id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
 		if (rt->peer->tcp_ts_stamp) {
 			ts = rt->peer->tcp_ts;
 			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
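The identification value filled in here feeds a 16-bit IP header field, while ip_id_count is now a full 32-bit atomic_t, hence the & 0xffff mask. A minimal standalone sketch (hypothetical values) of how the wider counter wraps through the 16-bit ID space:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ip_id_count = 0x0001fffe;	/* counter already past 16 bits */
		int i;

		for (i = 0; i < 4; i++) {
			uint16_t id = ip_id_count++ & 0xffff;
			printf("id = 0x%04x\n", (unsigned)id);	/* fffe, ffff, 0000, 0001 */
		}
		return 0;
	}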
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index cf7f2086e6e0..df18ce04f41e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -204,7 +204,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	 * when trying new connection.
 	 */
 	if (peer != NULL &&
-	    peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
+	    (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
 		tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
 		tp->rx_opt.ts_recent = peer->tcp_ts;
 	}
@@ -1308,7 +1308,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		    tcp_death_row.sysctl_tw_recycle &&
 		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
 		    peer->v4daddr == saddr) {
-			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
+			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
 			    (s32)(peer->tcp_ts - req->ts_recent) >
 							TCP_PAWS_WINDOW) {
 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
@@ -1727,9 +1727,9 @@ int tcp_v4_remember_stamp(struct sock *sk)
 
 	if (peer) {
 		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
-		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
-		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
-			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
+		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
+			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
 			peer->tcp_ts = tp->rx_opt.ts_recent;
 		}
 		if (release_it)
@@ -1748,9 +1748,9 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
 		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 
 		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
-		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
-		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
-			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
+		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
+			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
 			peer->tcp_ts = tcptw->tw_ts_recent;
 		}
 		inet_putpeer(peer);
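With tcp_ts_stamp narrowed from "unsigned long" to __u32, the PAWS freshness checks above are rewritten in subtraction form, which stays correct even when the 32-bit seconds count wraps; the old addition form can overflow and misjudge a fresh stamp as stale. A minimal sketch with hypothetical values:

	#include <stdint.h>
	#include <stdio.h>

	#define TCP_PAWS_MSL 60	/* seconds, as in the kernel */

	int main(void)
	{
		uint32_t stamp = 0xffffffe0;	/* stamp + TCP_PAWS_MSL overflows u32 */
		uint32_t now = stamp + 8;	/* only 8 seconds have elapsed */

		/* Patched form: unsigned subtraction gives the true elapsed time. */
		printf("subtraction form fresh: %d\n", now - stamp <= TCP_PAWS_MSL);	/* 1 */

		/* Old form: the sum wraps to a small value, wrongly reporting stale. */
		printf("addition form fresh:    %d\n", stamp + TCP_PAWS_MSL >= now);	/* 0 */
		return 0;
	}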