path: root/include/net/inetpeer.h
author	Eric Dumazet <eric.dumazet@gmail.com>	2009-11-12 04:33:09 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-13 23:46:58 -0500
commit	2c1409a0a2b88585ec0c03f1de0aafa178c56313 (patch)
tree	e9fc25badfb716e5368079535f548cc8e4b5576a /include/net/inetpeer.h
parent	234b27c3fd58fc0e15c04dd0fbf4337fac9c2a06 (diff)
inetpeer: Optimize inet_getid()
While investigating network latencies, I found that inet_getid() was a contention point for some workloads, as inet_peer_idlock is shared by all inet_getid() users regardless of peers.

One way to fix this is to make ip_id_count an atomic_t instead of a __u16, and use atomic_add_return().

In order to keep sizeof(struct inet_peer) = 64 on 64-bit arches, tcp_ts_stamp is also converted from "unsigned long" to __u32.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
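[Note: the following is a minimal userspace sketch, not part of the patch, illustrating the lock-free ID allocation pattern the commit adopts. It uses C11 <stdatomic.h>; the helper name next_ip_id and the atomic_uint counter are illustrative assumptions, not kernel identifiers. C11's atomic_fetch_add() returns the old value directly, whereas the kernel's atomic_add_return() returns the new value, which is why the in-tree code subtracts "more" afterwards.]

/*
 * Userspace sketch of the lock-free ID counter pattern.
 * atomic_fetch_add() returns the previous value, so the first ID of the
 * reserved range comes back without any lock and without a subtraction.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint peer_id;	/* stands in for inet_peer.ip_id_count */

/* Reserve 1 + more IDs and return the first one, without taking a lock. */
static unsigned short next_ip_id(atomic_uint *id_count, int more)
{
	unsigned int delta = (unsigned int)more + 1;

	return (unsigned short)atomic_fetch_add(id_count, delta);
}

int main(void)
{
	printf("%u\n", next_ip_id(&peer_id, 0));	/* 0 */
	printf("%u\n", next_ip_id(&peer_id, 2));	/* 1, reserving 1..3 */
	printf("%u\n", next_ip_id(&peer_id, 0));	/* 4 */
	return 0;
}

Compared with the removed spin_lock_bh()/spin_unlock_bh() pair, a single atomic read-modify-write avoids serializing all callers on one global lock while still handing out non-overlapping ID ranges per peer.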
Diffstat (limited to 'include/net/inetpeer.h')
-rw-r--r--	include/net/inetpeer.h	16
1 file changed, 5 insertions, 11 deletions
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 35ad7b930467..87b1df0d4d8c 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -17,15 +17,15 @@ struct inet_peer {
 	/* group together avl_left,avl_right,v4daddr to speedup lookups */
 	struct inet_peer	*avl_left, *avl_right;
 	__be32			v4daddr;	/* peer's address */
-	__u16			avl_height;
-	__u16			ip_id_count;	/* IP ID for the next packet */
+	__u32			avl_height;
 	struct list_head	unused;
 	__u32			dtime;		/* the time of last use of not
 					 	 * referenced entries */
 	atomic_t		refcnt;
 	atomic_t		rid;		/* Frag reception counter */
+	atomic_t		ip_id_count;	/* IP ID for the next packet */
 	__u32			tcp_ts;
-	unsigned long		tcp_ts_stamp;
+	__u32			tcp_ts_stamp;
 };
 
 void			inet_initpeers(void) __init;
@@ -36,17 +36,11 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create);
 /* can be called from BH context or outside */
 extern void inet_putpeer(struct inet_peer *p);
 
-extern spinlock_t inet_peer_idlock;
 /* can be called with or without local BH being disabled */
 static inline __u16	inet_getid(struct inet_peer *p, int more)
 {
-	__u16 id;
-
-	spin_lock_bh(&inet_peer_idlock);
-	id = p->ip_id_count;
-	p->ip_id_count += 1 + more;
-	spin_unlock_bh(&inet_peer_idlock);
-	return id;
+	more++;
+	return atomic_add_return(more, &p->ip_id_count) - more;
 }
 
 #endif /* _NET_INETPEER_H */