author	Eric Dumazet <dada1@cosmosbay.com>	2008-11-16 22:40:17 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-16 22:40:17 -0500
commit	3ab5aee7fe840b5b1b35a8d1ac11c3de5281e611 (patch)
tree	468296b7be813643248d4ca67497d6ddb6934fc6 /include/net/inet_timewait_sock.h
parent	88ab1932eac721c6e7336708558fa5ed02c85c80 (diff)
net: Convert TCP & DCCP hash tables to use RCU / hlist_nulls
RCU was added to UDP lookups using a fast infrastructure:
- socket kmem_caches use SLAB_DESTROY_BY_RCU and do not pay the
price of call_rcu() at freeing time.
- hlist_nulls lets lookups run with very few memory barriers (see the sketch below).
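For context, a minimal, hedged sketch (not taken from this patch; twsk_sketch, twsk_cachep and tw_chain are illustrative names) of how a SLAB_DESTROY_BY_RCU cache and hlist_nulls chain heads are set up:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list_nulls.h>

struct twsk_sketch {				/* illustrative object, not a kernel type */
	struct hlist_nulls_node	node;
	unsigned int		key;
};

static struct kmem_cache *twsk_cachep;		/* illustrative cache */
static struct hlist_nulls_head tw_chain[16];	/* illustrative hash table */

static int __init twsk_sketch_init(void)
{
	int i;

	/* SLAB_DESTROY_BY_RCU: freed objects may be reused immediately, so no
	 * call_rcu() is paid at free time; lookups revalidate instead. */
	twsk_cachep = kmem_cache_create("twsk_sketch",
					sizeof(struct twsk_sketch), 0,
					SLAB_DESTROY_BY_RCU, NULL);
	if (!twsk_cachep)
		return -ENOMEM;

	/* Each chain ends in a nulls marker carrying its slot number, so a
	 * reader can detect that it drifted onto another chain. */
	for (i = 0; i < ARRAY_SIZE(tw_chain); i++)
		INIT_HLIST_NULLS_HEAD(&tw_chain[i], i);
	return 0;
}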
This patch uses the same infrastructure for TCP/DCCP established
and timewait sockets.
Thanks to SLAB_DESTROY_BY_RCU, there is no slowdown for applications
using short-lived TCP connections. A followup patch, converting
rwlocks to spinlocks, will speed this case up even further.
__inet_lookup_established() is pretty fast now that we no longer have to
dirty a contended cache line (read_lock()/read_unlock()); the lookup runs
under rcu_read_lock(), as sketched below.
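A hedged sketch of the revalidating lookup pattern this enables (simplified; match() is a stand-in for the real address/port comparison, and the real code lives in __inet_lookup_established()):

#include <linux/rcupdate.h>
#include <linux/rculist_nulls.h>
#include <net/sock.h>

static struct sock *lookup_sketch(struct hlist_nulls_head *head,
				  unsigned int slot, const void *key,
				  int (*match)(const struct sock *, const void *))
{
	struct sock *sk;
	const struct hlist_nulls_node *node;

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, head) {
		if (!match(sk, key))
			continue;
		/* The socket may be freed and reused at any moment: take a
		 * reference first, then re-check the key. */
		if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
			goto begin;
		if (unlikely(!match(sk, key))) {
			sock_put(sk);
			goto begin;
		}
		goto out;
	}
	/* A nulls marker from another slot means the socket moved chains
	 * while we walked: restart the search. */
	if (get_nulls_value(node) != slot)
		goto begin;
	sk = NULL;
out:
	rcu_read_unlock();
	return sk;
}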
Only the established and timewait hash tables are converted to RCU
(the bind and listen tables still use traditional locking).
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/inet_timewait_sock.h')
-rw-r--r--	include/net/inet_timewait_sock.h	10
1 file changed, 5 insertions, 5 deletions
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 80e4977631b8..4b8ece22b8e9 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -110,7 +110,7 @@ struct inet_timewait_sock {
 #define tw_state		__tw_common.skc_state
 #define tw_reuse		__tw_common.skc_reuse
 #define tw_bound_dev_if		__tw_common.skc_bound_dev_if
-#define tw_node			__tw_common.skc_node
+#define tw_node			__tw_common.skc_nulls_node
 #define tw_bind_node		__tw_common.skc_bind_node
 #define tw_refcnt		__tw_common.skc_refcnt
 #define tw_hash			__tw_common.skc_hash
@@ -137,10 +137,10 @@ struct inet_timewait_sock {
 	struct hlist_node	tw_death_node;
 };
 
-static inline void inet_twsk_add_node(struct inet_timewait_sock *tw,
-				      struct hlist_head *list)
+static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+				      struct hlist_nulls_head *list)
 {
-	hlist_add_head(&tw->tw_node, list);
+	hlist_nulls_add_head_rcu(&tw->tw_node, list);
 }
 
 static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
@@ -175,7 +175,7 @@ static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
 }
 
 #define inet_twsk_for_each(tw, node, head) \
-	hlist_for_each_entry(tw, node, head, tw_node)
+	hlist_nulls_for_each_entry(tw, node, head, tw_node)
 
 #define inet_twsk_for_each_inmate(tw, node, jail) \
 	hlist_for_each_entry(tw, node, jail, tw_death_node)
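A hypothetical walker (illustrative only, not part of this patch) showing how the converted inet_twsk_for_each macro is now used with an hlist_nulls cursor; it must run under the chain lock or rcu_read_lock():

#include <net/inet_timewait_sock.h>

static unsigned int count_timewait_sketch(struct hlist_nulls_head *head)
{
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	unsigned int cnt = 0;

	/* Iteration stops when the chain's nulls marker is reached. */
	inet_twsk_for_each(tw, node, head)
		cnt++;
	return cnt;
}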