author		Eric Dumazet <eric.dumazet@gmail.com>	2010-04-08 19:03:29 -0400
committer	David S. Miller <davem@davemloft.net>	2010-04-13 04:41:33 -0400
commit		b6c6712a42ca3f9fa7f4a3d7c40e3a9dd1fd9e03
tree		42032b4978874e8ffcf6c851d13324b8c8c7c113 /include/net/sock.h
parent		7a161ea92471087a1579239d7a58dd06eaa5601c
net: sk_dst_cache RCUification
With the latest CONFIG_PROVE_RCU infrastructure, I felt comfortable
enough to make this work.

sk->sk_dst_cache is currently protected by an rwlock (sk_dst_lock).
This rwlock is read-locked for a very short time, and dst entries are
already freed after an RCU grace period. This calls for RCU again :)

This patch converts sk_dst_lock to a spinlock and uses RCU for readers.

__sk_dst_get() is supposed to be called with rcu_read_lock() held or
with the socket locked by the user, so it uses the matching
rcu_dereference_check() condition:
(rcu_read_lock_held() || sock_owned_by_user(sk))

This patch avoids two atomic ops per tx packet on connected UDP
sockets, for example, and leaves sk_dst_lock much less frequently
dirtied.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
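
The conversion is the classic RCU publish/subscribe pattern: readers
dereference the cached pointer inside an RCU read-side critical section,
while writers still exclude one another with an ordinary spinlock and
publish replacements with rcu_assign_pointer(). A minimal sketch of the
pattern, using hypothetical obj/cache names rather than the actual
socket fields:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	int payload;
	struct rcu_head rcu;
};

static struct obj *cache;
static DEFINE_SPINLOCK(cache_lock);	/* serializes writers only */

/* Reader: lockless; no atomic ops, no shared cache line dirtied. */
static int read_payload(void)
{
	struct obj *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(cache);
	if (p)
		val = p->payload;
	rcu_read_unlock();
	return val;
}

static void free_obj(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

/* Writer: the spinlock excludes other writers, never readers. */
static void update_obj(struct obj *newp)
{
	struct obj *old;

	spin_lock(&cache_lock);
	old = rcu_dereference_check(cache, lockdep_is_held(&cache_lock));
	rcu_assign_pointer(cache, newp);
	spin_unlock(&cache_lock);
	if (old)
		call_rcu(&old->rcu, free_obj);	/* free after a grace period */
}

The safety argument is the same one the changelog makes for
sk_dst_cache: dst entries are already freed only after an RCU grace
period, so a reader that fetched the old pointer can keep using it
until its read-side critical section ends.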
Diffstat (limited to 'include/net/sock.h')
 include/net/sock.h | 47 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 17 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index b4603cd54fcd..56df440a950b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -262,7 +262,7 @@ struct sock {
 #ifdef CONFIG_XFRM
 	struct xfrm_policy	*sk_policy[2];
 #endif
-	rwlock_t		sk_dst_lock;
+	spinlock_t		sk_dst_lock;
 	atomic_t		sk_rmem_alloc;
 	atomic_t		sk_wmem_alloc;
 	atomic_t		sk_omem_alloc;
@@ -1192,7 +1192,8 @@ extern unsigned long sock_i_ino(struct sock *sk);
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
-	return sk->sk_dst_cache;
+	return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
+						       sock_owned_by_user(sk));
 }
 
 static inline struct dst_entry *
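
rcu_dereference_check() behaves like plain rcu_dereference(), but under
CONFIG_PROVE_RCU it also accepts the caller-supplied condition, so the
new __sk_dst_get() is warning-free in both of its legitimate calling
contexts. A sketch of those two contexts; the callers here are
hypothetical, not part of this patch:

/* 1. Inside an RCU read-side critical section. */
static void peek_dst_rcu(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = __sk_dst_get(sk);	/* ok: rcu_read_lock_held() is true */
	if (dst)
		pr_debug("cached dst: %p\n", dst);
	rcu_read_unlock();	/* dst must not be used past this point */
}

/* 2. With the socket owned by the user, e.g. in a syscall path. */
static void peek_dst_locked(struct sock *sk)
{
	struct dst_entry *dst;

	lock_sock(sk);
	dst = __sk_dst_get(sk);	/* ok: sock_owned_by_user(sk) is true */
	if (dst)
		pr_debug("cached dst: %p\n", dst);
	release_sock(sk);
}

Any other context makes CONFIG_PROVE_RCU emit a lockdep splat, which is
exactly the debugging coverage the changelog credits for making this
conversion comfortable.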
@@ -1200,50 +1201,62 @@ sk_dst_get(struct sock *sk)
 {
 	struct dst_entry *dst;
 
-	read_lock(&sk->sk_dst_lock);
-	dst = sk->sk_dst_cache;
+	rcu_read_lock();
+	dst = rcu_dereference(sk->sk_dst_cache);
 	if (dst)
 		dst_hold(dst);
-	read_unlock(&sk->sk_dst_lock);
+	rcu_read_unlock();
 	return dst;
 }
 
+extern void sk_reset_txq(struct sock *sk);
+
+static inline void dst_negative_advice(struct sock *sk)
+{
+	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+
+	if (dst && dst->ops->negative_advice) {
+		ndst = dst->ops->negative_advice(dst);
+
+		if (ndst != dst) {
+			rcu_assign_pointer(sk->sk_dst_cache, ndst);
+			sk_reset_txq(sk);
+		}
+	}
+}
+
 static inline void
 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
 	struct dst_entry *old_dst;
 
 	sk_tx_queue_clear(sk);
-	old_dst = sk->sk_dst_cache;
-	sk->sk_dst_cache = dst;
+	old_dst = rcu_dereference_check(sk->sk_dst_cache,
+					lockdep_is_held(&sk->sk_dst_lock));
+	rcu_assign_pointer(sk->sk_dst_cache, dst);
 	dst_release(old_dst);
 }
 
 static inline void
 sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
-	write_lock(&sk->sk_dst_lock);
+	spin_lock(&sk->sk_dst_lock);
 	__sk_dst_set(sk, dst);
-	write_unlock(&sk->sk_dst_lock);
+	spin_unlock(&sk->sk_dst_lock);
 }
 
 static inline void
 __sk_dst_reset(struct sock *sk)
 {
-	struct dst_entry *old_dst;
-
-	sk_tx_queue_clear(sk);
-	old_dst = sk->sk_dst_cache;
-	sk->sk_dst_cache = NULL;
-	dst_release(old_dst);
+	__sk_dst_set(sk, NULL);
 }
 
 static inline void
 sk_dst_reset(struct sock *sk)
 {
-	write_lock(&sk->sk_dst_lock);
+	spin_lock(&sk->sk_dst_lock);
 	__sk_dst_reset(sk);
-	write_unlock(&sk->sk_dst_lock);
+	spin_unlock(&sk->sk_dst_lock);
 }
 
 extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
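
The "two atomic ops per tx packet" saved on connected UDP sockets are
the read_lock()/read_unlock() pair the old sk_dst_get() paid on every
transmit; the RCU read-side primitives write nothing to shared memory.
A condensed before/after restatement of the read side from the hunk
above, annotated with where the atomics go (function names here are
hypothetical):

/* Before: two atomic read-modify-writes on sk_dst_lock per call. */
static struct dst_entry *sk_dst_get_rwlock(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);	/* atomic op #1, dirties the lock */
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);	/* atomic op #2 */
	return dst;
}

/* After: rcu_read_lock() is a no-op or a preempt-count tweak; the only
 * remaining atomic op is the refcount taken on the dst itself. */
static struct dst_entry *sk_dst_get_rcu(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	if (dst)
		dst_hold(dst);
	rcu_read_unlock();
	return dst;
}

Writers (route changes, negative advice) are far rarer than transmits,
so taking a spinlock on the write side while removing every reader-side
store to sk_dst_lock is a clear trade.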