author	Eric Dumazet <dada1@cosmosbay.com>	2008-10-29 05:11:14 -0400
committer	David S. Miller <davem@davemloft.net>	2008-10-29 05:11:14 -0400
commit	271b72c7fa82c2c7a795bc16896149933110672d
tree	5634b95c04b4a7ac9babf2d8ac34cfb6c38a8f83	/include/net/sock.h
parent	645ca708f936b2fbeb79e52d7823e3eb2c0905f8
udp: RCU handling for Unicast packets.
Goals are :

1) Optimizing handling of incoming Unicast UDP frames, so that no memory
   writes should happen in the fast path.

   Note: Multicasts and broadcasts will still need to take a lock, because
   doing a full lockless lookup in this case is difficult.

2) No expensive operations in the socket bind/unhash phases :

   - No expensive synchronize_rcu() calls.

   - No added rcu_head in the socket structure, which would increase memory
     needs but, more importantly, would force us to use call_rcu() calls,
     which have the bad property of making the socket structure cold.
     (The RCU grace period between socket freeing and its potential reuse
     makes the socket cold in the CPU cache.)
     David did a previous patch using call_rcu() and noticed a 20% impact
     on TCP connection rates.
     Quoting Christoph Lameter :
     "Right. That results in cacheline cooldown. You'd want to recycle
      the object as they are cache hot on a per cpu basis. That is screwed
      up by the delayed regular rcu processing. We have seen multiple
      regressions due to cacheline cooldown.
      The only choice in cacheline hot sensitive areas is to deal with the
      complexity that comes with SLAB_DESTROY_BY_RCU or give up on RCU."

   - Because UDP sockets are allocated from a dedicated kmem_cache, use of
     SLAB_DESTROY_BY_RCU can help here.

Theory of operation :
---------------------

As the lookup is lockless (using rcu_read_lock()/rcu_read_unlock()),
special attention must be paid by readers and writers.

Use of SLAB_DESTROY_BY_RCU is tricky too, because a socket can be freed,
reused, and inserted in a different chain or, in the worst case, in the
same chain while readers are performing lookups at the same time.

In order to avoid loops, a reader must check that each socket found in a
chain really belongs to the chain the reader is traversing. If it finds a
mismatch, the lookup must start again at the beginning. This *restart* loop
is the reason we had to use rdlock for the multicast case, because we don't
want to send the same message several times to the same socket.

We use RCU only for the fast path.
Thus, /proc/net/udp still takes spinlocks.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
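To make the restart rule under "Theory of operation" concrete, here is a minimal reader-side sketch. It is NOT the lookup added to net/ipv4/udp.c by this series: sketch_lookup(), sketch_match() and their chain/hash parameters are illustrative only, and the match predicate is reduced to a bare sk_hash comparison (a real lookup matches the full address/port tuple).

#include <linux/rcupdate.h>
#include <net/sock.h>

static inline int sketch_match(const struct sock *sk, unsigned int hash)
{
	return sk->sk_hash == hash;	/* simplified stand-in predicate */
}

static struct sock *sketch_lookup(struct hlist_head *chain, unsigned int hash)
{
	struct sock *sk;
	struct hlist_node *node;

	rcu_read_lock();
begin:
	sk_for_each_rcu(sk, node, chain) {
		if (!sketch_match(sk, hash))
			continue;
		/*
		 * With SLAB_DESTROY_BY_RCU the object may already have been
		 * freed and reused: only keep it if its refcount was not zero.
		 */
		if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
			goto begin;
		/*
		 * The socket may have been reused and hashed into another
		 * chain between the match and the refcount increment: check
		 * that it still belongs to the chain being traversed, and
		 * restart from the beginning on a mismatch.
		 */
		if (unlikely(!sketch_match(sk, hash))) {
			sock_put(sk);
			goto begin;
		}
		rcu_read_unlock();
		return sk;
	}
	rcu_read_unlock();
	return NULL;
}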
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--	include/net/sock.h | 37
1 file changed, 36 insertions(+), 1 deletion(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index d200dfbe1ef6..0bea25db5471 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -363,6 +363,27 @@ static __inline__ int sk_del_node_init(struct sock *sk)
 	return rc;
 }
 
+static __inline__ int __sk_del_node_init_rcu(struct sock *sk)
+{
+	if (sk_hashed(sk)) {
+		hlist_del_init_rcu(&sk->sk_node);
+		return 1;
+	}
+	return 0;
+}
+
+static __inline__ int sk_del_node_init_rcu(struct sock *sk)
+{
+	int rc = __sk_del_node_init_rcu(sk);
+
+	if (rc) {
+		/* paranoid for a while -acme */
+		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+		__sock_put(sk);
+	}
+	return rc;
+}
+
 static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
 {
 	hlist_add_head(&sk->sk_node, list);
@@ -374,6 +395,17 @@ static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
 	__sk_add_node(sk, list);
 }
 
+static __inline__ void __sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+{
+	hlist_add_head_rcu(&sk->sk_node, list);
+}
+
+static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+{
+	sock_hold(sk);
+	__sk_add_node_rcu(sk, list);
+}
+
 static __inline__ void __sk_del_bind_node(struct sock *sk)
 {
 	__hlist_del(&sk->sk_bind_node);
@@ -387,6 +419,8 @@ static __inline__ void sk_add_bind_node(struct sock *sk,
 
 #define sk_for_each(__sk, node, list) \
 	hlist_for_each_entry(__sk, node, list, sk_node)
+#define sk_for_each_rcu(__sk, node, list) \
+	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
 #define sk_for_each_from(__sk, node) \
 	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
 		hlist_for_each_entry_from(__sk, node, sk_node)
@@ -589,8 +623,9 @@ struct proto {
 	int *sysctl_rmem;
 	int max_header;
 
-	struct kmem_cache *slab;
+	struct kmem_cache *slab;
 	unsigned int obj_size;
+	int slab_flags;
 
 	atomic_t *orphan_count;
 
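For context, a hedged sketch of how the helpers and the new slab_flags field added above are meant to be combined. This is not the actual net/ipv4/udp.c change made elsewhere in this commit: sketch_prot, hash_sketch(), unhash_sketch() and their lock argument are illustrative stand-ins.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/sock.h>

static struct proto sketch_prot = {
	.name		= "SKETCH",
	.obj_size	= sizeof(struct sock),
	.slab_flags	= SLAB_DESTROY_BY_RCU,	/* freed objects stay type-stable for RCU readers */
};

/*
 * Writer side: hashing/unhashing still happens under the chain lock, but
 * uses the _rcu list helpers so lockless readers always see a consistent
 * chain, and no synchronize_rcu()/call_rcu() is needed on unhash.
 */
static void hash_sketch(struct sock *sk, struct hlist_head *chain, spinlock_t *lock)
{
	spin_lock(lock);
	sk_add_node_rcu(sk, chain);	/* sock_hold() + hlist_add_head_rcu() */
	spin_unlock(lock);
}

static void unhash_sketch(struct sock *sk, spinlock_t *lock)
{
	spin_lock(lock);
	sk_del_node_init_rcu(sk);	/* hlist_del_init_rcu() + __sock_put() */
	spin_unlock(lock);
}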