path: root/net/ipv6
author	Eric Dumazet <dada1@cosmosbay.com>	2008-10-29 05:11:14 -0400
committer	David S. Miller <davem@davemloft.net>	2008-10-29 05:11:14 -0400
commit	271b72c7fa82c2c7a795bc16896149933110672d (patch)
tree	5634b95c04b4a7ac9babf2d8ac34cfb6c38a8f83 /net/ipv6
parent	645ca708f936b2fbeb79e52d7823e3eb2c0905f8 (diff)
udp: RCU handling for Unicast packets.
Goals are :

1) Optimizing handling of incoming Unicast UDP frames, so that no memory
   writes should happen in the fast path.

   Note: Multicasts and broadcasts will still need to take a lock,
   because doing a full lockless lookup in this case is difficult.

2) No expensive operations in the socket bind/unhash phases :

   - No expensive synchronize_rcu() calls.

   - No added rcu_head in the socket structure, increasing memory needs,
     but more importantly, forcing us to use call_rcu() calls, which have
     the bad property of making socket structures cold.
     (The rcu grace period between socket freeing and its potential reuse
     leaves the socket cold in the CPU cache.)
     David did a previous patch using call_rcu() and noticed a 20%
     impact on TCP connection rates.
     Quoting Christoph Lameter :
      "Right. That results in cacheline cooldown. You'd want to recycle
       the object as they are cache hot on a per cpu basis. That is screwed
       up by the delayed regular rcu processing. We have seen multiple
       regressions due to cacheline cooldown. The only choice in cacheline
       hot sensitive areas is to deal with the complexity that comes with
       SLAB_DESTROY_BY_RCU or give up on RCU."

   - Because udp sockets are allocated from a dedicated kmem_cache,
     use of SLAB_DESTROY_BY_RCU can help here.

Theory of operation :
---------------------

As the lookup is lockfree (using rcu_read_lock()/rcu_read_unlock()),
special care must be taken by readers and writers.

Use of SLAB_DESTROY_BY_RCU is tricky too, because a socket can be freed,
reused, and inserted in a different chain or, in the worst case, in the
same chain while readers are doing lookups at the same time.

In order to avoid loops, a reader must check that each socket found in a
chain really belongs to the chain the reader was traversing. If it finds a
mismatch, the lookup must start again at the beginning. This *restart* loop
is the reason we had to use rdlock for the multicast case, because we don't
want to send the same message several times to the same socket.

We use RCU only for the fast path.
Thus, /proc/net/udp still takes spinlocks.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
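For readers unfamiliar with the pattern, below is a minimal, illustrative
sketch of the lockless lookup described in "Theory of operation". Every
identifier in it (struct item, ITEM_HASH_SIZE, item_hashfn, item_lookup) is
hypothetical and not part of this patch; the real implementation is
__udp6_lib_lookup() in the diff further down.

#include <linux/rculist.h>
#include <asm/atomic.h>

#define ITEM_HASH_SIZE 128		/* hypothetical power-of-two table size */

struct item {
	struct hlist_node node;
	atomic_t	  refcnt;
	unsigned int	  key;		/* decides which chain the item lives in */
};

static inline unsigned int item_hashfn(unsigned int key)
{
	return key & (ITEM_HASH_SIZE - 1);
}

static struct item *item_lookup(struct hlist_head *table, unsigned int key)
{
	unsigned int hash = item_hashfn(key);
	struct hlist_node *pos;
	struct item *it;

	rcu_read_lock();
begin:
	hlist_for_each_entry_rcu(it, pos, &table[hash], node) {
		/*
		 * SLAB_DESTROY_BY_RCU means the object can be freed and
		 * reused under us; if it now hashes to another chain we
		 * may be walking the wrong list, so restart from the head.
		 */
		if (item_hashfn(it->key) != hash)
			goto begin;
		if (it->key != key)
			continue;
		/* A 0 -> 1 transition would resurrect a dying object. */
		if (!atomic_inc_not_zero(&it->refcnt))
			continue;
		/*
		 * Re-check after pinning: the object may have been reused
		 * for a different key just before we took the reference.
		 */
		if (it->key != key) {
			atomic_dec(&it->refcnt);	/* real code would use a put helper */
			goto begin;
		}
		rcu_read_unlock();
		return it;
	}
	rcu_read_unlock();
	return NULL;
}

The patch below applies exactly this idea to the UDP socket hash chains:
verify sk_hash against the chain being walked, take the reference with
atomic_inc_not_zero(), and re-validate the match after the reference is held.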
Diffstat (limited to 'net/ipv6')
-rw-r--r--	net/ipv6/udp.c	31
-rw-r--r--	net/ipv6/udplite.c	1
2 files changed, 25 insertions(+), 7 deletions(-)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index ccee7244ca0..1d9790e43df 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -97,24 +97,40 @@ static struct sock *__udp6_lib_lookup(struct net *net,
 		struct in6_addr *daddr, __be16 dport,
 		int dif, struct udp_table *udptable)
 {
-	struct sock *sk, *result = NULL;
+	struct sock *sk, *result;
 	struct hlist_node *node;
 	unsigned short hnum = ntohs(dport);
 	unsigned int hash = udp_hashfn(net, hnum);
 	struct udp_hslot *hslot = &udptable->hash[hash];
-	int score, badness = -1;
+	int score, badness;
 
-	spin_lock(&hslot->lock);
-	sk_for_each(sk, node, &hslot->head) {
+	rcu_read_lock();
+begin:
+	result = NULL;
+	badness = -1;
+	sk_for_each_rcu(sk, node, &hslot->head) {
+		/*
+		 * lockless reader, and SLAB_DESTROY_BY_RCU items:
+		 * We must check this item was not moved to another chain
+		 */
+		if (udp_hashfn(net, sk->sk_hash) != hash)
+			goto begin;
 		score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
 		if (score > badness) {
 			result = sk;
 			badness = score;
 		}
 	}
-	if (result)
-		sock_hold(result);
-	spin_unlock(&hslot->lock);
+	if (result) {
+		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+			result = NULL;
+		else if (unlikely(compute_score(result, net, hnum, saddr, sport,
+					daddr, dport, dif) < badness)) {
+			sock_put(result);
+			goto begin;
+		}
+	}
+	rcu_read_unlock();
 	return result;
 }
 
@@ -1062,6 +1078,7 @@ struct proto udpv6_prot = {
 	.sysctl_wmem	   = &sysctl_udp_wmem_min,
 	.sysctl_rmem	   = &sysctl_udp_rmem_min,
 	.obj_size	   = sizeof(struct udp6_sock),
+	.slab_flags	   = SLAB_DESTROY_BY_RCU,
 	.h.udp_table	   = &udp_table,
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_udpv6_setsockopt,
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index f1e892a99e0..ba162a82458 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -49,6 +49,7 @@ struct proto udplitev6_prot = {
 	.unhash		   = udp_lib_unhash,
 	.get_port	   = udp_v6_get_port,
 	.obj_size	   = sizeof(struct udp6_sock),
+	.slab_flags	   = SLAB_DESTROY_BY_RCU,
 	.h.udp_table	   = &udplite_table,
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_udpv6_setsockopt,
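For context (not part of the net/ipv6 diff shown above): the new .slab_flags
field is consumed when the protocol registers its socket slab, where
proto_register() in net/core/sock.c ORs it into the kmem_cache flags. The
following is a simplified sketch of that path, not the literal kernel source:

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		/* prot->slab_flags carries SLAB_DESTROY_BY_RCU for UDP/UDP-Lite */
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					       SLAB_HWCACHE_ALIGN | prot->slab_flags,
					       NULL);
		if (prot->slab == NULL)
			return -ENOBUFS;
	}
	/* ... remaining registration steps omitted ... */
	return 0;
}

With SLAB_DESTROY_BY_RCU on the udp6_sock cache, freed sockets can be reused
immediately within the cache, but the underlying pages are only returned to
the page allocator after an RCU grace period, which is what makes the
restart-and-revalidate lookup above safe.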