author	Eric Dumazet <eric.dumazet@gmail.com>	2009-10-23 02:34:34 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-24 09:07:58 -0400
commit	2922bc8aedfcd41ca6171cfe1a79ff111ad72019 (patch)
tree	4b937e9869e6b72ea0b59620526a9af8fd3ddcfe	/net/ipv6/ip6_tunnel.c
parent	8f95dd63a2ab6fe7243c4f0bd2c3266e3a5525ab (diff)
ip6tnl: convert hash tables locking to RCU
ip6_tunnels use one rwlock to protect their hash tables.

This locking scheme can be converted to RCU for free, since netdevice
already must wait for a RCU grace period at dismantle time.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
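For context, a minimal standalone sketch of the locking pattern the patch adopts (hypothetical struct tnl, bucket and tnl_lock names, not the kernel's; the real code uses struct ip6_tnl, ip6n->tnls_r_l[] and ip6_tnl_lock): readers walk a bucket list under rcu_read_lock() using rcu_dereference(), while writers serialize among themselves on a spinlock and publish new entries with rcu_assign_pointer().

/* Sketch only, assuming hypothetical names; mirrors the converted
 * ip6_tnl_lookup() / ip6_tnl_link() below. */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct tnl {
	struct tnl *next;
	int key;
};

static struct tnl *bucket;		/* one hash bucket, for brevity */
static DEFINE_SPINLOCK(tnl_lock);	/* serializes writers only */

/* Reader side: caller must hold rcu_read_lock(); no other lock is taken. */
static struct tnl *tnl_lookup(int key)
{
	struct tnl *t;

	for (t = rcu_dereference(bucket); t; t = rcu_dereference(t->next))
		if (t->key == key)
			return t;
	return NULL;
}

/* Writer side: the spinlock keeps concurrent inserts from racing each other;
 * rcu_assign_pointer() orders the initialization of t before publication. */
static void tnl_link(struct tnl *t)
{
	spin_lock_bh(&tnl_lock);
	t->next = bucket;
	rcu_assign_pointer(bucket, t);
	spin_unlock_bh(&tnl_lock);
}

The reader side is exactly what the for_each_ip6_tunnel_rcu() macro introduced by the patch expands to; the writer side corresponds to ip6_tnl_link() after the conversion.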
Diffstat (limited to 'net/ipv6/ip6_tunnel.c')
-rw-r--r--	net/ipv6/ip6_tunnel.c	44
1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c595bbe1ed99..670c291d2567 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -88,8 +88,10 @@ struct ip6_tnl_net {
 	struct ip6_tnl **tnls[2];
 };
 
-/* lock for the tunnel lists */
-static DEFINE_RWLOCK(ip6_tnl_lock);
+/*
+ * Locking : hash tables are protected by RCU and a spinlock
+ */
+static DEFINE_SPINLOCK(ip6_tnl_lock);
 
 static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 {
@@ -130,6 +132,9 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
  * else %NULL
  **/
 
+#define for_each_ip6_tunnel_rcu(start) \
+	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
 static struct ip6_tnl *
 ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
 {
@@ -138,13 +143,14 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
 	struct ip6_tnl *t;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
-	for (t = ip6n->tnls_r_l[h0 ^ h1]; t; t = t->next) {
+	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
-	if ((t = ip6n->tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP))
+	t = rcu_dereference(ip6n->tnls_wc[0]);
+	if (t && (t->dev->flags & IFF_UP))
 		return t;
 
 	return NULL;
@@ -186,10 +192,10 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 {
 	struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms);
 
+	spin_lock_bh(&ip6_tnl_lock);
 	t->next = *tp;
-	write_lock_bh(&ip6_tnl_lock);
-	*tp = t;
-	write_unlock_bh(&ip6_tnl_lock);
+	rcu_assign_pointer(*tp, t);
+	spin_unlock_bh(&ip6_tnl_lock);
 }
 
 /**
@@ -204,9 +210,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 
 	for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) {
 		if (t == *tp) {
-			write_lock_bh(&ip6_tnl_lock);
+			spin_lock_bh(&ip6_tnl_lock);
 			*tp = t->next;
-			write_unlock_bh(&ip6_tnl_lock);
+			spin_unlock_bh(&ip6_tnl_lock);
 			break;
 		}
 	}
@@ -313,9 +319,9 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
 	if (dev == ip6n->fb_tnl_dev) {
-		write_lock_bh(&ip6_tnl_lock);
+		spin_lock_bh(&ip6_tnl_lock);
 		ip6n->tnls_wc[0] = NULL;
-		write_unlock_bh(&ip6_tnl_lock);
+		spin_unlock_bh(&ip6_tnl_lock);
 	} else {
 		ip6_tnl_unlink(ip6n, t);
 	}
@@ -409,7 +415,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
 	   in trouble since we might need the source address for further
 	   processing of the error. */
 
-	read_lock(&ip6_tnl_lock);
+	rcu_read_lock();
 	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
 					&ipv6h->saddr)) == NULL)
 		goto out;
@@ -482,7 +488,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
 	*msg = rel_msg;
 
 out:
-	read_unlock(&ip6_tnl_lock);
+	rcu_read_unlock();
 	return err;
 }
 
@@ -693,23 +699,23 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 	struct ip6_tnl *t;
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 
-	read_lock(&ip6_tnl_lock);
+	rcu_read_lock();
 
 	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
 					&ipv6h->daddr)) != NULL) {
 		if (t->parms.proto != ipproto && t->parms.proto != 0) {
-			read_unlock(&ip6_tnl_lock);
+			rcu_read_unlock();
 			goto discard;
 		}
 
 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-			read_unlock(&ip6_tnl_lock);
+			rcu_read_unlock();
 			goto discard;
 		}
 
 		if (!ip6_tnl_rcv_ctl(t)) {
 			t->dev->stats.rx_dropped++;
-			read_unlock(&ip6_tnl_lock);
+			rcu_read_unlock();
 			goto discard;
 		}
 		secpath_reset(skb);
@@ -727,10 +733,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 		t->dev->stats.rx_packets++;
 		t->dev->stats.rx_bytes += skb->len;
 		netif_rx(skb);
-		read_unlock(&ip6_tnl_lock);
+		rcu_read_unlock();
 		return 0;
 	}
-	read_unlock(&ip6_tnl_lock);
+	rcu_read_unlock();
 	return 1;
 
 discard:
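On the teardown side the patch adds no synchronize_rcu()/call_rcu() of its own: as the changelog says, the conversion is free because the ip6_tnl lives in the tunnel's net_device private area, and netdevice dismantle already waits for an RCU grace period before that memory is released. A standalone reimplementation of the pattern would have to provide that wait itself; roughly as follows (hypothetical helper, reusing struct tnl, bucket and tnl_lock from the earlier sketch, not part of the patch):

#include <linux/slab.h>		/* kfree() */

/* Unlink under the writer lock, then wait out all readers before freeing.
 * ip6_tnl itself relies on netdev unregistration to provide this wait. */
static void tnl_unlink_and_free(struct tnl *t)
{
	struct tnl **tp;

	spin_lock_bh(&tnl_lock);
	for (tp = &bucket; *tp; tp = &(*tp)->next) {
		if (*tp == t) {
			*tp = t->next;
			break;
		}
	}
	spin_unlock_bh(&tnl_lock);

	synchronize_rcu();	/* no reader can still hold a pointer to t */
	kfree(t);
}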