author	Eric Dumazet <eric.dumazet@gmail.com>	2010-09-15 07:07:24 -0400
committer	David S. Miller <davem@davemloft.net>	2010-09-15 22:29:46 -0400
commit	b7285b7912776a4492744949c747c88d539006fa (patch)
tree	b5231b4e6965b4ac629c3e9feb0613e366780d0e /net/ipv4/ipip.c
parent	7dff59efbb0e8b0f81c95fd40379c0d0c757c808 (diff)
ipip: get rid of ipip_lock
As RTNL is held while tunnels are inserted and deleted, we can remove the ipip_lock spinlock. My initial RCU conversion was conservative and converted the rwlock to a spinlock, with no RTNL requirement.

Use appropriate rcu annotations and modern lockdep checks as well.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
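For readers unfamiliar with the pattern, the sketch below is illustrative only and not part of the patch: "bucket", tunnel_link() and tunnel_find() are hypothetical names standing in for one hash-chain head and its helpers. It shows the write/read split the diff converges on: writers already run under RTNL, so rtnl_dereference() plus rcu_assign_pointer() replace the spinlock, while the receive path keeps walking the chain under rcu_read_lock().

/*
 * Illustrative sketch (kernel-style C, simplified from the diff):
 * "bucket" plays the role of a chain head such as ipn->tunnels_wc[0].
 */
static struct ip_tunnel __rcu *bucket;

/* Writer side: serialized by RTNL, so rtnl_dereference() may read the
 * chain directly and rcu_assign_pointer() publishes the new head to
 * concurrent RCU readers without any extra spinlock.
 */
static void tunnel_link(struct ip_tunnel *t)
{
	ASSERT_RTNL();
	rcu_assign_pointer(t->next, rtnl_dereference(bucket));
	rcu_assign_pointer(bucket, t);
}

/* Reader side: caller must be inside an RCU read-side critical section
 * (the ipip receive path already is); no lock is taken here.
 */
static struct ip_tunnel *tunnel_find(__be32 remote)
{
	struct ip_tunnel *t;

	for (t = rcu_dereference(bucket); t; t = rcu_dereference(t->next))
		if (t->parms.iph.daddr == remote)
			return t;
	return NULL;
}

Because the write side was already serialized by RTNL, dropping ipip_lock removes a redundant layer of locking rather than changing the concurrency model seen by readers.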
Diffstat (limited to 'net/ipv4/ipip.c')
-rw-r--r--	net/ipv4/ipip.c	67
1 files changed, 34 insertions, 33 deletions
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 3c6f8f3968a6..8de8888dc95a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -122,11 +122,11 @@
 
 static int ipip_net_id __read_mostly;
 struct ipip_net {
-	struct ip_tunnel *tunnels_r_l[HASH_SIZE];
-	struct ip_tunnel *tunnels_r[HASH_SIZE];
-	struct ip_tunnel *tunnels_l[HASH_SIZE];
-	struct ip_tunnel *tunnels_wc[1];
-	struct ip_tunnel **tunnels[4];
+	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_wc[1];
+	struct ip_tunnel __rcu **tunnels[4];
 
 	struct net_device *fb_tunnel_dev;
 };
@@ -135,9 +135,8 @@ static void ipip_tunnel_init(struct net_device *dev);
 static void ipip_tunnel_setup(struct net_device *dev);
 
 /*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
  */
-static DEFINE_SPINLOCK(ipip_lock);
 
 #define for_each_ip_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
@@ -145,8 +144,8 @@ static DEFINE_SPINLOCK(ipip_lock);
 static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
 		__be32 remote, __be32 local)
 {
-	unsigned h0 = HASH(remote);
-	unsigned h1 = HASH(local);
+	unsigned int h0 = HASH(remote);
+	unsigned int h1 = HASH(local);
 	struct ip_tunnel *t;
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
@@ -169,12 +168,12 @@ static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
 	return NULL;
 }
 
-static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
+static struct ip_tunnel __rcu **__ipip_bucket(struct ipip_net *ipn,
 		struct ip_tunnel_parm *parms)
 {
 	__be32 remote = parms->iph.daddr;
 	__be32 local = parms->iph.saddr;
-	unsigned h = 0;
+	unsigned int h = 0;
 	int prio = 0;
 
 	if (remote) {
@@ -188,7 +187,7 @@ static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
 	return &ipn->tunnels[prio][h];
 }
 
-static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
+static inline struct ip_tunnel __rcu **ipip_bucket(struct ipip_net *ipn,
 		struct ip_tunnel *t)
 {
 	return __ipip_bucket(ipn, &t->parms);
@@ -196,13 +195,14 @@ static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
 
 static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
 {
-	struct ip_tunnel **tp;
-
-	for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) {
-		if (t == *tp) {
-			spin_lock_bh(&ipip_lock);
-			*tp = t->next;
-			spin_unlock_bh(&ipip_lock);
+	struct ip_tunnel __rcu **tp;
+	struct ip_tunnel *iter;
+
+	for (tp = ipip_bucket(ipn, t);
+	     (iter = rtnl_dereference(*tp)) != NULL;
+	     tp = &iter->next) {
+		if (t == iter) {
+			rcu_assign_pointer(*tp, t->next);
 			break;
 		}
 	}
@@ -210,12 +210,10 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
 
 static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
 {
-	struct ip_tunnel **tp = ipip_bucket(ipn, t);
+	struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
 
-	spin_lock_bh(&ipip_lock);
-	t->next = *tp;
+	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 	rcu_assign_pointer(*tp, t);
-	spin_unlock_bh(&ipip_lock);
 }
 
 static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -223,12 +221,15 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 {
 	__be32 remote = parms->iph.daddr;
 	__be32 local = parms->iph.saddr;
-	struct ip_tunnel *t, **tp, *nt;
+	struct ip_tunnel *t, *nt;
+	struct ip_tunnel __rcu **tp;
 	struct net_device *dev;
 	char name[IFNAMSIZ];
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
-	for (tp = __ipip_bucket(ipn, parms); (t = *tp) != NULL; tp = &t->next) {
+	for (tp = __ipip_bucket(ipn, parms);
+	     (t = rtnl_dereference(*tp)) != NULL;
+	     tp = &t->next) {
 		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
 			return t;
 	}
@@ -268,16 +269,15 @@ failed_free:
 	return NULL;
 }
 
+/* called with RTNL */
 static void ipip_tunnel_uninit(struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
-	if (dev == ipn->fb_tunnel_dev) {
-		spin_lock_bh(&ipip_lock);
-		ipn->tunnels_wc[0] = NULL;
-		spin_unlock_bh(&ipip_lock);
-	} else
+	if (dev == ipn->fb_tunnel_dev)
+		rcu_assign_pointer(ipn->tunnels_wc[0], NULL);
+	else
 		ipip_tunnel_unlink(ipn, netdev_priv(dev));
 	dev_put(dev);
 }
@@ -741,7 +741,7 @@ static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
 	iph->ihl	= 5;
 
 	dev_hold(dev);
-	ipn->tunnels_wc[0]	= tunnel;
+	rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
 }
 
 static struct xfrm_tunnel ipip_handler __read_mostly = {
@@ -760,11 +760,12 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 	for (prio = 1; prio < 4; prio++) {
 		int h;
 		for (h = 0; h < HASH_SIZE; h++) {
-			struct ip_tunnel *t = ipn->tunnels[prio][h];
+			struct ip_tunnel *t;
 
+			t = rtnl_dereference(ipn->tunnels[prio][h]);
 			while (t != NULL) {
 				unregister_netdevice_queue(t->dev, head);
-				t = t->next;
+				t = rtnl_dereference(t->next);
 			}
 		}
 	}