about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorMartin KaFai Lau <kafai@fb.com>2015-09-15 17:30:09 -0400
committerDavid S. Miller <davem@davemloft.net>2015-09-15 17:53:05 -0400
commit70da5b5c532f0ec8aa76b4f46158da5f010f34b3 (patch)
tree11b8e0054461b9788d669215416655f112545670 /net
parent8e3d5be7368107f0c27a1f8126d79b01a47e9567 (diff)
ipv6: Replace spinlock with seqlock and rcu in ip6_tunnel
This patch uses a seqlock to ensure consistency between idst->dst and
idst->cookie. It also makes dst freeing from fib tree to undergo a rcu
grace period.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/ipv6/ip6_fib.c9
-rw-r--r--net/ipv6/ip6_tunnel.c51
2 files changed, 34 insertions, 26 deletions
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e68350bf838b..8a9ec01f4d01 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn)
 	kmem_cache_free(fib6_node_kmem, fn);
 }
 
+static void rt6_rcu_free(struct rt6_info *rt)
+{
+	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
+}
+
 static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 {
 	int cpu;
@@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 		ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
 		pcpu_rt = *ppcpu_rt;
 		if (pcpu_rt) {
-			dst_free(&pcpu_rt->dst);
+			rt6_rcu_free(pcpu_rt);
 			*ppcpu_rt = NULL;
 		}
 	}
@@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt)
 {
 	if (atomic_dec_and_test(&rt->rt6i_ref)) {
 		rt6_free_pcpu(rt);
-		dst_free(&rt->dst);
+		rt6_rcu_free(rt);
 	}
 }
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 851cf6d1eb45..983f0d20f96d 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -126,45 +126,48 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  * Locking : hash tables are protected by RCU and RTNL
  */
 
-static void __ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
-				      struct dst_entry *dst)
+static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
+				    struct dst_entry *dst)
 {
-	dst_release(idst->dst);
+	write_seqlock_bh(&idst->lock);
+	dst_release(rcu_dereference_protected(
+			    idst->dst,
+			    lockdep_is_held(&idst->lock.lock)));
 	if (dst) {
 		dst_hold(dst);
 		idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
 	} else {
 		idst->cookie = 0;
 	}
-	idst->dst = dst;
-}
-
-static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
-				    struct dst_entry *dst)
-{
-
-	spin_lock_bh(&idst->lock);
-	__ip6_tnl_per_cpu_dst_set(idst, dst);
-	spin_unlock_bh(&idst->lock);
+	rcu_assign_pointer(idst->dst, dst);
+	write_sequnlock_bh(&idst->lock);
 }
 
 struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
 {
 	struct ip6_tnl_dst *idst;
 	struct dst_entry *dst;
+	unsigned int seq;
+	u32 cookie;
 
 	idst = raw_cpu_ptr(t->dst_cache);
-	spin_lock_bh(&idst->lock);
-	dst = idst->dst;
-	if (dst) {
-		if (!dst->obsolete || dst->ops->check(dst, idst->cookie)) {
-			dst_hold(idst->dst);
-		} else {
-			__ip6_tnl_per_cpu_dst_set(idst, NULL);
-			dst = NULL;
-		}
+
+	rcu_read_lock();
+	do {
+		seq = read_seqbegin(&idst->lock);
+		dst = rcu_dereference(idst->dst);
+		cookie = idst->cookie;
+	} while (read_seqretry(&idst->lock, seq));
+
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
+	rcu_read_unlock();
+
+	if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
+		ip6_tnl_per_cpu_dst_set(idst, NULL);
+		dst_release(dst);
+		dst = NULL;
 	}
-	spin_unlock_bh(&idst->lock);
 	return dst;
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
@@ -204,7 +207,7 @@ int ip6_tnl_dst_init(struct ip6_tnl *t)
 		return -ENOMEM;
 
 	for_each_possible_cpu(i)
-		spin_lock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
+		seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
 
 	return 0;
 }