diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2010-10-28 23:09:24 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-11-08 16:50:08 -0500 |
commit | fc766e4c4965915ab52a1d1fa3c7a7b3e7bc07f0 (patch) | |
tree | d45160f52eea37d4e5149d511c3c577bef253801 /net/decnet/dn_route.c | |
parent | e4a7b93bd5d84e1e79917d024d17d745d190fc9a (diff) |
decnet: RCU conversion and get rid of dev_base_lock
While tracking dev_base_lock users, I found decnet used it in
dnet_select_source(), but for a wrong purpose:
Writers only hold RTNL, not dev_base_lock, so readers must use RCU if
they cannot use RTNL.
Adds an rcu_head in struct dn_ifaddr and handles proper RCU management.
Adds __rcu annotation in dn_route as well.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/decnet/dn_route.c')
-rw-r--r-- | net/decnet/dn_route.c | 68 |
1 file changed, 39 insertions, 29 deletions
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index df0f3e54ff8a..94a9eb1d313e 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -93,7 +93,7 @@ | |||
93 | 93 | ||
94 | struct dn_rt_hash_bucket | 94 | struct dn_rt_hash_bucket |
95 | { | 95 | { |
96 | struct dn_route *chain; | 96 | struct dn_route __rcu *chain; |
97 | spinlock_t lock; | 97 | spinlock_t lock; |
98 | }; | 98 | }; |
99 | 99 | ||
@@ -157,15 +157,17 @@ static inline void dnrt_drop(struct dn_route *rt) | |||
157 | static void dn_dst_check_expire(unsigned long dummy) | 157 | static void dn_dst_check_expire(unsigned long dummy) |
158 | { | 158 | { |
159 | int i; | 159 | int i; |
160 | struct dn_route *rt, **rtp; | 160 | struct dn_route *rt; |
161 | struct dn_route __rcu **rtp; | ||
161 | unsigned long now = jiffies; | 162 | unsigned long now = jiffies; |
162 | unsigned long expire = 120 * HZ; | 163 | unsigned long expire = 120 * HZ; |
163 | 164 | ||
164 | for(i = 0; i <= dn_rt_hash_mask; i++) { | 165 | for (i = 0; i <= dn_rt_hash_mask; i++) { |
165 | rtp = &dn_rt_hash_table[i].chain; | 166 | rtp = &dn_rt_hash_table[i].chain; |
166 | 167 | ||
167 | spin_lock(&dn_rt_hash_table[i].lock); | 168 | spin_lock(&dn_rt_hash_table[i].lock); |
168 | while((rt=*rtp) != NULL) { | 169 | while ((rt = rcu_dereference_protected(*rtp, |
170 | lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) { | ||
169 | if (atomic_read(&rt->dst.__refcnt) || | 171 | if (atomic_read(&rt->dst.__refcnt) || |
170 | (now - rt->dst.lastuse) < expire) { | 172 | (now - rt->dst.lastuse) < expire) { |
171 | rtp = &rt->dst.dn_next; | 173 | rtp = &rt->dst.dn_next; |
@@ -186,17 +188,19 @@ static void dn_dst_check_expire(unsigned long dummy) | |||
186 | 188 | ||
187 | static int dn_dst_gc(struct dst_ops *ops) | 189 | static int dn_dst_gc(struct dst_ops *ops) |
188 | { | 190 | { |
189 | struct dn_route *rt, **rtp; | 191 | struct dn_route *rt; |
192 | struct dn_route __rcu **rtp; | ||
190 | int i; | 193 | int i; |
191 | unsigned long now = jiffies; | 194 | unsigned long now = jiffies; |
192 | unsigned long expire = 10 * HZ; | 195 | unsigned long expire = 10 * HZ; |
193 | 196 | ||
194 | for(i = 0; i <= dn_rt_hash_mask; i++) { | 197 | for (i = 0; i <= dn_rt_hash_mask; i++) { |
195 | 198 | ||
196 | spin_lock_bh(&dn_rt_hash_table[i].lock); | 199 | spin_lock_bh(&dn_rt_hash_table[i].lock); |
197 | rtp = &dn_rt_hash_table[i].chain; | 200 | rtp = &dn_rt_hash_table[i].chain; |
198 | 201 | ||
199 | while((rt=*rtp) != NULL) { | 202 | while ((rt = rcu_dereference_protected(*rtp, |
203 | lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) { | ||
200 | if (atomic_read(&rt->dst.__refcnt) || | 204 | if (atomic_read(&rt->dst.__refcnt) || |
201 | (now - rt->dst.lastuse) < expire) { | 205 | (now - rt->dst.lastuse) < expire) { |
202 | rtp = &rt->dst.dn_next; | 206 | rtp = &rt->dst.dn_next; |
@@ -227,7 +231,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu) | |||
227 | { | 231 | { |
228 | u32 min_mtu = 230; | 232 | u32 min_mtu = 230; |
229 | struct dn_dev *dn = dst->neighbour ? | 233 | struct dn_dev *dn = dst->neighbour ? |
230 | (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL; | 234 | rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL; |
231 | 235 | ||
232 | if (dn && dn->use_long == 0) | 236 | if (dn && dn->use_long == 0) |
233 | min_mtu -= 6; | 237 | min_mtu -= 6; |
@@ -277,13 +281,15 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) | |||
277 | 281 | ||
278 | static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp) | 282 | static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp) |
279 | { | 283 | { |
280 | struct dn_route *rth, **rthp; | 284 | struct dn_route *rth; |
285 | struct dn_route __rcu **rthp; | ||
281 | unsigned long now = jiffies; | 286 | unsigned long now = jiffies; |
282 | 287 | ||
283 | rthp = &dn_rt_hash_table[hash].chain; | 288 | rthp = &dn_rt_hash_table[hash].chain; |
284 | 289 | ||
285 | spin_lock_bh(&dn_rt_hash_table[hash].lock); | 290 | spin_lock_bh(&dn_rt_hash_table[hash].lock); |
286 | while((rth = *rthp) != NULL) { | 291 | while ((rth = rcu_dereference_protected(*rthp, |
292 | lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) { | ||
287 | if (compare_keys(&rth->fl, &rt->fl)) { | 293 | if (compare_keys(&rth->fl, &rt->fl)) { |
288 | /* Put it first */ | 294 | /* Put it first */ |
289 | *rthp = rth->dst.dn_next; | 295 | *rthp = rth->dst.dn_next; |
@@ -315,15 +321,15 @@ static void dn_run_flush(unsigned long dummy) | |||
315 | int i; | 321 | int i; |
316 | struct dn_route *rt, *next; | 322 | struct dn_route *rt, *next; |
317 | 323 | ||
318 | for(i = 0; i < dn_rt_hash_mask; i++) { | 324 | for (i = 0; i < dn_rt_hash_mask; i++) { |
319 | spin_lock_bh(&dn_rt_hash_table[i].lock); | 325 | spin_lock_bh(&dn_rt_hash_table[i].lock); |
320 | 326 | ||
321 | if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL) | 327 | if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL) |
322 | goto nothing_to_declare; | 328 | goto nothing_to_declare; |
323 | 329 | ||
324 | for(; rt; rt=next) { | 330 | for(; rt; rt = next) { |
325 | next = rt->dst.dn_next; | 331 | next = rcu_dereference_raw(rt->dst.dn_next); |
326 | rt->dst.dn_next = NULL; | 332 | RCU_INIT_POINTER(rt->dst.dn_next, NULL); |
327 | dst_free((struct dst_entry *)rt); | 333 | dst_free((struct dst_entry *)rt); |
328 | } | 334 | } |
329 | 335 | ||
@@ -458,15 +464,16 @@ static int dn_return_long(struct sk_buff *skb) | |||
458 | */ | 464 | */ |
459 | static int dn_route_rx_packet(struct sk_buff *skb) | 465 | static int dn_route_rx_packet(struct sk_buff *skb) |
460 | { | 466 | { |
461 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 467 | struct dn_skb_cb *cb; |
462 | int err; | 468 | int err; |
463 | 469 | ||
464 | if ((err = dn_route_input(skb)) == 0) | 470 | if ((err = dn_route_input(skb)) == 0) |
465 | return dst_input(skb); | 471 | return dst_input(skb); |
466 | 472 | ||
473 | cb = DN_SKB_CB(skb); | ||
467 | if (decnet_debug_level & 4) { | 474 | if (decnet_debug_level & 4) { |
468 | char *devname = skb->dev ? skb->dev->name : "???"; | 475 | char *devname = skb->dev ? skb->dev->name : "???"; |
469 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 476 | |
470 | printk(KERN_DEBUG | 477 | printk(KERN_DEBUG |
471 | "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n", | 478 | "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n", |
472 | (int)cb->rt_flags, devname, skb->len, | 479 | (int)cb->rt_flags, devname, skb->len, |
@@ -573,7 +580,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type | |||
573 | struct dn_skb_cb *cb; | 580 | struct dn_skb_cb *cb; |
574 | unsigned char flags = 0; | 581 | unsigned char flags = 0; |
575 | __u16 len = le16_to_cpu(*(__le16 *)skb->data); | 582 | __u16 len = le16_to_cpu(*(__le16 *)skb->data); |
576 | struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr; | 583 | struct dn_dev *dn = rcu_dereference(dev->dn_ptr); |
577 | unsigned char padlen = 0; | 584 | unsigned char padlen = 0; |
578 | 585 | ||
579 | if (!net_eq(dev_net(dev), &init_net)) | 586 | if (!net_eq(dev_net(dev), &init_net)) |
@@ -728,7 +735,7 @@ static int dn_forward(struct sk_buff *skb) | |||
728 | { | 735 | { |
729 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 736 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
730 | struct dst_entry *dst = skb_dst(skb); | 737 | struct dst_entry *dst = skb_dst(skb); |
731 | struct dn_dev *dn_db = dst->dev->dn_ptr; | 738 | struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr); |
732 | struct dn_route *rt; | 739 | struct dn_route *rt; |
733 | struct neighbour *neigh = dst->neighbour; | 740 | struct neighbour *neigh = dst->neighbour; |
734 | int header_len; | 741 | int header_len; |
@@ -835,13 +842,16 @@ static inline int dn_match_addr(__le16 addr1, __le16 addr2) | |||
835 | static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope) | 842 | static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope) |
836 | { | 843 | { |
837 | __le16 saddr = 0; | 844 | __le16 saddr = 0; |
838 | struct dn_dev *dn_db = dev->dn_ptr; | 845 | struct dn_dev *dn_db; |
839 | struct dn_ifaddr *ifa; | 846 | struct dn_ifaddr *ifa; |
840 | int best_match = 0; | 847 | int best_match = 0; |
841 | int ret; | 848 | int ret; |
842 | 849 | ||
843 | read_lock(&dev_base_lock); | 850 | rcu_read_lock(); |
844 | for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { | 851 | dn_db = rcu_dereference(dev->dn_ptr); |
852 | for (ifa = rcu_dereference(dn_db->ifa_list); | ||
853 | ifa != NULL; | ||
854 | ifa = rcu_dereference(ifa->ifa_next)) { | ||
845 | if (ifa->ifa_scope > scope) | 855 | if (ifa->ifa_scope > scope) |
846 | continue; | 856 | continue; |
847 | if (!daddr) { | 857 | if (!daddr) { |
@@ -854,7 +864,7 @@ static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int | |||
854 | if (best_match == 0) | 864 | if (best_match == 0) |
855 | saddr = ifa->ifa_local; | 865 | saddr = ifa->ifa_local; |
856 | } | 866 | } |
857 | read_unlock(&dev_base_lock); | 867 | rcu_read_unlock(); |
858 | 868 | ||
859 | return saddr; | 869 | return saddr; |
860 | } | 870 | } |
@@ -1020,7 +1030,7 @@ source_ok: | |||
1020 | err = -ENODEV; | 1030 | err = -ENODEV; |
1021 | if (dev_out == NULL) | 1031 | if (dev_out == NULL) |
1022 | goto out; | 1032 | goto out; |
1023 | dn_db = dev_out->dn_ptr; | 1033 | dn_db = rcu_dereference_raw(dev_out->dn_ptr); |
1024 | /* Possible improvement - check all devices for local addr */ | 1034 | /* Possible improvement - check all devices for local addr */ |
1025 | if (dn_dev_islocal(dev_out, fl.fld_dst)) { | 1035 | if (dn_dev_islocal(dev_out, fl.fld_dst)) { |
1026 | dev_put(dev_out); | 1036 | dev_put(dev_out); |
@@ -1233,7 +1243,7 @@ static int dn_route_input_slow(struct sk_buff *skb) | |||
1233 | 1243 | ||
1234 | dev_hold(in_dev); | 1244 | dev_hold(in_dev); |
1235 | 1245 | ||
1236 | if ((dn_db = in_dev->dn_ptr) == NULL) | 1246 | if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL) |
1237 | goto out; | 1247 | goto out; |
1238 | 1248 | ||
1239 | /* Zero source addresses are not allowed */ | 1249 | /* Zero source addresses are not allowed */ |
@@ -1677,15 +1687,15 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou | |||
1677 | { | 1687 | { |
1678 | struct dn_rt_cache_iter_state *s = seq->private; | 1688 | struct dn_rt_cache_iter_state *s = seq->private; |
1679 | 1689 | ||
1680 | rt = rt->dst.dn_next; | 1690 | rt = rcu_dereference_bh(rt->dst.dn_next); |
1681 | while(!rt) { | 1691 | while (!rt) { |
1682 | rcu_read_unlock_bh(); | 1692 | rcu_read_unlock_bh(); |
1683 | if (--s->bucket < 0) | 1693 | if (--s->bucket < 0) |
1684 | break; | 1694 | break; |
1685 | rcu_read_lock_bh(); | 1695 | rcu_read_lock_bh(); |
1686 | rt = dn_rt_hash_table[s->bucket].chain; | 1696 | rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); |
1687 | } | 1697 | } |
1688 | return rcu_dereference_bh(rt); | 1698 | return rt; |
1689 | } | 1699 | } |
1690 | 1700 | ||
1691 | static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) | 1701 | static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) |