author     YOSHIFUJI Hideaki / 吉藤英明 <yoshfuji@linux-ipv6.org>    2013-01-30 04:27:52 -0500
committer  David S. Miller <davem@davemloft.net>                     2013-01-30 22:41:13 -0500
commit     18367681a10bd29c3f2305e6b7b984de5b33d548
tree       9c651ed8531fef6c7985548330cbe69576e0eb41
parent     d3aedd5ebd4b0b925b0bcda548066803e1318499
ipv6 flowlabel: Convert np->ipv6_fl_list to RCU.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/ipv6.h      2
-rw-r--r--  include/net/ipv6.h        1
-rw-r--r--  net/ipv6/ip6_flowlabel.c  72
3 files changed, 42 insertions, 33 deletions
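
The conversion follows the usual RCU pattern for a singly linked list: readers now walk np->ipv6_fl_list under rcu_read_lock_bh() via the new for_each_sk_fl_rcu() helper, while writers still serialize against each other on ip6_sk_fl_lock (demoted from rwlock to spinlock), publish new entries with rcu_assign_pointer(), and defer frees with kfree_rcu() so in-flight readers never touch freed memory. Below is a minimal kernel-style sketch of the read/publish half of that pattern; the fl_node/fl_list/fl_list_lock names and the fl_exists()/fl_add() helpers are illustrative only, not the actual ipv6 code.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct fl_node {
        struct fl_node __rcu *next;
        u32 label;
        struct rcu_head rcu;            /* lets removal use kfree_rcu() */
};

static struct fl_node __rcu *fl_list;   /* list head, RCU-protected */
static DEFINE_SPINLOCK(fl_list_lock);   /* serializes writers only */

/* Reader: lockless walk under rcu_read_lock_bh(), like for_each_sk_fl_rcu().
 * (The real fl6_sock_lookup() additionally takes a reference on the
 * refcounted ip6_flowlabel before leaving the read-side section.) */
static bool fl_exists(u32 label)
{
        struct fl_node *n;
        bool found = false;

        rcu_read_lock_bh();
        for (n = rcu_dereference_bh(fl_list); n; n = rcu_dereference_bh(n->next)) {
                if (n->label == label) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock_bh();
        return found;
}

/* Writer: insert at the head, as fl_link() does.  rcu_assign_pointer()
 * orders the initialization of *n before it becomes visible to readers. */
static void fl_add(struct fl_node *n, u32 label)
{
        n->label = label;

        spin_lock_bh(&fl_list_lock);
        n->next = fl_list;      /* plain read is fine: writers hold the lock */
        rcu_assign_pointer(fl_list, n);
        spin_unlock_bh(&fl_list_lock);
}

In the patched code the same split shows up as rcu_read_lock_bh()/for_each_sk_fl_rcu() on the reader side versus spin_lock_bh(&ip6_sk_fl_lock) plus rcu_assign_pointer() in fl_link() on the writer side.
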
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index e971e3742172..850e95bc766c 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -214,7 +214,7 @@ struct ipv6_pinfo {
 
         struct ipv6_mc_socklist __rcu *ipv6_mc_list;
         struct ipv6_ac_socklist *ipv6_ac_list;
-        struct ipv6_fl_socklist *ipv6_fl_list;
+        struct ipv6_fl_socklist __rcu *ipv6_fl_list;
 
         struct ipv6_txoptions *opt;
         struct sk_buff *pktoptions;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 1d457161def2..851d5412a299 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -239,6 +239,7 @@ struct ip6_flowlabel {
 struct ipv6_fl_socklist {
         struct ipv6_fl_socklist *next;
         struct ip6_flowlabel *fl;
+        struct rcu_head rcu;
 };
 
 extern struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index da156015d827..22494afd981c 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -62,7 +62,7 @@ static DEFINE_SPINLOCK(ip6_fl_lock);
 
 /* Big socket sock */
 
-static DEFINE_RWLOCK(ip6_sk_fl_lock);
+static DEFINE_SPINLOCK(ip6_sk_fl_lock);
 
 #define for_each_fl_rcu(hash, fl) \
         for (fl = rcu_dereference(fl_ht[(hash)]); \
@@ -73,6 +73,11 @@ static DEFINE_RWLOCK(ip6_sk_fl_lock);
              fl != NULL; \
              fl = rcu_dereference(fl->next))
 
+#define for_each_sk_fl_rcu(np, sfl) \
+        for (sfl = rcu_dereference_bh(np->ipv6_fl_list); \
+             sfl != NULL; \
+             sfl = rcu_dereference_bh(sfl->next))
+
 static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
 {
         struct ip6_flowlabel *fl;
@@ -244,17 +249,17 @@ struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
 
         label &= IPV6_FLOWLABEL_MASK;
 
-        read_lock_bh(&ip6_sk_fl_lock);
-        for (sfl=np->ipv6_fl_list; sfl; sfl = sfl->next) {
+        rcu_read_lock_bh();
+        for_each_sk_fl_rcu(np, sfl) {
                 struct ip6_flowlabel *fl = sfl->fl;
                 if (fl->label == label) {
                         fl->lastuse = jiffies;
                         atomic_inc(&fl->users);
-                        read_unlock_bh(&ip6_sk_fl_lock);
+                        rcu_read_unlock_bh();
                         return fl;
                 }
         }
-        read_unlock_bh(&ip6_sk_fl_lock);
+        rcu_read_unlock_bh();
         return NULL;
 }
 
@@ -265,20 +270,21 @@ void fl6_free_socklist(struct sock *sk)
         struct ipv6_pinfo *np = inet6_sk(sk);
         struct ipv6_fl_socklist *sfl;
 
-        if (!np->ipv6_fl_list)
+        if (!rcu_access_pointer(np->ipv6_fl_list))
                 return;
 
-        write_lock_bh(&ip6_sk_fl_lock);
-        sfl = np->ipv6_fl_list;
-        np->ipv6_fl_list = NULL;
-        write_unlock_bh(&ip6_sk_fl_lock);
+        spin_lock_bh(&ip6_sk_fl_lock);
+        while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
+                        lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
+                np->ipv6_fl_list = sfl->next;
+                spin_unlock_bh(&ip6_sk_fl_lock);
 
-        while (sfl) {
-                struct ipv6_fl_socklist *next = sfl->next;
                 fl_release(sfl->fl);
-                kfree(sfl);
-                sfl = next;
+                kfree_rcu(sfl, rcu);
+
+                spin_lock_bh(&ip6_sk_fl_lock);
         }
+        spin_unlock_bh(&ip6_sk_fl_lock);
 }
 
 /* Service routines */
@@ -443,7 +449,7 @@ static int mem_check(struct sock *sk)
         if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
                 return 0;
 
-        for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next)
+        for_each_sk_fl_rcu(np, sfl)
                 count++;
 
         if (room <= 0 ||
@@ -486,11 +492,11 @@ static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
 static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
                 struct ip6_flowlabel *fl)
 {
-        write_lock_bh(&ip6_sk_fl_lock);
+        spin_lock_bh(&ip6_sk_fl_lock);
         sfl->fl = fl;
         sfl->next = np->ipv6_fl_list;
-        np->ipv6_fl_list = sfl;
-        write_unlock_bh(&ip6_sk_fl_lock);
+        rcu_assign_pointer(np->ipv6_fl_list, sfl);
+        spin_unlock_bh(&ip6_sk_fl_lock);
 }
 
 int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
@@ -512,31 +518,33 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 
         switch (freq.flr_action) {
         case IPV6_FL_A_PUT:
-                write_lock_bh(&ip6_sk_fl_lock);
-                for (sflp = &np->ipv6_fl_list; (sfl=*sflp)!=NULL; sflp = &sfl->next) {
+                spin_lock_bh(&ip6_sk_fl_lock);
+                for (sflp = &np->ipv6_fl_list;
+                     (sfl = rcu_dereference(*sflp))!=NULL;
+                     sflp = &sfl->next) {
                         if (sfl->fl->label == freq.flr_label) {
                                 if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
                                         np->flow_label &= ~IPV6_FLOWLABEL_MASK;
-                                *sflp = sfl->next;
-                                write_unlock_bh(&ip6_sk_fl_lock);
+                                *sflp = rcu_dereference(sfl->next);
+                                spin_unlock_bh(&ip6_sk_fl_lock);
                                 fl_release(sfl->fl);
-                                kfree(sfl);
+                                kfree_rcu(sfl, rcu);
                                 return 0;
                         }
                 }
-                write_unlock_bh(&ip6_sk_fl_lock);
+                spin_unlock_bh(&ip6_sk_fl_lock);
                 return -ESRCH;
 
         case IPV6_FL_A_RENEW:
-                read_lock_bh(&ip6_sk_fl_lock);
-                for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
+                rcu_read_lock_bh();
+                for_each_sk_fl_rcu(np, sfl) {
                         if (sfl->fl->label == freq.flr_label) {
                                 err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
-                                read_unlock_bh(&ip6_sk_fl_lock);
+                                rcu_read_unlock_bh();
                                 return err;
                         }
                 }
-                read_unlock_bh(&ip6_sk_fl_lock);
+                rcu_read_unlock_bh();
 
                 if (freq.flr_share == IPV6_FL_S_NONE &&
                     ns_capable(net->user_ns, CAP_NET_ADMIN)) {
@@ -560,11 +568,11 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 
         if (freq.flr_label) {
                 err = -EEXIST;
-                read_lock_bh(&ip6_sk_fl_lock);
-                for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
+                rcu_read_lock_bh();
+                for_each_sk_fl_rcu(np, sfl) {
                         if (sfl->fl->label == freq.flr_label) {
                                 if (freq.flr_flags&IPV6_FL_F_EXCL) {
-                                        read_unlock_bh(&ip6_sk_fl_lock);
+                                        rcu_read_unlock_bh();
                                         goto done;
                                 }
                                 fl1 = sfl->fl;
@@ -572,7 +580,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                                 break;
                         }
                 }
-                read_unlock_bh(&ip6_sk_fl_lock);
+                rcu_read_unlock_bh();
 
                 if (fl1 == NULL)
                         fl1 = fl_lookup(net, freq.flr_label);
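
The most delicate hunk is fl6_free_socklist(): instead of detaching the whole list under the old rwlock and freeing it afterwards, entries are now popped one at a time under ip6_sk_fl_lock, the lock is dropped around fl_release()/kfree_rcu() and re-taken for the next entry, and kfree_rcu() postpones the actual kfree() until a grace period has passed, so a concurrent for_each_sk_fl_rcu() walker can still safely dereference a node that was just unlinked. A hedged sketch of that teardown loop, continuing the illustrative fl_node/fl_list/fl_list_lock declarations from the sketch above (release_node() is a hypothetical stand-in for fl_release()):

/* Continues the fl_node/fl_list/fl_list_lock sketch above. */
static void release_node(struct fl_node *n)
{
        /* hypothetical: drop whatever reference the entry holds (cf. fl_release()) */
}

static void fl_free_all(void)
{
        struct fl_node *n;

        spin_lock_bh(&fl_list_lock);
        while ((n = rcu_dereference_protected(fl_list,
                        lockdep_is_held(&fl_list_lock))) != NULL) {
                fl_list = n->next;              /* unlink the current head */
                spin_unlock_bh(&fl_list_lock);  /* don't release/free under the lock */

                release_node(n);
                kfree_rcu(n, rcu);              /* kfree() happens after a grace period */

                spin_lock_bh(&fl_list_lock);    /* re-take for the next entry */
        }
        spin_unlock_bh(&fl_list_lock);
}

Dropping and re-taking the spinlock per entry keeps the release work out of the critical section; this stays correct because each entry is unlinked before the lock is dropped, and kfree_rcu() keeps it valid for any reader that found it earlier.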