author		Ying Xue <ying.xue@windriver.com>	2015-01-09 02:27:06 -0500
committer	David S. Miller <davem@davemloft.net>	2015-01-12 16:24:32 -0500
commit		7f9f95d9d9bcdf253c4149a157b096958013eceb (patch)
tree		779858049250f786a0b1b2d89482f3cdccd9617f /net/tipc/bcast.c
parent		f2f9800d4955a96d92896841d8ba9b04201deaa1 (diff)
tipc: make bearer list support net namespace
The bearer list, currently defined as a global variable, is used to store bearer instances. Once tipc supports net namespaces, bearers created in one namespace must be isolated from those allocated in other namespaces, which requires that the bearer list (bearer_list) be moved into the tipc_net structure. As a result, a net namespace pointer has to be passed to every function that accesses the bearer list.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Tested-by: Tero Aho <Tero.Aho@coriant.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
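For context, the change follows the standard kernel pernet pattern: formerly global state is placed in a structure registered with register_pernet_subsys(), and every accessor takes a struct net pointer and looks that structure up with net_generic(). The sketch below shows the pattern in isolation; it is not part of the patch, all example_* identifiers (and the array size) are invented for illustration, and only the pernet/net_generic/RCU calls are real kernel APIs. In the patch itself the corresponding structure is tipc_net, keyed by tipc_net_id, as seen in the hunks that follow.

/*
 * Illustrative sketch only (not part of this patch): the generic pernet
 * pattern the commit message describes.  All example_* names are made up.
 */
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_bearer;			/* hypothetical bearer type */

static int example_net_id __read_mostly;

struct example_net {
	/* state that used to be a single global array, now per namespace */
	struct example_bearer __rcu *bearer_list[8];
};

static int __net_init example_net_init(struct net *net)
{
	struct example_net *en = net_generic(net, example_net_id);

	memset(en->bearer_list, 0, sizeof(en->bearer_list));
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* per-namespace teardown would go here */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};

/* callers that used to touch the global now take a struct net pointer */
static struct example_bearer *example_bearer_get(struct net *net, int i)
{
	struct example_net *en = net_generic(net, example_net_id);

	return rcu_dereference_rtnl(en->bearer_list[i]);
}

static int __init example_module_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}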
Diffstat (limited to 'net/tipc/bcast.c')
-rw-r--r--	net/tipc/bcast.c	27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 816c0e49319f..e7c538304595 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -369,7 +369,7 @@ void tipc_bclink_update_link_state(struct net *net, struct tipc_node *n_ptr,
 	msg_set_bcgap_to(msg, to);
 
 	tipc_bclink_lock();
-	tipc_bearer_send(MAX_BEARERS, buf, NULL);
+	tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
 	bcl->stats.sent_nacks++;
 	tipc_bclink_unlock();
 	kfree_skb(buf);
@@ -425,7 +425,7 @@ int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
 	if (likely(bclink)) {
 		tipc_bclink_lock();
 		if (likely(bclink->bcast_nodes.count)) {
-			rc = __tipc_link_xmit(bcl, list);
+			rc = __tipc_link_xmit(net, bcl, list);
 			if (likely(!rc)) {
 				u32 len = skb_queue_len(&bcl->outqueue);
 
@@ -682,13 +682,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 
 		if (bp_index == 0) {
 			/* Use original buffer for first bearer */
-			tipc_bearer_send(b->identity, buf, &b->bcast_addr);
+			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
 		} else {
 			/* Avoid concurrent buffer access */
 			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
 			if (!tbuf)
 				break;
-			tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
+			tipc_bearer_send(net, b->identity, tbuf,
+					 &b->bcast_addr);
 			kfree_skb(tbuf); /* Bearer keeps a clone */
 		}
 		if (bcbearer->remains_new.count == 0)
@@ -703,8 +704,10 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 /**
  * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
  */
-void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
+void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
+			u32 node, bool action)
 {
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
 	struct tipc_bcbearer_pair *bp_curr;
 	struct tipc_bearer *b;
@@ -723,7 +726,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
 
 	rcu_read_lock();
 	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-		b = rcu_dereference_rtnl(bearer_list[b_index]);
+		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
 		if (!b || !b->nodes.count)
 			continue;
 
@@ -939,8 +942,10 @@ int tipc_bclink_set_queue_limits(u32 limit)
 	return 0;
 }
 
-int tipc_bclink_init(void)
+int tipc_bclink_init(struct net *net)
 {
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+
 	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
 	if (!bcbearer)
 		return -ENOMEM;
@@ -967,19 +972,21 @@ int tipc_bclink_init(void)
 	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
 	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
 	bcl->bearer_id = MAX_BEARERS;
-	rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
+	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
 	bcl->state = WORKING_WORKING;
 	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
 	return 0;
 }
 
-void tipc_bclink_stop(void)
+void tipc_bclink_stop(struct net *net)
 {
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+
 	tipc_bclink_lock();
 	tipc_link_purge_queues(bcl);
 	tipc_bclink_unlock();
 
-	RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
+	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
 	synchronize_net();
 	kfree(bcbearer);
 	kfree(bclink);