author     Jon Paul Maloy <jon.maloy@ericsson.com>  2015-11-19 14:30:44 -0500
committer  David S. Miller <davem@davemloft.net>    2015-11-20 14:06:10 -0500
commit     5405ff6e15f40f2f53e37d2dcd7de521e2b7a96f
tree       226f40f32f063d27a8d9a6abe6708d550721f1fd  /net/tipc/link.c
parent     2312bf61ae365fdd6b9bfb24558a417859759447
tipc: convert node lock to rwlock
According to the node FSM, a node in state SELF_UP_PEER_UP cannot change state inside a lock context, except when a TUNNEL_PROTOCOL (SYNCH or FAILOVER) packet arrives. However, the node's individual links may still change state.

Since each link is now protected by its own spinlock, we finally have the conditions in place to convert the node spinlock to an rwlock_t. If the node state and arriving packet type are right, we can let the link receive the packet directly, under protection of its own spinlock and the node lock in read mode. In all other cases we use the node lock in write mode.

This enables full concurrent execution between parallel links during steady-state traffic situations, i.e., 99+ % of the time.

This commit implements this change.

Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
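To make the locking model described above concrete, here is a minimal sketch of the fast-path/slow-path split, using stand-in types and a hypothetical predicate demo_fast_path_ok(); the real dispatch logic lives in net/tipc/node.c (outside this file's diff), so nothing below is the actual TIPC code.

/* Illustrative sketch only -- stand-in names, not TIPC code. */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_link_entry {
	spinlock_t lock;		/* per-link spinlock (one per bearer) */
	/* struct tipc_link *link; ... */
};

struct demo_node {
	rwlock_t lock;			/* node lock, now an rwlock_t */
	struct demo_link_entry links[2];/* illustrative bearer count */
	/* node FSM state, addr, ... */
};

/* Hypothetical check: node is SELF_UP_PEER_UP and the packet is not a
 * TUNNEL_PROTOCOL (SYNCH/FAILOVER) message, so the node FSM cannot
 * change state while we hold the lock.
 */
static bool demo_fast_path_ok(struct demo_node *n, struct sk_buff *skb)
{
	return true;			/* placeholder for the real test */
}

static void demo_node_rcv(struct demo_node *n, struct sk_buff *skb,
			  int bearer_id)
{
	struct demo_link_entry *le = &n->links[bearer_id];

	if (demo_fast_path_ok(n, skb)) {
		/* Steady state: shared node lock plus the per-link
		 * spinlock lets parallel links receive concurrently.
		 */
		read_lock_bh(&n->lock);
		spin_lock_bh(&le->lock);
		/* ... link-level receive ... */
		spin_unlock_bh(&le->lock);
		read_unlock_bh(&n->lock);
		return;
	}

	/* Node or link state may change: take the node lock exclusively. */
	write_lock_bh(&n->lock);
	/* ... node FSM / link state handling ... */
	write_unlock_bh(&n->lock);
}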
Diffstat (limited to 'net/tipc/link.c')
-rw-r--r--  net/tipc/link.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b5e895c6f1aa..1dda46e5dd83 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1547,7 +1547,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
 	*bearer_id = 0;
 	rcu_read_lock();
 	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
-		tipc_node_lock(n_ptr);
+		tipc_node_read_lock(n_ptr);
 		for (i = 0; i < MAX_BEARERS; i++) {
 			l_ptr = n_ptr->links[i].link;
 			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
@@ -1556,7 +1556,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
 				break;
 			}
 		}
-		tipc_node_unlock(n_ptr);
+		tipc_node_read_unlock(n_ptr);
 		if (found_node)
 			break;
 	}
@@ -1658,7 +1658,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 	if (!node)
 		return -EINVAL;
 
-	tipc_node_lock(node);
+	tipc_node_read_lock(node);
 
 	link = node->links[bearer_id].link;
 	if (!link) {
@@ -1699,7 +1699,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 	}
 
 out:
-	tipc_node_unlock(node);
+	tipc_node_read_unlock(node);
 
 	return res;
 }
@@ -1898,10 +1898,10 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 		list_for_each_entry_continue_rcu(node, &tn->node_list,
 						 list) {
-			tipc_node_lock(node);
+			tipc_node_read_lock(node);
 			err = __tipc_nl_add_node_links(net, &msg, node,
 						       &prev_link);
-			tipc_node_unlock(node);
+			tipc_node_read_unlock(node);
 			if (err)
 				goto out;
 
@@ -1913,10 +1913,10 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			goto out;
 
 		list_for_each_entry_rcu(node, &tn->node_list, list) {
-			tipc_node_lock(node);
+			tipc_node_read_lock(node);
 			err = __tipc_nl_add_node_links(net, &msg, node,
 						       &prev_link);
-			tipc_node_unlock(node);
+			tipc_node_read_unlock(node);
 			if (err)
 				goto out;
 
@@ -1967,16 +1967,16 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
 		if (!node)
 			return -EINVAL;
 
-		tipc_node_lock(node);
+		tipc_node_read_lock(node);
 		link = node->links[bearer_id].link;
 		if (!link) {
-			tipc_node_unlock(node);
+			tipc_node_read_unlock(node);
 			nlmsg_free(msg.skb);
 			return -EINVAL;
 		}
 
 		err = __tipc_nl_add_link(net, &msg, link, 0);
-		tipc_node_unlock(node);
+		tipc_node_read_unlock(node);
 		if (err) {
 			nlmsg_free(msg.skb);
 			return err;
@@ -2021,18 +2021,18 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
 	node = tipc_link_find_owner(net, link_name, &bearer_id);
 	if (!node)
 		return -EINVAL;
+
 	le = &node->links[bearer_id];
-	tipc_node_lock(node);
+	tipc_node_read_lock(node);
 	spin_lock_bh(&le->lock);
 	link = le->link;
 	if (!link) {
-		tipc_node_unlock(node);
+		spin_unlock_bh(&le->lock);
+		tipc_node_read_unlock(node);
 		return -EINVAL;
 	}
-
 	link_reset_statistics(link);
 	spin_unlock_bh(&le->lock);
-	tipc_node_unlock(node);
-
+	tipc_node_read_unlock(node);
 	return 0;
 }
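The tipc_node_read_lock()/tipc_node_read_unlock() calls introduced throughout this diff are added to net/tipc/node.c by the same patch, which is outside this file's diffstat. As a rough, assumed sketch, such helpers amount to thin _bh-variant accessors around the node's new rwlock_t; the stand-in names and bodies below are an illustration, not the real node.c code.

#include <linux/spinlock.h>

/* Stand-in for struct tipc_node; only the lock field matters here. */
struct demo_tipc_node {
	rwlock_t lock;
};

/* Assumed shape of the helpers called in the hunks above. */
void demo_node_read_lock(struct demo_tipc_node *n)
{
	read_lock_bh(&n->lock);		/* shared: many links in parallel */
}

void demo_node_read_unlock(struct demo_tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

void demo_node_write_lock(struct demo_tipc_node *n)
{
	write_lock_bh(&n->lock);	/* exclusive: node/link state changes */
}

void demo_node_write_unlock(struct demo_tipc_node *n)
{
	write_unlock_bh(&n->lock);
}

Note how tipc_nl_link_reset_stats() in the last hunk still takes the per-link spinlock inside the read-held node lock before touching the link, matching the pattern described in the commit message.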