path: root/net/openvswitch
author	Jarno Rajahalme <jrajahalme@nicira.com>	2014-05-05 17:28:07 -0400
committer	Pravin B Shelar <pshelar@nicira.com>	2014-05-22 19:27:36 -0400
commit	0e9796b4af9ef490e203158cb738a5a4986eb75c (patch)
tree	31cd60e58dbf7f4fc11a4e57d921bef8a4667a57 /net/openvswitch
parent	86ec8dbae27e5fa2b5d54f10f77286d9ef55732a (diff)
openvswitch: Reduce locking requirements.
Reduce and clarify locking requirements for ovs_flow_cmd_alloc_info(), ovs_flow_cmd_fill_info() and ovs_flow_cmd_build_info().

A datapath pointer is available only when holding a lock. Change ovs_flow_cmd_fill_info() and ovs_flow_cmd_build_info() to take a dp_ifindex directly, rather than a datapath pointer that is then (only) used to get the dp_ifindex. This is useful, since the dp_ifindex is available even when the datapath pointer is not, both before and after taking a lock, which makes further critical section reduction possible.

Make ovs_flow_cmd_alloc_info() take an 'acts' argument instead of a 'flow' pointer. This allows future patches to do the allocation before acquiring the flow pointer.

The locking requirements after this patch are:

ovs_flow_cmd_alloc_info(): May be called without locking, but must not be called while holding the RCU read lock (due to memory allocation). If 'acts' belong to a flow in the flow table, however, the caller must hold ovs_mutex.

ovs_flow_cmd_fill_info(): Either ovs_mutex or the RCU read lock must be held.

ovs_flow_cmd_build_info(): This calls both of the above, so the caller must hold ovs_mutex.

Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
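The point about dp_ifindex being available both before and after taking a lock is what allows reply allocation to move out of the critical section. Below is a minimal, illustrative sketch, not code from this patch or from any handler in the tree, of the call ordering the new signatures permit: the reply skb is sized from freshly parsed actions and allocated with GFP_KERNEL before ovs_lock(), and ovs_flow_cmd_fill_info() is handed ovs_header->dp_ifindex under the lock. The helpers example_parse_actions() and example_insert_flow() are hypothetical placeholders.

/* Illustrative sketch only: shows the ordering the reduced locking
 * requirements allow, not an actual handler from this patch.
 */
static int example_flow_cmd_handler(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_actions *acts;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow *flow;
	int err;

	/* 'acts' are freshly parsed from the request, so they are not yet in
	 * the flow table and no lock is needed for the allocation below.
	 */
	acts = example_parse_actions(info);		/* hypothetical helper */
	if (IS_ERR(acts))
		return PTR_ERR(acts);

	/* GFP_KERNEL allocation outside any lock; only the actions size is needed. */
	reply = ovs_flow_cmd_alloc_info(acts, info, false);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto err_unlock;
	}

	flow = example_insert_flow(dp, acts);		/* hypothetical helper */
	if (IS_ERR(flow)) {
		err = PTR_ERR(flow);
		goto err_unlock;
	}

	if (reply) {
		/* dp_ifindex is passed directly; no datapath pointer is needed here. */
		err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, reply,
					     info->snd_portid, info->snd_seq, 0,
					     OVS_FLOW_CMD_NEW);
		BUG_ON(err < 0);
	}
	ovs_unlock();

	if (reply)
		ovs_notify(&dp_flow_genl_family, reply, info);
	return 0;

err_unlock:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}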
Diffstat (limited to 'net/openvswitch')
-rw-r--r--	net/openvswitch/datapath.c	54
1 file changed, 29 insertions, 25 deletions
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f3bcdac80bda..949fc7fadaf0 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -663,7 +663,7 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 }
 
 /* Called with ovs_mutex or RCU read lock. */
-static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
+static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
 				  struct sk_buff *skb, u32 portid,
 				  u32 seq, u32 flags, u8 cmd)
 {
@@ -680,7 +680,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	if (!ovs_header)
 		return -EMSGSIZE;
 
-	ovs_header->dp_ifindex = get_dpifindex(dp);
+	ovs_header->dp_ifindex = dp_ifindex;
 
 	/* Fill flow key. */
 	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
@@ -703,6 +703,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	nla_nest_end(skb, nla);
 
 	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
+
 	if (used &&
 	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
 		goto nla_put_failure;
@@ -730,9 +731,9 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 		const struct sw_flow_actions *sf_acts;
 
 		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
-
 		err = ovs_nla_put_actions(sf_acts->actions,
 					  sf_acts->actions_len, skb);
+
 		if (!err)
 			nla_nest_end(skb, start);
 		else {
@@ -753,41 +754,40 @@ error:
 	return err;
 }
 
-/* Must be called with ovs_mutex. */
-static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow,
+/* May not be called with RCU read lock. */
+static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
 					       struct genl_info *info,
 					       bool always)
 {
 	struct sk_buff *skb;
-	size_t len;
 
 	if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
 		return NULL;
 
-	len = ovs_flow_cmd_msg_size(ovsl_dereference(flow->sf_acts));
-
-	skb = genlmsg_new_unicast(len, info, GFP_KERNEL);
+	skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
 	return skb;
 }
 
-/* Must be called with ovs_mutex. */
-static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
-					       struct datapath *dp,
-					       struct genl_info *info,
-					       u8 cmd, bool always)
+/* Called with ovs_mutex. */
+static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
+					       int dp_ifindex,
+					       struct genl_info *info, u8 cmd,
+					       bool always)
 {
 	struct sk_buff *skb;
 	int retval;
 
-	skb = ovs_flow_cmd_alloc_info(flow, info, always);
+	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
+				      always);
 	if (!skb || IS_ERR(skb))
 		return skb;
 
-	retval = ovs_flow_cmd_fill_info(flow, dp, skb, info->snd_portid,
-					info->snd_seq, 0, cmd);
+	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
+					info->snd_portid, info->snd_seq, 0,
+					cmd);
 	BUG_ON(retval < 0);
 	return skb;
 }
@@ -868,8 +868,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			goto err_flow_free;
 		}
 
-		reply = ovs_flow_cmd_build_info(flow, dp, info,
-						OVS_FLOW_CMD_NEW, false);
+		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
+						info, OVS_FLOW_CMD_NEW, false);
 	} else {
 		/* We found a matching flow. */
 		/* Bail out if we're not allowed to modify an existing flow.
@@ -895,8 +895,9 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			rcu_assign_pointer(flow->sf_acts, acts);
 			ovs_nla_free_flow_actions(old_acts);
 		}
-		reply = ovs_flow_cmd_build_info(flow, dp, info,
-						OVS_FLOW_CMD_NEW, false);
+
+		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
+						info, OVS_FLOW_CMD_NEW, false);
 
 		/* Clear stats. */
 		if (a[OVS_FLOW_ATTR_CLEAR])
@@ -958,7 +959,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW, true);
+	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
+					OVS_FLOW_CMD_NEW, true);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
 		goto unlock;
@@ -1005,7 +1007,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	reply = ovs_flow_cmd_alloc_info(flow, info, false);
+	reply = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
+					false);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
 		goto unlock;
@@ -1014,7 +1017,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	ovs_flow_tbl_remove(&dp->table, flow);
 
 	if (reply) {
-		err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
+		err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
+					     reply, info->snd_portid,
 					     info->snd_seq, 0,
 					     OVS_FLOW_CMD_DEL);
 		BUG_ON(err < 0);
@@ -1054,7 +1058,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		if (!flow)
 			break;
 
-		if (ovs_flow_cmd_fill_info(flow, dp, skb,
+		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
 					   NETLINK_CB(cb->skb).portid,
 					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
 					   OVS_FLOW_CMD_NEW) < 0)