author    Joe Stringer <joestringer@nicira.com>    2014-09-08 16:09:37 -0400
committer Pravin B Shelar <pshelar@nicira.com>     2014-11-06 02:52:34 -0500
commit    ca7105f278b3f7bd2c6f2b336c928f679054de4d
tree      2ebb11ba2d8890820e4e66d1ae6d25cf5a5bf3f3 /net/openvswitch
parent    738967b8bf57e582db1a23ce773c36fefd4b7d37
openvswitch: Refactor ovs_flow_cmd_fill_info().
Split up ovs_flow_cmd_fill_info() to make it easier to cache parts of a dump
reply. This will be used to streamline flow_dump in a future patch.

Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Thomas Graf <tgraf@noironetworks.com>
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
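At a glance, the change splits the single fill routine into three attribute-group helpers plus a thin wrapper. The sketch below is condensed from the diff that follows; it is not a standalone compilable unit (the types and helpers live in net/openvswitch/datapath.c):

/* Condensed view of the refactor, taken from the diff below: each helper
 * fills one group of OVS_FLOW_ATTR_* attributes into the reply skb and
 * returns 0 or a negative errno; ovs_flow_cmd_fill_info() becomes a thin
 * wrapper around them.
 */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;
	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_flow_cmd_fill_match(flow, skb);	/* OVS_FLOW_ATTR_KEY + MASK */
	if (err)
		goto error;

	err = ovs_flow_cmd_fill_stats(flow, skb);	/* USED, STATS, TCP_FLAGS */
	if (err)
		goto error;

	err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len); /* ACTIONS */
	if (err)
		goto error;

	return genlmsg_end(skb, ovs_header);

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

Passing skb_orig_len down to the actions helper preserves the existing behaviour of skipping OVS_FLOW_ATTR_ACTIONS when it does not fit and the flow is the first one dumped into 'skb'.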
Diffstat (limited to 'net/openvswitch')
-rw-r--r--   net/openvswitch/datapath.c   93
1 file changed, 66 insertions(+), 27 deletions(-)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 04a26ae4a4f6..bbb920bf48da 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -674,58 +674,67 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 }
 
 /* Called with ovs_mutex or RCU read lock. */
-static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
-				  struct sk_buff *skb, u32 portid,
-				  u32 seq, u32 flags, u8 cmd)
+static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
+				   struct sk_buff *skb)
 {
-	const int skb_orig_len = skb->len;
-	struct nlattr *start;
-	struct ovs_flow_stats stats;
-	__be16 tcp_flags;
-	unsigned long used;
-	struct ovs_header *ovs_header;
 	struct nlattr *nla;
 	int err;
 
-	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
-	if (!ovs_header)
-		return -EMSGSIZE;
-
-	ovs_header->dp_ifindex = dp_ifindex;
-
 	/* Fill flow key. */
 	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
 	if (!nla)
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
 	if (err)
-		goto error;
+		return err;
+
 	nla_nest_end(skb, nla);
 
+	/* Fill flow mask. */
 	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
 	if (!nla)
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
 	if (err)
-		goto error;
+		return err;
 
 	nla_nest_end(skb, nla);
+	return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
+				   struct sk_buff *skb)
+{
+	struct ovs_flow_stats stats;
+	__be16 tcp_flags;
+	unsigned long used;
 
 	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
 
 	if (used &&
 	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	if (stats.n_packets &&
 	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
-		goto nla_put_failure;
+		return -EMSGSIZE;
 
 	if ((u8)ntohs(tcp_flags) &&
 	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
-		goto nla_put_failure;
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
+				     struct sk_buff *skb, int skb_orig_len)
+{
+	struct nlattr *start;
+	int err;
 
 	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
 	 * this is the first flow to be dumped into 'skb'.  This is unusual for
@@ -749,17 +758,47 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
 			nla_nest_end(skb, start);
 		else {
 			if (skb_orig_len)
-				goto error;
+				return err;
 
 			nla_nest_cancel(skb, start);
 		}
-	} else if (skb_orig_len)
-		goto nla_put_failure;
+	} else if (skb_orig_len) {
+		return -EMSGSIZE;
+	}
+
+	return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
+				  struct sk_buff *skb, u32 portid,
+				  u32 seq, u32 flags, u8 cmd)
+{
+	const int skb_orig_len = skb->len;
+	struct ovs_header *ovs_header;
+	int err;
+
+	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
+				 flags, cmd);
+	if (!ovs_header)
+		return -EMSGSIZE;
+
+	ovs_header->dp_ifindex = dp_ifindex;
+
+	err = ovs_flow_cmd_fill_match(flow, skb);
+	if (err)
+		goto error;
+
+	err = ovs_flow_cmd_fill_stats(flow, skb);
+	if (err)
+		goto error;
+
+	err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
+	if (err)
+		goto error;
 
 	return genlmsg_end(skb, ovs_header);
 
-nla_put_failure:
-	err = -EMSGSIZE;
 error:
 	genlmsg_cancel(skb, ovs_header);
 	return err;