Diffstat (limited to 'net/openvswitch')
-rw-r--r--	net/openvswitch/datapath.c	271
-rw-r--r--	net/openvswitch/flow.c	 10
-rw-r--r--	net/openvswitch/flow.h	  2
3 files changed, 225 insertions, 58 deletions
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0f783d9fa00d..f14816b80b80 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -464,16 +464,89 @@ static int flush_flows(struct datapath *dp)
 	return 0;
 }
 
-static int validate_actions(const struct nlattr *attr,
-			    const struct sw_flow_key *key, int depth);
+static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
+{
+
+	struct sw_flow_actions *acts;
+	int new_acts_size;
+	int req_size = NLA_ALIGN(attr_len);
+	int next_offset = offsetof(struct sw_flow_actions, actions) +
+					(*sfa)->actions_len;
+
+	if (req_size <= (ksize(*sfa) - next_offset))
+		goto out;
+
+	new_acts_size = ksize(*sfa) * 2;
+
+	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+			return ERR_PTR(-EMSGSIZE);
+		new_acts_size = MAX_ACTIONS_BUFSIZE;
+	}
+
+	acts = ovs_flow_actions_alloc(new_acts_size);
+	if (IS_ERR(acts))
+		return (void *)acts;
+
+	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
+	acts->actions_len = (*sfa)->actions_len;
+	kfree(*sfa);
+	*sfa = acts;
+
+out:
+	(*sfa)->actions_len += req_size;
+	return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
+}
+
+static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
+{
+	struct nlattr *a;
+
+	a = reserve_sfa_size(sfa, nla_attr_size(len));
+	if (IS_ERR(a))
+		return PTR_ERR(a);
+
+	a->nla_type = attrtype;
+	a->nla_len = nla_attr_size(len);
+
+	if (data)
+		memcpy(nla_data(a), data, len);
+	memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
+
+	return 0;
+}
+
+static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
+{
+	int used = (*sfa)->actions_len;
+	int err;
+
+	err = add_action(sfa, attrtype, NULL, 0);
+	if (err)
+		return err;
+
+	return used;
+}
 
-static int validate_sample(const struct nlattr *attr,
-			   const struct sw_flow_key *key, int depth)
+static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
+{
+	struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);
+
+	a->nla_len = sfa->actions_len - st_offset;
+}
+
+static int validate_and_copy_actions(const struct nlattr *attr,
+				const struct sw_flow_key *key, int depth,
+				struct sw_flow_actions **sfa);
+
+static int validate_and_copy_sample(const struct nlattr *attr,
+			   const struct sw_flow_key *key, int depth,
+			   struct sw_flow_actions **sfa)
 {
 	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
 	const struct nlattr *probability, *actions;
 	const struct nlattr *a;
-	int rem;
+	int rem, start, err, st_acts;
 
 	memset(attrs, 0, sizeof(attrs));
 	nla_for_each_nested(a, attr, rem) {
@@ -492,7 +565,26 @@ static int validate_sample(const struct nlattr *attr,
 	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
 	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
 		return -EINVAL;
-	return validate_actions(actions, key, depth + 1);
+
+	/* validation done, copy sample action. */
+	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
+	if (start < 0)
+		return start;
+	err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
+	if (err)
+		return err;
+	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
+	if (st_acts < 0)
+		return st_acts;
+
+	err = validate_and_copy_actions(actions, key, depth + 1, sfa);
+	if (err)
+		return err;
+
+	add_nested_action_end(*sfa, st_acts);
+	add_nested_action_end(*sfa, start);
+
+	return 0;
 }
 
 static int validate_tp_port(const struct sw_flow_key *flow_key)
@@ -606,8 +698,24 @@ static int validate_userspace(const struct nlattr *attr)
 	return 0;
 }
 
-static int validate_actions(const struct nlattr *attr,
-			    const struct sw_flow_key *key, int depth)
+static int copy_action(const struct nlattr *from,
+		      struct sw_flow_actions **sfa)
+{
+	int totlen = NLA_ALIGN(from->nla_len);
+	struct nlattr *to;
+
+	to = reserve_sfa_size(sfa, from->nla_len);
+	if (IS_ERR(to))
+		return PTR_ERR(to);
+
+	memcpy(to, from, totlen);
+	return 0;
+}
+
+static int validate_and_copy_actions(const struct nlattr *attr,
+				const struct sw_flow_key *key,
+				int depth,
+				struct sw_flow_actions **sfa)
 {
 	const struct nlattr *a;
 	int rem, err;
@@ -627,12 +735,14 @@ static int validate_actions(const struct nlattr *attr,
 		};
 		const struct ovs_action_push_vlan *vlan;
 		int type = nla_type(a);
+		bool skip_copy;
 
 		if (type > OVS_ACTION_ATTR_MAX ||
 		    (action_lens[type] != nla_len(a) &&
 		     action_lens[type] != (u32)-1))
 			return -EINVAL;
 
+		skip_copy = false;
 		switch (type) {
 		case OVS_ACTION_ATTR_UNSPEC:
 			return -EINVAL;
@@ -667,14 +777,20 @@ static int validate_actions(const struct nlattr *attr,
 			break;
 
 		case OVS_ACTION_ATTR_SAMPLE:
-			err = validate_sample(a, key, depth);
+			err = validate_and_copy_sample(a, key, depth, sfa);
 			if (err)
 				return err;
+			skip_copy = true;
 			break;
 
 		default:
 			return -EINVAL;
 		}
+		if (!skip_copy) {
+			err = copy_action(a, sfa);
+			if (err)
+				return err;
+		}
 	}
 
 	if (rem > 0)
@@ -742,18 +858,16 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]);
 	if (err)
 		goto err_flow_free;
-
-	err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
-	if (err)
-		goto err_flow_free;
-
 	flow->hash = ovs_flow_hash(&flow->key, key_len);
-
-	acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
+	acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
 	err = PTR_ERR(acts);
 	if (IS_ERR(acts))
 		goto err_flow_free;
+
+	err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
 	rcu_assign_pointer(flow->sf_acts, acts);
+	if (err)
+		goto err_flow_free;
 
 	OVS_CB(packet)->flow = flow;
 	packet->priority = flow->key.phy.priority;
@@ -843,6 +957,66 @@ static struct genl_multicast_group ovs_dp_flow_multicast_group = {
 	.name = OVS_FLOW_MCGROUP
 };
 
+static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);
+static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
+{
+	const struct nlattr *a;
+	struct nlattr *start;
+	int err = 0, rem;
+
+	start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
+	if (!start)
+		return -EMSGSIZE;
+
+	nla_for_each_nested(a, attr, rem) {
+		int type = nla_type(a);
+		struct nlattr *st_sample;
+
+		switch (type) {
+		case OVS_SAMPLE_ATTR_PROBABILITY:
+			if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
+				return -EMSGSIZE;
+			break;
+		case OVS_SAMPLE_ATTR_ACTIONS:
+			st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
+			if (!st_sample)
+				return -EMSGSIZE;
+			err = actions_to_attr(nla_data(a), nla_len(a), skb);
+			if (err)
+				return err;
+			nla_nest_end(skb, st_sample);
+			break;
+		}
+	}
+
+	nla_nest_end(skb, start);
+	return err;
+}
+
+static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
+{
+	const struct nlattr *a;
+	int rem, err;
+
+	nla_for_each_attr(a, attr, len, rem) {
+		int type = nla_type(a);
+
+		switch (type) {
+		case OVS_ACTION_ATTR_SAMPLE:
+			err = sample_action_to_attr(a, skb);
+			if (err)
+				return err;
+			break;
+		default:
+			if (nla_put(skb, type, nla_len(a), nla_data(a)))
+				return -EMSGSIZE;
+			break;
+		}
+	}
+
+	return 0;
+}
+
 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 {
 	return NLMSG_ALIGN(sizeof(struct ovs_header))
@@ -860,6 +1034,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 {
 	const int skb_orig_len = skb->len;
 	const struct sw_flow_actions *sf_acts;
+	struct nlattr *start;
 	struct ovs_flow_stats stats;
 	struct ovs_header *ovs_header;
 	struct nlattr *nla;
@@ -913,10 +1088,19 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	 * This can only fail for dump operations because the skb is always
 	 * properly sized for single flows.
 	 */
-	err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
-		      sf_acts->actions);
-	if (err < 0 && skb_orig_len)
-		goto error;
+	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
+	if (start) {
+		err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
+		if (!err)
+			nla_nest_end(skb, start);
+		else {
+			if (skb_orig_len)
+				goto error;
+
+			nla_nest_cancel(skb, start);
+		}
+	} else if (skb_orig_len)
+		goto nla_put_failure;
 
 	return genlmsg_end(skb, ovs_header);
 
@@ -961,6 +1145,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *reply;
 	struct datapath *dp;
 	struct flow_table *table;
+	struct sw_flow_actions *acts = NULL;
 	int error;
 	int key_len;
 
@@ -974,9 +1159,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 
 	/* Validate actions. */
 	if (a[OVS_FLOW_ATTR_ACTIONS]) {
-		error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0);
-		if (error)
+		acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
+		error = PTR_ERR(acts);
+		if (IS_ERR(acts))
 			goto error;
+
+		error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0, &acts);
+		if (error)
+			goto err_kfree;
 	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
 		error = -EINVAL;
 		goto error;
@@ -991,8 +1181,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	table = ovsl_dereference(dp->table);
 	flow = ovs_flow_tbl_lookup(table, &key, key_len);
 	if (!flow) {
-		struct sw_flow_actions *acts;
-
 		/* Bail out if we're not allowed to create a new flow. */
 		error = -ENOENT;
 		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
@@ -1019,11 +1207,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		flow->key = key;
 		clear_stats(flow);
 
-		/* Obtain actions. */
-		acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
-		error = PTR_ERR(acts);
-		if (IS_ERR(acts))
-			goto error_free_flow;
 		rcu_assign_pointer(flow->sf_acts, acts);
 
 		/* Put flow in bucket. */
@@ -1036,7 +1219,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	} else {
 		/* We found a matching flow. */
 		struct sw_flow_actions *old_acts;
-		struct nlattr *acts_attrs;
 
 		/* Bail out if we're not allowed to modify an existing flow.
 		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
@@ -1051,21 +1233,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 
 		/* Update actions. */
 		old_acts = ovsl_dereference(flow->sf_acts);
-		acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
-		if (acts_attrs &&
-		   (old_acts->actions_len != nla_len(acts_attrs) ||
-		    memcmp(old_acts->actions, nla_data(acts_attrs),
-			   old_acts->actions_len))) {
-			struct sw_flow_actions *new_acts;
-
-			new_acts = ovs_flow_actions_alloc(acts_attrs);
-			error = PTR_ERR(new_acts);
-			if (IS_ERR(new_acts))
-				goto err_unlock_ovs;
-
-			rcu_assign_pointer(flow->sf_acts, new_acts);
-			ovs_flow_deferred_free_acts(old_acts);
-		}
+		rcu_assign_pointer(flow->sf_acts, acts);
+		ovs_flow_deferred_free_acts(old_acts);
 
 		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
 						info->snd_seq, OVS_FLOW_CMD_NEW);
@@ -1086,10 +1255,10 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			 ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
 	return 0;
 
-error_free_flow:
-	ovs_flow_free(flow);
 err_unlock_ovs:
 	ovs_unlock();
+err_kfree:
+	kfree(acts);
 error:
 	return error;
 }
@@ -1866,8 +2035,8 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 		goto exit_unlock;
 	}
 
-	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-					 OVS_VPORT_CMD_DEL);
+	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
+					 info->snd_seq, OVS_VPORT_CMD_DEL);
 	err = PTR_ERR(reply);
 	if (IS_ERR(reply))
 		goto exit_unlock;
@@ -1896,8 +2065,8 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(vport))
 		goto exit_unlock;
 
-	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-					 OVS_VPORT_CMD_NEW);
+	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
+					 info->snd_seq, OVS_VPORT_CMD_NEW);
 	err = PTR_ERR(reply);
 	if (IS_ERR(reply))
 		goto exit_unlock;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 093c191d4fc2..940d4b803ff5 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -198,20 +198,18 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
 	spin_unlock(&flow->lock);
 }
 
-struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
+struct sw_flow_actions *ovs_flow_actions_alloc(int size)
 {
-	int actions_len = nla_len(actions);
 	struct sw_flow_actions *sfa;
 
-	if (actions_len > MAX_ACTIONS_BUFSIZE)
+	if (size > MAX_ACTIONS_BUFSIZE)
 		return ERR_PTR(-EINVAL);
 
-	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
+	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
 	if (!sfa)
 		return ERR_PTR(-ENOMEM);
 
-	sfa->actions_len = actions_len;
-	nla_memcpy(sfa->actions, actions, actions_len);
+	sfa->actions_len = 0;
 	return sfa;
 }
 
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 2a83e2141f08..e370f6246ee9 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -130,7 +130,7 @@ struct sw_flow *ovs_flow_alloc(void);
130void ovs_flow_deferred_free(struct sw_flow *); 130void ovs_flow_deferred_free(struct sw_flow *);
131void ovs_flow_free(struct sw_flow *flow); 131void ovs_flow_free(struct sw_flow *flow);
132 132
133struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *); 133struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len);
134void ovs_flow_deferred_free_acts(struct sw_flow_actions *); 134void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
135 135
136int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *, 136int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
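
Note on the buffer growth in the datapath.c hunk above: reserve_sfa_size() doubles the action buffer (as reported by ksize()) whenever a copy would overflow it, caps it at MAX_ACTIONS_BUFSIZE, and fails with -EMSGSIZE once a request no longer fits under the cap. The userspace C sketch below mirrors only that policy; it is not part of the patch. The struct, the 16 KiB cap, and the helper names are illustrative stand-ins, and the explicit "allocated" field approximates what ksize() provides in the kernel.

/* Standalone sketch of the reserve_sfa_size() growth policy (userspace). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define CAP_BUFSIZE (16 * 1024)   /* stand-in for MAX_ACTIONS_BUFSIZE */
#define ALIGNTO 4
#define ALIGN4(len) (((len) + ALIGNTO - 1) & ~(ALIGNTO - 1))

struct sfa {
	size_t allocated;   /* current buffer size (kernel uses ksize()) */
	size_t used;        /* bytes consumed so far (actions_len) */
	unsigned char *buf;
};

/* Reserve attr_len bytes, doubling the buffer up to CAP_BUFSIZE. */
static int sfa_reserve(struct sfa *s, size_t attr_len)
{
	size_t req = ALIGN4(attr_len);
	size_t new_size;
	unsigned char *nbuf;

	if (req <= s->allocated - s->used)
		goto out;

	new_size = s->allocated * 2;
	if (new_size > CAP_BUFSIZE) {
		if (CAP_BUFSIZE - s->used < req)
			return -EMSGSIZE;
		new_size = CAP_BUFSIZE;
	}

	nbuf = realloc(s->buf, new_size);
	if (!nbuf)
		return -ENOMEM;
	s->buf = nbuf;
	s->allocated = new_size;
out:
	s->used += req;
	return 0;
}

int main(void)
{
	struct sfa s = { .allocated = 64, .used = 0, .buf = malloc(64) };
	size_t i;

	for (i = 0; i < 1000; i++) {
		int err = sfa_reserve(&s, 30);
		if (err) {
			/* hits the cap well before 1000 * 32 bytes */
			printf("request %zu rejected: %d\n", i, err);
			break;
		}
	}
	printf("final buffer: %zu bytes, %zu used\n", s.allocated, s.used);
	free(s.buf);
	return 0;
}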