Diffstat (limited to 'net/openvswitch/datapath.c')
 net/openvswitch/datapath.c | 733 ++++++----------------------------------
 1 file changed, 104 insertions(+), 629 deletions(-)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2aa13bd7f2b2..6f5e1dd3be2d 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -55,21 +55,17 @@
 
 #include "datapath.h"
 #include "flow.h"
+#include "flow_netlink.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
-
-#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
-static void rehash_flow_table(struct work_struct *work);
-static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
-
 int ovs_net_id __read_mostly;
 
-static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
-		       struct genl_multicast_group *grp)
+static void ovs_notify(struct genl_family *family,
+		       struct sk_buff *skb, struct genl_info *info)
 {
-	genl_notify(skb, genl_info_net(info), info->snd_portid,
-		    grp->id, info->nlhdr, GFP_KERNEL);
+	genl_notify(family, skb, genl_info_net(info), info->snd_portid,
+		    0, info->nlhdr, GFP_KERNEL);
 }
 
 /**
@@ -165,7 +161,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 {
 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
 
-	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false);
+	ovs_flow_tbl_destroy(&dp->table);
 	free_percpu(dp->stats_percpu);
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp->ports);
@@ -225,6 +221,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	struct dp_stats_percpu *stats;
 	struct sw_flow_key key;
 	u64 *stats_counter;
+	u32 n_mask_hit;
 	int error;
 
 	stats = this_cpu_ptr(dp->stats_percpu);
@@ -237,7 +234,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	}
 
 	/* Look up flow. */
-	flow = ovs_flow_lookup(rcu_dereference(dp->table), &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key, &n_mask_hit);
 	if (unlikely(!flow)) {
 		struct dp_upcall_info upcall;
 
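The third argument is new: n_mask_hit reports how many masks the lookup had to probe before matching (or giving up), which is the cost metric behind the megaflow statistics added further down. A minimal sketch of how the lookup plausibly accumulates it; the real implementation lives in flow_table.c, and masked_flow_lookup() and the mask_list field are assumed names here:

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key,
				    u32 *n_mask_hit)
{
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	/* Probe the hash table once per mask in use; count every probe
	 * so callers can see how expensive classification is getting. */
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(tbl, key, mask); /* assumed helper */
		if (flow)
			return flow;
	}
	return NULL;
}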
@@ -262,6 +259,7 @@ out:
 	/* Update datapath statistics. */
 	u64_stats_update_begin(&stats->sync);
 	(*stats_counter)++;
+	stats->n_mask_hit += n_mask_hit;
 	u64_stats_update_end(&stats->sync);
 }
 
@@ -435,7 +433,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 	upcall->dp_ifindex = dp_ifindex;
 
 	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
-	ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb);
+	ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
 	nla_nest_end(user_skb, nla);
 
 	if (upcall_info->userdata)
@@ -455,398 +453,6 @@ out:
 	return err;
 }
 
-/* Called with ovs_mutex. */
-static int flush_flows(struct datapath *dp)
-{
-	struct flow_table *old_table;
-	struct flow_table *new_table;
-
-	old_table = ovsl_dereference(dp->table);
-	new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
-	if (!new_table)
-		return -ENOMEM;
-
-	rcu_assign_pointer(dp->table, new_table);
-
-	ovs_flow_tbl_destroy(old_table, true);
-	return 0;
-}
-
-static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
-{
-
-	struct sw_flow_actions *acts;
-	int new_acts_size;
-	int req_size = NLA_ALIGN(attr_len);
-	int next_offset = offsetof(struct sw_flow_actions, actions) +
-			  (*sfa)->actions_len;
-
-	if (req_size <= (ksize(*sfa) - next_offset))
-		goto out;
-
-	new_acts_size = ksize(*sfa) * 2;
-
-	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
-		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
-			return ERR_PTR(-EMSGSIZE);
-		new_acts_size = MAX_ACTIONS_BUFSIZE;
-	}
-
-	acts = ovs_flow_actions_alloc(new_acts_size);
-	if (IS_ERR(acts))
-		return (void *)acts;
-
-	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
-	acts->actions_len = (*sfa)->actions_len;
-	kfree(*sfa);
-	*sfa = acts;
-
-out:
-	(*sfa)->actions_len += req_size;
-	return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
-}
-
-static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
-{
-	struct nlattr *a;
-
-	a = reserve_sfa_size(sfa, nla_attr_size(len));
-	if (IS_ERR(a))
-		return PTR_ERR(a);
-
-	a->nla_type = attrtype;
-	a->nla_len = nla_attr_size(len);
-
-	if (data)
-		memcpy(nla_data(a), data, len);
-	memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
-
-	return 0;
-}
-
-static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
-{
-	int used = (*sfa)->actions_len;
-	int err;
-
-	err = add_action(sfa, attrtype, NULL, 0);
-	if (err)
-		return err;
-
-	return used;
-}
-
-static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
-{
-	struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);
-
-	a->nla_len = sfa->actions_len - st_offset;
-}
-
-static int validate_and_copy_actions(const struct nlattr *attr,
-				     const struct sw_flow_key *key, int depth,
-				     struct sw_flow_actions **sfa);
-
-static int validate_and_copy_sample(const struct nlattr *attr,
-				    const struct sw_flow_key *key, int depth,
-				    struct sw_flow_actions **sfa)
-{
-	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
-	const struct nlattr *probability, *actions;
-	const struct nlattr *a;
-	int rem, start, err, st_acts;
-
-	memset(attrs, 0, sizeof(attrs));
-	nla_for_each_nested(a, attr, rem) {
-		int type = nla_type(a);
-		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
-			return -EINVAL;
-		attrs[type] = a;
-	}
-	if (rem)
-		return -EINVAL;
-
-	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
-	if (!probability || nla_len(probability) != sizeof(u32))
-		return -EINVAL;
-
-	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
-	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
-		return -EINVAL;
-
-	/* validation done, copy sample action. */
-	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
-	if (start < 0)
-		return start;
-	err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
-	if (err)
-		return err;
-	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
-	if (st_acts < 0)
-		return st_acts;
-
-	err = validate_and_copy_actions(actions, key, depth + 1, sfa);
-	if (err)
-		return err;
-
-	add_nested_action_end(*sfa, st_acts);
-	add_nested_action_end(*sfa, start);
-
-	return 0;
-}
-
-static int validate_tp_port(const struct sw_flow_key *flow_key)
-{
-	if (flow_key->eth.type == htons(ETH_P_IP)) {
-		if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
-			return 0;
-	} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
-		if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
-			return 0;
-	}
-
-	return -EINVAL;
-}
-
-static int validate_and_copy_set_tun(const struct nlattr *attr,
-				     struct sw_flow_actions **sfa)
-{
-	struct sw_flow_match match;
-	struct sw_flow_key key;
-	int err, start;
-
-	ovs_match_init(&match, &key, NULL);
-	err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &match, false);
-	if (err)
-		return err;
-
-	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
-	if (start < 0)
-		return start;
-
-	err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
-			 sizeof(match.key->tun_key));
-	add_nested_action_end(*sfa, start);
-
-	return err;
-}
-
-static int validate_set(const struct nlattr *a,
-			const struct sw_flow_key *flow_key,
-			struct sw_flow_actions **sfa,
-			bool *set_tun)
-{
-	const struct nlattr *ovs_key = nla_data(a);
-	int key_type = nla_type(ovs_key);
-
-	/* There can be only one key in a action */
-	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
-		return -EINVAL;
-
-	if (key_type > OVS_KEY_ATTR_MAX ||
-	    (ovs_key_lens[key_type] != nla_len(ovs_key) &&
-	     ovs_key_lens[key_type] != -1))
-		return -EINVAL;
-
-	switch (key_type) {
-	const struct ovs_key_ipv4 *ipv4_key;
-	const struct ovs_key_ipv6 *ipv6_key;
-	int err;
-
-	case OVS_KEY_ATTR_PRIORITY:
-	case OVS_KEY_ATTR_SKB_MARK:
-	case OVS_KEY_ATTR_ETHERNET:
-		break;
-
-	case OVS_KEY_ATTR_TUNNEL:
-		*set_tun = true;
-		err = validate_and_copy_set_tun(a, sfa);
-		if (err)
-			return err;
-		break;
-
-	case OVS_KEY_ATTR_IPV4:
-		if (flow_key->eth.type != htons(ETH_P_IP))
-			return -EINVAL;
-
-		if (!flow_key->ip.proto)
-			return -EINVAL;
-
-		ipv4_key = nla_data(ovs_key);
-		if (ipv4_key->ipv4_proto != flow_key->ip.proto)
-			return -EINVAL;
-
-		if (ipv4_key->ipv4_frag != flow_key->ip.frag)
-			return -EINVAL;
-
-		break;
-
-	case OVS_KEY_ATTR_IPV6:
-		if (flow_key->eth.type != htons(ETH_P_IPV6))
-			return -EINVAL;
-
-		if (!flow_key->ip.proto)
-			return -EINVAL;
-
-		ipv6_key = nla_data(ovs_key);
-		if (ipv6_key->ipv6_proto != flow_key->ip.proto)
-			return -EINVAL;
-
-		if (ipv6_key->ipv6_frag != flow_key->ip.frag)
-			return -EINVAL;
-
-		if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
-			return -EINVAL;
-
-		break;
-
-	case OVS_KEY_ATTR_TCP:
-		if (flow_key->ip.proto != IPPROTO_TCP)
-			return -EINVAL;
-
-		return validate_tp_port(flow_key);
-
-	case OVS_KEY_ATTR_UDP:
-		if (flow_key->ip.proto != IPPROTO_UDP)
-			return -EINVAL;
-
-		return validate_tp_port(flow_key);
-
-	case OVS_KEY_ATTR_SCTP:
-		if (flow_key->ip.proto != IPPROTO_SCTP)
-			return -EINVAL;
-
-		return validate_tp_port(flow_key);
-
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int validate_userspace(const struct nlattr *attr)
-{
-	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
-		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
-		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
-	};
-	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
-	int error;
-
-	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
-				 attr, userspace_policy);
-	if (error)
-		return error;
-
-	if (!a[OVS_USERSPACE_ATTR_PID] ||
-	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
-		return -EINVAL;
-
-	return 0;
-}
-
-static int copy_action(const struct nlattr *from,
-		       struct sw_flow_actions **sfa)
-{
-	int totlen = NLA_ALIGN(from->nla_len);
-	struct nlattr *to;
-
-	to = reserve_sfa_size(sfa, from->nla_len);
-	if (IS_ERR(to))
-		return PTR_ERR(to);
-
-	memcpy(to, from, totlen);
-	return 0;
-}
-
-static int validate_and_copy_actions(const struct nlattr *attr,
-				     const struct sw_flow_key *key,
-				     int depth,
-				     struct sw_flow_actions **sfa)
-{
-	const struct nlattr *a;
-	int rem, err;
-
-	if (depth >= SAMPLE_ACTION_DEPTH)
-		return -EOVERFLOW;
-
-	nla_for_each_nested(a, attr, rem) {
-		/* Expected argument lengths, (u32)-1 for variable length. */
-		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
-			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
-			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
-			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
-			[OVS_ACTION_ATTR_POP_VLAN] = 0,
-			[OVS_ACTION_ATTR_SET] = (u32)-1,
-			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1
-		};
-		const struct ovs_action_push_vlan *vlan;
-		int type = nla_type(a);
-		bool skip_copy;
-
-		if (type > OVS_ACTION_ATTR_MAX ||
-		    (action_lens[type] != nla_len(a) &&
-		     action_lens[type] != (u32)-1))
-			return -EINVAL;
-
-		skip_copy = false;
-		switch (type) {
-		case OVS_ACTION_ATTR_UNSPEC:
-			return -EINVAL;
-
-		case OVS_ACTION_ATTR_USERSPACE:
-			err = validate_userspace(a);
-			if (err)
-				return err;
-			break;
-
-		case OVS_ACTION_ATTR_OUTPUT:
-			if (nla_get_u32(a) >= DP_MAX_PORTS)
-				return -EINVAL;
-			break;
-
-
-		case OVS_ACTION_ATTR_POP_VLAN:
-			break;
-
-		case OVS_ACTION_ATTR_PUSH_VLAN:
-			vlan = nla_data(a);
-			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
-				return -EINVAL;
-			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
-				return -EINVAL;
-			break;
-
-		case OVS_ACTION_ATTR_SET:
-			err = validate_set(a, key, sfa, &skip_copy);
-			if (err)
-				return err;
-			break;
-
-		case OVS_ACTION_ATTR_SAMPLE:
-			err = validate_and_copy_sample(a, key, depth, sfa);
-			if (err)
-				return err;
-			skip_copy = true;
-			break;
-
-		default:
-			return -EINVAL;
-		}
-		if (!skip_copy) {
-			err = copy_action(a, sfa);
-			if (err)
-				return err;
-		}
-	}
-
-	if (rem > 0)
-		return -EINVAL;
-
-	return 0;
-}
-
 static void clear_stats(struct sw_flow *flow)
 {
 	flow->used = 0;
@@ -902,15 +508,16 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto err_flow_free;
 
-	err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]);
+	err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
 	if (err)
 		goto err_flow_free;
-	acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
+	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
 	err = PTR_ERR(acts);
 	if (IS_ERR(acts))
 		goto err_flow_free;
 
-	err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
+	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
+				   &flow->key, 0, &acts);
 	rcu_assign_pointer(flow->sf_acts, acts);
 	if (err)
 		goto err_flow_free;
@@ -950,7 +557,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
 	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
 };
 
-static struct genl_ops dp_packet_genl_ops[] = {
+static const struct genl_ops dp_packet_genl_ops[] = {
 	{ .cmd = OVS_PACKET_CMD_EXECUTE,
 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
 	  .policy = packet_policy,
@@ -958,15 +565,18 @@ static struct genl_ops dp_packet_genl_ops[] = {
 	}
 };
 
-static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
+static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
+			 struct ovs_dp_megaflow_stats *mega_stats)
 {
-	struct flow_table *table;
 	int i;
 
-	table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held());
-	stats->n_flows = ovs_flow_tbl_count(table);
+	memset(mega_stats, 0, sizeof(*mega_stats));
+
+	stats->n_flows = ovs_flow_tbl_count(&dp->table);
+	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
 
 	stats->n_hit = stats->n_missed = stats->n_lost = 0;
+
 	for_each_possible_cpu(i) {
 		const struct dp_stats_percpu *percpu_stats;
 		struct dp_stats_percpu local_stats;
@@ -982,6 +592,7 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
 		stats->n_hit += local_stats.n_hit;
 		stats->n_missed += local_stats.n_missed;
 		stats->n_lost += local_stats.n_lost;
+		mega_stats->n_mask_hit += local_stats.n_mask_hit;
 	}
 }
 
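The writer side increments n_mask_hit inside a u64_stats_update_begin()/end() section (see the hunk at -262 above); the elided body of the for_each_possible_cpu() loop snapshots each CPU's counters with the matching fetch/retry pair so that 64-bit counters read consistently on 32-bit machines. Roughly, using the _bh variants a softirq-context writer of this era calls for:

	unsigned int start;

	percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
	do {
		start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
		local_stats = *percpu_stats;
	} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));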
@@ -1005,100 +616,6 @@ static struct genl_multicast_group ovs_dp_flow_multicast_group = {
 	.name = OVS_FLOW_MCGROUP
 };
 
-static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);
-static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
-{
-	const struct nlattr *a;
-	struct nlattr *start;
-	int err = 0, rem;
-
-	start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
-	if (!start)
-		return -EMSGSIZE;
-
-	nla_for_each_nested(a, attr, rem) {
-		int type = nla_type(a);
-		struct nlattr *st_sample;
-
-		switch (type) {
-		case OVS_SAMPLE_ATTR_PROBABILITY:
-			if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
-				return -EMSGSIZE;
-			break;
-		case OVS_SAMPLE_ATTR_ACTIONS:
-			st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
-			if (!st_sample)
-				return -EMSGSIZE;
-			err = actions_to_attr(nla_data(a), nla_len(a), skb);
-			if (err)
-				return err;
-			nla_nest_end(skb, st_sample);
-			break;
-		}
-	}
-
-	nla_nest_end(skb, start);
-	return err;
-}
-
-static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
-{
-	const struct nlattr *ovs_key = nla_data(a);
-	int key_type = nla_type(ovs_key);
-	struct nlattr *start;
-	int err;
-
-	switch (key_type) {
-	case OVS_KEY_ATTR_IPV4_TUNNEL:
-		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
-		if (!start)
-			return -EMSGSIZE;
-
-		err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
-					     nla_data(ovs_key));
-		if (err)
-			return err;
-		nla_nest_end(skb, start);
-		break;
-	default:
-		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
-			return -EMSGSIZE;
-		break;
-	}
-
-	return 0;
-}
-
-static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
-{
-	const struct nlattr *a;
-	int rem, err;
-
-	nla_for_each_attr(a, attr, len, rem) {
-		int type = nla_type(a);
-
-		switch (type) {
-		case OVS_ACTION_ATTR_SET:
-			err = set_action_to_attr(a, skb);
-			if (err)
-				return err;
-			break;
-
-		case OVS_ACTION_ATTR_SAMPLE:
-			err = sample_action_to_attr(a, skb);
-			if (err)
-				return err;
-			break;
-		default:
-			if (nla_put(skb, type, nla_len(a), nla_data(a)))
-				return -EMSGSIZE;
-			break;
-		}
-	}
-
-	return 0;
-}
-
 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 {
 	return NLMSG_ALIGN(sizeof(struct ovs_header))
@@ -1135,8 +652,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	if (!nla)
 		goto nla_put_failure;
 
-	err = ovs_flow_to_nlattrs(&flow->unmasked_key,
-				  &flow->unmasked_key, skb);
+	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
 	if (err)
 		goto error;
 	nla_nest_end(skb, nla);
@@ -1145,7 +661,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	if (!nla)
 		goto nla_put_failure;
 
-	err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb);
+	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
 	if (err)
 		goto error;
 
@@ -1155,7 +671,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	used = flow->used;
 	stats.n_packets = flow->packet_count;
 	stats.n_bytes = flow->byte_count;
-	tcp_flags = flow->tcp_flags;
+	tcp_flags = (u8)ntohs(flow->tcp_flags);
 	spin_unlock_bh(&flow->lock);
 
 	if (used &&
@@ -1188,7 +704,8 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	sf_acts = rcu_dereference_check(flow->sf_acts,
 					lockdep_ovsl_is_held());
 
-	err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
+	err = ovs_nla_put_actions(sf_acts->actions,
+				  sf_acts->actions_len, skb);
 	if (!err)
 		nla_nest_end(skb, start);
 	else {
@@ -1234,6 +751,14 @@ static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
 	return skb;
 }
 
+static struct sw_flow *__ovs_flow_tbl_lookup(struct flow_table *tbl,
+					     const struct sw_flow_key *key)
+{
+	u32 __always_unused n_mask_hit;
+
+	return ovs_flow_tbl_lookup(tbl, key, &n_mask_hit);
+}
+
 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr **a = info->attrs;
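The __always_unused annotation silences the warning for n_mask_hit, which control-plane callers have no use for; only the packet fast path feeds it into statistics. Because a masked lookup can return a flow whose unmasked key differs from the request, the GET and DEL handlers below pair this wrapper with an exact comparison:

	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match))
		return -ENOENT;	/* the handlers jump to unlock with -ENOENT */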
@@ -1243,7 +768,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	struct sw_flow_mask mask;
 	struct sk_buff *reply;
 	struct datapath *dp;
-	struct flow_table *table;
 	struct sw_flow_actions *acts = NULL;
 	struct sw_flow_match match;
 	int error;
@@ -1254,21 +778,21 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		goto error;
 
 	ovs_match_init(&match, &key, &mask);
-	error = ovs_match_from_nlattrs(&match,
-			a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
+	error = ovs_nla_get_match(&match,
+				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
 	if (error)
 		goto error;
 
 	/* Validate actions. */
 	if (a[OVS_FLOW_ATTR_ACTIONS]) {
-		acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
+		acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
 		error = PTR_ERR(acts);
 		if (IS_ERR(acts))
 			goto error;
 
-		ovs_flow_key_mask(&masked_key, &key, &mask);
-		error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
-						  &masked_key, 0, &acts);
+		ovs_flow_mask_key(&masked_key, &key, &mask);
+		error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+					     &masked_key, 0, &acts);
 		if (error) {
 			OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
 			goto err_kfree;
@@ -1284,29 +808,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	if (!dp)
 		goto err_unlock_ovs;
 
-	table = ovsl_dereference(dp->table);
-
 	/* Check if this is a duplicate flow */
-	flow = ovs_flow_lookup(table, &key);
+	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow) {
-		struct sw_flow_mask *mask_p;
 		/* Bail out if we're not allowed to create a new flow. */
 		error = -ENOENT;
 		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
 			goto err_unlock_ovs;
 
-		/* Expand table, if necessary, to make room. */
-		if (ovs_flow_tbl_need_to_expand(table)) {
-			struct flow_table *new_table;
-
-			new_table = ovs_flow_tbl_expand(table);
-			if (!IS_ERR(new_table)) {
-				rcu_assign_pointer(dp->table, new_table);
-				ovs_flow_tbl_destroy(table, true);
-				table = ovsl_dereference(dp->table);
-			}
-		}
-
 		/* Allocate flow. */
 		flow = ovs_flow_alloc();
 		if (IS_ERR(flow)) {
@@ -1317,25 +826,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 
 		flow->key = masked_key;
 		flow->unmasked_key = key;
-
-		/* Make sure mask is unique in the system */
-		mask_p = ovs_sw_flow_mask_find(table, &mask);
-		if (!mask_p) {
-			/* Allocate a new mask if none exsits. */
-			mask_p = ovs_sw_flow_mask_alloc();
-			if (!mask_p)
-				goto err_flow_free;
-			mask_p->key = mask.key;
-			mask_p->range = mask.range;
-			ovs_sw_flow_mask_insert(table, mask_p);
-		}
-
-		ovs_sw_flow_mask_add_ref(mask_p);
-		flow->mask = mask_p;
 		rcu_assign_pointer(flow->sf_acts, acts);
 
 		/* Put flow in bucket. */
-		ovs_flow_insert(table, flow);
+		error = ovs_flow_tbl_insert(&dp->table, flow, &mask);
+		if (error) {
+			acts = NULL;
+			goto err_flow_free;
+		}
 
 		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
 						info->snd_seq, OVS_FLOW_CMD_NEW);
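The mask bookkeeping that used to be open-coded here (find an identical mask or allocate one, refcount it, link it) and the table-growth check removed in the previous hunk both move behind ovs_flow_tbl_insert(). A sketch of the shape this gives the call, with the internal helper names assumed:

int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			struct sw_flow_mask *mask)
{
	int err;

	/* Reuse an identical mask already in the table or allocate and
	 * link a new one; flow->mask ends up referenced either way.
	 * (Assumed helper name.) */
	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	/* Hash the masked key into the current table instance, growing
	 * or rehashing it first if needed. (Assumed helper name.) */
	table_instance_insert_flow(table, flow);
	return 0;
}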
@@ -1356,7 +854,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 
 	/* The unmasked key has to be the same for flow updates. */
 	error = -EINVAL;
-	if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end)) {
+	if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
 		OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
 		goto err_unlock_ovs;
 	}
@@ -1364,7 +862,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	/* Update actions. */
 	old_acts = ovsl_dereference(flow->sf_acts);
 	rcu_assign_pointer(flow->sf_acts, acts);
-	ovs_flow_deferred_free_acts(old_acts);
+	ovs_nla_free_flow_actions(old_acts);
 
 	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
 					info->snd_seq, OVS_FLOW_CMD_NEW);
@@ -1379,10 +877,10 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	ovs_unlock();
 
 	if (!IS_ERR(reply))
-		ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
+		ovs_notify(&dp_flow_genl_family, reply, info);
 	else
-		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
-				ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
+		genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
+			     0, PTR_ERR(reply));
 	return 0;
 
 err_flow_free:
@@ -1403,7 +901,6 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *reply;
 	struct sw_flow *flow;
 	struct datapath *dp;
-	struct flow_table *table;
 	struct sw_flow_match match;
 	int err;
 
@@ -1413,7 +910,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	ovs_match_init(&match, &key, NULL);
-	err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		return err;
 
@@ -1424,9 +921,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	table = ovsl_dereference(dp->table);
-	flow = ovs_flow_lookup_unmasked_key(table, &match);
-	if (!flow) {
+	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
 		err = -ENOENT;
 		goto unlock;
 	}
@@ -1453,7 +949,6 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *reply;
 	struct sw_flow *flow;
 	struct datapath *dp;
-	struct flow_table *table;
 	struct sw_flow_match match;
 	int err;
 
@@ -1465,18 +960,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	if (!a[OVS_FLOW_ATTR_KEY]) {
-		err = flush_flows(dp);
+		err = ovs_flow_tbl_flush(&dp->table);
 		goto unlock;
 	}
 
 	ovs_match_init(&match, &key, NULL);
-	err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		goto unlock;
 
-	table = ovsl_dereference(dp->table);
-	flow = ovs_flow_lookup_unmasked_key(table, &match);
-	if (!flow) {
+	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
 		err = -ENOENT;
 		goto unlock;
 	}
@@ -1487,7 +981,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	ovs_flow_remove(table, flow);
+	ovs_flow_tbl_remove(&dp->table, flow);
 
 	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
 				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
@@ -1496,7 +990,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	ovs_flow_free(flow, true);
 	ovs_unlock();
 
-	ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
+	ovs_notify(&dp_flow_genl_family, reply, info);
 	return 0;
 unlock:
 	ovs_unlock();
@@ -1506,8 +1000,8 @@ unlock:
 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+	struct table_instance *ti;
 	struct datapath *dp;
-	struct flow_table *table;
 
 	rcu_read_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
@@ -1516,14 +1010,14 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		return -ENODEV;
 	}
 
-	table = rcu_dereference(dp->table);
+	ti = rcu_dereference(dp->table.ti);
 	for (;;) {
 		struct sw_flow *flow;
 		u32 bucket, obj;
 
 		bucket = cb->args[0];
 		obj = cb->args[1];
-		flow = ovs_flow_dump_next(table, &bucket, &obj);
+		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
 		if (!flow)
 			break;
 
@@ -1540,7 +1034,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 }
 
-static struct genl_ops dp_flow_genl_ops[] = {
+static const struct genl_ops dp_flow_genl_ops[] = {
 	{ .cmd = OVS_FLOW_CMD_NEW,
 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
 	  .policy = flow_policy,
@@ -1589,6 +1083,7 @@ static size_t ovs_dp_cmd_msg_size(void)
 
 	msgsize += nla_total_size(IFNAMSIZ);
 	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
+	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
 
 	return msgsize;
 }
@@ -1598,6 +1093,7 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 {
 	struct ovs_header *ovs_header;
 	struct ovs_dp_stats dp_stats;
+	struct ovs_dp_megaflow_stats dp_megaflow_stats;
 	int err;
 
 	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
@@ -1613,8 +1109,14 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 	if (err)
 		goto nla_put_failure;
 
-	get_dp_stats(dp, &dp_stats);
-	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
+	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
+	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
+		    &dp_stats))
+		goto nla_put_failure;
+
+	if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
+		    sizeof(struct ovs_dp_megaflow_stats),
+		    &dp_megaflow_stats))
 		goto nla_put_failure;
 
 	return genlmsg_end(skb, ovs_header);
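OVS_DP_ATTR_MEGAFLOW_STATS carries a new fixed-size structure to userspace alongside the classic ovs_dp_stats. From the uapi header added by this series (pad0 keeps the layout identical across ABIs):

struct ovs_dp_megaflow_stats {
	__u64 n_mask_hit;	/* Number of masks looked at for flow match. */
	__u32 n_masks;		/* Number of masks in the datapath. */
	__u32 pad0;		/* Pad for future expansion. */
};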
@@ -1687,9 +1189,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
 
 	/* Allocate table. */
-	err = -ENOMEM;
-	rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
-	if (!dp->table)
+	err = ovs_flow_tbl_init(&dp->table);
+	if (err)
 		goto err_free_dp;
 
 	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
@@ -1698,8 +1199,14 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		goto err_destroy_table;
 	}
 
+	for_each_possible_cpu(i) {
+		struct dp_stats_percpu *dpath_stats;
+		dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
+		u64_stats_init(&dpath_stats->sync);
+	}
+
 	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
 			    GFP_KERNEL);
 	if (!dp->ports) {
 		err = -ENOMEM;
 		goto err_destroy_percpu;
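The new initialization loop matters on 32-bit SMP builds, where struct u64_stats_sync wraps a real seqcount that lockdep-aware kernels expect to be initialized before the first u64_stats_update_begin(); on 64-bit builds u64_stats_init() compiles away. The same pattern applies to any freshly allocated per-CPU stats block, e.g. (generic sketch, not OVS code):

	struct dp_stats_percpu __percpu *stats;
	int cpu;

	stats = alloc_percpu(struct dp_stats_percpu);
	if (!stats)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(stats, cpu)->sync);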
@@ -1736,7 +1243,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 
 	ovs_unlock();
 
-	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
+	ovs_notify(&dp_datapath_genl_family, reply, info);
 	return 0;
 
 err_destroy_local_port:
@@ -1746,7 +1253,7 @@ err_destroy_ports_array:
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
 err_destroy_table:
-	ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false);
+	ovs_flow_tbl_destroy(&dp->table);
 err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
@@ -1801,7 +1308,7 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	__dp_destroy(dp);
 	ovs_unlock();
 
-	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
+	ovs_notify(&dp_datapath_genl_family, reply, info);
 
 	return 0;
 unlock:
@@ -1825,14 +1332,14 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
 				       info->snd_seq, OVS_DP_CMD_NEW);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
-		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
-				ovs_dp_datapath_multicast_group.id, err);
+		genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
+			     0, err);
 		err = 0;
 		goto unlock;
 	}
 
 	ovs_unlock();
-	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
+	ovs_notify(&dp_datapath_genl_family, reply, info);
 
 	return 0;
 unlock:
@@ -1891,7 +1398,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 }
 
-static struct genl_ops dp_datapath_genl_ops[] = {
+static const struct genl_ops dp_datapath_genl_ops[] = {
 	{ .cmd = OVS_DP_CMD_NEW,
 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
 	  .policy = datapath_policy,
@@ -1924,7 +1431,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
 	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
 };
 
-static struct genl_family dp_vport_genl_family = {
+struct genl_family dp_vport_genl_family = {
 	.id = GENL_ID_GENERATE,
 	.hdrsize = sizeof(struct ovs_header),
 	.name = OVS_VPORT_FAMILY,
@@ -2094,7 +1601,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		goto exit_unlock;
 	}
 
-	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
+	ovs_notify(&dp_vport_genl_family, reply, info);
 
 exit_unlock:
 	ovs_unlock();
@@ -2141,7 +1648,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	BUG_ON(err < 0);
 
 	ovs_unlock();
-	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
+	ovs_notify(&dp_vport_genl_family, reply, info);
 	return 0;
 
 exit_free:
@@ -2178,7 +1685,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	err = 0;
 	ovs_dp_detach_port(vport);
 
-	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
+	ovs_notify(&dp_vport_genl_family, reply, info);
 
 exit_unlock:
 	ovs_unlock();
@@ -2252,7 +1759,7 @@ out:
 	return skb->len;
 }
 
-static struct genl_ops dp_vport_genl_ops[] = {
+static const struct genl_ops dp_vport_genl_ops[] = {
 	{ .cmd = OVS_VPORT_CMD_NEW,
 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
 	  .policy = vport_policy,
@@ -2278,9 +1785,9 @@ static struct genl_ops dp_vport_genl_ops[] = {
 
 struct genl_family_and_ops {
 	struct genl_family *family;
-	struct genl_ops *ops;
+	const struct genl_ops *ops;
 	int n_ops;
-	struct genl_multicast_group *group;
+	const struct genl_multicast_group *group;
 };
 
 static const struct genl_family_and_ops dp_genl_families[] = {
@@ -2316,17 +1823,14 @@ static int dp_register_genl(void)
 	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
 		const struct genl_family_and_ops *f = &dp_genl_families[i];
 
-		err = genl_register_family_with_ops(f->family, f->ops,
-						    f->n_ops);
+		f->family->ops = f->ops;
+		f->family->n_ops = f->n_ops;
+		f->family->mcgrps = f->group;
+		f->family->n_mcgrps = f->group ? 1 : 0;
+		err = genl_register_family(f->family);
 		if (err)
 			goto error;
 		n_registered++;
-
-		if (f->group) {
-			err = genl_register_mc_group(f->family, f->group);
-			if (err)
-				goto error;
-		}
 	}
 
 	return 0;
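With the genetlink rework this series builds on, ops and multicast groups are members of the family itself, and groups are addressed by their index within the family; that is why genl_notify() and genl_set_err() above pass group 0 rather than a global group id. A family defined in one place could set everything statically instead of letting dp_register_genl() fill the fields in, roughly:

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP,
};

static struct genl_family dp_datapath_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX,
	/* these are the fields dp_register_genl() assigns at runtime */
	.ops = dp_datapath_genl_ops,
	.n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
	.mcgrps = &ovs_dp_datapath_multicast_group,
	.n_mcgrps = 1,
};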
@@ -2336,32 +1840,6 @@ error:
 	return err;
 }
 
-static void rehash_flow_table(struct work_struct *work)
-{
-	struct datapath *dp;
-	struct net *net;
-
-	ovs_lock();
-	rtnl_lock();
-	for_each_net(net) {
-		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
-
-		list_for_each_entry(dp, &ovs_net->dps, list_node) {
-			struct flow_table *old_table = ovsl_dereference(dp->table);
-			struct flow_table *new_table;
-
-			new_table = ovs_flow_tbl_rehash(old_table);
-			if (!IS_ERR(new_table)) {
-				rcu_assign_pointer(dp->table, new_table);
-				ovs_flow_tbl_destroy(old_table, true);
-			}
-		}
-	}
-	rtnl_unlock();
-	ovs_unlock();
-	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
-}
-
 static int __net_init ovs_init_net(struct net *net)
 {
 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
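The ten-minute global rehash workqueue disappears because rehashing now happens inside the flow-table code itself, per datapath, as a side effect of normal table operations rather than from a delayed work item that had to take ovs_lock and rtnl_lock across every namespace. A sketch of the time-based trigger, with field and helper names assumed:

#define REHASH_INTERVAL (10 * 60 * HZ)

static void table_instance_maybe_rehash(struct flow_table *table)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	/* Called from the insert path; rehash into fresh buckets when
	 * the instance is older than the interval. (Assumed names.) */
	if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		table_instance_rehash(table, ti->n_buckets);
}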
@@ -2419,8 +1897,6 @@ static int __init dp_init(void)
 	if (err < 0)
 		goto error_unreg_notifier;
 
-	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
-
 	return 0;
 
 error_unreg_notifier:
@@ -2437,7 +1913,6 @@ error:
 
 static void dp_cleanup(void)
 {
-	cancel_delayed_work_sync(&rehash_flow_wq);
 	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
 	unregister_pernet_device(&ovs_net_ops);