diff options
author | Pravin B Shelar <pshelar@nicira.com> | 2013-10-03 21:16:47 -0400 |
---|---|---|
committer | Jesse Gross <jesse@nicira.com> | 2013-10-03 21:16:47 -0400 |
commit | e64457191a259537bbbfaebeba9a8043786af96f (patch) | |
tree | 579f47d3124b69a94b07878d65d27f71c4f7c9c5 /net/openvswitch | |
parent | f0627cfa24389cab25c67bb7ca902912216a8a2d (diff) |
openvswitch: Restructure datapath.c and flow.c
Over time, datapath.c and flow.c have become pretty large files.
The following patch restructures their functionality into three
different components:
flow.c: contains flow extraction.
flow_netlink.c: netlink flow api.
flow_table.c: flow table api.
This patch restructures code without changing logic.
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
Diffstat (limited to 'net/openvswitch')
-rw-r--r-- | net/openvswitch/Makefile | 2 | ||||
-rw-r--r-- | net/openvswitch/datapath.c | 528 | ||||
-rw-r--r-- | net/openvswitch/datapath.h | 1 | ||||
-rw-r--r-- | net/openvswitch/flow.c | 1605 | ||||
-rw-r--r-- | net/openvswitch/flow.h | 128 | ||||
-rw-r--r-- | net/openvswitch/flow_netlink.c | 1603 | ||||
-rw-r--r-- | net/openvswitch/flow_netlink.h | 60 | ||||
-rw-r--r-- | net/openvswitch/flow_table.c | 517 | ||||
-rw-r--r-- | net/openvswitch/flow_table.h | 91 |
9 files changed, 2354 insertions, 2181 deletions
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile index ea36e99089af..3591cb5dae91 100644 --- a/net/openvswitch/Makefile +++ b/net/openvswitch/Makefile | |||
@@ -9,6 +9,8 @@ openvswitch-y := \ | |||
9 | datapath.o \ | 9 | datapath.o \ |
10 | dp_notify.o \ | 10 | dp_notify.o \ |
11 | flow.o \ | 11 | flow.o \ |
12 | flow_netlink.o \ | ||
13 | flow_table.o \ | ||
12 | vport.o \ | 14 | vport.o \ |
13 | vport-internal_dev.o \ | 15 | vport-internal_dev.o \ |
14 | vport-netdev.o | 16 | vport-netdev.o |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 2e1a9c24e380..72e68743c643 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -55,10 +55,10 @@ | |||
55 | 55 | ||
56 | #include "datapath.h" | 56 | #include "datapath.h" |
57 | #include "flow.h" | 57 | #include "flow.h" |
58 | #include "flow_netlink.h" | ||
58 | #include "vport-internal_dev.h" | 59 | #include "vport-internal_dev.h" |
59 | #include "vport-netdev.h" | 60 | #include "vport-netdev.h" |
60 | 61 | ||
61 | |||
62 | #define REHASH_FLOW_INTERVAL (10 * 60 * HZ) | 62 | #define REHASH_FLOW_INTERVAL (10 * 60 * HZ) |
63 | 63 | ||
64 | int ovs_net_id __read_mostly; | 64 | int ovs_net_id __read_mostly; |
@@ -235,7 +235,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) | |||
235 | } | 235 | } |
236 | 236 | ||
237 | /* Look up flow. */ | 237 | /* Look up flow. */ |
238 | flow = ovs_flow_lookup(rcu_dereference(dp->table), &key); | 238 | flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key); |
239 | if (unlikely(!flow)) { | 239 | if (unlikely(!flow)) { |
240 | struct dp_upcall_info upcall; | 240 | struct dp_upcall_info upcall; |
241 | 241 | ||
@@ -433,7 +433,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, | |||
433 | upcall->dp_ifindex = dp_ifindex; | 433 | upcall->dp_ifindex = dp_ifindex; |
434 | 434 | ||
435 | nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); | 435 | nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); |
436 | ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb); | 436 | ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb); |
437 | nla_nest_end(user_skb, nla); | 437 | nla_nest_end(user_skb, nla); |
438 | 438 | ||
439 | if (upcall_info->userdata) | 439 | if (upcall_info->userdata) |
@@ -470,381 +470,6 @@ static int flush_flows(struct datapath *dp) | |||
470 | return 0; | 470 | return 0; |
471 | } | 471 | } |
472 | 472 | ||
473 | static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len) | ||
474 | { | ||
475 | |||
476 | struct sw_flow_actions *acts; | ||
477 | int new_acts_size; | ||
478 | int req_size = NLA_ALIGN(attr_len); | ||
479 | int next_offset = offsetof(struct sw_flow_actions, actions) + | ||
480 | (*sfa)->actions_len; | ||
481 | |||
482 | if (req_size <= (ksize(*sfa) - next_offset)) | ||
483 | goto out; | ||
484 | |||
485 | new_acts_size = ksize(*sfa) * 2; | ||
486 | |||
487 | if (new_acts_size > MAX_ACTIONS_BUFSIZE) { | ||
488 | if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) | ||
489 | return ERR_PTR(-EMSGSIZE); | ||
490 | new_acts_size = MAX_ACTIONS_BUFSIZE; | ||
491 | } | ||
492 | |||
493 | acts = ovs_flow_actions_alloc(new_acts_size); | ||
494 | if (IS_ERR(acts)) | ||
495 | return (void *)acts; | ||
496 | |||
497 | memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len); | ||
498 | acts->actions_len = (*sfa)->actions_len; | ||
499 | kfree(*sfa); | ||
500 | *sfa = acts; | ||
501 | |||
502 | out: | ||
503 | (*sfa)->actions_len += req_size; | ||
504 | return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset); | ||
505 | } | ||
506 | |||
507 | static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len) | ||
508 | { | ||
509 | struct nlattr *a; | ||
510 | |||
511 | a = reserve_sfa_size(sfa, nla_attr_size(len)); | ||
512 | if (IS_ERR(a)) | ||
513 | return PTR_ERR(a); | ||
514 | |||
515 | a->nla_type = attrtype; | ||
516 | a->nla_len = nla_attr_size(len); | ||
517 | |||
518 | if (data) | ||
519 | memcpy(nla_data(a), data, len); | ||
520 | memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len)); | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype) | ||
526 | { | ||
527 | int used = (*sfa)->actions_len; | ||
528 | int err; | ||
529 | |||
530 | err = add_action(sfa, attrtype, NULL, 0); | ||
531 | if (err) | ||
532 | return err; | ||
533 | |||
534 | return used; | ||
535 | } | ||
536 | |||
537 | static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset) | ||
538 | { | ||
539 | struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset); | ||
540 | |||
541 | a->nla_len = sfa->actions_len - st_offset; | ||
542 | } | ||
543 | |||
544 | static int validate_and_copy_actions(const struct nlattr *attr, | ||
545 | const struct sw_flow_key *key, int depth, | ||
546 | struct sw_flow_actions **sfa); | ||
547 | |||
548 | static int validate_and_copy_sample(const struct nlattr *attr, | ||
549 | const struct sw_flow_key *key, int depth, | ||
550 | struct sw_flow_actions **sfa) | ||
551 | { | ||
552 | const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; | ||
553 | const struct nlattr *probability, *actions; | ||
554 | const struct nlattr *a; | ||
555 | int rem, start, err, st_acts; | ||
556 | |||
557 | memset(attrs, 0, sizeof(attrs)); | ||
558 | nla_for_each_nested(a, attr, rem) { | ||
559 | int type = nla_type(a); | ||
560 | if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type]) | ||
561 | return -EINVAL; | ||
562 | attrs[type] = a; | ||
563 | } | ||
564 | if (rem) | ||
565 | return -EINVAL; | ||
566 | |||
567 | probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY]; | ||
568 | if (!probability || nla_len(probability) != sizeof(u32)) | ||
569 | return -EINVAL; | ||
570 | |||
571 | actions = attrs[OVS_SAMPLE_ATTR_ACTIONS]; | ||
572 | if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN)) | ||
573 | return -EINVAL; | ||
574 | |||
575 | /* validation done, copy sample action. */ | ||
576 | start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE); | ||
577 | if (start < 0) | ||
578 | return start; | ||
579 | err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32)); | ||
580 | if (err) | ||
581 | return err; | ||
582 | st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS); | ||
583 | if (st_acts < 0) | ||
584 | return st_acts; | ||
585 | |||
586 | err = validate_and_copy_actions(actions, key, depth + 1, sfa); | ||
587 | if (err) | ||
588 | return err; | ||
589 | |||
590 | add_nested_action_end(*sfa, st_acts); | ||
591 | add_nested_action_end(*sfa, start); | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static int validate_tp_port(const struct sw_flow_key *flow_key) | ||
597 | { | ||
598 | if (flow_key->eth.type == htons(ETH_P_IP)) { | ||
599 | if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst) | ||
600 | return 0; | ||
601 | } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { | ||
602 | if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst) | ||
603 | return 0; | ||
604 | } | ||
605 | |||
606 | return -EINVAL; | ||
607 | } | ||
608 | |||
609 | static int validate_and_copy_set_tun(const struct nlattr *attr, | ||
610 | struct sw_flow_actions **sfa) | ||
611 | { | ||
612 | struct sw_flow_match match; | ||
613 | struct sw_flow_key key; | ||
614 | int err, start; | ||
615 | |||
616 | ovs_match_init(&match, &key, NULL); | ||
617 | err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &match, false); | ||
618 | if (err) | ||
619 | return err; | ||
620 | |||
621 | start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET); | ||
622 | if (start < 0) | ||
623 | return start; | ||
624 | |||
625 | err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key, | ||
626 | sizeof(match.key->tun_key)); | ||
627 | add_nested_action_end(*sfa, start); | ||
628 | |||
629 | return err; | ||
630 | } | ||
631 | |||
632 | static int validate_set(const struct nlattr *a, | ||
633 | const struct sw_flow_key *flow_key, | ||
634 | struct sw_flow_actions **sfa, | ||
635 | bool *set_tun) | ||
636 | { | ||
637 | const struct nlattr *ovs_key = nla_data(a); | ||
638 | int key_type = nla_type(ovs_key); | ||
639 | |||
640 | /* There can be only one key in a action */ | ||
641 | if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) | ||
642 | return -EINVAL; | ||
643 | |||
644 | if (key_type > OVS_KEY_ATTR_MAX || | ||
645 | (ovs_key_lens[key_type] != nla_len(ovs_key) && | ||
646 | ovs_key_lens[key_type] != -1)) | ||
647 | return -EINVAL; | ||
648 | |||
649 | switch (key_type) { | ||
650 | const struct ovs_key_ipv4 *ipv4_key; | ||
651 | const struct ovs_key_ipv6 *ipv6_key; | ||
652 | int err; | ||
653 | |||
654 | case OVS_KEY_ATTR_PRIORITY: | ||
655 | case OVS_KEY_ATTR_SKB_MARK: | ||
656 | case OVS_KEY_ATTR_ETHERNET: | ||
657 | break; | ||
658 | |||
659 | case OVS_KEY_ATTR_TUNNEL: | ||
660 | *set_tun = true; | ||
661 | err = validate_and_copy_set_tun(a, sfa); | ||
662 | if (err) | ||
663 | return err; | ||
664 | break; | ||
665 | |||
666 | case OVS_KEY_ATTR_IPV4: | ||
667 | if (flow_key->eth.type != htons(ETH_P_IP)) | ||
668 | return -EINVAL; | ||
669 | |||
670 | if (!flow_key->ip.proto) | ||
671 | return -EINVAL; | ||
672 | |||
673 | ipv4_key = nla_data(ovs_key); | ||
674 | if (ipv4_key->ipv4_proto != flow_key->ip.proto) | ||
675 | return -EINVAL; | ||
676 | |||
677 | if (ipv4_key->ipv4_frag != flow_key->ip.frag) | ||
678 | return -EINVAL; | ||
679 | |||
680 | break; | ||
681 | |||
682 | case OVS_KEY_ATTR_IPV6: | ||
683 | if (flow_key->eth.type != htons(ETH_P_IPV6)) | ||
684 | return -EINVAL; | ||
685 | |||
686 | if (!flow_key->ip.proto) | ||
687 | return -EINVAL; | ||
688 | |||
689 | ipv6_key = nla_data(ovs_key); | ||
690 | if (ipv6_key->ipv6_proto != flow_key->ip.proto) | ||
691 | return -EINVAL; | ||
692 | |||
693 | if (ipv6_key->ipv6_frag != flow_key->ip.frag) | ||
694 | return -EINVAL; | ||
695 | |||
696 | if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000) | ||
697 | return -EINVAL; | ||
698 | |||
699 | break; | ||
700 | |||
701 | case OVS_KEY_ATTR_TCP: | ||
702 | if (flow_key->ip.proto != IPPROTO_TCP) | ||
703 | return -EINVAL; | ||
704 | |||
705 | return validate_tp_port(flow_key); | ||
706 | |||
707 | case OVS_KEY_ATTR_UDP: | ||
708 | if (flow_key->ip.proto != IPPROTO_UDP) | ||
709 | return -EINVAL; | ||
710 | |||
711 | return validate_tp_port(flow_key); | ||
712 | |||
713 | case OVS_KEY_ATTR_SCTP: | ||
714 | if (flow_key->ip.proto != IPPROTO_SCTP) | ||
715 | return -EINVAL; | ||
716 | |||
717 | return validate_tp_port(flow_key); | ||
718 | |||
719 | default: | ||
720 | return -EINVAL; | ||
721 | } | ||
722 | |||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | static int validate_userspace(const struct nlattr *attr) | ||
727 | { | ||
728 | static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { | ||
729 | [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, | ||
730 | [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC }, | ||
731 | }; | ||
732 | struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; | ||
733 | int error; | ||
734 | |||
735 | error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, | ||
736 | attr, userspace_policy); | ||
737 | if (error) | ||
738 | return error; | ||
739 | |||
740 | if (!a[OVS_USERSPACE_ATTR_PID] || | ||
741 | !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) | ||
742 | return -EINVAL; | ||
743 | |||
744 | return 0; | ||
745 | } | ||
746 | |||
747 | static int copy_action(const struct nlattr *from, | ||
748 | struct sw_flow_actions **sfa) | ||
749 | { | ||
750 | int totlen = NLA_ALIGN(from->nla_len); | ||
751 | struct nlattr *to; | ||
752 | |||
753 | to = reserve_sfa_size(sfa, from->nla_len); | ||
754 | if (IS_ERR(to)) | ||
755 | return PTR_ERR(to); | ||
756 | |||
757 | memcpy(to, from, totlen); | ||
758 | return 0; | ||
759 | } | ||
760 | |||
761 | static int validate_and_copy_actions(const struct nlattr *attr, | ||
762 | const struct sw_flow_key *key, | ||
763 | int depth, | ||
764 | struct sw_flow_actions **sfa) | ||
765 | { | ||
766 | const struct nlattr *a; | ||
767 | int rem, err; | ||
768 | |||
769 | if (depth >= SAMPLE_ACTION_DEPTH) | ||
770 | return -EOVERFLOW; | ||
771 | |||
772 | nla_for_each_nested(a, attr, rem) { | ||
773 | /* Expected argument lengths, (u32)-1 for variable length. */ | ||
774 | static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { | ||
775 | [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), | ||
776 | [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, | ||
777 | [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), | ||
778 | [OVS_ACTION_ATTR_POP_VLAN] = 0, | ||
779 | [OVS_ACTION_ATTR_SET] = (u32)-1, | ||
780 | [OVS_ACTION_ATTR_SAMPLE] = (u32)-1 | ||
781 | }; | ||
782 | const struct ovs_action_push_vlan *vlan; | ||
783 | int type = nla_type(a); | ||
784 | bool skip_copy; | ||
785 | |||
786 | if (type > OVS_ACTION_ATTR_MAX || | ||
787 | (action_lens[type] != nla_len(a) && | ||
788 | action_lens[type] != (u32)-1)) | ||
789 | return -EINVAL; | ||
790 | |||
791 | skip_copy = false; | ||
792 | switch (type) { | ||
793 | case OVS_ACTION_ATTR_UNSPEC: | ||
794 | return -EINVAL; | ||
795 | |||
796 | case OVS_ACTION_ATTR_USERSPACE: | ||
797 | err = validate_userspace(a); | ||
798 | if (err) | ||
799 | return err; | ||
800 | break; | ||
801 | |||
802 | case OVS_ACTION_ATTR_OUTPUT: | ||
803 | if (nla_get_u32(a) >= DP_MAX_PORTS) | ||
804 | return -EINVAL; | ||
805 | break; | ||
806 | |||
807 | |||
808 | case OVS_ACTION_ATTR_POP_VLAN: | ||
809 | break; | ||
810 | |||
811 | case OVS_ACTION_ATTR_PUSH_VLAN: | ||
812 | vlan = nla_data(a); | ||
813 | if (vlan->vlan_tpid != htons(ETH_P_8021Q)) | ||
814 | return -EINVAL; | ||
815 | if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) | ||
816 | return -EINVAL; | ||
817 | break; | ||
818 | |||
819 | case OVS_ACTION_ATTR_SET: | ||
820 | err = validate_set(a, key, sfa, &skip_copy); | ||
821 | if (err) | ||
822 | return err; | ||
823 | break; | ||
824 | |||
825 | case OVS_ACTION_ATTR_SAMPLE: | ||
826 | err = validate_and_copy_sample(a, key, depth, sfa); | ||
827 | if (err) | ||
828 | return err; | ||
829 | skip_copy = true; | ||
830 | break; | ||
831 | |||
832 | default: | ||
833 | return -EINVAL; | ||
834 | } | ||
835 | if (!skip_copy) { | ||
836 | err = copy_action(a, sfa); | ||
837 | if (err) | ||
838 | return err; | ||
839 | } | ||
840 | } | ||
841 | |||
842 | if (rem > 0) | ||
843 | return -EINVAL; | ||
844 | |||
845 | return 0; | ||
846 | } | ||
847 | |||
848 | static void clear_stats(struct sw_flow *flow) | 473 | static void clear_stats(struct sw_flow *flow) |
849 | { | 474 | { |
850 | flow->used = 0; | 475 | flow->used = 0; |
@@ -900,15 +525,16 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) | |||
900 | if (err) | 525 | if (err) |
901 | goto err_flow_free; | 526 | goto err_flow_free; |
902 | 527 | ||
903 | err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]); | 528 | err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]); |
904 | if (err) | 529 | if (err) |
905 | goto err_flow_free; | 530 | goto err_flow_free; |
906 | acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); | 531 | acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); |
907 | err = PTR_ERR(acts); | 532 | err = PTR_ERR(acts); |
908 | if (IS_ERR(acts)) | 533 | if (IS_ERR(acts)) |
909 | goto err_flow_free; | 534 | goto err_flow_free; |
910 | 535 | ||
911 | err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts); | 536 | err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], |
537 | &flow->key, 0, &acts); | ||
912 | rcu_assign_pointer(flow->sf_acts, acts); | 538 | rcu_assign_pointer(flow->sf_acts, acts); |
913 | if (err) | 539 | if (err) |
914 | goto err_flow_free; | 540 | goto err_flow_free; |
@@ -1003,100 +629,6 @@ static struct genl_multicast_group ovs_dp_flow_multicast_group = { | |||
1003 | .name = OVS_FLOW_MCGROUP | 629 | .name = OVS_FLOW_MCGROUP |
1004 | }; | 630 | }; |
1005 | 631 | ||
1006 | static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb); | ||
1007 | static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb) | ||
1008 | { | ||
1009 | const struct nlattr *a; | ||
1010 | struct nlattr *start; | ||
1011 | int err = 0, rem; | ||
1012 | |||
1013 | start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE); | ||
1014 | if (!start) | ||
1015 | return -EMSGSIZE; | ||
1016 | |||
1017 | nla_for_each_nested(a, attr, rem) { | ||
1018 | int type = nla_type(a); | ||
1019 | struct nlattr *st_sample; | ||
1020 | |||
1021 | switch (type) { | ||
1022 | case OVS_SAMPLE_ATTR_PROBABILITY: | ||
1023 | if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a))) | ||
1024 | return -EMSGSIZE; | ||
1025 | break; | ||
1026 | case OVS_SAMPLE_ATTR_ACTIONS: | ||
1027 | st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS); | ||
1028 | if (!st_sample) | ||
1029 | return -EMSGSIZE; | ||
1030 | err = actions_to_attr(nla_data(a), nla_len(a), skb); | ||
1031 | if (err) | ||
1032 | return err; | ||
1033 | nla_nest_end(skb, st_sample); | ||
1034 | break; | ||
1035 | } | ||
1036 | } | ||
1037 | |||
1038 | nla_nest_end(skb, start); | ||
1039 | return err; | ||
1040 | } | ||
1041 | |||
1042 | static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb) | ||
1043 | { | ||
1044 | const struct nlattr *ovs_key = nla_data(a); | ||
1045 | int key_type = nla_type(ovs_key); | ||
1046 | struct nlattr *start; | ||
1047 | int err; | ||
1048 | |||
1049 | switch (key_type) { | ||
1050 | case OVS_KEY_ATTR_IPV4_TUNNEL: | ||
1051 | start = nla_nest_start(skb, OVS_ACTION_ATTR_SET); | ||
1052 | if (!start) | ||
1053 | return -EMSGSIZE; | ||
1054 | |||
1055 | err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key), | ||
1056 | nla_data(ovs_key)); | ||
1057 | if (err) | ||
1058 | return err; | ||
1059 | nla_nest_end(skb, start); | ||
1060 | break; | ||
1061 | default: | ||
1062 | if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key)) | ||
1063 | return -EMSGSIZE; | ||
1064 | break; | ||
1065 | } | ||
1066 | |||
1067 | return 0; | ||
1068 | } | ||
1069 | |||
1070 | static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb) | ||
1071 | { | ||
1072 | const struct nlattr *a; | ||
1073 | int rem, err; | ||
1074 | |||
1075 | nla_for_each_attr(a, attr, len, rem) { | ||
1076 | int type = nla_type(a); | ||
1077 | |||
1078 | switch (type) { | ||
1079 | case OVS_ACTION_ATTR_SET: | ||
1080 | err = set_action_to_attr(a, skb); | ||
1081 | if (err) | ||
1082 | return err; | ||
1083 | break; | ||
1084 | |||
1085 | case OVS_ACTION_ATTR_SAMPLE: | ||
1086 | err = sample_action_to_attr(a, skb); | ||
1087 | if (err) | ||
1088 | return err; | ||
1089 | break; | ||
1090 | default: | ||
1091 | if (nla_put(skb, type, nla_len(a), nla_data(a))) | ||
1092 | return -EMSGSIZE; | ||
1093 | break; | ||
1094 | } | ||
1095 | } | ||
1096 | |||
1097 | return 0; | ||
1098 | } | ||
1099 | |||
1100 | static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) | 632 | static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) |
1101 | { | 633 | { |
1102 | return NLMSG_ALIGN(sizeof(struct ovs_header)) | 634 | return NLMSG_ALIGN(sizeof(struct ovs_header)) |
@@ -1133,8 +665,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, | |||
1133 | if (!nla) | 665 | if (!nla) |
1134 | goto nla_put_failure; | 666 | goto nla_put_failure; |
1135 | 667 | ||
1136 | err = ovs_flow_to_nlattrs(&flow->unmasked_key, | 668 | err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb); |
1137 | &flow->unmasked_key, skb); | ||
1138 | if (err) | 669 | if (err) |
1139 | goto error; | 670 | goto error; |
1140 | nla_nest_end(skb, nla); | 671 | nla_nest_end(skb, nla); |
@@ -1143,7 +674,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, | |||
1143 | if (!nla) | 674 | if (!nla) |
1144 | goto nla_put_failure; | 675 | goto nla_put_failure; |
1145 | 676 | ||
1146 | err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb); | 677 | err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb); |
1147 | if (err) | 678 | if (err) |
1148 | goto error; | 679 | goto error; |
1149 | 680 | ||
@@ -1186,7 +717,8 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, | |||
1186 | sf_acts = rcu_dereference_check(flow->sf_acts, | 717 | sf_acts = rcu_dereference_check(flow->sf_acts, |
1187 | lockdep_ovsl_is_held()); | 718 | lockdep_ovsl_is_held()); |
1188 | 719 | ||
1189 | err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb); | 720 | err = ovs_nla_put_actions(sf_acts->actions, |
721 | sf_acts->actions_len, skb); | ||
1190 | if (!err) | 722 | if (!err) |
1191 | nla_nest_end(skb, start); | 723 | nla_nest_end(skb, start); |
1192 | else { | 724 | else { |
@@ -1252,21 +784,21 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
1252 | goto error; | 784 | goto error; |
1253 | 785 | ||
1254 | ovs_match_init(&match, &key, &mask); | 786 | ovs_match_init(&match, &key, &mask); |
1255 | error = ovs_match_from_nlattrs(&match, | 787 | error = ovs_nla_get_match(&match, |
1256 | a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); | 788 | a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); |
1257 | if (error) | 789 | if (error) |
1258 | goto error; | 790 | goto error; |
1259 | 791 | ||
1260 | /* Validate actions. */ | 792 | /* Validate actions. */ |
1261 | if (a[OVS_FLOW_ATTR_ACTIONS]) { | 793 | if (a[OVS_FLOW_ATTR_ACTIONS]) { |
1262 | acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); | 794 | acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); |
1263 | error = PTR_ERR(acts); | 795 | error = PTR_ERR(acts); |
1264 | if (IS_ERR(acts)) | 796 | if (IS_ERR(acts)) |
1265 | goto error; | 797 | goto error; |
1266 | 798 | ||
1267 | ovs_flow_key_mask(&masked_key, &key, &mask); | 799 | ovs_flow_mask_key(&masked_key, &key, &mask); |
1268 | error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], | 800 | error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], |
1269 | &masked_key, 0, &acts); | 801 | &masked_key, 0, &acts); |
1270 | if (error) { | 802 | if (error) { |
1271 | OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); | 803 | OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); |
1272 | goto err_kfree; | 804 | goto err_kfree; |
@@ -1285,7 +817,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
1285 | table = ovsl_dereference(dp->table); | 817 | table = ovsl_dereference(dp->table); |
1286 | 818 | ||
1287 | /* Check if this is a duplicate flow */ | 819 | /* Check if this is a duplicate flow */ |
1288 | flow = ovs_flow_lookup(table, &key); | 820 | flow = ovs_flow_tbl_lookup(table, &key); |
1289 | if (!flow) { | 821 | if (!flow) { |
1290 | struct flow_table *new_table = NULL; | 822 | struct flow_table *new_table = NULL; |
1291 | struct sw_flow_mask *mask_p; | 823 | struct sw_flow_mask *mask_p; |
@@ -1336,7 +868,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
1336 | rcu_assign_pointer(flow->sf_acts, acts); | 868 | rcu_assign_pointer(flow->sf_acts, acts); |
1337 | 869 | ||
1338 | /* Put flow in bucket. */ | 870 | /* Put flow in bucket. */ |
1339 | ovs_flow_insert(table, flow); | 871 | ovs_flow_tbl_insert(table, flow); |
1340 | 872 | ||
1341 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid, | 873 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid, |
1342 | info->snd_seq, OVS_FLOW_CMD_NEW); | 874 | info->snd_seq, OVS_FLOW_CMD_NEW); |
@@ -1357,7 +889,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
1357 | 889 | ||
1358 | /* The unmasked key has to be the same for flow updates. */ | 890 | /* The unmasked key has to be the same for flow updates. */ |
1359 | error = -EINVAL; | 891 | error = -EINVAL; |
1360 | if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end)) { | 892 | if (!ovs_flow_cmp_unmasked_key(flow, &match)) { |
1361 | OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n"); | 893 | OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n"); |
1362 | goto err_unlock_ovs; | 894 | goto err_unlock_ovs; |
1363 | } | 895 | } |
@@ -1365,7 +897,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
1365 | /* Update actions. */ | 897 | /* Update actions. */ |
1366 | old_acts = ovsl_dereference(flow->sf_acts); | 898 | old_acts = ovsl_dereference(flow->sf_acts); |
1367 | rcu_assign_pointer(flow->sf_acts, acts); | 899 | rcu_assign_pointer(flow->sf_acts, acts); |
1368 | ovs_flow_deferred_free_acts(old_acts); | 900 | ovs_nla_free_flow_actions(old_acts); |
1369 | 901 | ||
1370 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid, | 902 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid, |
1371 | info->snd_seq, OVS_FLOW_CMD_NEW); | 903 | info->snd_seq, OVS_FLOW_CMD_NEW); |
@@ -1414,7 +946,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
1414 | } | 946 | } |
1415 | 947 | ||
1416 | ovs_match_init(&match, &key, NULL); | 948 | ovs_match_init(&match, &key, NULL); |
1417 | err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL); | 949 | err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL); |
1418 | if (err) | 950 | if (err) |
1419 | return err; | 951 | return err; |
1420 | 952 | ||
@@ -1426,8 +958,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
1426 | } | 958 | } |
1427 | 959 | ||
1428 | table = ovsl_dereference(dp->table); | 960 | table = ovsl_dereference(dp->table); |
1429 | flow = ovs_flow_lookup_unmasked_key(table, &match); | 961 | flow = ovs_flow_tbl_lookup(table, &key); |
1430 | if (!flow) { | 962 | if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) { |
1431 | err = -ENOENT; | 963 | err = -ENOENT; |
1432 | goto unlock; | 964 | goto unlock; |
1433 | } | 965 | } |
@@ -1471,13 +1003,13 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
1471 | } | 1003 | } |
1472 | 1004 | ||
1473 | ovs_match_init(&match, &key, NULL); | 1005 | ovs_match_init(&match, &key, NULL); |
1474 | err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL); | 1006 | err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL); |
1475 | if (err) | 1007 | if (err) |
1476 | goto unlock; | 1008 | goto unlock; |
1477 | 1009 | ||
1478 | table = ovsl_dereference(dp->table); | 1010 | table = ovsl_dereference(dp->table); |
1479 | flow = ovs_flow_lookup_unmasked_key(table, &match); | 1011 | flow = ovs_flow_tbl_lookup(table, &key); |
1480 | if (!flow) { | 1012 | if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) { |
1481 | err = -ENOENT; | 1013 | err = -ENOENT; |
1482 | goto unlock; | 1014 | goto unlock; |
1483 | } | 1015 | } |
@@ -1488,7 +1020,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
1488 | goto unlock; | 1020 | goto unlock; |
1489 | } | 1021 | } |
1490 | 1022 | ||
1491 | ovs_flow_remove(table, flow); | 1023 | ovs_flow_tbl_remove(table, flow); |
1492 | 1024 | ||
1493 | err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid, | 1025 | err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid, |
1494 | info->snd_seq, 0, OVS_FLOW_CMD_DEL); | 1026 | info->snd_seq, 0, OVS_FLOW_CMD_DEL); |
@@ -1524,7 +1056,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1524 | 1056 | ||
1525 | bucket = cb->args[0]; | 1057 | bucket = cb->args[0]; |
1526 | obj = cb->args[1]; | 1058 | obj = cb->args[1]; |
1527 | flow = ovs_flow_dump_next(table, &bucket, &obj); | 1059 | flow = ovs_flow_tbl_dump_next(table, &bucket, &obj); |
1528 | if (!flow) | 1060 | if (!flow) |
1529 | break; | 1061 | break; |
1530 | 1062 | ||
@@ -1700,7 +1232,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
1700 | } | 1232 | } |
1701 | 1233 | ||
1702 | dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head), | 1234 | dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head), |
1703 | GFP_KERNEL); | 1235 | GFP_KERNEL); |
1704 | if (!dp->ports) { | 1236 | if (!dp->ports) { |
1705 | err = -ENOMEM; | 1237 | err = -ENOMEM; |
1706 | goto err_destroy_percpu; | 1238 | goto err_destroy_percpu; |
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 2c15541f3b46..a6982ef84f20 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/u64_stats_sync.h> | 27 | #include <linux/u64_stats_sync.h> |
28 | 28 | ||
29 | #include "flow.h" | 29 | #include "flow.h" |
30 | #include "flow_table.h" | ||
30 | #include "vport.h" | 31 | #include "vport.h" |
31 | 32 | ||
32 | #define DP_MAX_PORTS USHRT_MAX | 33 | #define DP_MAX_PORTS USHRT_MAX |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 410db90db73d..617810f1a21e 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
@@ -45,202 +45,40 @@ | |||
45 | #include <net/ipv6.h> | 45 | #include <net/ipv6.h> |
46 | #include <net/ndisc.h> | 46 | #include <net/ndisc.h> |
47 | 47 | ||
48 | static struct kmem_cache *flow_cache; | 48 | u64 ovs_flow_used_time(unsigned long flow_jiffies) |
49 | |||
50 | static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask, | ||
51 | struct sw_flow_key_range *range, u8 val); | ||
52 | |||
53 | static void update_range__(struct sw_flow_match *match, | ||
54 | size_t offset, size_t size, bool is_mask) | ||
55 | { | 49 | { |
56 | struct sw_flow_key_range *range = NULL; | 50 | struct timespec cur_ts; |
57 | size_t start = rounddown(offset, sizeof(long)); | 51 | u64 cur_ms, idle_ms; |
58 | size_t end = roundup(offset + size, sizeof(long)); | ||
59 | |||
60 | if (!is_mask) | ||
61 | range = &match->range; | ||
62 | else if (match->mask) | ||
63 | range = &match->mask->range; | ||
64 | |||
65 | if (!range) | ||
66 | return; | ||
67 | |||
68 | if (range->start == range->end) { | ||
69 | range->start = start; | ||
70 | range->end = end; | ||
71 | return; | ||
72 | } | ||
73 | |||
74 | if (range->start > start) | ||
75 | range->start = start; | ||
76 | 52 | ||
77 | if (range->end < end) | 53 | ktime_get_ts(&cur_ts); |
78 | range->end = end; | 54 | idle_ms = jiffies_to_msecs(jiffies - flow_jiffies); |
79 | } | 55 | cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC + |
56 | cur_ts.tv_nsec / NSEC_PER_MSEC; | ||
80 | 57 | ||
81 | #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ | 58 | return cur_ms - idle_ms; |
82 | do { \ | ||
83 | update_range__(match, offsetof(struct sw_flow_key, field), \ | ||
84 | sizeof((match)->key->field), is_mask); \ | ||
85 | if (is_mask) { \ | ||
86 | if ((match)->mask) \ | ||
87 | (match)->mask->key.field = value; \ | ||
88 | } else { \ | ||
89 | (match)->key->field = value; \ | ||
90 | } \ | ||
91 | } while (0) | ||
92 | |||
93 | #define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \ | ||
94 | do { \ | ||
95 | update_range__(match, offsetof(struct sw_flow_key, field), \ | ||
96 | len, is_mask); \ | ||
97 | if (is_mask) { \ | ||
98 | if ((match)->mask) \ | ||
99 | memcpy(&(match)->mask->key.field, value_p, len);\ | ||
100 | } else { \ | ||
101 | memcpy(&(match)->key->field, value_p, len); \ | ||
102 | } \ | ||
103 | } while (0) | ||
104 | |||
105 | static u16 range_n_bytes(const struct sw_flow_key_range *range) | ||
106 | { | ||
107 | return range->end - range->start; | ||
108 | } | 59 | } |
109 | 60 | ||
110 | void ovs_match_init(struct sw_flow_match *match, | 61 | #define TCP_FLAGS_OFFSET 13 |
111 | struct sw_flow_key *key, | 62 | #define TCP_FLAG_MASK 0x3f |
112 | struct sw_flow_mask *mask) | ||
113 | { | ||
114 | memset(match, 0, sizeof(*match)); | ||
115 | match->key = key; | ||
116 | match->mask = mask; | ||
117 | |||
118 | memset(key, 0, sizeof(*key)); | ||
119 | |||
120 | if (mask) { | ||
121 | memset(&mask->key, 0, sizeof(mask->key)); | ||
122 | mask->range.start = mask->range.end = 0; | ||
123 | } | ||
124 | } | ||
125 | 63 | ||
126 | static bool ovs_match_validate(const struct sw_flow_match *match, | 64 | void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) |
127 | u64 key_attrs, u64 mask_attrs) | ||
128 | { | 65 | { |
129 | u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET; | 66 | u8 tcp_flags = 0; |
130 | u64 mask_allowed = key_attrs; /* At most allow all key attributes */ | ||
131 | |||
132 | /* The following mask attributes allowed only if they | ||
133 | * pass the validation tests. */ | ||
134 | mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4) | ||
135 | | (1 << OVS_KEY_ATTR_IPV6) | ||
136 | | (1 << OVS_KEY_ATTR_TCP) | ||
137 | | (1 << OVS_KEY_ATTR_UDP) | ||
138 | | (1 << OVS_KEY_ATTR_SCTP) | ||
139 | | (1 << OVS_KEY_ATTR_ICMP) | ||
140 | | (1 << OVS_KEY_ATTR_ICMPV6) | ||
141 | | (1 << OVS_KEY_ATTR_ARP) | ||
142 | | (1 << OVS_KEY_ATTR_ND)); | ||
143 | |||
144 | /* Always allowed mask fields. */ | ||
145 | mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL) | ||
146 | | (1 << OVS_KEY_ATTR_IN_PORT) | ||
147 | | (1 << OVS_KEY_ATTR_ETHERTYPE)); | ||
148 | |||
149 | /* Check key attributes. */ | ||
150 | if (match->key->eth.type == htons(ETH_P_ARP) | ||
151 | || match->key->eth.type == htons(ETH_P_RARP)) { | ||
152 | key_expected |= 1 << OVS_KEY_ATTR_ARP; | ||
153 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
154 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; | ||
155 | } | ||
156 | |||
157 | if (match->key->eth.type == htons(ETH_P_IP)) { | ||
158 | key_expected |= 1 << OVS_KEY_ATTR_IPV4; | ||
159 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
160 | mask_allowed |= 1 << OVS_KEY_ATTR_IPV4; | ||
161 | |||
162 | if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
163 | if (match->key->ip.proto == IPPROTO_UDP) { | ||
164 | key_expected |= 1 << OVS_KEY_ATTR_UDP; | ||
165 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
166 | mask_allowed |= 1 << OVS_KEY_ATTR_UDP; | ||
167 | } | ||
168 | |||
169 | if (match->key->ip.proto == IPPROTO_SCTP) { | ||
170 | key_expected |= 1 << OVS_KEY_ATTR_SCTP; | ||
171 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
172 | mask_allowed |= 1 << OVS_KEY_ATTR_SCTP; | ||
173 | } | ||
174 | |||
175 | if (match->key->ip.proto == IPPROTO_TCP) { | ||
176 | key_expected |= 1 << OVS_KEY_ATTR_TCP; | ||
177 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
178 | mask_allowed |= 1 << OVS_KEY_ATTR_TCP; | ||
179 | } | ||
180 | |||
181 | if (match->key->ip.proto == IPPROTO_ICMP) { | ||
182 | key_expected |= 1 << OVS_KEY_ATTR_ICMP; | ||
183 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
184 | mask_allowed |= 1 << OVS_KEY_ATTR_ICMP; | ||
185 | } | ||
186 | } | ||
187 | } | ||
188 | |||
189 | if (match->key->eth.type == htons(ETH_P_IPV6)) { | ||
190 | key_expected |= 1 << OVS_KEY_ATTR_IPV6; | ||
191 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
192 | mask_allowed |= 1 << OVS_KEY_ATTR_IPV6; | ||
193 | |||
194 | if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
195 | if (match->key->ip.proto == IPPROTO_UDP) { | ||
196 | key_expected |= 1 << OVS_KEY_ATTR_UDP; | ||
197 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
198 | mask_allowed |= 1 << OVS_KEY_ATTR_UDP; | ||
199 | } | ||
200 | |||
201 | if (match->key->ip.proto == IPPROTO_SCTP) { | ||
202 | key_expected |= 1 << OVS_KEY_ATTR_SCTP; | ||
203 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
204 | mask_allowed |= 1 << OVS_KEY_ATTR_SCTP; | ||
205 | } | ||
206 | |||
207 | if (match->key->ip.proto == IPPROTO_TCP) { | ||
208 | key_expected |= 1 << OVS_KEY_ATTR_TCP; | ||
209 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
210 | mask_allowed |= 1 << OVS_KEY_ATTR_TCP; | ||
211 | } | ||
212 | |||
213 | if (match->key->ip.proto == IPPROTO_ICMPV6) { | ||
214 | key_expected |= 1 << OVS_KEY_ATTR_ICMPV6; | ||
215 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
216 | mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6; | ||
217 | |||
218 | if (match->key->ipv6.tp.src == | ||
219 | htons(NDISC_NEIGHBOUR_SOLICITATION) || | ||
220 | match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { | ||
221 | key_expected |= 1 << OVS_KEY_ATTR_ND; | ||
222 | if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff))) | ||
223 | mask_allowed |= 1 << OVS_KEY_ATTR_ND; | ||
224 | } | ||
225 | } | ||
226 | } | ||
227 | } | ||
228 | |||
229 | if ((key_attrs & key_expected) != key_expected) { | ||
230 | /* Key attributes check failed. */ | ||
231 | OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n", | ||
232 | key_attrs, key_expected); | ||
233 | return false; | ||
234 | } | ||
235 | 67 | ||
236 | if ((mask_attrs & mask_allowed) != mask_attrs) { | 68 | if ((flow->key.eth.type == htons(ETH_P_IP) || |
237 | /* Mask attributes check failed. */ | 69 | flow->key.eth.type == htons(ETH_P_IPV6)) && |
238 | OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n", | 70 | flow->key.ip.proto == IPPROTO_TCP && |
239 | mask_attrs, mask_allowed); | 71 | likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { |
240 | return false; | 72 | u8 *tcp = (u8 *)tcp_hdr(skb); |
73 | tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; | ||
241 | } | 74 | } |
242 | 75 | ||
243 | return true; | 76 | spin_lock(&flow->lock); |
77 | flow->used = jiffies; | ||
78 | flow->packet_count++; | ||
79 | flow->byte_count += skb->len; | ||
80 | flow->tcp_flags |= tcp_flags; | ||
81 | spin_unlock(&flow->lock); | ||
244 | } | 82 | } |
245 | 83 | ||
246 | static int check_header(struct sk_buff *skb, int len) | 84 | static int check_header(struct sk_buff *skb, int len) |
@@ -311,19 +149,6 @@ static bool icmphdr_ok(struct sk_buff *skb) | |||
311 | sizeof(struct icmphdr)); | 149 | sizeof(struct icmphdr)); |
312 | } | 150 | } |
313 | 151 | ||
314 | u64 ovs_flow_used_time(unsigned long flow_jiffies) | ||
315 | { | ||
316 | struct timespec cur_ts; | ||
317 | u64 cur_ms, idle_ms; | ||
318 | |||
319 | ktime_get_ts(&cur_ts); | ||
320 | idle_ms = jiffies_to_msecs(jiffies - flow_jiffies); | ||
321 | cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC + | ||
322 | cur_ts.tv_nsec / NSEC_PER_MSEC; | ||
323 | |||
324 | return cur_ms - idle_ms; | ||
325 | } | ||
326 | |||
327 | static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) | 152 | static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) |
328 | { | 153 | { |
329 | unsigned int nh_ofs = skb_network_offset(skb); | 154 | unsigned int nh_ofs = skb_network_offset(skb); |
@@ -372,311 +197,6 @@ static bool icmp6hdr_ok(struct sk_buff *skb) | |||
372 | sizeof(struct icmp6hdr)); | 197 | sizeof(struct icmp6hdr)); |
373 | } | 198 | } |
374 | 199 | ||
375 | void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src, | ||
376 | const struct sw_flow_mask *mask) | ||
377 | { | ||
378 | const long *m = (long *)((u8 *)&mask->key + mask->range.start); | ||
379 | const long *s = (long *)((u8 *)src + mask->range.start); | ||
380 | long *d = (long *)((u8 *)dst + mask->range.start); | ||
381 | int i; | ||
382 | |||
383 | /* The memory outside of the 'mask->range' are not set since | ||
384 | * further operations on 'dst' only uses contents within | ||
385 | * 'mask->range'. | ||
386 | */ | ||
387 | for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) | ||
388 | *d++ = *s++ & *m++; | ||
389 | } | ||
390 | |||
391 | #define TCP_FLAGS_OFFSET 13 | ||
392 | #define TCP_FLAG_MASK 0x3f | ||
393 | |||
394 | void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) | ||
395 | { | ||
396 | u8 tcp_flags = 0; | ||
397 | |||
398 | if ((flow->key.eth.type == htons(ETH_P_IP) || | ||
399 | flow->key.eth.type == htons(ETH_P_IPV6)) && | ||
400 | flow->key.ip.proto == IPPROTO_TCP && | ||
401 | likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { | ||
402 | u8 *tcp = (u8 *)tcp_hdr(skb); | ||
403 | tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; | ||
404 | } | ||
405 | |||
406 | spin_lock(&flow->lock); | ||
407 | flow->used = jiffies; | ||
408 | flow->packet_count++; | ||
409 | flow->byte_count += skb->len; | ||
410 | flow->tcp_flags |= tcp_flags; | ||
411 | spin_unlock(&flow->lock); | ||
412 | } | ||
413 | |||
414 | struct sw_flow_actions *ovs_flow_actions_alloc(int size) | ||
415 | { | ||
416 | struct sw_flow_actions *sfa; | ||
417 | |||
418 | if (size > MAX_ACTIONS_BUFSIZE) | ||
419 | return ERR_PTR(-EINVAL); | ||
420 | |||
421 | sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); | ||
422 | if (!sfa) | ||
423 | return ERR_PTR(-ENOMEM); | ||
424 | |||
425 | sfa->actions_len = 0; | ||
426 | return sfa; | ||
427 | } | ||
428 | |||
429 | struct sw_flow *ovs_flow_alloc(void) | ||
430 | { | ||
431 | struct sw_flow *flow; | ||
432 | |||
433 | flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); | ||
434 | if (!flow) | ||
435 | return ERR_PTR(-ENOMEM); | ||
436 | |||
437 | spin_lock_init(&flow->lock); | ||
438 | flow->sf_acts = NULL; | ||
439 | flow->mask = NULL; | ||
440 | |||
441 | return flow; | ||
442 | } | ||
443 | |||
444 | static struct hlist_head *find_bucket(struct flow_table *table, u32 hash) | ||
445 | { | ||
446 | hash = jhash_1word(hash, table->hash_seed); | ||
447 | return flex_array_get(table->buckets, | ||
448 | (hash & (table->n_buckets - 1))); | ||
449 | } | ||
450 | |||
451 | static struct flex_array *alloc_buckets(unsigned int n_buckets) | ||
452 | { | ||
453 | struct flex_array *buckets; | ||
454 | int i, err; | ||
455 | |||
456 | buckets = flex_array_alloc(sizeof(struct hlist_head), | ||
457 | n_buckets, GFP_KERNEL); | ||
458 | if (!buckets) | ||
459 | return NULL; | ||
460 | |||
461 | err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL); | ||
462 | if (err) { | ||
463 | flex_array_free(buckets); | ||
464 | return NULL; | ||
465 | } | ||
466 | |||
467 | for (i = 0; i < n_buckets; i++) | ||
468 | INIT_HLIST_HEAD((struct hlist_head *) | ||
469 | flex_array_get(buckets, i)); | ||
470 | |||
471 | return buckets; | ||
472 | } | ||
473 | |||
474 | static void free_buckets(struct flex_array *buckets) | ||
475 | { | ||
476 | flex_array_free(buckets); | ||
477 | } | ||
478 | |||
479 | static struct flow_table *__flow_tbl_alloc(int new_size) | ||
480 | { | ||
481 | struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL); | ||
482 | |||
483 | if (!table) | ||
484 | return NULL; | ||
485 | |||
486 | table->buckets = alloc_buckets(new_size); | ||
487 | |||
488 | if (!table->buckets) { | ||
489 | kfree(table); | ||
490 | return NULL; | ||
491 | } | ||
492 | table->n_buckets = new_size; | ||
493 | table->count = 0; | ||
494 | table->node_ver = 0; | ||
495 | table->keep_flows = false; | ||
496 | get_random_bytes(&table->hash_seed, sizeof(u32)); | ||
497 | table->mask_list = NULL; | ||
498 | |||
499 | return table; | ||
500 | } | ||
501 | |||
502 | static void __flow_tbl_destroy(struct flow_table *table) | ||
503 | { | ||
504 | int i; | ||
505 | |||
506 | if (table->keep_flows) | ||
507 | goto skip_flows; | ||
508 | |||
509 | for (i = 0; i < table->n_buckets; i++) { | ||
510 | struct sw_flow *flow; | ||
511 | struct hlist_head *head = flex_array_get(table->buckets, i); | ||
512 | struct hlist_node *n; | ||
513 | int ver = table->node_ver; | ||
514 | |||
515 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { | ||
516 | hlist_del(&flow->hash_node[ver]); | ||
517 | ovs_flow_free(flow, false); | ||
518 | } | ||
519 | } | ||
520 | |||
521 | BUG_ON(!list_empty(table->mask_list)); | ||
522 | kfree(table->mask_list); | ||
523 | |||
524 | skip_flows: | ||
525 | free_buckets(table->buckets); | ||
526 | kfree(table); | ||
527 | } | ||
528 | |||
529 | struct flow_table *ovs_flow_tbl_alloc(int new_size) | ||
530 | { | ||
531 | struct flow_table *table = __flow_tbl_alloc(new_size); | ||
532 | |||
533 | if (!table) | ||
534 | return NULL; | ||
535 | |||
536 | table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); | ||
537 | if (!table->mask_list) { | ||
538 | table->keep_flows = true; | ||
539 | __flow_tbl_destroy(table); | ||
540 | return NULL; | ||
541 | } | ||
542 | INIT_LIST_HEAD(table->mask_list); | ||
543 | |||
544 | return table; | ||
545 | } | ||
546 | |||
547 | static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) | ||
548 | { | ||
549 | struct flow_table *table = container_of(rcu, struct flow_table, rcu); | ||
550 | |||
551 | __flow_tbl_destroy(table); | ||
552 | } | ||
553 | |||
554 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) | ||
555 | { | ||
556 | if (!table) | ||
557 | return; | ||
558 | |||
559 | if (deferred) | ||
560 | call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); | ||
561 | else | ||
562 | __flow_tbl_destroy(table); | ||
563 | } | ||
564 | |||
565 | struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last) | ||
566 | { | ||
567 | struct sw_flow *flow; | ||
568 | struct hlist_head *head; | ||
569 | int ver; | ||
570 | int i; | ||
571 | |||
572 | ver = table->node_ver; | ||
573 | while (*bucket < table->n_buckets) { | ||
574 | i = 0; | ||
575 | head = flex_array_get(table->buckets, *bucket); | ||
576 | hlist_for_each_entry_rcu(flow, head, hash_node[ver]) { | ||
577 | if (i < *last) { | ||
578 | i++; | ||
579 | continue; | ||
580 | } | ||
581 | *last = i + 1; | ||
582 | return flow; | ||
583 | } | ||
584 | (*bucket)++; | ||
585 | *last = 0; | ||
586 | } | ||
587 | |||
588 | return NULL; | ||
589 | } | ||
590 | |||
591 | static void __tbl_insert(struct flow_table *table, struct sw_flow *flow) | ||
592 | { | ||
593 | struct hlist_head *head; | ||
594 | |||
595 | head = find_bucket(table, flow->hash); | ||
596 | hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); | ||
597 | |||
598 | table->count++; | ||
599 | } | ||
600 | |||
601 | static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new) | ||
602 | { | ||
603 | int old_ver; | ||
604 | int i; | ||
605 | |||
606 | old_ver = old->node_ver; | ||
607 | new->node_ver = !old_ver; | ||
608 | |||
609 | /* Insert in new table. */ | ||
610 | for (i = 0; i < old->n_buckets; i++) { | ||
611 | struct sw_flow *flow; | ||
612 | struct hlist_head *head; | ||
613 | |||
614 | head = flex_array_get(old->buckets, i); | ||
615 | |||
616 | hlist_for_each_entry(flow, head, hash_node[old_ver]) | ||
617 | __tbl_insert(new, flow); | ||
618 | } | ||
619 | |||
620 | new->mask_list = old->mask_list; | ||
621 | old->keep_flows = true; | ||
622 | } | ||
623 | |||
624 | static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets) | ||
625 | { | ||
626 | struct flow_table *new_table; | ||
627 | |||
628 | new_table = __flow_tbl_alloc(n_buckets); | ||
629 | if (!new_table) | ||
630 | return ERR_PTR(-ENOMEM); | ||
631 | |||
632 | flow_table_copy_flows(table, new_table); | ||
633 | |||
634 | return new_table; | ||
635 | } | ||
636 | |||
637 | struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table) | ||
638 | { | ||
639 | return __flow_tbl_rehash(table, table->n_buckets); | ||
640 | } | ||
641 | |||
642 | struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) | ||
643 | { | ||
644 | return __flow_tbl_rehash(table, table->n_buckets * 2); | ||
645 | } | ||
646 | |||
647 | static void __flow_free(struct sw_flow *flow) | ||
648 | { | ||
649 | kfree((struct sf_flow_acts __force *)flow->sf_acts); | ||
650 | kmem_cache_free(flow_cache, flow); | ||
651 | } | ||
652 | |||
653 | static void rcu_free_flow_callback(struct rcu_head *rcu) | ||
654 | { | ||
655 | struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); | ||
656 | |||
657 | __flow_free(flow); | ||
658 | } | ||
659 | |||
660 | void ovs_flow_free(struct sw_flow *flow, bool deferred) | ||
661 | { | ||
662 | if (!flow) | ||
663 | return; | ||
664 | |||
665 | ovs_sw_flow_mask_del_ref(flow->mask, deferred); | ||
666 | |||
667 | if (deferred) | ||
668 | call_rcu(&flow->rcu, rcu_free_flow_callback); | ||
669 | else | ||
670 | __flow_free(flow); | ||
671 | } | ||
672 | |||
673 | /* Schedules 'sf_acts' to be freed after the next RCU grace period. | ||
674 | * The caller must hold rcu_read_lock for this to be sensible. */ | ||
675 | void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts) | ||
676 | { | ||
677 | kfree_rcu(sf_acts, rcu); | ||
678 | } | ||
679 | |||
680 | static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) | 200 | static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) |
681 | { | 201 | { |
682 | struct qtag_prefix { | 202 | struct qtag_prefix { |
@@ -1002,1080 +522,3 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) | |||
1002 | 522 | ||
1003 | return 0; | 523 | return 0; |
1004 | } | 524 | } |
1005 | |||
1006 | static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, | ||
1007 | int key_end) | ||
1008 | { | ||
1009 | u32 *hash_key = (u32 *)((u8 *)key + key_start); | ||
1010 | int hash_u32s = (key_end - key_start) >> 2; | ||
1011 | |||
1012 | /* Make sure number of hash bytes are multiple of u32. */ | ||
1013 | BUILD_BUG_ON(sizeof(long) % sizeof(u32)); | ||
1014 | |||
1015 | return jhash2(hash_key, hash_u32s, 0); | ||
1016 | } | ||
1017 | |||
1018 | static int flow_key_start(const struct sw_flow_key *key) | ||
1019 | { | ||
1020 | if (key->tun_key.ipv4_dst) | ||
1021 | return 0; | ||
1022 | else | ||
1023 | return rounddown(offsetof(struct sw_flow_key, phy), | ||
1024 | sizeof(long)); | ||
1025 | } | ||
1026 | |||
1027 | static bool __cmp_key(const struct sw_flow_key *key1, | ||
1028 | const struct sw_flow_key *key2, int key_start, int key_end) | ||
1029 | { | ||
1030 | const long *cp1 = (long *)((u8 *)key1 + key_start); | ||
1031 | const long *cp2 = (long *)((u8 *)key2 + key_start); | ||
1032 | long diffs = 0; | ||
1033 | int i; | ||
1034 | |||
1035 | for (i = key_start; i < key_end; i += sizeof(long)) | ||
1036 | diffs |= *cp1++ ^ *cp2++; | ||
1037 | |||
1038 | return diffs == 0; | ||
1039 | } | ||
1040 | |||
1041 | static bool __flow_cmp_masked_key(const struct sw_flow *flow, | ||
1042 | const struct sw_flow_key *key, int key_start, int key_end) | ||
1043 | { | ||
1044 | return __cmp_key(&flow->key, key, key_start, key_end); | ||
1045 | } | ||
1046 | |||
1047 | static bool __flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
1048 | const struct sw_flow_key *key, int key_start, int key_end) | ||
1049 | { | ||
1050 | return __cmp_key(&flow->unmasked_key, key, key_start, key_end); | ||
1051 | } | ||
1052 | |||
1053 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
1054 | const struct sw_flow_key *key, int key_end) | ||
1055 | { | ||
1056 | int key_start; | ||
1057 | key_start = flow_key_start(key); | ||
1058 | |||
1059 | return __flow_cmp_unmasked_key(flow, key, key_start, key_end); | ||
1060 | |||
1061 | } | ||
1062 | |||
1063 | struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table, | ||
1064 | struct sw_flow_match *match) | ||
1065 | { | ||
1066 | struct sw_flow_key *unmasked = match->key; | ||
1067 | int key_end = match->range.end; | ||
1068 | struct sw_flow *flow; | ||
1069 | |||
1070 | flow = ovs_flow_lookup(table, unmasked); | ||
1071 | if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end))) | ||
1072 | flow = NULL; | ||
1073 | |||
1074 | return flow; | ||
1075 | } | ||
1076 | |||
1077 | static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table, | ||
1078 | const struct sw_flow_key *unmasked, | ||
1079 | struct sw_flow_mask *mask) | ||
1080 | { | ||
1081 | struct sw_flow *flow; | ||
1082 | struct hlist_head *head; | ||
1083 | int key_start = mask->range.start; | ||
1084 | int key_end = mask->range.end; | ||
1085 | u32 hash; | ||
1086 | struct sw_flow_key masked_key; | ||
1087 | |||
1088 | ovs_flow_key_mask(&masked_key, unmasked, mask); | ||
1089 | hash = ovs_flow_hash(&masked_key, key_start, key_end); | ||
1090 | head = find_bucket(table, hash); | ||
1091 | hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) { | ||
1092 | if (flow->mask == mask && | ||
1093 | __flow_cmp_masked_key(flow, &masked_key, | ||
1094 | key_start, key_end)) | ||
1095 | return flow; | ||
1096 | } | ||
1097 | return NULL; | ||
1098 | } | ||
1099 | |||
1100 | struct sw_flow *ovs_flow_lookup(struct flow_table *tbl, | ||
1101 | const struct sw_flow_key *key) | ||
1102 | { | ||
1103 | struct sw_flow *flow = NULL; | ||
1104 | struct sw_flow_mask *mask; | ||
1105 | |||
1106 | list_for_each_entry_rcu(mask, tbl->mask_list, list) { | ||
1107 | flow = ovs_masked_flow_lookup(tbl, key, mask); | ||
1108 | if (flow) /* Found */ | ||
1109 | break; | ||
1110 | } | ||
1111 | |||
1112 | return flow; | ||
1113 | } | ||
1114 | |||
1115 | |||
1116 | void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow) | ||
1117 | { | ||
1118 | flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start, | ||
1119 | flow->mask->range.end); | ||
1120 | __tbl_insert(table, flow); | ||
1121 | } | ||
1122 | |||
1123 | void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow) | ||
1124 | { | ||
1125 | BUG_ON(table->count == 0); | ||
1126 | hlist_del_rcu(&flow->hash_node[table->node_ver]); | ||
1127 | table->count--; | ||
1128 | } | ||
1129 | |||
1130 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ | ||
1131 | const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { | ||
1132 | [OVS_KEY_ATTR_ENCAP] = -1, | ||
1133 | [OVS_KEY_ATTR_PRIORITY] = sizeof(u32), | ||
1134 | [OVS_KEY_ATTR_IN_PORT] = sizeof(u32), | ||
1135 | [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32), | ||
1136 | [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet), | ||
1137 | [OVS_KEY_ATTR_VLAN] = sizeof(__be16), | ||
1138 | [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16), | ||
1139 | [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4), | ||
1140 | [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6), | ||
1141 | [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp), | ||
1142 | [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp), | ||
1143 | [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp), | ||
1144 | [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp), | ||
1145 | [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), | ||
1146 | [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), | ||
1147 | [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd), | ||
1148 | [OVS_KEY_ATTR_TUNNEL] = -1, | ||
1149 | }; | ||
1150 | |||
1151 | static bool is_all_zero(const u8 *fp, size_t size) | ||
1152 | { | ||
1153 | int i; | ||
1154 | |||
1155 | if (!fp) | ||
1156 | return false; | ||
1157 | |||
1158 | for (i = 0; i < size; i++) | ||
1159 | if (fp[i]) | ||
1160 | return false; | ||
1161 | |||
1162 | return true; | ||
1163 | } | ||
1164 | |||
1165 | static int __parse_flow_nlattrs(const struct nlattr *attr, | ||
1166 | const struct nlattr *a[], | ||
1167 | u64 *attrsp, bool nz) | ||
1168 | { | ||
1169 | const struct nlattr *nla; | ||
1170 | u32 attrs; | ||
1171 | int rem; | ||
1172 | |||
1173 | attrs = *attrsp; | ||
1174 | nla_for_each_nested(nla, attr, rem) { | ||
1175 | u16 type = nla_type(nla); | ||
1176 | int expected_len; | ||
1177 | |||
1178 | if (type > OVS_KEY_ATTR_MAX) { | ||
1179 | OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n", | ||
1180 | type, OVS_KEY_ATTR_MAX); | ||
1181 | return -EINVAL; | ||
1182 | } | ||
1183 | |||
1184 | if (attrs & (1 << type)) { | ||
1185 | OVS_NLERR("Duplicate key attribute (type %d).\n", type); | ||
1186 | return -EINVAL; | ||
1187 | } | ||
1188 | |||
1189 | expected_len = ovs_key_lens[type]; | ||
1190 | if (nla_len(nla) != expected_len && expected_len != -1) { | ||
1191 | OVS_NLERR("Key attribute has unexpected length (type=%d" | ||
1192 | ", length=%d, expected=%d).\n", type, | ||
1193 | nla_len(nla), expected_len); | ||
1194 | return -EINVAL; | ||
1195 | } | ||
1196 | |||
1197 | if (!nz || !is_all_zero(nla_data(nla), expected_len)) { | ||
1198 | attrs |= 1 << type; | ||
1199 | a[type] = nla; | ||
1200 | } | ||
1201 | } | ||
1202 | if (rem) { | ||
1203 | OVS_NLERR("Message has %d unknown bytes.\n", rem); | ||
1204 | return -EINVAL; | ||
1205 | } | ||
1206 | |||
1207 | *attrsp = attrs; | ||
1208 | return 0; | ||
1209 | } | ||
1210 | |||
1211 | static int parse_flow_mask_nlattrs(const struct nlattr *attr, | ||
1212 | const struct nlattr *a[], u64 *attrsp) | ||
1213 | { | ||
1214 | return __parse_flow_nlattrs(attr, a, attrsp, true); | ||
1215 | } | ||
1216 | |||
1217 | static int parse_flow_nlattrs(const struct nlattr *attr, | ||
1218 | const struct nlattr *a[], u64 *attrsp) | ||
1219 | { | ||
1220 | return __parse_flow_nlattrs(attr, a, attrsp, false); | ||
1221 | } | ||
1222 | |||
1223 | int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, | ||
1224 | struct sw_flow_match *match, bool is_mask) | ||
1225 | { | ||
1226 | struct nlattr *a; | ||
1227 | int rem; | ||
1228 | bool ttl = false; | ||
1229 | __be16 tun_flags = 0; | ||
1230 | |||
1231 | nla_for_each_nested(a, attr, rem) { | ||
1232 | int type = nla_type(a); | ||
1233 | static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { | ||
1234 | [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64), | ||
1235 | [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32), | ||
1236 | [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32), | ||
1237 | [OVS_TUNNEL_KEY_ATTR_TOS] = 1, | ||
1238 | [OVS_TUNNEL_KEY_ATTR_TTL] = 1, | ||
1239 | [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0, | ||
1240 | [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, | ||
1241 | }; | ||
1242 | |||
1243 | if (type > OVS_TUNNEL_KEY_ATTR_MAX) { | ||
1244 | OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n", | ||
1245 | type, OVS_TUNNEL_KEY_ATTR_MAX); | ||
1246 | return -EINVAL; | ||
1247 | } | ||
1248 | |||
1249 | if (ovs_tunnel_key_lens[type] != nla_len(a)) { | ||
1250 | OVS_NLERR("IPv4 tunnel attribute type has unexpected " | ||
1251 | " length (type=%d, length=%d, expected=%d).\n", | ||
1252 | type, nla_len(a), ovs_tunnel_key_lens[type]); | ||
1253 | return -EINVAL; | ||
1254 | } | ||
1255 | |||
1256 | switch (type) { | ||
1257 | case OVS_TUNNEL_KEY_ATTR_ID: | ||
1258 | SW_FLOW_KEY_PUT(match, tun_key.tun_id, | ||
1259 | nla_get_be64(a), is_mask); | ||
1260 | tun_flags |= TUNNEL_KEY; | ||
1261 | break; | ||
1262 | case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: | ||
1263 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_src, | ||
1264 | nla_get_be32(a), is_mask); | ||
1265 | break; | ||
1266 | case OVS_TUNNEL_KEY_ATTR_IPV4_DST: | ||
1267 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst, | ||
1268 | nla_get_be32(a), is_mask); | ||
1269 | break; | ||
1270 | case OVS_TUNNEL_KEY_ATTR_TOS: | ||
1271 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos, | ||
1272 | nla_get_u8(a), is_mask); | ||
1273 | break; | ||
1274 | case OVS_TUNNEL_KEY_ATTR_TTL: | ||
1275 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl, | ||
1276 | nla_get_u8(a), is_mask); | ||
1277 | ttl = true; | ||
1278 | break; | ||
1279 | case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: | ||
1280 | tun_flags |= TUNNEL_DONT_FRAGMENT; | ||
1281 | break; | ||
1282 | case OVS_TUNNEL_KEY_ATTR_CSUM: | ||
1283 | tun_flags |= TUNNEL_CSUM; | ||
1284 | break; | ||
1285 | default: | ||
1286 | return -EINVAL; | ||
1287 | } | ||
1288 | } | ||
1289 | |||
1290 | SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask); | ||
1291 | |||
1292 | if (rem > 0) { | ||
1293 | OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem); | ||
1294 | return -EINVAL; | ||
1295 | } | ||
1296 | |||
1297 | if (!is_mask) { | ||
1298 | if (!match->key->tun_key.ipv4_dst) { | ||
1299 | OVS_NLERR("IPv4 tunnel destination address is zero.\n"); | ||
1300 | return -EINVAL; | ||
1301 | } | ||
1302 | |||
1303 | if (!ttl) { | ||
1304 | OVS_NLERR("IPv4 tunnel TTL not specified.\n"); | ||
1305 | return -EINVAL; | ||
1306 | } | ||
1307 | } | ||
1308 | |||
1309 | return 0; | ||
1310 | } | ||
1311 | |||
1312 | int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, | ||
1313 | const struct ovs_key_ipv4_tunnel *tun_key, | ||
1314 | const struct ovs_key_ipv4_tunnel *output) | ||
1315 | { | ||
1316 | struct nlattr *nla; | ||
1317 | |||
1318 | nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL); | ||
1319 | if (!nla) | ||
1320 | return -EMSGSIZE; | ||
1321 | |||
1322 | if (output->tun_flags & TUNNEL_KEY && | ||
1323 | nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id)) | ||
1324 | return -EMSGSIZE; | ||
1325 | if (output->ipv4_src && | ||
1326 | nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src)) | ||
1327 | return -EMSGSIZE; | ||
1328 | if (output->ipv4_dst && | ||
1329 | nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst)) | ||
1330 | return -EMSGSIZE; | ||
1331 | if (output->ipv4_tos && | ||
1332 | nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos)) | ||
1333 | return -EMSGSIZE; | ||
1334 | if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl)) | ||
1335 | return -EMSGSIZE; | ||
1336 | if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) && | ||
1337 | nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) | ||
1338 | return -EMSGSIZE; | ||
1339 | if ((output->tun_flags & TUNNEL_CSUM) && | ||
1340 | nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) | ||
1341 | return -EMSGSIZE; | ||
1342 | |||
1343 | nla_nest_end(skb, nla); | ||
1344 | return 0; | ||
1345 | } | ||
1346 | |||
/* Parse the metadata OVS_KEY_ATTR_* attributes (priority, in_port,
 * skb_mark, tunnel) from the attribute table @a into @match.
 *
 * Each attribute that is consumed has its bit cleared in *attrs so the
 * caller can detect leftover, unhandled attributes afterwards.
 * Returns 0 on success, or -EINVAL for an out-of-range in_port or a bad
 * tunnel attribute.
 */
static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
				 const struct nlattr **a, bool is_mask)
{
	if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
			  nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (is_mask)
			in_port = 0xffffffff; /* Always exact match in_port. */
		else if (in_port >= DP_MAX_PORTS)
			return -EINVAL;

		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else if (!is_mask) {
		/* No in_port attribute: record the "no port" sentinel. */
		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);

		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
		/* Tunnel parse errors are collapsed to -EINVAL here. */
		if (ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
					     is_mask))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
	}
	return 0;
}
1384 | |||
/* Parse the full set of OVS_KEY_ATTR_* flow-key attributes in @a into
 * @match, after first consuming the metadata attributes.
 *
 * @attrs is the bitmap of attributes present in @a; every attribute that
 * is handled has its bit cleared, and any bit still set at the end means
 * an unexpected attribute was supplied (-EINVAL).  When @is_mask is true
 * the values are stored into the mask instead of the key, and some fields
 * (e.g. EtherType) are forced to exact-match.
 *
 * NOTE: SW_FLOW_KEY_PUT/SW_FLOW_KEY_MEMCPY also extend the match's key
 * range as a side effect, so the order of these stores matters.
 */
static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
				const struct nlattr **a, bool is_mask)
{
	int err;
	/* L4 attributes below need to know whether the original key had
	 * IPv4 or IPv6, even after those bits are cleared from attrs. */
	u64 orig_attrs = attrs;

	err = metadata_from_nlattrs(match, &attrs, a, is_mask);
	if (err)
		return err;

	if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
	}

	if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
		__be16 tci;

		/* The VLAN_TAG_PRESENT bit must be set in both keys and
		 * masks; a mask without it would wildcard tag presence. */
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (!(tci & htons(VLAN_TAG_PRESENT))) {
			if (is_mask)
				OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
			else
				OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");

			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
	} else if (!is_mask)
		/* No VLAN attribute: TCI is exact-matched as zero. */
		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		__be16 eth_type;

		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (is_mask) {
			/* Always exact match EtherType. */
			eth_type = htons(0xffff);
		} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
			OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
					ntohs(eth_type), ETH_P_802_3_MIN);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else if (!is_mask) {
		/* Missing EtherType means an 802.2 frame. */
		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
				ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
				ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				ipv6_key->ipv6_src,
				sizeof(match->key->ipv6.addr.src),
				is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				ipv6_key->ipv6_dst,
				sizeof(match->key->ipv6.addr.dst),
				is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		/* Only 8-bit ARP opcodes fit in ip.proto below. */
		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
			OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
				  arp_key->arp_op);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
			arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		/* Port fields live under ipv4 or ipv6 depending on the
		 * original key's network-layer attribute. */
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					tcp_key->tcp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					tcp_key->tcp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					tcp_key->tcp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					tcp_key->tcp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					udp_key->udp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					udp_key->udp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					udp_key->udp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					udp_key->udp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
		const struct ovs_key_sctp *sctp_key;

		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					sctp_key->sctp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					sctp_key->sctp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					sctp_key->sctp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					sctp_key->sctp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		/* ICMP type/code are stored in the transport port fields. */
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, ipv6.tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
			nd_key->nd_target,
			sizeof(match->key->ipv6.nd.target),
			is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
			nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ND);
	}

	/* Any bit still set is an attribute we do not understand. */
	if (attrs != 0)
		return -EINVAL;

	return 0;
}
1618 | |||
1619 | /** | ||
1620 | * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and | ||
1621 | * mask. In case the 'mask' is NULL, the flow is treated as exact match | ||
1622 | * flow. Otherwise, it is treated as a wildcarded flow, except the mask | ||
1623 | * does not include any don't care bit. | ||
1624 | * @match: receives the extracted flow match information. | ||
1625 | * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute | ||
1626 | * sequence. The fields should of the packet that triggered the creation | ||
1627 | * of this flow. | ||
1628 | * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink | ||
1629 | * attribute specifies the mask field of the wildcarded flow. | ||
1630 | */ | ||
1631 | int ovs_match_from_nlattrs(struct sw_flow_match *match, | ||
1632 | const struct nlattr *key, | ||
1633 | const struct nlattr *mask) | ||
1634 | { | ||
1635 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; | ||
1636 | const struct nlattr *encap; | ||
1637 | u64 key_attrs = 0; | ||
1638 | u64 mask_attrs = 0; | ||
1639 | bool encap_valid = false; | ||
1640 | int err; | ||
1641 | |||
1642 | err = parse_flow_nlattrs(key, a, &key_attrs); | ||
1643 | if (err) | ||
1644 | return err; | ||
1645 | |||
1646 | if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) && | ||
1647 | (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) && | ||
1648 | (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) { | ||
1649 | __be16 tci; | ||
1650 | |||
1651 | if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) && | ||
1652 | (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) { | ||
1653 | OVS_NLERR("Invalid Vlan frame.\n"); | ||
1654 | return -EINVAL; | ||
1655 | } | ||
1656 | |||
1657 | key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); | ||
1658 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); | ||
1659 | encap = a[OVS_KEY_ATTR_ENCAP]; | ||
1660 | key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); | ||
1661 | encap_valid = true; | ||
1662 | |||
1663 | if (tci & htons(VLAN_TAG_PRESENT)) { | ||
1664 | err = parse_flow_nlattrs(encap, a, &key_attrs); | ||
1665 | if (err) | ||
1666 | return err; | ||
1667 | } else if (!tci) { | ||
1668 | /* Corner case for truncated 802.1Q header. */ | ||
1669 | if (nla_len(encap)) { | ||
1670 | OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n"); | ||
1671 | return -EINVAL; | ||
1672 | } | ||
1673 | } else { | ||
1674 | OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n"); | ||
1675 | return -EINVAL; | ||
1676 | } | ||
1677 | } | ||
1678 | |||
1679 | err = ovs_key_from_nlattrs(match, key_attrs, a, false); | ||
1680 | if (err) | ||
1681 | return err; | ||
1682 | |||
1683 | if (mask) { | ||
1684 | err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); | ||
1685 | if (err) | ||
1686 | return err; | ||
1687 | |||
1688 | if (mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) { | ||
1689 | __be16 eth_type = 0; | ||
1690 | __be16 tci = 0; | ||
1691 | |||
1692 | if (!encap_valid) { | ||
1693 | OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n"); | ||
1694 | return -EINVAL; | ||
1695 | } | ||
1696 | |||
1697 | mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); | ||
1698 | if (a[OVS_KEY_ATTR_ETHERTYPE]) | ||
1699 | eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); | ||
1700 | |||
1701 | if (eth_type == htons(0xffff)) { | ||
1702 | mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); | ||
1703 | encap = a[OVS_KEY_ATTR_ENCAP]; | ||
1704 | err = parse_flow_mask_nlattrs(encap, a, &mask_attrs); | ||
1705 | } else { | ||
1706 | OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n", | ||
1707 | ntohs(eth_type)); | ||
1708 | return -EINVAL; | ||
1709 | } | ||
1710 | |||
1711 | if (a[OVS_KEY_ATTR_VLAN]) | ||
1712 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); | ||
1713 | |||
1714 | if (!(tci & htons(VLAN_TAG_PRESENT))) { | ||
1715 | OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci)); | ||
1716 | return -EINVAL; | ||
1717 | } | ||
1718 | } | ||
1719 | |||
1720 | err = ovs_key_from_nlattrs(match, mask_attrs, a, true); | ||
1721 | if (err) | ||
1722 | return err; | ||
1723 | } else { | ||
1724 | /* Populate exact match flow's key mask. */ | ||
1725 | if (match->mask) | ||
1726 | ovs_sw_flow_mask_set(match->mask, &match->range, 0xff); | ||
1727 | } | ||
1728 | |||
1729 | if (!ovs_match_validate(match, key_attrs, mask_attrs)) | ||
1730 | return -EINVAL; | ||
1731 | |||
1732 | return 0; | ||
1733 | } | ||
1734 | |||
1735 | /** | ||
1736 | * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key. | ||
1737 | * @flow: Receives extracted in_port, priority, tun_key and skb_mark. | ||
1738 | * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute | ||
1739 | * sequence. | ||
1740 | * | ||
1741 | * This parses a series of Netlink attributes that form a flow key, which must | ||
1742 | * take the same form accepted by flow_from_nlattrs(), but only enough of it to | ||
1743 | * get the metadata, that is, the parts of the flow key that cannot be | ||
1744 | * extracted from the packet itself. | ||
1745 | */ | ||
1746 | |||
1747 | int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, | ||
1748 | const struct nlattr *attr) | ||
1749 | { | ||
1750 | struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; | ||
1751 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; | ||
1752 | u64 attrs = 0; | ||
1753 | int err; | ||
1754 | struct sw_flow_match match; | ||
1755 | |||
1756 | flow->key.phy.in_port = DP_MAX_PORTS; | ||
1757 | flow->key.phy.priority = 0; | ||
1758 | flow->key.phy.skb_mark = 0; | ||
1759 | memset(tun_key, 0, sizeof(flow->key.tun_key)); | ||
1760 | |||
1761 | err = parse_flow_nlattrs(attr, a, &attrs); | ||
1762 | if (err) | ||
1763 | return -EINVAL; | ||
1764 | |||
1765 | memset(&match, 0, sizeof(match)); | ||
1766 | match.key = &flow->key; | ||
1767 | |||
1768 | err = metadata_from_nlattrs(&match, &attrs, a, false); | ||
1769 | if (err) | ||
1770 | return err; | ||
1771 | |||
1772 | return 0; | ||
1773 | } | ||
1774 | |||
1775 | int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, | ||
1776 | const struct sw_flow_key *output, struct sk_buff *skb) | ||
1777 | { | ||
1778 | struct ovs_key_ethernet *eth_key; | ||
1779 | struct nlattr *nla, *encap; | ||
1780 | bool is_mask = (swkey != output); | ||
1781 | |||
1782 | if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority)) | ||
1783 | goto nla_put_failure; | ||
1784 | |||
1785 | if ((swkey->tun_key.ipv4_dst || is_mask) && | ||
1786 | ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key)) | ||
1787 | goto nla_put_failure; | ||
1788 | |||
1789 | if (swkey->phy.in_port == DP_MAX_PORTS) { | ||
1790 | if (is_mask && (output->phy.in_port == 0xffff)) | ||
1791 | if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff)) | ||
1792 | goto nla_put_failure; | ||
1793 | } else { | ||
1794 | u16 upper_u16; | ||
1795 | upper_u16 = !is_mask ? 0 : 0xffff; | ||
1796 | |||
1797 | if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, | ||
1798 | (upper_u16 << 16) | output->phy.in_port)) | ||
1799 | goto nla_put_failure; | ||
1800 | } | ||
1801 | |||
1802 | if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark)) | ||
1803 | goto nla_put_failure; | ||
1804 | |||
1805 | nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); | ||
1806 | if (!nla) | ||
1807 | goto nla_put_failure; | ||
1808 | |||
1809 | eth_key = nla_data(nla); | ||
1810 | memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN); | ||
1811 | memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN); | ||
1812 | |||
1813 | if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { | ||
1814 | __be16 eth_type; | ||
1815 | eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff); | ||
1816 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) || | ||
1817 | nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci)) | ||
1818 | goto nla_put_failure; | ||
1819 | encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); | ||
1820 | if (!swkey->eth.tci) | ||
1821 | goto unencap; | ||
1822 | } else | ||
1823 | encap = NULL; | ||
1824 | |||
1825 | if (swkey->eth.type == htons(ETH_P_802_2)) { | ||
1826 | /* | ||
1827 | * Ethertype 802.2 is represented in the netlink with omitted | ||
1828 | * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and | ||
1829 | * 0xffff in the mask attribute. Ethertype can also | ||
1830 | * be wildcarded. | ||
1831 | */ | ||
1832 | if (is_mask && output->eth.type) | ||
1833 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, | ||
1834 | output->eth.type)) | ||
1835 | goto nla_put_failure; | ||
1836 | goto unencap; | ||
1837 | } | ||
1838 | |||
1839 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type)) | ||
1840 | goto nla_put_failure; | ||
1841 | |||
1842 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
1843 | struct ovs_key_ipv4 *ipv4_key; | ||
1844 | |||
1845 | nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key)); | ||
1846 | if (!nla) | ||
1847 | goto nla_put_failure; | ||
1848 | ipv4_key = nla_data(nla); | ||
1849 | ipv4_key->ipv4_src = output->ipv4.addr.src; | ||
1850 | ipv4_key->ipv4_dst = output->ipv4.addr.dst; | ||
1851 | ipv4_key->ipv4_proto = output->ip.proto; | ||
1852 | ipv4_key->ipv4_tos = output->ip.tos; | ||
1853 | ipv4_key->ipv4_ttl = output->ip.ttl; | ||
1854 | ipv4_key->ipv4_frag = output->ip.frag; | ||
1855 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
1856 | struct ovs_key_ipv6 *ipv6_key; | ||
1857 | |||
1858 | nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key)); | ||
1859 | if (!nla) | ||
1860 | goto nla_put_failure; | ||
1861 | ipv6_key = nla_data(nla); | ||
1862 | memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src, | ||
1863 | sizeof(ipv6_key->ipv6_src)); | ||
1864 | memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst, | ||
1865 | sizeof(ipv6_key->ipv6_dst)); | ||
1866 | ipv6_key->ipv6_label = output->ipv6.label; | ||
1867 | ipv6_key->ipv6_proto = output->ip.proto; | ||
1868 | ipv6_key->ipv6_tclass = output->ip.tos; | ||
1869 | ipv6_key->ipv6_hlimit = output->ip.ttl; | ||
1870 | ipv6_key->ipv6_frag = output->ip.frag; | ||
1871 | } else if (swkey->eth.type == htons(ETH_P_ARP) || | ||
1872 | swkey->eth.type == htons(ETH_P_RARP)) { | ||
1873 | struct ovs_key_arp *arp_key; | ||
1874 | |||
1875 | nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key)); | ||
1876 | if (!nla) | ||
1877 | goto nla_put_failure; | ||
1878 | arp_key = nla_data(nla); | ||
1879 | memset(arp_key, 0, sizeof(struct ovs_key_arp)); | ||
1880 | arp_key->arp_sip = output->ipv4.addr.src; | ||
1881 | arp_key->arp_tip = output->ipv4.addr.dst; | ||
1882 | arp_key->arp_op = htons(output->ip.proto); | ||
1883 | memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN); | ||
1884 | memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN); | ||
1885 | } | ||
1886 | |||
1887 | if ((swkey->eth.type == htons(ETH_P_IP) || | ||
1888 | swkey->eth.type == htons(ETH_P_IPV6)) && | ||
1889 | swkey->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
1890 | |||
1891 | if (swkey->ip.proto == IPPROTO_TCP) { | ||
1892 | struct ovs_key_tcp *tcp_key; | ||
1893 | |||
1894 | nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key)); | ||
1895 | if (!nla) | ||
1896 | goto nla_put_failure; | ||
1897 | tcp_key = nla_data(nla); | ||
1898 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
1899 | tcp_key->tcp_src = output->ipv4.tp.src; | ||
1900 | tcp_key->tcp_dst = output->ipv4.tp.dst; | ||
1901 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
1902 | tcp_key->tcp_src = output->ipv6.tp.src; | ||
1903 | tcp_key->tcp_dst = output->ipv6.tp.dst; | ||
1904 | } | ||
1905 | } else if (swkey->ip.proto == IPPROTO_UDP) { | ||
1906 | struct ovs_key_udp *udp_key; | ||
1907 | |||
1908 | nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key)); | ||
1909 | if (!nla) | ||
1910 | goto nla_put_failure; | ||
1911 | udp_key = nla_data(nla); | ||
1912 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
1913 | udp_key->udp_src = output->ipv4.tp.src; | ||
1914 | udp_key->udp_dst = output->ipv4.tp.dst; | ||
1915 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
1916 | udp_key->udp_src = output->ipv6.tp.src; | ||
1917 | udp_key->udp_dst = output->ipv6.tp.dst; | ||
1918 | } | ||
1919 | } else if (swkey->ip.proto == IPPROTO_SCTP) { | ||
1920 | struct ovs_key_sctp *sctp_key; | ||
1921 | |||
1922 | nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key)); | ||
1923 | if (!nla) | ||
1924 | goto nla_put_failure; | ||
1925 | sctp_key = nla_data(nla); | ||
1926 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
1927 | sctp_key->sctp_src = swkey->ipv4.tp.src; | ||
1928 | sctp_key->sctp_dst = swkey->ipv4.tp.dst; | ||
1929 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
1930 | sctp_key->sctp_src = swkey->ipv6.tp.src; | ||
1931 | sctp_key->sctp_dst = swkey->ipv6.tp.dst; | ||
1932 | } | ||
1933 | } else if (swkey->eth.type == htons(ETH_P_IP) && | ||
1934 | swkey->ip.proto == IPPROTO_ICMP) { | ||
1935 | struct ovs_key_icmp *icmp_key; | ||
1936 | |||
1937 | nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key)); | ||
1938 | if (!nla) | ||
1939 | goto nla_put_failure; | ||
1940 | icmp_key = nla_data(nla); | ||
1941 | icmp_key->icmp_type = ntohs(output->ipv4.tp.src); | ||
1942 | icmp_key->icmp_code = ntohs(output->ipv4.tp.dst); | ||
1943 | } else if (swkey->eth.type == htons(ETH_P_IPV6) && | ||
1944 | swkey->ip.proto == IPPROTO_ICMPV6) { | ||
1945 | struct ovs_key_icmpv6 *icmpv6_key; | ||
1946 | |||
1947 | nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6, | ||
1948 | sizeof(*icmpv6_key)); | ||
1949 | if (!nla) | ||
1950 | goto nla_put_failure; | ||
1951 | icmpv6_key = nla_data(nla); | ||
1952 | icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src); | ||
1953 | icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst); | ||
1954 | |||
1955 | if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || | ||
1956 | icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { | ||
1957 | struct ovs_key_nd *nd_key; | ||
1958 | |||
1959 | nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key)); | ||
1960 | if (!nla) | ||
1961 | goto nla_put_failure; | ||
1962 | nd_key = nla_data(nla); | ||
1963 | memcpy(nd_key->nd_target, &output->ipv6.nd.target, | ||
1964 | sizeof(nd_key->nd_target)); | ||
1965 | memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN); | ||
1966 | memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN); | ||
1967 | } | ||
1968 | } | ||
1969 | } | ||
1970 | |||
1971 | unencap: | ||
1972 | if (encap) | ||
1973 | nla_nest_end(skb, encap); | ||
1974 | |||
1975 | return 0; | ||
1976 | |||
1977 | nla_put_failure: | ||
1978 | return -EMSGSIZE; | ||
1979 | } | ||
1980 | |||
1981 | /* Initializes the flow module. | ||
1982 | * Returns zero if successful or a negative error code. */ | ||
1983 | int ovs_flow_init(void) | ||
1984 | { | ||
1985 | BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long)); | ||
1986 | BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); | ||
1987 | |||
1988 | flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, | ||
1989 | 0, NULL); | ||
1990 | if (flow_cache == NULL) | ||
1991 | return -ENOMEM; | ||
1992 | |||
1993 | return 0; | ||
1994 | } | ||
1995 | |||
/* Uninitializes the flow module: releases the kmem cache created by
 * ovs_flow_init().  All flows must already have been freed. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}
2001 | |||
2002 | struct sw_flow_mask *ovs_sw_flow_mask_alloc(void) | ||
2003 | { | ||
2004 | struct sw_flow_mask *mask; | ||
2005 | |||
2006 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); | ||
2007 | if (mask) | ||
2008 | mask->ref_count = 0; | ||
2009 | |||
2010 | return mask; | ||
2011 | } | ||
2012 | |||
/* Take an additional reference on @mask.
 * NOTE(review): ref_count is a plain int, so callers are presumably
 * serialized by an external lock -- confirm before using elsewhere. */
void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
{
	mask->ref_count++;
}
2017 | |||
2018 | void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred) | ||
2019 | { | ||
2020 | if (!mask) | ||
2021 | return; | ||
2022 | |||
2023 | BUG_ON(!mask->ref_count); | ||
2024 | mask->ref_count--; | ||
2025 | |||
2026 | if (!mask->ref_count) { | ||
2027 | list_del_rcu(&mask->list); | ||
2028 | if (deferred) | ||
2029 | kfree_rcu(mask, rcu); | ||
2030 | else | ||
2031 | kfree(mask); | ||
2032 | } | ||
2033 | } | ||
2034 | |||
2035 | static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a, | ||
2036 | const struct sw_flow_mask *b) | ||
2037 | { | ||
2038 | u8 *a_ = (u8 *)&a->key + a->range.start; | ||
2039 | u8 *b_ = (u8 *)&b->key + b->range.start; | ||
2040 | |||
2041 | return (a->range.end == b->range.end) | ||
2042 | && (a->range.start == b->range.start) | ||
2043 | && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0); | ||
2044 | } | ||
2045 | |||
2046 | struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl, | ||
2047 | const struct sw_flow_mask *mask) | ||
2048 | { | ||
2049 | struct list_head *ml; | ||
2050 | |||
2051 | list_for_each(ml, tbl->mask_list) { | ||
2052 | struct sw_flow_mask *m; | ||
2053 | m = container_of(ml, struct sw_flow_mask, list); | ||
2054 | if (ovs_sw_flow_mask_equal(mask, m)) | ||
2055 | return m; | ||
2056 | } | ||
2057 | |||
2058 | return NULL; | ||
2059 | } | ||
2060 | |||
/**
 * Add a new mask to the table's mask list.
 * The caller needs to make sure that 'mask' is not the same
 * as any masks that are already on the list; RCU readers may be
 * traversing the list concurrently (hence list_add_rcu).
 */
void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	list_add_rcu(&mask->list, tbl->mask_list);
}
2070 | |||
/**
 * Set 'range' fields in the mask to the value of 'val'.
 * Also records @range as the mask's active range, so subsequent
 * comparisons (ovs_sw_flow_mask_equal) only consider those bytes.
 */
static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
		struct sw_flow_key_range *range, u8 val)
{
	u8 *m = (u8 *)&mask->key + range->start;

	mask->range = *range;
	memset(m, val, range_n_bytes(range));
}
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 212fbf7510c4..098fd1db6a23 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h | |||
@@ -33,14 +33,6 @@ | |||
33 | #include <net/inet_ecn.h> | 33 | #include <net/inet_ecn.h> |
34 | 34 | ||
35 | struct sk_buff; | 35 | struct sk_buff; |
36 | struct sw_flow_mask; | ||
37 | struct flow_table; | ||
38 | |||
39 | struct sw_flow_actions { | ||
40 | struct rcu_head rcu; | ||
41 | u32 actions_len; | ||
42 | struct nlattr actions[]; | ||
43 | }; | ||
44 | 36 | ||
45 | /* Used to memset ovs_key_ipv4_tunnel padding. */ | 37 | /* Used to memset ovs_key_ipv4_tunnel padding. */ |
46 | #define OVS_TUNNEL_KEY_SIZE \ | 38 | #define OVS_TUNNEL_KEY_SIZE \ |
@@ -127,6 +119,31 @@ struct sw_flow_key { | |||
127 | }; | 119 | }; |
128 | } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */ | 120 | } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */ |
129 | 121 | ||
122 | struct sw_flow_key_range { | ||
123 | size_t start; | ||
124 | size_t end; | ||
125 | }; | ||
126 | |||
127 | struct sw_flow_mask { | ||
128 | int ref_count; | ||
129 | struct rcu_head rcu; | ||
130 | struct list_head list; | ||
131 | struct sw_flow_key_range range; | ||
132 | struct sw_flow_key key; | ||
133 | }; | ||
134 | |||
135 | struct sw_flow_match { | ||
136 | struct sw_flow_key *key; | ||
137 | struct sw_flow_key_range range; | ||
138 | struct sw_flow_mask *mask; | ||
139 | }; | ||
140 | |||
141 | struct sw_flow_actions { | ||
142 | struct rcu_head rcu; | ||
143 | u32 actions_len; | ||
144 | struct nlattr actions[]; | ||
145 | }; | ||
146 | |||
130 | struct sw_flow { | 147 | struct sw_flow { |
131 | struct rcu_head rcu; | 148 | struct rcu_head rcu; |
132 | struct hlist_node hash_node[2]; | 149 | struct hlist_node hash_node[2]; |
@@ -144,20 +161,6 @@ struct sw_flow { | |||
144 | u8 tcp_flags; /* Union of seen TCP flags. */ | 161 | u8 tcp_flags; /* Union of seen TCP flags. */ |
145 | }; | 162 | }; |
146 | 163 | ||
147 | struct sw_flow_key_range { | ||
148 | size_t start; | ||
149 | size_t end; | ||
150 | }; | ||
151 | |||
152 | struct sw_flow_match { | ||
153 | struct sw_flow_key *key; | ||
154 | struct sw_flow_key_range range; | ||
155 | struct sw_flow_mask *mask; | ||
156 | }; | ||
157 | |||
158 | void ovs_match_init(struct sw_flow_match *match, | ||
159 | struct sw_flow_key *key, struct sw_flow_mask *mask); | ||
160 | |||
161 | struct arp_eth_header { | 164 | struct arp_eth_header { |
162 | __be16 ar_hrd; /* format of hardware address */ | 165 | __be16 ar_hrd; /* format of hardware address */ |
163 | __be16 ar_pro; /* format of protocol address */ | 166 | __be16 ar_pro; /* format of protocol address */ |
@@ -172,88 +175,9 @@ struct arp_eth_header { | |||
172 | unsigned char ar_tip[4]; /* target IP address */ | 175 | unsigned char ar_tip[4]; /* target IP address */ |
173 | } __packed; | 176 | } __packed; |
174 | 177 | ||
175 | int ovs_flow_init(void); | ||
176 | void ovs_flow_exit(void); | ||
177 | |||
178 | struct sw_flow *ovs_flow_alloc(void); | ||
179 | void ovs_flow_deferred_free(struct sw_flow *); | ||
180 | void ovs_flow_free(struct sw_flow *, bool deferred); | ||
181 | |||
182 | struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len); | ||
183 | void ovs_flow_deferred_free_acts(struct sw_flow_actions *); | ||
184 | |||
185 | int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *); | ||
186 | void ovs_flow_used(struct sw_flow *, struct sk_buff *); | 178 | void ovs_flow_used(struct sw_flow *, struct sk_buff *); |
187 | u64 ovs_flow_used_time(unsigned long flow_jiffies); | 179 | u64 ovs_flow_used_time(unsigned long flow_jiffies); |
188 | int ovs_flow_to_nlattrs(const struct sw_flow_key *, | ||
189 | const struct sw_flow_key *, struct sk_buff *); | ||
190 | int ovs_match_from_nlattrs(struct sw_flow_match *match, | ||
191 | const struct nlattr *, | ||
192 | const struct nlattr *); | ||
193 | int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, | ||
194 | const struct nlattr *attr); | ||
195 | 180 | ||
196 | #define MAX_ACTIONS_BUFSIZE (32 * 1024) | 181 | int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *); |
197 | #define TBL_MIN_BUCKETS 1024 | ||
198 | |||
199 | struct flow_table { | ||
200 | struct flex_array *buckets; | ||
201 | unsigned int count, n_buckets; | ||
202 | struct rcu_head rcu; | ||
203 | struct list_head *mask_list; | ||
204 | int node_ver; | ||
205 | u32 hash_seed; | ||
206 | bool keep_flows; | ||
207 | }; | ||
208 | |||
209 | static inline int ovs_flow_tbl_count(struct flow_table *table) | ||
210 | { | ||
211 | return table->count; | ||
212 | } | ||
213 | |||
214 | static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table) | ||
215 | { | ||
216 | return (table->count > table->n_buckets); | ||
217 | } | ||
218 | |||
219 | struct sw_flow *ovs_flow_lookup(struct flow_table *, | ||
220 | const struct sw_flow_key *); | ||
221 | struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table, | ||
222 | struct sw_flow_match *match); | ||
223 | |||
224 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred); | ||
225 | struct flow_table *ovs_flow_tbl_alloc(int new_size); | ||
226 | struct flow_table *ovs_flow_tbl_expand(struct flow_table *table); | ||
227 | struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table); | ||
228 | |||
229 | void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow); | ||
230 | void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow); | ||
231 | |||
232 | struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *idx); | ||
233 | extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1]; | ||
234 | int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, | ||
235 | struct sw_flow_match *match, bool is_mask); | ||
236 | int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, | ||
237 | const struct ovs_key_ipv4_tunnel *tun_key, | ||
238 | const struct ovs_key_ipv4_tunnel *output); | ||
239 | |||
240 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
241 | const struct sw_flow_key *key, int key_end); | ||
242 | |||
243 | struct sw_flow_mask { | ||
244 | int ref_count; | ||
245 | struct rcu_head rcu; | ||
246 | struct list_head list; | ||
247 | struct sw_flow_key_range range; | ||
248 | struct sw_flow_key key; | ||
249 | }; | ||
250 | 182 | ||
251 | struct sw_flow_mask *ovs_sw_flow_mask_alloc(void); | ||
252 | void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *); | ||
253 | void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred); | ||
254 | void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *); | ||
255 | struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *, | ||
256 | const struct sw_flow_mask *); | ||
257 | void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src, | ||
258 | const struct sw_flow_mask *mask); | ||
259 | #endif /* flow.h */ | 183 | #endif /* flow.h */ |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c new file mode 100644 index 000000000000..e04649c56a96 --- /dev/null +++ b/net/openvswitch/flow_netlink.c | |||
@@ -0,0 +1,1603 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007-2013 Nicira, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of version 2 of the GNU General Public | ||
6 | * License as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
16 | * 02110-1301, USA | ||
17 | */ | ||
18 | |||
19 | #include "flow.h" | ||
20 | #include "datapath.h" | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/etherdevice.h> | ||
24 | #include <linux/if_ether.h> | ||
25 | #include <linux/if_vlan.h> | ||
26 | #include <net/llc_pdu.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/jhash.h> | ||
29 | #include <linux/jiffies.h> | ||
30 | #include <linux/llc.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/in.h> | ||
33 | #include <linux/rcupdate.h> | ||
34 | #include <linux/if_arp.h> | ||
35 | #include <linux/ip.h> | ||
36 | #include <linux/ipv6.h> | ||
37 | #include <linux/sctp.h> | ||
38 | #include <linux/tcp.h> | ||
39 | #include <linux/udp.h> | ||
40 | #include <linux/icmp.h> | ||
41 | #include <linux/icmpv6.h> | ||
42 | #include <linux/rculist.h> | ||
43 | #include <net/ip.h> | ||
44 | #include <net/ipv6.h> | ||
45 | #include <net/ndisc.h> | ||
46 | |||
47 | #include "flow_netlink.h" | ||
48 | |||
49 | static void update_range__(struct sw_flow_match *match, | ||
50 | size_t offset, size_t size, bool is_mask) | ||
51 | { | ||
52 | struct sw_flow_key_range *range = NULL; | ||
53 | size_t start = rounddown(offset, sizeof(long)); | ||
54 | size_t end = roundup(offset + size, sizeof(long)); | ||
55 | |||
56 | if (!is_mask) | ||
57 | range = &match->range; | ||
58 | else if (match->mask) | ||
59 | range = &match->mask->range; | ||
60 | |||
61 | if (!range) | ||
62 | return; | ||
63 | |||
64 | if (range->start == range->end) { | ||
65 | range->start = start; | ||
66 | range->end = end; | ||
67 | return; | ||
68 | } | ||
69 | |||
70 | if (range->start > start) | ||
71 | range->start = start; | ||
72 | |||
73 | if (range->end < end) | ||
74 | range->end = end; | ||
75 | } | ||
76 | |||
77 | #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ | ||
78 | do { \ | ||
79 | update_range__(match, offsetof(struct sw_flow_key, field), \ | ||
80 | sizeof((match)->key->field), is_mask); \ | ||
81 | if (is_mask) { \ | ||
82 | if ((match)->mask) \ | ||
83 | (match)->mask->key.field = value; \ | ||
84 | } else { \ | ||
85 | (match)->key->field = value; \ | ||
86 | } \ | ||
87 | } while (0) | ||
88 | |||
89 | #define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \ | ||
90 | do { \ | ||
91 | update_range__(match, offsetof(struct sw_flow_key, field), \ | ||
92 | len, is_mask); \ | ||
93 | if (is_mask) { \ | ||
94 | if ((match)->mask) \ | ||
95 | memcpy(&(match)->mask->key.field, value_p, len);\ | ||
96 | } else { \ | ||
97 | memcpy(&(match)->key->field, value_p, len); \ | ||
98 | } \ | ||
99 | } while (0) | ||
100 | |||
101 | static u16 range_n_bytes(const struct sw_flow_key_range *range) | ||
102 | { | ||
103 | return range->end - range->start; | ||
104 | } | ||
105 | |||
106 | static bool match_validate(const struct sw_flow_match *match, | ||
107 | u64 key_attrs, u64 mask_attrs) | ||
108 | { | ||
109 | u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET; | ||
110 | u64 mask_allowed = key_attrs; /* At most allow all key attributes */ | ||
111 | |||
112 | /* The following mask attributes allowed only if they | ||
113 | * pass the validation tests. */ | ||
114 | mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4) | ||
115 | | (1 << OVS_KEY_ATTR_IPV6) | ||
116 | | (1 << OVS_KEY_ATTR_TCP) | ||
117 | | (1 << OVS_KEY_ATTR_UDP) | ||
118 | | (1 << OVS_KEY_ATTR_SCTP) | ||
119 | | (1 << OVS_KEY_ATTR_ICMP) | ||
120 | | (1 << OVS_KEY_ATTR_ICMPV6) | ||
121 | | (1 << OVS_KEY_ATTR_ARP) | ||
122 | | (1 << OVS_KEY_ATTR_ND)); | ||
123 | |||
124 | /* Always allowed mask fields. */ | ||
125 | mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL) | ||
126 | | (1 << OVS_KEY_ATTR_IN_PORT) | ||
127 | | (1 << OVS_KEY_ATTR_ETHERTYPE)); | ||
128 | |||
129 | /* Check key attributes. */ | ||
130 | if (match->key->eth.type == htons(ETH_P_ARP) | ||
131 | || match->key->eth.type == htons(ETH_P_RARP)) { | ||
132 | key_expected |= 1 << OVS_KEY_ATTR_ARP; | ||
133 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
134 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; | ||
135 | } | ||
136 | |||
137 | if (match->key->eth.type == htons(ETH_P_IP)) { | ||
138 | key_expected |= 1 << OVS_KEY_ATTR_IPV4; | ||
139 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
140 | mask_allowed |= 1 << OVS_KEY_ATTR_IPV4; | ||
141 | |||
142 | if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
143 | if (match->key->ip.proto == IPPROTO_UDP) { | ||
144 | key_expected |= 1 << OVS_KEY_ATTR_UDP; | ||
145 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
146 | mask_allowed |= 1 << OVS_KEY_ATTR_UDP; | ||
147 | } | ||
148 | |||
149 | if (match->key->ip.proto == IPPROTO_SCTP) { | ||
150 | key_expected |= 1 << OVS_KEY_ATTR_SCTP; | ||
151 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
152 | mask_allowed |= 1 << OVS_KEY_ATTR_SCTP; | ||
153 | } | ||
154 | |||
155 | if (match->key->ip.proto == IPPROTO_TCP) { | ||
156 | key_expected |= 1 << OVS_KEY_ATTR_TCP; | ||
157 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
158 | mask_allowed |= 1 << OVS_KEY_ATTR_TCP; | ||
159 | } | ||
160 | |||
161 | if (match->key->ip.proto == IPPROTO_ICMP) { | ||
162 | key_expected |= 1 << OVS_KEY_ATTR_ICMP; | ||
163 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
164 | mask_allowed |= 1 << OVS_KEY_ATTR_ICMP; | ||
165 | } | ||
166 | } | ||
167 | } | ||
168 | |||
169 | if (match->key->eth.type == htons(ETH_P_IPV6)) { | ||
170 | key_expected |= 1 << OVS_KEY_ATTR_IPV6; | ||
171 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
172 | mask_allowed |= 1 << OVS_KEY_ATTR_IPV6; | ||
173 | |||
174 | if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
175 | if (match->key->ip.proto == IPPROTO_UDP) { | ||
176 | key_expected |= 1 << OVS_KEY_ATTR_UDP; | ||
177 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
178 | mask_allowed |= 1 << OVS_KEY_ATTR_UDP; | ||
179 | } | ||
180 | |||
181 | if (match->key->ip.proto == IPPROTO_SCTP) { | ||
182 | key_expected |= 1 << OVS_KEY_ATTR_SCTP; | ||
183 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
184 | mask_allowed |= 1 << OVS_KEY_ATTR_SCTP; | ||
185 | } | ||
186 | |||
187 | if (match->key->ip.proto == IPPROTO_TCP) { | ||
188 | key_expected |= 1 << OVS_KEY_ATTR_TCP; | ||
189 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
190 | mask_allowed |= 1 << OVS_KEY_ATTR_TCP; | ||
191 | } | ||
192 | |||
193 | if (match->key->ip.proto == IPPROTO_ICMPV6) { | ||
194 | key_expected |= 1 << OVS_KEY_ATTR_ICMPV6; | ||
195 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
196 | mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6; | ||
197 | |||
198 | if (match->key->ipv6.tp.src == | ||
199 | htons(NDISC_NEIGHBOUR_SOLICITATION) || | ||
200 | match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { | ||
201 | key_expected |= 1 << OVS_KEY_ATTR_ND; | ||
202 | if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff))) | ||
203 | mask_allowed |= 1 << OVS_KEY_ATTR_ND; | ||
204 | } | ||
205 | } | ||
206 | } | ||
207 | } | ||
208 | |||
209 | if ((key_attrs & key_expected) != key_expected) { | ||
210 | /* Key attributes check failed. */ | ||
211 | OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n", | ||
212 | key_attrs, key_expected); | ||
213 | return false; | ||
214 | } | ||
215 | |||
216 | if ((mask_attrs & mask_allowed) != mask_attrs) { | ||
217 | /* Mask attributes check failed. */ | ||
218 | OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n", | ||
219 | mask_attrs, mask_allowed); | ||
220 | return false; | ||
221 | } | ||
222 | |||
223 | return true; | ||
224 | } | ||
225 | |||
226 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ | ||
227 | static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { | ||
228 | [OVS_KEY_ATTR_ENCAP] = -1, | ||
229 | [OVS_KEY_ATTR_PRIORITY] = sizeof(u32), | ||
230 | [OVS_KEY_ATTR_IN_PORT] = sizeof(u32), | ||
231 | [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32), | ||
232 | [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet), | ||
233 | [OVS_KEY_ATTR_VLAN] = sizeof(__be16), | ||
234 | [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16), | ||
235 | [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4), | ||
236 | [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6), | ||
237 | [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp), | ||
238 | [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp), | ||
239 | [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp), | ||
240 | [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp), | ||
241 | [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), | ||
242 | [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), | ||
243 | [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd), | ||
244 | [OVS_KEY_ATTR_TUNNEL] = -1, | ||
245 | }; | ||
246 | |||
247 | static bool is_all_zero(const u8 *fp, size_t size) | ||
248 | { | ||
249 | int i; | ||
250 | |||
251 | if (!fp) | ||
252 | return false; | ||
253 | |||
254 | for (i = 0; i < size; i++) | ||
255 | if (fp[i]) | ||
256 | return false; | ||
257 | |||
258 | return true; | ||
259 | } | ||
260 | |||
261 | static int __parse_flow_nlattrs(const struct nlattr *attr, | ||
262 | const struct nlattr *a[], | ||
263 | u64 *attrsp, bool nz) | ||
264 | { | ||
265 | const struct nlattr *nla; | ||
266 | u64 attrs; | ||
267 | int rem; | ||
268 | |||
269 | attrs = *attrsp; | ||
270 | nla_for_each_nested(nla, attr, rem) { | ||
271 | u16 type = nla_type(nla); | ||
272 | int expected_len; | ||
273 | |||
274 | if (type > OVS_KEY_ATTR_MAX) { | ||
275 | OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n", | ||
276 | type, OVS_KEY_ATTR_MAX); | ||
277 | return -EINVAL; | ||
278 | } | ||
279 | |||
280 | if (attrs & (1 << type)) { | ||
281 | OVS_NLERR("Duplicate key attribute (type %d).\n", type); | ||
282 | return -EINVAL; | ||
283 | } | ||
284 | |||
285 | expected_len = ovs_key_lens[type]; | ||
286 | if (nla_len(nla) != expected_len && expected_len != -1) { | ||
287 | OVS_NLERR("Key attribute has unexpected length (type=%d" | ||
288 | ", length=%d, expected=%d).\n", type, | ||
289 | nla_len(nla), expected_len); | ||
290 | return -EINVAL; | ||
291 | } | ||
292 | |||
293 | if (!nz || !is_all_zero(nla_data(nla), expected_len)) { | ||
294 | attrs |= 1 << type; | ||
295 | a[type] = nla; | ||
296 | } | ||
297 | } | ||
298 | if (rem) { | ||
299 | OVS_NLERR("Message has %d unknown bytes.\n", rem); | ||
300 | return -EINVAL; | ||
301 | } | ||
302 | |||
303 | *attrsp = attrs; | ||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static int parse_flow_mask_nlattrs(const struct nlattr *attr, | ||
308 | const struct nlattr *a[], u64 *attrsp) | ||
309 | { | ||
310 | return __parse_flow_nlattrs(attr, a, attrsp, true); | ||
311 | } | ||
312 | |||
313 | static int parse_flow_nlattrs(const struct nlattr *attr, | ||
314 | const struct nlattr *a[], u64 *attrsp) | ||
315 | { | ||
316 | return __parse_flow_nlattrs(attr, a, attrsp, false); | ||
317 | } | ||
318 | |||
319 | static int ipv4_tun_from_nlattr(const struct nlattr *attr, | ||
320 | struct sw_flow_match *match, bool is_mask) | ||
321 | { | ||
322 | struct nlattr *a; | ||
323 | int rem; | ||
324 | bool ttl = false; | ||
325 | __be16 tun_flags = 0; | ||
326 | |||
327 | nla_for_each_nested(a, attr, rem) { | ||
328 | int type = nla_type(a); | ||
329 | static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { | ||
330 | [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64), | ||
331 | [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32), | ||
332 | [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32), | ||
333 | [OVS_TUNNEL_KEY_ATTR_TOS] = 1, | ||
334 | [OVS_TUNNEL_KEY_ATTR_TTL] = 1, | ||
335 | [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0, | ||
336 | [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, | ||
337 | }; | ||
338 | |||
339 | if (type > OVS_TUNNEL_KEY_ATTR_MAX) { | ||
340 | OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n", | ||
341 | type, OVS_TUNNEL_KEY_ATTR_MAX); | ||
342 | return -EINVAL; | ||
343 | } | ||
344 | |||
345 | if (ovs_tunnel_key_lens[type] != nla_len(a)) { | ||
346 | OVS_NLERR("IPv4 tunnel attribute type has unexpected " | ||
347 | " length (type=%d, length=%d, expected=%d).\n", | ||
348 | type, nla_len(a), ovs_tunnel_key_lens[type]); | ||
349 | return -EINVAL; | ||
350 | } | ||
351 | |||
352 | switch (type) { | ||
353 | case OVS_TUNNEL_KEY_ATTR_ID: | ||
354 | SW_FLOW_KEY_PUT(match, tun_key.tun_id, | ||
355 | nla_get_be64(a), is_mask); | ||
356 | tun_flags |= TUNNEL_KEY; | ||
357 | break; | ||
358 | case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: | ||
359 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_src, | ||
360 | nla_get_be32(a), is_mask); | ||
361 | break; | ||
362 | case OVS_TUNNEL_KEY_ATTR_IPV4_DST: | ||
363 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst, | ||
364 | nla_get_be32(a), is_mask); | ||
365 | break; | ||
366 | case OVS_TUNNEL_KEY_ATTR_TOS: | ||
367 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos, | ||
368 | nla_get_u8(a), is_mask); | ||
369 | break; | ||
370 | case OVS_TUNNEL_KEY_ATTR_TTL: | ||
371 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl, | ||
372 | nla_get_u8(a), is_mask); | ||
373 | ttl = true; | ||
374 | break; | ||
375 | case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: | ||
376 | tun_flags |= TUNNEL_DONT_FRAGMENT; | ||
377 | break; | ||
378 | case OVS_TUNNEL_KEY_ATTR_CSUM: | ||
379 | tun_flags |= TUNNEL_CSUM; | ||
380 | break; | ||
381 | default: | ||
382 | return -EINVAL; | ||
383 | } | ||
384 | } | ||
385 | |||
386 | SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask); | ||
387 | |||
388 | if (rem > 0) { | ||
389 | OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem); | ||
390 | return -EINVAL; | ||
391 | } | ||
392 | |||
393 | if (!is_mask) { | ||
394 | if (!match->key->tun_key.ipv4_dst) { | ||
395 | OVS_NLERR("IPv4 tunnel destination address is zero.\n"); | ||
396 | return -EINVAL; | ||
397 | } | ||
398 | |||
399 | if (!ttl) { | ||
400 | OVS_NLERR("IPv4 tunnel TTL not specified.\n"); | ||
401 | return -EINVAL; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | static int ipv4_tun_to_nlattr(struct sk_buff *skb, | ||
409 | const struct ovs_key_ipv4_tunnel *tun_key, | ||
410 | const struct ovs_key_ipv4_tunnel *output) | ||
411 | { | ||
412 | struct nlattr *nla; | ||
413 | |||
414 | nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL); | ||
415 | if (!nla) | ||
416 | return -EMSGSIZE; | ||
417 | |||
418 | if (output->tun_flags & TUNNEL_KEY && | ||
419 | nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id)) | ||
420 | return -EMSGSIZE; | ||
421 | if (output->ipv4_src && | ||
422 | nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src)) | ||
423 | return -EMSGSIZE; | ||
424 | if (output->ipv4_dst && | ||
425 | nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst)) | ||
426 | return -EMSGSIZE; | ||
427 | if (output->ipv4_tos && | ||
428 | nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos)) | ||
429 | return -EMSGSIZE; | ||
430 | if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl)) | ||
431 | return -EMSGSIZE; | ||
432 | if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) && | ||
433 | nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) | ||
434 | return -EMSGSIZE; | ||
435 | if ((output->tun_flags & TUNNEL_CSUM) && | ||
436 | nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) | ||
437 | return -EMSGSIZE; | ||
438 | |||
439 | nla_nest_end(skb, nla); | ||
440 | return 0; | ||
441 | } | ||
442 | |||
443 | |||
444 | static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, | ||
445 | const struct nlattr **a, bool is_mask) | ||
446 | { | ||
447 | if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) { | ||
448 | SW_FLOW_KEY_PUT(match, phy.priority, | ||
449 | nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask); | ||
450 | *attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY); | ||
451 | } | ||
452 | |||
453 | if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) { | ||
454 | u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]); | ||
455 | |||
456 | if (is_mask) | ||
457 | in_port = 0xffffffff; /* Always exact match in_port. */ | ||
458 | else if (in_port >= DP_MAX_PORTS) | ||
459 | return -EINVAL; | ||
460 | |||
461 | SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask); | ||
462 | *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); | ||
463 | } else if (!is_mask) { | ||
464 | SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask); | ||
465 | } | ||
466 | |||
467 | if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) { | ||
468 | uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]); | ||
469 | |||
470 | SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask); | ||
471 | *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK); | ||
472 | } | ||
473 | if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) { | ||
474 | if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match, | ||
475 | is_mask)) | ||
476 | return -EINVAL; | ||
477 | *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL); | ||
478 | } | ||
479 | return 0; | ||
480 | } | ||
481 | |||
482 | static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, | ||
483 | const struct nlattr **a, bool is_mask) | ||
484 | { | ||
485 | int err; | ||
486 | u64 orig_attrs = attrs; | ||
487 | |||
488 | err = metadata_from_nlattrs(match, &attrs, a, is_mask); | ||
489 | if (err) | ||
490 | return err; | ||
491 | |||
492 | if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) { | ||
493 | const struct ovs_key_ethernet *eth_key; | ||
494 | |||
495 | eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]); | ||
496 | SW_FLOW_KEY_MEMCPY(match, eth.src, | ||
497 | eth_key->eth_src, ETH_ALEN, is_mask); | ||
498 | SW_FLOW_KEY_MEMCPY(match, eth.dst, | ||
499 | eth_key->eth_dst, ETH_ALEN, is_mask); | ||
500 | attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET); | ||
501 | } | ||
502 | |||
503 | if (attrs & (1 << OVS_KEY_ATTR_VLAN)) { | ||
504 | __be16 tci; | ||
505 | |||
506 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); | ||
507 | if (!(tci & htons(VLAN_TAG_PRESENT))) { | ||
508 | if (is_mask) | ||
509 | OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n"); | ||
510 | else | ||
511 | OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n"); | ||
512 | |||
513 | return -EINVAL; | ||
514 | } | ||
515 | |||
516 | SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask); | ||
517 | attrs &= ~(1 << OVS_KEY_ATTR_VLAN); | ||
518 | } else if (!is_mask) | ||
519 | SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); | ||
520 | |||
521 | if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { | ||
522 | __be16 eth_type; | ||
523 | |||
524 | eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); | ||
525 | if (is_mask) { | ||
526 | /* Always exact match EtherType. */ | ||
527 | eth_type = htons(0xffff); | ||
528 | } else if (ntohs(eth_type) < ETH_P_802_3_MIN) { | ||
529 | OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n", | ||
530 | ntohs(eth_type), ETH_P_802_3_MIN); | ||
531 | return -EINVAL; | ||
532 | } | ||
533 | |||
534 | SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask); | ||
535 | attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); | ||
536 | } else if (!is_mask) { | ||
537 | SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask); | ||
538 | } | ||
539 | |||
540 | if (attrs & (1 << OVS_KEY_ATTR_IPV4)) { | ||
541 | const struct ovs_key_ipv4 *ipv4_key; | ||
542 | |||
543 | ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]); | ||
544 | if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) { | ||
545 | OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n", | ||
546 | ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX); | ||
547 | return -EINVAL; | ||
548 | } | ||
549 | SW_FLOW_KEY_PUT(match, ip.proto, | ||
550 | ipv4_key->ipv4_proto, is_mask); | ||
551 | SW_FLOW_KEY_PUT(match, ip.tos, | ||
552 | ipv4_key->ipv4_tos, is_mask); | ||
553 | SW_FLOW_KEY_PUT(match, ip.ttl, | ||
554 | ipv4_key->ipv4_ttl, is_mask); | ||
555 | SW_FLOW_KEY_PUT(match, ip.frag, | ||
556 | ipv4_key->ipv4_frag, is_mask); | ||
557 | SW_FLOW_KEY_PUT(match, ipv4.addr.src, | ||
558 | ipv4_key->ipv4_src, is_mask); | ||
559 | SW_FLOW_KEY_PUT(match, ipv4.addr.dst, | ||
560 | ipv4_key->ipv4_dst, is_mask); | ||
561 | attrs &= ~(1 << OVS_KEY_ATTR_IPV4); | ||
562 | } | ||
563 | |||
564 | if (attrs & (1 << OVS_KEY_ATTR_IPV6)) { | ||
565 | const struct ovs_key_ipv6 *ipv6_key; | ||
566 | |||
567 | ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]); | ||
568 | if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) { | ||
569 | OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n", | ||
570 | ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); | ||
571 | return -EINVAL; | ||
572 | } | ||
573 | SW_FLOW_KEY_PUT(match, ipv6.label, | ||
574 | ipv6_key->ipv6_label, is_mask); | ||
575 | SW_FLOW_KEY_PUT(match, ip.proto, | ||
576 | ipv6_key->ipv6_proto, is_mask); | ||
577 | SW_FLOW_KEY_PUT(match, ip.tos, | ||
578 | ipv6_key->ipv6_tclass, is_mask); | ||
579 | SW_FLOW_KEY_PUT(match, ip.ttl, | ||
580 | ipv6_key->ipv6_hlimit, is_mask); | ||
581 | SW_FLOW_KEY_PUT(match, ip.frag, | ||
582 | ipv6_key->ipv6_frag, is_mask); | ||
583 | SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src, | ||
584 | ipv6_key->ipv6_src, | ||
585 | sizeof(match->key->ipv6.addr.src), | ||
586 | is_mask); | ||
587 | SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst, | ||
588 | ipv6_key->ipv6_dst, | ||
589 | sizeof(match->key->ipv6.addr.dst), | ||
590 | is_mask); | ||
591 | |||
592 | attrs &= ~(1 << OVS_KEY_ATTR_IPV6); | ||
593 | } | ||
594 | |||
595 | if (attrs & (1 << OVS_KEY_ATTR_ARP)) { | ||
596 | const struct ovs_key_arp *arp_key; | ||
597 | |||
598 | arp_key = nla_data(a[OVS_KEY_ATTR_ARP]); | ||
599 | if (!is_mask && (arp_key->arp_op & htons(0xff00))) { | ||
600 | OVS_NLERR("Unknown ARP opcode (opcode=%d).\n", | ||
601 | arp_key->arp_op); | ||
602 | return -EINVAL; | ||
603 | } | ||
604 | |||
605 | SW_FLOW_KEY_PUT(match, ipv4.addr.src, | ||
606 | arp_key->arp_sip, is_mask); | ||
607 | SW_FLOW_KEY_PUT(match, ipv4.addr.dst, | ||
608 | arp_key->arp_tip, is_mask); | ||
609 | SW_FLOW_KEY_PUT(match, ip.proto, | ||
610 | ntohs(arp_key->arp_op), is_mask); | ||
611 | SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha, | ||
612 | arp_key->arp_sha, ETH_ALEN, is_mask); | ||
613 | SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha, | ||
614 | arp_key->arp_tha, ETH_ALEN, is_mask); | ||
615 | |||
616 | attrs &= ~(1 << OVS_KEY_ATTR_ARP); | ||
617 | } | ||
618 | |||
619 | if (attrs & (1 << OVS_KEY_ATTR_TCP)) { | ||
620 | const struct ovs_key_tcp *tcp_key; | ||
621 | |||
622 | tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); | ||
623 | if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { | ||
624 | SW_FLOW_KEY_PUT(match, ipv4.tp.src, | ||
625 | tcp_key->tcp_src, is_mask); | ||
626 | SW_FLOW_KEY_PUT(match, ipv4.tp.dst, | ||
627 | tcp_key->tcp_dst, is_mask); | ||
628 | } else { | ||
629 | SW_FLOW_KEY_PUT(match, ipv6.tp.src, | ||
630 | tcp_key->tcp_src, is_mask); | ||
631 | SW_FLOW_KEY_PUT(match, ipv6.tp.dst, | ||
632 | tcp_key->tcp_dst, is_mask); | ||
633 | } | ||
634 | attrs &= ~(1 << OVS_KEY_ATTR_TCP); | ||
635 | } | ||
636 | |||
637 | if (attrs & (1 << OVS_KEY_ATTR_UDP)) { | ||
638 | const struct ovs_key_udp *udp_key; | ||
639 | |||
640 | udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); | ||
641 | if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { | ||
642 | SW_FLOW_KEY_PUT(match, ipv4.tp.src, | ||
643 | udp_key->udp_src, is_mask); | ||
644 | SW_FLOW_KEY_PUT(match, ipv4.tp.dst, | ||
645 | udp_key->udp_dst, is_mask); | ||
646 | } else { | ||
647 | SW_FLOW_KEY_PUT(match, ipv6.tp.src, | ||
648 | udp_key->udp_src, is_mask); | ||
649 | SW_FLOW_KEY_PUT(match, ipv6.tp.dst, | ||
650 | udp_key->udp_dst, is_mask); | ||
651 | } | ||
652 | attrs &= ~(1 << OVS_KEY_ATTR_UDP); | ||
653 | } | ||
654 | |||
655 | if (attrs & (1 << OVS_KEY_ATTR_SCTP)) { | ||
656 | const struct ovs_key_sctp *sctp_key; | ||
657 | |||
658 | sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]); | ||
659 | if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { | ||
660 | SW_FLOW_KEY_PUT(match, ipv4.tp.src, | ||
661 | sctp_key->sctp_src, is_mask); | ||
662 | SW_FLOW_KEY_PUT(match, ipv4.tp.dst, | ||
663 | sctp_key->sctp_dst, is_mask); | ||
664 | } else { | ||
665 | SW_FLOW_KEY_PUT(match, ipv6.tp.src, | ||
666 | sctp_key->sctp_src, is_mask); | ||
667 | SW_FLOW_KEY_PUT(match, ipv6.tp.dst, | ||
668 | sctp_key->sctp_dst, is_mask); | ||
669 | } | ||
670 | attrs &= ~(1 << OVS_KEY_ATTR_SCTP); | ||
671 | } | ||
672 | |||
673 | if (attrs & (1 << OVS_KEY_ATTR_ICMP)) { | ||
674 | const struct ovs_key_icmp *icmp_key; | ||
675 | |||
676 | icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]); | ||
677 | SW_FLOW_KEY_PUT(match, ipv4.tp.src, | ||
678 | htons(icmp_key->icmp_type), is_mask); | ||
679 | SW_FLOW_KEY_PUT(match, ipv4.tp.dst, | ||
680 | htons(icmp_key->icmp_code), is_mask); | ||
681 | attrs &= ~(1 << OVS_KEY_ATTR_ICMP); | ||
682 | } | ||
683 | |||
684 | if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) { | ||
685 | const struct ovs_key_icmpv6 *icmpv6_key; | ||
686 | |||
687 | icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]); | ||
688 | SW_FLOW_KEY_PUT(match, ipv6.tp.src, | ||
689 | htons(icmpv6_key->icmpv6_type), is_mask); | ||
690 | SW_FLOW_KEY_PUT(match, ipv6.tp.dst, | ||
691 | htons(icmpv6_key->icmpv6_code), is_mask); | ||
692 | attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6); | ||
693 | } | ||
694 | |||
695 | if (attrs & (1 << OVS_KEY_ATTR_ND)) { | ||
696 | const struct ovs_key_nd *nd_key; | ||
697 | |||
698 | nd_key = nla_data(a[OVS_KEY_ATTR_ND]); | ||
699 | SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target, | ||
700 | nd_key->nd_target, | ||
701 | sizeof(match->key->ipv6.nd.target), | ||
702 | is_mask); | ||
703 | SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll, | ||
704 | nd_key->nd_sll, ETH_ALEN, is_mask); | ||
705 | SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll, | ||
706 | nd_key->nd_tll, ETH_ALEN, is_mask); | ||
707 | attrs &= ~(1 << OVS_KEY_ATTR_ND); | ||
708 | } | ||
709 | |||
710 | if (attrs != 0) | ||
711 | return -EINVAL; | ||
712 | |||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | static void sw_flow_mask_set(struct sw_flow_mask *mask, | ||
717 | struct sw_flow_key_range *range, u8 val) | ||
718 | { | ||
719 | u8 *m = (u8 *)&mask->key + range->start; | ||
720 | |||
721 | mask->range = *range; | ||
722 | memset(m, val, range_n_bytes(range)); | ||
723 | } | ||
724 | |||
725 | /** | ||
726 | * ovs_nla_get_match - parses Netlink attributes into a flow key and | ||
727 | * mask. In case the 'mask' is NULL, the flow is treated as exact match | ||
728 | * flow. Otherwise, it is treated as a wildcarded flow, except the mask | ||
729 | * does not include any don't care bit. | ||
730 | * @match: receives the extracted flow match information. | ||
731 | * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute | ||
732 | * sequence. The fields should of the packet that triggered the creation | ||
733 | * of this flow. | ||
734 | * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink | ||
735 | * attribute specifies the mask field of the wildcarded flow. | ||
736 | */ | ||
737 | int ovs_nla_get_match(struct sw_flow_match *match, | ||
738 | const struct nlattr *key, | ||
739 | const struct nlattr *mask) | ||
740 | { | ||
741 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; | ||
742 | const struct nlattr *encap; | ||
743 | u64 key_attrs = 0; | ||
744 | u64 mask_attrs = 0; | ||
745 | bool encap_valid = false; | ||
746 | int err; | ||
747 | |||
748 | err = parse_flow_nlattrs(key, a, &key_attrs); | ||
749 | if (err) | ||
750 | return err; | ||
751 | |||
752 | if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) && | ||
753 | (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) && | ||
754 | (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) { | ||
755 | __be16 tci; | ||
756 | |||
757 | if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) && | ||
758 | (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) { | ||
759 | OVS_NLERR("Invalid Vlan frame.\n"); | ||
760 | return -EINVAL; | ||
761 | } | ||
762 | |||
763 | key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); | ||
764 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); | ||
765 | encap = a[OVS_KEY_ATTR_ENCAP]; | ||
766 | key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); | ||
767 | encap_valid = true; | ||
768 | |||
769 | if (tci & htons(VLAN_TAG_PRESENT)) { | ||
770 | err = parse_flow_nlattrs(encap, a, &key_attrs); | ||
771 | if (err) | ||
772 | return err; | ||
773 | } else if (!tci) { | ||
774 | /* Corner case for truncated 802.1Q header. */ | ||
775 | if (nla_len(encap)) { | ||
776 | OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n"); | ||
777 | return -EINVAL; | ||
778 | } | ||
779 | } else { | ||
780 | OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n"); | ||
781 | return -EINVAL; | ||
782 | } | ||
783 | } | ||
784 | |||
785 | err = ovs_key_from_nlattrs(match, key_attrs, a, false); | ||
786 | if (err) | ||
787 | return err; | ||
788 | |||
789 | if (mask) { | ||
790 | err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); | ||
791 | if (err) | ||
792 | return err; | ||
793 | |||
794 | if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) { | ||
795 | __be16 eth_type = 0; | ||
796 | __be16 tci = 0; | ||
797 | |||
798 | if (!encap_valid) { | ||
799 | OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n"); | ||
800 | return -EINVAL; | ||
801 | } | ||
802 | |||
803 | mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); | ||
804 | if (a[OVS_KEY_ATTR_ETHERTYPE]) | ||
805 | eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); | ||
806 | |||
807 | if (eth_type == htons(0xffff)) { | ||
808 | mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); | ||
809 | encap = a[OVS_KEY_ATTR_ENCAP]; | ||
810 | err = parse_flow_mask_nlattrs(encap, a, &mask_attrs); | ||
811 | } else { | ||
812 | OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n", | ||
813 | ntohs(eth_type)); | ||
814 | return -EINVAL; | ||
815 | } | ||
816 | |||
817 | if (a[OVS_KEY_ATTR_VLAN]) | ||
818 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); | ||
819 | |||
820 | if (!(tci & htons(VLAN_TAG_PRESENT))) { | ||
821 | OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci)); | ||
822 | return -EINVAL; | ||
823 | } | ||
824 | } | ||
825 | |||
826 | err = ovs_key_from_nlattrs(match, mask_attrs, a, true); | ||
827 | if (err) | ||
828 | return err; | ||
829 | } else { | ||
830 | /* Populate exact match flow's key mask. */ | ||
831 | if (match->mask) | ||
832 | sw_flow_mask_set(match->mask, &match->range, 0xff); | ||
833 | } | ||
834 | |||
835 | if (!match_validate(match, key_attrs, mask_attrs)) | ||
836 | return -EINVAL; | ||
837 | |||
838 | return 0; | ||
839 | } | ||
840 | |||
841 | /** | ||
842 | * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key. | ||
843 | * @flow: Receives extracted in_port, priority, tun_key and skb_mark. | ||
844 | * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute | ||
845 | * sequence. | ||
846 | * | ||
847 | * This parses a series of Netlink attributes that form a flow key, which must | ||
848 | * take the same form accepted by flow_from_nlattrs(), but only enough of it to | ||
849 | * get the metadata, that is, the parts of the flow key that cannot be | ||
850 | * extracted from the packet itself. | ||
851 | */ | ||
852 | |||
853 | int ovs_nla_get_flow_metadata(struct sw_flow *flow, | ||
854 | const struct nlattr *attr) | ||
855 | { | ||
856 | struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; | ||
857 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; | ||
858 | u64 attrs = 0; | ||
859 | int err; | ||
860 | struct sw_flow_match match; | ||
861 | |||
862 | flow->key.phy.in_port = DP_MAX_PORTS; | ||
863 | flow->key.phy.priority = 0; | ||
864 | flow->key.phy.skb_mark = 0; | ||
865 | memset(tun_key, 0, sizeof(flow->key.tun_key)); | ||
866 | |||
867 | err = parse_flow_nlattrs(attr, a, &attrs); | ||
868 | if (err) | ||
869 | return -EINVAL; | ||
870 | |||
871 | memset(&match, 0, sizeof(match)); | ||
872 | match.key = &flow->key; | ||
873 | |||
874 | err = metadata_from_nlattrs(&match, &attrs, a, false); | ||
875 | if (err) | ||
876 | return err; | ||
877 | |||
878 | return 0; | ||
879 | } | ||
880 | |||
881 | int ovs_nla_put_flow(const struct sw_flow_key *swkey, | ||
882 | const struct sw_flow_key *output, struct sk_buff *skb) | ||
883 | { | ||
884 | struct ovs_key_ethernet *eth_key; | ||
885 | struct nlattr *nla, *encap; | ||
886 | bool is_mask = (swkey != output); | ||
887 | |||
888 | if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority)) | ||
889 | goto nla_put_failure; | ||
890 | |||
891 | if ((swkey->tun_key.ipv4_dst || is_mask) && | ||
892 | ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key)) | ||
893 | goto nla_put_failure; | ||
894 | |||
895 | if (swkey->phy.in_port == DP_MAX_PORTS) { | ||
896 | if (is_mask && (output->phy.in_port == 0xffff)) | ||
897 | if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff)) | ||
898 | goto nla_put_failure; | ||
899 | } else { | ||
900 | u16 upper_u16; | ||
901 | upper_u16 = !is_mask ? 0 : 0xffff; | ||
902 | |||
903 | if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, | ||
904 | (upper_u16 << 16) | output->phy.in_port)) | ||
905 | goto nla_put_failure; | ||
906 | } | ||
907 | |||
908 | if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark)) | ||
909 | goto nla_put_failure; | ||
910 | |||
911 | nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); | ||
912 | if (!nla) | ||
913 | goto nla_put_failure; | ||
914 | |||
915 | eth_key = nla_data(nla); | ||
916 | memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN); | ||
917 | memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN); | ||
918 | |||
919 | if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { | ||
920 | __be16 eth_type; | ||
921 | eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff); | ||
922 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) || | ||
923 | nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci)) | ||
924 | goto nla_put_failure; | ||
925 | encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); | ||
926 | if (!swkey->eth.tci) | ||
927 | goto unencap; | ||
928 | } else | ||
929 | encap = NULL; | ||
930 | |||
931 | if (swkey->eth.type == htons(ETH_P_802_2)) { | ||
932 | /* | ||
933 | * Ethertype 802.2 is represented in the netlink with omitted | ||
934 | * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and | ||
935 | * 0xffff in the mask attribute. Ethertype can also | ||
936 | * be wildcarded. | ||
937 | */ | ||
938 | if (is_mask && output->eth.type) | ||
939 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, | ||
940 | output->eth.type)) | ||
941 | goto nla_put_failure; | ||
942 | goto unencap; | ||
943 | } | ||
944 | |||
945 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type)) | ||
946 | goto nla_put_failure; | ||
947 | |||
948 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
949 | struct ovs_key_ipv4 *ipv4_key; | ||
950 | |||
951 | nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key)); | ||
952 | if (!nla) | ||
953 | goto nla_put_failure; | ||
954 | ipv4_key = nla_data(nla); | ||
955 | ipv4_key->ipv4_src = output->ipv4.addr.src; | ||
956 | ipv4_key->ipv4_dst = output->ipv4.addr.dst; | ||
957 | ipv4_key->ipv4_proto = output->ip.proto; | ||
958 | ipv4_key->ipv4_tos = output->ip.tos; | ||
959 | ipv4_key->ipv4_ttl = output->ip.ttl; | ||
960 | ipv4_key->ipv4_frag = output->ip.frag; | ||
961 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
962 | struct ovs_key_ipv6 *ipv6_key; | ||
963 | |||
964 | nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key)); | ||
965 | if (!nla) | ||
966 | goto nla_put_failure; | ||
967 | ipv6_key = nla_data(nla); | ||
968 | memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src, | ||
969 | sizeof(ipv6_key->ipv6_src)); | ||
970 | memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst, | ||
971 | sizeof(ipv6_key->ipv6_dst)); | ||
972 | ipv6_key->ipv6_label = output->ipv6.label; | ||
973 | ipv6_key->ipv6_proto = output->ip.proto; | ||
974 | ipv6_key->ipv6_tclass = output->ip.tos; | ||
975 | ipv6_key->ipv6_hlimit = output->ip.ttl; | ||
976 | ipv6_key->ipv6_frag = output->ip.frag; | ||
977 | } else if (swkey->eth.type == htons(ETH_P_ARP) || | ||
978 | swkey->eth.type == htons(ETH_P_RARP)) { | ||
979 | struct ovs_key_arp *arp_key; | ||
980 | |||
981 | nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key)); | ||
982 | if (!nla) | ||
983 | goto nla_put_failure; | ||
984 | arp_key = nla_data(nla); | ||
985 | memset(arp_key, 0, sizeof(struct ovs_key_arp)); | ||
986 | arp_key->arp_sip = output->ipv4.addr.src; | ||
987 | arp_key->arp_tip = output->ipv4.addr.dst; | ||
988 | arp_key->arp_op = htons(output->ip.proto); | ||
989 | memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN); | ||
990 | memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN); | ||
991 | } | ||
992 | |||
993 | if ((swkey->eth.type == htons(ETH_P_IP) || | ||
994 | swkey->eth.type == htons(ETH_P_IPV6)) && | ||
995 | swkey->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
996 | |||
997 | if (swkey->ip.proto == IPPROTO_TCP) { | ||
998 | struct ovs_key_tcp *tcp_key; | ||
999 | |||
1000 | nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key)); | ||
1001 | if (!nla) | ||
1002 | goto nla_put_failure; | ||
1003 | tcp_key = nla_data(nla); | ||
1004 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
1005 | tcp_key->tcp_src = output->ipv4.tp.src; | ||
1006 | tcp_key->tcp_dst = output->ipv4.tp.dst; | ||
1007 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
1008 | tcp_key->tcp_src = output->ipv6.tp.src; | ||
1009 | tcp_key->tcp_dst = output->ipv6.tp.dst; | ||
1010 | } | ||
1011 | } else if (swkey->ip.proto == IPPROTO_UDP) { | ||
1012 | struct ovs_key_udp *udp_key; | ||
1013 | |||
1014 | nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key)); | ||
1015 | if (!nla) | ||
1016 | goto nla_put_failure; | ||
1017 | udp_key = nla_data(nla); | ||
1018 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
1019 | udp_key->udp_src = output->ipv4.tp.src; | ||
1020 | udp_key->udp_dst = output->ipv4.tp.dst; | ||
1021 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
1022 | udp_key->udp_src = output->ipv6.tp.src; | ||
1023 | udp_key->udp_dst = output->ipv6.tp.dst; | ||
1024 | } | ||
1025 | } else if (swkey->ip.proto == IPPROTO_SCTP) { | ||
1026 | struct ovs_key_sctp *sctp_key; | ||
1027 | |||
1028 | nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key)); | ||
1029 | if (!nla) | ||
1030 | goto nla_put_failure; | ||
1031 | sctp_key = nla_data(nla); | ||
1032 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
1033 | sctp_key->sctp_src = swkey->ipv4.tp.src; | ||
1034 | sctp_key->sctp_dst = swkey->ipv4.tp.dst; | ||
1035 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
1036 | sctp_key->sctp_src = swkey->ipv6.tp.src; | ||
1037 | sctp_key->sctp_dst = swkey->ipv6.tp.dst; | ||
1038 | } | ||
1039 | } else if (swkey->eth.type == htons(ETH_P_IP) && | ||
1040 | swkey->ip.proto == IPPROTO_ICMP) { | ||
1041 | struct ovs_key_icmp *icmp_key; | ||
1042 | |||
1043 | nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key)); | ||
1044 | if (!nla) | ||
1045 | goto nla_put_failure; | ||
1046 | icmp_key = nla_data(nla); | ||
1047 | icmp_key->icmp_type = ntohs(output->ipv4.tp.src); | ||
1048 | icmp_key->icmp_code = ntohs(output->ipv4.tp.dst); | ||
1049 | } else if (swkey->eth.type == htons(ETH_P_IPV6) && | ||
1050 | swkey->ip.proto == IPPROTO_ICMPV6) { | ||
1051 | struct ovs_key_icmpv6 *icmpv6_key; | ||
1052 | |||
1053 | nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6, | ||
1054 | sizeof(*icmpv6_key)); | ||
1055 | if (!nla) | ||
1056 | goto nla_put_failure; | ||
1057 | icmpv6_key = nla_data(nla); | ||
1058 | icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src); | ||
1059 | icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst); | ||
1060 | |||
1061 | if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || | ||
1062 | icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { | ||
1063 | struct ovs_key_nd *nd_key; | ||
1064 | |||
1065 | nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key)); | ||
1066 | if (!nla) | ||
1067 | goto nla_put_failure; | ||
1068 | nd_key = nla_data(nla); | ||
1069 | memcpy(nd_key->nd_target, &output->ipv6.nd.target, | ||
1070 | sizeof(nd_key->nd_target)); | ||
1071 | memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN); | ||
1072 | memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN); | ||
1073 | } | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | unencap: | ||
1078 | if (encap) | ||
1079 | nla_nest_end(skb, encap); | ||
1080 | |||
1081 | return 0; | ||
1082 | |||
1083 | nla_put_failure: | ||
1084 | return -EMSGSIZE; | ||
1085 | } | ||
1086 | |||
1087 | #define MAX_ACTIONS_BUFSIZE (32 * 1024) | ||
1088 | |||
1089 | struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size) | ||
1090 | { | ||
1091 | struct sw_flow_actions *sfa; | ||
1092 | |||
1093 | if (size > MAX_ACTIONS_BUFSIZE) | ||
1094 | return ERR_PTR(-EINVAL); | ||
1095 | |||
1096 | sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); | ||
1097 | if (!sfa) | ||
1098 | return ERR_PTR(-ENOMEM); | ||
1099 | |||
1100 | sfa->actions_len = 0; | ||
1101 | return sfa; | ||
1102 | } | ||
1103 | |||
1104 | /* RCU callback used by ovs_nla_free_flow_actions. */ | ||
1105 | static void rcu_free_acts_callback(struct rcu_head *rcu) | ||
1106 | { | ||
1107 | struct sw_flow_actions *sf_acts = container_of(rcu, | ||
1108 | struct sw_flow_actions, rcu); | ||
1109 | kfree(sf_acts); | ||
1110 | } | ||
1111 | |||
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible.
 * The actual kfree() happens in rcu_free_acts_callback(). */
void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
1118 | |||
/* Reserve 'attr_len' bytes (NLA_ALIGN'ed) at the tail of the action
 * buffer, growing it when necessary.
 *
 * Growth doubles the current usable allocation size (as reported by
 * ksize(), which may exceed what was originally requested) and is
 * capped at MAX_ACTIONS_BUFSIZE.  On success, (*sfa)->actions_len
 * already includes the reserved space and a pointer to its start is
 * returned.  Returns ERR_PTR(-EMSGSIZE) when the cap would be exceeded,
 * or the error from ovs_nla_alloc_flow_actions().
 *
 * May replace *sfa: the old buffer is freed with plain kfree(), so the
 * buffer must not yet be visible to RCU readers when this is called. */
static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
				       int attr_len)
{

	struct sw_flow_actions *acts;
	int new_acts_size;
	int req_size = NLA_ALIGN(attr_len);
	int next_offset = offsetof(struct sw_flow_actions, actions) +
					(*sfa)->actions_len;

	/* Enough slack in the current allocation? */
	if (req_size <= (ksize(*sfa) - next_offset))
		goto out;

	new_acts_size = ksize(*sfa) * 2;

	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
			return ERR_PTR(-EMSGSIZE);
		new_acts_size = MAX_ACTIONS_BUFSIZE;
	}

	acts = ovs_nla_alloc_flow_actions(new_acts_size);
	if (IS_ERR(acts))
		return (void *)acts;

	/* Copy the existing actions into the new buffer and swap it in. */
	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
	acts->actions_len = (*sfa)->actions_len;
	kfree(*sfa);
	*sfa = acts;

out:
	(*sfa)->actions_len += req_size;
	return  (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}
1153 | |||
1154 | static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len) | ||
1155 | { | ||
1156 | struct nlattr *a; | ||
1157 | |||
1158 | a = reserve_sfa_size(sfa, nla_attr_size(len)); | ||
1159 | if (IS_ERR(a)) | ||
1160 | return PTR_ERR(a); | ||
1161 | |||
1162 | a->nla_type = attrtype; | ||
1163 | a->nla_len = nla_attr_size(len); | ||
1164 | |||
1165 | if (data) | ||
1166 | memcpy(nla_data(a), data, len); | ||
1167 | memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len)); | ||
1168 | |||
1169 | return 0; | ||
1170 | } | ||
1171 | |||
1172 | static inline int add_nested_action_start(struct sw_flow_actions **sfa, | ||
1173 | int attrtype) | ||
1174 | { | ||
1175 | int used = (*sfa)->actions_len; | ||
1176 | int err; | ||
1177 | |||
1178 | err = add_action(sfa, attrtype, NULL, 0); | ||
1179 | if (err) | ||
1180 | return err; | ||
1181 | |||
1182 | return used; | ||
1183 | } | ||
1184 | |||
1185 | static inline void add_nested_action_end(struct sw_flow_actions *sfa, | ||
1186 | int st_offset) | ||
1187 | { | ||
1188 | struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + | ||
1189 | st_offset); | ||
1190 | |||
1191 | a->nla_len = sfa->actions_len - st_offset; | ||
1192 | } | ||
1193 | |||
/* Validate an OVS_ACTION_ATTR_SAMPLE action and copy it into '*sfa'.
 *
 * A sample action must carry exactly one OVS_SAMPLE_ATTR_PROBABILITY
 * (u32) and one OVS_SAMPLE_ATTR_ACTIONS nest.  The nested action list
 * is validated and copied recursively via ovs_nla_copy_actions() at
 * depth + 1, which bounds the recursion at SAMPLE_ACTION_DEPTH.  The
 * action is rebuilt rather than copied verbatim because the nested
 * list may itself be rewritten (e.g. tunnel-set actions).
 *
 * Returns 0 on success or a negative errno. */
static int validate_and_copy_sample(const struct nlattr *attr,
				    const struct sw_flow_key *key, int depth,
				    struct sw_flow_actions **sfa)
{
	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
	const struct nlattr *probability, *actions;
	const struct nlattr *a;
	int rem, start, err, st_acts;

	/* Manual parse: reject unknown attribute types and duplicates. */
	memset(attrs, 0, sizeof(attrs));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
			return -EINVAL;
		attrs[type] = a;
	}
	if (rem)
		return -EINVAL;	/* trailing bytes after last attribute */

	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
	if (!probability || nla_len(probability) != sizeof(u32))
		return -EINVAL;

	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
		return -EINVAL;

	/* validation done, copy sample action. */
	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
	if (start < 0)
		return start;
	err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
			 nla_data(probability), sizeof(u32));
	if (err)
		return err;
	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
	if (st_acts < 0)
		return st_acts;

	err = ovs_nla_copy_actions(actions, key, depth + 1, sfa);
	if (err)
		return err;

	/* Patch the lengths of both nests now that their contents exist. */
	add_nested_action_end(*sfa, st_acts);
	add_nested_action_end(*sfa, start);

	return 0;
}
1242 | |||
1243 | static int validate_tp_port(const struct sw_flow_key *flow_key) | ||
1244 | { | ||
1245 | if (flow_key->eth.type == htons(ETH_P_IP)) { | ||
1246 | if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst) | ||
1247 | return 0; | ||
1248 | } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { | ||
1249 | if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst) | ||
1250 | return 0; | ||
1251 | } | ||
1252 | |||
1253 | return -EINVAL; | ||
1254 | } | ||
1255 | |||
1256 | void ovs_match_init(struct sw_flow_match *match, | ||
1257 | struct sw_flow_key *key, | ||
1258 | struct sw_flow_mask *mask) | ||
1259 | { | ||
1260 | memset(match, 0, sizeof(*match)); | ||
1261 | match->key = key; | ||
1262 | match->mask = mask; | ||
1263 | |||
1264 | memset(key, 0, sizeof(*key)); | ||
1265 | |||
1266 | if (mask) { | ||
1267 | memset(&mask->key, 0, sizeof(mask->key)); | ||
1268 | mask->range.start = mask->range.end = 0; | ||
1269 | } | ||
1270 | } | ||
1271 | |||
1272 | static int validate_and_copy_set_tun(const struct nlattr *attr, | ||
1273 | struct sw_flow_actions **sfa) | ||
1274 | { | ||
1275 | struct sw_flow_match match; | ||
1276 | struct sw_flow_key key; | ||
1277 | int err, start; | ||
1278 | |||
1279 | ovs_match_init(&match, &key, NULL); | ||
1280 | err = ipv4_tun_from_nlattr(nla_data(attr), &match, false); | ||
1281 | if (err) | ||
1282 | return err; | ||
1283 | |||
1284 | start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET); | ||
1285 | if (start < 0) | ||
1286 | return start; | ||
1287 | |||
1288 | err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key, | ||
1289 | sizeof(match.key->tun_key)); | ||
1290 | add_nested_action_end(*sfa, start); | ||
1291 | |||
1292 | return err; | ||
1293 | } | ||
1294 | |||
/* Validate one OVS_ACTION_ATTR_SET action against the flow key it will
 * execute on.  Most set actions are later copied verbatim by the
 * caller; a tunnel set is converted and copied here instead, in which
 * case '*set_tun' is set so the caller skips its own copy.
 *
 * Returns 0 if the action is valid, a negative errno otherwise. */
static int validate_set(const struct nlattr *a,
			const struct sw_flow_key *flow_key,
			struct sw_flow_actions **sfa,
			bool *set_tun)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);

	/* There can be only one key in a action */
	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
		return -EINVAL;

	/* -1 in ovs_key_lens means variable length. */
	if (key_type > OVS_KEY_ATTR_MAX ||
	    (ovs_key_lens[key_type] != nla_len(ovs_key) &&
	     ovs_key_lens[key_type] != -1))
		return -EINVAL;

	switch (key_type) {
	/* Declarations before the first case label are shared by the
	 * branches below; they are not initialized here. */
	const struct ovs_key_ipv4 *ipv4_key;
	const struct ovs_key_ipv6 *ipv6_key;
	int err;

	case OVS_KEY_ATTR_PRIORITY:
	case OVS_KEY_ATTR_SKB_MARK:
	case OVS_KEY_ATTR_ETHERNET:
		/* Always settable, no preconditions on the flow. */
		break;

	case OVS_KEY_ATTR_TUNNEL:
		*set_tun = true;
		err = validate_and_copy_set_tun(a, sfa);
		if (err)
			return err;
		break;

	case OVS_KEY_ATTR_IPV4:
		if (flow_key->eth.type != htons(ETH_P_IP))
			return -EINVAL;

		if (!flow_key->ip.proto)
			return -EINVAL;

		/* Protocol and fragment state are not rewritable. */
		ipv4_key = nla_data(ovs_key);
		if (ipv4_key->ipv4_proto != flow_key->ip.proto)
			return -EINVAL;

		if (ipv4_key->ipv4_frag != flow_key->ip.frag)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_IPV6:
		if (flow_key->eth.type != htons(ETH_P_IPV6))
			return -EINVAL;

		if (!flow_key->ip.proto)
			return -EINVAL;

		ipv6_key = nla_data(ovs_key);
		if (ipv6_key->ipv6_proto != flow_key->ip.proto)
			return -EINVAL;

		if (ipv6_key->ipv6_frag != flow_key->ip.frag)
			return -EINVAL;

		/* Flow label is only 20 bits wide. */
		if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_TCP:
		if (flow_key->ip.proto != IPPROTO_TCP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	case OVS_KEY_ATTR_UDP:
		if (flow_key->ip.proto != IPPROTO_UDP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	case OVS_KEY_ATTR_SCTP:
		if (flow_key->ip.proto != IPPROTO_SCTP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	default:
		return -EINVAL;
	}

	return 0;
}
1388 | |||
1389 | static int validate_userspace(const struct nlattr *attr) | ||
1390 | { | ||
1391 | static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { | ||
1392 | [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, | ||
1393 | [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC }, | ||
1394 | }; | ||
1395 | struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; | ||
1396 | int error; | ||
1397 | |||
1398 | error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, | ||
1399 | attr, userspace_policy); | ||
1400 | if (error) | ||
1401 | return error; | ||
1402 | |||
1403 | if (!a[OVS_USERSPACE_ATTR_PID] || | ||
1404 | !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) | ||
1405 | return -EINVAL; | ||
1406 | |||
1407 | return 0; | ||
1408 | } | ||
1409 | |||
1410 | static int copy_action(const struct nlattr *from, | ||
1411 | struct sw_flow_actions **sfa) | ||
1412 | { | ||
1413 | int totlen = NLA_ALIGN(from->nla_len); | ||
1414 | struct nlattr *to; | ||
1415 | |||
1416 | to = reserve_sfa_size(sfa, from->nla_len); | ||
1417 | if (IS_ERR(to)) | ||
1418 | return PTR_ERR(to); | ||
1419 | |||
1420 | memcpy(to, from, totlen); | ||
1421 | return 0; | ||
1422 | } | ||
1423 | |||
/* Validate a Netlink action list against 'key' and append the actions
 * to '*sfa' (growing it as needed).
 *
 * @attr: nested attribute holding the OVS_ACTION_ATTR_* list.
 * @key: flow key the actions will run against; used for semantic
 *	checks (e.g. a TCP port set requires a TCP flow).
 * @depth: current sample-action nesting depth; recursion through
 *	validate_and_copy_sample() is capped at SAMPLE_ACTION_DEPTH.
 * @sfa: destination action buffer; may be reallocated.
 *
 * Returns 0 on success or a negative errno.  '*sfa' may be partially
 * filled on failure; callers free it as a unit. */
int ovs_nla_copy_actions(const struct nlattr *attr,
			 const struct sw_flow_key *key,
			 int depth,
			 struct sw_flow_actions **sfa)
{
	const struct nlattr *a;
	int rem, err;

	if (depth >= SAMPLE_ACTION_DEPTH)
		return -EOVERFLOW;

	nla_for_each_nested(a, attr, rem) {
		/* Expected argument lengths, (u32)-1 for variable length. */
		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
			[OVS_ACTION_ATTR_POP_VLAN] = 0,
			[OVS_ACTION_ATTR_SET] = (u32)-1,
			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1
		};
		const struct ovs_action_push_vlan *vlan;
		int type = nla_type(a);
		bool skip_copy;

		if (type > OVS_ACTION_ATTR_MAX ||
		    (action_lens[type] != nla_len(a) &&
		     action_lens[type] != (u32)-1))
			return -EINVAL;

		skip_copy = false;
		switch (type) {
		case OVS_ACTION_ATTR_UNSPEC:
			return -EINVAL;

		case OVS_ACTION_ATTR_USERSPACE:
			err = validate_userspace(a);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;


		case OVS_ACTION_ATTR_POP_VLAN:
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			/* Only 802.1Q tags with the "present" bit set may
			 * be pushed. */
			vlan = nla_data(a);
			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
				return -EINVAL;
			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_SET:
			/* validate_set() copies tunnel sets itself and
			 * sets skip_copy accordingly. */
			err = validate_set(a, key, sfa, &skip_copy);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			/* Sample copies itself (its nested actions may be
			 * rewritten), so skip the verbatim copy below. */
			err = validate_and_copy_sample(a, key, depth, sfa);
			if (err)
				return err;
			skip_copy = true;
			break;

		default:
			return -EINVAL;
		}
		if (!skip_copy) {
			err = copy_action(a, sfa);
			if (err)
				return err;
		}
	}

	/* Leftover bytes mean a malformed attribute stream. */
	if (rem > 0)
		return -EINVAL;

	return 0;
}
1510 | |||
/* Translate one internally-stored OVS_ACTION_ATTR_SAMPLE action back
 * into its user-visible Netlink form when dumping flow actions.
 *
 * Returns 0 on success or a negative errno (typically -EMSGSIZE).
 * NOTE(review): on an error return the OVS_ACTION_ATTR_SAMPLE nest (and
 * possibly the inner ACTIONS nest) is left open; presumably callers
 * discard the partially-built skb on error -- confirm at call sites. */
static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
{
	const struct nlattr *a;
	struct nlattr *start;
	int err = 0, rem;

	start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
	if (!start)
		return -EMSGSIZE;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		struct nlattr *st_sample;

		switch (type) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			/* Probability was stored verbatim; echo it back. */
			if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY,
				    sizeof(u32), nla_data(a)))
				return -EMSGSIZE;
			break;
		case OVS_SAMPLE_ATTR_ACTIONS:
			/* Nested actions may contain rewritten set-tunnel
			 * actions; translate them recursively. */
			st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
			if (!st_sample)
				return -EMSGSIZE;
			err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
			if (err)
				return err;
			nla_nest_end(skb, st_sample);
			break;
		}
	}

	nla_nest_end(skb, start);
	return err;
}
1546 | |||
/* Translate one internally-stored OVS_ACTION_ATTR_SET action back into
 * its user-visible Netlink form when dumping flow actions.
 *
 * A tunnel set was stored as a binary OVS_KEY_ATTR_IPV4_TUNNEL and must
 * be re-expanded into the nested OVS_KEY_ATTR_TUNNEL form; every other
 * set action was copied verbatim and is emitted as-is.
 * NOTE(review): on ipv4_tun_to_nlattr() failure the OVS_ACTION_ATTR_SET
 * nest is left open; presumably the skb is discarded on error -- confirm. */
static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);
	struct nlattr *start;
	int err;

	switch (key_type) {
	case OVS_KEY_ATTR_IPV4_TUNNEL:
		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
		if (!start)
			return -EMSGSIZE;

		/* Same tunnel key passed as both key and output: a dump
		 * of the stored value, not a mask. */
		err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
					 nla_data(ovs_key));
		if (err)
			return err;
		nla_nest_end(skb, start);
		break;
	default:
		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
			return -EMSGSIZE;
		break;
	}

	return 0;
}
1574 | |||
1575 | int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb) | ||
1576 | { | ||
1577 | const struct nlattr *a; | ||
1578 | int rem, err; | ||
1579 | |||
1580 | nla_for_each_attr(a, attr, len, rem) { | ||
1581 | int type = nla_type(a); | ||
1582 | |||
1583 | switch (type) { | ||
1584 | case OVS_ACTION_ATTR_SET: | ||
1585 | err = set_action_to_attr(a, skb); | ||
1586 | if (err) | ||
1587 | return err; | ||
1588 | break; | ||
1589 | |||
1590 | case OVS_ACTION_ATTR_SAMPLE: | ||
1591 | err = sample_action_to_attr(a, skb); | ||
1592 | if (err) | ||
1593 | return err; | ||
1594 | break; | ||
1595 | default: | ||
1596 | if (nla_put(skb, type, nla_len(a), nla_data(a))) | ||
1597 | return -EMSGSIZE; | ||
1598 | break; | ||
1599 | } | ||
1600 | } | ||
1601 | |||
1602 | return 0; | ||
1603 | } | ||
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h new file mode 100644 index 000000000000..440151045d39 --- /dev/null +++ b/net/openvswitch/flow_netlink.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007-2013 Nicira, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of version 2 of the GNU General Public | ||
6 | * License as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
16 | * 02110-1301, USA | ||
17 | */ | ||
18 | |||
19 | |||
20 | #ifndef FLOW_NETLINK_H | ||
21 | #define FLOW_NETLINK_H 1 | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/netlink.h> | ||
25 | #include <linux/openvswitch.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/rcupdate.h> | ||
29 | #include <linux/if_ether.h> | ||
30 | #include <linux/in6.h> | ||
31 | #include <linux/jiffies.h> | ||
32 | #include <linux/time.h> | ||
33 | #include <linux/flex_array.h> | ||
34 | |||
35 | #include <net/inet_ecn.h> | ||
36 | #include <net/ip_tunnels.h> | ||
37 | |||
38 | #include "flow.h" | ||
39 | |||
40 | void ovs_match_init(struct sw_flow_match *match, | ||
41 | struct sw_flow_key *key, struct sw_flow_mask *mask); | ||
42 | |||
43 | int ovs_nla_put_flow(const struct sw_flow_key *, | ||
44 | const struct sw_flow_key *, struct sk_buff *); | ||
45 | int ovs_nla_get_flow_metadata(struct sw_flow *flow, | ||
46 | const struct nlattr *attr); | ||
47 | int ovs_nla_get_match(struct sw_flow_match *match, | ||
48 | const struct nlattr *, | ||
49 | const struct nlattr *); | ||
50 | |||
51 | int ovs_nla_copy_actions(const struct nlattr *attr, | ||
52 | const struct sw_flow_key *key, int depth, | ||
53 | struct sw_flow_actions **sfa); | ||
54 | int ovs_nla_put_actions(const struct nlattr *attr, | ||
55 | int len, struct sk_buff *skb); | ||
56 | |||
57 | struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len); | ||
58 | void ovs_nla_free_flow_actions(struct sw_flow_actions *); | ||
59 | |||
60 | #endif /* flow_netlink.h */ | ||
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c new file mode 100644 index 000000000000..dcadb75bb173 --- /dev/null +++ b/net/openvswitch/flow_table.c | |||
@@ -0,0 +1,517 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007-2013 Nicira, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of version 2 of the GNU General Public | ||
6 | * License as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
16 | * 02110-1301, USA | ||
17 | */ | ||
18 | |||
19 | #include "flow.h" | ||
20 | #include "datapath.h" | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/etherdevice.h> | ||
24 | #include <linux/if_ether.h> | ||
25 | #include <linux/if_vlan.h> | ||
26 | #include <net/llc_pdu.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/jhash.h> | ||
29 | #include <linux/jiffies.h> | ||
30 | #include <linux/llc.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/in.h> | ||
33 | #include <linux/rcupdate.h> | ||
34 | #include <linux/if_arp.h> | ||
35 | #include <linux/ip.h> | ||
36 | #include <linux/ipv6.h> | ||
37 | #include <linux/sctp.h> | ||
38 | #include <linux/tcp.h> | ||
39 | #include <linux/udp.h> | ||
40 | #include <linux/icmp.h> | ||
41 | #include <linux/icmpv6.h> | ||
42 | #include <linux/rculist.h> | ||
43 | #include <net/ip.h> | ||
44 | #include <net/ipv6.h> | ||
45 | #include <net/ndisc.h> | ||
46 | |||
47 | static struct kmem_cache *flow_cache; | ||
48 | |||
49 | static u16 range_n_bytes(const struct sw_flow_key_range *range) | ||
50 | { | ||
51 | return range->end - range->start; | ||
52 | } | ||
53 | |||
54 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, | ||
55 | const struct sw_flow_mask *mask) | ||
56 | { | ||
57 | const long *m = (long *)((u8 *)&mask->key + mask->range.start); | ||
58 | const long *s = (long *)((u8 *)src + mask->range.start); | ||
59 | long *d = (long *)((u8 *)dst + mask->range.start); | ||
60 | int i; | ||
61 | |||
62 | /* The memory outside of the 'mask->range' are not set since | ||
63 | * further operations on 'dst' only uses contents within | ||
64 | * 'mask->range'. | ||
65 | */ | ||
66 | for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) | ||
67 | *d++ = *s++ & *m++; | ||
68 | } | ||
69 | |||
70 | struct sw_flow *ovs_flow_alloc(void) | ||
71 | { | ||
72 | struct sw_flow *flow; | ||
73 | |||
74 | flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); | ||
75 | if (!flow) | ||
76 | return ERR_PTR(-ENOMEM); | ||
77 | |||
78 | spin_lock_init(&flow->lock); | ||
79 | flow->sf_acts = NULL; | ||
80 | flow->mask = NULL; | ||
81 | |||
82 | return flow; | ||
83 | } | ||
84 | |||
85 | static struct flex_array *alloc_buckets(unsigned int n_buckets) | ||
86 | { | ||
87 | struct flex_array *buckets; | ||
88 | int i, err; | ||
89 | |||
90 | buckets = flex_array_alloc(sizeof(struct hlist_head), | ||
91 | n_buckets, GFP_KERNEL); | ||
92 | if (!buckets) | ||
93 | return NULL; | ||
94 | |||
95 | err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL); | ||
96 | if (err) { | ||
97 | flex_array_free(buckets); | ||
98 | return NULL; | ||
99 | } | ||
100 | |||
101 | for (i = 0; i < n_buckets; i++) | ||
102 | INIT_HLIST_HEAD((struct hlist_head *) | ||
103 | flex_array_get(buckets, i)); | ||
104 | |||
105 | return buckets; | ||
106 | } | ||
107 | |||
108 | static void flow_free(struct sw_flow *flow) | ||
109 | { | ||
110 | kfree((struct sf_flow_acts __force *)flow->sf_acts); | ||
111 | kmem_cache_free(flow_cache, flow); | ||
112 | } | ||
113 | |||
114 | static void rcu_free_flow_callback(struct rcu_head *rcu) | ||
115 | { | ||
116 | struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); | ||
117 | |||
118 | flow_free(flow); | ||
119 | } | ||
120 | |||
/* Free 'flow' and drop its reference on the associated mask.
 *
 * When 'deferred' is true, the memory is released only after an RCU
 * grace period, so concurrent RCU readers may keep using the flow until
 * then; otherwise it is freed immediately.  A NULL 'flow' is a no-op.
 */
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	/* Drop the mask reference with the same deferral policy, so a
	 * reader never sees a flow whose mask is already gone.
	 */
	ovs_sw_flow_mask_del_ref(flow->mask, deferred);

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}
133 | |||
/* Counterpart of alloc_buckets(). */
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
138 | |||
/* Free a flow table: every flow it holds, the mask list, the bucket
 * array and the table structure itself.
 *
 * If keep_flows is set (the flows and mask list were handed over to a
 * replacement table by a rehash, or mask_list was never allocated), the
 * flows and mask list are skipped and only the buckets and the table
 * are freed.
 */
static void __flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (table->keep_flows)
		goto skip_flows;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del(&flow->hash_node[ver]);
			ovs_flow_free(flow, false);
		}
	}

	/* Freeing the flows drops their mask references; by now the mask
	 * list is expected to be empty.
	 */
	BUG_ON(!list_empty(table->mask_list));
	kfree(table->mask_list);

skip_flows:
	free_buckets(table->buckets);
	kfree(table);
}
165 | |||
166 | static struct flow_table *__flow_tbl_alloc(int new_size) | ||
167 | { | ||
168 | struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL); | ||
169 | |||
170 | if (!table) | ||
171 | return NULL; | ||
172 | |||
173 | table->buckets = alloc_buckets(new_size); | ||
174 | |||
175 | if (!table->buckets) { | ||
176 | kfree(table); | ||
177 | return NULL; | ||
178 | } | ||
179 | table->n_buckets = new_size; | ||
180 | table->count = 0; | ||
181 | table->node_ver = 0; | ||
182 | table->keep_flows = false; | ||
183 | get_random_bytes(&table->hash_seed, sizeof(u32)); | ||
184 | table->mask_list = NULL; | ||
185 | |||
186 | return table; | ||
187 | } | ||
188 | |||
/* Allocate a flow table together with an empty mask list.
 * Returns NULL on memory exhaustion.
 */
struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
	struct flow_table *table = __flow_tbl_alloc(new_size);

	if (!table)
		return NULL;

	table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
	if (!table->mask_list) {
		/* Setting keep_flows makes __flow_tbl_destroy() skip the
		 * (empty) flow walk and the mask-list teardown, which
		 * would otherwise touch the NULL mask_list.
		 */
		table->keep_flows = true;
		__flow_tbl_destroy(table);
		return NULL;
	}
	INIT_LIST_HEAD(table->mask_list);

	return table;
}
206 | |||
207 | static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) | ||
208 | { | ||
209 | struct flow_table *table = container_of(rcu, struct flow_table, rcu); | ||
210 | |||
211 | __flow_tbl_destroy(table); | ||
212 | } | ||
213 | |||
214 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) | ||
215 | { | ||
216 | if (!table) | ||
217 | return; | ||
218 | |||
219 | if (deferred) | ||
220 | call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); | ||
221 | else | ||
222 | __flow_tbl_destroy(table); | ||
223 | } | ||
224 | |||
/* Flow-dump iterator: return the flow at cursor position
 * (*bucket, *last) and advance the cursor so the next call yields the
 * following flow.  '*last' counts entries already returned from the
 * current bucket and is reset to 0 when '*bucket' moves on.  Returns
 * NULL once every bucket has been exhausted.
 */
struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = table->node_ver;
	while (*bucket < table->n_buckets) {
		i = 0;
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				/* Skip entries already returned by a
				 * previous call.
				 */
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
251 | |||
252 | static struct hlist_head *find_bucket(struct flow_table *table, u32 hash) | ||
253 | { | ||
254 | hash = jhash_1word(hash, table->hash_seed); | ||
255 | return flex_array_get(table->buckets, | ||
256 | (hash & (table->n_buckets - 1))); | ||
257 | } | ||
258 | |||
259 | static void __tbl_insert(struct flow_table *table, struct sw_flow *flow) | ||
260 | { | ||
261 | struct hlist_head *head; | ||
262 | |||
263 | head = find_bucket(table, flow->hash); | ||
264 | hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); | ||
265 | |||
266 | table->count++; | ||
267 | } | ||
268 | |||
/* Transfer every flow from 'old' into 'new'.
 *
 * 'new' links the flows through the hash_node[] slot that 'old' is NOT
 * using (node_ver is flipped), so concurrent RCU readers still walking
 * 'old' are undisturbed.  'old' is marked keep_flows so destroying it
 * later will not free the flows now owned by 'new'; the mask list
 * pointer is handed over as-is.
 */
static void flow_table_copy_flows(struct flow_table *old,
				  struct flow_table *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			__tbl_insert(new, flow);
	}

	new->mask_list = old->mask_list;
	old->keep_flows = true;
}
292 | |||
293 | static struct flow_table *__flow_tbl_rehash(struct flow_table *table, | ||
294 | int n_buckets) | ||
295 | { | ||
296 | struct flow_table *new_table; | ||
297 | |||
298 | new_table = __flow_tbl_alloc(n_buckets); | ||
299 | if (!new_table) | ||
300 | return ERR_PTR(-ENOMEM); | ||
301 | |||
302 | flow_table_copy_flows(table, new_table); | ||
303 | |||
304 | return new_table; | ||
305 | } | ||
306 | |||
307 | struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table) | ||
308 | { | ||
309 | return __flow_tbl_rehash(table, table->n_buckets); | ||
310 | } | ||
311 | |||
312 | struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) | ||
313 | { | ||
314 | return __flow_tbl_rehash(table, table->n_buckets * 2); | ||
315 | } | ||
316 | |||
317 | static u32 flow_hash(const struct sw_flow_key *key, int key_start, | ||
318 | int key_end) | ||
319 | { | ||
320 | u32 *hash_key = (u32 *)((u8 *)key + key_start); | ||
321 | int hash_u32s = (key_end - key_start) >> 2; | ||
322 | |||
323 | /* Make sure number of hash bytes are multiple of u32. */ | ||
324 | BUILD_BUG_ON(sizeof(long) % sizeof(u32)); | ||
325 | |||
326 | return jhash2(hash_key, hash_u32s, 0); | ||
327 | } | ||
328 | |||
329 | static int flow_key_start(const struct sw_flow_key *key) | ||
330 | { | ||
331 | if (key->tun_key.ipv4_dst) | ||
332 | return 0; | ||
333 | else | ||
334 | return rounddown(offsetof(struct sw_flow_key, phy), | ||
335 | sizeof(long)); | ||
336 | } | ||
337 | |||
/* Compare two keys over the byte range [key_start, key_end).
 *
 * Both offsets are long-aligned, so the comparison runs one long at a
 * time; differences are OR-accumulated over the whole range rather than
 * returning early.  Returns true when the ranges are identical.
 */
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (long *)((u8 *)key1 + key_start);
	const long *cp2 = (long *)((u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
352 | |||
353 | static bool flow_cmp_masked_key(const struct sw_flow *flow, | ||
354 | const struct sw_flow_key *key, | ||
355 | int key_start, int key_end) | ||
356 | { | ||
357 | return cmp_key(&flow->key, key, key_start, key_end); | ||
358 | } | ||
359 | |||
360 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
361 | struct sw_flow_match *match) | ||
362 | { | ||
363 | struct sw_flow_key *key = match->key; | ||
364 | int key_start = flow_key_start(key); | ||
365 | int key_end = match->range.end; | ||
366 | |||
367 | return cmp_key(&flow->unmasked_key, key, key_start, key_end); | ||
368 | } | ||
369 | |||
/* Look up 'unmasked' under a single 'mask': apply the mask, hash the
 * masked bytes, then scan the matching bucket for a flow that was
 * installed with this same mask and whose stored (already masked) key
 * matches.  Returns NULL when nothing matches.
 */
static struct sw_flow *masked_flow_lookup(struct flow_table *table,
					  const struct sw_flow_key *unmasked,
					  struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
		/* Comparing the mask pointer is enough: equal masks are
		 * shared, never duplicated, on the mask list.
		 */
		if (flow->mask == mask &&
		    flow_cmp_masked_key(flow, &masked_key,
					key_start, key_end))
			return flow;
	}
	return NULL;
}
392 | |||
393 | struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, | ||
394 | const struct sw_flow_key *key) | ||
395 | { | ||
396 | struct sw_flow *flow = NULL; | ||
397 | struct sw_flow_mask *mask; | ||
398 | |||
399 | list_for_each_entry_rcu(mask, tbl->mask_list, list) { | ||
400 | flow = masked_flow_lookup(tbl, key, mask); | ||
401 | if (flow) /* Found */ | ||
402 | break; | ||
403 | } | ||
404 | |||
405 | return flow; | ||
406 | } | ||
407 | |||
408 | void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) | ||
409 | { | ||
410 | flow->hash = flow_hash(&flow->key, flow->mask->range.start, | ||
411 | flow->mask->range.end); | ||
412 | __tbl_insert(table, flow); | ||
413 | } | ||
414 | |||
415 | void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) | ||
416 | { | ||
417 | BUG_ON(table->count == 0); | ||
418 | hlist_del_rcu(&flow->hash_node[table->node_ver]); | ||
419 | table->count--; | ||
420 | } | ||
421 | |||
422 | struct sw_flow_mask *ovs_sw_flow_mask_alloc(void) | ||
423 | { | ||
424 | struct sw_flow_mask *mask; | ||
425 | |||
426 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); | ||
427 | if (mask) | ||
428 | mask->ref_count = 0; | ||
429 | |||
430 | return mask; | ||
431 | } | ||
432 | |||
/* Take an additional reference on 'mask'. */
void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
{
	mask->ref_count++;
}
437 | |||
438 | static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu) | ||
439 | { | ||
440 | struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu); | ||
441 | |||
442 | kfree(mask); | ||
443 | } | ||
444 | |||
/* Drop one reference on 'mask'; NULL is a no-op.
 *
 * When the last reference goes away, the mask is unlinked from its
 * table's mask list and freed — after an RCU grace period when
 * 'deferred' is set, since lookups may still be traversing the list.
 */
void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
{
	if (!mask)
		return;

	BUG_ON(!mask->ref_count);
	mask->ref_count--;

	if (!mask->ref_count) {
		list_del_rcu(&mask->list);
		if (deferred)
			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
		else
			kfree(mask);
	}
}
461 | |||
462 | static bool mask_equal(const struct sw_flow_mask *a, | ||
463 | const struct sw_flow_mask *b) | ||
464 | { | ||
465 | u8 *a_ = (u8 *)&a->key + a->range.start; | ||
466 | u8 *b_ = (u8 *)&b->key + b->range.start; | ||
467 | |||
468 | return (a->range.end == b->range.end) | ||
469 | && (a->range.start == b->range.start) | ||
470 | && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0); | ||
471 | } | ||
472 | |||
473 | struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl, | ||
474 | const struct sw_flow_mask *mask) | ||
475 | { | ||
476 | struct list_head *ml; | ||
477 | |||
478 | list_for_each(ml, tbl->mask_list) { | ||
479 | struct sw_flow_mask *m; | ||
480 | m = container_of(ml, struct sw_flow_mask, list); | ||
481 | if (mask_equal(mask, m)) | ||
482 | return m; | ||
483 | } | ||
484 | |||
485 | return NULL; | ||
486 | } | ||
487 | |||
488 | /** | ||
489 | * add a new mask into the mask list. | ||
490 | * The caller needs to make sure that 'mask' is not the same | ||
491 | * as any masks that are already on the list. | ||
492 | */ | ||
493 | void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask) | ||
494 | { | ||
495 | list_add_rcu(&mask->list, tbl->mask_list); | ||
496 | } | ||
497 | |||
498 | /* Initializes the flow module. | ||
499 | * Returns zero if successful or a negative error code. */ | ||
500 | int ovs_flow_init(void) | ||
501 | { | ||
502 | BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long)); | ||
503 | BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); | ||
504 | |||
505 | flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, | ||
506 | 0, NULL); | ||
507 | if (flow_cache == NULL) | ||
508 | return -ENOMEM; | ||
509 | |||
510 | return 0; | ||
511 | } | ||
512 | |||
/* Uninitializes the flow module: releases the slab cache created by
 * ovs_flow_init().
 */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h new file mode 100644 index 000000000000..d7a114457cde --- /dev/null +++ b/net/openvswitch/flow_table.h | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007-2013 Nicira, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of version 2 of the GNU General Public | ||
6 | * License as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
16 | * 02110-1301, USA | ||
17 | */ | ||
18 | |||
#ifndef FLOW_TABLE_H
#define FLOW_TABLE_H 1

#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/openvswitch.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/flex_array.h>

#include <net/inet_ecn.h>
#include <net/ip_tunnels.h>

#include "flow.h"

/* Initial number of hash buckets in a flow table. */
#define TBL_MIN_BUCKETS 1024

/* Hash table of struct sw_flow, keyed by masked flow key. */
struct flow_table {
	struct flex_array *buckets;	/* Array of hlist_head buckets. */
	unsigned int count, n_buckets;	/* Flows stored / bucket count. */
	struct rcu_head rcu;		/* For deferred destruction. */
	struct list_head *mask_list;	/* List of sw_flow_mask in use. */
	int node_ver;			/* Which hash_node[] slot is live. */
	u32 hash_seed;			/* Per-table jhash seed. */
	bool keep_flows;		/* Skip freeing flows on destroy. */
};

int ovs_flow_init(void);
void ovs_flow_exit(void);

struct sw_flow *ovs_flow_alloc(void);
void ovs_flow_free(struct sw_flow *, bool deferred);

/* Number of flows currently stored in 'table'. */
static inline int ovs_flow_tbl_count(struct flow_table *table)
{
	return table->count;
}

/* True when the table holds more flows than buckets and should be
 * rebuilt via ovs_flow_tbl_expand().
 */
static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
{
	return (table->count > table->n_buckets);
}

struct flow_table *ovs_flow_tbl_alloc(int new_size);
struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);

void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
/* Cursor-based iteration over all flows (for netlink dumps). */
struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table,
				       u32 *bucket, u32 *idx);
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
				    const struct sw_flow_key *);

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
			       struct sw_flow_match *match);

struct sw_flow_mask *ovs_sw_flow_mask_alloc(void);
void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *);
void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred);
void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *);
struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *,
					   const struct sw_flow_mask *);
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask);

#endif /* flow_table.h */