Diffstat (limited to 'net/openvswitch')
-rw-r--r--  net/openvswitch/Kconfig           14
-rw-r--r--  net/openvswitch/Makefile           9
-rw-r--r--  net/openvswitch/actions.c         45
-rw-r--r--  net/openvswitch/datapath.c       176
-rw-r--r--  net/openvswitch/datapath.h         6
-rw-r--r--  net/openvswitch/flow.c          1487
-rw-r--r--  net/openvswitch/flow.h            89
-rw-r--r--  net/openvswitch/vport-gre.c        7
-rw-r--r--  net/openvswitch/vport-netdev.c    20
-rw-r--r--  net/openvswitch/vport-vxlan.c    204
-rw-r--r--  net/openvswitch/vport.c            6
-rw-r--r--  net/openvswitch/vport.h            1
12 files changed, 1520 insertions, 544 deletions
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 27ee56b688a3..6ecf491ad509 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -4,6 +4,7 @@
 
 config OPENVSWITCH
 	tristate "Open vSwitch"
+	select LIBCRC32C
 	---help---
 	  Open vSwitch is a multilayer Ethernet switch targeted at virtualized
 	  environments. In addition to supporting a variety of features
@@ -40,3 +41,16 @@ config OPENVSWITCH_GRE
 	  Say N to exclude this support and reduce the binary size.
 
 	  If unsure, say Y.
+
+config OPENVSWITCH_VXLAN
+	bool "Open vSwitch VXLAN tunneling support"
+	depends on INET
+	depends on OPENVSWITCH
+	depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m)
+	default y
+	---help---
+	  If you say Y here, then the Open vSwitch will be able create vxlan vport.
+
+	  Say N to exclude this support and reduce the binary size.
+
+	  If unsure, say Y.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 01bddb2991e3..ea36e99089af 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -10,6 +10,13 @@ openvswitch-y := \
 	dp_notify.o \
 	flow.o \
 	vport.o \
-	vport-gre.o \
 	vport-internal_dev.o \
 	vport-netdev.o
+
+ifneq ($(CONFIG_OPENVSWITCH_VXLAN),)
+openvswitch-y += vport-vxlan.o
+endif
+
+ifneq ($(CONFIG_OPENVSWITCH_GRE),)
+openvswitch-y += vport-gre.o
+endif
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ab101f715447..65cfaa816075 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2012 Nicira, Inc.
+ * Copyright (c) 2007-2013 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -22,6 +22,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/openvswitch.h>
+#include <linux/sctp.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/in6.h>
@@ -31,6 +32,7 @@
 #include <net/ipv6.h>
 #include <net/checksum.h>
 #include <net/dsfield.h>
+#include <net/sctp/checksum.h>
 
 #include "datapath.h"
 #include "vport.h"
@@ -352,6 +354,39 @@ static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
 	return 0;
 }
 
+static int set_sctp(struct sk_buff *skb,
+		    const struct ovs_key_sctp *sctp_port_key)
+{
+	struct sctphdr *sh;
+	int err;
+	unsigned int sctphoff = skb_transport_offset(skb);
+
+	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
+	if (unlikely(err))
+		return err;
+
+	sh = sctp_hdr(skb);
+	if (sctp_port_key->sctp_src != sh->source ||
+	    sctp_port_key->sctp_dst != sh->dest) {
+		__le32 old_correct_csum, new_csum, old_csum;
+
+		old_csum = sh->checksum;
+		old_correct_csum = sctp_compute_cksum(skb, sctphoff);
+
+		sh->source = sctp_port_key->sctp_src;
+		sh->dest = sctp_port_key->sctp_dst;
+
+		new_csum = sctp_compute_cksum(skb, sctphoff);
+
+		/* Carry any checksum errors through. */
+		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
+
+		skb->rxhash = 0;
+	}
+
+	return 0;
+}
+
 static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
 {
 	struct vport *vport;
@@ -376,8 +411,10 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 	const struct nlattr *a;
 	int rem;
 
+	BUG_ON(!OVS_CB(skb)->pkt_key);
+
 	upcall.cmd = OVS_PACKET_CMD_ACTION;
-	upcall.key = &OVS_CB(skb)->flow->key;
+	upcall.key = OVS_CB(skb)->pkt_key;
 	upcall.userdata = NULL;
 	upcall.portid = 0;
 
@@ -459,6 +496,10 @@ static int execute_set_action(struct sk_buff *skb,
 	case OVS_KEY_ATTR_UDP:
 		err = set_udp(skb, nla_data(nested_attr));
 		break;
+
+	case OVS_KEY_ATTR_SCTP:
+		err = set_sctp(skb, nla_data(nested_attr));
+		break;
 	}
 
 	return err;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f2ed7600084e..2aa13bd7f2b2 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2012 Nicira, Inc.
+ * Copyright (c) 2007-2013 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -165,7 +165,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 {
 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
 
-	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
+	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false);
 	free_percpu(dp->stats_percpu);
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp->ports);
@@ -226,19 +226,18 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	struct sw_flow_key key;
 	u64 *stats_counter;
 	int error;
-	int key_len;
 
 	stats = this_cpu_ptr(dp->stats_percpu);
 
 	/* Extract flow from 'skb' into 'key'. */
-	error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
+	error = ovs_flow_extract(skb, p->port_no, &key);
 	if (unlikely(error)) {
 		kfree_skb(skb);
 		return;
 	}
 
 	/* Look up flow. */
-	flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+	flow = ovs_flow_lookup(rcu_dereference(dp->table), &key);
 	if (unlikely(!flow)) {
 		struct dp_upcall_info upcall;
 
@@ -253,6 +252,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	}
 
 	OVS_CB(skb)->flow = flow;
+	OVS_CB(skb)->pkt_key = &key;
 
 	stats_counter = &stats->n_hit;
 	ovs_flow_used(OVS_CB(skb)->flow, skb);
@@ -435,7 +435,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 	upcall->dp_ifindex = dp_ifindex;
 
 	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
-	ovs_flow_to_nlattrs(upcall_info->key, user_skb);
+	ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb);
 	nla_nest_end(user_skb, nla);
 
 	if (upcall_info->userdata)
@@ -468,7 +468,7 @@ static int flush_flows(struct datapath *dp)
 
 	rcu_assign_pointer(dp->table, new_table);
 
-	ovs_flow_tbl_deferred_destroy(old_table);
+	ovs_flow_tbl_destroy(old_table, true);
 	return 0;
 }
 
@@ -611,10 +611,12 @@ static int validate_tp_port(const struct sw_flow_key *flow_key)
 static int validate_and_copy_set_tun(const struct nlattr *attr,
 				     struct sw_flow_actions **sfa)
 {
-	struct ovs_key_ipv4_tunnel tun_key;
+	struct sw_flow_match match;
+	struct sw_flow_key key;
 	int err, start;
 
-	err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &tun_key);
+	ovs_match_init(&match, &key, NULL);
+	err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &match, false);
 	if (err)
 		return err;
 
@@ -622,7 +624,8 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
 	if (start < 0)
 		return start;
 
-	err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key));
+	err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
+			 sizeof(match.key->tun_key));
 	add_nested_action_end(*sfa, start);
 
 	return err;
@@ -709,6 +712,12 @@ static int validate_set(const struct nlattr *a,
 
 		return validate_tp_port(flow_key);
 
+	case OVS_KEY_ATTR_SCTP:
+		if (flow_key->ip.proto != IPPROTO_SCTP)
+			return -EINVAL;
+
+		return validate_tp_port(flow_key);
+
 	default:
 		return -EINVAL;
 	}
@@ -857,7 +866,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	struct ethhdr *eth;
 	int len;
 	int err;
-	int key_len;
 
 	err = -EINVAL;
 	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -890,11 +898,11 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(flow))
 		goto err_kfree_skb;
 
-	err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
+	err = ovs_flow_extract(packet, -1, &flow->key);
 	if (err)
 		goto err_flow_free;
 
-	err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
+	err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]);
 	if (err)
 		goto err_flow_free;
 	acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
@@ -908,6 +916,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 		goto err_flow_free;
 
 	OVS_CB(packet)->flow = flow;
+	OVS_CB(packet)->pkt_key = &flow->key;
 	packet->priority = flow->key.phy.priority;
 	packet->mark = flow->key.phy.skb_mark;
 
@@ -922,13 +931,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	local_bh_enable();
 	rcu_read_unlock();
 
-	ovs_flow_free(flow);
+	ovs_flow_free(flow, false);
 	return err;
 
 err_unlock:
 	rcu_read_unlock();
 err_flow_free:
-	ovs_flow_free(flow);
+	ovs_flow_free(flow, false);
 err_kfree_skb:
 	kfree_skb(packet);
 err:
@@ -951,9 +960,10 @@ static struct genl_ops dp_packet_genl_ops[] = {
 
 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
 {
+	struct flow_table *table;
 	int i;
-	struct flow_table *table = ovsl_dereference(dp->table);
 
+	table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held());
 	stats->n_flows = ovs_flow_tbl_count(table);
 
 	stats->n_hit = stats->n_missed = stats->n_lost = 0;
@@ -1044,7 +1054,8 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
 		if (!start)
 			return -EMSGSIZE;
 
-		err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key));
+		err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
+					     nla_data(ovs_key));
 		if (err)
 			return err;
 		nla_nest_end(skb, start);
@@ -1092,6 +1103,7 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 {
 	return NLMSG_ALIGN(sizeof(struct ovs_header))
 		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
+		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
 		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
 		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
 		+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
@@ -1104,7 +1116,6 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 				  u32 seq, u32 flags, u8 cmd)
 {
 	const int skb_orig_len = skb->len;
-	const struct sw_flow_actions *sf_acts;
 	struct nlattr *start;
 	struct ovs_flow_stats stats;
 	struct ovs_header *ovs_header;
@@ -1113,20 +1124,31 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	u8 tcp_flags;
 	int err;
 
-	sf_acts = ovsl_dereference(flow->sf_acts);
-
 	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
 	if (!ovs_header)
 		return -EMSGSIZE;
 
 	ovs_header->dp_ifindex = get_dpifindex(dp);
 
+	/* Fill flow key. */
 	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
 	if (!nla)
 		goto nla_put_failure;
-	err = ovs_flow_to_nlattrs(&flow->key, skb);
+
+	err = ovs_flow_to_nlattrs(&flow->unmasked_key,
+				  &flow->unmasked_key, skb);
+	if (err)
+		goto error;
+	nla_nest_end(skb, nla);
+
+	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
+	if (!nla)
+		goto nla_put_failure;
+
+	err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb);
 	if (err)
 		goto error;
+
 	nla_nest_end(skb, nla);
 
 	spin_lock_bh(&flow->lock);
@@ -1161,6 +1183,11 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	 */
 	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
 	if (start) {
+		const struct sw_flow_actions *sf_acts;
+
+		sf_acts = rcu_dereference_check(flow->sf_acts,
+						lockdep_ovsl_is_held());
+
 		err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
 		if (!err)
 			nla_nest_end(skb, start);
@@ -1211,20 +1238,24 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr **a = info->attrs;
 	struct ovs_header *ovs_header = info->userhdr;
-	struct sw_flow_key key;
-	struct sw_flow *flow;
+	struct sw_flow_key key, masked_key;
+	struct sw_flow *flow = NULL;
+	struct sw_flow_mask mask;
 	struct sk_buff *reply;
 	struct datapath *dp;
 	struct flow_table *table;
 	struct sw_flow_actions *acts = NULL;
+	struct sw_flow_match match;
 	int error;
-	int key_len;
 
 	/* Extract key. */
 	error = -EINVAL;
 	if (!a[OVS_FLOW_ATTR_KEY])
 		goto error;
-	error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+
+	ovs_match_init(&match, &key, &mask);
+	error = ovs_match_from_nlattrs(&match,
+				       a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
 	if (error)
 		goto error;
 
@@ -1235,9 +1266,13 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		if (IS_ERR(acts))
 			goto error;
 
-		error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0, &acts);
-		if (error)
+		ovs_flow_key_mask(&masked_key, &key, &mask);
+		error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+						  &masked_key, 0, &acts);
+		if (error) {
+			OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
 			goto err_kfree;
+		}
 	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
 		error = -EINVAL;
 		goto error;
@@ -1250,8 +1285,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		goto err_unlock_ovs;
 
 	table = ovsl_dereference(dp->table);
-	flow = ovs_flow_tbl_lookup(table, &key, key_len);
+
+	/* Check if this is a duplicate flow */
+	flow = ovs_flow_lookup(table, &key);
 	if (!flow) {
+		struct sw_flow_mask *mask_p;
 		/* Bail out if we're not allowed to create a new flow. */
 		error = -ENOENT;
 		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
@@ -1264,7 +1302,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			new_table = ovs_flow_tbl_expand(table);
 			if (!IS_ERR(new_table)) {
 				rcu_assign_pointer(dp->table, new_table);
-				ovs_flow_tbl_deferred_destroy(table);
+				ovs_flow_tbl_destroy(table, true);
 				table = ovsl_dereference(dp->table);
 			}
 		}
@@ -1277,14 +1315,30 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		}
 		clear_stats(flow);
 
+		flow->key = masked_key;
+		flow->unmasked_key = key;
+
+		/* Make sure mask is unique in the system */
+		mask_p = ovs_sw_flow_mask_find(table, &mask);
+		if (!mask_p) {
+			/* Allocate a new mask if none exsits. */
+			mask_p = ovs_sw_flow_mask_alloc();
+			if (!mask_p)
+				goto err_flow_free;
+			mask_p->key = mask.key;
+			mask_p->range = mask.range;
+			ovs_sw_flow_mask_insert(table, mask_p);
+		}
+
+		ovs_sw_flow_mask_add_ref(mask_p);
+		flow->mask = mask_p;
 		rcu_assign_pointer(flow->sf_acts, acts);
 
 		/* Put flow in bucket. */
-		ovs_flow_tbl_insert(table, flow, &key, key_len);
+		ovs_flow_insert(table, flow);
 
 		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
-						info->snd_seq,
-						OVS_FLOW_CMD_NEW);
+						info->snd_seq, OVS_FLOW_CMD_NEW);
 	} else {
 		/* We found a matching flow. */
 		struct sw_flow_actions *old_acts;
@@ -1300,6 +1354,13 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
 			goto err_unlock_ovs;
 
+		/* The unmasked key has to be the same for flow updates. */
+		error = -EINVAL;
+		if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end)) {
+			OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
+			goto err_unlock_ovs;
+		}
+
 		/* Update actions. */
 		old_acts = ovsl_dereference(flow->sf_acts);
 		rcu_assign_pointer(flow->sf_acts, acts);
@@ -1324,6 +1385,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
 	return 0;
 
+err_flow_free:
+	ovs_flow_free(flow, false);
 err_unlock_ovs:
 	ovs_unlock();
 err_kfree:
@@ -1341,12 +1404,16 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	struct sw_flow *flow;
 	struct datapath *dp;
 	struct flow_table *table;
+	struct sw_flow_match match;
 	int err;
-	int key_len;
 
-	if (!a[OVS_FLOW_ATTR_KEY])
+	if (!a[OVS_FLOW_ATTR_KEY]) {
+		OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
 		return -EINVAL;
-	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+	}
+
+	ovs_match_init(&match, &key, NULL);
+	err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		return err;
 
@@ -1358,7 +1425,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	table = ovsl_dereference(dp->table);
-	flow = ovs_flow_tbl_lookup(table, &key, key_len);
+	flow = ovs_flow_lookup_unmasked_key(table, &match);
 	if (!flow) {
 		err = -ENOENT;
 		goto unlock;
@@ -1387,8 +1454,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	struct sw_flow *flow;
 	struct datapath *dp;
 	struct flow_table *table;
+	struct sw_flow_match match;
 	int err;
-	int key_len;
 
 	ovs_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
@@ -1401,12 +1468,14 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 		err = flush_flows(dp);
 		goto unlock;
 	}
-	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+
+	ovs_match_init(&match, &key, NULL);
+	err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		goto unlock;
 
 	table = ovsl_dereference(dp->table);
-	flow = ovs_flow_tbl_lookup(table, &key, key_len);
+	flow = ovs_flow_lookup_unmasked_key(table, &match);
 	if (!flow) {
 		err = -ENOENT;
 		goto unlock;
@@ -1418,13 +1487,13 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	ovs_flow_tbl_remove(table, flow);
+	ovs_flow_remove(table, flow);
 
 	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
 				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
 	BUG_ON(err < 0);
 
-	ovs_flow_deferred_free(flow);
+	ovs_flow_free(flow, true);
 	ovs_unlock();
 
 	ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
@@ -1440,22 +1509,21 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	struct datapath *dp;
 	struct flow_table *table;
 
-	ovs_lock();
+	rcu_read_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	if (!dp) {
-		ovs_unlock();
+		rcu_read_unlock();
 		return -ENODEV;
 	}
 
-	table = ovsl_dereference(dp->table);
-
+	table = rcu_dereference(dp->table);
 	for (;;) {
 		struct sw_flow *flow;
 		u32 bucket, obj;
 
 		bucket = cb->args[0];
 		obj = cb->args[1];
-		flow = ovs_flow_tbl_next(table, &bucket, &obj);
+		flow = ovs_flow_dump_next(table, &bucket, &obj);
 		if (!flow)
 			break;
 
@@ -1468,7 +1536,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		cb->args[0] = bucket;
 		cb->args[1] = obj;
 	}
-	ovs_unlock();
+	rcu_read_unlock();
 	return skb->len;
 }
 
@@ -1664,7 +1732,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		goto err_destroy_local_port;
 
 	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
-	list_add_tail(&dp->list_node, &ovs_net->dps);
+	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
 
 	ovs_unlock();
 
@@ -1678,7 +1746,7 @@ err_destroy_ports_array:
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
 err_destroy_table:
-	ovs_flow_tbl_destroy(ovsl_dereference(dp->table));
+	ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false);
 err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
@@ -1702,7 +1770,7 @@ static void __dp_destroy(struct datapath *dp)
 			ovs_dp_detach_port(vport);
 	}
 
-	list_del(&dp->list_node);
+	list_del_rcu(&dp->list_node);
 
 	/* OVSP_LOCAL is datapath internal port. We need to make sure that
 	 * all port in datapath are destroyed first before freeing datapath.
@@ -1807,8 +1875,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	int skip = cb->args[0];
 	int i = 0;
 
-	ovs_lock();
-	list_for_each_entry(dp, &ovs_net->dps, list_node) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
 		if (i >= skip &&
 		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -1816,7 +1884,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			break;
 		i++;
 	}
-	ovs_unlock();
+	rcu_read_unlock();
 
 	cb->args[0] = i;
 
@@ -2285,7 +2353,7 @@ static void rehash_flow_table(struct work_struct *work)
 			new_table = ovs_flow_tbl_rehash(old_table);
 			if (!IS_ERR(new_table)) {
 				rcu_assign_pointer(dp->table, new_table);
-				ovs_flow_tbl_deferred_destroy(old_table);
+				ovs_flow_tbl_destroy(old_table, true);
 			}
 		}
 	}
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index a91486484916..4d109c176ef3 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -88,11 +88,13 @@ struct datapath {
 /**
  * struct ovs_skb_cb - OVS data in skb CB
  * @flow: The flow associated with this packet. May be %NULL if no flow.
+ * @pkt_key: The flow information extracted from the packet. Must be nonnull.
  * @tun_key: Key for the tunnel that encapsulated this packet. NULL if the
  * packet is not being tunneled.
  */
 struct ovs_skb_cb {
 	struct sw_flow *flow;
+	struct sw_flow_key *pkt_key;
 	struct ovs_key_ipv4_tunnel *tun_key;
 };
 #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
@@ -183,4 +185,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
 
 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
 void ovs_dp_notify_wq(struct work_struct *work);
+
+#define OVS_NLERR(fmt, ...) \
+	pr_info_once("netlink: " fmt, ##__VA_ARGS__)
+
 #endif /* datapath.h */
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 1aa84dc58777..410db90db73d 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2007-2011 Nicira, Inc. | 2 | * Copyright (c) 2007-2013 Nicira, Inc. |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
| 5 | * modify it under the terms of version 2 of the GNU General Public | 5 | * modify it under the terms of version 2 of the GNU General Public |
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <linux/if_arp.h> | 34 | #include <linux/if_arp.h> |
| 35 | #include <linux/ip.h> | 35 | #include <linux/ip.h> |
| 36 | #include <linux/ipv6.h> | 36 | #include <linux/ipv6.h> |
| 37 | #include <linux/sctp.h> | ||
| 37 | #include <linux/tcp.h> | 38 | #include <linux/tcp.h> |
| 38 | #include <linux/udp.h> | 39 | #include <linux/udp.h> |
| 39 | #include <linux/icmp.h> | 40 | #include <linux/icmp.h> |
| @@ -46,6 +47,202 @@ | |||
| 46 | 47 | ||
| 47 | static struct kmem_cache *flow_cache; | 48 | static struct kmem_cache *flow_cache; |
| 48 | 49 | ||
| 50 | static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask, | ||
| 51 | struct sw_flow_key_range *range, u8 val); | ||
| 52 | |||
| 53 | static void update_range__(struct sw_flow_match *match, | ||
| 54 | size_t offset, size_t size, bool is_mask) | ||
| 55 | { | ||
| 56 | struct sw_flow_key_range *range = NULL; | ||
| 57 | size_t start = rounddown(offset, sizeof(long)); | ||
| 58 | size_t end = roundup(offset + size, sizeof(long)); | ||
| 59 | |||
| 60 | if (!is_mask) | ||
| 61 | range = &match->range; | ||
| 62 | else if (match->mask) | ||
| 63 | range = &match->mask->range; | ||
| 64 | |||
| 65 | if (!range) | ||
| 66 | return; | ||
| 67 | |||
| 68 | if (range->start == range->end) { | ||
| 69 | range->start = start; | ||
| 70 | range->end = end; | ||
| 71 | return; | ||
| 72 | } | ||
| 73 | |||
| 74 | if (range->start > start) | ||
| 75 | range->start = start; | ||
| 76 | |||
| 77 | if (range->end < end) | ||
| 78 | range->end = end; | ||
| 79 | } | ||
| 80 | |||
| 81 | #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ | ||
| 82 | do { \ | ||
| 83 | update_range__(match, offsetof(struct sw_flow_key, field), \ | ||
| 84 | sizeof((match)->key->field), is_mask); \ | ||
| 85 | if (is_mask) { \ | ||
| 86 | if ((match)->mask) \ | ||
| 87 | (match)->mask->key.field = value; \ | ||
| 88 | } else { \ | ||
| 89 | (match)->key->field = value; \ | ||
| 90 | } \ | ||
| 91 | } while (0) | ||
| 92 | |||
| 93 | #define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \ | ||
| 94 | do { \ | ||
| 95 | update_range__(match, offsetof(struct sw_flow_key, field), \ | ||
| 96 | len, is_mask); \ | ||
| 97 | if (is_mask) { \ | ||
| 98 | if ((match)->mask) \ | ||
| 99 | memcpy(&(match)->mask->key.field, value_p, len);\ | ||
| 100 | } else { \ | ||
| 101 | memcpy(&(match)->key->field, value_p, len); \ | ||
| 102 | } \ | ||
| 103 | } while (0) | ||
| 104 | |||
| 105 | static u16 range_n_bytes(const struct sw_flow_key_range *range) | ||
| 106 | { | ||
| 107 | return range->end - range->start; | ||
| 108 | } | ||
| 109 | |||
| 110 | void ovs_match_init(struct sw_flow_match *match, | ||
| 111 | struct sw_flow_key *key, | ||
| 112 | struct sw_flow_mask *mask) | ||
| 113 | { | ||
| 114 | memset(match, 0, sizeof(*match)); | ||
| 115 | match->key = key; | ||
| 116 | match->mask = mask; | ||
| 117 | |||
| 118 | memset(key, 0, sizeof(*key)); | ||
| 119 | |||
| 120 | if (mask) { | ||
| 121 | memset(&mask->key, 0, sizeof(mask->key)); | ||
| 122 | mask->range.start = mask->range.end = 0; | ||
| 123 | } | ||
| 124 | } | ||
| 125 | |||
| 126 | static bool ovs_match_validate(const struct sw_flow_match *match, | ||
| 127 | u64 key_attrs, u64 mask_attrs) | ||
| 128 | { | ||
| 129 | u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET; | ||
| 130 | u64 mask_allowed = key_attrs; /* At most allow all key attributes */ | ||
| 131 | |||
| 132 | /* The following mask attributes allowed only if they | ||
| 133 | * pass the validation tests. */ | ||
| 134 | mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4) | ||
| 135 | | (1 << OVS_KEY_ATTR_IPV6) | ||
| 136 | | (1 << OVS_KEY_ATTR_TCP) | ||
| 137 | | (1 << OVS_KEY_ATTR_UDP) | ||
| 138 | | (1 << OVS_KEY_ATTR_SCTP) | ||
| 139 | | (1 << OVS_KEY_ATTR_ICMP) | ||
| 140 | | (1 << OVS_KEY_ATTR_ICMPV6) | ||
| 141 | | (1 << OVS_KEY_ATTR_ARP) | ||
| 142 | | (1 << OVS_KEY_ATTR_ND)); | ||
| 143 | |||
| 144 | /* Always allowed mask fields. */ | ||
| 145 | mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL) | ||
| 146 | | (1 << OVS_KEY_ATTR_IN_PORT) | ||
| 147 | | (1 << OVS_KEY_ATTR_ETHERTYPE)); | ||
| 148 | |||
| 149 | /* Check key attributes. */ | ||
| 150 | if (match->key->eth.type == htons(ETH_P_ARP) | ||
| 151 | || match->key->eth.type == htons(ETH_P_RARP)) { | ||
| 152 | key_expected |= 1 << OVS_KEY_ATTR_ARP; | ||
| 153 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
| 154 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; | ||
| 155 | } | ||
| 156 | |||
| 157 | if (match->key->eth.type == htons(ETH_P_IP)) { | ||
| 158 | key_expected |= 1 << OVS_KEY_ATTR_IPV4; | ||
| 159 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
| 160 | mask_allowed |= 1 << OVS_KEY_ATTR_IPV4; | ||
| 161 | |||
| 162 | if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
| 163 | if (match->key->ip.proto == IPPROTO_UDP) { | ||
| 164 | key_expected |= 1 << OVS_KEY_ATTR_UDP; | ||
| 165 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 166 | mask_allowed |= 1 << OVS_KEY_ATTR_UDP; | ||
| 167 | } | ||
| 168 | |||
| 169 | if (match->key->ip.proto == IPPROTO_SCTP) { | ||
| 170 | key_expected |= 1 << OVS_KEY_ATTR_SCTP; | ||
| 171 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 172 | mask_allowed |= 1 << OVS_KEY_ATTR_SCTP; | ||
| 173 | } | ||
| 174 | |||
| 175 | if (match->key->ip.proto == IPPROTO_TCP) { | ||
| 176 | key_expected |= 1 << OVS_KEY_ATTR_TCP; | ||
| 177 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 178 | mask_allowed |= 1 << OVS_KEY_ATTR_TCP; | ||
| 179 | } | ||
| 180 | |||
| 181 | if (match->key->ip.proto == IPPROTO_ICMP) { | ||
| 182 | key_expected |= 1 << OVS_KEY_ATTR_ICMP; | ||
| 183 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 184 | mask_allowed |= 1 << OVS_KEY_ATTR_ICMP; | ||
| 185 | } | ||
| 186 | } | ||
| 187 | } | ||
| 188 | |||
| 189 | if (match->key->eth.type == htons(ETH_P_IPV6)) { | ||
| 190 | key_expected |= 1 << OVS_KEY_ATTR_IPV6; | ||
| 191 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
| 192 | mask_allowed |= 1 << OVS_KEY_ATTR_IPV6; | ||
| 193 | |||
| 194 | if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
| 195 | if (match->key->ip.proto == IPPROTO_UDP) { | ||
| 196 | key_expected |= 1 << OVS_KEY_ATTR_UDP; | ||
| 197 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 198 | mask_allowed |= 1 << OVS_KEY_ATTR_UDP; | ||
| 199 | } | ||
| 200 | |||
| 201 | if (match->key->ip.proto == IPPROTO_SCTP) { | ||
| 202 | key_expected |= 1 << OVS_KEY_ATTR_SCTP; | ||
| 203 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 204 | mask_allowed |= 1 << OVS_KEY_ATTR_SCTP; | ||
| 205 | } | ||
| 206 | |||
| 207 | if (match->key->ip.proto == IPPROTO_TCP) { | ||
| 208 | key_expected |= 1 << OVS_KEY_ATTR_TCP; | ||
| 209 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 210 | mask_allowed |= 1 << OVS_KEY_ATTR_TCP; | ||
| 211 | } | ||
| 212 | |||
| 213 | if (match->key->ip.proto == IPPROTO_ICMPV6) { | ||
| 214 | key_expected |= 1 << OVS_KEY_ATTR_ICMPV6; | ||
| 215 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 216 | mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6; | ||
| 217 | |||
| 218 | if (match->key->ipv6.tp.src == | ||
| 219 | htons(NDISC_NEIGHBOUR_SOLICITATION) || | ||
| 220 | match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { | ||
| 221 | key_expected |= 1 << OVS_KEY_ATTR_ND; | ||
| 222 | if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff))) | ||
| 223 | mask_allowed |= 1 << OVS_KEY_ATTR_ND; | ||
| 224 | } | ||
| 225 | } | ||
| 226 | } | ||
| 227 | } | ||
| 228 | |||
| 229 | if ((key_attrs & key_expected) != key_expected) { | ||
| 230 | /* Key attributes check failed. */ | ||
| 231 | OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n", | ||
| 232 | key_attrs, key_expected); | ||
| 233 | return false; | ||
| 234 | } | ||
| 235 | |||
| 236 | if ((mask_attrs & mask_allowed) != mask_attrs) { | ||
| 237 | /* Mask attributes check failed. */ | ||
| 238 | OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n", | ||
| 239 | mask_attrs, mask_allowed); | ||
| 240 | return false; | ||
| 241 | } | ||
| 242 | |||
| 243 | return true; | ||
| 244 | } | ||
| 245 | |||
| 49 | static int check_header(struct sk_buff *skb, int len) | 246 | static int check_header(struct sk_buff *skb, int len) |
| 50 | { | 247 | { |
| 51 | if (unlikely(skb->len < len)) | 248 | if (unlikely(skb->len < len)) |
| @@ -102,6 +299,12 @@ static bool udphdr_ok(struct sk_buff *skb) | |||
| 102 | sizeof(struct udphdr)); | 299 | sizeof(struct udphdr)); |
| 103 | } | 300 | } |
| 104 | 301 | ||
| 302 | static bool sctphdr_ok(struct sk_buff *skb) | ||
| 303 | { | ||
| 304 | return pskb_may_pull(skb, skb_transport_offset(skb) + | ||
| 305 | sizeof(struct sctphdr)); | ||
| 306 | } | ||
| 307 | |||
| 105 | static bool icmphdr_ok(struct sk_buff *skb) | 308 | static bool icmphdr_ok(struct sk_buff *skb) |
| 106 | { | 309 | { |
| 107 | return pskb_may_pull(skb, skb_transport_offset(skb) + | 310 | return pskb_may_pull(skb, skb_transport_offset(skb) + |
| @@ -121,12 +324,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies) | |||
| 121 | return cur_ms - idle_ms; | 324 | return cur_ms - idle_ms; |
| 122 | } | 325 | } |
| 123 | 326 | ||
| 124 | #define SW_FLOW_KEY_OFFSET(field) \ | 327 | static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) |
| 125 | (offsetof(struct sw_flow_key, field) + \ | ||
| 126 | FIELD_SIZEOF(struct sw_flow_key, field)) | ||
| 127 | |||
| 128 | static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, | ||
| 129 | int *key_lenp) | ||
| 130 | { | 328 | { |
| 131 | unsigned int nh_ofs = skb_network_offset(skb); | 329 | unsigned int nh_ofs = skb_network_offset(skb); |
| 132 | unsigned int nh_len; | 330 | unsigned int nh_len; |
| @@ -136,8 +334,6 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, | |||
| 136 | __be16 frag_off; | 334 | __be16 frag_off; |
| 137 | int err; | 335 | int err; |
| 138 | 336 | ||
| 139 | *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label); | ||
| 140 | |||
| 141 | err = check_header(skb, nh_ofs + sizeof(*nh)); | 337 | err = check_header(skb, nh_ofs + sizeof(*nh)); |
| 142 | if (unlikely(err)) | 338 | if (unlikely(err)) |
| 143 | return err; | 339 | return err; |
| @@ -176,6 +372,22 @@ static bool icmp6hdr_ok(struct sk_buff *skb) | |||
| 176 | sizeof(struct icmp6hdr)); | 372 | sizeof(struct icmp6hdr)); |
| 177 | } | 373 | } |
| 178 | 374 | ||
| 375 | void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src, | ||
| 376 | const struct sw_flow_mask *mask) | ||
| 377 | { | ||
| 378 | const long *m = (long *)((u8 *)&mask->key + mask->range.start); | ||
| 379 | const long *s = (long *)((u8 *)src + mask->range.start); | ||
| 380 | long *d = (long *)((u8 *)dst + mask->range.start); | ||
| 381 | int i; | ||
| 382 | |||
| 383 | /* The memory outside of the 'mask->range' are not set since | ||
| 384 | * further operations on 'dst' only uses contents within | ||
| 385 | * 'mask->range'. | ||
| 386 | */ | ||
| 387 | for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) | ||
| 388 | *d++ = *s++ & *m++; | ||
| 389 | } | ||
| 390 | |||
| 179 | #define TCP_FLAGS_OFFSET 13 | 391 | #define TCP_FLAGS_OFFSET 13 |
| 180 | #define TCP_FLAG_MASK 0x3f | 392 | #define TCP_FLAG_MASK 0x3f |
| 181 | 393 | ||
| @@ -224,6 +436,7 @@ struct sw_flow *ovs_flow_alloc(void) | |||
| 224 | 436 | ||
| 225 | spin_lock_init(&flow->lock); | 437 | spin_lock_init(&flow->lock); |
| 226 | flow->sf_acts = NULL; | 438 | flow->sf_acts = NULL; |
| 439 | flow->mask = NULL; | ||
| 227 | 440 | ||
| 228 | return flow; | 441 | return flow; |
| 229 | } | 442 | } |
| @@ -263,7 +476,7 @@ static void free_buckets(struct flex_array *buckets) | |||
| 263 | flex_array_free(buckets); | 476 | flex_array_free(buckets); |
| 264 | } | 477 | } |
| 265 | 478 | ||
| 266 | struct flow_table *ovs_flow_tbl_alloc(int new_size) | 479 | static struct flow_table *__flow_tbl_alloc(int new_size) |
| 267 | { | 480 | { |
| 268 | struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL); | 481 | struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL); |
| 269 | 482 | ||
| @@ -281,17 +494,15 @@ struct flow_table *ovs_flow_tbl_alloc(int new_size) | |||
| 281 | table->node_ver = 0; | 494 | table->node_ver = 0; |
| 282 | table->keep_flows = false; | 495 | table->keep_flows = false; |
| 283 | get_random_bytes(&table->hash_seed, sizeof(u32)); | 496 | get_random_bytes(&table->hash_seed, sizeof(u32)); |
| 497 | table->mask_list = NULL; | ||
| 284 | 498 | ||
| 285 | return table; | 499 | return table; |
| 286 | } | 500 | } |
| 287 | 501 | ||
| 288 | void ovs_flow_tbl_destroy(struct flow_table *table) | 502 | static void __flow_tbl_destroy(struct flow_table *table) |
| 289 | { | 503 | { |
| 290 | int i; | 504 | int i; |
| 291 | 505 | ||
| 292 | if (!table) | ||
| 293 | return; | ||
| 294 | |||
| 295 | if (table->keep_flows) | 506 | if (table->keep_flows) |
| 296 | goto skip_flows; | 507 | goto skip_flows; |
| 297 | 508 | ||
| @@ -302,32 +513,56 @@ void ovs_flow_tbl_destroy(struct flow_table *table) | |||
| 302 | int ver = table->node_ver; | 513 | int ver = table->node_ver; |
| 303 | 514 | ||
| 304 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { | 515 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { |
| 305 | hlist_del_rcu(&flow->hash_node[ver]); | 516 | hlist_del(&flow->hash_node[ver]); |
| 306 | ovs_flow_free(flow); | 517 | ovs_flow_free(flow, false); |
| 307 | } | 518 | } |
| 308 | } | 519 | } |
| 309 | 520 | ||
| 521 | BUG_ON(!list_empty(table->mask_list)); | ||
| 522 | kfree(table->mask_list); | ||
| 523 | |||
| 310 | skip_flows: | 524 | skip_flows: |
| 311 | free_buckets(table->buckets); | 525 | free_buckets(table->buckets); |
| 312 | kfree(table); | 526 | kfree(table); |
| 313 | } | 527 | } |
| 314 | 528 | ||
| 529 | struct flow_table *ovs_flow_tbl_alloc(int new_size) | ||
| 530 | { | ||
| 531 | struct flow_table *table = __flow_tbl_alloc(new_size); | ||
| 532 | |||
| 533 | if (!table) | ||
| 534 | return NULL; | ||
| 535 | |||
| 536 | table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); | ||
| 537 | if (!table->mask_list) { | ||
| 538 | table->keep_flows = true; | ||
| 539 | __flow_tbl_destroy(table); | ||
| 540 | return NULL; | ||
| 541 | } | ||
| 542 | INIT_LIST_HEAD(table->mask_list); | ||
| 543 | |||
| 544 | return table; | ||
| 545 | } | ||
| 546 | |||
| 315 | static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) | 547 | static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) |
| 316 | { | 548 | { |
| 317 | struct flow_table *table = container_of(rcu, struct flow_table, rcu); | 549 | struct flow_table *table = container_of(rcu, struct flow_table, rcu); |
| 318 | 550 | ||
| 319 | ovs_flow_tbl_destroy(table); | 551 | __flow_tbl_destroy(table); |
| 320 | } | 552 | } |
| 321 | 553 | ||
| 322 | void ovs_flow_tbl_deferred_destroy(struct flow_table *table) | 554 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) |
| 323 | { | 555 | { |
| 324 | if (!table) | 556 | if (!table) |
| 325 | return; | 557 | return; |
| 326 | 558 | ||
| 327 | call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); | 559 | if (deferred) |
| 560 | call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); | ||
| 561 | else | ||
| 562 | __flow_tbl_destroy(table); | ||
| 328 | } | 563 | } |
| 329 | 564 | ||
| 330 | struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last) | 565 | struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last) |
| 331 | { | 566 | { |
| 332 | struct sw_flow *flow; | 567 | struct sw_flow *flow; |
| 333 | struct hlist_head *head; | 568 | struct hlist_head *head; |
| @@ -353,11 +588,13 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la | |||
| 353 | return NULL; | 588 | return NULL; |
| 354 | } | 589 | } |
| 355 | 590 | ||
| 356 | static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) | 591 | static void __tbl_insert(struct flow_table *table, struct sw_flow *flow) |
| 357 | { | 592 | { |
| 358 | struct hlist_head *head; | 593 | struct hlist_head *head; |
| 594 | |||
| 359 | head = find_bucket(table, flow->hash); | 595 | head = find_bucket(table, flow->hash); |
| 360 | hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); | 596 | hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); |
| 597 | |||
| 361 | table->count++; | 598 | table->count++; |
| 362 | } | 599 | } |
| 363 | 600 | ||
| @@ -377,8 +614,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new | |||
| 377 | head = flex_array_get(old->buckets, i); | 614 | head = flex_array_get(old->buckets, i); |
| 378 | 615 | ||
| 379 | hlist_for_each_entry(flow, head, hash_node[old_ver]) | 616 | hlist_for_each_entry(flow, head, hash_node[old_ver]) |
| 380 | __flow_tbl_insert(new, flow); | 617 | __tbl_insert(new, flow); |
| 381 | } | 618 | } |
| 619 | |||
| 620 | new->mask_list = old->mask_list; | ||
| 382 | old->keep_flows = true; | 621 | old->keep_flows = true; |
| 383 | } | 622 | } |
| 384 | 623 | ||
| @@ -386,7 +625,7 @@ static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buck | |||
| 386 | { | 625 | { |
| 387 | struct flow_table *new_table; | 626 | struct flow_table *new_table; |
| 388 | 627 | ||
| 389 | new_table = ovs_flow_tbl_alloc(n_buckets); | 628 | new_table = __flow_tbl_alloc(n_buckets); |
| 390 | if (!new_table) | 629 | if (!new_table) |
| 391 | return ERR_PTR(-ENOMEM); | 630 | return ERR_PTR(-ENOMEM); |
| 392 | 631 | ||
| @@ -405,28 +644,30 @@ struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) | |||
| 405 | return __flow_tbl_rehash(table, table->n_buckets * 2); | 644 | return __flow_tbl_rehash(table, table->n_buckets * 2); |
| 406 | } | 645 | } |
| 407 | 646 | ||
| 408 | void ovs_flow_free(struct sw_flow *flow) | 647 | static void __flow_free(struct sw_flow *flow) |
| 409 | { | 648 | { |
| 410 | if (unlikely(!flow)) | ||
| 411 | return; | ||
| 412 | |||
| 413 | kfree((struct sf_flow_acts __force *)flow->sf_acts); | 649 | kfree((struct sf_flow_acts __force *)flow->sf_acts); |
| 414 | kmem_cache_free(flow_cache, flow); | 650 | kmem_cache_free(flow_cache, flow); |
| 415 | } | 651 | } |
| 416 | 652 | ||
| 417 | /* RCU callback used by ovs_flow_deferred_free. */ | ||
| 418 | static void rcu_free_flow_callback(struct rcu_head *rcu) | 653 | static void rcu_free_flow_callback(struct rcu_head *rcu) |
| 419 | { | 654 | { |
| 420 | struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); | 655 | struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); |
| 421 | 656 | ||
| 422 | ovs_flow_free(flow); | 657 | __flow_free(flow); |
| 423 | } | 658 | } |
| 424 | 659 | ||
| 425 | /* Schedules 'flow' to be freed after the next RCU grace period. | 660 | void ovs_flow_free(struct sw_flow *flow, bool deferred) |
| 426 | * The caller must hold rcu_read_lock for this to be sensible. */ | ||
| 427 | void ovs_flow_deferred_free(struct sw_flow *flow) | ||
| 428 | { | 661 | { |
| 429 | call_rcu(&flow->rcu, rcu_free_flow_callback); | 662 | if (!flow) |
| 663 | return; | ||
| 664 | |||
| 665 | ovs_sw_flow_mask_del_ref(flow->mask, deferred); | ||
| 666 | |||
| 667 | if (deferred) | ||
| 668 | call_rcu(&flow->rcu, rcu_free_flow_callback); | ||
| 669 | else | ||
| 670 | __flow_free(flow); | ||
| 430 | } | 671 | } |
| 431 | 672 | ||
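The two hunks above replace the old ovs_flow_tbl_deferred_destroy() and ovs_flow_deferred_free() entry points with single functions that take a 'deferred' flag: true queues the object through call_rcu() so it is only reclaimed after an RCU grace period, false frees it on the spot. Below is a standalone userspace toy of that pattern, added for illustration; the pending list merely stands in for the grace period and none of it is the kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_flow {
    int id;
    struct toy_flow *next_pending;  /* plays the role of struct rcu_head */
};

static struct toy_flow *pending;

static void toy_flow_free(struct toy_flow *flow, bool deferred)
{
    if (!flow)
        return;
    if (deferred) {                 /* like call_rcu(&flow->rcu, callback) */
        flow->next_pending = pending;
        pending = flow;
    } else {                        /* like the immediate __flow_free(flow) */
        free(flow);
    }
}

static void grace_period_elapsed(void)     /* like the RCU callback running */
{
    while (pending) {
        struct toy_flow *f = pending;

        pending = f->next_pending;
        printf("reclaimed flow %d after the grace period\n", f->id);
        free(f);
    }
}

int main(void)
{
    struct toy_flow *a = calloc(1, sizeof(*a));
    struct toy_flow *b = calloc(1, sizeof(*b));

    if (!a || !b) {
        free(a);
        free(b);
        return 1;
    }
    a->id = 1;
    b->id = 2;
    toy_flow_free(a, false);    /* nothing else can see it: free immediately */
    toy_flow_free(b, true);     /* readers may still hold it: defer the free */
    grace_period_elapsed();
    return 0;
}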
| 432 | /* Schedules 'sf_acts' to be freed after the next RCU grace period. | 673 | /* Schedules 'sf_acts' to be freed after the next RCU grace period. |
| @@ -497,18 +738,15 @@ static __be16 parse_ethertype(struct sk_buff *skb) | |||
| 497 | } | 738 | } |
| 498 | 739 | ||
| 499 | static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, | 740 | static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, |
| 500 | int *key_lenp, int nh_len) | 741 | int nh_len) |
| 501 | { | 742 | { |
| 502 | struct icmp6hdr *icmp = icmp6_hdr(skb); | 743 | struct icmp6hdr *icmp = icmp6_hdr(skb); |
| 503 | int error = 0; | ||
| 504 | int key_len; | ||
| 505 | 744 | ||
| 506 | /* The ICMPv6 type and code fields use the 16-bit transport port | 745 | /* The ICMPv6 type and code fields use the 16-bit transport port |
| 507 | * fields, so we need to store them in 16-bit network byte order. | 746 | * fields, so we need to store them in 16-bit network byte order. |
| 508 | */ | 747 | */ |
| 509 | key->ipv6.tp.src = htons(icmp->icmp6_type); | 748 | key->ipv6.tp.src = htons(icmp->icmp6_type); |
| 510 | key->ipv6.tp.dst = htons(icmp->icmp6_code); | 749 | key->ipv6.tp.dst = htons(icmp->icmp6_code); |
| 511 | key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); | ||
| 512 | 750 | ||
| 513 | if (icmp->icmp6_code == 0 && | 751 | if (icmp->icmp6_code == 0 && |
| 514 | (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION || | 752 | (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION || |
| @@ -517,21 +755,17 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, | |||
| 517 | struct nd_msg *nd; | 755 | struct nd_msg *nd; |
| 518 | int offset; | 756 | int offset; |
| 519 | 757 | ||
| 520 | key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); | ||
| 521 | |||
| 522 | /* In order to process neighbor discovery options, we need the | 758 | /* In order to process neighbor discovery options, we need the |
| 523 | * entire packet. | 759 | * entire packet. |
| 524 | */ | 760 | */ |
| 525 | if (unlikely(icmp_len < sizeof(*nd))) | 761 | if (unlikely(icmp_len < sizeof(*nd))) |
| 526 | goto out; | 762 | return 0; |
| 527 | if (unlikely(skb_linearize(skb))) { | 763 | |
| 528 | error = -ENOMEM; | 764 | if (unlikely(skb_linearize(skb))) |
| 529 | goto out; | 765 | return -ENOMEM; |
| 530 | } | ||
| 531 | 766 | ||
| 532 | nd = (struct nd_msg *)skb_transport_header(skb); | 767 | nd = (struct nd_msg *)skb_transport_header(skb); |
| 533 | key->ipv6.nd.target = nd->target; | 768 | key->ipv6.nd.target = nd->target; |
| 534 | key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); | ||
| 535 | 769 | ||
| 536 | icmp_len -= sizeof(*nd); | 770 | icmp_len -= sizeof(*nd); |
| 537 | offset = 0; | 771 | offset = 0; |
| @@ -541,7 +775,7 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, | |||
| 541 | int opt_len = nd_opt->nd_opt_len * 8; | 775 | int opt_len = nd_opt->nd_opt_len * 8; |
| 542 | 776 | ||
| 543 | if (unlikely(!opt_len || opt_len > icmp_len)) | 777 | if (unlikely(!opt_len || opt_len > icmp_len)) |
| 544 | goto invalid; | 778 | return 0; |
| 545 | 779 | ||
| 546 | /* Store the link layer address if the appropriate | 780 | /* Store the link layer address if the appropriate |
| 547 | * option is provided. It is considered an error if | 781 | * option is provided. It is considered an error if |
| @@ -566,16 +800,14 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, | |||
| 566 | } | 800 | } |
| 567 | } | 801 | } |
| 568 | 802 | ||
| 569 | goto out; | 803 | return 0; |
| 570 | 804 | ||
| 571 | invalid: | 805 | invalid: |
| 572 | memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target)); | 806 | memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target)); |
| 573 | memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll)); | 807 | memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll)); |
| 574 | memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll)); | 808 | memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll)); |
| 575 | 809 | ||
| 576 | out: | 810 | return 0; |
| 577 | *key_lenp = key_len; | ||
| 578 | return error; | ||
| 579 | } | 811 | } |
| 580 | 812 | ||
| 581 | /** | 813 | /** |
| @@ -584,7 +816,6 @@ out: | |||
| 584 | * Ethernet header | 816 | * Ethernet header |
| 585 | * @in_port: port number on which @skb was received. | 817 | * @in_port: port number on which @skb was received. |
| 586 | * @key: output flow key | 818 | * @key: output flow key |
| 587 | * @key_lenp: length of output flow key | ||
| 588 | * | 819 | * |
| 589 | * The caller must ensure that skb->len >= ETH_HLEN. | 820 | * The caller must ensure that skb->len >= ETH_HLEN. |
| 590 | * | 821 | * |
| @@ -602,11 +833,9 @@ out: | |||
| 602 | * of a correct length, otherwise the same as skb->network_header. | 833 | * of a correct length, otherwise the same as skb->network_header. |
| 603 | * For other key->eth.type values it is left untouched. | 834 | * For other key->eth.type values it is left untouched. |
| 604 | */ | 835 | */ |
| 605 | int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, | 836 | int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) |
| 606 | int *key_lenp) | ||
| 607 | { | 837 | { |
| 608 | int error = 0; | 838 | int error; |
| 609 | int key_len = SW_FLOW_KEY_OFFSET(eth); | ||
| 610 | struct ethhdr *eth; | 839 | struct ethhdr *eth; |
| 611 | 840 | ||
| 612 | memset(key, 0, sizeof(*key)); | 841 | memset(key, 0, sizeof(*key)); |
| @@ -649,15 +878,13 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, | |||
| 649 | struct iphdr *nh; | 878 | struct iphdr *nh; |
| 650 | __be16 offset; | 879 | __be16 offset; |
| 651 | 880 | ||
| 652 | key_len = SW_FLOW_KEY_OFFSET(ipv4.addr); | ||
| 653 | |||
| 654 | error = check_iphdr(skb); | 881 | error = check_iphdr(skb); |
| 655 | if (unlikely(error)) { | 882 | if (unlikely(error)) { |
| 656 | if (error == -EINVAL) { | 883 | if (error == -EINVAL) { |
| 657 | skb->transport_header = skb->network_header; | 884 | skb->transport_header = skb->network_header; |
| 658 | error = 0; | 885 | error = 0; |
| 659 | } | 886 | } |
| 660 | goto out; | 887 | return error; |
| 661 | } | 888 | } |
| 662 | 889 | ||
| 663 | nh = ip_hdr(skb); | 890 | nh = ip_hdr(skb); |
| @@ -671,7 +898,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, | |||
| 671 | offset = nh->frag_off & htons(IP_OFFSET); | 898 | offset = nh->frag_off & htons(IP_OFFSET); |
| 672 | if (offset) { | 899 | if (offset) { |
| 673 | key->ip.frag = OVS_FRAG_TYPE_LATER; | 900 | key->ip.frag = OVS_FRAG_TYPE_LATER; |
| 674 | goto out; | 901 | return 0; |
| 675 | } | 902 | } |
| 676 | if (nh->frag_off & htons(IP_MF) || | 903 | if (nh->frag_off & htons(IP_MF) || |
| 677 | skb_shinfo(skb)->gso_type & SKB_GSO_UDP) | 904 | skb_shinfo(skb)->gso_type & SKB_GSO_UDP) |
| @@ -679,21 +906,24 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, | |||
| 679 | 906 | ||
| 680 | /* Transport layer. */ | 907 | /* Transport layer. */ |
| 681 | if (key->ip.proto == IPPROTO_TCP) { | 908 | if (key->ip.proto == IPPROTO_TCP) { |
| 682 | key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); | ||
| 683 | if (tcphdr_ok(skb)) { | 909 | if (tcphdr_ok(skb)) { |
| 684 | struct tcphdr *tcp = tcp_hdr(skb); | 910 | struct tcphdr *tcp = tcp_hdr(skb); |
| 685 | key->ipv4.tp.src = tcp->source; | 911 | key->ipv4.tp.src = tcp->source; |
| 686 | key->ipv4.tp.dst = tcp->dest; | 912 | key->ipv4.tp.dst = tcp->dest; |
| 687 | } | 913 | } |
| 688 | } else if (key->ip.proto == IPPROTO_UDP) { | 914 | } else if (key->ip.proto == IPPROTO_UDP) { |
| 689 | key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); | ||
| 690 | if (udphdr_ok(skb)) { | 915 | if (udphdr_ok(skb)) { |
| 691 | struct udphdr *udp = udp_hdr(skb); | 916 | struct udphdr *udp = udp_hdr(skb); |
| 692 | key->ipv4.tp.src = udp->source; | 917 | key->ipv4.tp.src = udp->source; |
| 693 | key->ipv4.tp.dst = udp->dest; | 918 | key->ipv4.tp.dst = udp->dest; |
| 694 | } | 919 | } |
| 920 | } else if (key->ip.proto == IPPROTO_SCTP) { | ||
| 921 | if (sctphdr_ok(skb)) { | ||
| 922 | struct sctphdr *sctp = sctp_hdr(skb); | ||
| 923 | key->ipv4.tp.src = sctp->source; | ||
| 924 | key->ipv4.tp.dst = sctp->dest; | ||
| 925 | } | ||
| 695 | } else if (key->ip.proto == IPPROTO_ICMP) { | 926 | } else if (key->ip.proto == IPPROTO_ICMP) { |
| 696 | key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); | ||
| 697 | if (icmphdr_ok(skb)) { | 927 | if (icmphdr_ok(skb)) { |
| 698 | struct icmphdr *icmp = icmp_hdr(skb); | 928 | struct icmphdr *icmp = icmp_hdr(skb); |
| 699 | /* The ICMP type and code fields use the 16-bit | 929 | /* The ICMP type and code fields use the 16-bit |
| @@ -722,102 +952,175 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, | |||
| 722 | memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); | 952 | memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); |
| 723 | memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); | 953 | memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); |
| 724 | memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); | 954 | memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); |
| 725 | key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); | ||
| 726 | } | 955 | } |
| 727 | } else if (key->eth.type == htons(ETH_P_IPV6)) { | 956 | } else if (key->eth.type == htons(ETH_P_IPV6)) { |
| 728 | int nh_len; /* IPv6 Header + Extensions */ | 957 | int nh_len; /* IPv6 Header + Extensions */ |
| 729 | 958 | ||
| 730 | nh_len = parse_ipv6hdr(skb, key, &key_len); | 959 | nh_len = parse_ipv6hdr(skb, key); |
| 731 | if (unlikely(nh_len < 0)) { | 960 | if (unlikely(nh_len < 0)) { |
| 732 | if (nh_len == -EINVAL) | 961 | if (nh_len == -EINVAL) { |
| 733 | skb->transport_header = skb->network_header; | 962 | skb->transport_header = skb->network_header; |
| 734 | else | 963 | error = 0; |
| 964 | } else { | ||
| 735 | error = nh_len; | 965 | error = nh_len; |
| 736 | goto out; | 966 | } |
| 967 | return error; | ||
| 737 | } | 968 | } |
| 738 | 969 | ||
| 739 | if (key->ip.frag == OVS_FRAG_TYPE_LATER) | 970 | if (key->ip.frag == OVS_FRAG_TYPE_LATER) |
| 740 | goto out; | 971 | return 0; |
| 741 | if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) | 972 | if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) |
| 742 | key->ip.frag = OVS_FRAG_TYPE_FIRST; | 973 | key->ip.frag = OVS_FRAG_TYPE_FIRST; |
| 743 | 974 | ||
| 744 | /* Transport layer. */ | 975 | /* Transport layer. */ |
| 745 | if (key->ip.proto == NEXTHDR_TCP) { | 976 | if (key->ip.proto == NEXTHDR_TCP) { |
| 746 | key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); | ||
| 747 | if (tcphdr_ok(skb)) { | 977 | if (tcphdr_ok(skb)) { |
| 748 | struct tcphdr *tcp = tcp_hdr(skb); | 978 | struct tcphdr *tcp = tcp_hdr(skb); |
| 749 | key->ipv6.tp.src = tcp->source; | 979 | key->ipv6.tp.src = tcp->source; |
| 750 | key->ipv6.tp.dst = tcp->dest; | 980 | key->ipv6.tp.dst = tcp->dest; |
| 751 | } | 981 | } |
| 752 | } else if (key->ip.proto == NEXTHDR_UDP) { | 982 | } else if (key->ip.proto == NEXTHDR_UDP) { |
| 753 | key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); | ||
| 754 | if (udphdr_ok(skb)) { | 983 | if (udphdr_ok(skb)) { |
| 755 | struct udphdr *udp = udp_hdr(skb); | 984 | struct udphdr *udp = udp_hdr(skb); |
| 756 | key->ipv6.tp.src = udp->source; | 985 | key->ipv6.tp.src = udp->source; |
| 757 | key->ipv6.tp.dst = udp->dest; | 986 | key->ipv6.tp.dst = udp->dest; |
| 758 | } | 987 | } |
| 988 | } else if (key->ip.proto == NEXTHDR_SCTP) { | ||
| 989 | if (sctphdr_ok(skb)) { | ||
| 990 | struct sctphdr *sctp = sctp_hdr(skb); | ||
| 991 | key->ipv6.tp.src = sctp->source; | ||
| 992 | key->ipv6.tp.dst = sctp->dest; | ||
| 993 | } | ||
| 759 | } else if (key->ip.proto == NEXTHDR_ICMP) { | 994 | } else if (key->ip.proto == NEXTHDR_ICMP) { |
| 760 | key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); | ||
| 761 | if (icmp6hdr_ok(skb)) { | 995 | if (icmp6hdr_ok(skb)) { |
| 762 | error = parse_icmpv6(skb, key, &key_len, nh_len); | 996 | error = parse_icmpv6(skb, key, nh_len); |
| 763 | if (error < 0) | 997 | if (error) |
| 764 | goto out; | 998 | return error; |
| 765 | } | 999 | } |
| 766 | } | 1000 | } |
| 767 | } | 1001 | } |
| 768 | 1002 | ||
| 769 | out: | 1003 | return 0; |
| 770 | *key_lenp = key_len; | ||
| 771 | return error; | ||
| 772 | } | 1004 | } |
| 773 | 1005 | ||
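With the *key_lenp out-parameter dropped, ovs_flow_extract() (see the kernel-doc comment above) now only fills in the key; how much of the key is relevant is decided later, per mask, at lookup time. The same hunks also pick up SCTP source and destination ports next to TCP and UDP. The fragment below sketches what a call site looks like with the narrowed signature; the real caller is in datapath.c (also changed by this series), so the function name, the vport variable and the error handling here are assumptions, not part of the patch.

/* Call-site sketch only; example_receive() is hypothetical and this is not
 * a standalone program (it assumes the kernel/openvswitch headers). */
static void example_receive(struct vport *p, struct sk_buff *skb)
{
    struct sw_flow_key key;
    int error;

    error = ovs_flow_extract(skb, p->port_no, &key);  /* no *key_lenp any more */
    if (unlikely(error)) {
        kfree_skb(skb);
        return;
    }
    /* The lookup then derives the byte range to hash and compare from each
     * mask (see ovs_masked_flow_lookup() further down), not from a
     * per-packet key length. */
}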
| 774 | static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len) | 1006 | static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, |
| 1007 | int key_end) | ||
| 775 | { | 1008 | { |
| 776 | return jhash2((u32 *)((u8 *)key + key_start), | 1009 | u32 *hash_key = (u32 *)((u8 *)key + key_start); |
| 777 | DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0); | 1010 | int hash_u32s = (key_end - key_start) >> 2; |
| 1011 | |||
| 1012 | /* Make sure number of hash bytes are multiple of u32. */ | ||
| 1013 | BUILD_BUG_ON(sizeof(long) % sizeof(u32)); | ||
| 1014 | |||
| 1015 | return jhash2(hash_key, hash_u32s, 0); | ||
| 778 | } | 1016 | } |
| 779 | 1017 | ||
| 780 | static int flow_key_start(struct sw_flow_key *key) | 1018 | static int flow_key_start(const struct sw_flow_key *key) |
| 781 | { | 1019 | { |
| 782 | if (key->tun_key.ipv4_dst) | 1020 | if (key->tun_key.ipv4_dst) |
| 783 | return 0; | 1021 | return 0; |
| 784 | else | 1022 | else |
| 785 | return offsetof(struct sw_flow_key, phy); | 1023 | return rounddown(offsetof(struct sw_flow_key, phy), |
| 1024 | sizeof(long)); | ||
| 1025 | } | ||
| 1026 | |||
| 1027 | static bool __cmp_key(const struct sw_flow_key *key1, | ||
| 1028 | const struct sw_flow_key *key2, int key_start, int key_end) | ||
| 1029 | { | ||
| 1030 | const long *cp1 = (long *)((u8 *)key1 + key_start); | ||
| 1031 | const long *cp2 = (long *)((u8 *)key2 + key_start); | ||
| 1032 | long diffs = 0; | ||
| 1033 | int i; | ||
| 1034 | |||
| 1035 | for (i = key_start; i < key_end; i += sizeof(long)) | ||
| 1036 | diffs |= *cp1++ ^ *cp2++; | ||
| 1037 | |||
| 1038 | return diffs == 0; | ||
| 786 | } | 1039 | } |
| 787 | 1040 | ||
| 788 | struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, | 1041 | static bool __flow_cmp_masked_key(const struct sw_flow *flow, |
| 789 | struct sw_flow_key *key, int key_len) | 1042 | const struct sw_flow_key *key, int key_start, int key_end) |
| 1043 | { | ||
| 1044 | return __cmp_key(&flow->key, key, key_start, key_end); | ||
| 1045 | } | ||
| 1046 | |||
| 1047 | static bool __flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
| 1048 | const struct sw_flow_key *key, int key_start, int key_end) | ||
| 1049 | { | ||
| 1050 | return __cmp_key(&flow->unmasked_key, key, key_start, key_end); | ||
| 1051 | } | ||
| 1052 | |||
| 1053 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
| 1054 | const struct sw_flow_key *key, int key_end) | ||
| 1055 | { | ||
| 1056 | int key_start; | ||
| 1057 | key_start = flow_key_start(key); | ||
| 1058 | |||
| 1059 | return __flow_cmp_unmasked_key(flow, key, key_start, key_end); | ||
| 1060 | |||
| 1061 | } | ||
| 1062 | |||
| 1063 | struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table, | ||
| 1064 | struct sw_flow_match *match) | ||
| 1065 | { | ||
| 1066 | struct sw_flow_key *unmasked = match->key; | ||
| 1067 | int key_end = match->range.end; | ||
| 1068 | struct sw_flow *flow; | ||
| 1069 | |||
| 1070 | flow = ovs_flow_lookup(table, unmasked); | ||
| 1071 | if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end))) | ||
| 1072 | flow = NULL; | ||
| 1073 | |||
| 1074 | return flow; | ||
| 1075 | } | ||
| 1076 | |||
| 1077 | static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table, | ||
| 1078 | const struct sw_flow_key *unmasked, | ||
| 1079 | struct sw_flow_mask *mask) | ||
| 790 | { | 1080 | { |
| 791 | struct sw_flow *flow; | 1081 | struct sw_flow *flow; |
| 792 | struct hlist_head *head; | 1082 | struct hlist_head *head; |
| 793 | u8 *_key; | 1083 | int key_start = mask->range.start; |
| 794 | int key_start; | 1084 | int key_end = mask->range.end; |
| 795 | u32 hash; | 1085 | u32 hash; |
| 1086 | struct sw_flow_key masked_key; | ||
| 796 | 1087 | ||
| 797 | key_start = flow_key_start(key); | 1088 | ovs_flow_key_mask(&masked_key, unmasked, mask); |
| 798 | hash = ovs_flow_hash(key, key_start, key_len); | 1089 | hash = ovs_flow_hash(&masked_key, key_start, key_end); |
| 799 | |||
| 800 | _key = (u8 *) key + key_start; | ||
| 801 | head = find_bucket(table, hash); | 1090 | head = find_bucket(table, hash); |
| 802 | hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) { | 1091 | hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) { |
| 803 | 1092 | if (flow->mask == mask && | |
| 804 | if (flow->hash == hash && | 1093 | __flow_cmp_masked_key(flow, &masked_key, |
| 805 | !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) { | 1094 | key_start, key_end)) |
| 806 | return flow; | 1095 | return flow; |
| 807 | } | ||
| 808 | } | 1096 | } |
| 809 | return NULL; | 1097 | return NULL; |
| 810 | } | 1098 | } |
| 811 | 1099 | ||
| 812 | void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, | 1100 | struct sw_flow *ovs_flow_lookup(struct flow_table *tbl, |
| 813 | struct sw_flow_key *key, int key_len) | 1101 | const struct sw_flow_key *key) |
| 1102 | { | ||
| 1103 | struct sw_flow *flow = NULL; | ||
| 1104 | struct sw_flow_mask *mask; | ||
| 1105 | |||
| 1106 | list_for_each_entry_rcu(mask, tbl->mask_list, list) { | ||
| 1107 | flow = ovs_masked_flow_lookup(tbl, key, mask); | ||
| 1108 | if (flow) /* Found */ | ||
| 1109 | break; | ||
| 1110 | } | ||
| 1111 | |||
| 1112 | return flow; | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | |||
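ovs_flow_lookup() above walks the table's mask list; for each mask, ovs_masked_flow_lookup() ANDs the packet key with the mask via ovs_flow_key_mask(), hashes only the bytes in [range.start, range.end), and __cmp_key() compares candidates one long word at a time, OR-ing the XOR of each pair so a single zero test decides the match. The standalone sketch below illustrates just that mask-then-compare step; the field layout and the word-indexed range are invented for illustration, and the kernel additionally hashes the same range with jhash2().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_key { unsigned long words[4]; };

static void mask_key(struct toy_key *dst, const struct toy_key *src,
                     const struct toy_key *mask)
{
    for (size_t i = 0; i < 4; i++)
        dst->words[i] = src->words[i] & mask->words[i];
}

/* XOR-accumulate over the populated range; zero iff every word matches */
static bool cmp_key(const struct toy_key *a, const struct toy_key *b,
                    size_t start, size_t end)   /* word indices, not bytes */
{
    unsigned long diffs = 0;

    for (size_t i = start; i < end; i++)
        diffs |= a->words[i] ^ b->words[i];
    return diffs == 0;
}

int main(void)
{
    struct toy_key pkt    = { { 0x1234, 0xabcd, 0, 0 } };
    struct toy_key mask   = { { 0xffff, 0xff00, 0, 0 } };  /* wildcard low byte of word 1 */
    struct toy_key stored = { { 0x1234, 0xab00, 0, 0 } };  /* kept in masked form */
    struct toy_key masked;

    mask_key(&masked, &pkt, &mask);
    printf("match: %d\n", cmp_key(&masked, &stored, 0, 2)); /* prints: match: 1 */
    return 0;
}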
| 1116 | void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow) | ||
| 814 | { | 1117 | { |
| 815 | flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len); | 1118 | flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start, |
| 816 | memcpy(&flow->key, key, sizeof(flow->key)); | 1119 | flow->mask->range.end); |
| 817 | __flow_tbl_insert(table, flow); | 1120 | __tbl_insert(table, flow); |
| 818 | } | 1121 | } |
| 819 | 1122 | ||
| 820 | void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) | 1123 | void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow) |
| 821 | { | 1124 | { |
| 822 | BUG_ON(table->count == 0); | 1125 | BUG_ON(table->count == 0); |
| 823 | hlist_del_rcu(&flow->hash_node[table->node_ver]); | 1126 | hlist_del_rcu(&flow->hash_node[table->node_ver]); |
| @@ -837,6 +1140,7 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { | |||
| 837 | [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6), | 1140 | [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6), |
| 838 | [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp), | 1141 | [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp), |
| 839 | [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp), | 1142 | [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp), |
| 1143 | [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp), | ||
| 840 | [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp), | 1144 | [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp), |
| 841 | [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), | 1145 | [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), |
| 842 | [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), | 1146 | [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), |
| @@ -844,149 +1148,85 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { | |||
| 844 | [OVS_KEY_ATTR_TUNNEL] = -1, | 1148 | [OVS_KEY_ATTR_TUNNEL] = -1, |
| 845 | }; | 1149 | }; |
| 846 | 1150 | ||
| 847 | static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len, | 1151 | static bool is_all_zero(const u8 *fp, size_t size) |
| 848 | const struct nlattr *a[], u32 *attrs) | ||
| 849 | { | 1152 | { |
| 850 | const struct ovs_key_icmp *icmp_key; | 1153 | int i; |
| 851 | const struct ovs_key_tcp *tcp_key; | ||
| 852 | const struct ovs_key_udp *udp_key; | ||
| 853 | |||
| 854 | switch (swkey->ip.proto) { | ||
| 855 | case IPPROTO_TCP: | ||
| 856 | if (!(*attrs & (1 << OVS_KEY_ATTR_TCP))) | ||
| 857 | return -EINVAL; | ||
| 858 | *attrs &= ~(1 << OVS_KEY_ATTR_TCP); | ||
| 859 | |||
| 860 | *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); | ||
| 861 | tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); | ||
| 862 | swkey->ipv4.tp.src = tcp_key->tcp_src; | ||
| 863 | swkey->ipv4.tp.dst = tcp_key->tcp_dst; | ||
| 864 | break; | ||
| 865 | |||
| 866 | case IPPROTO_UDP: | ||
| 867 | if (!(*attrs & (1 << OVS_KEY_ATTR_UDP))) | ||
| 868 | return -EINVAL; | ||
| 869 | *attrs &= ~(1 << OVS_KEY_ATTR_UDP); | ||
| 870 | |||
| 871 | *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); | ||
| 872 | udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); | ||
| 873 | swkey->ipv4.tp.src = udp_key->udp_src; | ||
| 874 | swkey->ipv4.tp.dst = udp_key->udp_dst; | ||
| 875 | break; | ||
| 876 | |||
| 877 | case IPPROTO_ICMP: | ||
| 878 | if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP))) | ||
| 879 | return -EINVAL; | ||
| 880 | *attrs &= ~(1 << OVS_KEY_ATTR_ICMP); | ||
| 881 | |||
| 882 | *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); | ||
| 883 | icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]); | ||
| 884 | swkey->ipv4.tp.src = htons(icmp_key->icmp_type); | ||
| 885 | swkey->ipv4.tp.dst = htons(icmp_key->icmp_code); | ||
| 886 | break; | ||
| 887 | } | ||
| 888 | |||
| 889 | return 0; | ||
| 890 | } | ||
| 891 | |||
| 892 | static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len, | ||
| 893 | const struct nlattr *a[], u32 *attrs) | ||
| 894 | { | ||
| 895 | const struct ovs_key_icmpv6 *icmpv6_key; | ||
| 896 | const struct ovs_key_tcp *tcp_key; | ||
| 897 | const struct ovs_key_udp *udp_key; | ||
| 898 | |||
| 899 | switch (swkey->ip.proto) { | ||
| 900 | case IPPROTO_TCP: | ||
| 901 | if (!(*attrs & (1 << OVS_KEY_ATTR_TCP))) | ||
| 902 | return -EINVAL; | ||
| 903 | *attrs &= ~(1 << OVS_KEY_ATTR_TCP); | ||
| 904 | |||
| 905 | *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); | ||
| 906 | tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); | ||
| 907 | swkey->ipv6.tp.src = tcp_key->tcp_src; | ||
| 908 | swkey->ipv6.tp.dst = tcp_key->tcp_dst; | ||
| 909 | break; | ||
| 910 | |||
| 911 | case IPPROTO_UDP: | ||
| 912 | if (!(*attrs & (1 << OVS_KEY_ATTR_UDP))) | ||
| 913 | return -EINVAL; | ||
| 914 | *attrs &= ~(1 << OVS_KEY_ATTR_UDP); | ||
| 915 | |||
| 916 | *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); | ||
| 917 | udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); | ||
| 918 | swkey->ipv6.tp.src = udp_key->udp_src; | ||
| 919 | swkey->ipv6.tp.dst = udp_key->udp_dst; | ||
| 920 | break; | ||
| 921 | |||
| 922 | case IPPROTO_ICMPV6: | ||
| 923 | if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6))) | ||
| 924 | return -EINVAL; | ||
| 925 | *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6); | ||
| 926 | |||
| 927 | *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); | ||
| 928 | icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]); | ||
| 929 | swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type); | ||
| 930 | swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code); | ||
| 931 | 1154 | ||
| 932 | if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) || | 1155 | if (!fp) |
| 933 | swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { | 1156 | return false; |
| 934 | const struct ovs_key_nd *nd_key; | ||
| 935 | 1157 | ||
| 936 | if (!(*attrs & (1 << OVS_KEY_ATTR_ND))) | 1158 | for (i = 0; i < size; i++) |
| 937 | return -EINVAL; | 1159 | if (fp[i]) |
| 938 | *attrs &= ~(1 << OVS_KEY_ATTR_ND); | 1160 | return false; |
| 939 | |||
| 940 | *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); | ||
| 941 | nd_key = nla_data(a[OVS_KEY_ATTR_ND]); | ||
| 942 | memcpy(&swkey->ipv6.nd.target, nd_key->nd_target, | ||
| 943 | sizeof(swkey->ipv6.nd.target)); | ||
| 944 | memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN); | ||
| 945 | memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN); | ||
| 946 | } | ||
| 947 | break; | ||
| 948 | } | ||
| 949 | 1161 | ||
| 950 | return 0; | 1162 | return true; |
| 951 | } | 1163 | } |
| 952 | 1164 | ||
| 953 | static int parse_flow_nlattrs(const struct nlattr *attr, | 1165 | static int __parse_flow_nlattrs(const struct nlattr *attr, |
| 954 | const struct nlattr *a[], u32 *attrsp) | 1166 | const struct nlattr *a[], |
| 1167 | u64 *attrsp, bool nz) | ||
| 955 | { | 1168 | { |
| 956 | const struct nlattr *nla; | 1169 | const struct nlattr *nla; |
| 957 | u32 attrs; | 1170 | u32 attrs; |
| 958 | int rem; | 1171 | int rem; |
| 959 | 1172 | ||
| 960 | attrs = 0; | 1173 | attrs = *attrsp; |
| 961 | nla_for_each_nested(nla, attr, rem) { | 1174 | nla_for_each_nested(nla, attr, rem) { |
| 962 | u16 type = nla_type(nla); | 1175 | u16 type = nla_type(nla); |
| 963 | int expected_len; | 1176 | int expected_len; |
| 964 | 1177 | ||
| 965 | if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type)) | 1178 | if (type > OVS_KEY_ATTR_MAX) { |
| 1179 | OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n", | ||
| 1180 | type, OVS_KEY_ATTR_MAX); | ||
| 966 | return -EINVAL; | 1181 | return -EINVAL; |
| 1182 | } | ||
| 1183 | |||
| 1184 | if (attrs & (1 << type)) { | ||
| 1185 | OVS_NLERR("Duplicate key attribute (type %d).\n", type); | ||
| 1186 | return -EINVAL; | ||
| 1187 | } | ||
| 967 | 1188 | ||
| 968 | expected_len = ovs_key_lens[type]; | 1189 | expected_len = ovs_key_lens[type]; |
| 969 | if (nla_len(nla) != expected_len && expected_len != -1) | 1190 | if (nla_len(nla) != expected_len && expected_len != -1) { |
| 1191 | OVS_NLERR("Key attribute has unexpected length (type=%d" | ||
| 1192 | ", length=%d, expected=%d).\n", type, | ||
| 1193 | nla_len(nla), expected_len); | ||
| 970 | return -EINVAL; | 1194 | return -EINVAL; |
| 1195 | } | ||
| 971 | 1196 | ||
| 972 | attrs |= 1 << type; | 1197 | if (!nz || !is_all_zero(nla_data(nla), expected_len)) { |
| 973 | a[type] = nla; | 1198 | attrs |= 1 << type; |
| 1199 | a[type] = nla; | ||
| 1200 | } | ||
| 974 | } | 1201 | } |
| 975 | if (rem) | 1202 | if (rem) { |
| 1203 | OVS_NLERR("Message has %d unknown bytes.\n", rem); | ||
| 976 | return -EINVAL; | 1204 | return -EINVAL; |
| 1205 | } | ||
| 977 | 1206 | ||
| 978 | *attrsp = attrs; | 1207 | *attrsp = attrs; |
| 979 | return 0; | 1208 | return 0; |
| 980 | } | 1209 | } |
| 981 | 1210 | ||
| 1211 | static int parse_flow_mask_nlattrs(const struct nlattr *attr, | ||
| 1212 | const struct nlattr *a[], u64 *attrsp) | ||
| 1213 | { | ||
| 1214 | return __parse_flow_nlattrs(attr, a, attrsp, true); | ||
| 1215 | } | ||
| 1216 | |||
| 1217 | static int parse_flow_nlattrs(const struct nlattr *attr, | ||
| 1218 | const struct nlattr *a[], u64 *attrsp) | ||
| 1219 | { | ||
| 1220 | return __parse_flow_nlattrs(attr, a, attrsp, false); | ||
| 1221 | } | ||
| 1222 | |||
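__parse_flow_nlattrs() above records the OVS_KEY_ATTR_* types it sees as bits in a 64-bit presence map, rejecting unknown types, wrong lengths and duplicates; the 'nz' variant used through parse_flow_mask_nlattrs() also skips attributes whose payload is all zeroes, so an all-zero mask attribute never forces a match on that field. The userspace sketch below mimics only that bookkeeping; the attribute layout is invented and has nothing to do with the real netlink encoding.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_attr { int type; const uint8_t *data; size_t len; };

static bool all_zero(const uint8_t *p, size_t len)
{
    for (size_t i = 0; i < len; i++)
        if (p[i])
            return false;
    return true;
}

static int collect_attrs(const struct toy_attr *attrs, size_t n,
                         uint64_t *present, bool skip_zero)
{
    for (size_t i = 0; i < n; i++) {
        uint64_t bit = 1ULL << attrs[i].type;

        if (*present & bit)
            return -1;                       /* duplicate attribute */
        if (skip_zero && all_zero(attrs[i].data, attrs[i].len))
            continue;                        /* all-zero mask attr: ignore it */
        *present |= bit;
    }
    return 0;
}

int main(void)
{
    const uint8_t zero[4] = { 0 };
    const uint8_t ones[4] = { 0xff, 0xff, 0xff, 0xff };
    struct toy_attr attrs[] = { { 1, ones, 4 }, { 2, zero, 4 } };
    uint64_t present = 0;

    if (collect_attrs(attrs, 2, &present, true))
        return 1;
    printf("present bitmap: 0x%llx\n", (unsigned long long)present); /* 0x2 */
    return 0;
}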
| 982 | int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, | 1223 | int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, |
| 983 | struct ovs_key_ipv4_tunnel *tun_key) | 1224 | struct sw_flow_match *match, bool is_mask) |
| 984 | { | 1225 | { |
| 985 | struct nlattr *a; | 1226 | struct nlattr *a; |
| 986 | int rem; | 1227 | int rem; |
| 987 | bool ttl = false; | 1228 | bool ttl = false; |
| 988 | 1229 | __be16 tun_flags = 0; | |
| 989 | memset(tun_key, 0, sizeof(*tun_key)); | ||
| 990 | 1230 | ||
| 991 | nla_for_each_nested(a, attr, rem) { | 1231 | nla_for_each_nested(a, attr, rem) { |
| 992 | int type = nla_type(a); | 1232 | int type = nla_type(a); |
| @@ -1000,53 +1240,78 @@ int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, | |||
| 1000 | [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, | 1240 | [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, |
| 1001 | }; | 1241 | }; |
| 1002 | 1242 | ||
| 1003 | if (type > OVS_TUNNEL_KEY_ATTR_MAX || | 1243 | if (type > OVS_TUNNEL_KEY_ATTR_MAX) { |
| 1004 | ovs_tunnel_key_lens[type] != nla_len(a)) | 1244 | OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n", |
| 1245 | type, OVS_TUNNEL_KEY_ATTR_MAX); | ||
| 1005 | return -EINVAL; | 1246 | return -EINVAL; |
| 1247 | } | ||
| 1248 | |||
| 1249 | if (ovs_tunnel_key_lens[type] != nla_len(a)) { | ||
| 1250 | OVS_NLERR("IPv4 tunnel attribute type has unexpected " | ||
| 1251 | " length (type=%d, length=%d, expected=%d).\n", | ||
| 1252 | type, nla_len(a), ovs_tunnel_key_lens[type]); | ||
| 1253 | return -EINVAL; | ||
| 1254 | } | ||
| 1006 | 1255 | ||
| 1007 | switch (type) { | 1256 | switch (type) { |
| 1008 | case OVS_TUNNEL_KEY_ATTR_ID: | 1257 | case OVS_TUNNEL_KEY_ATTR_ID: |
| 1009 | tun_key->tun_id = nla_get_be64(a); | 1258 | SW_FLOW_KEY_PUT(match, tun_key.tun_id, |
| 1010 | tun_key->tun_flags |= TUNNEL_KEY; | 1259 | nla_get_be64(a), is_mask); |
| 1260 | tun_flags |= TUNNEL_KEY; | ||
| 1011 | break; | 1261 | break; |
| 1012 | case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: | 1262 | case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: |
| 1013 | tun_key->ipv4_src = nla_get_be32(a); | 1263 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_src, |
| 1264 | nla_get_be32(a), is_mask); | ||
| 1014 | break; | 1265 | break; |
| 1015 | case OVS_TUNNEL_KEY_ATTR_IPV4_DST: | 1266 | case OVS_TUNNEL_KEY_ATTR_IPV4_DST: |
| 1016 | tun_key->ipv4_dst = nla_get_be32(a); | 1267 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst, |
| 1268 | nla_get_be32(a), is_mask); | ||
| 1017 | break; | 1269 | break; |
| 1018 | case OVS_TUNNEL_KEY_ATTR_TOS: | 1270 | case OVS_TUNNEL_KEY_ATTR_TOS: |
| 1019 | tun_key->ipv4_tos = nla_get_u8(a); | 1271 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos, |
| 1272 | nla_get_u8(a), is_mask); | ||
| 1020 | break; | 1273 | break; |
| 1021 | case OVS_TUNNEL_KEY_ATTR_TTL: | 1274 | case OVS_TUNNEL_KEY_ATTR_TTL: |
| 1022 | tun_key->ipv4_ttl = nla_get_u8(a); | 1275 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl, |
| 1276 | nla_get_u8(a), is_mask); | ||
| 1023 | ttl = true; | 1277 | ttl = true; |
| 1024 | break; | 1278 | break; |
| 1025 | case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: | 1279 | case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: |
| 1026 | tun_key->tun_flags |= TUNNEL_DONT_FRAGMENT; | 1280 | tun_flags |= TUNNEL_DONT_FRAGMENT; |
| 1027 | break; | 1281 | break; |
| 1028 | case OVS_TUNNEL_KEY_ATTR_CSUM: | 1282 | case OVS_TUNNEL_KEY_ATTR_CSUM: |
| 1029 | tun_key->tun_flags |= TUNNEL_CSUM; | 1283 | tun_flags |= TUNNEL_CSUM; |
| 1030 | break; | 1284 | break; |
| 1031 | default: | 1285 | default: |
| 1032 | return -EINVAL; | 1286 | return -EINVAL; |
| 1033 | |||
| 1034 | } | 1287 | } |
| 1035 | } | 1288 | } |
| 1036 | if (rem > 0) | ||
| 1037 | return -EINVAL; | ||
| 1038 | 1289 | ||
| 1039 | if (!tun_key->ipv4_dst) | 1290 | SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask); |
| 1040 | return -EINVAL; | ||
| 1041 | 1291 | ||
| 1042 | if (!ttl) | 1292 | if (rem > 0) { |
| 1293 | OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem); | ||
| 1043 | return -EINVAL; | 1294 | return -EINVAL; |
| 1295 | } | ||
| 1296 | |||
| 1297 | if (!is_mask) { | ||
| 1298 | if (!match->key->tun_key.ipv4_dst) { | ||
| 1299 | OVS_NLERR("IPv4 tunnel destination address is zero.\n"); | ||
| 1300 | return -EINVAL; | ||
| 1301 | } | ||
| 1302 | |||
| 1303 | if (!ttl) { | ||
| 1304 | OVS_NLERR("IPv4 tunnel TTL not specified.\n"); | ||
| 1305 | return -EINVAL; | ||
| 1306 | } | ||
| 1307 | } | ||
| 1044 | 1308 | ||
| 1045 | return 0; | 1309 | return 0; |
| 1046 | } | 1310 | } |
| 1047 | 1311 | ||
| 1048 | int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, | 1312 | int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, |
| 1049 | const struct ovs_key_ipv4_tunnel *tun_key) | 1313 | const struct ovs_key_ipv4_tunnel *tun_key, |
| 1314 | const struct ovs_key_ipv4_tunnel *output) | ||
| 1050 | { | 1315 | { |
| 1051 | struct nlattr *nla; | 1316 | struct nlattr *nla; |
| 1052 | 1317 | ||
| @@ -1054,23 +1319,24 @@ int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, | |||
| 1054 | if (!nla) | 1319 | if (!nla) |
| 1055 | return -EMSGSIZE; | 1320 | return -EMSGSIZE; |
| 1056 | 1321 | ||
| 1057 | if (tun_key->tun_flags & TUNNEL_KEY && | 1322 | if (output->tun_flags & TUNNEL_KEY && |
| 1058 | nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id)) | 1323 | nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id)) |
| 1059 | return -EMSGSIZE; | 1324 | return -EMSGSIZE; |
| 1060 | if (tun_key->ipv4_src && | 1325 | if (output->ipv4_src && |
| 1061 | nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src)) | 1326 | nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src)) |
| 1062 | return -EMSGSIZE; | 1327 | return -EMSGSIZE; |
| 1063 | if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst)) | 1328 | if (output->ipv4_dst && |
| 1329 | nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst)) | ||
| 1064 | return -EMSGSIZE; | 1330 | return -EMSGSIZE; |
| 1065 | if (tun_key->ipv4_tos && | 1331 | if (output->ipv4_tos && |
| 1066 | nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ipv4_tos)) | 1332 | nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos)) |
| 1067 | return -EMSGSIZE; | 1333 | return -EMSGSIZE; |
| 1068 | if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl)) | 1334 | if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl)) |
| 1069 | return -EMSGSIZE; | 1335 | return -EMSGSIZE; |
| 1070 | if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) && | 1336 | if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) && |
| 1071 | nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) | 1337 | nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) |
| 1072 | return -EMSGSIZE; | 1338 | return -EMSGSIZE; |
| 1073 | if ((tun_key->tun_flags & TUNNEL_CSUM) && | 1339 | if ((output->tun_flags & TUNNEL_CSUM) && |
| 1074 | nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) | 1340 | nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) |
| 1075 | return -EMSGSIZE; | 1341 | return -EMSGSIZE; |
| 1076 | 1342 | ||
| @@ -1078,176 +1344,390 @@ int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, | |||
| 1078 | return 0; | 1344 | return 0; |
| 1079 | } | 1345 | } |
| 1080 | 1346 | ||
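From this point on the parsers store values through SW_FLOW_KEY_PUT() and SW_FLOW_KEY_MEMCPY() with an is_mask argument. Those macros live in flow.h (changed by this series but not visible in this hunk); from the way they are used they write into either the key or the mask of the sw_flow_match, and, given the match->range fields used elsewhere in this file, they presumably also track how far the key has been populated. The toy macro below approximates only the key-versus-mask dispatch and is not the kernel definition.

#include <stdio.h>

struct toy_key   { unsigned int ipv4_tos; unsigned int ipv4_ttl; };
struct toy_match { struct toy_key *key; struct toy_key *mask; };

/* Dispatch only: the real macro in flow.h also deals with the populated range. */
#define TOY_KEY_PUT(match, field, value, is_mask)        \
    do {                                                 \
        if (is_mask)                                     \
            (match)->mask->field = (value);              \
        else                                             \
            (match)->key->field = (value);               \
    } while (0)

int main(void)
{
    struct toy_key key = { 0 }, mask = { 0 };
    struct toy_match match = { &key, &mask };

    TOY_KEY_PUT(&match, ipv4_ttl, 64, false);    /* value for the key side  */
    TOY_KEY_PUT(&match, ipv4_ttl, 0xff, true);   /* value for the mask side */
    printf("key.ttl=%u mask.ttl=0x%x\n", key.ipv4_ttl, mask.ipv4_ttl);
    return 0;
}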
| 1081 | /** | 1347 | static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, |
| 1082 | * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key. | 1348 | const struct nlattr **a, bool is_mask) |
| 1083 | * @swkey: receives the extracted flow key. | ||
| 1084 | * @key_lenp: number of bytes used in @swkey. | ||
| 1085 | * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute | ||
| 1086 | * sequence. | ||
| 1087 | */ | ||
| 1088 | int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, | ||
| 1089 | const struct nlattr *attr) | ||
| 1090 | { | 1349 | { |
| 1091 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; | 1350 | if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) { |
| 1092 | const struct ovs_key_ethernet *eth_key; | 1351 | SW_FLOW_KEY_PUT(match, phy.priority, |
| 1093 | int key_len; | 1352 | nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask); |
| 1094 | u32 attrs; | 1353 | *attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY); |
| 1095 | int err; | 1354 | } |
| 1096 | 1355 | ||
| 1097 | memset(swkey, 0, sizeof(struct sw_flow_key)); | 1356 | if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) { |
| 1098 | key_len = SW_FLOW_KEY_OFFSET(eth); | 1357 | u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]); |
| 1099 | 1358 | ||
| 1100 | err = parse_flow_nlattrs(attr, a, &attrs); | 1359 | if (is_mask) |
| 1101 | if (err) | 1360 | in_port = 0xffffffff; /* Always exact match in_port. */ |
| 1102 | return err; | 1361 | else if (in_port >= DP_MAX_PORTS) |
| 1362 | return -EINVAL; | ||
| 1103 | 1363 | ||
| 1104 | /* Metadata attributes. */ | 1364 | SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask); |
| 1105 | if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) { | 1365 | *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); |
| 1106 | swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]); | 1366 | } else if (!is_mask) { |
| 1107 | attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY); | 1367 | SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask); |
| 1108 | } | 1368 | } |
| 1109 | if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) { | 1369 | |
| 1110 | u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]); | 1370 | if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) { |
| 1111 | if (in_port >= DP_MAX_PORTS) | 1371 | uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]); |
| 1112 | return -EINVAL; | 1372 | |
| 1113 | swkey->phy.in_port = in_port; | 1373 | SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask); |
| 1114 | attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); | 1374 | *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK); |
| 1115 | } else { | ||
| 1116 | swkey->phy.in_port = DP_MAX_PORTS; | ||
| 1117 | } | 1375 | } |
| 1118 | if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) { | 1376 | if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) { |
| 1119 | swkey->phy.skb_mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]); | 1377 | if (ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match, |
| 1120 | attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK); | 1378 | is_mask)) |
| 1379 | return -EINVAL; | ||
| 1380 | *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL); | ||
| 1121 | } | 1381 | } |
| 1382 | return 0; | ||
| 1383 | } | ||
| 1122 | 1384 | ||
| 1123 | if (attrs & (1 << OVS_KEY_ATTR_TUNNEL)) { | 1385 | static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, |
| 1124 | err = ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key); | 1386 | const struct nlattr **a, bool is_mask) |
| 1125 | if (err) | 1387 | { |
| 1126 | return err; | 1388 | int err; |
| 1389 | u64 orig_attrs = attrs; | ||
| 1127 | 1390 | ||
| 1128 | attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL); | 1391 | err = metadata_from_nlattrs(match, &attrs, a, is_mask); |
| 1129 | } | 1392 | if (err) |
| 1393 | return err; | ||
| 1130 | 1394 | ||
| 1131 | /* Data attributes. */ | 1395 | if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) { |
| 1132 | if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET))) | 1396 | const struct ovs_key_ethernet *eth_key; |
| 1133 | return -EINVAL; | ||
| 1134 | attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET); | ||
| 1135 | 1397 | ||
| 1136 | eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]); | 1398 | eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]); |
| 1137 | memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN); | 1399 | SW_FLOW_KEY_MEMCPY(match, eth.src, |
| 1138 | memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN); | 1400 | eth_key->eth_src, ETH_ALEN, is_mask); |
| 1401 | SW_FLOW_KEY_MEMCPY(match, eth.dst, | ||
| 1402 | eth_key->eth_dst, ETH_ALEN, is_mask); | ||
| 1403 | attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET); | ||
| 1404 | } | ||
| 1139 | 1405 | ||
| 1140 | if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) && | 1406 | if (attrs & (1 << OVS_KEY_ATTR_VLAN)) { |
| 1141 | nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) { | ||
| 1142 | const struct nlattr *encap; | ||
| 1143 | __be16 tci; | 1407 | __be16 tci; |
| 1144 | 1408 | ||
| 1145 | if (attrs != ((1 << OVS_KEY_ATTR_VLAN) | | ||
| 1146 | (1 << OVS_KEY_ATTR_ETHERTYPE) | | ||
| 1147 | (1 << OVS_KEY_ATTR_ENCAP))) | ||
| 1148 | return -EINVAL; | ||
| 1149 | |||
| 1150 | encap = a[OVS_KEY_ATTR_ENCAP]; | ||
| 1151 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); | 1409 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); |
| 1152 | if (tci & htons(VLAN_TAG_PRESENT)) { | 1410 | if (!(tci & htons(VLAN_TAG_PRESENT))) { |
| 1153 | swkey->eth.tci = tci; | 1411 | if (is_mask) |
| 1154 | 1412 | OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n"); | |
| 1155 | err = parse_flow_nlattrs(encap, a, &attrs); | 1413 | else |
| 1156 | if (err) | 1414 | OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n"); |
| 1157 | return err; | ||
| 1158 | } else if (!tci) { | ||
| 1159 | /* Corner case for truncated 802.1Q header. */ | ||
| 1160 | if (nla_len(encap)) | ||
| 1161 | return -EINVAL; | ||
| 1162 | 1415 | ||
| 1163 | swkey->eth.type = htons(ETH_P_8021Q); | ||
| 1164 | *key_lenp = key_len; | ||
| 1165 | return 0; | ||
| 1166 | } else { | ||
| 1167 | return -EINVAL; | 1416 | return -EINVAL; |
| 1168 | } | 1417 | } |
| 1169 | } | 1418 | |
| 1419 | SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask); | ||
| 1420 | attrs &= ~(1 << OVS_KEY_ATTR_VLAN); | ||
| 1421 | } else if (!is_mask) | ||
| 1422 | SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); | ||
| 1170 | 1423 | ||
| 1171 | if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { | 1424 | if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { |
| 1172 | swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); | 1425 | __be16 eth_type; |
| 1173 | if (ntohs(swkey->eth.type) < ETH_P_802_3_MIN) | 1426 | |
| 1427 | eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); | ||
| 1428 | if (is_mask) { | ||
| 1429 | /* Always exact match EtherType. */ | ||
| 1430 | eth_type = htons(0xffff); | ||
| 1431 | } else if (ntohs(eth_type) < ETH_P_802_3_MIN) { | ||
| 1432 | OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n", | ||
| 1433 | ntohs(eth_type), ETH_P_802_3_MIN); | ||
| 1174 | return -EINVAL; | 1434 | return -EINVAL; |
| 1435 | } | ||
| 1436 | |||
| 1437 | SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask); | ||
| 1175 | attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); | 1438 | attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); |
| 1176 | } else { | 1439 | } else if (!is_mask) { |
| 1177 | swkey->eth.type = htons(ETH_P_802_2); | 1440 | SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask); |
| 1178 | } | 1441 | } |
| 1179 | 1442 | ||
| 1180 | if (swkey->eth.type == htons(ETH_P_IP)) { | 1443 | if (attrs & (1 << OVS_KEY_ATTR_IPV4)) { |
| 1181 | const struct ovs_key_ipv4 *ipv4_key; | 1444 | const struct ovs_key_ipv4 *ipv4_key; |
| 1182 | 1445 | ||
| 1183 | if (!(attrs & (1 << OVS_KEY_ATTR_IPV4))) | ||
| 1184 | return -EINVAL; | ||
| 1185 | attrs &= ~(1 << OVS_KEY_ATTR_IPV4); | ||
| 1186 | |||
| 1187 | key_len = SW_FLOW_KEY_OFFSET(ipv4.addr); | ||
| 1188 | ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]); | 1446 | ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]); |
| 1189 | if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) | 1447 | if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) { |
| 1448 | OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n", | ||
| 1449 | ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX); | ||
| 1190 | return -EINVAL; | 1450 | return -EINVAL; |
| 1191 | swkey->ip.proto = ipv4_key->ipv4_proto; | ||
| 1192 | swkey->ip.tos = ipv4_key->ipv4_tos; | ||
| 1193 | swkey->ip.ttl = ipv4_key->ipv4_ttl; | ||
| 1194 | swkey->ip.frag = ipv4_key->ipv4_frag; | ||
| 1195 | swkey->ipv4.addr.src = ipv4_key->ipv4_src; | ||
| 1196 | swkey->ipv4.addr.dst = ipv4_key->ipv4_dst; | ||
| 1197 | |||
| 1198 | if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
| 1199 | err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs); | ||
| 1200 | if (err) | ||
| 1201 | return err; | ||
| 1202 | } | 1451 | } |
| 1203 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | 1452 | SW_FLOW_KEY_PUT(match, ip.proto, |
| 1204 | const struct ovs_key_ipv6 *ipv6_key; | 1453 | ipv4_key->ipv4_proto, is_mask); |
| 1454 | SW_FLOW_KEY_PUT(match, ip.tos, | ||
| 1455 | ipv4_key->ipv4_tos, is_mask); | ||
| 1456 | SW_FLOW_KEY_PUT(match, ip.ttl, | ||
| 1457 | ipv4_key->ipv4_ttl, is_mask); | ||
| 1458 | SW_FLOW_KEY_PUT(match, ip.frag, | ||
| 1459 | ipv4_key->ipv4_frag, is_mask); | ||
| 1460 | SW_FLOW_KEY_PUT(match, ipv4.addr.src, | ||
| 1461 | ipv4_key->ipv4_src, is_mask); | ||
| 1462 | SW_FLOW_KEY_PUT(match, ipv4.addr.dst, | ||
| 1463 | ipv4_key->ipv4_dst, is_mask); | ||
| 1464 | attrs &= ~(1 << OVS_KEY_ATTR_IPV4); | ||
| 1465 | } | ||
| 1205 | 1466 | ||
| 1206 | if (!(attrs & (1 << OVS_KEY_ATTR_IPV6))) | 1467 | if (attrs & (1 << OVS_KEY_ATTR_IPV6)) { |
| 1207 | return -EINVAL; | 1468 | const struct ovs_key_ipv6 *ipv6_key; |
| 1208 | attrs &= ~(1 << OVS_KEY_ATTR_IPV6); | ||
| 1209 | 1469 | ||
| 1210 | key_len = SW_FLOW_KEY_OFFSET(ipv6.label); | ||
| 1211 | ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]); | 1470 | ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]); |
| 1212 | if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) | 1471 | if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) { |
| 1472 | OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n", | ||
| 1473 | ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); | ||
| 1213 | return -EINVAL; | 1474 | return -EINVAL; |
| 1214 | swkey->ipv6.label = ipv6_key->ipv6_label; | ||
| 1215 | swkey->ip.proto = ipv6_key->ipv6_proto; | ||
| 1216 | swkey->ip.tos = ipv6_key->ipv6_tclass; | ||
| 1217 | swkey->ip.ttl = ipv6_key->ipv6_hlimit; | ||
| 1218 | swkey->ip.frag = ipv6_key->ipv6_frag; | ||
| 1219 | memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src, | ||
| 1220 | sizeof(swkey->ipv6.addr.src)); | ||
| 1221 | memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst, | ||
| 1222 | sizeof(swkey->ipv6.addr.dst)); | ||
| 1223 | |||
| 1224 | if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
| 1225 | err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs); | ||
| 1226 | if (err) | ||
| 1227 | return err; | ||
| 1228 | } | 1475 | } |
| 1229 | } else if (swkey->eth.type == htons(ETH_P_ARP) || | 1476 | SW_FLOW_KEY_PUT(match, ipv6.label, |
| 1230 | swkey->eth.type == htons(ETH_P_RARP)) { | 1477 | ipv6_key->ipv6_label, is_mask); |
| 1478 | SW_FLOW_KEY_PUT(match, ip.proto, | ||
| 1479 | ipv6_key->ipv6_proto, is_mask); | ||
| 1480 | SW_FLOW_KEY_PUT(match, ip.tos, | ||
| 1481 | ipv6_key->ipv6_tclass, is_mask); | ||
| 1482 | SW_FLOW_KEY_PUT(match, ip.ttl, | ||
| 1483 | ipv6_key->ipv6_hlimit, is_mask); | ||
| 1484 | SW_FLOW_KEY_PUT(match, ip.frag, | ||
| 1485 | ipv6_key->ipv6_frag, is_mask); | ||
| 1486 | SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src, | ||
| 1487 | ipv6_key->ipv6_src, | ||
| 1488 | sizeof(match->key->ipv6.addr.src), | ||
| 1489 | is_mask); | ||
| 1490 | SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst, | ||
| 1491 | ipv6_key->ipv6_dst, | ||
| 1492 | sizeof(match->key->ipv6.addr.dst), | ||
| 1493 | is_mask); | ||
| 1494 | |||
| 1495 | attrs &= ~(1 << OVS_KEY_ATTR_IPV6); | ||
| 1496 | } | ||
| 1497 | |||
| 1498 | if (attrs & (1 << OVS_KEY_ATTR_ARP)) { | ||
| 1231 | const struct ovs_key_arp *arp_key; | 1499 | const struct ovs_key_arp *arp_key; |
| 1232 | 1500 | ||
| 1233 | if (!(attrs & (1 << OVS_KEY_ATTR_ARP))) | 1501 | arp_key = nla_data(a[OVS_KEY_ATTR_ARP]); |
| 1502 | if (!is_mask && (arp_key->arp_op & htons(0xff00))) { | ||
| 1503 | OVS_NLERR("Unknown ARP opcode (opcode=%d).\n", | ||
| 1504 | arp_key->arp_op); | ||
| 1234 | return -EINVAL; | 1505 | return -EINVAL; |
| 1506 | } | ||
| 1507 | |||
| 1508 | SW_FLOW_KEY_PUT(match, ipv4.addr.src, | ||
| 1509 | arp_key->arp_sip, is_mask); | ||
| 1510 | SW_FLOW_KEY_PUT(match, ipv4.addr.dst, | ||
| 1511 | arp_key->arp_tip, is_mask); | ||
| 1512 | SW_FLOW_KEY_PUT(match, ip.proto, | ||
| 1513 | ntohs(arp_key->arp_op), is_mask); | ||
| 1514 | SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha, | ||
| 1515 | arp_key->arp_sha, ETH_ALEN, is_mask); | ||
| 1516 | SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha, | ||
| 1517 | arp_key->arp_tha, ETH_ALEN, is_mask); | ||
| 1518 | |||
| 1235 | attrs &= ~(1 << OVS_KEY_ATTR_ARP); | 1519 | attrs &= ~(1 << OVS_KEY_ATTR_ARP); |
| 1520 | } | ||
| 1236 | 1521 | ||
| 1237 | key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); | 1522 | if (attrs & (1 << OVS_KEY_ATTR_TCP)) { |
| 1238 | arp_key = nla_data(a[OVS_KEY_ATTR_ARP]); | 1523 | const struct ovs_key_tcp *tcp_key; |
| 1239 | swkey->ipv4.addr.src = arp_key->arp_sip; | 1524 | |
| 1240 | swkey->ipv4.addr.dst = arp_key->arp_tip; | 1525 | tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); |
| 1241 | if (arp_key->arp_op & htons(0xff00)) | 1526 | if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { |
| 1527 | SW_FLOW_KEY_PUT(match, ipv4.tp.src, | ||
| 1528 | tcp_key->tcp_src, is_mask); | ||
| 1529 | SW_FLOW_KEY_PUT(match, ipv4.tp.dst, | ||
| 1530 | tcp_key->tcp_dst, is_mask); | ||
| 1531 | } else { | ||
| 1532 | SW_FLOW_KEY_PUT(match, ipv6.tp.src, | ||
| 1533 | tcp_key->tcp_src, is_mask); | ||
| 1534 | SW_FLOW_KEY_PUT(match, ipv6.tp.dst, | ||
| 1535 | tcp_key->tcp_dst, is_mask); | ||
| 1536 | } | ||
| 1537 | attrs &= ~(1 << OVS_KEY_ATTR_TCP); | ||
| 1538 | } | ||
| 1539 | |||
| 1540 | if (attrs & (1 << OVS_KEY_ATTR_UDP)) { | ||
| 1541 | const struct ovs_key_udp *udp_key; | ||
| 1542 | |||
| 1543 | udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); | ||
| 1544 | if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { | ||
| 1545 | SW_FLOW_KEY_PUT(match, ipv4.tp.src, | ||
| 1546 | udp_key->udp_src, is_mask); | ||
| 1547 | SW_FLOW_KEY_PUT(match, ipv4.tp.dst, | ||
| 1548 | udp_key->udp_dst, is_mask); | ||
| 1549 | } else { | ||
| 1550 | SW_FLOW_KEY_PUT(match, ipv6.tp.src, | ||
| 1551 | udp_key->udp_src, is_mask); | ||
| 1552 | SW_FLOW_KEY_PUT(match, ipv6.tp.dst, | ||
| 1553 | udp_key->udp_dst, is_mask); | ||
| 1554 | } | ||
| 1555 | attrs &= ~(1 << OVS_KEY_ATTR_UDP); | ||
| 1556 | } | ||
| 1557 | |||
| 1558 | if (attrs & (1 << OVS_KEY_ATTR_SCTP)) { | ||
| 1559 | const struct ovs_key_sctp *sctp_key; | ||
| 1560 | |||
| 1561 | sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]); | ||
| 1562 | if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { | ||
| 1563 | SW_FLOW_KEY_PUT(match, ipv4.tp.src, | ||
| 1564 | sctp_key->sctp_src, is_mask); | ||
| 1565 | SW_FLOW_KEY_PUT(match, ipv4.tp.dst, | ||
| 1566 | sctp_key->sctp_dst, is_mask); | ||
| 1567 | } else { | ||
| 1568 | SW_FLOW_KEY_PUT(match, ipv6.tp.src, | ||
| 1569 | sctp_key->sctp_src, is_mask); | ||
| 1570 | SW_FLOW_KEY_PUT(match, ipv6.tp.dst, | ||
| 1571 | sctp_key->sctp_dst, is_mask); | ||
| 1572 | } | ||
| 1573 | attrs &= ~(1 << OVS_KEY_ATTR_SCTP); | ||
| 1574 | } | ||
| 1575 | |||
| 1576 | if (attrs & (1 << OVS_KEY_ATTR_ICMP)) { | ||
| 1577 | const struct ovs_key_icmp *icmp_key; | ||
| 1578 | |||
| 1579 | icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]); | ||
| 1580 | SW_FLOW_KEY_PUT(match, ipv4.tp.src, | ||
| 1581 | htons(icmp_key->icmp_type), is_mask); | ||
| 1582 | SW_FLOW_KEY_PUT(match, ipv4.tp.dst, | ||
| 1583 | htons(icmp_key->icmp_code), is_mask); | ||
| 1584 | attrs &= ~(1 << OVS_KEY_ATTR_ICMP); | ||
| 1585 | } | ||
| 1586 | |||
| 1587 | if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) { | ||
| 1588 | const struct ovs_key_icmpv6 *icmpv6_key; | ||
| 1589 | |||
| 1590 | icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]); | ||
| 1591 | SW_FLOW_KEY_PUT(match, ipv6.tp.src, | ||
| 1592 | htons(icmpv6_key->icmpv6_type), is_mask); | ||
| 1593 | SW_FLOW_KEY_PUT(match, ipv6.tp.dst, | ||
| 1594 | htons(icmpv6_key->icmpv6_code), is_mask); | ||
| 1595 | attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6); | ||
| 1596 | } | ||
| 1597 | |||
| 1598 | if (attrs & (1 << OVS_KEY_ATTR_ND)) { | ||
| 1599 | const struct ovs_key_nd *nd_key; | ||
| 1600 | |||
| 1601 | nd_key = nla_data(a[OVS_KEY_ATTR_ND]); | ||
| 1602 | SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target, | ||
| 1603 | nd_key->nd_target, | ||
| 1604 | sizeof(match->key->ipv6.nd.target), | ||
| 1605 | is_mask); | ||
| 1606 | SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll, | ||
| 1607 | nd_key->nd_sll, ETH_ALEN, is_mask); | ||
| 1608 | SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll, | ||
| 1609 | nd_key->nd_tll, ETH_ALEN, is_mask); | ||
| 1610 | attrs &= ~(1 << OVS_KEY_ATTR_ND); | ||
| 1611 | } | ||
| 1612 | |||
| 1613 | if (attrs != 0) | ||
| 1614 | return -EINVAL; | ||
| 1615 | |||
| 1616 | return 0; | ||
| 1617 | } | ||
| 1618 | |||
| 1619 | /** | ||
| 1620 | * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and | ||
| 1621 | * mask. In case the 'mask' is NULL, the flow is treated as exact match | ||
| 1622 | * flow. Otherwise, it is treated as a wildcarded flow, except the mask | ||
| 1623 | * does not include any don't care bit. | ||
| 1624 | * @match: receives the extracted flow match information. | ||
| 1625 | * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute | ||
| 1626 | * sequence. The fields should of the packet that triggered the creation | ||
| 1627 | * of this flow. | ||
| 1628 | * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink | ||
| 1629 | * attribute specifies the mask field of the wildcarded flow. | ||
| 1630 | */ | ||
| 1631 | int ovs_match_from_nlattrs(struct sw_flow_match *match, | ||
| 1632 | const struct nlattr *key, | ||
| 1633 | const struct nlattr *mask) | ||
| 1634 | { | ||
| 1635 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; | ||
| 1636 | const struct nlattr *encap; | ||
| 1637 | u64 key_attrs = 0; | ||
| 1638 | u64 mask_attrs = 0; | ||
| 1639 | bool encap_valid = false; | ||
| 1640 | int err; | ||
| 1641 | |||
| 1642 | err = parse_flow_nlattrs(key, a, &key_attrs); | ||
| 1643 | if (err) | ||
| 1644 | return err; | ||
| 1645 | |||
| 1646 | if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) && | ||
| 1647 | (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) && | ||
| 1648 | (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) { | ||
| 1649 | __be16 tci; | ||
| 1650 | |||
| 1651 | if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) && | ||
| 1652 | (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) { | ||
| 1653 | OVS_NLERR("Invalid Vlan frame.\n"); | ||
| 1242 | return -EINVAL; | 1654 | return -EINVAL; |
| 1243 | swkey->ip.proto = ntohs(arp_key->arp_op); | 1655 | } |
| 1244 | memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN); | 1656 | |
| 1245 | memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN); | 1657 | key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); |
| 1658 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); | ||
| 1659 | encap = a[OVS_KEY_ATTR_ENCAP]; | ||
| 1660 | key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); | ||
| 1661 | encap_valid = true; | ||
| 1662 | |||
| 1663 | if (tci & htons(VLAN_TAG_PRESENT)) { | ||
| 1664 | err = parse_flow_nlattrs(encap, a, &key_attrs); | ||
| 1665 | if (err) | ||
| 1666 | return err; | ||
| 1667 | } else if (!tci) { | ||
| 1668 | /* Corner case for truncated 802.1Q header. */ | ||
| 1669 | if (nla_len(encap)) { | ||
| 1670 | OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n"); | ||
| 1671 | return -EINVAL; | ||
| 1672 | } | ||
| 1673 | } else { | ||
| 1674 | OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n"); | ||
| 1675 | return -EINVAL; | ||
| 1676 | } | ||
| 1246 | } | 1677 | } |
| 1247 | 1678 | ||
| 1248 | if (attrs) | 1679 | err = ovs_key_from_nlattrs(match, key_attrs, a, false); |
| 1680 | if (err) | ||
| 1681 | return err; | ||
| 1682 | |||
| 1683 | if (mask) { | ||
| 1684 | err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); | ||
| 1685 | if (err) | ||
| 1686 | return err; | ||
| 1687 | |||
| 1688 | if (mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) { | ||
| 1689 | __be16 eth_type = 0; | ||
| 1690 | __be16 tci = 0; | ||
| 1691 | |||
| 1692 | if (!encap_valid) { | ||
| 1693 | OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n"); | ||
| 1694 | return -EINVAL; | ||
| 1695 | } | ||
| 1696 | |||
| 1697 | mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); | ||
| 1698 | if (a[OVS_KEY_ATTR_ETHERTYPE]) | ||
| 1699 | eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); | ||
| 1700 | |||
| 1701 | if (eth_type == htons(0xffff)) { | ||
| 1702 | mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); | ||
| 1703 | encap = a[OVS_KEY_ATTR_ENCAP]; | ||
| 1704 | err = parse_flow_mask_nlattrs(encap, a, &mask_attrs); | ||
| 1705 | } else { | ||
| 1706 | OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n", | ||
| 1707 | ntohs(eth_type)); | ||
| 1708 | return -EINVAL; | ||
| 1709 | } | ||
| 1710 | |||
| 1711 | if (a[OVS_KEY_ATTR_VLAN]) | ||
| 1712 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); | ||
| 1713 | |||
| 1714 | if (!(tci & htons(VLAN_TAG_PRESENT))) { | ||
| 1715 | OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci)); | ||
| 1716 | return -EINVAL; | ||
| 1717 | } | ||
| 1718 | } | ||
| 1719 | |||
| 1720 | err = ovs_key_from_nlattrs(match, mask_attrs, a, true); | ||
| 1721 | if (err) | ||
| 1722 | return err; | ||
| 1723 | } else { | ||
| 1724 | /* Populate exact match flow's key mask. */ | ||
| 1725 | if (match->mask) | ||
| 1726 | ovs_sw_flow_mask_set(match->mask, &match->range, 0xff); | ||
| 1727 | } | ||
| 1728 | |||
| 1729 | if (!ovs_match_validate(match, key_attrs, mask_attrs)) | ||
| 1249 | return -EINVAL; | 1730 | return -EINVAL; |
| 1250 | *key_lenp = key_len; | ||
| 1251 | 1731 | ||
| 1252 | return 0; | 1732 | return 0; |
| 1253 | } | 1733 | } |
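
Note: the parser above keeps two 64-bit presence bitmaps (key_attrs and mask_attrs), one bit per OVS_KEY_ATTR_* type, and clears the ETHERTYPE/ENCAP bits once the outer 802.1Q header has been consumed so the encapsulated attributes can repopulate them. A minimal standalone sketch of that bookkeeping follows; the enum values and helper names are illustrative only, not the kernel's definitions.

/*
 * Standalone sketch of the attribute-presence bookkeeping used by the
 * parser: each OVS_KEY_ATTR_* index sets one bit in a 64-bit map, and
 * the VLAN path clears the ETHERTYPE/ENCAP bits once the outer 802.1Q
 * header has been consumed.  The enum values and helper names below
 * are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { ATTR_ETHERNET = 1, ATTR_VLAN = 2, ATTR_ETHERTYPE = 3, ATTR_ENCAP = 4 };

static void attr_set(uint64_t *map, int type)   { *map |= 1ULL << type; }
static bool attr_test(uint64_t map, int type)   { return map & (1ULL << type); }
static void attr_clear(uint64_t *map, int type) { *map &= ~(1ULL << type); }

int main(void)
{
	uint64_t key_attrs = 0;

	attr_set(&key_attrs, ATTR_ETHERNET);
	attr_set(&key_attrs, ATTR_ETHERTYPE);
	attr_set(&key_attrs, ATTR_VLAN);
	attr_set(&key_attrs, ATTR_ENCAP);

	/* Outer 802.1Q header consumed: drop the bits that the
	 * encapsulated attributes will repopulate. */
	if (attr_test(key_attrs, ATTR_VLAN) && attr_test(key_attrs, ATTR_ENCAP)) {
		attr_clear(&key_attrs, ATTR_ETHERTYPE);
		attr_clear(&key_attrs, ATTR_ENCAP);
	}

	printf("key_attrs = %#llx\n", (unsigned long long)key_attrs);
	return 0;
}
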
| @@ -1255,7 +1735,6 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, | |||
| 1255 | /** | 1735 | /** |
| 1256 | * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key. | 1736 | * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key. |
| 1257 | * @flow: Receives extracted in_port, priority, tun_key and skb_mark. | 1737 | * @flow: Receives extracted in_port, priority, tun_key and skb_mark. |
| 1258 | * @key_len: Length of key in @flow. Used for calculating flow hash. | ||
| 1259 | * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute | 1738 | * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute |
| 1260 | * sequence. | 1739 | * sequence. |
| 1261 | * | 1740 | * |
| @@ -1264,102 +1743,100 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, | |||
| 1264 | * get the metadata, that is, the parts of the flow key that cannot be | 1743 | * get the metadata, that is, the parts of the flow key that cannot be |
| 1265 | * extracted from the packet itself. | 1744 | * extracted from the packet itself. |
| 1266 | */ | 1745 | */ |
| 1267 | int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, | 1746 | |
| 1268 | const struct nlattr *attr) | 1747 | int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, |
| 1748 | const struct nlattr *attr) | ||
| 1269 | { | 1749 | { |
| 1270 | struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; | 1750 | struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; |
| 1271 | const struct nlattr *nla; | 1751 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; |
| 1272 | int rem; | 1752 | u64 attrs = 0; |
| 1753 | int err; | ||
| 1754 | struct sw_flow_match match; | ||
| 1273 | 1755 | ||
| 1274 | flow->key.phy.in_port = DP_MAX_PORTS; | 1756 | flow->key.phy.in_port = DP_MAX_PORTS; |
| 1275 | flow->key.phy.priority = 0; | 1757 | flow->key.phy.priority = 0; |
| 1276 | flow->key.phy.skb_mark = 0; | 1758 | flow->key.phy.skb_mark = 0; |
| 1277 | memset(tun_key, 0, sizeof(flow->key.tun_key)); | 1759 | memset(tun_key, 0, sizeof(flow->key.tun_key)); |
| 1278 | 1760 | ||
| 1279 | nla_for_each_nested(nla, attr, rem) { | 1761 | err = parse_flow_nlattrs(attr, a, &attrs); |
| 1280 | int type = nla_type(nla); | 1762 | if (err) |
| 1281 | |||
| 1282 | if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) { | ||
| 1283 | int err; | ||
| 1284 | |||
| 1285 | if (nla_len(nla) != ovs_key_lens[type]) | ||
| 1286 | return -EINVAL; | ||
| 1287 | |||
| 1288 | switch (type) { | ||
| 1289 | case OVS_KEY_ATTR_PRIORITY: | ||
| 1290 | flow->key.phy.priority = nla_get_u32(nla); | ||
| 1291 | break; | ||
| 1292 | |||
| 1293 | case OVS_KEY_ATTR_TUNNEL: | ||
| 1294 | err = ovs_ipv4_tun_from_nlattr(nla, tun_key); | ||
| 1295 | if (err) | ||
| 1296 | return err; | ||
| 1297 | break; | ||
| 1298 | |||
| 1299 | case OVS_KEY_ATTR_IN_PORT: | ||
| 1300 | if (nla_get_u32(nla) >= DP_MAX_PORTS) | ||
| 1301 | return -EINVAL; | ||
| 1302 | flow->key.phy.in_port = nla_get_u32(nla); | ||
| 1303 | break; | ||
| 1304 | |||
| 1305 | case OVS_KEY_ATTR_SKB_MARK: | ||
| 1306 | flow->key.phy.skb_mark = nla_get_u32(nla); | ||
| 1307 | break; | ||
| 1308 | } | ||
| 1309 | } | ||
| 1310 | } | ||
| 1311 | if (rem) | ||
| 1312 | return -EINVAL; | 1763 | return -EINVAL; |
| 1313 | 1764 | ||
| 1314 | flow->hash = ovs_flow_hash(&flow->key, | 1765 | memset(&match, 0, sizeof(match)); |
| 1315 | flow_key_start(&flow->key), key_len); | 1766 | match.key = &flow->key; |
| 1767 | |||
| 1768 | err = metadata_from_nlattrs(&match, &attrs, a, false); | ||
| 1769 | if (err) | ||
| 1770 | return err; | ||
| 1316 | 1771 | ||
| 1317 | return 0; | 1772 | return 0; |
| 1318 | } | 1773 | } |
| 1319 | 1774 | ||
| 1320 | int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) | 1775 | int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, |
| 1776 | const struct sw_flow_key *output, struct sk_buff *skb) | ||
| 1321 | { | 1777 | { |
| 1322 | struct ovs_key_ethernet *eth_key; | 1778 | struct ovs_key_ethernet *eth_key; |
| 1323 | struct nlattr *nla, *encap; | 1779 | struct nlattr *nla, *encap; |
| 1780 | bool is_mask = (swkey != output); | ||
| 1324 | 1781 | ||
| 1325 | if (swkey->phy.priority && | 1782 | if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority)) |
| 1326 | nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority)) | ||
| 1327 | goto nla_put_failure; | 1783 | goto nla_put_failure; |
| 1328 | 1784 | ||
| 1329 | if (swkey->tun_key.ipv4_dst && | 1785 | if ((swkey->tun_key.ipv4_dst || is_mask) && |
| 1330 | ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key)) | 1786 | ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key)) |
| 1331 | goto nla_put_failure; | 1787 | goto nla_put_failure; |
| 1332 | 1788 | ||
| 1333 | if (swkey->phy.in_port != DP_MAX_PORTS && | 1789 | if (swkey->phy.in_port == DP_MAX_PORTS) { |
| 1334 | nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) | 1790 | if (is_mask && (output->phy.in_port == 0xffff)) |
| 1335 | goto nla_put_failure; | 1791 | if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff)) |
| 1792 | goto nla_put_failure; | ||
| 1793 | } else { | ||
| 1794 | u16 upper_u16; | ||
| 1795 | upper_u16 = !is_mask ? 0 : 0xffff; | ||
| 1336 | 1796 | ||
| 1337 | if (swkey->phy.skb_mark && | 1797 | if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, |
| 1338 | nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark)) | 1798 | (upper_u16 << 16) | output->phy.in_port)) |
| 1799 | goto nla_put_failure; | ||
| 1800 | } | ||
| 1801 | |||
| 1802 | if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark)) | ||
| 1339 | goto nla_put_failure; | 1803 | goto nla_put_failure; |
| 1340 | 1804 | ||
| 1341 | nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); | 1805 | nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); |
| 1342 | if (!nla) | 1806 | if (!nla) |
| 1343 | goto nla_put_failure; | 1807 | goto nla_put_failure; |
| 1808 | |||
| 1344 | eth_key = nla_data(nla); | 1809 | eth_key = nla_data(nla); |
| 1345 | memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN); | 1810 | memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN); |
| 1346 | memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN); | 1811 | memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN); |
| 1347 | 1812 | ||
| 1348 | if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { | 1813 | if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { |
| 1349 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) || | 1814 | __be16 eth_type; |
| 1350 | nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci)) | 1815 | eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff); |
| 1816 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) || | ||
| 1817 | nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci)) | ||
| 1351 | goto nla_put_failure; | 1818 | goto nla_put_failure; |
| 1352 | encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); | 1819 | encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); |
| 1353 | if (!swkey->eth.tci) | 1820 | if (!swkey->eth.tci) |
| 1354 | goto unencap; | 1821 | goto unencap; |
| 1355 | } else { | 1822 | } else |
| 1356 | encap = NULL; | 1823 | encap = NULL; |
| 1357 | } | ||
| 1358 | 1824 | ||
| 1359 | if (swkey->eth.type == htons(ETH_P_802_2)) | 1825 | if (swkey->eth.type == htons(ETH_P_802_2)) { |
| 1826 | /* | ||
| 1827 | * Ethertype 802.2 is represented in the netlink with omitted | ||
| 1828 | * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and | ||
| 1829 | * 0xffff in the mask attribute. Ethertype can also | ||
| 1830 | * be wildcarded. | ||
| 1831 | */ | ||
| 1832 | if (is_mask && output->eth.type) | ||
| 1833 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, | ||
| 1834 | output->eth.type)) | ||
| 1835 | goto nla_put_failure; | ||
| 1360 | goto unencap; | 1836 | goto unencap; |
| 1837 | } | ||
| 1361 | 1838 | ||
| 1362 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type)) | 1839 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type)) |
| 1363 | goto nla_put_failure; | 1840 | goto nla_put_failure; |
| 1364 | 1841 | ||
| 1365 | if (swkey->eth.type == htons(ETH_P_IP)) { | 1842 | if (swkey->eth.type == htons(ETH_P_IP)) { |
| @@ -1369,12 +1846,12 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) | |||
| 1369 | if (!nla) | 1846 | if (!nla) |
| 1370 | goto nla_put_failure; | 1847 | goto nla_put_failure; |
| 1371 | ipv4_key = nla_data(nla); | 1848 | ipv4_key = nla_data(nla); |
| 1372 | ipv4_key->ipv4_src = swkey->ipv4.addr.src; | 1849 | ipv4_key->ipv4_src = output->ipv4.addr.src; |
| 1373 | ipv4_key->ipv4_dst = swkey->ipv4.addr.dst; | 1850 | ipv4_key->ipv4_dst = output->ipv4.addr.dst; |
| 1374 | ipv4_key->ipv4_proto = swkey->ip.proto; | 1851 | ipv4_key->ipv4_proto = output->ip.proto; |
| 1375 | ipv4_key->ipv4_tos = swkey->ip.tos; | 1852 | ipv4_key->ipv4_tos = output->ip.tos; |
| 1376 | ipv4_key->ipv4_ttl = swkey->ip.ttl; | 1853 | ipv4_key->ipv4_ttl = output->ip.ttl; |
| 1377 | ipv4_key->ipv4_frag = swkey->ip.frag; | 1854 | ipv4_key->ipv4_frag = output->ip.frag; |
| 1378 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | 1855 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { |
| 1379 | struct ovs_key_ipv6 *ipv6_key; | 1856 | struct ovs_key_ipv6 *ipv6_key; |
| 1380 | 1857 | ||
| @@ -1382,15 +1859,15 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) | |||
| 1382 | if (!nla) | 1859 | if (!nla) |
| 1383 | goto nla_put_failure; | 1860 | goto nla_put_failure; |
| 1384 | ipv6_key = nla_data(nla); | 1861 | ipv6_key = nla_data(nla); |
| 1385 | memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src, | 1862 | memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src, |
| 1386 | sizeof(ipv6_key->ipv6_src)); | 1863 | sizeof(ipv6_key->ipv6_src)); |
| 1387 | memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst, | 1864 | memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst, |
| 1388 | sizeof(ipv6_key->ipv6_dst)); | 1865 | sizeof(ipv6_key->ipv6_dst)); |
| 1389 | ipv6_key->ipv6_label = swkey->ipv6.label; | 1866 | ipv6_key->ipv6_label = output->ipv6.label; |
| 1390 | ipv6_key->ipv6_proto = swkey->ip.proto; | 1867 | ipv6_key->ipv6_proto = output->ip.proto; |
| 1391 | ipv6_key->ipv6_tclass = swkey->ip.tos; | 1868 | ipv6_key->ipv6_tclass = output->ip.tos; |
| 1392 | ipv6_key->ipv6_hlimit = swkey->ip.ttl; | 1869 | ipv6_key->ipv6_hlimit = output->ip.ttl; |
| 1393 | ipv6_key->ipv6_frag = swkey->ip.frag; | 1870 | ipv6_key->ipv6_frag = output->ip.frag; |
| 1394 | } else if (swkey->eth.type == htons(ETH_P_ARP) || | 1871 | } else if (swkey->eth.type == htons(ETH_P_ARP) || |
| 1395 | swkey->eth.type == htons(ETH_P_RARP)) { | 1872 | swkey->eth.type == htons(ETH_P_RARP)) { |
| 1396 | struct ovs_key_arp *arp_key; | 1873 | struct ovs_key_arp *arp_key; |
| @@ -1400,11 +1877,11 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) | |||
| 1400 | goto nla_put_failure; | 1877 | goto nla_put_failure; |
| 1401 | arp_key = nla_data(nla); | 1878 | arp_key = nla_data(nla); |
| 1402 | memset(arp_key, 0, sizeof(struct ovs_key_arp)); | 1879 | memset(arp_key, 0, sizeof(struct ovs_key_arp)); |
| 1403 | arp_key->arp_sip = swkey->ipv4.addr.src; | 1880 | arp_key->arp_sip = output->ipv4.addr.src; |
| 1404 | arp_key->arp_tip = swkey->ipv4.addr.dst; | 1881 | arp_key->arp_tip = output->ipv4.addr.dst; |
| 1405 | arp_key->arp_op = htons(swkey->ip.proto); | 1882 | arp_key->arp_op = htons(output->ip.proto); |
| 1406 | memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN); | 1883 | memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN); |
| 1407 | memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN); | 1884 | memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN); |
| 1408 | } | 1885 | } |
| 1409 | 1886 | ||
| 1410 | if ((swkey->eth.type == htons(ETH_P_IP) || | 1887 | if ((swkey->eth.type == htons(ETH_P_IP) || |
| @@ -1419,11 +1896,11 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) | |||
| 1419 | goto nla_put_failure; | 1896 | goto nla_put_failure; |
| 1420 | tcp_key = nla_data(nla); | 1897 | tcp_key = nla_data(nla); |
| 1421 | if (swkey->eth.type == htons(ETH_P_IP)) { | 1898 | if (swkey->eth.type == htons(ETH_P_IP)) { |
| 1422 | tcp_key->tcp_src = swkey->ipv4.tp.src; | 1899 | tcp_key->tcp_src = output->ipv4.tp.src; |
| 1423 | tcp_key->tcp_dst = swkey->ipv4.tp.dst; | 1900 | tcp_key->tcp_dst = output->ipv4.tp.dst; |
| 1424 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | 1901 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { |
| 1425 | tcp_key->tcp_src = swkey->ipv6.tp.src; | 1902 | tcp_key->tcp_src = output->ipv6.tp.src; |
| 1426 | tcp_key->tcp_dst = swkey->ipv6.tp.dst; | 1903 | tcp_key->tcp_dst = output->ipv6.tp.dst; |
| 1427 | } | 1904 | } |
| 1428 | } else if (swkey->ip.proto == IPPROTO_UDP) { | 1905 | } else if (swkey->ip.proto == IPPROTO_UDP) { |
| 1429 | struct ovs_key_udp *udp_key; | 1906 | struct ovs_key_udp *udp_key; |
| @@ -1433,11 +1910,25 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) | |||
| 1433 | goto nla_put_failure; | 1910 | goto nla_put_failure; |
| 1434 | udp_key = nla_data(nla); | 1911 | udp_key = nla_data(nla); |
| 1435 | if (swkey->eth.type == htons(ETH_P_IP)) { | 1912 | if (swkey->eth.type == htons(ETH_P_IP)) { |
| 1436 | udp_key->udp_src = swkey->ipv4.tp.src; | 1913 | udp_key->udp_src = output->ipv4.tp.src; |
| 1437 | udp_key->udp_dst = swkey->ipv4.tp.dst; | 1914 | udp_key->udp_dst = output->ipv4.tp.dst; |
| 1915 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
| 1916 | udp_key->udp_src = output->ipv6.tp.src; | ||
| 1917 | udp_key->udp_dst = output->ipv6.tp.dst; | ||
| 1918 | } | ||
| 1919 | } else if (swkey->ip.proto == IPPROTO_SCTP) { | ||
| 1920 | struct ovs_key_sctp *sctp_key; | ||
| 1921 | |||
| 1922 | nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key)); | ||
| 1923 | if (!nla) | ||
| 1924 | goto nla_put_failure; | ||
| 1925 | sctp_key = nla_data(nla); | ||
| 1926 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
| 1927 | sctp_key->sctp_src = output->ipv4.tp.src; | ||
| 1928 | sctp_key->sctp_dst = output->ipv4.tp.dst; | ||
| 1438 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | 1929 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { |
| 1439 | udp_key->udp_src = swkey->ipv6.tp.src; | 1930 | sctp_key->sctp_src = output->ipv6.tp.src; |
| 1440 | udp_key->udp_dst = swkey->ipv6.tp.dst; | 1931 | sctp_key->sctp_dst = output->ipv6.tp.dst; |
| 1441 | } | 1932 | } |
| 1442 | } else if (swkey->eth.type == htons(ETH_P_IP) && | 1933 | } else if (swkey->eth.type == htons(ETH_P_IP) && |
| 1443 | swkey->ip.proto == IPPROTO_ICMP) { | 1934 | swkey->ip.proto == IPPROTO_ICMP) { |
| @@ -1447,8 +1938,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) | |||
| 1447 | if (!nla) | 1938 | if (!nla) |
| 1448 | goto nla_put_failure; | 1939 | goto nla_put_failure; |
| 1449 | icmp_key = nla_data(nla); | 1940 | icmp_key = nla_data(nla); |
| 1450 | icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src); | 1941 | icmp_key->icmp_type = ntohs(output->ipv4.tp.src); |
| 1451 | icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst); | 1942 | icmp_key->icmp_code = ntohs(output->ipv4.tp.dst); |
| 1452 | } else if (swkey->eth.type == htons(ETH_P_IPV6) && | 1943 | } else if (swkey->eth.type == htons(ETH_P_IPV6) && |
| 1453 | swkey->ip.proto == IPPROTO_ICMPV6) { | 1944 | swkey->ip.proto == IPPROTO_ICMPV6) { |
| 1454 | struct ovs_key_icmpv6 *icmpv6_key; | 1945 | struct ovs_key_icmpv6 *icmpv6_key; |
| @@ -1458,8 +1949,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) | |||
| 1458 | if (!nla) | 1949 | if (!nla) |
| 1459 | goto nla_put_failure; | 1950 | goto nla_put_failure; |
| 1460 | icmpv6_key = nla_data(nla); | 1951 | icmpv6_key = nla_data(nla); |
| 1461 | icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src); | 1952 | icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src); |
| 1462 | icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst); | 1953 | icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst); |
| 1463 | 1954 | ||
| 1464 | if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || | 1955 | if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || |
| 1465 | icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { | 1956 | icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { |
| @@ -1469,10 +1960,10 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) | |||
| 1469 | if (!nla) | 1960 | if (!nla) |
| 1470 | goto nla_put_failure; | 1961 | goto nla_put_failure; |
| 1471 | nd_key = nla_data(nla); | 1962 | nd_key = nla_data(nla); |
| 1472 | memcpy(nd_key->nd_target, &swkey->ipv6.nd.target, | 1963 | memcpy(nd_key->nd_target, &output->ipv6.nd.target, |
| 1473 | sizeof(nd_key->nd_target)); | 1964 | sizeof(nd_key->nd_target)); |
| 1474 | memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN); | 1965 | memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN); |
| 1475 | memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN); | 1966 | memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN); |
| 1476 | } | 1967 | } |
| 1477 | } | 1968 | } |
| 1478 | } | 1969 | } |
| @@ -1491,6 +1982,9 @@ nla_put_failure: | |||
| 1491 | * Returns zero if successful or a negative error code. */ | 1982 | * Returns zero if successful or a negative error code. */ |
| 1492 | int ovs_flow_init(void) | 1983 | int ovs_flow_init(void) |
| 1493 | { | 1984 | { |
| 1985 | BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long)); | ||
| 1986 | BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); | ||
| 1987 | |||
| 1494 | flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, | 1988 | flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, |
| 1495 | 0, NULL); | 1989 | 0, NULL); |
| 1496 | if (flow_cache == NULL) | 1990 | if (flow_cache == NULL) |
| @@ -1504,3 +1998,84 @@ void ovs_flow_exit(void) | |||
| 1504 | { | 1998 | { |
| 1505 | kmem_cache_destroy(flow_cache); | 1999 | kmem_cache_destroy(flow_cache); |
| 1506 | } | 2000 | } |
| 2001 | |||
| 2002 | struct sw_flow_mask *ovs_sw_flow_mask_alloc(void) | ||
| 2003 | { | ||
| 2004 | struct sw_flow_mask *mask; | ||
| 2005 | |||
| 2006 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); | ||
| 2007 | if (mask) | ||
| 2008 | mask->ref_count = 0; | ||
| 2009 | |||
| 2010 | return mask; | ||
| 2011 | } | ||
| 2012 | |||
| 2013 | void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask) | ||
| 2014 | { | ||
| 2015 | mask->ref_count++; | ||
| 2016 | } | ||
| 2017 | |||
| 2018 | void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred) | ||
| 2019 | { | ||
| 2020 | if (!mask) | ||
| 2021 | return; | ||
| 2022 | |||
| 2023 | BUG_ON(!mask->ref_count); | ||
| 2024 | mask->ref_count--; | ||
| 2025 | |||
| 2026 | if (!mask->ref_count) { | ||
| 2027 | list_del_rcu(&mask->list); | ||
| 2028 | if (deferred) | ||
| 2029 | kfree_rcu(mask, rcu); | ||
| 2030 | else | ||
| 2031 | kfree(mask); | ||
| 2032 | } | ||
| 2033 | } | ||
| 2034 | |||
| 2035 | static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a, | ||
| 2036 | const struct sw_flow_mask *b) | ||
| 2037 | { | ||
| 2038 | u8 *a_ = (u8 *)&a->key + a->range.start; | ||
| 2039 | u8 *b_ = (u8 *)&b->key + b->range.start; | ||
| 2040 | |||
| 2041 | return (a->range.end == b->range.end) | ||
| 2042 | && (a->range.start == b->range.start) | ||
| 2043 | && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0); | ||
| 2044 | } | ||
| 2045 | |||
| 2046 | struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl, | ||
| 2047 | const struct sw_flow_mask *mask) | ||
| 2048 | { | ||
| 2049 | struct list_head *ml; | ||
| 2050 | |||
| 2051 | list_for_each(ml, tbl->mask_list) { | ||
| 2052 | struct sw_flow_mask *m; | ||
| 2053 | m = container_of(ml, struct sw_flow_mask, list); | ||
| 2054 | if (ovs_sw_flow_mask_equal(mask, m)) | ||
| 2055 | return m; | ||
| 2056 | } | ||
| 2057 | |||
| 2058 | return NULL; | ||
| 2059 | } | ||
| 2060 | |||
| 2061 | /** | ||
| 2062 | * add a new mask into the mask list. | ||
| 2063 | * The caller needs to make sure that 'mask' is not the same | ||
| 2064 | * as any masks that are already on the list. | ||
| 2065 | */ | ||
| 2066 | void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask) | ||
| 2067 | { | ||
| 2068 | list_add_rcu(&mask->list, tbl->mask_list); | ||
| 2069 | } | ||
| 2070 | |||
| 2071 | /** | ||
| 2072 | * Set 'range' fields in the mask to the value of 'val'. | ||
| 2073 | */ | ||
| 2074 | static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask, | ||
| 2075 | struct sw_flow_key_range *range, u8 val) | ||
| 2076 | { | ||
| 2077 | u8 *m = (u8 *)&mask->key + range->start; | ||
| 2078 | |||
| 2079 | mask->range = *range; | ||
| 2080 | memset(m, val, range_n_bytes(range)); | ||
| 2081 | } | ||
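
The mask helpers added above operate only on the byte range [range.start, range.end) of the key, so identical masks over the same range can be shared through the ref count. A userspace model of the range-limited compare/set is sketched below; the struct layout, the 64-byte key size, and range_n_bytes() are assumptions of the sketch, not the kernel structures.

/*
 * Userspace model of the range-limited mask helpers above: only the
 * bytes inside [range.start, range.end) of the key take part in the
 * compare and the set.  The struct layout, the 64-byte key size and
 * range_n_bytes() are assumptions of this sketch, not kernel code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct key_range { size_t start, end; };
struct mask { struct key_range range; unsigned char key[64]; };

static size_t range_n_bytes(const struct key_range *r)
{
	return r->end - r->start;
}

static bool mask_equal(const struct mask *a, const struct mask *b)
{
	return a->range.start == b->range.start &&
	       a->range.end == b->range.end &&
	       !memcmp(a->key + a->range.start, b->key + b->range.start,
		       range_n_bytes(&a->range));
}

static void mask_set(struct mask *m, const struct key_range *r, unsigned char val)
{
	m->range = *r;
	memset(m->key + r->start, val, range_n_bytes(r));
}

int main(void)
{
	struct mask a, b;
	struct key_range r = { .start = 8, .end = 24 };

	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));
	mask_set(&a, &r, 0xff);		/* exact match over the range */
	mask_set(&b, &r, 0xff);
	printf("masks equal: %d\n", mask_equal(&a, &b));
	return 0;
}

An exact-match flow then simply carries a mask of 0xff bytes over its whole key range, which is what the fallback path in the netlink parser above installs when no mask attributes are supplied.
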
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 66ef7220293e..212fbf7510c4 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2007-2011 Nicira, Inc. | 2 | * Copyright (c) 2007-2013 Nicira, Inc. |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
| 5 | * modify it under the terms of version 2 of the GNU General Public | 5 | * modify it under the terms of version 2 of the GNU General Public |
| @@ -33,6 +33,8 @@ | |||
| 33 | #include <net/inet_ecn.h> | 33 | #include <net/inet_ecn.h> |
| 34 | 34 | ||
| 35 | struct sk_buff; | 35 | struct sk_buff; |
| 36 | struct sw_flow_mask; | ||
| 37 | struct flow_table; | ||
| 36 | 38 | ||
| 37 | struct sw_flow_actions { | 39 | struct sw_flow_actions { |
| 38 | struct rcu_head rcu; | 40 | struct rcu_head rcu; |
| @@ -97,8 +99,8 @@ struct sw_flow_key { | |||
| 97 | } addr; | 99 | } addr; |
| 98 | union { | 100 | union { |
| 99 | struct { | 101 | struct { |
| 100 | __be16 src; /* TCP/UDP source port. */ | 102 | __be16 src; /* TCP/UDP/SCTP source port. */ |
| 101 | __be16 dst; /* TCP/UDP destination port. */ | 103 | __be16 dst; /* TCP/UDP/SCTP destination port. */ |
| 102 | } tp; | 104 | } tp; |
| 103 | struct { | 105 | struct { |
| 104 | u8 sha[ETH_ALEN]; /* ARP source hardware address. */ | 106 | u8 sha[ETH_ALEN]; /* ARP source hardware address. */ |
| @@ -113,8 +115,8 @@ struct sw_flow_key { | |||
| 113 | } addr; | 115 | } addr; |
| 114 | __be32 label; /* IPv6 flow label. */ | 116 | __be32 label; /* IPv6 flow label. */ |
| 115 | struct { | 117 | struct { |
| 116 | __be16 src; /* TCP/UDP source port. */ | 118 | __be16 src; /* TCP/UDP/SCTP source port. */ |
| 117 | __be16 dst; /* TCP/UDP destination port. */ | 119 | __be16 dst; /* TCP/UDP/SCTP destination port. */ |
| 118 | } tp; | 120 | } tp; |
| 119 | struct { | 121 | struct { |
| 120 | struct in6_addr target; /* ND target address. */ | 122 | struct in6_addr target; /* ND target address. */ |
| @@ -123,7 +125,7 @@ struct sw_flow_key { | |||
| 123 | } nd; | 125 | } nd; |
| 124 | } ipv6; | 126 | } ipv6; |
| 125 | }; | 127 | }; |
| 126 | }; | 128 | } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */ |
| 127 | 129 | ||
| 128 | struct sw_flow { | 130 | struct sw_flow { |
| 129 | struct rcu_head rcu; | 131 | struct rcu_head rcu; |
| @@ -131,6 +133,8 @@ struct sw_flow { | |||
| 131 | u32 hash; | 133 | u32 hash; |
| 132 | 134 | ||
| 133 | struct sw_flow_key key; | 135 | struct sw_flow_key key; |
| 136 | struct sw_flow_key unmasked_key; | ||
| 137 | struct sw_flow_mask *mask; | ||
| 134 | struct sw_flow_actions __rcu *sf_acts; | 138 | struct sw_flow_actions __rcu *sf_acts; |
| 135 | 139 | ||
| 136 | spinlock_t lock; /* Lock for values below. */ | 140 | spinlock_t lock; /* Lock for values below. */ |
| @@ -140,6 +144,20 @@ struct sw_flow { | |||
| 140 | u8 tcp_flags; /* Union of seen TCP flags. */ | 144 | u8 tcp_flags; /* Union of seen TCP flags. */ |
| 141 | }; | 145 | }; |
| 142 | 146 | ||
| 147 | struct sw_flow_key_range { | ||
| 148 | size_t start; | ||
| 149 | size_t end; | ||
| 150 | }; | ||
| 151 | |||
| 152 | struct sw_flow_match { | ||
| 153 | struct sw_flow_key *key; | ||
| 154 | struct sw_flow_key_range range; | ||
| 155 | struct sw_flow_mask *mask; | ||
| 156 | }; | ||
| 157 | |||
| 158 | void ovs_match_init(struct sw_flow_match *match, | ||
| 159 | struct sw_flow_key *key, struct sw_flow_mask *mask); | ||
| 160 | |||
| 143 | struct arp_eth_header { | 161 | struct arp_eth_header { |
| 144 | __be16 ar_hrd; /* format of hardware address */ | 162 | __be16 ar_hrd; /* format of hardware address */ |
| 145 | __be16 ar_pro; /* format of protocol address */ | 163 | __be16 ar_pro; /* format of protocol address */ |
| @@ -159,21 +177,21 @@ void ovs_flow_exit(void); | |||
| 159 | 177 | ||
| 160 | struct sw_flow *ovs_flow_alloc(void); | 178 | struct sw_flow *ovs_flow_alloc(void); |
| 161 | void ovs_flow_deferred_free(struct sw_flow *); | 179 | void ovs_flow_deferred_free(struct sw_flow *); |
| 162 | void ovs_flow_free(struct sw_flow *flow); | 180 | void ovs_flow_free(struct sw_flow *, bool deferred); |
| 163 | 181 | ||
| 164 | struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len); | 182 | struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len); |
| 165 | void ovs_flow_deferred_free_acts(struct sw_flow_actions *); | 183 | void ovs_flow_deferred_free_acts(struct sw_flow_actions *); |
| 166 | 184 | ||
| 167 | int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *, | 185 | int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *); |
| 168 | int *key_lenp); | ||
| 169 | void ovs_flow_used(struct sw_flow *, struct sk_buff *); | 186 | void ovs_flow_used(struct sw_flow *, struct sk_buff *); |
| 170 | u64 ovs_flow_used_time(unsigned long flow_jiffies); | 187 | u64 ovs_flow_used_time(unsigned long flow_jiffies); |
| 171 | 188 | int ovs_flow_to_nlattrs(const struct sw_flow_key *, | |
| 172 | int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); | 189 | const struct sw_flow_key *, struct sk_buff *); |
| 173 | int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, | 190 | int ovs_match_from_nlattrs(struct sw_flow_match *match, |
| 191 | const struct nlattr *, | ||
| 174 | const struct nlattr *); | 192 | const struct nlattr *); |
| 175 | int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, | 193 | int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, |
| 176 | const struct nlattr *attr); | 194 | const struct nlattr *attr); |
| 177 | 195 | ||
| 178 | #define MAX_ACTIONS_BUFSIZE (32 * 1024) | 196 | #define MAX_ACTIONS_BUFSIZE (32 * 1024) |
| 179 | #define TBL_MIN_BUCKETS 1024 | 197 | #define TBL_MIN_BUCKETS 1024 |
| @@ -182,6 +200,7 @@ struct flow_table { | |||
| 182 | struct flex_array *buckets; | 200 | struct flex_array *buckets; |
| 183 | unsigned int count, n_buckets; | 201 | unsigned int count, n_buckets; |
| 184 | struct rcu_head rcu; | 202 | struct rcu_head rcu; |
| 203 | struct list_head *mask_list; | ||
| 185 | int node_ver; | 204 | int node_ver; |
| 186 | u32 hash_seed; | 205 | u32 hash_seed; |
| 187 | bool keep_flows; | 206 | bool keep_flows; |
| @@ -197,22 +216,44 @@ static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table) | |||
| 197 | return (table->count > table->n_buckets); | 216 | return (table->count > table->n_buckets); |
| 198 | } | 217 | } |
| 199 | 218 | ||
| 200 | struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, | 219 | struct sw_flow *ovs_flow_lookup(struct flow_table *, |
| 201 | struct sw_flow_key *key, int len); | 220 | const struct sw_flow_key *); |
| 202 | void ovs_flow_tbl_destroy(struct flow_table *table); | 221 | struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table, |
| 203 | void ovs_flow_tbl_deferred_destroy(struct flow_table *table); | 222 | struct sw_flow_match *match); |
| 223 | |||
| 224 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred); | ||
| 204 | struct flow_table *ovs_flow_tbl_alloc(int new_size); | 225 | struct flow_table *ovs_flow_tbl_alloc(int new_size); |
| 205 | struct flow_table *ovs_flow_tbl_expand(struct flow_table *table); | 226 | struct flow_table *ovs_flow_tbl_expand(struct flow_table *table); |
| 206 | struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table); | 227 | struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table); |
| 207 | void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, | ||
| 208 | struct sw_flow_key *key, int key_len); | ||
| 209 | void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow); | ||
| 210 | 228 | ||
| 211 | struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx); | 229 | void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow); |
| 230 | void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow); | ||
| 231 | |||
| 232 | struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *idx); | ||
| 212 | extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1]; | 233 | extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1]; |
| 213 | int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, | 234 | int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, |
| 214 | struct ovs_key_ipv4_tunnel *tun_key); | 235 | struct sw_flow_match *match, bool is_mask); |
| 215 | int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, | 236 | int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, |
| 216 | const struct ovs_key_ipv4_tunnel *tun_key); | 237 | const struct ovs_key_ipv4_tunnel *tun_key, |
| 238 | const struct ovs_key_ipv4_tunnel *output); | ||
| 239 | |||
| 240 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
| 241 | const struct sw_flow_key *key, int key_end); | ||
| 242 | |||
| 243 | struct sw_flow_mask { | ||
| 244 | int ref_count; | ||
| 245 | struct rcu_head rcu; | ||
| 246 | struct list_head list; | ||
| 247 | struct sw_flow_key_range range; | ||
| 248 | struct sw_flow_key key; | ||
| 249 | }; | ||
| 217 | 250 | ||
| 251 | struct sw_flow_mask *ovs_sw_flow_mask_alloc(void); | ||
| 252 | void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *); | ||
| 253 | void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred); | ||
| 254 | void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *); | ||
| 255 | struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *, | ||
| 256 | const struct sw_flow_mask *); | ||
| 257 | void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src, | ||
| 258 | const struct sw_flow_mask *mask); | ||
| 218 | #endif /* flow.h */ | 259 | #endif /* flow.h */ |
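
The new __aligned(BITS_PER_LONG/8) annotation on sw_flow_key, together with the BUILD_BUG_ON checks in ovs_flow_init(), lets masking and comparison walk the key in word-sized chunks. The snippet below is only an illustrative model of that idea (the real ovs_flow_key_mask() lives in flow.c); the key size and the long-aligned start/end values are assumptions of the sketch.

/*
 * Illustrative model of masking a flow key in word-sized chunks, which
 * is what the __aligned(BITS_PER_LONG/8) annotation makes safe.  The
 * 64-byte key and the long-aligned start/end values are assumptions of
 * this sketch; the real ovs_flow_key_mask() lives in flow.c.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define KEY_BYTES 64	/* must be a multiple of sizeof(long) */

static void key_mask(unsigned long *dst, const unsigned long *src,
		     const unsigned long *mask, size_t start, size_t end)
{
	size_t i;

	for (i = start / sizeof(long); i < end / sizeof(long); i++)
		dst[i] = src[i] & mask[i];
}

int main(void)
{
	unsigned long src[KEY_BYTES / sizeof(long)];
	unsigned long msk[KEY_BYTES / sizeof(long)];
	unsigned long dst[KEY_BYTES / sizeof(long)] = { 0 };

	memset(src, 0xab, sizeof(src));
	memset(msk, 0x0f, sizeof(msk));	/* wildcard the high nibble of every byte */
	key_mask(dst, src, msk, 0, KEY_BYTES);

	printf("dst[0] = %#lx\n", dst[0]);
	return 0;
}
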
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index 493e9775dcda..c99dea543d64 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c | |||
| @@ -16,7 +16,6 @@ | |||
| 16 | * 02110-1301, USA | 16 | * 02110-1301, USA |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #ifdef CONFIG_OPENVSWITCH_GRE | ||
| 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 21 | 20 | ||
| 22 | #include <linux/if.h> | 21 | #include <linux/if.h> |
| @@ -177,10 +176,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) | |||
| 177 | 176 | ||
| 178 | skb->local_df = 1; | 177 | skb->local_df = 1; |
| 179 | 178 | ||
| 180 | return iptunnel_xmit(net, rt, skb, fl.saddr, | 179 | return iptunnel_xmit(rt, skb, fl.saddr, |
| 181 | OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, | 180 | OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, |
| 182 | OVS_CB(skb)->tun_key->ipv4_tos, | 181 | OVS_CB(skb)->tun_key->ipv4_tos, |
| 183 | OVS_CB(skb)->tun_key->ipv4_ttl, df); | 182 | OVS_CB(skb)->tun_key->ipv4_ttl, df, false); |
| 184 | err_free_rt: | 183 | err_free_rt: |
| 185 | ip_rt_put(rt); | 184 | ip_rt_put(rt); |
| 186 | error: | 185 | error: |
| @@ -271,5 +270,3 @@ const struct vport_ops ovs_gre_vport_ops = { | |||
| 271 | .get_name = gre_get_name, | 270 | .get_name = gre_get_name, |
| 272 | .send = gre_tnl_send, | 271 | .send = gre_tnl_send, |
| 273 | }; | 272 | }; |
| 274 | |||
| 275 | #endif /* OPENVSWITCH_GRE */ | ||
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 5982f3f62835..09d93c13cfd6 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/llc.h> | 25 | #include <linux/llc.h> |
| 26 | #include <linux/rtnetlink.h> | 26 | #include <linux/rtnetlink.h> |
| 27 | #include <linux/skbuff.h> | 27 | #include <linux/skbuff.h> |
| 28 | #include <linux/openvswitch.h> | ||
| 28 | 29 | ||
| 29 | #include <net/llc.h> | 30 | #include <net/llc.h> |
| 30 | 31 | ||
| @@ -74,6 +75,15 @@ static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb) | |||
| 74 | return RX_HANDLER_CONSUMED; | 75 | return RX_HANDLER_CONSUMED; |
| 75 | } | 76 | } |
| 76 | 77 | ||
| 78 | static struct net_device *get_dpdev(struct datapath *dp) | ||
| 79 | { | ||
| 80 | struct vport *local; | ||
| 81 | |||
| 82 | local = ovs_vport_ovsl(dp, OVSP_LOCAL); | ||
| 83 | BUG_ON(!local); | ||
| 84 | return netdev_vport_priv(local)->dev; | ||
| 85 | } | ||
| 86 | |||
| 77 | static struct vport *netdev_create(const struct vport_parms *parms) | 87 | static struct vport *netdev_create(const struct vport_parms *parms) |
| 78 | { | 88 | { |
| 79 | struct vport *vport; | 89 | struct vport *vport; |
| @@ -103,10 +113,15 @@ static struct vport *netdev_create(const struct vport_parms *parms) | |||
| 103 | } | 113 | } |
| 104 | 114 | ||
| 105 | rtnl_lock(); | 115 | rtnl_lock(); |
| 116 | err = netdev_master_upper_dev_link(netdev_vport->dev, | ||
| 117 | get_dpdev(vport->dp)); | ||
| 118 | if (err) | ||
| 119 | goto error_unlock; | ||
| 120 | |||
| 106 | err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook, | 121 | err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook, |
| 107 | vport); | 122 | vport); |
| 108 | if (err) | 123 | if (err) |
| 109 | goto error_unlock; | 124 | goto error_master_upper_dev_unlink; |
| 110 | 125 | ||
| 111 | dev_set_promiscuity(netdev_vport->dev, 1); | 126 | dev_set_promiscuity(netdev_vport->dev, 1); |
| 112 | netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH; | 127 | netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH; |
| @@ -114,6 +129,8 @@ static struct vport *netdev_create(const struct vport_parms *parms) | |||
| 114 | 129 | ||
| 115 | return vport; | 130 | return vport; |
| 116 | 131 | ||
| 132 | error_master_upper_dev_unlink: | ||
| 133 | netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp)); | ||
| 117 | error_unlock: | 134 | error_unlock: |
| 118 | rtnl_unlock(); | 135 | rtnl_unlock(); |
| 119 | error_put: | 136 | error_put: |
| @@ -140,6 +157,7 @@ static void netdev_destroy(struct vport *vport) | |||
| 140 | rtnl_lock(); | 157 | rtnl_lock(); |
| 141 | netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; | 158 | netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; |
| 142 | netdev_rx_handler_unregister(netdev_vport->dev); | 159 | netdev_rx_handler_unregister(netdev_vport->dev); |
| 160 | netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp)); | ||
| 143 | dev_set_promiscuity(netdev_vport->dev, -1); | 161 | dev_set_promiscuity(netdev_vport->dev, -1); |
| 144 | rtnl_unlock(); | 162 | rtnl_unlock(); |
| 145 | 163 | ||
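
The vport-netdev.c hunk above inserts a new setup step (netdev_master_upper_dev_link) ahead of the rx-handler registration, which is why a matching error_master_upper_dev_unlink label is added: the labels unwind in reverse order of the steps that succeeded. A generic sketch of that goto-unwind pattern, with placeholder step functions rather than kernel interfaces, is shown below.

/*
 * Generic sketch of the goto-unwind pattern the hunk extends: every
 * setup step gets a matching error label, and the labels are taken in
 * reverse order so a late failure only undoes the steps that already
 * succeeded.  step_link()/step_rx_handler() are placeholders, not
 * kernel interfaces.
 */
#include <stdio.h>

static int step_link(void)        { return 0; }
static void undo_link(void)       { puts("unlink upper dev"); }
static int step_rx_handler(void)  { return -1; /* simulate a failure */ }

static int create(void)
{
	int err;

	err = step_link();
	if (err)
		goto error;

	err = step_rx_handler();
	if (err)
		goto error_unlink;

	return 0;

error_unlink:
	undo_link();
error:
	return err;
}

int main(void)
{
	printf("create() = %d\n", create());
	return 0;
}
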
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c new file mode 100644 index 000000000000..a481c03e2861 --- /dev/null +++ b/net/openvswitch/vport-vxlan.c | |||
| @@ -0,0 +1,204 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2013 Nicira, Inc. | ||
| 3 | * Copyright (c) 2013 Cisco Systems, Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or | ||
| 6 | * modify it under the terms of version 2 of the GNU General Public | ||
| 7 | * License as published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but | ||
| 10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 12 | * General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
| 17 | * 02110-1301, USA | ||
| 18 | */ | ||
| 19 | |||
| 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 21 | |||
| 22 | #include <linux/in.h> | ||
| 23 | #include <linux/ip.h> | ||
| 24 | #include <linux/net.h> | ||
| 25 | #include <linux/rculist.h> | ||
| 26 | #include <linux/udp.h> | ||
| 27 | |||
| 28 | #include <net/icmp.h> | ||
| 29 | #include <net/ip.h> | ||
| 30 | #include <net/udp.h> | ||
| 31 | #include <net/ip_tunnels.h> | ||
| 32 | #include <net/udp.h> | ||
| 33 | #include <net/rtnetlink.h> | ||
| 34 | #include <net/route.h> | ||
| 35 | #include <net/dsfield.h> | ||
| 36 | #include <net/inet_ecn.h> | ||
| 37 | #include <net/net_namespace.h> | ||
| 38 | #include <net/netns/generic.h> | ||
| 39 | #include <net/vxlan.h> | ||
| 40 | |||
| 41 | #include "datapath.h" | ||
| 42 | #include "vport.h" | ||
| 43 | |||
| 44 | /** | ||
| 45 | * struct vxlan_port - Keeps track of open UDP ports | ||
| 46 | * @vs: vxlan_sock created for the port. | ||
| 47 | * @name: vport name. | ||
| 48 | */ | ||
| 49 | struct vxlan_port { | ||
| 50 | struct vxlan_sock *vs; | ||
| 51 | char name[IFNAMSIZ]; | ||
| 52 | }; | ||
| 53 | |||
| 54 | static inline struct vxlan_port *vxlan_vport(const struct vport *vport) | ||
| 55 | { | ||
| 56 | return vport_priv(vport); | ||
| 57 | } | ||
| 58 | |||
| 59 | /* Called with rcu_read_lock and BH disabled. */ | ||
| 60 | static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni) | ||
| 61 | { | ||
| 62 | struct ovs_key_ipv4_tunnel tun_key; | ||
| 63 | struct vport *vport = vs->data; | ||
| 64 | struct iphdr *iph; | ||
| 65 | __be64 key; | ||
| 66 | |||
| 67 | /* Save outer tunnel values */ | ||
| 68 | iph = ip_hdr(skb); | ||
| 69 | key = cpu_to_be64(ntohl(vx_vni) >> 8); | ||
| 70 | ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY); | ||
| 71 | |||
| 72 | ovs_vport_receive(vport, skb, &tun_key); | ||
| 73 | } | ||
| 74 | |||
| 75 | static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb) | ||
| 76 | { | ||
| 77 | struct vxlan_port *vxlan_port = vxlan_vport(vport); | ||
| 78 | __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; | ||
| 79 | |||
| 80 | if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port))) | ||
| 81 | return -EMSGSIZE; | ||
| 82 | return 0; | ||
| 83 | } | ||
| 84 | |||
| 85 | static void vxlan_tnl_destroy(struct vport *vport) | ||
| 86 | { | ||
| 87 | struct vxlan_port *vxlan_port = vxlan_vport(vport); | ||
| 88 | |||
| 89 | vxlan_sock_release(vxlan_port->vs); | ||
| 90 | |||
| 91 | ovs_vport_deferred_free(vport); | ||
| 92 | } | ||
| 93 | |||
| 94 | static struct vport *vxlan_tnl_create(const struct vport_parms *parms) | ||
| 95 | { | ||
| 96 | struct net *net = ovs_dp_get_net(parms->dp); | ||
| 97 | struct nlattr *options = parms->options; | ||
| 98 | struct vxlan_port *vxlan_port; | ||
| 99 | struct vxlan_sock *vs; | ||
| 100 | struct vport *vport; | ||
| 101 | struct nlattr *a; | ||
| 102 | u16 dst_port; | ||
| 103 | int err; | ||
| 104 | |||
| 105 | if (!options) { | ||
| 106 | err = -EINVAL; | ||
| 107 | goto error; | ||
| 108 | } | ||
| 109 | a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT); | ||
| 110 | if (a && nla_len(a) == sizeof(u16)) { | ||
| 111 | dst_port = nla_get_u16(a); | ||
| 112 | } else { | ||
| 113 | /* Require destination port from userspace. */ | ||
| 114 | err = -EINVAL; | ||
| 115 | goto error; | ||
| 116 | } | ||
| 117 | |||
| 118 | vport = ovs_vport_alloc(sizeof(struct vxlan_port), | ||
| 119 | &ovs_vxlan_vport_ops, parms); | ||
| 120 | if (IS_ERR(vport)) | ||
| 121 | return vport; | ||
| 122 | |||
| 123 | vxlan_port = vxlan_vport(vport); | ||
| 124 | strncpy(vxlan_port->name, parms->name, IFNAMSIZ); | ||
| 125 | |||
| 126 | vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, false); | ||
| 127 | if (IS_ERR(vs)) { | ||
| 128 | ovs_vport_free(vport); | ||
| 129 | return (void *)vs; | ||
| 130 | } | ||
| 131 | vxlan_port->vs = vs; | ||
| 132 | |||
| 133 | return vport; | ||
| 134 | |||
| 135 | error: | ||
| 136 | return ERR_PTR(err); | ||
| 137 | } | ||
| 138 | |||
| 139 | static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) | ||
| 140 | { | ||
| 141 | struct net *net = ovs_dp_get_net(vport->dp); | ||
| 142 | struct vxlan_port *vxlan_port = vxlan_vport(vport); | ||
| 143 | __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; | ||
| 144 | struct rtable *rt; | ||
| 145 | struct flowi4 fl; | ||
| 146 | __be16 src_port; | ||
| 147 | int port_min; | ||
| 148 | int port_max; | ||
| 149 | __be16 df; | ||
| 150 | int err; | ||
| 151 | |||
| 152 | if (unlikely(!OVS_CB(skb)->tun_key)) { | ||
| 153 | err = -EINVAL; | ||
| 154 | goto error; | ||
| 155 | } | ||
| 156 | |||
| 157 | /* Route lookup */ | ||
| 158 | memset(&fl, 0, sizeof(fl)); | ||
| 159 | fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst; | ||
| 160 | fl.saddr = OVS_CB(skb)->tun_key->ipv4_src; | ||
| 161 | fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos); | ||
| 162 | fl.flowi4_mark = skb->mark; | ||
| 163 | fl.flowi4_proto = IPPROTO_UDP; | ||
| 164 | |||
| 165 | rt = ip_route_output_key(net, &fl); | ||
| 166 | if (IS_ERR(rt)) { | ||
| 167 | err = PTR_ERR(rt); | ||
| 168 | goto error; | ||
| 169 | } | ||
| 170 | |||
| 171 | df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? | ||
| 172 | htons(IP_DF) : 0; | ||
| 173 | |||
| 174 | skb->local_df = 1; | ||
| 175 | |||
| 176 | inet_get_local_port_range(&port_min, &port_max); | ||
| 177 | src_port = vxlan_src_port(port_min, port_max, skb); | ||
| 178 | |||
| 179 | err = vxlan_xmit_skb(vxlan_port->vs, rt, skb, | ||
| 180 | fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst, | ||
| 181 | OVS_CB(skb)->tun_key->ipv4_tos, | ||
| 182 | OVS_CB(skb)->tun_key->ipv4_ttl, df, | ||
| 183 | src_port, dst_port, | ||
| 184 | htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8)); | ||
| 185 | if (err < 0) | ||
| 186 | ip_rt_put(rt); | ||
| 187 | error: | ||
| 188 | return err; | ||
| 189 | } | ||
| 190 | |||
| 191 | static const char *vxlan_get_name(const struct vport *vport) | ||
| 192 | { | ||
| 193 | struct vxlan_port *vxlan_port = vxlan_vport(vport); | ||
| 194 | return vxlan_port->name; | ||
| 195 | } | ||
| 196 | |||
| 197 | const struct vport_ops ovs_vxlan_vport_ops = { | ||
| 198 | .type = OVS_VPORT_TYPE_VXLAN, | ||
| 199 | .create = vxlan_tnl_create, | ||
| 200 | .destroy = vxlan_tnl_destroy, | ||
| 201 | .get_name = vxlan_get_name, | ||
| 202 | .get_options = vxlan_get_options, | ||
| 203 | .send = vxlan_tnl_send, | ||
| 204 | }; | ||
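
vxlan_rcv() converts the 32-bit wire VNI field into a 64-bit tunnel key with cpu_to_be64(ntohl(vx_vni) >> 8), and vxlan_tnl_send() reverses it with htonl(be64_to_cpu(tun_id) << 8). The userspace sketch below round-trips that conversion; it assumes a glibc system where <endian.h> provides htobe64()/be64toh() as stand-ins for the kernel helpers.

/*
 * Userspace round-trip of the VNI <-> tunnel-id conversion used by the
 * vxlan vport: the 24-bit VNI sits in the top bits of the 32-bit wire
 * field, so receive shifts it down and widens it to a be64 key, and
 * transmit does the reverse.  Assumes a glibc system where <endian.h>
 * supplies htobe64()/be64toh() as stand-ins for the kernel helpers.
 */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>
#include <arpa/inet.h>

int main(void)
{
	/* Receive direction: mirrors cpu_to_be64(ntohl(vx_vni) >> 8). */
	uint32_t vx_vni = htonl(0x123456u << 8);	/* VNI 0x123456 on the wire */
	uint64_t tun_id = htobe64(ntohl(vx_vni) >> 8);

	/* Transmit direction: mirrors htonl(be64_to_cpu(tun_id) << 8). */
	uint32_t out_vni = htonl((uint32_t)(be64toh(tun_id) << 8));

	printf("VNI (cpu order) = %#llx\n", (unsigned long long)be64toh(tun_id));
	printf("round-trip ok   = %d\n", out_vni == vx_vni);
	return 0;
}
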
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index d4c7fa04ce08..6f65dbe13812 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
| @@ -42,6 +42,9 @@ static const struct vport_ops *vport_ops_list[] = { | |||
| 42 | #ifdef CONFIG_OPENVSWITCH_GRE | 42 | #ifdef CONFIG_OPENVSWITCH_GRE |
| 43 | &ovs_gre_vport_ops, | 43 | &ovs_gre_vport_ops, |
| 44 | #endif | 44 | #endif |
| 45 | #ifdef CONFIG_OPENVSWITCH_VXLAN | ||
| 46 | &ovs_vxlan_vport_ops, | ||
| 47 | #endif | ||
| 45 | }; | 48 | }; |
| 46 | 49 | ||
| 47 | /* Protected by RCU read lock for reading, ovs_mutex for writing. */ | 50 | /* Protected by RCU read lock for reading, ovs_mutex for writing. */ |
| @@ -200,7 +203,7 @@ out: | |||
| 200 | * ovs_vport_set_options - modify existing vport device (for kernel callers) | 203 | * ovs_vport_set_options - modify existing vport device (for kernel callers) |
| 201 | * | 204 | * |
| 202 | * @vport: vport to modify. | 205 | * @vport: vport to modify. |
| 203 | * @port: New configuration. | 206 | * @options: New configuration. |
| 204 | * | 207 | * |
| 205 | * Modifies an existing device with the specified configuration (which is | 208 | * Modifies an existing device with the specified configuration (which is |
| 206 | * dependent on device type). ovs_mutex must be held. | 209 | * dependent on device type). ovs_mutex must be held. |
| @@ -325,6 +328,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) | |||
| 325 | * | 328 | * |
| 326 | * @vport: vport that received the packet | 329 | * @vport: vport that received the packet |
| 327 | * @skb: skb that was received | 330 | * @skb: skb that was received |
| 331 | * @tun_key: tunnel (if any) that carried packet | ||
| 328 | * | 332 | * |
| 329 | * Must be called with rcu_read_lock. The packet cannot be shared and | 333 | * Must be called with rcu_read_lock. The packet cannot be shared and |
| 330 | * skb->data should point to the Ethernet header. | 334 | * skb->data should point to the Ethernet header. |
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 376045c42f8b..1a9fbcec6e1b 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h | |||
| @@ -199,6 +199,7 @@ void ovs_vport_record_error(struct vport *, enum vport_err_type err_type); | |||
| 199 | extern const struct vport_ops ovs_netdev_vport_ops; | 199 | extern const struct vport_ops ovs_netdev_vport_ops; |
| 200 | extern const struct vport_ops ovs_internal_vport_ops; | 200 | extern const struct vport_ops ovs_internal_vport_ops; |
| 201 | extern const struct vport_ops ovs_gre_vport_ops; | 201 | extern const struct vport_ops ovs_gre_vport_ops; |
| 202 | extern const struct vport_ops ovs_vxlan_vport_ops; | ||
| 202 | 203 | ||
| 203 | static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, | 204 | static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, |
| 204 | const void *start, unsigned int len) | 205 | const void *start, unsigned int len) |
