-rw-r--r-- | include/linux/netdev_features.h | 5
-rw-r--r-- | include/linux/netdevice.h | 1
-rw-r--r-- | include/linux/skbuff.h | 4
-rw-r--r-- | include/net/mpls.h | 39
-rw-r--r-- | include/uapi/linux/openvswitch.h | 38
-rw-r--r-- | net/core/dev.c | 3
-rw-r--r-- | net/core/ethtool.c | 1
-rw-r--r-- | net/ipv4/af_inet.c | 1
-rw-r--r-- | net/ipv4/tcp_offload.c | 1
-rw-r--r-- | net/ipv4/udp_offload.c | 3
-rw-r--r-- | net/ipv6/ip6_offload.c | 1
-rw-r--r-- | net/ipv6/udp_offload.c | 3
-rw-r--r-- | net/mpls/mpls_gso.c | 3
-rw-r--r-- | net/openvswitch/Kconfig | 1
-rw-r--r-- | net/openvswitch/actions.c | 136
-rw-r--r-- | net/openvswitch/datapath.c | 215
-rw-r--r-- | net/openvswitch/datapath.h | 4
-rw-r--r-- | net/openvswitch/flow.c | 30
-rw-r--r-- | net/openvswitch/flow.h | 17
-rw-r--r-- | net/openvswitch/flow_netlink.c | 322
-rw-r--r-- | net/openvswitch/flow_netlink.h | 5
-rw-r--r-- | net/openvswitch/flow_table.c | 11
-rw-r--r-- | net/openvswitch/flow_table.h | 2
-rw-r--r-- | net/openvswitch/vport-internal_dev.c | 5
24 files changed, 606 insertions, 245 deletions
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 8c94b07e654a..8e30685affeb 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -47,7 +47,6 @@ enum { | |||
47 | NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ | 47 | NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ |
48 | NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ | 48 | NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ |
49 | NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ | 49 | NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ |
50 | NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ | ||
51 | NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ | 50 | NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ |
52 | /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ | 51 | /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ |
53 | NETIF_F_GSO_TUNNEL_REMCSUM_BIT, | 52 | NETIF_F_GSO_TUNNEL_REMCSUM_BIT, |
@@ -119,7 +118,6 @@ enum { | |||
119 | #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) | 118 | #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) |
120 | #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) | 119 | #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) |
121 | #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) | 120 | #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) |
122 | #define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) | ||
123 | #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) | 121 | #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) |
124 | #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) | 122 | #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) |
125 | #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) | 123 | #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) |
@@ -183,7 +181,6 @@ enum { | |||
183 | NETIF_F_GSO_IPIP | \ | 181 | NETIF_F_GSO_IPIP | \ |
184 | NETIF_F_GSO_SIT | \ | 182 | NETIF_F_GSO_SIT | \ |
185 | NETIF_F_GSO_UDP_TUNNEL | \ | 183 | NETIF_F_GSO_UDP_TUNNEL | \ |
186 | NETIF_F_GSO_UDP_TUNNEL_CSUM | \ | 184 | NETIF_F_GSO_UDP_TUNNEL_CSUM) |
187 | NETIF_F_GSO_MPLS) | ||
188 | 185 | ||
189 | #endif /* _LINUX_NETDEV_FEATURES_H */ | 186 | #endif /* _LINUX_NETDEV_FEATURES_H */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4767f546d7c0..90ac95900a11 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3583,7 +3583,6 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) | |||
3583 | BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); | 3583 | BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); |
3584 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); | 3584 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); |
3585 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); | 3585 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); |
3586 | BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT)); | ||
3587 | BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); | 3586 | BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); |
3588 | 3587 | ||
3589 | return (features & feature) == feature; | 3588 | return (features & feature) == feature; |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 39ec7530ae27..53f4f6c93356 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -372,9 +372,7 @@ enum { | |||
372 | 372 | ||
373 | SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, | 373 | SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, |
374 | 374 | ||
375 | SKB_GSO_MPLS = 1 << 12, | 375 | SKB_GSO_TUNNEL_REMCSUM = 1 << 12, |
376 | |||
377 | SKB_GSO_TUNNEL_REMCSUM = 1 << 13, | ||
378 | }; | 376 | }; |
379 | 377 | ||
380 | #if BITS_PER_LONG > 32 | 378 | #if BITS_PER_LONG > 32 |
diff --git a/include/net/mpls.h b/include/net/mpls.h
new file mode 100644
index 000000000000..5b3b5addfb08
--- /dev/null
+++ b/include/net/mpls.h
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014 Nicira, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of version 2 of the GNU General Public | ||
6 | * License as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef _NET_MPLS_H | ||
15 | #define _NET_MPLS_H 1 | ||
16 | |||
17 | #include <linux/if_ether.h> | ||
18 | #include <linux/netdevice.h> | ||
19 | |||
20 | #define MPLS_HLEN 4 | ||
21 | |||
22 | static inline bool eth_p_mpls(__be16 eth_type) | ||
23 | { | ||
24 | return eth_type == htons(ETH_P_MPLS_UC) || | ||
25 | eth_type == htons(ETH_P_MPLS_MC); | ||
26 | } | ||
27 | |||
28 | /* | ||
29 | * For non-MPLS skbs this will correspond to the network header. | ||
30 | * For MPLS skbs it will be before the network_header as the MPLS | ||
31 | * label stack lies between the end of the mac header and the network | ||
32 | * header. That is, for MPLS skbs the end of the mac header | ||
33 | * is the top of the MPLS label stack. | ||
34 | */ | ||
35 | static inline unsigned char *skb_mpls_header(struct sk_buff *skb) | ||
36 | { | ||
37 | return skb_mac_header(skb) + skb->mac_len; | ||
38 | } | ||
39 | #endif | ||
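The new include/net/mpls.h helpers are used throughout the rest of the series. As a rough, hypothetical usage sketch (not part of the patch; the function name and the MPLS_LS_* constants from <linux/mpls.h> are assumptions), the label of the top stack entry of an MPLS packet could be read like this:

static u32 example_top_mpls_label(struct sk_buff *skb)
{
	__be32 lse;

	if (!eth_p_mpls(skb->protocol))
		return 0;

	/* The label stack begins where the L2 header ends (mac_len). */
	memcpy(&lse, skb_mpls_header(skb), MPLS_HLEN);
	return (ntohl(lse) & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
}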
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 435eabc5ffaa..26c36c4cf7e2 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -293,6 +293,9 @@ enum ovs_key_attr { | |||
293 | OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash | 293 | OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash |
294 | is not computed by the datapath. */ | 294 | is not computed by the datapath. */ |
295 | OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */ | 295 | OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */ |
296 | OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls. | ||
297 | * The implementation may restrict | ||
298 | * the accepted length of the array. */ | ||
296 | 299 | ||
297 | #ifdef __KERNEL__ | 300 | #ifdef __KERNEL__ |
298 | OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */ | 301 | OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */ |
@@ -340,6 +343,10 @@ struct ovs_key_ethernet { | |||
340 | __u8 eth_dst[ETH_ALEN]; | 343 | __u8 eth_dst[ETH_ALEN]; |
341 | }; | 344 | }; |
342 | 345 | ||
346 | struct ovs_key_mpls { | ||
347 | __be32 mpls_lse; | ||
348 | }; | ||
349 | |||
343 | struct ovs_key_ipv4 { | 350 | struct ovs_key_ipv4 { |
344 | __be32 ipv4_src; | 351 | __be32 ipv4_src; |
345 | __be32 ipv4_dst; | 352 | __be32 ipv4_dst; |
@@ -393,9 +400,9 @@ struct ovs_key_arp { | |||
393 | }; | 400 | }; |
394 | 401 | ||
395 | struct ovs_key_nd { | 402 | struct ovs_key_nd { |
396 | __u32 nd_target[4]; | 403 | __be32 nd_target[4]; |
397 | __u8 nd_sll[ETH_ALEN]; | 404 | __u8 nd_sll[ETH_ALEN]; |
398 | __u8 nd_tll[ETH_ALEN]; | 405 | __u8 nd_tll[ETH_ALEN]; |
399 | }; | 406 | }; |
400 | 407 | ||
401 | /** | 408 | /** |
@@ -484,6 +491,19 @@ enum ovs_userspace_attr { | |||
484 | #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) | 491 | #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) |
485 | 492 | ||
486 | /** | 493 | /** |
494 | * struct ovs_action_push_mpls - %OVS_ACTION_ATTR_PUSH_MPLS action argument. | ||
495 | * @mpls_lse: MPLS label stack entry to push. | ||
496 | * @mpls_ethertype: Ethertype to set in the encapsulating ethernet frame. | ||
497 | * | ||
498 | * The only values @mpls_ethertype should ever be given are %ETH_P_MPLS_UC and | ||
499 | * %ETH_P_MPLS_MC, indicating MPLS unicast or multicast. Other are rejected. | ||
500 | */ | ||
501 | struct ovs_action_push_mpls { | ||
502 | __be32 mpls_lse; | ||
503 | __be16 mpls_ethertype; /* Either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC */ | ||
504 | }; | ||
505 | |||
506 | /** | ||
487 | * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. | 507 | * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. |
488 | * @vlan_tpid: Tag protocol identifier (TPID) to push. | 508 | * @vlan_tpid: Tag protocol identifier (TPID) to push. |
489 | * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set | 509 | * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set |
@@ -534,6 +554,15 @@ struct ovs_action_hash { | |||
534 | * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. | 554 | * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. |
535 | * @OVS_ACTION_ATTR_SAMPLE: Probabilitically executes actions, as specified in | 555 | * @OVS_ACTION_ATTR_SAMPLE: Probabilitically executes actions, as specified in |
536 | * the nested %OVS_SAMPLE_ATTR_* attributes. | 556 | * the nested %OVS_SAMPLE_ATTR_* attributes. |
557 | * @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the | ||
558 | * top of the packets MPLS label stack. Set the ethertype of the | ||
559 | * encapsulating frame to either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC to | ||
560 | * indicate the new packet contents. | ||
561 | * @OVS_ACTION_ATTR_POP_MPLS: Pop an MPLS label stack entry off of the | ||
562 | * packet's MPLS label stack. Set the encapsulating frame's ethertype to | ||
563 | * indicate the new packet contents. This could potentially still be | ||
564 | * %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there | ||
565 | * is no MPLS label stack, as determined by ethertype, no action is taken. | ||
537 | * | 566 | * |
538 | * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all | 567 | * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all |
539 | * fields within a header are modifiable, e.g. the IPv4 protocol and fragment | 568 | * fields within a header are modifiable, e.g. the IPv4 protocol and fragment |
@@ -550,6 +579,9 @@ enum ovs_action_attr { | |||
550 | OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ | 579 | OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ |
551 | OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */ | 580 | OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */ |
552 | OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */ | 581 | OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */ |
582 | OVS_ACTION_ATTR_PUSH_MPLS, /* struct ovs_action_push_mpls. */ | ||
583 | OVS_ACTION_ATTR_POP_MPLS, /* __be16 ethertype. */ | ||
584 | |||
553 | __OVS_ACTION_ATTR_MAX | 585 | __OVS_ACTION_ATTR_MAX |
554 | }; | 586 | }; |
555 | 587 | ||
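For reference, the mpls_lse field in these new uapi structures is a raw RFC 3032 label stack entry in network byte order: label (20 bits), traffic class (3), bottom-of-stack flag (1), TTL (8). A hedged userspace sketch of filling struct ovs_action_push_mpls follows (illustrative only; the helper name is made up and is not part of the patch):

#include <stdint.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/openvswitch.h>

static struct ovs_action_push_mpls example_push_mpls(uint32_t label, uint8_t tc,
						      int bottom_of_stack,
						      uint8_t ttl)
{
	struct ovs_action_push_mpls mpls = {
		/* label | TC | S | TTL, packed per RFC 3032 */
		.mpls_lse = htonl((label << 12) | (tc << 9) |
				  ((bottom_of_stack ? 1u : 0u) << 8) | ttl),
		.mpls_ethertype = htons(ETH_P_MPLS_UC),
	};

	return mpls;
}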
diff --git a/net/core/dev.c b/net/core/dev.c
index 40be481268de..70bb609c283d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -118,6 +118,7 @@ | |||
118 | #include <linux/if_vlan.h> | 118 | #include <linux/if_vlan.h> |
119 | #include <linux/ip.h> | 119 | #include <linux/ip.h> |
120 | #include <net/ip.h> | 120 | #include <net/ip.h> |
121 | #include <net/mpls.h> | ||
121 | #include <linux/ipv6.h> | 122 | #include <linux/ipv6.h> |
122 | #include <linux/in.h> | 123 | #include <linux/in.h> |
123 | #include <linux/jhash.h> | 124 | #include <linux/jhash.h> |
@@ -2530,7 +2531,7 @@ static netdev_features_t net_mpls_features(struct sk_buff *skb, | |||
2530 | netdev_features_t features, | 2531 | netdev_features_t features, |
2531 | __be16 type) | 2532 | __be16 type) |
2532 | { | 2533 | { |
2533 | if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC)) | 2534 | if (eth_p_mpls(type)) |
2534 | features &= skb->dev->mpls_features; | 2535 | features &= skb->dev->mpls_features; |
2535 | 2536 | ||
2536 | return features; | 2537 | return features; |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 06dfb293e5aa..b0f84f5ddda8 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -84,7 +84,6 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] | |||
84 | [NETIF_F_GSO_IPIP_BIT] = "tx-ipip-segmentation", | 84 | [NETIF_F_GSO_IPIP_BIT] = "tx-ipip-segmentation", |
85 | [NETIF_F_GSO_SIT_BIT] = "tx-sit-segmentation", | 85 | [NETIF_F_GSO_SIT_BIT] = "tx-sit-segmentation", |
86 | [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", | 86 | [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", |
87 | [NETIF_F_GSO_MPLS_BIT] = "tx-mpls-segmentation", | ||
88 | 87 | ||
89 | [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", | 88 | [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", |
90 | [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", | 89 | [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index ed2c672c5b01..3a096bb2d596 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1223,7 +1223,6 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
1223 | SKB_GSO_UDP_TUNNEL | | 1223 | SKB_GSO_UDP_TUNNEL | |
1224 | SKB_GSO_UDP_TUNNEL_CSUM | | 1224 | SKB_GSO_UDP_TUNNEL_CSUM | |
1225 | SKB_GSO_TUNNEL_REMCSUM | | 1225 | SKB_GSO_TUNNEL_REMCSUM | |
1226 | SKB_GSO_MPLS | | ||
1227 | 0))) | 1226 | 0))) |
1228 | goto out; | 1227 | goto out; |
1229 | 1228 | ||
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index a1b2a5624f91..9d7930ba8e0f 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -94,7 +94,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, | |||
94 | SKB_GSO_GRE_CSUM | | 94 | SKB_GSO_GRE_CSUM | |
95 | SKB_GSO_IPIP | | 95 | SKB_GSO_IPIP | |
96 | SKB_GSO_SIT | | 96 | SKB_GSO_SIT | |
97 | SKB_GSO_MPLS | | ||
98 | SKB_GSO_UDP_TUNNEL | | 97 | SKB_GSO_UDP_TUNNEL | |
99 | SKB_GSO_UDP_TUNNEL_CSUM | | 98 | SKB_GSO_UDP_TUNNEL_CSUM | |
100 | SKB_GSO_TUNNEL_REMCSUM | | 99 | SKB_GSO_TUNNEL_REMCSUM | |
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0a5a70d0e84c..d3e537ef6b7f 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -207,8 +207,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, | |||
207 | SKB_GSO_UDP_TUNNEL_CSUM | | 207 | SKB_GSO_UDP_TUNNEL_CSUM | |
208 | SKB_GSO_TUNNEL_REMCSUM | | 208 | SKB_GSO_TUNNEL_REMCSUM | |
209 | SKB_GSO_IPIP | | 209 | SKB_GSO_IPIP | |
210 | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | | 210 | SKB_GSO_GRE | SKB_GSO_GRE_CSUM) || |
211 | SKB_GSO_MPLS) || | ||
212 | !(type & (SKB_GSO_UDP)))) | 211 | !(type & (SKB_GSO_UDP)))) |
213 | goto out; | 212 | goto out; |
214 | 213 | ||
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e9767079a360..fd76ce938c32 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -79,7 +79,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
79 | SKB_GSO_UDP_TUNNEL | | 79 | SKB_GSO_UDP_TUNNEL | |
80 | SKB_GSO_UDP_TUNNEL_CSUM | | 80 | SKB_GSO_UDP_TUNNEL_CSUM | |
81 | SKB_GSO_TUNNEL_REMCSUM | | 81 | SKB_GSO_TUNNEL_REMCSUM | |
82 | SKB_GSO_MPLS | | ||
83 | SKB_GSO_TCPV6 | | 82 | SKB_GSO_TCPV6 | |
84 | 0))) | 83 | 0))) |
85 | goto out; | 84 | goto out; |
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 637ba2e438b7..b6aa8ed18257 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -46,8 +46,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, | |||
46 | SKB_GSO_GRE | | 46 | SKB_GSO_GRE | |
47 | SKB_GSO_GRE_CSUM | | 47 | SKB_GSO_GRE_CSUM | |
48 | SKB_GSO_IPIP | | 48 | SKB_GSO_IPIP | |
49 | SKB_GSO_SIT | | 49 | SKB_GSO_SIT) || |
50 | SKB_GSO_MPLS) || | ||
51 | !(type & (SKB_GSO_UDP)))) | 50 | !(type & (SKB_GSO_UDP)))) |
52 | goto out; | 51 | goto out; |
53 | 52 | ||
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index e3545f21a099..ca27837974fe 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -34,8 +34,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, | |||
34 | SKB_GSO_TCP_ECN | | 34 | SKB_GSO_TCP_ECN | |
35 | SKB_GSO_GRE | | 35 | SKB_GSO_GRE | |
36 | SKB_GSO_GRE_CSUM | | 36 | SKB_GSO_GRE_CSUM | |
37 | SKB_GSO_IPIP | | 37 | SKB_GSO_IPIP))) |
38 | SKB_GSO_MPLS))) | ||
39 | goto out; | 38 | goto out; |
40 | 39 | ||
41 | /* Setup inner SKB. */ | 40 | /* Setup inner SKB. */ |
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 2a9673e39ca1..454ce12efbbf 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -30,6 +30,7 @@ config OPENVSWITCH | |||
30 | 30 | ||
31 | config OPENVSWITCH_GRE | 31 | config OPENVSWITCH_GRE |
32 | tristate "Open vSwitch GRE tunneling support" | 32 | tristate "Open vSwitch GRE tunneling support" |
33 | select NET_MPLS_GSO | ||
33 | depends on INET | 34 | depends on INET |
34 | depends on OPENVSWITCH | 35 | depends on OPENVSWITCH |
35 | depends on NET_IPGRE_DEMUX | 36 | depends on NET_IPGRE_DEMUX |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 922c133b1933..f7e589159e4a 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -28,10 +28,12 @@ | |||
28 | #include <linux/in6.h> | 28 | #include <linux/in6.h> |
29 | #include <linux/if_arp.h> | 29 | #include <linux/if_arp.h> |
30 | #include <linux/if_vlan.h> | 30 | #include <linux/if_vlan.h> |
31 | |||
31 | #include <net/ip.h> | 32 | #include <net/ip.h> |
32 | #include <net/ipv6.h> | 33 | #include <net/ipv6.h> |
33 | #include <net/checksum.h> | 34 | #include <net/checksum.h> |
34 | #include <net/dsfield.h> | 35 | #include <net/dsfield.h> |
36 | #include <net/mpls.h> | ||
35 | #include <net/sctp/checksum.h> | 37 | #include <net/sctp/checksum.h> |
36 | 38 | ||
37 | #include "datapath.h" | 39 | #include "datapath.h" |
@@ -118,6 +120,92 @@ static int make_writable(struct sk_buff *skb, int write_len) | |||
118 | return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 120 | return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
119 | } | 121 | } |
120 | 122 | ||
123 | static int push_mpls(struct sk_buff *skb, | ||
124 | const struct ovs_action_push_mpls *mpls) | ||
125 | { | ||
126 | __be32 *new_mpls_lse; | ||
127 | struct ethhdr *hdr; | ||
128 | |||
129 | /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */ | ||
130 | if (skb->encapsulation) | ||
131 | return -ENOTSUPP; | ||
132 | |||
133 | if (skb_cow_head(skb, MPLS_HLEN) < 0) | ||
134 | return -ENOMEM; | ||
135 | |||
136 | skb_push(skb, MPLS_HLEN); | ||
137 | memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), | ||
138 | skb->mac_len); | ||
139 | skb_reset_mac_header(skb); | ||
140 | |||
141 | new_mpls_lse = (__be32 *)skb_mpls_header(skb); | ||
142 | *new_mpls_lse = mpls->mpls_lse; | ||
143 | |||
144 | if (skb->ip_summed == CHECKSUM_COMPLETE) | ||
145 | skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse, | ||
146 | MPLS_HLEN, 0)); | ||
147 | |||
148 | hdr = eth_hdr(skb); | ||
149 | hdr->h_proto = mpls->mpls_ethertype; | ||
150 | |||
151 | skb_set_inner_protocol(skb, skb->protocol); | ||
152 | skb->protocol = mpls->mpls_ethertype; | ||
153 | |||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static int pop_mpls(struct sk_buff *skb, const __be16 ethertype) | ||
158 | { | ||
159 | struct ethhdr *hdr; | ||
160 | int err; | ||
161 | |||
162 | err = make_writable(skb, skb->mac_len + MPLS_HLEN); | ||
163 | if (unlikely(err)) | ||
164 | return err; | ||
165 | |||
166 | if (skb->ip_summed == CHECKSUM_COMPLETE) | ||
167 | skb->csum = csum_sub(skb->csum, | ||
168 | csum_partial(skb_mpls_header(skb), | ||
169 | MPLS_HLEN, 0)); | ||
170 | |||
171 | memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), | ||
172 | skb->mac_len); | ||
173 | |||
174 | __skb_pull(skb, MPLS_HLEN); | ||
175 | skb_reset_mac_header(skb); | ||
176 | |||
177 | /* skb_mpls_header() is used to locate the ethertype | ||
178 | * field correctly in the presence of VLAN tags. | ||
179 | */ | ||
180 | hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); | ||
181 | hdr->h_proto = ethertype; | ||
182 | if (eth_p_mpls(skb->protocol)) | ||
183 | skb->protocol = ethertype; | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static int set_mpls(struct sk_buff *skb, const __be32 *mpls_lse) | ||
188 | { | ||
189 | __be32 *stack; | ||
190 | int err; | ||
191 | |||
192 | err = make_writable(skb, skb->mac_len + MPLS_HLEN); | ||
193 | if (unlikely(err)) | ||
194 | return err; | ||
195 | |||
196 | stack = (__be32 *)skb_mpls_header(skb); | ||
197 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | ||
198 | __be32 diff[] = { ~(*stack), *mpls_lse }; | ||
199 | |||
200 | skb->csum = ~csum_partial((char *)diff, sizeof(diff), | ||
201 | ~skb->csum); | ||
202 | } | ||
203 | |||
204 | *stack = *mpls_lse; | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | |||
121 | /* remove VLAN header from packet and update csum accordingly. */ | 209 | /* remove VLAN header from packet and update csum accordingly. */ |
122 | static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) | 210 | static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) |
123 | { | 211 | { |
@@ -140,10 +228,12 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) | |||
140 | 228 | ||
141 | vlan_set_encap_proto(skb, vhdr); | 229 | vlan_set_encap_proto(skb, vhdr); |
142 | skb->mac_header += VLAN_HLEN; | 230 | skb->mac_header += VLAN_HLEN; |
231 | |||
143 | if (skb_network_offset(skb) < ETH_HLEN) | 232 | if (skb_network_offset(skb) < ETH_HLEN) |
144 | skb_set_network_header(skb, ETH_HLEN); | 233 | skb_set_network_header(skb, ETH_HLEN); |
145 | skb_reset_mac_len(skb); | ||
146 | 234 | ||
235 | /* Update mac_len for subsequent MPLS actions */ | ||
236 | skb_reset_mac_len(skb); | ||
147 | return 0; | 237 | return 0; |
148 | } | 238 | } |
149 | 239 | ||
@@ -186,6 +276,8 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla | |||
186 | 276 | ||
187 | if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag)) | 277 | if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag)) |
188 | return -ENOMEM; | 278 | return -ENOMEM; |
279 | /* Update mac_len for subsequent MPLS actions */ | ||
280 | skb->mac_len += VLAN_HLEN; | ||
189 | 281 | ||
190 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 282 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
191 | skb->csum = csum_add(skb->csum, csum_partial(skb->data | 283 | skb->csum = csum_add(skb->csum, csum_partial(skb->data |
@@ -459,21 +551,14 @@ static int set_sctp(struct sk_buff *skb, | |||
459 | return 0; | 551 | return 0; |
460 | } | 552 | } |
461 | 553 | ||
462 | static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port) | 554 | static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port) |
463 | { | 555 | { |
464 | struct vport *vport; | 556 | struct vport *vport = ovs_vport_rcu(dp, out_port); |
465 | 557 | ||
466 | if (unlikely(!skb)) | 558 | if (likely(vport)) |
467 | return -ENOMEM; | 559 | ovs_vport_send(vport, skb); |
468 | 560 | else | |
469 | vport = ovs_vport_rcu(dp, out_port); | ||
470 | if (unlikely(!vport)) { | ||
471 | kfree_skb(skb); | 561 | kfree_skb(skb); |
472 | return -ENODEV; | ||
473 | } | ||
474 | |||
475 | ovs_vport_send(vport, skb); | ||
476 | return 0; | ||
477 | } | 562 | } |
478 | 563 | ||
479 | static int output_userspace(struct datapath *dp, struct sk_buff *skb, | 564 | static int output_userspace(struct datapath *dp, struct sk_buff *skb, |
@@ -612,6 +697,10 @@ static int execute_set_action(struct sk_buff *skb, | |||
612 | case OVS_KEY_ATTR_SCTP: | 697 | case OVS_KEY_ATTR_SCTP: |
613 | err = set_sctp(skb, nla_data(nested_attr)); | 698 | err = set_sctp(skb, nla_data(nested_attr)); |
614 | break; | 699 | break; |
700 | |||
701 | case OVS_KEY_ATTR_MPLS: | ||
702 | err = set_mpls(skb, nla_data(nested_attr)); | ||
703 | break; | ||
615 | } | 704 | } |
616 | 705 | ||
617 | return err; | 706 | return err; |
@@ -672,8 +761,12 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, | |||
672 | a = nla_next(a, &rem)) { | 761 | a = nla_next(a, &rem)) { |
673 | int err = 0; | 762 | int err = 0; |
674 | 763 | ||
675 | if (prev_port != -1) { | 764 | if (unlikely(prev_port != -1)) { |
676 | do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port); | 765 | struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC); |
766 | |||
767 | if (out_skb) | ||
768 | do_output(dp, out_skb, prev_port); | ||
769 | |||
677 | prev_port = -1; | 770 | prev_port = -1; |
678 | } | 771 | } |
679 | 772 | ||
@@ -690,6 +783,14 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, | |||
690 | execute_hash(skb, key, a); | 783 | execute_hash(skb, key, a); |
691 | break; | 784 | break; |
692 | 785 | ||
786 | case OVS_ACTION_ATTR_PUSH_MPLS: | ||
787 | err = push_mpls(skb, nla_data(a)); | ||
788 | break; | ||
789 | |||
790 | case OVS_ACTION_ATTR_POP_MPLS: | ||
791 | err = pop_mpls(skb, nla_get_be16(a)); | ||
792 | break; | ||
793 | |||
693 | case OVS_ACTION_ATTR_PUSH_VLAN: | 794 | case OVS_ACTION_ATTR_PUSH_VLAN: |
694 | err = push_vlan(skb, nla_data(a)); | 795 | err = push_vlan(skb, nla_data(a)); |
695 | if (unlikely(err)) /* skb already freed. */ | 796 | if (unlikely(err)) /* skb already freed. */ |
@@ -764,14 +865,11 @@ static void process_deferred_actions(struct datapath *dp) | |||
764 | 865 | ||
765 | /* Execute a list of actions against 'skb'. */ | 866 | /* Execute a list of actions against 'skb'. */ |
766 | int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, | 867 | int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, |
767 | struct sw_flow_key *key) | 868 | struct sw_flow_actions *acts, struct sw_flow_key *key) |
768 | { | 869 | { |
769 | int level = this_cpu_read(exec_actions_level); | 870 | int level = this_cpu_read(exec_actions_level); |
770 | struct sw_flow_actions *acts; | ||
771 | int err; | 871 | int err; |
772 | 872 | ||
773 | acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); | ||
774 | |||
775 | this_cpu_inc(exec_actions_level); | 873 | this_cpu_inc(exec_actions_level); |
776 | OVS_CB(skb)->egress_tun_info = NULL; | 874 | OVS_CB(skb)->egress_tun_info = NULL; |
777 | err = do_execute_actions(dp, skb, key, | 875 | err = do_execute_actions(dp, skb, key, |
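push_mpls() and pop_mpls() above keep skb->csum valid for CHECKSUM_COMPLETE packets by folding the 4-byte label stack entry into or out of the running sum rather than recomputing it. A minimal sketch of that bookkeeping, assuming the usual kernel csum helpers are in scope (the function is hypothetical and not part of the patch):

static void example_mpls_csum_adjust(struct sk_buff *skb, __be32 lse,
				     bool inserted)
{
	__wsum lse_sum;

	if (skb->ip_summed != CHECKSUM_COMPLETE)
		return;

	/* Partial sum over the 4-byte label stack entry. */
	lse_sum = csum_partial(&lse, MPLS_HLEN, 0);
	skb->csum = inserted ? csum_add(skb->csum, lse_sum)
			     : csum_sub(skb->csum, lse_sum);
}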
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f18302f32049..014485ec4b0d 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -140,19 +140,30 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *, | |||
140 | static int queue_userspace_packet(struct datapath *dp, struct sk_buff *, | 140 | static int queue_userspace_packet(struct datapath *dp, struct sk_buff *, |
141 | const struct dp_upcall_info *); | 141 | const struct dp_upcall_info *); |
142 | 142 | ||
143 | /* Must be called with rcu_read_lock or ovs_mutex. */ | 143 | /* Must be called with rcu_read_lock. */ |
144 | static struct datapath *get_dp(struct net *net, int dp_ifindex) | 144 | static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex) |
145 | { | 145 | { |
146 | struct datapath *dp = NULL; | 146 | struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex); |
147 | struct net_device *dev; | ||
148 | 147 | ||
149 | rcu_read_lock(); | ||
150 | dev = dev_get_by_index_rcu(net, dp_ifindex); | ||
151 | if (dev) { | 148 | if (dev) { |
152 | struct vport *vport = ovs_internal_dev_get_vport(dev); | 149 | struct vport *vport = ovs_internal_dev_get_vport(dev); |
153 | if (vport) | 150 | if (vport) |
154 | dp = vport->dp; | 151 | return vport->dp; |
155 | } | 152 | } |
153 | |||
154 | return NULL; | ||
155 | } | ||
156 | |||
157 | /* The caller must hold either ovs_mutex or rcu_read_lock to keep the | ||
158 | * returned dp pointer valid. | ||
159 | */ | ||
160 | static inline struct datapath *get_dp(struct net *net, int dp_ifindex) | ||
161 | { | ||
162 | struct datapath *dp; | ||
163 | |||
164 | WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held()); | ||
165 | rcu_read_lock(); | ||
166 | dp = get_dp_rcu(net, dp_ifindex); | ||
156 | rcu_read_unlock(); | 167 | rcu_read_unlock(); |
157 | 168 | ||
158 | return dp; | 169 | return dp; |
@@ -187,6 +198,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu) | |||
187 | { | 198 | { |
188 | struct datapath *dp = container_of(rcu, struct datapath, rcu); | 199 | struct datapath *dp = container_of(rcu, struct datapath, rcu); |
189 | 200 | ||
201 | ovs_flow_tbl_destroy(&dp->table); | ||
190 | free_percpu(dp->stats_percpu); | 202 | free_percpu(dp->stats_percpu); |
191 | release_net(ovs_dp_get_net(dp)); | 203 | release_net(ovs_dp_get_net(dp)); |
192 | kfree(dp->ports); | 204 | kfree(dp->ports); |
@@ -245,6 +257,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) | |||
245 | const struct vport *p = OVS_CB(skb)->input_vport; | 257 | const struct vport *p = OVS_CB(skb)->input_vport; |
246 | struct datapath *dp = p->dp; | 258 | struct datapath *dp = p->dp; |
247 | struct sw_flow *flow; | 259 | struct sw_flow *flow; |
260 | struct sw_flow_actions *sf_acts; | ||
248 | struct dp_stats_percpu *stats; | 261 | struct dp_stats_percpu *stats; |
249 | u64 *stats_counter; | 262 | u64 *stats_counter; |
250 | u32 n_mask_hit; | 263 | u32 n_mask_hit; |
@@ -270,10 +283,10 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) | |||
270 | goto out; | 283 | goto out; |
271 | } | 284 | } |
272 | 285 | ||
273 | OVS_CB(skb)->flow = flow; | 286 | ovs_flow_stats_update(flow, key->tp.flags, skb); |
287 | sf_acts = rcu_dereference(flow->sf_acts); | ||
288 | ovs_execute_actions(dp, skb, sf_acts, key); | ||
274 | 289 | ||
275 | ovs_flow_stats_update(OVS_CB(skb)->flow, key->tp.flags, skb); | ||
276 | ovs_execute_actions(dp, skb, key); | ||
277 | stats_counter = &stats->n_hit; | 290 | stats_counter = &stats->n_hit; |
278 | 291 | ||
279 | out: | 292 | out: |
@@ -362,37 +375,12 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, | |||
362 | return err; | 375 | return err; |
363 | } | 376 | } |
364 | 377 | ||
365 | static size_t key_attr_size(void) | ||
366 | { | ||
367 | return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ | ||
368 | + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ | ||
369 | + nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */ | ||
370 | + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */ | ||
371 | + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */ | ||
372 | + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */ | ||
373 | + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */ | ||
374 | + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */ | ||
375 | + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */ | ||
376 | + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */ | ||
377 | + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */ | ||
378 | + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */ | ||
379 | + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ | ||
380 | + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ | ||
381 | + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ | ||
382 | + nla_total_size(4) /* OVS_KEY_ATTR_8021Q */ | ||
383 | + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */ | ||
384 | + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ | ||
385 | + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */ | ||
386 | + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */ | ||
387 | + nla_total_size(28); /* OVS_KEY_ATTR_ND */ | ||
388 | } | ||
389 | |||
390 | static size_t upcall_msg_size(const struct nlattr *userdata, | 378 | static size_t upcall_msg_size(const struct nlattr *userdata, |
391 | unsigned int hdrlen) | 379 | unsigned int hdrlen) |
392 | { | 380 | { |
393 | size_t size = NLMSG_ALIGN(sizeof(struct ovs_header)) | 381 | size_t size = NLMSG_ALIGN(sizeof(struct ovs_header)) |
394 | + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */ | 382 | + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */ |
395 | + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */ | 383 | + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */ |
396 | 384 | ||
397 | /* OVS_PACKET_ATTR_USERDATA */ | 385 | /* OVS_PACKET_ATTR_USERDATA */ |
398 | if (userdata) | 386 | if (userdata) |
@@ -512,6 +500,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) | |||
512 | struct sw_flow_actions *acts; | 500 | struct sw_flow_actions *acts; |
513 | struct sk_buff *packet; | 501 | struct sk_buff *packet; |
514 | struct sw_flow *flow; | 502 | struct sw_flow *flow; |
503 | struct sw_flow_actions *sf_acts; | ||
515 | struct datapath *dp; | 504 | struct datapath *dp; |
516 | struct ethhdr *eth; | 505 | struct ethhdr *eth; |
517 | struct vport *input_vport; | 506 | struct vport *input_vport; |
@@ -554,25 +543,18 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) | |||
554 | if (err) | 543 | if (err) |
555 | goto err_flow_free; | 544 | goto err_flow_free; |
556 | 545 | ||
557 | acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); | ||
558 | err = PTR_ERR(acts); | ||
559 | if (IS_ERR(acts)) | ||
560 | goto err_flow_free; | ||
561 | |||
562 | err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], | 546 | err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], |
563 | &flow->key, 0, &acts); | 547 | &flow->key, &acts); |
564 | if (err) | 548 | if (err) |
565 | goto err_flow_free; | 549 | goto err_flow_free; |
566 | 550 | ||
567 | rcu_assign_pointer(flow->sf_acts, acts); | 551 | rcu_assign_pointer(flow->sf_acts, acts); |
568 | |||
569 | OVS_CB(packet)->egress_tun_info = NULL; | 552 | OVS_CB(packet)->egress_tun_info = NULL; |
570 | OVS_CB(packet)->flow = flow; | ||
571 | packet->priority = flow->key.phy.priority; | 553 | packet->priority = flow->key.phy.priority; |
572 | packet->mark = flow->key.phy.skb_mark; | 554 | packet->mark = flow->key.phy.skb_mark; |
573 | 555 | ||
574 | rcu_read_lock(); | 556 | rcu_read_lock(); |
575 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); | 557 | dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); |
576 | err = -ENODEV; | 558 | err = -ENODEV; |
577 | if (!dp) | 559 | if (!dp) |
578 | goto err_unlock; | 560 | goto err_unlock; |
@@ -585,9 +567,10 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) | |||
585 | goto err_unlock; | 567 | goto err_unlock; |
586 | 568 | ||
587 | OVS_CB(packet)->input_vport = input_vport; | 569 | OVS_CB(packet)->input_vport = input_vport; |
570 | sf_acts = rcu_dereference(flow->sf_acts); | ||
588 | 571 | ||
589 | local_bh_disable(); | 572 | local_bh_disable(); |
590 | err = ovs_execute_actions(dp, packet, &flow->key); | 573 | err = ovs_execute_actions(dp, packet, sf_acts, &flow->key); |
591 | local_bh_enable(); | 574 | local_bh_enable(); |
592 | rcu_read_unlock(); | 575 | rcu_read_unlock(); |
593 | 576 | ||
@@ -664,8 +647,8 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats, | |||
664 | static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) | 647 | static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) |
665 | { | 648 | { |
666 | return NLMSG_ALIGN(sizeof(struct ovs_header)) | 649 | return NLMSG_ALIGN(sizeof(struct ovs_header)) |
667 | + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */ | 650 | + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */ |
668 | + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */ | 651 | + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */ |
669 | + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ | 652 | + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ |
670 | + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ | 653 | + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ |
671 | + nla_total_size(8) /* OVS_FLOW_ATTR_USED */ | 654 | + nla_total_size(8) /* OVS_FLOW_ATTR_USED */ |
@@ -673,58 +656,67 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) | |||
673 | } | 656 | } |
674 | 657 | ||
675 | /* Called with ovs_mutex or RCU read lock. */ | 658 | /* Called with ovs_mutex or RCU read lock. */ |
676 | static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, | 659 | static int ovs_flow_cmd_fill_match(const struct sw_flow *flow, |
677 | struct sk_buff *skb, u32 portid, | 660 | struct sk_buff *skb) |
678 | u32 seq, u32 flags, u8 cmd) | ||
679 | { | 661 | { |
680 | const int skb_orig_len = skb->len; | ||
681 | struct nlattr *start; | ||
682 | struct ovs_flow_stats stats; | ||
683 | __be16 tcp_flags; | ||
684 | unsigned long used; | ||
685 | struct ovs_header *ovs_header; | ||
686 | struct nlattr *nla; | 662 | struct nlattr *nla; |
687 | int err; | 663 | int err; |
688 | 664 | ||
689 | ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd); | ||
690 | if (!ovs_header) | ||
691 | return -EMSGSIZE; | ||
692 | |||
693 | ovs_header->dp_ifindex = dp_ifindex; | ||
694 | |||
695 | /* Fill flow key. */ | 665 | /* Fill flow key. */ |
696 | nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); | 666 | nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); |
697 | if (!nla) | 667 | if (!nla) |
698 | goto nla_put_failure; | 668 | return -EMSGSIZE; |
699 | 669 | ||
700 | err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb); | 670 | err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb); |
701 | if (err) | 671 | if (err) |
702 | goto error; | 672 | return err; |
673 | |||
703 | nla_nest_end(skb, nla); | 674 | nla_nest_end(skb, nla); |
704 | 675 | ||
676 | /* Fill flow mask. */ | ||
705 | nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK); | 677 | nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK); |
706 | if (!nla) | 678 | if (!nla) |
707 | goto nla_put_failure; | 679 | return -EMSGSIZE; |
708 | 680 | ||
709 | err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb); | 681 | err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb); |
710 | if (err) | 682 | if (err) |
711 | goto error; | 683 | return err; |
712 | 684 | ||
713 | nla_nest_end(skb, nla); | 685 | nla_nest_end(skb, nla); |
686 | return 0; | ||
687 | } | ||
688 | |||
689 | /* Called with ovs_mutex or RCU read lock. */ | ||
690 | static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow, | ||
691 | struct sk_buff *skb) | ||
692 | { | ||
693 | struct ovs_flow_stats stats; | ||
694 | __be16 tcp_flags; | ||
695 | unsigned long used; | ||
714 | 696 | ||
715 | ovs_flow_stats_get(flow, &stats, &used, &tcp_flags); | 697 | ovs_flow_stats_get(flow, &stats, &used, &tcp_flags); |
716 | 698 | ||
717 | if (used && | 699 | if (used && |
718 | nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) | 700 | nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) |
719 | goto nla_put_failure; | 701 | return -EMSGSIZE; |
720 | 702 | ||
721 | if (stats.n_packets && | 703 | if (stats.n_packets && |
722 | nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats)) | 704 | nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats)) |
723 | goto nla_put_failure; | 705 | return -EMSGSIZE; |
724 | 706 | ||
725 | if ((u8)ntohs(tcp_flags) && | 707 | if ((u8)ntohs(tcp_flags) && |
726 | nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags))) | 708 | nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags))) |
727 | goto nla_put_failure; | 709 | return -EMSGSIZE; |
710 | |||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | /* Called with ovs_mutex or RCU read lock. */ | ||
715 | static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow, | ||
716 | struct sk_buff *skb, int skb_orig_len) | ||
717 | { | ||
718 | struct nlattr *start; | ||
719 | int err; | ||
728 | 720 | ||
729 | /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if | 721 | /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if |
730 | * this is the first flow to be dumped into 'skb'. This is unusual for | 722 | * this is the first flow to be dumped into 'skb'. This is unusual for |
@@ -748,17 +740,47 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, | |||
748 | nla_nest_end(skb, start); | 740 | nla_nest_end(skb, start); |
749 | else { | 741 | else { |
750 | if (skb_orig_len) | 742 | if (skb_orig_len) |
751 | goto error; | 743 | return err; |
752 | 744 | ||
753 | nla_nest_cancel(skb, start); | 745 | nla_nest_cancel(skb, start); |
754 | } | 746 | } |
755 | } else if (skb_orig_len) | 747 | } else if (skb_orig_len) { |
756 | goto nla_put_failure; | 748 | return -EMSGSIZE; |
749 | } | ||
750 | |||
751 | return 0; | ||
752 | } | ||
753 | |||
754 | /* Called with ovs_mutex or RCU read lock. */ | ||
755 | static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, | ||
756 | struct sk_buff *skb, u32 portid, | ||
757 | u32 seq, u32 flags, u8 cmd) | ||
758 | { | ||
759 | const int skb_orig_len = skb->len; | ||
760 | struct ovs_header *ovs_header; | ||
761 | int err; | ||
762 | |||
763 | ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, | ||
764 | flags, cmd); | ||
765 | if (!ovs_header) | ||
766 | return -EMSGSIZE; | ||
767 | |||
768 | ovs_header->dp_ifindex = dp_ifindex; | ||
769 | |||
770 | err = ovs_flow_cmd_fill_match(flow, skb); | ||
771 | if (err) | ||
772 | goto error; | ||
773 | |||
774 | err = ovs_flow_cmd_fill_stats(flow, skb); | ||
775 | if (err) | ||
776 | goto error; | ||
777 | |||
778 | err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len); | ||
779 | if (err) | ||
780 | goto error; | ||
757 | 781 | ||
758 | return genlmsg_end(skb, ovs_header); | 782 | return genlmsg_end(skb, ovs_header); |
759 | 783 | ||
760 | nla_put_failure: | ||
761 | err = -EMSGSIZE; | ||
762 | error: | 784 | error: |
763 | genlmsg_cancel(skb, ovs_header); | 785 | genlmsg_cancel(skb, ovs_header); |
764 | return err; | 786 | return err; |
@@ -816,10 +838,14 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
816 | 838 | ||
817 | /* Must have key and actions. */ | 839 | /* Must have key and actions. */ |
818 | error = -EINVAL; | 840 | error = -EINVAL; |
819 | if (!a[OVS_FLOW_ATTR_KEY]) | 841 | if (!a[OVS_FLOW_ATTR_KEY]) { |
842 | OVS_NLERR("Flow key attribute not present in new flow.\n"); | ||
820 | goto error; | 843 | goto error; |
821 | if (!a[OVS_FLOW_ATTR_ACTIONS]) | 844 | } |
845 | if (!a[OVS_FLOW_ATTR_ACTIONS]) { | ||
846 | OVS_NLERR("Flow actions attribute not present in new flow.\n"); | ||
822 | goto error; | 847 | goto error; |
848 | } | ||
823 | 849 | ||
824 | /* Most of the time we need to allocate a new flow, do it before | 850 | /* Most of the time we need to allocate a new flow, do it before |
825 | * locking. | 851 | * locking. |
@@ -840,16 +866,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
840 | ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask); | 866 | ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask); |
841 | 867 | ||
842 | /* Validate actions. */ | 868 | /* Validate actions. */ |
843 | acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); | ||
844 | error = PTR_ERR(acts); | ||
845 | if (IS_ERR(acts)) | ||
846 | goto err_kfree_flow; | ||
847 | |||
848 | error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key, | 869 | error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key, |
849 | 0, &acts); | 870 | &acts); |
850 | if (error) { | 871 | if (error) { |
851 | OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); | 872 | OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); |
852 | goto err_kfree_acts; | 873 | goto err_kfree_flow; |
853 | } | 874 | } |
854 | 875 | ||
855 | reply = ovs_flow_cmd_alloc_info(acts, info, false); | 876 | reply = ovs_flow_cmd_alloc_info(acts, info, false); |
@@ -940,6 +961,7 @@ error: | |||
940 | return error; | 961 | return error; |
941 | } | 962 | } |
942 | 963 | ||
964 | /* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */ | ||
943 | static struct sw_flow_actions *get_flow_actions(const struct nlattr *a, | 965 | static struct sw_flow_actions *get_flow_actions(const struct nlattr *a, |
944 | const struct sw_flow_key *key, | 966 | const struct sw_flow_key *key, |
945 | const struct sw_flow_mask *mask) | 967 | const struct sw_flow_mask *mask) |
@@ -948,15 +970,10 @@ static struct sw_flow_actions *get_flow_actions(const struct nlattr *a, | |||
948 | struct sw_flow_key masked_key; | 970 | struct sw_flow_key masked_key; |
949 | int error; | 971 | int error; |
950 | 972 | ||
951 | acts = ovs_nla_alloc_flow_actions(nla_len(a)); | ||
952 | if (IS_ERR(acts)) | ||
953 | return acts; | ||
954 | |||
955 | ovs_flow_mask_key(&masked_key, key, mask); | 973 | ovs_flow_mask_key(&masked_key, key, mask); |
956 | error = ovs_nla_copy_actions(a, &masked_key, 0, &acts); | 974 | error = ovs_nla_copy_actions(a, &masked_key, &acts); |
957 | if (error) { | 975 | if (error) { |
958 | OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); | 976 | OVS_NLERR("Actions may not be safe on all matching packets.\n"); |
959 | kfree(acts); | ||
960 | return ERR_PTR(error); | 977 | return ERR_PTR(error); |
961 | } | 978 | } |
962 | 979 | ||
@@ -978,8 +995,10 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
978 | 995 | ||
979 | /* Extract key. */ | 996 | /* Extract key. */ |
980 | error = -EINVAL; | 997 | error = -EINVAL; |
981 | if (!a[OVS_FLOW_ATTR_KEY]) | 998 | if (!a[OVS_FLOW_ATTR_KEY]) { |
999 | OVS_NLERR("Flow key attribute not present in set flow.\n"); | ||
982 | goto error; | 1000 | goto error; |
1001 | } | ||
983 | 1002 | ||
984 | ovs_match_init(&match, &key, &mask); | 1003 | ovs_match_init(&match, &key, &mask); |
985 | error = ovs_nla_get_match(&match, | 1004 | error = ovs_nla_get_match(&match, |
@@ -994,10 +1013,8 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
994 | error = PTR_ERR(acts); | 1013 | error = PTR_ERR(acts); |
995 | goto error; | 1014 | goto error; |
996 | } | 1015 | } |
997 | } | ||
998 | 1016 | ||
999 | /* Can allocate before locking if have acts. */ | 1017 | /* Can allocate before locking if have acts. */ |
1000 | if (acts) { | ||
1001 | reply = ovs_flow_cmd_alloc_info(acts, info, false); | 1018 | reply = ovs_flow_cmd_alloc_info(acts, info, false); |
1002 | if (IS_ERR(reply)) { | 1019 | if (IS_ERR(reply)) { |
1003 | error = PTR_ERR(reply); | 1020 | error = PTR_ERR(reply); |
@@ -1181,7 +1198,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1181 | struct datapath *dp; | 1198 | struct datapath *dp; |
1182 | 1199 | ||
1183 | rcu_read_lock(); | 1200 | rcu_read_lock(); |
1184 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); | 1201 | dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); |
1185 | if (!dp) { | 1202 | if (!dp) { |
1186 | rcu_read_unlock(); | 1203 | rcu_read_unlock(); |
1187 | return -ENODEV; | 1204 | return -ENODEV; |
@@ -1444,7 +1461,7 @@ err_destroy_ports_array: | |||
1444 | err_destroy_percpu: | 1461 | err_destroy_percpu: |
1445 | free_percpu(dp->stats_percpu); | 1462 | free_percpu(dp->stats_percpu); |
1446 | err_destroy_table: | 1463 | err_destroy_table: |
1447 | ovs_flow_tbl_destroy(&dp->table, false); | 1464 | ovs_flow_tbl_destroy(&dp->table); |
1448 | err_free_dp: | 1465 | err_free_dp: |
1449 | release_net(ovs_dp_get_net(dp)); | 1466 | release_net(ovs_dp_get_net(dp)); |
1450 | kfree(dp); | 1467 | kfree(dp); |
@@ -1476,8 +1493,6 @@ static void __dp_destroy(struct datapath *dp) | |||
1476 | ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL)); | 1493 | ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL)); |
1477 | 1494 | ||
1478 | /* RCU destroy the flow table */ | 1495 | /* RCU destroy the flow table */ |
1479 | ovs_flow_tbl_destroy(&dp->table, true); | ||
1480 | |||
1481 | call_rcu(&dp->rcu, destroy_dp_rcu); | 1496 | call_rcu(&dp->rcu, destroy_dp_rcu); |
1482 | } | 1497 | } |
1483 | 1498 | ||
@@ -1945,7 +1960,7 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1945 | int i, j = 0; | 1960 | int i, j = 0; |
1946 | 1961 | ||
1947 | rcu_read_lock(); | 1962 | rcu_read_lock(); |
1948 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); | 1963 | dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); |
1949 | if (!dp) { | 1964 | if (!dp) { |
1950 | rcu_read_unlock(); | 1965 | rcu_read_unlock(); |
1951 | return -ENODEV; | 1966 | return -ENODEV; |
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 974135439c5c..1c56a80d6677 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -94,14 +94,12 @@ struct datapath { | |||
94 | 94 | ||
95 | /** | 95 | /** |
96 | * struct ovs_skb_cb - OVS data in skb CB | 96 | * struct ovs_skb_cb - OVS data in skb CB |
97 | * @flow: The flow associated with this packet. May be %NULL if no flow. | ||
98 | * @egress_tun_key: Tunnel information about this packet on egress path. | 97 | * @egress_tun_key: Tunnel information about this packet on egress path. |
99 | * NULL if the packet is not being tunneled. | 98 | * NULL if the packet is not being tunneled. |
100 | * @input_vport: The original vport packet came in on. This value is cached | 99 | * @input_vport: The original vport packet came in on. This value is cached |
101 | * when a packet is received by OVS. | 100 | * when a packet is received by OVS. |
102 | */ | 101 | */ |
103 | struct ovs_skb_cb { | 102 | struct ovs_skb_cb { |
104 | struct sw_flow *flow; | ||
105 | struct ovs_tunnel_info *egress_tun_info; | 103 | struct ovs_tunnel_info *egress_tun_info; |
106 | struct vport *input_vport; | 104 | struct vport *input_vport; |
107 | }; | 105 | }; |
@@ -194,7 +192,7 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq, | |||
194 | u8 cmd); | 192 | u8 cmd); |
195 | 193 | ||
196 | int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, | 194 | int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, |
197 | struct sw_flow_key *); | 195 | struct sw_flow_actions *acts, struct sw_flow_key *); |
198 | 196 | ||
199 | void ovs_dp_notify_wq(struct work_struct *work); | 197 | void ovs_dp_notify_wq(struct work_struct *work); |
200 | 198 | ||
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 2b78789ea7c5..90a21010fc8f 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/if_arp.h> | 32 | #include <linux/if_arp.h> |
33 | #include <linux/ip.h> | 33 | #include <linux/ip.h> |
34 | #include <linux/ipv6.h> | 34 | #include <linux/ipv6.h> |
35 | #include <linux/mpls.h> | ||
35 | #include <linux/sctp.h> | 36 | #include <linux/sctp.h> |
36 | #include <linux/smp.h> | 37 | #include <linux/smp.h> |
37 | #include <linux/tcp.h> | 38 | #include <linux/tcp.h> |
@@ -42,6 +43,7 @@ | |||
42 | #include <net/ip.h> | 43 | #include <net/ip.h> |
43 | #include <net/ip_tunnels.h> | 44 | #include <net/ip_tunnels.h> |
44 | #include <net/ipv6.h> | 45 | #include <net/ipv6.h> |
46 | #include <net/mpls.h> | ||
45 | #include <net/ndisc.h> | 47 | #include <net/ndisc.h> |
46 | 48 | ||
47 | #include "datapath.h" | 49 | #include "datapath.h" |
@@ -480,6 +482,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) | |||
480 | return -ENOMEM; | 482 | return -ENOMEM; |
481 | 483 | ||
482 | skb_reset_network_header(skb); | 484 | skb_reset_network_header(skb); |
485 | skb_reset_mac_len(skb); | ||
483 | __skb_push(skb, skb->data - skb_mac_header(skb)); | 486 | __skb_push(skb, skb->data - skb_mac_header(skb)); |
484 | 487 | ||
485 | /* Network layer. */ | 488 | /* Network layer. */ |
@@ -584,6 +587,33 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) | |||
584 | memset(&key->ip, 0, sizeof(key->ip)); | 587 | memset(&key->ip, 0, sizeof(key->ip)); |
585 | memset(&key->ipv4, 0, sizeof(key->ipv4)); | 588 | memset(&key->ipv4, 0, sizeof(key->ipv4)); |
586 | } | 589 | } |
590 | } else if (eth_p_mpls(key->eth.type)) { | ||
591 | size_t stack_len = MPLS_HLEN; | ||
592 | |||
593 | /* In the presence of an MPLS label stack the end of the L2 | ||
594 | * header and the beginning of the L3 header differ. | ||
595 | * | ||
596 | * Advance network_header to the beginning of the L3 | ||
597 | * header. mac_len corresponds to the end of the L2 header. | ||
598 | */ | ||
599 | while (1) { | ||
600 | __be32 lse; | ||
601 | |||
602 | error = check_header(skb, skb->mac_len + stack_len); | ||
603 | if (unlikely(error)) | ||
604 | return 0; | ||
605 | |||
606 | memcpy(&lse, skb_network_header(skb), MPLS_HLEN); | ||
607 | |||
608 | if (stack_len == MPLS_HLEN) | ||
609 | memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN); | ||
610 | |||
611 | skb_set_network_header(skb, skb->mac_len + stack_len); | ||
612 | if (lse & htonl(MPLS_LS_S_MASK)) | ||
613 | break; | ||
614 | |||
615 | stack_len += MPLS_HLEN; | ||
616 | } | ||
587 | } else if (key->eth.type == htons(ETH_P_IPV6)) { | 617 | } else if (key->eth.type == htons(ETH_P_IPV6)) { |
588 | int nh_len; /* IPv6 Header + Extensions */ | 618 | int nh_len; /* IPv6 Header + Extensions */ |
589 | 619 | ||
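The loop added to key_extract() advances network_header one 32-bit label stack entry at a time until the bottom-of-stack (S) bit is seen, recording only the top entry in key->mpls.top_lse. Equivalent standalone logic, for illustration only (the function name is hypothetical, and unlike key_extract() it assumes the whole stack is already linear in memory rather than re-validating each entry with check_header()):

static int example_mpls_stack_depth(const __be32 *stack)
{
	int depth = 1;

	/* Walk until the entry with the bottom-of-stack bit set. */
	while (!(*stack & htonl(MPLS_LS_S_MASK))) {
		stack++;
		depth++;
	}
	return depth;
}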
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 71813318c8c7..4962bee81a11 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -102,12 +102,17 @@ struct sw_flow_key { | |||
102 | __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */ | 102 | __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */ |
103 | __be16 type; /* Ethernet frame type. */ | 103 | __be16 type; /* Ethernet frame type. */ |
104 | } eth; | 104 | } eth; |
105 | struct { | 105 | union { |
106 | u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */ | 106 | struct { |
107 | u8 tos; /* IP ToS. */ | 107 | __be32 top_lse; /* top label stack entry */ |
108 | u8 ttl; /* IP TTL/hop limit. */ | 108 | } mpls; |
109 | u8 frag; /* One of OVS_FRAG_TYPE_*. */ | 109 | struct { |
110 | } ip; | 110 | u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */ |
111 | u8 tos; /* IP ToS. */ | ||
112 | u8 ttl; /* IP TTL/hop limit. */ | ||
113 | u8 frag; /* One of OVS_FRAG_TYPE_*. */ | ||
114 | } ip; | ||
115 | }; | ||
111 | struct { | 116 | struct { |
112 | __be16 src; /* TCP/UDP/SCTP source port. */ | 117 | __be16 src; /* TCP/UDP/SCTP source port. */ |
113 | __be16 dst; /* TCP/UDP/SCTP destination port. */ | 118 | __be16 dst; /* TCP/UDP/SCTP destination port. */ |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 939bcb32100f..ed3109761827 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -46,24 +46,22 @@ | |||
46 | #include <net/ip.h> | 46 | #include <net/ip.h> |
47 | #include <net/ipv6.h> | 47 | #include <net/ipv6.h> |
48 | #include <net/ndisc.h> | 48 | #include <net/ndisc.h> |
49 | #include <net/mpls.h> | ||
49 | 50 | ||
50 | #include "flow_netlink.h" | 51 | #include "flow_netlink.h" |
51 | 52 | ||
52 | static void update_range__(struct sw_flow_match *match, | 53 | static void update_range(struct sw_flow_match *match, |
53 | size_t offset, size_t size, bool is_mask) | 54 | size_t offset, size_t size, bool is_mask) |
54 | { | 55 | { |
55 | struct sw_flow_key_range *range = NULL; | 56 | struct sw_flow_key_range *range; |
56 | size_t start = rounddown(offset, sizeof(long)); | 57 | size_t start = rounddown(offset, sizeof(long)); |
57 | size_t end = roundup(offset + size, sizeof(long)); | 58 | size_t end = roundup(offset + size, sizeof(long)); |
58 | 59 | ||
59 | if (!is_mask) | 60 | if (!is_mask) |
60 | range = &match->range; | 61 | range = &match->range; |
61 | else if (match->mask) | 62 | else |
62 | range = &match->mask->range; | 63 | range = &match->mask->range; |
63 | 64 | ||
64 | if (!range) | ||
65 | return; | ||
66 | |||
67 | if (range->start == range->end) { | 65 | if (range->start == range->end) { |
68 | range->start = start; | 66 | range->start = start; |
69 | range->end = end; | 67 | range->end = end; |
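
update_range() records which bytes of the flow key a parsed attribute touched, widened to long-aligned boundaries so later masked lookups can compare whole words; the NULL check could be dropped because the masked parsing path now always runs with a mask attached to the match. A rough user-space sketch of the rounding and range-merging, with invented names (range_extend, struct key_range).

#include <stddef.h>
#include <stdio.h>

#define ROUNDDOWN(x, a)	((x) / (a) * (a))
#define ROUNDUP(x, a)	(((x) + (a) - 1) / (a) * (a))

struct key_range {
	size_t start;
	size_t end;
};

static void range_extend(struct key_range *r, size_t offset, size_t size)
{
	size_t start = ROUNDDOWN(offset, sizeof(long));
	size_t end = ROUNDUP(offset + size, sizeof(long));

	if (r->start == r->end) {	/* empty range: take [start, end) */
		r->start = start;
		r->end = end;
		return;
	}
	if (start < r->start)
		r->start = start;
	if (end > r->end)
		r->end = end;
}

int main(void)
{
	struct key_range r = { 0, 0 };

	range_extend(&r, 10, 2);	/* a 2-byte field at offset 10 */
	range_extend(&r, 33, 4);	/* a 4-byte field at offset 33 */
	printf("range [%zu, %zu)\n", r.start, r.end);	/* prints [8, 40) */
	return 0;
}
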
@@ -79,22 +77,20 @@ static void update_range__(struct sw_flow_match *match, | |||
79 | 77 | ||
80 | #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ | 78 | #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ |
81 | do { \ | 79 | do { \ |
82 | update_range__(match, offsetof(struct sw_flow_key, field), \ | 80 | update_range(match, offsetof(struct sw_flow_key, field), \ |
83 | sizeof((match)->key->field), is_mask); \ | 81 | sizeof((match)->key->field), is_mask); \ |
84 | if (is_mask) { \ | 82 | if (is_mask) \ |
85 | if ((match)->mask) \ | 83 | (match)->mask->key.field = value; \ |
86 | (match)->mask->key.field = value; \ | 84 | else \ |
87 | } else { \ | ||
88 | (match)->key->field = value; \ | 85 | (match)->key->field = value; \ |
89 | } \ | ||
90 | } while (0) | 86 | } while (0) |
91 | 87 | ||
92 | #define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \ | 88 | #define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \ |
93 | do { \ | 89 | do { \ |
94 | update_range__(match, offset, len, is_mask); \ | 90 | update_range(match, offset, len, is_mask); \ |
95 | if (is_mask) \ | 91 | if (is_mask) \ |
96 | memcpy((u8 *)&(match)->mask->key + offset, value_p, \ | 92 | memcpy((u8 *)&(match)->mask->key + offset, value_p, \ |
97 | len); \ | 93 | len); \ |
98 | else \ | 94 | else \ |
99 | memcpy((u8 *)(match)->key + offset, value_p, len); \ | 95 | memcpy((u8 *)(match)->key + offset, value_p, len); \ |
100 | } while (0) | 96 | } while (0) |
@@ -103,18 +99,16 @@ static void update_range__(struct sw_flow_match *match, | |||
103 | SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \ | 99 | SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \ |
104 | value_p, len, is_mask) | 100 | value_p, len, is_mask) |
105 | 101 | ||
106 | #define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \ | 102 | #define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \ |
107 | do { \ | 103 | do { \ |
108 | update_range__(match, offsetof(struct sw_flow_key, field), \ | 104 | update_range(match, offsetof(struct sw_flow_key, field), \ |
109 | sizeof((match)->key->field), is_mask); \ | 105 | sizeof((match)->key->field), is_mask); \ |
110 | if (is_mask) { \ | 106 | if (is_mask) \ |
111 | if ((match)->mask) \ | 107 | memset((u8 *)&(match)->mask->key.field, value, \ |
112 | memset((u8 *)&(match)->mask->key.field, value,\ | 108 | sizeof((match)->mask->key.field)); \ |
113 | sizeof((match)->mask->key.field)); \ | 109 | else \ |
114 | } else { \ | ||
115 | memset((u8 *)&(match)->key->field, value, \ | 110 | memset((u8 *)&(match)->key->field, value, \ |
116 | sizeof((match)->key->field)); \ | 111 | sizeof((match)->key->field)); \ |
117 | } \ | ||
118 | } while (0) | 112 | } while (0) |
119 | 113 | ||
120 | static bool match_validate(const struct sw_flow_match *match, | 114 | static bool match_validate(const struct sw_flow_match *match, |
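
All three SW_FLOW_KEY_* macros follow the same shape: note the touched range, then write into either the mask key or the flow key depending on is_mask; the `(match)->mask` guards disappear for the same reason as in update_range(). A plain-function illustration of that dispatch over a made-up two-field key; the real macros operate on arbitrary fields via offsetof()/sizeof().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_key { uint16_t tci; uint8_t proto; };

struct demo_match {
	struct demo_key *key;	/* exact values from OVS_FLOW_ATTR_KEY  */
	struct demo_key *mask;	/* wildcard bits from OVS_FLOW_ATTR_MASK */
};

static void put_proto(struct demo_match *m, uint8_t value, bool is_mask)
{
	/* update_range(m, offsetof(...), sizeof(...), is_mask) would go here. */
	if (is_mask)
		m->mask->proto = value;	/* mask is assumed non-NULL now */
	else
		m->key->proto = value;
}

int main(void)
{
	struct demo_key key = { 0 }, mask = { 0 };
	struct demo_match m = { &key, &mask };

	put_proto(&m, 6, false);	/* match TCP ...            */
	put_proto(&m, 0xff, true);	/* ... and match it exactly */
	printf("key.proto=%d mask.proto=%d\n", key.proto, mask.proto);
	return 0;
}
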
@@ -134,7 +128,8 @@ static bool match_validate(const struct sw_flow_match *match, | |||
134 | | (1 << OVS_KEY_ATTR_ICMP) | 128 | | (1 << OVS_KEY_ATTR_ICMP) |
135 | | (1 << OVS_KEY_ATTR_ICMPV6) | 129 | | (1 << OVS_KEY_ATTR_ICMPV6) |
136 | | (1 << OVS_KEY_ATTR_ARP) | 130 | | (1 << OVS_KEY_ATTR_ARP) |
137 | | (1 << OVS_KEY_ATTR_ND)); | 131 | | (1 << OVS_KEY_ATTR_ND) |
132 | | (1 << OVS_KEY_ATTR_MPLS)); | ||
138 | 133 | ||
139 | /* Always allowed mask fields. */ | 134 | /* Always allowed mask fields. */ |
140 | mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL) | 135 | mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL) |
@@ -149,6 +144,12 @@ static bool match_validate(const struct sw_flow_match *match, | |||
149 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; | 144 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; |
150 | } | 145 | } |
151 | 146 | ||
147 | if (eth_p_mpls(match->key->eth.type)) { | ||
148 | key_expected |= 1 << OVS_KEY_ATTR_MPLS; | ||
149 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
150 | mask_allowed |= 1 << OVS_KEY_ATTR_MPLS; | ||
151 | } | ||
152 | |||
152 | if (match->key->eth.type == htons(ETH_P_IP)) { | 153 | if (match->key->eth.type == htons(ETH_P_IP)) { |
153 | key_expected |= 1 << OVS_KEY_ATTR_IPV4; | 154 | key_expected |= 1 << OVS_KEY_ATTR_IPV4; |
154 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | 155 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) |
@@ -244,6 +245,38 @@ static bool match_validate(const struct sw_flow_match *match, | |||
244 | return true; | 245 | return true; |
245 | } | 246 | } |
246 | 247 | ||
248 | size_t ovs_key_attr_size(void) | ||
249 | { | ||
250 | /* Whenever adding new OVS_KEY_ FIELDS, we should consider | ||
251 | * updating this function. | ||
252 | */ | ||
253 | BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22); | ||
254 | |||
255 | return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ | ||
256 | + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ | ||
257 | + nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */ | ||
258 | + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */ | ||
259 | + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */ | ||
260 | + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */ | ||
261 | + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */ | ||
262 | + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */ | ||
263 | + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */ | ||
264 | + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */ | ||
265 | + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */ | ||
266 | + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */ | ||
267 | + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ | ||
268 | + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */ | ||
269 | + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */ | ||
270 | + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ | ||
271 | + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ | ||
272 | + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */ | ||
273 | + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */ | ||
274 | + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ | ||
275 | + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */ | ||
276 | + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */ | ||
277 | + nla_total_size(28); /* OVS_KEY_ATTR_ND */ | ||
278 | } | ||
279 | |||
247 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ | 280 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ |
248 | static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { | 281 | static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { |
249 | [OVS_KEY_ATTR_ENCAP] = -1, | 282 | [OVS_KEY_ATTR_ENCAP] = -1, |
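
ovs_key_attr_size() adds up nla_total_size() for every attribute a dumped flow key may carry, giving callers an upper bound for sizing reply buffers, and the BUILD_BUG_ON pins the attribute count so the sum is revisited whenever a new OVS_KEY_ATTR_* appears. A user-space sketch of the nla_total_size() arithmetic (4-byte-aligned attribute header plus aligned payload, per the netlink uapi); only a few attributes are summed here for brevity.

#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	((int)NLA_ALIGN(4))	/* sizeof(struct nlattr) == 4 */

static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	int size = nla_total_size(4)	/* OVS_KEY_ATTR_PRIORITY  */
		 + nla_total_size(12)	/* OVS_KEY_ATTR_ETHERNET  */
		 + nla_total_size(2)	/* OVS_KEY_ATTR_ETHERTYPE */
		 + nla_total_size(40);	/* OVS_KEY_ATTR_IPV6      */

	/* 8 + 16 + 8 + 44 = 76 bytes reserved for these four attributes. */
	printf("partial key size: %d bytes\n", size);
	return 0;
}
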
@@ -266,6 +299,7 @@ static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { | |||
266 | [OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32), | 299 | [OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32), |
267 | [OVS_KEY_ATTR_DP_HASH] = sizeof(u32), | 300 | [OVS_KEY_ATTR_DP_HASH] = sizeof(u32), |
268 | [OVS_KEY_ATTR_TUNNEL] = -1, | 301 | [OVS_KEY_ATTR_TUNNEL] = -1, |
302 | [OVS_KEY_ATTR_MPLS] = sizeof(struct ovs_key_mpls), | ||
269 | }; | 303 | }; |
270 | 304 | ||
271 | static bool is_all_zero(const u8 *fp, size_t size) | 305 | static bool is_all_zero(const u8 *fp, size_t size) |
@@ -572,10 +606,13 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, | |||
572 | if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) { | 606 | if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) { |
573 | u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]); | 607 | u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]); |
574 | 608 | ||
575 | if (is_mask) | 609 | if (is_mask) { |
576 | in_port = 0xffffffff; /* Always exact match in_port. */ | 610 | in_port = 0xffffffff; /* Always exact match in_port. */ |
577 | else if (in_port >= DP_MAX_PORTS) | 611 | } else if (in_port >= DP_MAX_PORTS) { |
612 | OVS_NLERR("Port (%d) exceeds maximum allowable (%d).\n", | ||
613 | in_port, DP_MAX_PORTS); | ||
578 | return -EINVAL; | 614 | return -EINVAL; |
615 | } | ||
579 | 616 | ||
580 | SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask); | 617 | SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask); |
581 | *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); | 618 | *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); |
@@ -602,7 +639,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, | |||
602 | const struct nlattr **a, bool is_mask) | 639 | const struct nlattr **a, bool is_mask) |
603 | { | 640 | { |
604 | int err; | 641 | int err; |
605 | u64 orig_attrs = attrs; | ||
606 | 642 | ||
607 | err = metadata_from_nlattrs(match, &attrs, a, is_mask); | 643 | err = metadata_from_nlattrs(match, &attrs, a, is_mask); |
608 | if (err) | 644 | if (err) |
@@ -634,8 +670,7 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, | |||
634 | 670 | ||
635 | SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask); | 671 | SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask); |
636 | attrs &= ~(1 << OVS_KEY_ATTR_VLAN); | 672 | attrs &= ~(1 << OVS_KEY_ATTR_VLAN); |
637 | } else if (!is_mask) | 673 | } |
638 | SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); | ||
639 | 674 | ||
640 | if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { | 675 | if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { |
641 | __be16 eth_type; | 676 | __be16 eth_type; |
@@ -735,6 +770,16 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, | |||
735 | attrs &= ~(1 << OVS_KEY_ATTR_ARP); | 770 | attrs &= ~(1 << OVS_KEY_ATTR_ARP); |
736 | } | 771 | } |
737 | 772 | ||
773 | if (attrs & (1 << OVS_KEY_ATTR_MPLS)) { | ||
774 | const struct ovs_key_mpls *mpls_key; | ||
775 | |||
776 | mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]); | ||
777 | SW_FLOW_KEY_PUT(match, mpls.top_lse, | ||
778 | mpls_key->mpls_lse, is_mask); | ||
779 | |||
780 | attrs &= ~(1 << OVS_KEY_ATTR_MPLS); | ||
781 | } | ||
782 | |||
738 | if (attrs & (1 << OVS_KEY_ATTR_TCP)) { | 783 | if (attrs & (1 << OVS_KEY_ATTR_TCP)) { |
739 | const struct ovs_key_tcp *tcp_key; | 784 | const struct ovs_key_tcp *tcp_key; |
740 | 785 | ||
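
Key parsing consumes attributes by testing a bit in the attrs mask, copying the payload into the key (or, for is_mask, the mask), and clearing the bit, which is exactly what the new OVS_KEY_ATTR_MPLS branch above does; any bit still set at the end is reported and rejected by the hunk a little further down. A simplified sketch of that pattern, with invented attribute numbering and payload layout.

#include <stdint.h>
#include <stdio.h>

enum { ATTR_ETHERTYPE = 0, ATTR_MPLS = 1, ATTR_MAX };

struct flow_key { uint16_t eth_type; uint32_t mpls_top_lse; };

static int key_from_attrs(uint64_t attrs, const uint32_t *payload,
			  struct flow_key *key)
{
	if (attrs & (1ull << ATTR_ETHERTYPE)) {
		key->eth_type = (uint16_t)payload[ATTR_ETHERTYPE];
		attrs &= ~(1ull << ATTR_ETHERTYPE);
	}
	if (attrs & (1ull << ATTR_MPLS)) {
		key->mpls_top_lse = payload[ATTR_MPLS];
		attrs &= ~(1ull << ATTR_MPLS);
	}
	if (attrs) {		/* leftover bits: unknown attributes */
		fprintf(stderr, "unknown key attributes (%llx)\n",
			(unsigned long long)attrs);
		return -1;
	}
	return 0;
}

int main(void)
{
	uint32_t payload[ATTR_MAX] = { 0x8847, 0x00064140 };
	struct flow_key key = { 0 };

	return key_from_attrs(3, payload, &key);
}
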
@@ -745,15 +790,9 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, | |||
745 | } | 790 | } |
746 | 791 | ||
747 | if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) { | 792 | if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) { |
748 | if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { | 793 | SW_FLOW_KEY_PUT(match, tp.flags, |
749 | SW_FLOW_KEY_PUT(match, tp.flags, | 794 | nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), |
750 | nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), | 795 | is_mask); |
751 | is_mask); | ||
752 | } else { | ||
753 | SW_FLOW_KEY_PUT(match, tp.flags, | ||
754 | nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), | ||
755 | is_mask); | ||
756 | } | ||
757 | attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS); | 796 | attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS); |
758 | } | 797 | } |
759 | 798 | ||
@@ -812,8 +851,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, | |||
812 | attrs &= ~(1 << OVS_KEY_ATTR_ND); | 851 | attrs &= ~(1 << OVS_KEY_ATTR_ND); |
813 | } | 852 | } |
814 | 853 | ||
815 | if (attrs != 0) | 854 | if (attrs != 0) { |
855 | OVS_NLERR("Unknown key attributes (%llx).\n", | ||
856 | (unsigned long long)attrs); | ||
816 | return -EINVAL; | 857 | return -EINVAL; |
858 | } | ||
817 | 859 | ||
818 | return 0; | 860 | return 0; |
819 | } | 861 | } |
@@ -853,8 +895,8 @@ static void mask_set_nlattr(struct nlattr *attr, u8 val) | |||
853 | * attribute specifies the mask field of the wildcarded flow. | 895 | * attribute specifies the mask field of the wildcarded flow. |
854 | */ | 896 | */ |
855 | int ovs_nla_get_match(struct sw_flow_match *match, | 897 | int ovs_nla_get_match(struct sw_flow_match *match, |
856 | const struct nlattr *key, | 898 | const struct nlattr *nla_key, |
857 | const struct nlattr *mask) | 899 | const struct nlattr *nla_mask) |
858 | { | 900 | { |
859 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; | 901 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; |
860 | const struct nlattr *encap; | 902 | const struct nlattr *encap; |
@@ -864,7 +906,7 @@ int ovs_nla_get_match(struct sw_flow_match *match, | |||
864 | bool encap_valid = false; | 906 | bool encap_valid = false; |
865 | int err; | 907 | int err; |
866 | 908 | ||
867 | err = parse_flow_nlattrs(key, a, &key_attrs); | 909 | err = parse_flow_nlattrs(nla_key, a, &key_attrs); |
868 | if (err) | 910 | if (err) |
869 | return err; | 911 | return err; |
870 | 912 | ||
@@ -905,36 +947,43 @@ int ovs_nla_get_match(struct sw_flow_match *match, | |||
905 | if (err) | 947 | if (err) |
906 | return err; | 948 | return err; |
907 | 949 | ||
908 | if (match->mask && !mask) { | 950 | if (match->mask) { |
909 | /* Create an exact match mask. We need to set to 0xff all the | 951 | if (!nla_mask) { |
910 | * 'match->mask' fields that have been touched in 'match->key'. | 952 | /* Create an exact match mask. We need to set to 0xff |
911 | * We cannot simply memset 'match->mask', because padding bytes | 953 | * all the 'match->mask' fields that have been touched |
912 | * and fields not specified in 'match->key' should be left to 0. | 954 | * in 'match->key'. We cannot simply memset |
913 | * Instead, we use a stream of netlink attributes, copied from | 955 | * 'match->mask', because padding bytes and fields not |
914 | * 'key' and set to 0xff: ovs_key_from_nlattrs() will take care | 956 | * specified in 'match->key' should be left to 0. |
915 | * of filling 'match->mask' appropriately. | 957 | * Instead, we use a stream of netlink attributes, |
916 | */ | 958 | * copied from 'key' and set to 0xff. |
917 | newmask = kmemdup(key, nla_total_size(nla_len(key)), | 959 | * ovs_key_from_nlattrs() will take care of filling |
918 | GFP_KERNEL); | 960 | * 'match->mask' appropriately. |
919 | if (!newmask) | 961 | */ |
920 | return -ENOMEM; | 962 | newmask = kmemdup(nla_key, |
963 | nla_total_size(nla_len(nla_key)), | ||
964 | GFP_KERNEL); | ||
965 | if (!newmask) | ||
966 | return -ENOMEM; | ||
921 | 967 | ||
922 | mask_set_nlattr(newmask, 0xff); | 968 | mask_set_nlattr(newmask, 0xff); |
923 | 969 | ||
924 | /* The userspace does not send tunnel attributes that are 0, | 970 | /* The userspace does not send tunnel attributes that |
925 | * but we should not wildcard them nonetheless. | 971 | * are 0, but we should not wildcard them nonetheless. |
926 | */ | 972 | */ |
927 | if (match->key->tun_key.ipv4_dst) | 973 | if (match->key->tun_key.ipv4_dst) |
928 | SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true); | 974 | SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, |
975 | 0xff, true); | ||
929 | 976 | ||
930 | mask = newmask; | 977 | nla_mask = newmask; |
931 | } | 978 | } |
932 | 979 | ||
933 | if (mask) { | 980 | err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs); |
934 | err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); | ||
935 | if (err) | 981 | if (err) |
936 | goto free_newmask; | 982 | goto free_newmask; |
937 | 983 | ||
984 | /* Always match on tci. */ | ||
985 | SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); | ||
986 | |||
938 | if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) { | 987 | if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) { |
939 | __be16 eth_type = 0; | 988 | __be16 eth_type = 0; |
940 | __be16 tci = 0; | 989 | __be16 tci = 0; |
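
When userspace sends no OVS_FLOW_ATTR_MASK, the restructured block above synthesizes an exact match: it duplicates the key attribute stream, saturates every payload byte to 0xff via mask_set_nlattr(), and feeds the copy through the normal mask parsing path, so padding and fields the key never mentioned stay wildcarded at zero; the unconditional "always match on tci" write now happens after mask parsing. A rough user-space sketch of the duplicate-and-saturate step over a flat attribute stream; the real helper also recurses into nested attributes such as OVS_KEY_ATTR_ENCAP and validates lengths, and the names here are invented.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct nlattr_hdr {		/* mirrors struct nlattr */
	uint16_t nla_len;	/* header + payload, unpadded */
	uint16_t nla_type;
};

#define NLA_ALIGN(len)	(((len) + 3) & ~3)

/* Copy 'len' bytes of attributes and set every payload byte to 0xff. */
static void *make_exact_mask(const void *attrs, int len)
{
	uint8_t *copy = malloc(len);
	int off = 0;

	if (!copy)
		return NULL;
	memcpy(copy, attrs, len);

	while (off + (int)sizeof(struct nlattr_hdr) <= len) {
		struct nlattr_hdr *nla = (struct nlattr_hdr *)(copy + off);
		int payload = nla->nla_len - sizeof(struct nlattr_hdr);

		memset(copy + off + sizeof(struct nlattr_hdr), 0xff, payload);
		off += NLA_ALIGN(nla->nla_len);
	}
	return copy;
}

int main(void)
{
	struct { struct nlattr_hdr hdr; uint32_t value; } key = {
		{ sizeof(key), 1 }, 5		/* one u32 attribute of type 1 */
	};
	uint8_t *mask = make_exact_mask(&key, sizeof(key));

	/* 'mask' holds the same attribute header with payload ff ff ff ff. */
	free(mask);
	return 0;
}
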
@@ -1140,6 +1189,14 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey, | |||
1140 | arp_key->arp_op = htons(output->ip.proto); | 1189 | arp_key->arp_op = htons(output->ip.proto); |
1141 | ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha); | 1190 | ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha); |
1142 | ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha); | 1191 | ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha); |
1192 | } else if (eth_p_mpls(swkey->eth.type)) { | ||
1193 | struct ovs_key_mpls *mpls_key; | ||
1194 | |||
1195 | nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key)); | ||
1196 | if (!nla) | ||
1197 | goto nla_put_failure; | ||
1198 | mpls_key = nla_data(nla); | ||
1199 | mpls_key->mpls_lse = output->mpls.top_lse; | ||
1143 | } | 1200 | } |
1144 | 1201 | ||
1145 | if ((swkey->eth.type == htons(ETH_P_IP) || | 1202 | if ((swkey->eth.type == htons(ETH_P_IP) || |
@@ -1226,12 +1283,14 @@ nla_put_failure: | |||
1226 | 1283 | ||
1227 | #define MAX_ACTIONS_BUFSIZE (32 * 1024) | 1284 | #define MAX_ACTIONS_BUFSIZE (32 * 1024) |
1228 | 1285 | ||
1229 | struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size) | 1286 | static struct sw_flow_actions *nla_alloc_flow_actions(int size) |
1230 | { | 1287 | { |
1231 | struct sw_flow_actions *sfa; | 1288 | struct sw_flow_actions *sfa; |
1232 | 1289 | ||
1233 | if (size > MAX_ACTIONS_BUFSIZE) | 1290 | if (size > MAX_ACTIONS_BUFSIZE) { |
1291 | OVS_NLERR("Flow action size (%u bytes) exceeds maximum", size); | ||
1234 | return ERR_PTR(-EINVAL); | 1292 | return ERR_PTR(-EINVAL); |
1293 | } | ||
1235 | 1294 | ||
1236 | sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); | 1295 | sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); |
1237 | if (!sfa) | 1296 | if (!sfa) |
@@ -1269,7 +1328,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, | |||
1269 | new_acts_size = MAX_ACTIONS_BUFSIZE; | 1328 | new_acts_size = MAX_ACTIONS_BUFSIZE; |
1270 | } | 1329 | } |
1271 | 1330 | ||
1272 | acts = ovs_nla_alloc_flow_actions(new_acts_size); | 1331 | acts = nla_alloc_flow_actions(new_acts_size); |
1273 | if (IS_ERR(acts)) | 1332 | if (IS_ERR(acts)) |
1274 | return (void *)acts; | 1333 | return (void *)acts; |
1275 | 1334 | ||
@@ -1336,9 +1395,15 @@ static inline void add_nested_action_end(struct sw_flow_actions *sfa, | |||
1336 | a->nla_len = sfa->actions_len - st_offset; | 1395 | a->nla_len = sfa->actions_len - st_offset; |
1337 | } | 1396 | } |
1338 | 1397 | ||
1398 | static int __ovs_nla_copy_actions(const struct nlattr *attr, | ||
1399 | const struct sw_flow_key *key, | ||
1400 | int depth, struct sw_flow_actions **sfa, | ||
1401 | __be16 eth_type, __be16 vlan_tci); | ||
1402 | |||
1339 | static int validate_and_copy_sample(const struct nlattr *attr, | 1403 | static int validate_and_copy_sample(const struct nlattr *attr, |
1340 | const struct sw_flow_key *key, int depth, | 1404 | const struct sw_flow_key *key, int depth, |
1341 | struct sw_flow_actions **sfa) | 1405 | struct sw_flow_actions **sfa, |
1406 | __be16 eth_type, __be16 vlan_tci) | ||
1342 | { | 1407 | { |
1343 | const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; | 1408 | const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; |
1344 | const struct nlattr *probability, *actions; | 1409 | const struct nlattr *probability, *actions; |
@@ -1375,7 +1440,8 @@ static int validate_and_copy_sample(const struct nlattr *attr, | |||
1375 | if (st_acts < 0) | 1440 | if (st_acts < 0) |
1376 | return st_acts; | 1441 | return st_acts; |
1377 | 1442 | ||
1378 | err = ovs_nla_copy_actions(actions, key, depth + 1, sfa); | 1443 | err = __ovs_nla_copy_actions(actions, key, depth + 1, sfa, |
1444 | eth_type, vlan_tci); | ||
1379 | if (err) | 1445 | if (err) |
1380 | return err; | 1446 | return err; |
1381 | 1447 | ||
@@ -1385,10 +1451,10 @@ static int validate_and_copy_sample(const struct nlattr *attr, | |||
1385 | return 0; | 1451 | return 0; |
1386 | } | 1452 | } |
1387 | 1453 | ||
1388 | static int validate_tp_port(const struct sw_flow_key *flow_key) | 1454 | static int validate_tp_port(const struct sw_flow_key *flow_key, |
1455 | __be16 eth_type) | ||
1389 | { | 1456 | { |
1390 | if ((flow_key->eth.type == htons(ETH_P_IP) || | 1457 | if ((eth_type == htons(ETH_P_IP) || eth_type == htons(ETH_P_IPV6)) && |
1391 | flow_key->eth.type == htons(ETH_P_IPV6)) && | ||
1392 | (flow_key->tp.src || flow_key->tp.dst)) | 1458 | (flow_key->tp.src || flow_key->tp.dst)) |
1393 | return 0; | 1459 | return 0; |
1394 | 1460 | ||
@@ -1483,7 +1549,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, | |||
1483 | static int validate_set(const struct nlattr *a, | 1549 | static int validate_set(const struct nlattr *a, |
1484 | const struct sw_flow_key *flow_key, | 1550 | const struct sw_flow_key *flow_key, |
1485 | struct sw_flow_actions **sfa, | 1551 | struct sw_flow_actions **sfa, |
1486 | bool *set_tun) | 1552 | bool *set_tun, __be16 eth_type) |
1487 | { | 1553 | { |
1488 | const struct nlattr *ovs_key = nla_data(a); | 1554 | const struct nlattr *ovs_key = nla_data(a); |
1489 | int key_type = nla_type(ovs_key); | 1555 | int key_type = nla_type(ovs_key); |
@@ -1508,6 +1574,9 @@ static int validate_set(const struct nlattr *a, | |||
1508 | break; | 1574 | break; |
1509 | 1575 | ||
1510 | case OVS_KEY_ATTR_TUNNEL: | 1576 | case OVS_KEY_ATTR_TUNNEL: |
1577 | if (eth_p_mpls(eth_type)) | ||
1578 | return -EINVAL; | ||
1579 | |||
1511 | *set_tun = true; | 1580 | *set_tun = true; |
1512 | err = validate_and_copy_set_tun(a, sfa); | 1581 | err = validate_and_copy_set_tun(a, sfa); |
1513 | if (err) | 1582 | if (err) |
@@ -1515,7 +1584,7 @@ static int validate_set(const struct nlattr *a, | |||
1515 | break; | 1584 | break; |
1516 | 1585 | ||
1517 | case OVS_KEY_ATTR_IPV4: | 1586 | case OVS_KEY_ATTR_IPV4: |
1518 | if (flow_key->eth.type != htons(ETH_P_IP)) | 1587 | if (eth_type != htons(ETH_P_IP)) |
1519 | return -EINVAL; | 1588 | return -EINVAL; |
1520 | 1589 | ||
1521 | if (!flow_key->ip.proto) | 1590 | if (!flow_key->ip.proto) |
@@ -1531,7 +1600,7 @@ static int validate_set(const struct nlattr *a, | |||
1531 | break; | 1600 | break; |
1532 | 1601 | ||
1533 | case OVS_KEY_ATTR_IPV6: | 1602 | case OVS_KEY_ATTR_IPV6: |
1534 | if (flow_key->eth.type != htons(ETH_P_IPV6)) | 1603 | if (eth_type != htons(ETH_P_IPV6)) |
1535 | return -EINVAL; | 1604 | return -EINVAL; |
1536 | 1605 | ||
1537 | if (!flow_key->ip.proto) | 1606 | if (!flow_key->ip.proto) |
@@ -1553,19 +1622,24 @@ static int validate_set(const struct nlattr *a, | |||
1553 | if (flow_key->ip.proto != IPPROTO_TCP) | 1622 | if (flow_key->ip.proto != IPPROTO_TCP) |
1554 | return -EINVAL; | 1623 | return -EINVAL; |
1555 | 1624 | ||
1556 | return validate_tp_port(flow_key); | 1625 | return validate_tp_port(flow_key, eth_type); |
1557 | 1626 | ||
1558 | case OVS_KEY_ATTR_UDP: | 1627 | case OVS_KEY_ATTR_UDP: |
1559 | if (flow_key->ip.proto != IPPROTO_UDP) | 1628 | if (flow_key->ip.proto != IPPROTO_UDP) |
1560 | return -EINVAL; | 1629 | return -EINVAL; |
1561 | 1630 | ||
1562 | return validate_tp_port(flow_key); | 1631 | return validate_tp_port(flow_key, eth_type); |
1632 | |||
1633 | case OVS_KEY_ATTR_MPLS: | ||
1634 | if (!eth_p_mpls(eth_type)) | ||
1635 | return -EINVAL; | ||
1636 | break; | ||
1563 | 1637 | ||
1564 | case OVS_KEY_ATTR_SCTP: | 1638 | case OVS_KEY_ATTR_SCTP: |
1565 | if (flow_key->ip.proto != IPPROTO_SCTP) | 1639 | if (flow_key->ip.proto != IPPROTO_SCTP) |
1566 | return -EINVAL; | 1640 | return -EINVAL; |
1567 | 1641 | ||
1568 | return validate_tp_port(flow_key); | 1642 | return validate_tp_port(flow_key, eth_type); |
1569 | 1643 | ||
1570 | default: | 1644 | default: |
1571 | return -EINVAL; | 1645 | return -EINVAL; |
@@ -1609,12 +1683,13 @@ static int copy_action(const struct nlattr *from, | |||
1609 | return 0; | 1683 | return 0; |
1610 | } | 1684 | } |
1611 | 1685 | ||
1612 | int ovs_nla_copy_actions(const struct nlattr *attr, | 1686 | static int __ovs_nla_copy_actions(const struct nlattr *attr, |
1613 | const struct sw_flow_key *key, | 1687 | const struct sw_flow_key *key, |
1614 | int depth, | 1688 | int depth, struct sw_flow_actions **sfa, |
1615 | struct sw_flow_actions **sfa) | 1689 | __be16 eth_type, __be16 vlan_tci) |
1616 | { | 1690 | { |
1617 | const struct nlattr *a; | 1691 | const struct nlattr *a; |
1692 | bool out_tnl_port = false; | ||
1618 | int rem, err; | 1693 | int rem, err; |
1619 | 1694 | ||
1620 | if (depth >= SAMPLE_ACTION_DEPTH) | 1695 | if (depth >= SAMPLE_ACTION_DEPTH) |
@@ -1626,6 +1701,8 @@ int ovs_nla_copy_actions(const struct nlattr *attr, | |||
1626 | [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), | 1701 | [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), |
1627 | [OVS_ACTION_ATTR_RECIRC] = sizeof(u32), | 1702 | [OVS_ACTION_ATTR_RECIRC] = sizeof(u32), |
1628 | [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, | 1703 | [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, |
1704 | [OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls), | ||
1705 | [OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16), | ||
1629 | [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), | 1706 | [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), |
1630 | [OVS_ACTION_ATTR_POP_VLAN] = 0, | 1707 | [OVS_ACTION_ATTR_POP_VLAN] = 0, |
1631 | [OVS_ACTION_ATTR_SET] = (u32)-1, | 1708 | [OVS_ACTION_ATTR_SET] = (u32)-1, |
@@ -1655,6 +1732,8 @@ int ovs_nla_copy_actions(const struct nlattr *attr, | |||
1655 | case OVS_ACTION_ATTR_OUTPUT: | 1732 | case OVS_ACTION_ATTR_OUTPUT: |
1656 | if (nla_get_u32(a) >= DP_MAX_PORTS) | 1733 | if (nla_get_u32(a) >= DP_MAX_PORTS) |
1657 | return -EINVAL; | 1734 | return -EINVAL; |
1735 | out_tnl_port = false; | ||
1736 | |||
1658 | break; | 1737 | break; |
1659 | 1738 | ||
1660 | case OVS_ACTION_ATTR_HASH: { | 1739 | case OVS_ACTION_ATTR_HASH: { |
@@ -1671,6 +1750,7 @@ int ovs_nla_copy_actions(const struct nlattr *attr, | |||
1671 | } | 1750 | } |
1672 | 1751 | ||
1673 | case OVS_ACTION_ATTR_POP_VLAN: | 1752 | case OVS_ACTION_ATTR_POP_VLAN: |
1753 | vlan_tci = htons(0); | ||
1674 | break; | 1754 | break; |
1675 | 1755 | ||
1676 | case OVS_ACTION_ATTR_PUSH_VLAN: | 1756 | case OVS_ACTION_ATTR_PUSH_VLAN: |
@@ -1679,25 +1759,73 @@ int ovs_nla_copy_actions(const struct nlattr *attr, | |||
1679 | return -EINVAL; | 1759 | return -EINVAL; |
1680 | if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) | 1760 | if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) |
1681 | return -EINVAL; | 1761 | return -EINVAL; |
1762 | vlan_tci = vlan->vlan_tci; | ||
1682 | break; | 1763 | break; |
1683 | 1764 | ||
1684 | case OVS_ACTION_ATTR_RECIRC: | 1765 | case OVS_ACTION_ATTR_RECIRC: |
1685 | break; | 1766 | break; |
1686 | 1767 | ||
1768 | case OVS_ACTION_ATTR_PUSH_MPLS: { | ||
1769 | const struct ovs_action_push_mpls *mpls = nla_data(a); | ||
1770 | |||
1771 | /* Networking stack do not allow simultaneous Tunnel | ||
1772 | * and MPLS GSO. | ||
1773 | */ | ||
1774 | if (out_tnl_port) | ||
1775 | return -EINVAL; | ||
1776 | |||
1777 | if (!eth_p_mpls(mpls->mpls_ethertype)) | ||
1778 | return -EINVAL; | ||
1779 | /* Prohibit push MPLS other than to a white list | ||
1780 | * for packets that have a known tag order. | ||
1781 | */ | ||
1782 | if (vlan_tci & htons(VLAN_TAG_PRESENT) || | ||
1783 | (eth_type != htons(ETH_P_IP) && | ||
1784 | eth_type != htons(ETH_P_IPV6) && | ||
1785 | eth_type != htons(ETH_P_ARP) && | ||
1786 | eth_type != htons(ETH_P_RARP) && | ||
1787 | !eth_p_mpls(eth_type))) | ||
1788 | return -EINVAL; | ||
1789 | eth_type = mpls->mpls_ethertype; | ||
1790 | break; | ||
1791 | } | ||
1792 | |||
1793 | case OVS_ACTION_ATTR_POP_MPLS: | ||
1794 | if (vlan_tci & htons(VLAN_TAG_PRESENT) || | ||
1795 | !eth_p_mpls(eth_type)) | ||
1796 | return -EINVAL; | ||
1797 | |||
1798 | /* Disallow subsequent L2.5+ set and mpls_pop actions | ||
1799 | * as there is no check here to ensure that the new | ||
1800 | * eth_type is valid and thus set actions could | ||
1801 | * write off the end of the packet or otherwise | ||
1802 | * corrupt it. | ||
1803 | * | ||
1804 | * Support for these actions is planned using packet | ||
1805 | * recirculation. | ||
1806 | */ | ||
1807 | eth_type = htons(0); | ||
1808 | break; | ||
1809 | |||
1687 | case OVS_ACTION_ATTR_SET: | 1810 | case OVS_ACTION_ATTR_SET: |
1688 | err = validate_set(a, key, sfa, &skip_copy); | 1811 | err = validate_set(a, key, sfa, |
1812 | &out_tnl_port, eth_type); | ||
1689 | if (err) | 1813 | if (err) |
1690 | return err; | 1814 | return err; |
1815 | |||
1816 | skip_copy = out_tnl_port; | ||
1691 | break; | 1817 | break; |
1692 | 1818 | ||
1693 | case OVS_ACTION_ATTR_SAMPLE: | 1819 | case OVS_ACTION_ATTR_SAMPLE: |
1694 | err = validate_and_copy_sample(a, key, depth, sfa); | 1820 | err = validate_and_copy_sample(a, key, depth, sfa, |
1821 | eth_type, vlan_tci); | ||
1695 | if (err) | 1822 | if (err) |
1696 | return err; | 1823 | return err; |
1697 | skip_copy = true; | 1824 | skip_copy = true; |
1698 | break; | 1825 | break; |
1699 | 1826 | ||
1700 | default: | 1827 | default: |
1828 | OVS_NLERR("Unknown tunnel attribute (%d).\n", type); | ||
1701 | return -EINVAL; | 1829 | return -EINVAL; |
1702 | } | 1830 | } |
1703 | if (!skip_copy) { | 1831 | if (!skip_copy) { |
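
Action validation now threads the packet's effective EtherType and VLAN TCI through the list: push_vlan/pop_vlan update the TCI, push_mpls is refused under a VLAN tag or on payloads outside the known-tag-order whitelist, pop_mpls clears the EtherType so later L2.5+ sets are rejected, and set(tunnel) is refused once the packet is MPLS because the stack cannot combine tunnel and MPLS GSO. A host-byte-order user-space sketch of that state machine; the action encoding is invented, and the EtherType/VLAN constants follow the usual assignments.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD
#define ETH_P_ARP	0x0806
#define ETH_P_RARP	0x8035
#define ETH_P_MPLS_UC	0x8847
#define ETH_P_MPLS_MC	0x8848
#define VLAN_TAG_PRESENT 0x1000

enum act { ACT_PUSH_VLAN, ACT_POP_VLAN, ACT_PUSH_MPLS, ACT_POP_MPLS };

struct action { enum act type; uint16_t arg; /* TCI or EtherType */ };

static bool is_mpls(uint16_t t)
{
	return t == ETH_P_MPLS_UC || t == ETH_P_MPLS_MC;
}

/* Walk the action list, tracking what the packet will look like after each
 * step; return false on the first action that would be illegal.
 */
static bool validate(const struct action *acts, int n,
		     uint16_t eth_type, uint16_t vlan_tci)
{
	for (int i = 0; i < n; i++) {
		switch (acts[i].type) {
		case ACT_PUSH_VLAN:
			vlan_tci = acts[i].arg | VLAN_TAG_PRESENT;
			break;
		case ACT_POP_VLAN:
			vlan_tci = 0;
			break;
		case ACT_PUSH_MPLS:
			/* No MPLS under a VLAN tag, and only on payloads
			 * whose tag order is known. */
			if ((vlan_tci & VLAN_TAG_PRESENT) ||
			    (eth_type != ETH_P_IP && eth_type != ETH_P_IPV6 &&
			     eth_type != ETH_P_ARP && eth_type != ETH_P_RARP &&
			     !is_mpls(eth_type)))
				return false;
			eth_type = acts[i].arg;
			break;
		case ACT_POP_MPLS:
			if ((vlan_tci & VLAN_TAG_PRESENT) || !is_mpls(eth_type))
				return false;
			eth_type = 0;	/* unknown payload from here on */
			break;
		}
	}
	return true;
}

int main(void)
{
	struct action ok[]  = { { ACT_PUSH_MPLS, ETH_P_MPLS_UC } };
	struct action bad[] = { { ACT_PUSH_VLAN, 5 },
				{ ACT_PUSH_MPLS, ETH_P_MPLS_UC } };

	printf("ip->push_mpls: %d\n", validate(ok, 1, ETH_P_IP, 0));	/* 1 */
	printf("vlan->push_mpls: %d\n", validate(bad, 2, ETH_P_IP, 0));	/* 0 */
	return 0;
}
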
@@ -1713,6 +1841,24 @@ int ovs_nla_copy_actions(const struct nlattr *attr, | |||
1713 | return 0; | 1841 | return 0; |
1714 | } | 1842 | } |
1715 | 1843 | ||
1844 | int ovs_nla_copy_actions(const struct nlattr *attr, | ||
1845 | const struct sw_flow_key *key, | ||
1846 | struct sw_flow_actions **sfa) | ||
1847 | { | ||
1848 | int err; | ||
1849 | |||
1850 | *sfa = nla_alloc_flow_actions(nla_len(attr)); | ||
1851 | if (IS_ERR(*sfa)) | ||
1852 | return PTR_ERR(*sfa); | ||
1853 | |||
1854 | err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type, | ||
1855 | key->eth.tci); | ||
1856 | if (err) | ||
1857 | kfree(*sfa); | ||
1858 | |||
1859 | return err; | ||
1860 | } | ||
1861 | |||
1716 | static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb) | 1862 | static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb) |
1717 | { | 1863 | { |
1718 | const struct nlattr *a; | 1864 | const struct nlattr *a; |
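
With __ovs_nla_copy_actions() doing the recursion, the public ovs_nla_copy_actions() entry point now owns the whole life cycle: size the buffer from the attribute length, allocate it, seed the walk with the flow key's EtherType and TCI, and free the buffer again if validation fails, which is what lets nla_alloc_flow_actions() become static. A generic user-space sketch of that allocate/delegate/free-on-error convention, with invented names (copy_actions, struct actions_buf).

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct actions_buf {
	size_t len;
	uint8_t data[];
};

/* Stand-in for the recursive validator/copier. */
static int do_copy(struct actions_buf *dst, const void *src, size_t len)
{
	if (len > dst->len)
		return -EMSGSIZE;
	memcpy(dst->data, src, len);
	return 0;
}

/* Allocate *out sized from the input, copy into it, and free it again if
 * the copy fails, so callers only ever see a valid buffer or an error.
 */
static int copy_actions(const void *src, size_t len, struct actions_buf **out)
{
	int err;

	*out = malloc(sizeof(**out) + len);
	if (!*out)
		return -ENOMEM;
	(*out)->len = len;

	err = do_copy(*out, src, len);
	if (err) {
		free(*out);
		*out = NULL;
	}
	return err;
}

int main(void)
{
	struct actions_buf *acts;
	uint8_t raw[16] = { 0 };

	return copy_actions(raw, sizeof(raw), &acts) ? 1 : (free(acts), 0);
}
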
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h index 206e45add888..eb0b177300ad 100644 --- a/net/openvswitch/flow_netlink.h +++ b/net/openvswitch/flow_netlink.h | |||
@@ -37,6 +37,8 @@ | |||
37 | 37 | ||
38 | #include "flow.h" | 38 | #include "flow.h" |
39 | 39 | ||
40 | size_t ovs_key_attr_size(void); | ||
41 | |||
40 | void ovs_match_init(struct sw_flow_match *match, | 42 | void ovs_match_init(struct sw_flow_match *match, |
41 | struct sw_flow_key *key, struct sw_flow_mask *mask); | 43 | struct sw_flow_key *key, struct sw_flow_mask *mask); |
42 | 44 | ||
@@ -49,12 +51,11 @@ int ovs_nla_get_match(struct sw_flow_match *match, | |||
49 | const struct nlattr *); | 51 | const struct nlattr *); |
50 | 52 | ||
51 | int ovs_nla_copy_actions(const struct nlattr *attr, | 53 | int ovs_nla_copy_actions(const struct nlattr *attr, |
52 | const struct sw_flow_key *key, int depth, | 54 | const struct sw_flow_key *key, |
53 | struct sw_flow_actions **sfa); | 55 | struct sw_flow_actions **sfa); |
54 | int ovs_nla_put_actions(const struct nlattr *attr, | 56 | int ovs_nla_put_actions(const struct nlattr *attr, |
55 | int len, struct sk_buff *skb); | 57 | int len, struct sk_buff *skb); |
56 | 58 | ||
57 | struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len); | ||
58 | void ovs_nla_free_flow_actions(struct sw_flow_actions *); | 59 | void ovs_nla_free_flow_actions(struct sw_flow_actions *); |
59 | 60 | ||
60 | #endif /* flow_netlink.h */ | 61 | #endif /* flow_netlink.h */ |
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index cf2d853646f0..90f8b40a350b 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2007-2013 Nicira, Inc. | 2 | * Copyright (c) 2007-2014 Nicira, Inc. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of version 2 of the GNU General Public | 5 | * modify it under the terms of version 2 of the GNU General Public |
@@ -250,11 +250,14 @@ skip_flows: | |||
250 | __table_instance_destroy(ti); | 250 | __table_instance_destroy(ti); |
251 | } | 251 | } |
252 | 252 | ||
253 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) | 253 | /* No need for locking this function is called from RCU callback or |
254 | * error path. | ||
255 | */ | ||
256 | void ovs_flow_tbl_destroy(struct flow_table *table) | ||
254 | { | 257 | { |
255 | struct table_instance *ti = ovsl_dereference(table->ti); | 258 | struct table_instance *ti = rcu_dereference_raw(table->ti); |
256 | 259 | ||
257 | table_instance_destroy(ti, deferred); | 260 | table_instance_destroy(ti, false); |
258 | } | 261 | } |
259 | 262 | ||
260 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, | 263 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, |
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 5918bff7f3f6..f682c8c07f44 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h | |||
@@ -62,7 +62,7 @@ void ovs_flow_free(struct sw_flow *, bool deferred); | |||
62 | 62 | ||
63 | int ovs_flow_tbl_init(struct flow_table *); | 63 | int ovs_flow_tbl_init(struct flow_table *); |
64 | int ovs_flow_tbl_count(struct flow_table *table); | 64 | int ovs_flow_tbl_count(struct flow_table *table); |
65 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred); | 65 | void ovs_flow_tbl_destroy(struct flow_table *table); |
66 | int ovs_flow_tbl_flush(struct flow_table *flow_table); | 66 | int ovs_flow_tbl_flush(struct flow_table *flow_table); |
67 | 67 | ||
68 | int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, | 68 | int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, |
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 10dc07e1678b..6a55f7105505 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c | |||
@@ -224,6 +224,11 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb) | |||
224 | struct net_device *netdev = netdev_vport_priv(vport)->dev; | 224 | struct net_device *netdev = netdev_vport_priv(vport)->dev; |
225 | int len; | 225 | int len; |
226 | 226 | ||
227 | if (unlikely(!(netdev->flags & IFF_UP))) { | ||
228 | kfree_skb(skb); | ||
229 | return 0; | ||
230 | } | ||
231 | |||
227 | len = skb->len; | 232 | len = skb->len; |
228 | 233 | ||
229 | skb_dst_drop(skb); | 234 | skb_dst_drop(skb); |
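
The internal_dev_recv() hunk drops frames destined for an internal port whose net_device is not up, freeing the skb immediately and reporting zero bytes instead of pushing the packet into the stack. A tiny user-space analogue of that early-drop guard, with invented names; a port that is not up frees the frame and reports nothing delivered.

#include <stdbool.h>
#include <stdlib.h>

struct frame { size_t len; };

static size_t deliver(bool port_up, struct frame *f)
{
	size_t len = f->len;

	if (!port_up) {
		free(f);	/* drop early, before touching the stack */
		return 0;
	}
	/* ... normal receive path would run here ... */
	free(f);
	return len;
}

int main(void)
{
	struct frame *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	f->len = 64;
	return deliver(false, f) == 0 ? 0 : 1;
}
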