author     David S. Miller <davem@davemloft.net>  2014-01-06 19:48:38 -0500
committer  David S. Miller <davem@davemloft.net>  2014-01-06 19:48:38 -0500
commit     39b6b2992f9dc65d1de5c66e7ec2271b8a5fac33 (patch)
tree       c0fc4e2be0429bb4d7643e6b6f8f5a56212f9284
parent     56a4342dfe3145cd66f766adccb28fd9b571606d (diff)
parent     443cd88c8a31379e95326428bbbd40af25c1d440 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch
Jesse Gross says:

====================
[GIT net-next] Open vSwitch

Open vSwitch changes for net-next/3.14. Highlights are:

 * Performance improvements in the mechanism to get packets to
   userspace using memory mapped netlink and skb zero copy where
   appropriate.

 * Per-cpu flow stats in situations where flows are likely to be
   shared across CPUs. Standard flow stats are used in other
   situations to save memory and allocation time.

 * A handful of code cleanups and rationalization.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/skbuff.h                |   3
-rw-r--r--  include/net/genetlink.h               |   4
-rw-r--r--  include/uapi/linux/openvswitch.h      |  14
-rw-r--r--  net/core/skbuff.c                     |  85
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c  |  59
-rw-r--r--  net/netlink/af_netlink.c              |   4
-rw-r--r--  net/netlink/genetlink.c               |  21
-rw-r--r--  net/openvswitch/datapath.c            | 231
-rw-r--r--  net/openvswitch/datapath.h            |   6
-rw-r--r--  net/openvswitch/flow.c                |  96
-rw-r--r--  net/openvswitch/flow.h                |  33
-rw-r--r--  net/openvswitch/flow_netlink.c        |  66
-rw-r--r--  net/openvswitch/flow_netlink.h        |   1
-rw-r--r--  net/openvswitch/flow_table.c          |  60
-rw-r--r--  net/openvswitch/flow_table.h          |   6
-rw-r--r--  net/openvswitch/vport.c               |   6
-rw-r--r--  net/openvswitch/vport.h               |   1
17 files changed, 483 insertions, 213 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 88d4f2ebbec6..956e11a168d8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2445,6 +2445,9 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		    struct pipe_inode_info *pipe, unsigned int len,
 		    unsigned int flags);
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
+void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
+		  int len, int hlen);
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 1b177ed803b7..93695f0e22a5 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -73,6 +73,7 @@ struct genl_family {
  * @attrs: netlink attributes
  * @_net: network namespace
  * @user_ptr: user pointers
+ * @dst_sk: destination socket
  */
 struct genl_info {
 	u32			snd_seq;
@@ -85,6 +86,7 @@ struct genl_info {
 	struct net *		_net;
 #endif
 	void *			user_ptr[2];
+	struct sock *		dst_sk;
 };
 
 static inline struct net *genl_info_net(struct genl_info *info)
@@ -177,6 +179,8 @@ void genl_notify(struct genl_family *family,
 		 struct sk_buff *skb, struct net *net, u32 portid,
 		 u32 group, struct nlmsghdr *nlh, gfp_t flags);
 
+struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
+				    gfp_t flags);
 void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
 		  struct genl_family *family, int flags, u8 cmd);
 
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index d120f9fe0017..970553cbbc8e 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -40,7 +40,15 @@ struct ovs_header {
 
 #define OVS_DATAPATH_FAMILY  "ovs_datapath"
 #define OVS_DATAPATH_MCGROUP "ovs_datapath"
-#define OVS_DATAPATH_VERSION 0x1
+
+/* V2:
+ * - API users are expected to provide OVS_DP_ATTR_USER_FEATURES
+ *   when creating the datapath.
+ */
+#define OVS_DATAPATH_VERSION 2
+
+/* First OVS datapath version to support features */
+#define OVS_DP_VER_FEATURES 2
 
 enum ovs_datapath_cmd {
 	OVS_DP_CMD_UNSPEC,
@@ -75,6 +83,7 @@ enum ovs_datapath_attr {
 	OVS_DP_ATTR_UPCALL_PID,		/* Netlink PID to receive upcalls */
 	OVS_DP_ATTR_STATS,		/* struct ovs_dp_stats */
 	OVS_DP_ATTR_MEGAFLOW_STATS,	/* struct ovs_dp_megaflow_stats */
+	OVS_DP_ATTR_USER_FEATURES,	/* OVS_DP_F_* */
 	__OVS_DP_ATTR_MAX
 };
 
@@ -106,6 +115,9 @@ struct ovs_vport_stats {
 	__u64   tx_dropped;		/* no space available in linux */
 };
 
+/* Allow last Netlink attribute to be unaligned */
+#define OVS_DP_F_UNALIGNED	(1 << 0)
+
 /* Fixed logical ports. */
 #define OVSP_LOCAL      ((__u32)0)
 
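
For illustration, a hedged userspace sketch (not part of this patch) of a client opting in to the new feature: it announces OVS_DP_F_UNALIGNED via OVS_DP_ATTR_USER_FEATURES and sends the request as genl version OVS_DATAPATH_VERSION (2), so the kernel treats it as a features-aware caller. This assumes libnl-genl-3; create_dp_unaligned() and its arguments are hypothetical names, and error handling plus message cleanup are omitted for brevity.

	#include <stdint.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/openvswitch.h>

	static int create_dp_unaligned(struct nl_sock *sk, const char *name,
				       uint32_t upcall_pid)
	{
		int family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);
		struct nl_msg *msg = nlmsg_alloc();
		struct ovs_header *hdr;

		/* genl version 2 (OVS_DATAPATH_VERSION) marks a features-aware client */
		hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
				  sizeof(*hdr), 0, OVS_DP_CMD_NEW, OVS_DATAPATH_VERSION);
		hdr->dp_ifindex = 0;
		nla_put_string(msg, OVS_DP_ATTR_NAME, name);
		nla_put_u32(msg, OVS_DP_ATTR_UPCALL_PID, upcall_pid);
		/* allow the kernel to leave the last attribute unaligned (zerocopy) */
		nla_put_u32(msg, OVS_DP_ATTR_USER_FEATURES, OVS_DP_F_UNALIGNED);
		return nl_send_auto(sk, msg);
	}
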
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index eb96c2c22400..1d641e781f85 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2121,6 +2121,91 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 }
 EXPORT_SYMBOL(skb_copy_and_csum_bits);
 
+/**
+ * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
+ * @from: source buffer
+ *
+ * Calculates the amount of linear headroom needed in the 'to' skb passed
+ * into skb_zerocopy().
+ */
+unsigned int
+skb_zerocopy_headlen(const struct sk_buff *from)
+{
+	unsigned int hlen = 0;
+
+	if (!from->head_frag ||
+	    skb_headlen(from) < L1_CACHE_BYTES ||
+	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+		hlen = skb_headlen(from);
+
+	if (skb_has_frag_list(from))
+		hlen = from->len;
+
+	return hlen;
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
+
+/**
+ * skb_zerocopy - Zero copy skb to skb
+ * @to: destination buffer
+ * @source: source buffer
+ * @len: number of bytes to copy from source buffer
+ * @hlen: size of linear headroom in destination buffer
+ *
+ * Copies up to `len` bytes from `from` to `to` by creating references
+ * to the frags in the source buffer.
+ *
+ * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
+ * headroom in the `to` buffer.
+ */
+void
+skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+{
+	int i, j = 0;
+	int plen = 0; /* length of skb->head fragment */
+	struct page *page;
+	unsigned int offset;
+
+	BUG_ON(!from->head_frag && !hlen);
+
+	/* dont bother with small payloads */
+	if (len <= skb_tailroom(to)) {
+		skb_copy_bits(from, 0, skb_put(to, len), len);
+		return;
+	}
+
+	if (hlen) {
+		skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+		len -= hlen;
+	} else {
+		plen = min_t(int, skb_headlen(from), len);
+		if (plen) {
+			page = virt_to_head_page(from->head);
+			offset = from->data - (unsigned char *)page_address(page);
+			__skb_fill_page_desc(to, 0, page, offset, plen);
+			get_page(page);
+			j = 1;
+			len -= plen;
+		}
+	}
+
+	to->truesize += len + plen;
+	to->len += len + plen;
+	to->data_len += len + plen;
+
+	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+		if (!len)
+			break;
+		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
+		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
+		len -= skb_shinfo(to)->frags[j].size;
+		skb_frag_ref(to, j);
+		j++;
+	}
+	skb_shinfo(to)->nr_frags = j;
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy);
+
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 {
 	__wsum csum;
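
The kernel-doc above describes the intended pairing: size the destination's linear area with skb_zerocopy_headlen(), then let skb_zerocopy() take page references on the source frags. A hedged sketch of a caller, mirroring the nfnetlink_queue and openvswitch call sites below; metadata_len is a hypothetical, caller-defined allowance for netlink headers and attributes preceding the payload:

	unsigned int hlen = skb_zerocopy_headlen(src);
	struct sk_buff *dst;

	dst = alloc_skb(metadata_len + hlen, GFP_ATOMIC);
	if (dst)
		/* copies hlen linear bytes, then references the remaining frags */
		skb_zerocopy(dst, src, src->len, hlen);
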
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index b5e1f82890df..f072fe803510 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -236,51 +236,6 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
 	spin_unlock_bh(&queue->lock);
 }
 
-static void
-nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
-{
-	int i, j = 0;
-	int plen = 0; /* length of skb->head fragment */
-	struct page *page;
-	unsigned int offset;
-
-	/* dont bother with small payloads */
-	if (len <= skb_tailroom(to)) {
-		skb_copy_bits(from, 0, skb_put(to, len), len);
-		return;
-	}
-
-	if (hlen) {
-		skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
-		len -= hlen;
-	} else {
-		plen = min_t(int, skb_headlen(from), len);
-		if (plen) {
-			page = virt_to_head_page(from->head);
-			offset = from->data - (unsigned char *)page_address(page);
-			__skb_fill_page_desc(to, 0, page, offset, plen);
-			get_page(page);
-			j = 1;
-			len -= plen;
-		}
-	}
-
-	to->truesize += len + plen;
-	to->len += len + plen;
-	to->data_len += len + plen;
-
-	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
-		if (!len)
-			break;
-		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
-		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
-		len -= skb_shinfo(to)->frags[j].size;
-		skb_frag_ref(to, j);
-		j++;
-	}
-	skb_shinfo(to)->nr_frags = j;
-}
-
 static int
 nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
 		      bool csum_verify)
@@ -330,7 +285,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 {
 	size_t size;
 	size_t data_len = 0, cap_len = 0;
-	int hlen = 0;
+	unsigned int hlen = 0;
 	struct sk_buff *skb;
 	struct nlattr *nla;
 	struct nfqnl_msg_packet_hdr *pmsg;
@@ -382,14 +337,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 		if (data_len > entskb->len)
 			data_len = entskb->len;
 
-		if (!entskb->head_frag ||
-		    skb_headlen(entskb) < L1_CACHE_BYTES ||
-		    skb_shinfo(entskb)->nr_frags >= MAX_SKB_FRAGS)
-			hlen = skb_headlen(entskb);
-
-		if (skb_has_frag_list(entskb))
-			hlen = entskb->len;
-		hlen = min_t(int, data_len, hlen);
+		hlen = skb_zerocopy_headlen(entskb);
+		hlen = min_t(unsigned int, hlen, data_len);
 		size += sizeof(struct nlattr) + hlen;
 		cap_len = entskb->len;
 		break;
@@ -539,7 +488,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 		nla->nla_type = NFQA_PAYLOAD;
 		nla->nla_len = nla_attr_size(data_len);
 
-		nfqnl_zcopy(skb, entskb, data_len, hlen);
+		skb_zerocopy(skb, entskb, data_len, hlen);
 	}
 
 	nlh->nlmsg_len = skb->len;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b077b90c1254..34a656d90175 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1773,6 +1773,9 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
 		if (ring->pg_vec == NULL)
 			goto out_put;
 
+		if (ring->frame_size - NL_MMAP_HDRLEN < size)
+			goto out_put;
+
 		skb = alloc_skb_head(gfp_mask);
 		if (skb == NULL)
 			goto err1;
@@ -1782,6 +1785,7 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
 		if (ring->pg_vec == NULL)
 			goto out_free;
 
+		/* check again under lock */
 		maxlen = ring->frame_size - NL_MMAP_HDRLEN;
 		if (maxlen < size)
 			goto out_free;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 713671ae45af..b1dcdb932a86 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -461,6 +461,26 @@ int genl_unregister_family(struct genl_family *family)
 EXPORT_SYMBOL(genl_unregister_family);
 
 /**
+ * genlmsg_new_unicast - Allocate generic netlink message for unicast
+ * @payload: size of the message payload
+ * @info: information on destination
+ * @flags: the type of memory to allocate
+ *
+ * Allocates a new sk_buff large enough to cover the specified payload
+ * plus required Netlink headers. Will check receiving socket for
+ * memory mapped i/o capability and use it if enabled. Will fall back
+ * to non-mapped skb if message size exceeds the frame size of the ring.
+ */
+struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
+				    gfp_t flags)
+{
+	size_t len = nlmsg_total_size(genlmsg_total_size(payload));
+
+	return netlink_alloc_skb(info->dst_sk, len, info->snd_portid, flags);
+}
+EXPORT_SYMBOL_GPL(genlmsg_new_unicast);
+
+/**
  * genlmsg_put - Add generic netlink header to netlink message
  * @skb: socket buffer holding the message
  * @portid: netlink portid the message is addressed to
@@ -600,6 +620,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
 	info.genlhdr = nlmsg_data(nlh);
 	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
 	info.attrs = attrbuf;
+	info.dst_sk = skb->sk;
 	genl_info_net_set(&info, net);
 	memset(&info.user_ptr, 0, sizeof(info.user_ptr));
 
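
A hedged sketch (not from this patch) of how a generic netlink .doit handler could use the new helper so replies are allocated from the requester's mmaped ring when one is configured; my_cmd_doit() and my_fill_reply() are hypothetical names:

	static int my_cmd_doit(struct sk_buff *skb, struct genl_info *info)
	{
		struct sk_buff *reply;

		/* falls back to an ordinary skb if the payload exceeds the ring frame */
		reply = genlmsg_new_unicast(NLMSG_DEFAULT_SIZE, info, GFP_KERNEL);
		if (!reply)
			return -ENOMEM;

		if (my_fill_reply(reply, info) < 0) {
			nlmsg_free(reply);
			return -EMSGSIZE;
		}
		return genlmsg_reply(reply, info);
	}
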
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6f5e1dd3be2d..df4692826ead 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -108,10 +108,9 @@ int lockdep_ovsl_is_held(void)
 #endif
 
 static struct vport *new_vport(const struct vport_parms *);
-static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
+static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
 			     const struct dp_upcall_info *);
-static int queue_userspace_packet(struct net *, int dp_ifindex,
-				  struct sk_buff *,
+static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
 				  const struct dp_upcall_info *);
 
 /* Must be called with rcu_read_lock or ovs_mutex. */
@@ -133,7 +132,7 @@ static struct datapath *get_dp(struct net *net, int dp_ifindex)
 }
 
 /* Must be called with rcu_read_lock or ovs_mutex. */
-const char *ovs_dp_name(const struct datapath *dp)
+static const char *ovs_dp_name(const struct datapath *dp)
 {
 	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
 	return vport->ops->get_name(vport);
@@ -234,7 +233,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	}
 
 	/* Look up flow. */
-	flow = ovs_flow_tbl_lookup(&dp->table, &key, &n_mask_hit);
+	flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
 	if (unlikely(!flow)) {
 		struct dp_upcall_info upcall;
 
@@ -251,9 +250,9 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	OVS_CB(skb)->flow = flow;
 	OVS_CB(skb)->pkt_key = &key;
 
-	stats_counter = &stats->n_hit;
-	ovs_flow_used(OVS_CB(skb)->flow, skb);
+	ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
 	ovs_execute_actions(dp, skb);
+	stats_counter = &stats->n_hit;
 
 out:
 	/* Update datapath statistics. */
@@ -277,7 +276,6 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
 		  const struct dp_upcall_info *upcall_info)
 {
 	struct dp_stats_percpu *stats;
-	int dp_ifindex;
 	int err;
 
 	if (upcall_info->portid == 0) {
@@ -285,16 +283,10 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
 		goto err;
 	}
 
-	dp_ifindex = get_dpifindex(dp);
-	if (!dp_ifindex) {
-		err = -ENODEV;
-		goto err;
-	}
-
 	if (!skb_is_gso(skb))
-		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
+		err = queue_userspace_packet(dp, skb, upcall_info);
 	else
-		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
+		err = queue_gso_packets(dp, skb, upcall_info);
 	if (err)
 		goto err;
 
@@ -310,8 +302,7 @@ err:
 	return err;
 }
 
-static int queue_gso_packets(struct net *net, int dp_ifindex,
-			     struct sk_buff *skb,
+static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
 			     const struct dp_upcall_info *upcall_info)
 {
 	unsigned short gso_type = skb_shinfo(skb)->gso_type;
@@ -320,14 +311,14 @@ static int queue_gso_packets(struct net *net, int dp_ifindex,
 	struct sk_buff *segs, *nskb;
 	int err;
 
-	segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
+	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
 	if (IS_ERR(segs))
 		return PTR_ERR(segs);
 
 	/* Queue all of the segments. */
 	skb = segs;
 	do {
-		err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
+		err = queue_userspace_packet(dp, skb, upcall_info);
 		if (err)
 			break;
 
@@ -380,11 +371,11 @@ static size_t key_attr_size(void)
 		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
 }
 
-static size_t upcall_msg_size(const struct sk_buff *skb,
-			      const struct nlattr *userdata)
+static size_t upcall_msg_size(const struct nlattr *userdata,
+			      unsigned int hdrlen)
 {
 	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
-		+ nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
+		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
 		+ nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
 
 	/* OVS_PACKET_ATTR_USERDATA */
@@ -394,15 +385,24 @@
 	return size;
 }
 
-static int queue_userspace_packet(struct net *net, int dp_ifindex,
-				  struct sk_buff *skb,
+static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 				  const struct dp_upcall_info *upcall_info)
 {
 	struct ovs_header *upcall;
 	struct sk_buff *nskb = NULL;
 	struct sk_buff *user_skb; /* to be queued to userspace */
 	struct nlattr *nla;
-	int err;
+	struct genl_info info = {
+		.dst_sk = ovs_dp_get_net(dp)->genl_sock,
+		.snd_portid = upcall_info->portid,
+	};
+	size_t len;
+	unsigned int hlen;
+	int err, dp_ifindex;
+
+	dp_ifindex = get_dpifindex(dp);
+	if (!dp_ifindex)
+		return -ENODEV;
 
 	if (vlan_tx_tag_present(skb)) {
 		nskb = skb_clone(skb, GFP_ATOMIC);
@@ -422,7 +422,22 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 		goto out;
 	}
 
-	user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC);
+	/* Complete checksum if needed */
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+	    (err = skb_checksum_help(skb)))
+		goto out;
+
+	/* Older versions of OVS user space enforce alignment of the last
+	 * Netlink attribute to NLA_ALIGNTO which would require extensive
+	 * padding logic. Only perform zerocopy if padding is not required.
+	 */
+	if (dp->user_features & OVS_DP_F_UNALIGNED)
+		hlen = skb_zerocopy_headlen(skb);
+	else
+		hlen = skb->len;
+
+	len = upcall_msg_size(upcall_info->userdata, hlen);
+	user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
 	if (!user_skb) {
 		err = -ENOMEM;
 		goto out;
@@ -441,26 +456,24 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 			  nla_len(upcall_info->userdata),
 			  nla_data(upcall_info->userdata));
 
-	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
+	/* Only reserve room for attribute header, packet data is added
+	 * in skb_zerocopy() */
+	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
+		err = -ENOBUFS;
+		goto out;
+	}
+	nla->nla_len = nla_attr_size(skb->len);
 
-	skb_copy_and_csum_dev(skb, nla_data(nla));
+	skb_zerocopy(user_skb, skb, skb->len, hlen);
 
-	genlmsg_end(user_skb, upcall);
-	err = genlmsg_unicast(net, user_skb, upcall_info->portid);
+	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
 
+	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
 out:
 	kfree_skb(nskb);
 	return err;
 }
 
-static void clear_stats(struct sw_flow *flow)
-{
-	flow->used = 0;
-	flow->tcp_flags = 0;
-	flow->packet_count = 0;
-	flow->byte_count = 0;
-}
-
 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 {
 	struct ovs_header *ovs_header = info->userhdr;
@@ -499,7 +512,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	packet->protocol = htons(ETH_P_802_2);
 
 	/* Build an sw_flow for sending this packet. */
-	flow = ovs_flow_alloc();
+	flow = ovs_flow_alloc(false);
 	err = PTR_ERR(flow);
 	if (IS_ERR(flow))
 		goto err_kfree_skb;
@@ -635,10 +648,10 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	const int skb_orig_len = skb->len;
 	struct nlattr *start;
 	struct ovs_flow_stats stats;
+	__be16 tcp_flags;
+	unsigned long used;
 	struct ovs_header *ovs_header;
 	struct nlattr *nla;
-	unsigned long used;
-	u8 tcp_flags;
 	int err;
 
 	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
@@ -667,24 +680,17 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 
 	nla_nest_end(skb, nla);
 
-	spin_lock_bh(&flow->lock);
-	used = flow->used;
-	stats.n_packets = flow->packet_count;
-	stats.n_bytes = flow->byte_count;
-	tcp_flags = (u8)ntohs(flow->tcp_flags);
-	spin_unlock_bh(&flow->lock);
-
+	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
 	if (used &&
 	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
 		goto nla_put_failure;
 
 	if (stats.n_packets &&
-	    nla_put(skb, OVS_FLOW_ATTR_STATS,
-		    sizeof(struct ovs_flow_stats), &stats))
+	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
 		goto nla_put_failure;
 
-	if (tcp_flags &&
-	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
+	if ((u8)ntohs(tcp_flags) &&
+	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
 		goto nla_put_failure;
 
 	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
@@ -701,8 +707,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	if (start) {
 		const struct sw_flow_actions *sf_acts;
 
-		sf_acts = rcu_dereference_check(flow->sf_acts,
-						lockdep_ovsl_is_held());
+		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
 
 		err = ovs_nla_put_actions(sf_acts->actions,
 					  sf_acts->actions_len, skb);
@@ -726,39 +731,34 @@ error:
 	return err;
 }
 
-static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
+static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow,
+					       struct genl_info *info)
 {
-	const struct sw_flow_actions *sf_acts;
+	size_t len;
 
-	sf_acts = ovsl_dereference(flow->sf_acts);
+	len = ovs_flow_cmd_msg_size(ovsl_dereference(flow->sf_acts));
 
-	return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
+	return genlmsg_new_unicast(len, info, GFP_KERNEL);
 }
 
 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
 					       struct datapath *dp,
-					       u32 portid, u32 seq, u8 cmd)
+					       struct genl_info *info,
+					       u8 cmd)
 {
 	struct sk_buff *skb;
 	int retval;
 
-	skb = ovs_flow_cmd_alloc_info(flow);
+	skb = ovs_flow_cmd_alloc_info(flow, info);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
+	retval = ovs_flow_cmd_fill_info(flow, dp, skb, info->snd_portid,
+					info->snd_seq, 0, cmd);
 	BUG_ON(retval < 0);
 	return skb;
 }
 
-static struct sw_flow *__ovs_flow_tbl_lookup(struct flow_table *tbl,
-					      const struct sw_flow_key *key)
-{
-	u32 __always_unused n_mask_hit;
-
-	return ovs_flow_tbl_lookup(tbl, key, &n_mask_hit);
-}
-
 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr **a = info->attrs;
@@ -770,6 +770,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	struct datapath *dp;
 	struct sw_flow_actions *acts = NULL;
 	struct sw_flow_match match;
+	bool exact_5tuple;
 	int error;
 
 	/* Extract key. */
@@ -778,7 +779,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		goto error;
 
 	ovs_match_init(&match, &key, &mask);
-	error = ovs_nla_get_match(&match,
+	error = ovs_nla_get_match(&match, &exact_5tuple,
 			a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
 	if (error)
 		goto error;
@@ -809,7 +810,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		goto err_unlock_ovs;
 
 	/* Check if this is a duplicate flow */
-	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow) {
 		/* Bail out if we're not allowed to create a new flow. */
 		error = -ENOENT;
@@ -817,12 +818,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			goto err_unlock_ovs;
 
 		/* Allocate flow. */
-		flow = ovs_flow_alloc();
+		flow = ovs_flow_alloc(!exact_5tuple);
 		if (IS_ERR(flow)) {
 			error = PTR_ERR(flow);
 			goto err_unlock_ovs;
 		}
-		clear_stats(flow);
 
 		flow->key = masked_key;
 		flow->unmasked_key = key;
@@ -835,8 +835,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			goto err_flow_free;
 		}
 
-		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
-						info->snd_seq, OVS_FLOW_CMD_NEW);
+		reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
 	} else {
 		/* We found a matching flow. */
 		struct sw_flow_actions *old_acts;
@@ -864,15 +863,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		rcu_assign_pointer(flow->sf_acts, acts);
 		ovs_nla_free_flow_actions(old_acts);
 
-		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
-						info->snd_seq, OVS_FLOW_CMD_NEW);
+		reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
 
 		/* Clear stats. */
-		if (a[OVS_FLOW_ATTR_CLEAR]) {
-			spin_lock_bh(&flow->lock);
-			clear_stats(flow);
-			spin_unlock_bh(&flow->lock);
-		}
+		if (a[OVS_FLOW_ATTR_CLEAR])
+			ovs_flow_stats_clear(flow);
 	}
 	ovs_unlock();
 
@@ -910,7 +905,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	ovs_match_init(&match, &key, NULL);
-	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+	err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		return err;
 
@@ -921,14 +916,13 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
 		err = -ENOENT;
 		goto unlock;
 	}
 
-	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
-					info->snd_seq, OVS_FLOW_CMD_NEW);
+	reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
 		goto unlock;
@@ -965,17 +959,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	ovs_match_init(&match, &key, NULL);
-	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+	err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		goto unlock;
 
-	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
 		err = -ENOENT;
 		goto unlock;
 	}
 
-	reply = ovs_flow_cmd_alloc_info(flow);
+	reply = ovs_flow_cmd_alloc_info(flow, info);
 	if (!reply) {
 		err = -ENOMEM;
 		goto unlock;
@@ -1061,6 +1055,7 @@ static const struct genl_ops dp_flow_genl_ops[] = {
 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
 	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
 	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
 };
 
 static struct genl_family dp_datapath_genl_family = {
@@ -1119,6 +1114,9 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 			&dp_megaflow_stats))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
+		goto nla_put_failure;
+
 	return genlmsg_end(skb, ovs_header);
 
 nla_put_failure:
@@ -1127,17 +1125,17 @@ error:
 	return -EMSGSIZE;
 }
 
-static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
-					     u32 seq, u8 cmd)
+static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp,
+					     struct genl_info *info, u8 cmd)
 {
 	struct sk_buff *skb;
 	int retval;
 
-	skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
+	skb = genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
+	retval = ovs_dp_cmd_fill_info(dp, skb, info->snd_portid, info->snd_seq, 0, cmd);
 	if (retval < 0) {
 		kfree_skb(skb);
 		return ERR_PTR(retval);
@@ -1165,6 +1163,24 @@ static struct datapath *lookup_datapath(struct net *net,
 	return dp ? dp : ERR_PTR(-ENODEV);
 }
 
+static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
+{
+	struct datapath *dp;
+
+	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+	if (!dp)
+		return;
+
+	WARN(dp->user_features, "Dropping previously announced user features\n");
+	dp->user_features = 0;
+}
+
+static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
+{
+	if (a[OVS_DP_ATTR_USER_FEATURES])
+		dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
+}
+
 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr **a = info->attrs;
@@ -1223,17 +1239,27 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	parms.port_no = OVSP_LOCAL;
 	parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
 
+	ovs_dp_change(dp, a);
+
 	vport = new_vport(&parms);
 	if (IS_ERR(vport)) {
 		err = PTR_ERR(vport);
 		if (err == -EBUSY)
 			err = -EEXIST;
 
+		if (err == -EEXIST) {
+			/* An outdated user space instance that does not understand
+			 * the concept of user_features has attempted to create a new
+			 * datapath and is likely to reuse it. Drop all user features.
+			 */
+			if (info->genlhdr->version < OVS_DP_VER_FEATURES)
+				ovs_dp_reset_user_features(skb, info);
+		}
+
 		goto err_destroy_ports_array;
 	}
 
-	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
-				      info->snd_seq, OVS_DP_CMD_NEW);
+	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
 	err = PTR_ERR(reply);
 	if (IS_ERR(reply))
 		goto err_destroy_local_port;
@@ -1299,8 +1325,7 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(dp))
 		goto unlock;
 
-	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
-				      info->snd_seq, OVS_DP_CMD_DEL);
+	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_DEL);
 	err = PTR_ERR(reply);
 	if (IS_ERR(reply))
 		goto unlock;
@@ -1328,8 +1353,9 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(dp))
 		goto unlock;
 
-	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
-				      info->snd_seq, OVS_DP_CMD_NEW);
+	ovs_dp_change(dp, info->attrs);
+
+	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
 		genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
@@ -1360,8 +1386,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
-				      info->snd_seq, OVS_DP_CMD_NEW);
+	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
 		goto unlock;
@@ -1441,7 +1466,7 @@ struct genl_family dp_vport_genl_family = {
 	.parallel_ops = true,
 };
 
-struct genl_multicast_group ovs_dp_vport_multicast_group = {
+static struct genl_multicast_group ovs_dp_vport_multicast_group = {
 	.name = OVS_VPORT_MCGROUP
 };
 
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 4067ea41be28..6be9fbb5e9cb 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -88,6 +88,8 @@ struct datapath {
 	/* Network namespace ref. */
 	struct net *net;
 #endif
+
+	u32 user_features;
 };
 
 /**
@@ -145,6 +147,8 @@ int lockdep_ovsl_is_held(void);
 #define ASSERT_OVSL()		WARN_ON(unlikely(!lockdep_ovsl_is_held()))
 #define ovsl_dereference(p)					\
 	rcu_dereference_protected(p, lockdep_ovsl_is_held())
+#define rcu_dereference_ovsl(p)					\
+	rcu_dereference_check(p, lockdep_ovsl_is_held())
 
 static inline struct net *ovs_dp_get_net(struct datapath *dp)
 {
@@ -178,14 +182,12 @@ static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_n
 
 extern struct notifier_block ovs_dp_device_notifier;
 extern struct genl_family dp_vport_genl_family;
-extern struct genl_multicast_group ovs_dp_vport_multicast_group;
 
 void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
 void ovs_dp_detach_port(struct vport *);
 int ovs_dp_upcall(struct datapath *, struct sk_buff *,
 		  const struct dp_upcall_info *);
 
-const char *ovs_dp_name(const struct datapath *dp);
 struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
 					 u8 cmd);
 
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index b409f5279601..16f4b46161d4 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -35,6 +35,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/sctp.h>
+#include <linux/smp.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/icmp.h>
@@ -60,10 +61,16 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 
 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
 
-void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 {
+	struct flow_stats *stats;
 	__be16 tcp_flags = 0;
 
+	if (!flow->stats.is_percpu)
+		stats = flow->stats.stat;
+	else
+		stats = this_cpu_ptr(flow->stats.cpu_stats);
+
 	if ((flow->key.eth.type == htons(ETH_P_IP) ||
 	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
 	    flow->key.ip.proto == IPPROTO_TCP &&
@@ -71,12 +78,87 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
 		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
 	}
 
-	spin_lock(&flow->lock);
-	flow->used = jiffies;
-	flow->packet_count++;
-	flow->byte_count += skb->len;
-	flow->tcp_flags |= tcp_flags;
-	spin_unlock(&flow->lock);
+	spin_lock(&stats->lock);
+	stats->used = jiffies;
+	stats->packet_count++;
+	stats->byte_count += skb->len;
+	stats->tcp_flags |= tcp_flags;
+	spin_unlock(&stats->lock);
+}
+
+static void stats_read(struct flow_stats *stats,
+		       struct ovs_flow_stats *ovs_stats,
+		       unsigned long *used, __be16 *tcp_flags)
+{
+	spin_lock(&stats->lock);
+	if (time_after(stats->used, *used))
+		*used = stats->used;
+	*tcp_flags |= stats->tcp_flags;
+	ovs_stats->n_packets += stats->packet_count;
+	ovs_stats->n_bytes += stats->byte_count;
+	spin_unlock(&stats->lock);
+}
+
+void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
+			unsigned long *used, __be16 *tcp_flags)
+{
+	int cpu, cur_cpu;
+
+	*used = 0;
+	*tcp_flags = 0;
+	memset(ovs_stats, 0, sizeof(*ovs_stats));
+
+	if (!flow->stats.is_percpu) {
+		stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
+	} else {
+		cur_cpu = get_cpu();
+		for_each_possible_cpu(cpu) {
+			struct flow_stats *stats;
+
+			if (cpu == cur_cpu)
+				local_bh_disable();
+
+			stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
+			stats_read(stats, ovs_stats, used, tcp_flags);
+
+			if (cpu == cur_cpu)
+				local_bh_enable();
+		}
+		put_cpu();
+	}
+}
+
+static void stats_reset(struct flow_stats *stats)
+{
+	spin_lock(&stats->lock);
+	stats->used = 0;
+	stats->packet_count = 0;
+	stats->byte_count = 0;
+	stats->tcp_flags = 0;
+	spin_unlock(&stats->lock);
+}
+
+void ovs_flow_stats_clear(struct sw_flow *flow)
+{
+	int cpu, cur_cpu;
+
+	if (!flow->stats.is_percpu) {
+		stats_reset(flow->stats.stat);
+	} else {
+		cur_cpu = get_cpu();
+
+		for_each_possible_cpu(cpu) {
+
+			if (cpu == cur_cpu)
+				local_bh_disable();
+
+			stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
+
+			if (cpu == cur_cpu)
+				local_bh_enable();
+		}
+		put_cpu();
+	}
 }
 
 static int check_header(struct sk_buff *skb, int len)
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 1510f51dbf74..2d770e28a3a3 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -19,6 +19,7 @@
 #ifndef FLOW_H
 #define FLOW_H 1
 
+#include <linux/cache.h>
 #include <linux/kernel.h>
 #include <linux/netlink.h>
 #include <linux/openvswitch.h>
@@ -122,8 +123,8 @@ struct sw_flow_key {
 } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
 
 struct sw_flow_key_range {
-	size_t start;
-	size_t end;
+	unsigned short int start;
+	unsigned short int end;
 };
 
 struct sw_flow_mask {
@@ -146,6 +147,22 @@ struct sw_flow_actions {
 	struct nlattr actions[];
 };
 
+struct flow_stats {
+	u64 packet_count;		/* Number of packets matched. */
+	u64 byte_count;			/* Number of bytes matched. */
+	unsigned long used;		/* Last used time (in jiffies). */
+	spinlock_t lock;		/* Lock for atomic stats update. */
+	__be16 tcp_flags;		/* Union of seen TCP flags. */
+};
+
+struct sw_flow_stats {
+	bool is_percpu;
+	union {
+		struct flow_stats *stat;
+		struct flow_stats __percpu *cpu_stats;
+	};
+};
+
 struct sw_flow {
 	struct rcu_head rcu;
 	struct hlist_node hash_node[2];
@@ -155,12 +172,7 @@ struct sw_flow {
 	struct sw_flow_key unmasked_key;
 	struct sw_flow_mask *mask;
 	struct sw_flow_actions __rcu *sf_acts;
-
-	spinlock_t lock;	/* Lock for values below. */
-	unsigned long used;	/* Last used time (in jiffies). */
-	u64 packet_count;	/* Number of packets matched. */
-	u64 byte_count;		/* Number of bytes matched. */
-	__be16 tcp_flags;	/* Union of seen TCP flags. */
+	struct sw_flow_stats stats;
 };
 
 struct arp_eth_header {
@@ -177,7 +189,10 @@ struct arp_eth_header {
 	unsigned char       ar_tip[4];		/* target IP address */
 } __packed;
 
-void ovs_flow_used(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb);
+void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *stats,
+			unsigned long *used, __be16 *tcp_flags);
+void ovs_flow_stats_clear(struct sw_flow *flow);
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 2bc1bc1aca3b..4d000acaed0d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -266,6 +266,20 @@ static bool is_all_zero(const u8 *fp, size_t size)
 	return true;
 }
 
+static bool is_all_set(const u8 *fp, size_t size)
+{
+	int i;
+
+	if (!fp)
+		return false;
+
+	for (i = 0; i < size; i++)
+		if (fp[i] != 0xff)
+			return false;
+
+	return true;
+}
+
 static int __parse_flow_nlattrs(const struct nlattr *attr,
 				const struct nlattr *a[],
 				u64 *attrsp, bool nz)
@@ -487,8 +501,9 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
 	return 0;
 }
 
-static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
-				const struct nlattr **a, bool is_mask)
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple,
+				u64 attrs, const struct nlattr **a,
+				bool is_mask)
 {
 	int err;
 	u64 orig_attrs = attrs;
@@ -545,6 +560,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
 	}
 
+	if (is_mask && exact_5tuple) {
+		if (match->mask->key.eth.type != htons(0xffff))
+			*exact_5tuple = false;
+	}
+
 	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
 		const struct ovs_key_ipv4 *ipv4_key;
 
@@ -567,6 +587,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
 				ipv4_key->ipv4_dst, is_mask);
 		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+
+		if (is_mask && exact_5tuple && *exact_5tuple) {
+			if (ipv4_key->ipv4_proto != 0xff ||
+			    ipv4_key->ipv4_src != htonl(0xffffffff) ||
+			    ipv4_key->ipv4_dst != htonl(0xffffffff))
+				*exact_5tuple = false;
+		}
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
@@ -598,6 +625,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 				is_mask);
 
 		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+
+		if (is_mask && exact_5tuple && *exact_5tuple) {
+			if (ipv6_key->ipv6_proto != 0xff ||
+			    !is_all_set((u8 *)ipv6_key->ipv6_src, sizeof(match->key->ipv6.addr.src)) ||
+			    !is_all_set((u8 *)ipv6_key->ipv6_dst, sizeof(match->key->ipv6.addr.dst)))
+				*exact_5tuple = false;
+		}
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
@@ -640,6 +674,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 					tcp_key->tcp_dst, is_mask);
 		}
 		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+		if (is_mask && exact_5tuple && *exact_5tuple &&
+		    (tcp_key->tcp_src != htons(0xffff) ||
+		     tcp_key->tcp_dst != htons(0xffff)))
+			*exact_5tuple = false;
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
@@ -671,6 +710,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 					udp_key->udp_dst, is_mask);
 		}
 		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+		if (is_mask && exact_5tuple && *exact_5tuple &&
+		    (udp_key->udp_src != htons(0xffff) ||
+		     udp_key->udp_dst != htons(0xffff)))
+			*exact_5tuple = false;
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
@@ -756,6 +800,7 @@ static void sw_flow_mask_set(struct sw_flow_mask *mask,
  * attribute specifies the mask field of the wildcarded flow.
  */
 int ovs_nla_get_match(struct sw_flow_match *match,
+		      bool *exact_5tuple,
 		      const struct nlattr *key,
 		      const struct nlattr *mask)
 {
@@ -803,10 +848,13 @@ int ovs_nla_get_match(struct sw_flow_match *match,
 		}
 	}
 
-	err = ovs_key_from_nlattrs(match, key_attrs, a, false);
+	err = ovs_key_from_nlattrs(match, NULL, key_attrs, a, false);
 	if (err)
 		return err;
 
+	if (exact_5tuple)
+		*exact_5tuple = true;
+
 	if (mask) {
 		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
 		if (err)
@@ -844,7 +892,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
 			}
 		}
 
-		err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
+		err = ovs_key_from_nlattrs(match, exact_5tuple, mask_attrs, a, true);
 		if (err)
 			return err;
 	} else {
@@ -1128,19 +1176,11 @@ struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size)
 	return sfa;
 }
 
-/* RCU callback used by ovs_nla_free_flow_actions. */
-static void rcu_free_acts_callback(struct rcu_head *rcu)
-{
-	struct sw_flow_actions *sf_acts = container_of(rcu,
-			struct sw_flow_actions, rcu);
-	kfree(sf_acts);
-}
-
 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
  * The caller must hold rcu_read_lock for this to be sensible. */
 void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
 {
-	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+	kfree_rcu(sf_acts, rcu);
 }
 
 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
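
To make the exact_5tuple checks above concrete, a hedged sketch of a mask that keeps *exact_5tuple true: the EtherType, IP addresses, protocol, and both ports must all be fully specified (all-ones); any wildcarded bit among them flips the flag to false. The struct initializers below are illustrative only, using the uapi key structures:

	struct ovs_key_ipv4 ipv4_mask = {
		.ipv4_src   = htonl(0xffffffff),	/* exact source address */
		.ipv4_dst   = htonl(0xffffffff),	/* exact destination address */
		.ipv4_proto = 0xff,			/* exact protocol */
	};
	struct ovs_key_tcp tcp_mask = {
		.tcp_src = htons(0xffff),		/* exact source port */
		.tcp_dst = htons(0xffff),		/* exact destination port */
	};
	/* with eth.type also masked as htons(0xffff), ovs_nla_get_match()
	 * leaves *exact_5tuple set, and ovs_flow_cmd_new_or_set() calls
	 * ovs_flow_alloc(!exact_5tuple) == ovs_flow_alloc(false), picking
	 * the cheaper shared (non-percpu) stats for this flow */
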
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 440151045d39..b31fbe28bc7a 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -45,6 +45,7 @@ int ovs_nla_put_flow(const struct sw_flow_key *,
 int ovs_nla_get_flow_metadata(struct sw_flow *flow,
 			      const struct nlattr *attr);
 int ovs_nla_get_match(struct sw_flow_match *match,
+		      bool *exact_5tuple,
 		      const struct nlattr *,
 		      const struct nlattr *);
 
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 0e720c316070..b430d42b2d0f 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -44,8 +44,6 @@
44#include <net/ipv6.h> 44#include <net/ipv6.h>
45#include <net/ndisc.h> 45#include <net/ndisc.h>
46 46
47#include "datapath.h"
48
49#define TBL_MIN_BUCKETS 1024 47#define TBL_MIN_BUCKETS 1024
50#define REHASH_INTERVAL (10 * 60 * HZ) 48#define REHASH_INTERVAL (10 * 60 * HZ)
51 49
@@ -72,19 +70,42 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
72 *d++ = *s++ & *m++; 70 *d++ = *s++ & *m++;
73} 71}
74 72
75struct sw_flow *ovs_flow_alloc(void) 73struct sw_flow *ovs_flow_alloc(bool percpu_stats)
76{ 74{
77 struct sw_flow *flow; 75 struct sw_flow *flow;
76 int cpu;
78 77
79 flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); 78 flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
80 if (!flow) 79 if (!flow)
81 return ERR_PTR(-ENOMEM); 80 return ERR_PTR(-ENOMEM);
82 81
83 spin_lock_init(&flow->lock);
84 flow->sf_acts = NULL; 82 flow->sf_acts = NULL;
85 flow->mask = NULL; 83 flow->mask = NULL;
86 84
85 flow->stats.is_percpu = percpu_stats;
86
87 if (!percpu_stats) {
88 flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
89 if (!flow->stats.stat)
90 goto err;
91
92 spin_lock_init(&flow->stats.stat->lock);
93 } else {
94 flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
95 if (!flow->stats.cpu_stats)
96 goto err;
97
98 for_each_possible_cpu(cpu) {
99 struct flow_stats *cpu_stats;
100
101 cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
102 spin_lock_init(&cpu_stats->lock);
103 }
104 }
87 return flow; 105 return flow;
106err:
107 kfree(flow);
108 return ERR_PTR(-ENOMEM);
88} 109}
89 110
90int ovs_flow_tbl_count(struct flow_table *table) 111int ovs_flow_tbl_count(struct flow_table *table)
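
[Editor's note] ovs_flow_alloc() now chooses a stats layout per flow: a single kzalloc()'d struct flow_stats with one lock when the flow is expected to stay on one CPU, or an alloc_percpu() array when it is likely to be hit from many. The per-CPU branch is the standard kernel pattern; a self-contained sketch under assumed names (my_stats is illustrative, not OVS code):

    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    struct my_stats {
            u64 packets;
            spinlock_t lock;
    };

    static struct my_stats __percpu *my_stats_alloc(void)
    {
            struct my_stats __percpu *stats;
            int cpu;

            stats = alloc_percpu(struct my_stats);  /* zero-filled */
            if (!stats)
                    return NULL;

            /* Locks are not valid as all-zeroes state: each CPU's
             * copy needs its own spin_lock_init(). */
            for_each_possible_cpu(cpu)
                    spin_lock_init(&per_cpu_ptr(stats, cpu)->lock);
            return stats;
    }

Teardown is a single free_percpu(), which is exactly what the flow_free() hunk below adds.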
@@ -118,6 +139,10 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
118static void flow_free(struct sw_flow *flow) 139static void flow_free(struct sw_flow *flow)
119{ 140{
120 kfree((struct sf_flow_acts __force *)flow->sf_acts); 141 kfree((struct sf_flow_acts __force *)flow->sf_acts);
142 if (flow->stats.is_percpu)
143 free_percpu(flow->stats.cpu_stats);
144 else
145 kfree(flow->stats.stat);
121 kmem_cache_free(flow_cache, flow); 146 kmem_cache_free(flow_cache, flow);
122} 147}
123 148
@@ -128,13 +153,6 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
128 flow_free(flow); 153 flow_free(flow);
129} 154}
130 155
131static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
132{
133 struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
134
135 kfree(mask);
136}
137
138static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred) 156static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
139{ 157{
140 if (!mask) 158 if (!mask)
@@ -146,7 +164,7 @@ static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
146 if (!mask->ref_count) { 164 if (!mask->ref_count) {
147 list_del_rcu(&mask->list); 165 list_del_rcu(&mask->list);
148 if (deferred) 166 if (deferred)
149 call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb); 167 kfree_rcu(mask, rcu);
150 else 168 else
151 kfree(mask); 169 kfree(mask);
152 } 170 }
@@ -429,11 +447,11 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
429 return NULL; 447 return NULL;
430} 448}
431 449
432struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, 450struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
433 const struct sw_flow_key *key, 451 const struct sw_flow_key *key,
434 u32 *n_mask_hit) 452 u32 *n_mask_hit)
435{ 453{
436 struct table_instance *ti = rcu_dereference(tbl->ti); 454 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
437 struct sw_flow_mask *mask; 455 struct sw_flow_mask *mask;
438 struct sw_flow *flow; 456 struct sw_flow *flow;
439 457
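
[Editor's note] Note the lockdep change folded into the rename: rcu_dereference() warns unless rcu_read_lock() is held, while rcu_dereference_ovsl() also accepts ovs_mutex, so ovs_flow_tbl_lookup_stats() can serve both the packet path and ovs_lock()-holding control paths. From datapath.h (approximately; quoted from memory):

    #define rcu_dereference_ovsl(p) \
            rcu_dereference_check(p, lockdep_ovsl_is_held())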
@@ -447,6 +465,14 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
447 return NULL; 465 return NULL;
448} 466}
449 467
468struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
469 const struct sw_flow_key *key)
470{
471 u32 __always_unused n_mask_hit;
472
473 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
474}
475
450int ovs_flow_tbl_num_masks(const struct flow_table *table) 476int ovs_flow_tbl_num_masks(const struct flow_table *table)
451{ 477{
452 struct sw_flow_mask *mask; 478 struct sw_flow_mask *mask;
@@ -514,11 +540,7 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
514 return NULL; 540 return NULL;
515} 541}
516 542
517/** 543/* Add 'mask' into the mask list, if it is not already there. */
518 * add a new mask into the mask list.
519 * The caller needs to make sure that 'mask' is not the same
520 * as any masks that are already on the list.
521 */
522static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, 544static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
523 struct sw_flow_mask *new) 545 struct sw_flow_mask *new)
524{ 546{
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index fbe45d5ad07d..1996e34c0fd8 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -55,7 +55,7 @@ struct flow_table {
55int ovs_flow_init(void); 55int ovs_flow_init(void);
56void ovs_flow_exit(void); 56void ovs_flow_exit(void);
57 57
58struct sw_flow *ovs_flow_alloc(void); 58struct sw_flow *ovs_flow_alloc(bool percpu_stats);
59void ovs_flow_free(struct sw_flow *, bool deferred); 59void ovs_flow_free(struct sw_flow *, bool deferred);
60 60
61int ovs_flow_tbl_init(struct flow_table *); 61int ovs_flow_tbl_init(struct flow_table *);
@@ -69,9 +69,11 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
69int ovs_flow_tbl_num_masks(const struct flow_table *table); 69int ovs_flow_tbl_num_masks(const struct flow_table *table);
70struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, 70struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
71 u32 *bucket, u32 *idx); 71 u32 *bucket, u32 *idx);
72struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, 72struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
73 const struct sw_flow_key *, 73 const struct sw_flow_key *,
74 u32 *n_mask_hit); 74 u32 *n_mask_hit);
75struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
76 const struct sw_flow_key *);
75 77
76bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, 78bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
77 struct sw_flow_match *match); 79 struct sw_flow_match *match);
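
[Editor's note] The split API reads naturally at the call sites: the receive fast path wants the probe count to feed the mask-hit statistics, while control-path callers (flow get/set/del) do not. A hedged usage sketch, with tbl and key standing in for real locals:

    u32 n_mask_hit;
    struct sw_flow *flow;

    /* Fast path: also report how many masks were probed. */
    flow = ovs_flow_tbl_lookup_stats(tbl, &key, &n_mask_hit);

    /* Control path: same lookup; the wrapper discards the count
     * through a __always_unused local, as flow_table.c shows. */
    flow = ovs_flow_tbl_lookup(tbl, &key);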
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index f5275dd29cd9..208dd9a26dd1 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -33,6 +33,9 @@
33#include "vport.h" 33#include "vport.h"
34#include "vport-internal_dev.h" 34#include "vport-internal_dev.h"
35 35
36static void ovs_vport_record_error(struct vport *,
37 enum vport_err_type err_type);
38
36/* List of statically compiled vport implementations. Don't forget to also 39/* List of statically compiled vport implementations. Don't forget to also
37 * add yours to the list at the bottom of vport.h. */ 40 * add yours to the list at the bottom of vport.h. */
38static const struct vport_ops *vport_ops_list[] = { 41static const struct vport_ops *vport_ops_list[] = {
@@ -396,7 +399,8 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
396 * If using the vport generic stats layer indicate that an error of the given 399 * If using the vport generic stats layer indicate that an error of the given
397 * type has occurred. 400 * type has occurred.
398 */ 401 */
399void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type) 402static void ovs_vport_record_error(struct vport *vport,
403 enum vport_err_type err_type)
400{ 404{
401 spin_lock(&vport->stats_lock); 405 spin_lock(&vport->stats_lock);
402 406
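
[Editor's note] With its last outside caller gone, ovs_vport_record_error() shrinks to file scope; the forward declaration added at the top of vport.c exists only because the definition sits below its first use. The general pattern, in a generic sketch (names illustrative):

    /* Forward declaration: lets earlier functions call the
     * now file-local helper before its definition appears. */
    static void record_error(void);

    void public_entry(void)
    {
            record_error();
    }

    static void record_error(void)
    {
            /* ... */
    }

The matching prototype removal from vport.h follows below.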
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index bc97ef7fa2af..d7e50a17396c 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -192,7 +192,6 @@ static inline struct vport *vport_from_priv(const void *priv)
192 192
193void ovs_vport_receive(struct vport *, struct sk_buff *, 193void ovs_vport_receive(struct vport *, struct sk_buff *,
194 struct ovs_key_ipv4_tunnel *); 194 struct ovs_key_ipv4_tunnel *);
195void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
196 195
197/* List of statically compiled vport implementations. Don't forget to also 196/* List of statically compiled vport implementations. Don't forget to also
198 * add yours to the list at the top of vport.c. */ 197 * add yours to the list at the top of vport.c. */