Diffstat (limited to 'net/openvswitch')
 net/openvswitch/actions.c      |  12
 net/openvswitch/datapath.c     | 261
 net/openvswitch/datapath.h     |   6
 net/openvswitch/flow.c         |  97
 net/openvswitch/flow.h         |  33
 net/openvswitch/flow_netlink.c |  66
 net/openvswitch/flow_netlink.h |   1
 net/openvswitch/flow_table.c   | 150
 net/openvswitch/flow_table.h   |   8
 net/openvswitch/vport.c        |  18
 net/openvswitch/vport.h        |   3
 11 files changed, 428 insertions(+), 227 deletions(-)
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 65cfaa816075..2c77e7b1a913 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -165,7 +165,7 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
 	}
 
 	csum_replace4(&nh->check, *addr, new_addr);
-	skb->rxhash = 0;
+	skb_clear_hash(skb);
 	*addr = new_addr;
 }
 
@@ -199,7 +199,7 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
 	if (recalculate_csum)
 		update_ipv6_checksum(skb, l4_proto, addr, new_addr);
 
-	skb->rxhash = 0;
+	skb_clear_hash(skb);
 	memcpy(addr, new_addr, sizeof(__be32[4]));
 }
 
@@ -296,7 +296,7 @@ static void set_tp_port(struct sk_buff *skb, __be16 *port,
 {
 	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
 	*port = new_port;
-	skb->rxhash = 0;
+	skb_clear_hash(skb);
 }
 
 static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
@@ -310,7 +310,7 @@ static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
 		uh->check = CSUM_MANGLED_0;
 	} else {
 		*port = new_port;
-		skb->rxhash = 0;
+		skb_clear_hash(skb);
 	}
 }
 
@@ -381,7 +381,7 @@ static int set_sctp(struct sk_buff *skb,
 		/* Carry any checksum errors through. */
 		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
 
-		skb->rxhash = 0;
+		skb_clear_hash(skb);
 	}
 
 	return 0;
@@ -445,7 +445,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
 	     a = nla_next(a, &rem)) {
 		switch (nla_type(a)) {
 		case OVS_SAMPLE_ATTR_PROBABILITY:
-			if (prandom_u32() >= nla_get_u32(a))
+			if (prandom_u32() >= nla_get_u32(a))
 				return 0;
 			break;
 
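A note on the pattern in this file: every action that rewrites an address or a port also invalidates the skb's cached receive hash, because that hash was computed over the old 5-tuple and would otherwise keep steering the modified packet (RPS/RFS, flow sampling) as if it still belonged to the old flow. The open-coded skb->rxhash = 0; becomes the then-new helper, roughly the following sketch based on the include/linux/skbuff.h of that era (the field names changed in later kernels):

	static inline void skb_clear_hash(struct sk_buff *skb)
	{
		skb->rxhash = 0;	/* cached flow hash is stale now */
		skb->l4_rxhash = 0;	/* it can no longer claim L4 accuracy */
	}

Using the helper instead of poking the field directly keeps callers correct if more hash state is added later.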
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6f5e1dd3be2d..8601b320b443 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -55,6 +55,7 @@
 
 #include "datapath.h"
 #include "flow.h"
+#include "flow_table.h"
 #include "flow_netlink.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
@@ -108,10 +109,9 @@ int lockdep_ovsl_is_held(void)
 #endif
 
 static struct vport *new_vport(const struct vport_parms *);
-static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
+static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
 			     const struct dp_upcall_info *);
-static int queue_userspace_packet(struct net *, int dp_ifindex,
-				  struct sk_buff *,
+static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
 				  const struct dp_upcall_info *);
 
 /* Must be called with rcu_read_lock or ovs_mutex. */
@@ -133,7 +133,7 @@ static struct datapath *get_dp(struct net *net, int dp_ifindex)
 }
 
 /* Must be called with rcu_read_lock or ovs_mutex. */
-const char *ovs_dp_name(const struct datapath *dp)
+static const char *ovs_dp_name(const struct datapath *dp)
 {
 	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
 	return vport->ops->get_name(vport);
@@ -161,7 +161,6 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 {
 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
 
-	ovs_flow_tbl_destroy(&dp->table);
 	free_percpu(dp->stats_percpu);
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp->ports);
@@ -234,7 +233,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	}
 
 	/* Look up flow. */
-	flow = ovs_flow_tbl_lookup(&dp->table, &key, &n_mask_hit);
+	flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
 	if (unlikely(!flow)) {
 		struct dp_upcall_info upcall;
 
@@ -251,9 +250,9 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	OVS_CB(skb)->flow = flow;
 	OVS_CB(skb)->pkt_key = &key;
 
-	stats_counter = &stats->n_hit;
-	ovs_flow_used(OVS_CB(skb)->flow, skb);
+	ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
 	ovs_execute_actions(dp, skb);
+	stats_counter = &stats->n_hit;
 
 out:
 	/* Update datapath statistics. */
@@ -277,7 +276,6 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
 		  const struct dp_upcall_info *upcall_info)
 {
 	struct dp_stats_percpu *stats;
-	int dp_ifindex;
 	int err;
 
 	if (upcall_info->portid == 0) {
@@ -285,16 +283,10 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
 		goto err;
 	}
 
-	dp_ifindex = get_dpifindex(dp);
-	if (!dp_ifindex) {
-		err = -ENODEV;
-		goto err;
-	}
-
 	if (!skb_is_gso(skb))
-		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
+		err = queue_userspace_packet(dp, skb, upcall_info);
 	else
-		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
+		err = queue_gso_packets(dp, skb, upcall_info);
 	if (err)
 		goto err;
 
@@ -310,8 +302,7 @@ err:
 	return err;
 }
 
-static int queue_gso_packets(struct net *net, int dp_ifindex,
-			     struct sk_buff *skb,
+static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
 			     const struct dp_upcall_info *upcall_info)
 {
 	unsigned short gso_type = skb_shinfo(skb)->gso_type;
@@ -320,14 +311,14 @@ static int queue_gso_packets(struct net *net, int dp_ifindex,
 	struct sk_buff *segs, *nskb;
 	int err;
 
-	segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
+	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
 	if (IS_ERR(segs))
 		return PTR_ERR(segs);
 
 	/* Queue all of the segments. */
 	skb = segs;
 	do {
-		err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
+		err = queue_userspace_packet(dp, skb, upcall_info);
 		if (err)
 			break;
 
@@ -380,11 +371,11 @@ static size_t key_attr_size(void)
 		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
 }
 
-static size_t upcall_msg_size(const struct sk_buff *skb,
-			      const struct nlattr *userdata)
+static size_t upcall_msg_size(const struct nlattr *userdata,
+			      unsigned int hdrlen)
 {
 	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
-		+ nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
+		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
 		+ nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
 
 	/* OVS_PACKET_ATTR_USERDATA */
@@ -394,15 +385,24 @@ static size_t upcall_msg_size(const struct sk_buff *skb,
 	return size;
 }
 
-static int queue_userspace_packet(struct net *net, int dp_ifindex,
-				  struct sk_buff *skb,
+static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 				  const struct dp_upcall_info *upcall_info)
 {
 	struct ovs_header *upcall;
 	struct sk_buff *nskb = NULL;
 	struct sk_buff *user_skb; /* to be queued to userspace */
 	struct nlattr *nla;
-	int err;
+	struct genl_info info = {
+		.dst_sk = ovs_dp_get_net(dp)->genl_sock,
+		.snd_portid = upcall_info->portid,
+	};
+	size_t len;
+	unsigned int hlen;
+	int err, dp_ifindex;
+
+	dp_ifindex = get_dpifindex(dp);
+	if (!dp_ifindex)
+		return -ENODEV;
 
 	if (vlan_tx_tag_present(skb)) {
 		nskb = skb_clone(skb, GFP_ATOMIC);
@@ -422,7 +422,22 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 		goto out;
 	}
 
-	user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC);
+	/* Complete checksum if needed */
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+	    (err = skb_checksum_help(skb)))
+		goto out;
+
+	/* Older versions of OVS user space enforce alignment of the last
+	 * Netlink attribute to NLA_ALIGNTO which would require extensive
+	 * padding logic. Only perform zerocopy if padding is not required.
+	 */
+	if (dp->user_features & OVS_DP_F_UNALIGNED)
+		hlen = skb_zerocopy_headlen(skb);
+	else
+		hlen = skb->len;
+
+	len = upcall_msg_size(upcall_info->userdata, hlen);
+	user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
 	if (!user_skb) {
 		err = -ENOMEM;
 		goto out;
@@ -441,26 +456,32 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 				  nla_len(upcall_info->userdata),
 				  nla_data(upcall_info->userdata));
 
-	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
+	/* Only reserve room for attribute header, packet data is added
+	 * in skb_zerocopy() */
+	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
+		err = -ENOBUFS;
+		goto out;
+	}
+	nla->nla_len = nla_attr_size(skb->len);
+
+	skb_zerocopy(user_skb, skb, skb->len, hlen);
+
+	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
+	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
+		size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
 
-	skb_copy_and_csum_dev(skb, nla_data(nla));
+		if (plen > 0)
+			memset(skb_put(user_skb, plen), 0, plen);
+	}
 
-	genlmsg_end(user_skb, upcall);
-	err = genlmsg_unicast(net, user_skb, upcall_info->portid);
+	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
 
+	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
 out:
 	kfree_skb(nskb);
 	return err;
 }
 
-static void clear_stats(struct sw_flow *flow)
-{
-	flow->used = 0;
-	flow->tcp_flags = 0;
-	flow->packet_count = 0;
-	flow->byte_count = 0;
-}
-
 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 {
 	struct ovs_header *ovs_header = info->userhdr;
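Two things happen in the rewritten upcall path above. First, skb_zerocopy() copies only the first hlen bytes of the packet into the Netlink message and attaches the remainder by referencing the original skb's page fragments, which is why upcall_msg_size() now budgets for hdrlen rather than the full skb->len. Second, when user space has not announced OVS_DP_F_UNALIGNED, the OVS_PACKET_ATTR_PACKET attribute must still end on an NLA_ALIGNTO (4 byte) boundary, which costs at most three bytes of zero fill. A standalone illustration of that padding arithmetic, in user-space C with invented lengths:

	#include <stdio.h>

	#define NLA_ALIGNTO	4
	#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

	int main(void)
	{
		/* hypothetical user_skb->len values after skb_zerocopy() */
		unsigned int lens[] = { 120, 121, 122, 123 };

		for (int i = 0; i < 4; i++) {
			unsigned int plen = NLA_ALIGN(lens[i]) - lens[i];
			/* plen is always 0..3; the zeroed bytes keep the
			 * attribute end aligned for consumers that assume
			 * NLA_ALIGNTO alignment */
			printf("len=%u -> pad=%u byte(s)\n", lens[i], plen);
		}
		return 0;
	}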
@@ -499,7 +520,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	packet->protocol = htons(ETH_P_802_2);
 
 	/* Build an sw_flow for sending this packet. */
-	flow = ovs_flow_alloc();
+	flow = ovs_flow_alloc(false);
 	err = PTR_ERR(flow);
 	if (IS_ERR(flow))
 		goto err_kfree_skb;
@@ -635,10 +656,10 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	const int skb_orig_len = skb->len;
 	struct nlattr *start;
 	struct ovs_flow_stats stats;
+	__be16 tcp_flags;
+	unsigned long used;
 	struct ovs_header *ovs_header;
 	struct nlattr *nla;
-	unsigned long used;
-	u8 tcp_flags;
 	int err;
 
 	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
@@ -667,24 +688,17 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 
 	nla_nest_end(skb, nla);
 
-	spin_lock_bh(&flow->lock);
-	used = flow->used;
-	stats.n_packets = flow->packet_count;
-	stats.n_bytes = flow->byte_count;
-	tcp_flags = (u8)ntohs(flow->tcp_flags);
-	spin_unlock_bh(&flow->lock);
-
+	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
 	if (used &&
 	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
 		goto nla_put_failure;
 
 	if (stats.n_packets &&
-	    nla_put(skb, OVS_FLOW_ATTR_STATS,
-		    sizeof(struct ovs_flow_stats), &stats))
+	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
 		goto nla_put_failure;
 
-	if (tcp_flags &&
-	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
+	if ((u8)ntohs(tcp_flags) &&
+	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
 		goto nla_put_failure;
 
 	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
@@ -701,8 +715,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	if (start) {
 		const struct sw_flow_actions *sf_acts;
 
-		sf_acts = rcu_dereference_check(flow->sf_acts,
-						lockdep_ovsl_is_held());
+		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
 
 		err = ovs_nla_put_actions(sf_acts->actions,
 					  sf_acts->actions_len, skb);
@@ -726,39 +739,34 @@ error:
 	return err;
 }
 
-static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
+static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow,
+					       struct genl_info *info)
 {
-	const struct sw_flow_actions *sf_acts;
+	size_t len;
 
-	sf_acts = ovsl_dereference(flow->sf_acts);
+	len = ovs_flow_cmd_msg_size(ovsl_dereference(flow->sf_acts));
 
-	return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
+	return genlmsg_new_unicast(len, info, GFP_KERNEL);
 }
 
 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
 					       struct datapath *dp,
-					       u32 portid, u32 seq, u8 cmd)
+					       struct genl_info *info,
+					       u8 cmd)
 {
 	struct sk_buff *skb;
 	int retval;
 
-	skb = ovs_flow_cmd_alloc_info(flow);
+	skb = ovs_flow_cmd_alloc_info(flow, info);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
+	retval = ovs_flow_cmd_fill_info(flow, dp, skb, info->snd_portid,
+					info->snd_seq, 0, cmd);
 	BUG_ON(retval < 0);
 	return skb;
 }
 
-static struct sw_flow *__ovs_flow_tbl_lookup(struct flow_table *tbl,
-					     const struct sw_flow_key *key)
-{
-	u32 __always_unused n_mask_hit;
-
-	return ovs_flow_tbl_lookup(tbl, key, &n_mask_hit);
-}
-
 static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr **a = info->attrs;
@@ -770,6 +778,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	struct datapath *dp;
 	struct sw_flow_actions *acts = NULL;
 	struct sw_flow_match match;
+	bool exact_5tuple;
 	int error;
 
 	/* Extract key. */
@@ -778,7 +787,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		goto error;
 
 	ovs_match_init(&match, &key, &mask);
-	error = ovs_nla_get_match(&match,
+	error = ovs_nla_get_match(&match, &exact_5tuple,
 				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
 	if (error)
 		goto error;
@@ -809,7 +818,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		goto err_unlock_ovs;
 
 	/* Check if this is a duplicate flow */
-	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow) {
 		/* Bail out if we're not allowed to create a new flow. */
 		error = -ENOENT;
@@ -817,12 +826,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			goto err_unlock_ovs;
 
 		/* Allocate flow. */
-		flow = ovs_flow_alloc();
+		flow = ovs_flow_alloc(!exact_5tuple);
 		if (IS_ERR(flow)) {
 			error = PTR_ERR(flow);
 			goto err_unlock_ovs;
 		}
-		clear_stats(flow);
 
 		flow->key = masked_key;
 		flow->unmasked_key = key;
@@ -835,8 +843,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			goto err_flow_free;
 		}
 
-		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
-						info->snd_seq, OVS_FLOW_CMD_NEW);
+		reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
 	} else {
 		/* We found a matching flow. */
 		struct sw_flow_actions *old_acts;
@@ -853,26 +860,19 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			goto err_unlock_ovs;
 
 		/* The unmasked key has to be the same for flow updates. */
-		error = -EINVAL;
-		if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
-			OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
+		if (!ovs_flow_cmp_unmasked_key(flow, &match))
 			goto err_unlock_ovs;
-		}
 
 		/* Update actions. */
 		old_acts = ovsl_dereference(flow->sf_acts);
 		rcu_assign_pointer(flow->sf_acts, acts);
 		ovs_nla_free_flow_actions(old_acts);
 
-		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
-						info->snd_seq, OVS_FLOW_CMD_NEW);
+		reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
 
 		/* Clear stats. */
-		if (a[OVS_FLOW_ATTR_CLEAR]) {
-			spin_lock_bh(&flow->lock);
-			clear_stats(flow);
-			spin_unlock_bh(&flow->lock);
-		}
+		if (a[OVS_FLOW_ATTR_CLEAR])
+			ovs_flow_stats_clear(flow);
 	}
 	ovs_unlock();
 
@@ -910,7 +910,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	ovs_match_init(&match, &key, NULL);
-	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+	err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		return err;
 
@@ -921,14 +921,13 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
 		err = -ENOENT;
 		goto unlock;
 	}
 
-	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
-					info->snd_seq, OVS_FLOW_CMD_NEW);
+	reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
 		goto unlock;
@@ -965,17 +964,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	ovs_match_init(&match, &key, NULL);
-	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+	err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		goto unlock;
 
-	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
 		err = -ENOENT;
 		goto unlock;
 	}
 
-	reply = ovs_flow_cmd_alloc_info(flow);
+	reply = ovs_flow_cmd_alloc_info(flow, info);
 	if (!reply) {
 		err = -ENOMEM;
 		goto unlock;
@@ -1061,6 +1060,7 @@ static const struct genl_ops dp_flow_genl_ops[] = {
 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
 	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
 	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
 };
 
 static struct genl_family dp_datapath_genl_family = {
@@ -1084,6 +1084,7 @@ static size_t ovs_dp_cmd_msg_size(void)
 	msgsize += nla_total_size(IFNAMSIZ);
 	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
 	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
+	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
 
 	return msgsize;
 }
@@ -1119,6 +1120,9 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 			  &dp_megaflow_stats))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
+		goto nla_put_failure;
+
 	return genlmsg_end(skb, ovs_header);
 
 nla_put_failure:
@@ -1127,17 +1131,17 @@ error:
 	return -EMSGSIZE;
 }
 
-static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
-					     u32 seq, u8 cmd)
+static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp,
+					     struct genl_info *info, u8 cmd)
 {
 	struct sk_buff *skb;
 	int retval;
 
-	skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
+	skb = genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
+	retval = ovs_dp_cmd_fill_info(dp, skb, info->snd_portid, info->snd_seq, 0, cmd);
 	if (retval < 0) {
 		kfree_skb(skb);
 		return ERR_PTR(retval);
@@ -1165,6 +1169,24 @@ static struct datapath *lookup_datapath(struct net *net,
 	return dp ? dp : ERR_PTR(-ENODEV);
 }
 
+static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
+{
+	struct datapath *dp;
+
+	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+	if (IS_ERR(dp))
+		return;
+
+	WARN(dp->user_features, "Dropping previously announced user features\n");
+	dp->user_features = 0;
+}
+
+static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
+{
+	if (a[OVS_DP_ATTR_USER_FEATURES])
+		dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
+}
+
 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr **a = info->attrs;
@@ -1223,17 +1245,27 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	parms.port_no = OVSP_LOCAL;
 	parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
 
+	ovs_dp_change(dp, a);
+
 	vport = new_vport(&parms);
 	if (IS_ERR(vport)) {
 		err = PTR_ERR(vport);
 		if (err == -EBUSY)
 			err = -EEXIST;
 
+		if (err == -EEXIST) {
+			/* An outdated user space instance that does not understand
+			 * the concept of user_features has attempted to create a new
+			 * datapath and is likely to reuse it. Drop all user features.
+			 */
+			if (info->genlhdr->version < OVS_DP_VER_FEATURES)
+				ovs_dp_reset_user_features(skb, info);
+		}
+
 		goto err_destroy_ports_array;
 	}
 
-	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
-				      info->snd_seq, OVS_DP_CMD_NEW);
+	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
 	err = PTR_ERR(reply);
 	if (IS_ERR(reply))
 		goto err_destroy_local_port;
@@ -1253,7 +1285,7 @@ err_destroy_ports_array:
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
 err_destroy_table:
-	ovs_flow_tbl_destroy(&dp->table);
+	ovs_flow_tbl_destroy(&dp->table, false);
 err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
@@ -1280,10 +1312,13 @@ static void __dp_destroy(struct datapath *dp)
 	list_del_rcu(&dp->list_node);
 
 	/* OVSP_LOCAL is datapath internal port. We need to make sure that
-	 * all port in datapath are destroyed first before freeing datapath.
+	 * all ports in datapath are destroyed first before freeing datapath.
 	 */
 	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 
+	/* RCU destroy the flow table */
+	ovs_flow_tbl_destroy(&dp->table, true);
+
 	call_rcu(&dp->rcu, destroy_dp_rcu);
 }
 
@@ -1299,8 +1334,7 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(dp))
 		goto unlock;
 
-	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
-				      info->snd_seq, OVS_DP_CMD_DEL);
+	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_DEL);
 	err = PTR_ERR(reply);
 	if (IS_ERR(reply))
 		goto unlock;
@@ -1328,8 +1362,9 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(dp))
 		goto unlock;
 
-	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
-				      info->snd_seq, OVS_DP_CMD_NEW);
+	ovs_dp_change(dp, info->attrs);
+
+	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
 		genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
@@ -1360,8 +1395,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
-				      info->snd_seq, OVS_DP_CMD_NEW);
+	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
 		goto unlock;
@@ -1441,7 +1475,7 @@ struct genl_family dp_vport_genl_family = {
 	.parallel_ops = true,
 };
 
-struct genl_multicast_group ovs_dp_vport_multicast_group = {
+static struct genl_multicast_group ovs_dp_vport_multicast_group = {
 	.name = OVS_VPORT_MCGROUP
 };
 
@@ -1728,11 +1762,12 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	int bucket = cb->args[0], skip = cb->args[1];
 	int i, j = 0;
 
+	rcu_read_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
-	if (!dp)
+	if (!dp) {
+		rcu_read_unlock();
 		return -ENODEV;
-
-	rcu_read_lock();
+	}
 	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
 		struct vport *vport;
 
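The final hunk is a standalone locking fix: get_dp() walks RCU-protected pointers (its comment in this file requires rcu_read_lock or ovs_mutex), so taking the read lock only after the lookup left a window in which a concurrent datapath destroy could free dp under the dump. A condensed sketch of the corrected shape, with the reasoning as comments; this is an outline of the function above, not a complete dump implementation:

	rcu_read_lock();	/* must cover the lookup itself, not just the loop */
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}
	/* dp and its vports now stay valid until rcu_read_unlock(), since
	 * __dp_destroy() frees them only after an RCU grace period. */
	...
	rcu_read_unlock();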
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 4067ea41be28..6be9fbb5e9cb 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -88,6 +88,8 @@ struct datapath {
 	/* Network namespace ref. */
 	struct net *net;
 #endif
+
+	u32 user_features;
 };
 
 /**
@@ -145,6 +147,8 @@ int lockdep_ovsl_is_held(void);
 #define ASSERT_OVSL()		WARN_ON(unlikely(!lockdep_ovsl_is_held()))
 #define ovsl_dereference(p)					\
 	rcu_dereference_protected(p, lockdep_ovsl_is_held())
+#define rcu_dereference_ovsl(p)					\
+	rcu_dereference_check(p, lockdep_ovsl_is_held())
 
 static inline struct net *ovs_dp_get_net(struct datapath *dp)
 {
@@ -178,14 +182,12 @@ static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_n
 
 extern struct notifier_block ovs_dp_device_notifier;
 extern struct genl_family dp_vport_genl_family;
-extern struct genl_multicast_group ovs_dp_vport_multicast_group;
 
 void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
 void ovs_dp_detach_port(struct vport *);
 int ovs_dp_upcall(struct datapath *, struct sk_buff *,
		  const struct dp_upcall_info *);
 
-const char *ovs_dp_name(const struct datapath *dp);
 struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
					 u8 cmd);
 
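The new macro completes a pair: ovsl_dereference() is for writers and asserts that ovs_mutex is held, while rcu_dereference_ovsl() is for readers that may run under either an RCU read-side section or ovs_mutex, with lockdep checking whichever applies. A sketch of the intended division of labor, mirroring how datapath.c uses the two on flow->sf_acts (the helper names are invented for illustration):

	/* Reader side: valid under rcu_read_lock() or ovs_mutex. */
	static const struct sw_flow_actions *flow_get_acts(const struct sw_flow *flow)
	{
		return rcu_dereference_ovsl(flow->sf_acts);
	}

	/* Writer side: caller must hold ovs_mutex. Publish the new actions,
	 * then free the old ones after an RCU grace period. */
	static void flow_set_acts(struct sw_flow *flow, struct sw_flow_actions *acts)
	{
		struct sw_flow_actions *old = ovsl_dereference(flow->sf_acts);

		rcu_assign_pointer(flow->sf_acts, acts);
		ovs_nla_free_flow_actions(old);
	}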
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index b409f5279601..dda451f4429c 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -35,6 +35,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/sctp.h>
+#include <linux/smp.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/icmp.h>
@@ -60,23 +61,105 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 
 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
 
-void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 {
+	struct flow_stats *stats;
 	__be16 tcp_flags = 0;
 
+	if (!flow->stats.is_percpu)
+		stats = flow->stats.stat;
+	else
+		stats = this_cpu_ptr(flow->stats.cpu_stats);
+
 	if ((flow->key.eth.type == htons(ETH_P_IP) ||
 	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
+	    flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
 	    flow->key.ip.proto == IPPROTO_TCP &&
 	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
 		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
 	}
 
-	spin_lock(&flow->lock);
-	flow->used = jiffies;
-	flow->packet_count++;
-	flow->byte_count += skb->len;
-	flow->tcp_flags |= tcp_flags;
-	spin_unlock(&flow->lock);
+	spin_lock(&stats->lock);
+	stats->used = jiffies;
+	stats->packet_count++;
+	stats->byte_count += skb->len;
+	stats->tcp_flags |= tcp_flags;
+	spin_unlock(&stats->lock);
+}
+
+static void stats_read(struct flow_stats *stats,
+		       struct ovs_flow_stats *ovs_stats,
+		       unsigned long *used, __be16 *tcp_flags)
+{
+	spin_lock(&stats->lock);
+	if (!*used || time_after(stats->used, *used))
+		*used = stats->used;
+	*tcp_flags |= stats->tcp_flags;
+	ovs_stats->n_packets += stats->packet_count;
+	ovs_stats->n_bytes += stats->byte_count;
+	spin_unlock(&stats->lock);
+}
+
+void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
+			unsigned long *used, __be16 *tcp_flags)
+{
+	int cpu, cur_cpu;
+
+	*used = 0;
+	*tcp_flags = 0;
+	memset(ovs_stats, 0, sizeof(*ovs_stats));
+
+	if (!flow->stats.is_percpu) {
+		stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
+	} else {
+		cur_cpu = get_cpu();
+		for_each_possible_cpu(cpu) {
+			struct flow_stats *stats;
+
+			if (cpu == cur_cpu)
+				local_bh_disable();
+
+			stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
+			stats_read(stats, ovs_stats, used, tcp_flags);
+
+			if (cpu == cur_cpu)
+				local_bh_enable();
+		}
+		put_cpu();
+	}
+}
+
+static void stats_reset(struct flow_stats *stats)
+{
+	spin_lock(&stats->lock);
+	stats->used = 0;
+	stats->packet_count = 0;
+	stats->byte_count = 0;
+	stats->tcp_flags = 0;
+	spin_unlock(&stats->lock);
+}
+
+void ovs_flow_stats_clear(struct sw_flow *flow)
+{
+	int cpu, cur_cpu;
+
+	if (!flow->stats.is_percpu) {
+		stats_reset(flow->stats.stat);
+	} else {
+		cur_cpu = get_cpu();
+
+		for_each_possible_cpu(cpu) {
+
+			if (cpu == cur_cpu)
+				local_bh_disable();
+
+			stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
+
+			if (cpu == cur_cpu)
+				local_bh_enable();
+		}
+		put_cpu();
+	}
 }
 
 static int check_header(struct sk_buff *skb, int len)
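A note on the locking scheme above. Each stats slot keeps a spinlock because a reader aggregating from another CPU must see that slot's counters as one consistent unit. The asymmetric local_bh_disable() exists because ovs_flow_stats_update() runs from packet processing in softirq context: if the reader held its own CPU's slot lock when a softirq fired locally, the writer would spin on that lock forever on the same CPU. Remote slots need no such care, since a remote writer simply spins until the reader unlocks. The self-deadlock being avoided, in sketch form:

	/* Both on CPU X, without local_bh_disable():
	 *
	 *   reader:   spin_lock(&per_cpu_ptr(stats, X)->lock);
	 *   softirq:  ovs_flow_stats_update()
	 *               spin_lock(&per_cpu_ptr(stats, X)->lock);  <- never acquired
	 *
	 * Hence the pattern shared by ovs_flow_stats_get() and _clear():
	 */
	cur_cpu = get_cpu();			/* pin to this CPU for the loop */
	for_each_possible_cpu(cpu) {
		if (cpu == cur_cpu)
			local_bh_disable();	/* keep local softirqs out */
		stats_read(per_cpu_ptr(flow->stats.cpu_stats, cpu),
			   ovs_stats, used, tcp_flags);
		if (cpu == cur_cpu)
			local_bh_enable();
	}
	put_cpu();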
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 1510f51dbf74..2d770e28a3a3 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -19,6 +19,7 @@
 #ifndef FLOW_H
 #define FLOW_H 1
 
+#include <linux/cache.h>
 #include <linux/kernel.h>
 #include <linux/netlink.h>
 #include <linux/openvswitch.h>
@@ -122,8 +123,8 @@ struct sw_flow_key {
 } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
 
 struct sw_flow_key_range {
-	size_t start;
-	size_t end;
+	unsigned short int start;
+	unsigned short int end;
 };
 
 struct sw_flow_mask {
@@ -146,6 +147,22 @@ struct sw_flow_actions {
 	struct nlattr actions[];
 };
 
+struct flow_stats {
+	u64 packet_count;		/* Number of packets matched. */
+	u64 byte_count;			/* Number of bytes matched. */
+	unsigned long used;		/* Last used time (in jiffies). */
+	spinlock_t lock;		/* Lock for atomic stats update. */
+	__be16 tcp_flags;		/* Union of seen TCP flags. */
+};
+
+struct sw_flow_stats {
+	bool is_percpu;
+	union {
+		struct flow_stats *stat;
+		struct flow_stats __percpu *cpu_stats;
+	};
+};
+
 struct sw_flow {
 	struct rcu_head rcu;
 	struct hlist_node hash_node[2];
@@ -155,12 +172,7 @@ struct sw_flow {
 	struct sw_flow_key unmasked_key;
 	struct sw_flow_mask *mask;
 	struct sw_flow_actions __rcu *sf_acts;
-
-	spinlock_t lock;	/* Lock for values below. */
-	unsigned long used;	/* Last used time (in jiffies). */
-	u64 packet_count;	/* Number of packets matched. */
-	u64 byte_count;		/* Number of bytes matched. */
-	__be16 tcp_flags;	/* Union of seen TCP flags. */
+	struct sw_flow_stats stats;
 };
 
 struct arp_eth_header {
@@ -177,7 +189,10 @@ struct arp_eth_header {
 	unsigned char		ar_tip[4];	/* target IP address */
 } __packed;
 
-void ovs_flow_used(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb);
+void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *stats,
+			unsigned long *used, __be16 *tcp_flags);
+void ovs_flow_stats_clear(struct sw_flow *flow);
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 2bc1bc1aca3b..4d000acaed0d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -266,6 +266,20 @@ static bool is_all_zero(const u8 *fp, size_t size)
 	return true;
 }
 
+static bool is_all_set(const u8 *fp, size_t size)
+{
+	int i;
+
+	if (!fp)
+		return false;
+
+	for (i = 0; i < size; i++)
+		if (fp[i] != 0xff)
+			return false;
+
+	return true;
+}
+
 static int __parse_flow_nlattrs(const struct nlattr *attr,
 				const struct nlattr *a[],
 				u64 *attrsp, bool nz)
@@ -487,8 +501,9 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
 	return 0;
 }
 
-static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
-				const struct nlattr **a, bool is_mask)
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple,
+				u64 attrs, const struct nlattr **a,
+				bool is_mask)
 {
 	int err;
 	u64 orig_attrs = attrs;
@@ -545,6 +560,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
 	}
 
+	if (is_mask && exact_5tuple) {
+		if (match->mask->key.eth.type != htons(0xffff))
+			*exact_5tuple = false;
+	}
+
 	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
 		const struct ovs_key_ipv4 *ipv4_key;
 
@@ -567,6 +587,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
 				ipv4_key->ipv4_dst, is_mask);
 		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+
+		if (is_mask && exact_5tuple && *exact_5tuple) {
+			if (ipv4_key->ipv4_proto != 0xff ||
+			    ipv4_key->ipv4_src != htonl(0xffffffff) ||
+			    ipv4_key->ipv4_dst != htonl(0xffffffff))
+				*exact_5tuple = false;
+		}
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
@@ -598,6 +625,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 				is_mask);
 
 		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+
+		if (is_mask && exact_5tuple && *exact_5tuple) {
+			if (ipv6_key->ipv6_proto != 0xff ||
+			    !is_all_set((u8 *)ipv6_key->ipv6_src, sizeof(match->key->ipv6.addr.src)) ||
+			    !is_all_set((u8 *)ipv6_key->ipv6_dst, sizeof(match->key->ipv6.addr.dst)))
+				*exact_5tuple = false;
+		}
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
@@ -640,6 +674,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 					tcp_key->tcp_dst, is_mask);
 		}
 		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+		if (is_mask && exact_5tuple && *exact_5tuple &&
+		    (tcp_key->tcp_src != htons(0xffff) ||
+		     tcp_key->tcp_dst != htons(0xffff)))
+			*exact_5tuple = false;
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
@@ -671,6 +710,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 					udp_key->udp_dst, is_mask);
 		}
 		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+		if (is_mask && exact_5tuple && *exact_5tuple &&
+		    (udp_key->udp_src != htons(0xffff) ||
+		     udp_key->udp_dst != htons(0xffff)))
+			*exact_5tuple = false;
 	}
 
 	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
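The exact_5tuple checks threaded through the branches above all serve one predicate: the mask qualifies only if EtherType, IP protocol, both addresses, and both ports are fully set, so the flow can match exactly one 5-tuple. Consolidated for the IPv4/TCP case into a single helper (the helper is invented here for illustration; it uses the same attribute structs the parser reads):

	static bool v4_tcp_mask_is_exact_5tuple(__be16 eth_type_mask,
						const struct ovs_key_ipv4 *ipv4_mask,
						const struct ovs_key_tcp *tcp_mask)
	{
		return eth_type_mask == htons(0xffff) &&
		       ipv4_mask->ipv4_proto == 0xff &&
		       ipv4_mask->ipv4_src == htonl(0xffffffff) &&
		       ipv4_mask->ipv4_dst == htonl(0xffffffff) &&
		       tcp_mask->tcp_src == htons(0xffff) &&
		       tcp_mask->tcp_dst == htons(0xffff);
	}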
@@ -756,6 +800,7 @@ static void sw_flow_mask_set(struct sw_flow_mask *mask,
  * attribute specifies the mask field of the wildcarded flow.
  */
 int ovs_nla_get_match(struct sw_flow_match *match,
+		      bool *exact_5tuple,
 		      const struct nlattr *key,
 		      const struct nlattr *mask)
 {
@@ -803,10 +848,13 @@ int ovs_nla_get_match(struct sw_flow_match *match,
 		}
 	}
 
-	err = ovs_key_from_nlattrs(match, key_attrs, a, false);
+	err = ovs_key_from_nlattrs(match, NULL, key_attrs, a, false);
 	if (err)
 		return err;
 
+	if (exact_5tuple)
+		*exact_5tuple = true;
+
 	if (mask) {
 		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
 		if (err)
@@ -844,7 +892,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
 			}
 		}
 
-		err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
+		err = ovs_key_from_nlattrs(match, exact_5tuple, mask_attrs, a, true);
 		if (err)
 			return err;
 	} else {
@@ -1128,19 +1176,11 @@ struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size)
 	return sfa;
 }
 
-/* RCU callback used by ovs_nla_free_flow_actions. */
-static void rcu_free_acts_callback(struct rcu_head *rcu)
-{
-	struct sw_flow_actions *sf_acts = container_of(rcu,
-			struct sw_flow_actions, rcu);
-	kfree(sf_acts);
-}
-
 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
  * The caller must hold rcu_read_lock for this to be sensible. */
 void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
 {
-	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+	kfree_rcu(sf_acts, rcu);
 }
 
 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
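The kfree_rcu() conversion above removes boilerplate without changing behavior: when an RCU callback does nothing but kfree() the enclosing object, kfree_rcu(ptr, field) queues the embedded rcu_head and lets the RCU core recover the object from the member's offset. Approximately, and simplified from the rcupdate.h of that era:

	/* kfree_rcu(sf_acts, rcu) expands to roughly: */
	__kfree_rcu(&sf_acts->rcu, offsetof(struct sw_flow_actions, rcu));

	/* which matches the deleted hand-written pair: */
	static void rcu_free_acts_callback(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct sw_flow_actions, rcu));
	}
	/* ... call_rcu(&sf_acts->rcu, rcu_free_acts_callback); */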
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 440151045d39..b31fbe28bc7a 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -45,6 +45,7 @@ int ovs_nla_put_flow(const struct sw_flow_key *,
 int ovs_nla_get_flow_metadata(struct sw_flow *flow,
 			      const struct nlattr *attr);
 int ovs_nla_get_match(struct sw_flow_match *match,
+		      bool *exact_5tuple,
 		      const struct nlattr *,
 		      const struct nlattr *);
 
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index e42542706087..3c268b3d71c3 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/if_vlan.h> | 25 | #include <linux/if_vlan.h> |
26 | #include <net/llc_pdu.h> | 26 | #include <net/llc_pdu.h> |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/jhash.h> | 28 | #include <linux/hash.h> |
29 | #include <linux/jiffies.h> | 29 | #include <linux/jiffies.h> |
30 | #include <linux/llc.h> | 30 | #include <linux/llc.h> |
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
@@ -44,8 +44,6 @@ | |||
44 | #include <net/ipv6.h> | 44 | #include <net/ipv6.h> |
45 | #include <net/ndisc.h> | 45 | #include <net/ndisc.h> |
46 | 46 | ||
47 | #include "datapath.h" | ||
48 | |||
49 | #define TBL_MIN_BUCKETS 1024 | 47 | #define TBL_MIN_BUCKETS 1024 |
50 | #define REHASH_INTERVAL (10 * 60 * HZ) | 48 | #define REHASH_INTERVAL (10 * 60 * HZ) |
51 | 49 | ||
@@ -72,19 +70,42 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, | |||
72 | *d++ = *s++ & *m++; | 70 | *d++ = *s++ & *m++; |
73 | } | 71 | } |
74 | 72 | ||
75 | struct sw_flow *ovs_flow_alloc(void) | 73 | struct sw_flow *ovs_flow_alloc(bool percpu_stats) |
76 | { | 74 | { |
77 | struct sw_flow *flow; | 75 | struct sw_flow *flow; |
76 | int cpu; | ||
78 | 77 | ||
79 | flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); | 78 | flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); |
80 | if (!flow) | 79 | if (!flow) |
81 | return ERR_PTR(-ENOMEM); | 80 | return ERR_PTR(-ENOMEM); |
82 | 81 | ||
83 | spin_lock_init(&flow->lock); | ||
84 | flow->sf_acts = NULL; | 82 | flow->sf_acts = NULL; |
85 | flow->mask = NULL; | 83 | flow->mask = NULL; |
86 | 84 | ||
85 | flow->stats.is_percpu = percpu_stats; | ||
86 | |||
87 | if (!percpu_stats) { | ||
88 | flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL); | ||
89 | if (!flow->stats.stat) | ||
90 | goto err; | ||
91 | |||
92 | spin_lock_init(&flow->stats.stat->lock); | ||
93 | } else { | ||
94 | flow->stats.cpu_stats = alloc_percpu(struct flow_stats); | ||
95 | if (!flow->stats.cpu_stats) | ||
96 | goto err; | ||
97 | |||
98 | for_each_possible_cpu(cpu) { | ||
99 | struct flow_stats *cpu_stats; | ||
100 | |||
101 | cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu); | ||
102 | spin_lock_init(&cpu_stats->lock); | ||
103 | } | ||
104 | } | ||
87 | return flow; | 105 | return flow; |
106 | err: | ||
107 | kmem_cache_free(flow_cache, flow); | ||
108 | return ERR_PTR(-ENOMEM); | ||
88 | } | 109 | } |
89 | 110 | ||
90 | int ovs_flow_tbl_count(struct flow_table *table) | 111 | int ovs_flow_tbl_count(struct flow_table *table) |
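Annotation: after this hunk every flow owns either one shared flow_stats bucket or a per-CPU array of them, each bucket carrying its own spinlock initialized at allocation time. A hedged sketch of the update side this layout implies — the real ovs_flow_stats_update() lives in flow.c, which this excerpt does not show, and TCP-flag accounting is omitted here:

    void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
    {
            struct flow_stats *stats;

            if (!flow->stats.is_percpu)
                    stats = flow->stats.stat;       /* shared bucket */
            else
                    stats = this_cpu_ptr(flow->stats.cpu_stats);

            spin_lock(&stats->lock);
            stats->used = jiffies;
            stats->packet_count++;
            stats->byte_count += skb->len;
            spin_unlock(&stats->lock);
    }

The design point is contention, not correctness: both variants take a lock, but the per-CPU variant makes that lock effectively uncontended on the packet path.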
@@ -118,6 +139,10 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets) | |||
118 | static void flow_free(struct sw_flow *flow) | 139 | static void flow_free(struct sw_flow *flow) |
119 | { | 140 | { |
120 | kfree((struct sf_flow_acts __force *)flow->sf_acts); | 141 | kfree((struct sf_flow_acts __force *)flow->sf_acts); |
142 | if (flow->stats.is_percpu) | ||
143 | free_percpu(flow->stats.cpu_stats); | ||
144 | else | ||
145 | kfree(flow->stats.stat); | ||
121 | kmem_cache_free(flow_cache, flow); | 146 | kmem_cache_free(flow_cache, flow); |
122 | } | 147 | } |
123 | 148 | ||
@@ -128,36 +153,29 @@ static void rcu_free_flow_callback(struct rcu_head *rcu) | |||
128 | flow_free(flow); | 153 | flow_free(flow); |
129 | } | 154 | } |
130 | 155 | ||
131 | static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu) | ||
132 | { | ||
133 | struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu); | ||
134 | |||
135 | kfree(mask); | ||
136 | } | ||
137 | |||
138 | static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred) | ||
139 | { | ||
140 | if (!mask) | ||
141 | return; | ||
142 | |||
143 | BUG_ON(!mask->ref_count); | ||
144 | mask->ref_count--; | ||
145 | |||
146 | if (!mask->ref_count) { | ||
147 | list_del_rcu(&mask->list); | ||
148 | if (deferred) | ||
149 | call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb); | ||
150 | else | ||
151 | kfree(mask); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | void ovs_flow_free(struct sw_flow *flow, bool deferred) | 156 | void ovs_flow_free(struct sw_flow *flow, bool deferred) |
156 | { | 157 | { |
157 | if (!flow) | 158 | if (!flow) |
158 | return; | 159 | return; |
159 | 160 | ||
160 | flow_mask_del_ref(flow->mask, deferred); | 161 | if (flow->mask) { |
162 | struct sw_flow_mask *mask = flow->mask; | ||
163 | |||
164 | /* ovs-lock is required to protect mask-refcount and | ||
165 | * mask list. | ||
166 | */ | ||
167 | ASSERT_OVSL(); | ||
168 | BUG_ON(!mask->ref_count); | ||
169 | mask->ref_count--; | ||
170 | |||
171 | if (!mask->ref_count) { | ||
172 | list_del_rcu(&mask->list); | ||
173 | if (deferred) | ||
174 | kfree_rcu(mask, rcu); | ||
175 | else | ||
176 | kfree(mask); | ||
177 | } | ||
178 | } | ||
161 | 179 | ||
162 | if (deferred) | 180 | if (deferred) |
163 | call_rcu(&flow->rcu, rcu_free_flow_callback); | 181 | call_rcu(&flow->rcu, rcu_free_flow_callback); |
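Annotation: folding the old flow_mask_del_ref() into ovs_flow_free() also spells out the locking contract — ref_count and the mask list are ovs_mutex state (hence ASSERT_OVSL()), while the free itself stays deferred because lockless readers may still be walking tbl->mask_list. The same release path as a standalone sketch (mask_put is a hypothetical name, not in this diff):

    static void mask_put(struct sw_flow_mask *mask)
    {
            ASSERT_OVSL();                  /* ovs_mutex guards the count */
            BUG_ON(!mask->ref_count);

            if (--mask->ref_count == 0) {
                    list_del_rcu(&mask->list);  /* readers may hold pointers */
                    kfree_rcu(mask, rcu);       /* free after a grace period */
            }
    }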
@@ -170,26 +188,9 @@ static void free_buckets(struct flex_array *buckets) | |||
170 | flex_array_free(buckets); | 188 | flex_array_free(buckets); |
171 | } | 189 | } |
172 | 190 | ||
191 | |||
173 | static void __table_instance_destroy(struct table_instance *ti) | 192 | static void __table_instance_destroy(struct table_instance *ti) |
174 | { | 193 | { |
175 | int i; | ||
176 | |||
177 | if (ti->keep_flows) | ||
178 | goto skip_flows; | ||
179 | |||
180 | for (i = 0; i < ti->n_buckets; i++) { | ||
181 | struct sw_flow *flow; | ||
182 | struct hlist_head *head = flex_array_get(ti->buckets, i); | ||
183 | struct hlist_node *n; | ||
184 | int ver = ti->node_ver; | ||
185 | |||
186 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { | ||
187 | hlist_del(&flow->hash_node[ver]); | ||
188 | ovs_flow_free(flow, false); | ||
189 | } | ||
190 | } | ||
191 | |||
192 | skip_flows: | ||
193 | free_buckets(ti->buckets); | 194 | free_buckets(ti->buckets); |
194 | kfree(ti); | 195 | kfree(ti); |
195 | } | 196 | } |
@@ -240,20 +241,38 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) | |||
240 | 241 | ||
241 | static void table_instance_destroy(struct table_instance *ti, bool deferred) | 242 | static void table_instance_destroy(struct table_instance *ti, bool deferred) |
242 | { | 243 | { |
244 | int i; | ||
245 | |||
243 | if (!ti) | 246 | if (!ti) |
244 | return; | 247 | return; |
245 | 248 | ||
249 | if (ti->keep_flows) | ||
250 | goto skip_flows; | ||
251 | |||
252 | for (i = 0; i < ti->n_buckets; i++) { | ||
253 | struct sw_flow *flow; | ||
254 | struct hlist_head *head = flex_array_get(ti->buckets, i); | ||
255 | struct hlist_node *n; | ||
256 | int ver = ti->node_ver; | ||
257 | |||
258 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { | ||
259 | hlist_del_rcu(&flow->hash_node[ver]); | ||
260 | ovs_flow_free(flow, deferred); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | skip_flows: | ||
246 | if (deferred) | 265 | if (deferred) |
247 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); | 266 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); |
248 | else | 267 | else |
249 | __table_instance_destroy(ti); | 268 | __table_instance_destroy(ti); |
250 | } | 269 | } |
251 | 270 | ||
252 | void ovs_flow_tbl_destroy(struct flow_table *table) | 271 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) |
253 | { | 272 | { |
254 | struct table_instance *ti = ovsl_dereference(table->ti); | 273 | struct table_instance *ti = ovsl_dereference(table->ti); |
255 | 274 | ||
256 | table_instance_destroy(ti, false); | 275 | table_instance_destroy(ti, deferred); |
257 | } | 276 | } |
258 | 277 | ||
259 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, | 278 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, |
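Annotation: moving the bucket walk out of __table_instance_destroy() and into table_instance_destroy() lets the deferred flag govern the flows as well — entries are unlinked with hlist_del_rcu() and handed to ovs_flow_free(flow, deferred), so a deferred destroy retires flows, masks and the bucket array together after a grace period. Hypothetical call sites for the two modes (the actual callers are in datapath.c, not shown here):

    /* Teardown while RCU readers may still reach the table: defer. */
    ovs_flow_tbl_destroy(&dp->table, true);

    /* Unwinding an init error before the table was ever published:
     * nothing can be reading it, so free immediately. */
    ovs_flow_tbl_destroy(&dp->table, false);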
@@ -362,7 +381,7 @@ static u32 flow_hash(const struct sw_flow_key *key, int key_start, | |||
362 | /* Make sure number of hash bytes are multiple of u32. */ | 381 | /* Make sure number of hash bytes are multiple of u32. */ |
363 | BUILD_BUG_ON(sizeof(long) % sizeof(u32)); | 382 | BUILD_BUG_ON(sizeof(long) % sizeof(u32)); |
364 | 383 | ||
365 | return jhash2(hash_key, hash_u32s, 0); | 384 | return arch_fast_hash2(hash_key, hash_u32s, 0); |
366 | } | 385 | } |
367 | 386 | ||
368 | static int flow_key_start(const struct sw_flow_key *key) | 387 | static int flow_key_start(const struct sw_flow_key *key) |
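Annotation: the include swap at the top of this file (linux/jhash.h for linux/hash.h) pays off here. arch_fast_hash2() keeps jhash2()'s contract — hash an array of u32 words with a seed — but dispatches to an arch-optimized implementation where one exists (a CRC32-instruction-based one on capable x86 at this point in history) and falls back to jhash2() otherwise, so flow_hash() behaves identically everywhere. A sketch of the call contract under those assumptions:

    #include <linux/hash.h>

    static u32 hash_key_range(const struct sw_flow_key *key,
                              int key_start, int key_end)
    {
            /* Mirror flow_hash() above: hash the masked byte range of
             * the key as 32-bit words, seed 0. */
            const u32 *words = (const u32 *)((const u8 *)key + key_start);
            u32 n_words = (key_end - key_start) >> 2;

            return arch_fast_hash2(words, n_words, 0);
    }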
@@ -429,11 +448,11 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti, | |||
429 | return NULL; | 448 | return NULL; |
430 | } | 449 | } |
431 | 450 | ||
432 | struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, | 451 | struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, |
433 | const struct sw_flow_key *key, | 452 | const struct sw_flow_key *key, |
434 | u32 *n_mask_hit) | 453 | u32 *n_mask_hit) |
435 | { | 454 | { |
436 | struct table_instance *ti = rcu_dereference(tbl->ti); | 455 | struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); |
437 | struct sw_flow_mask *mask; | 456 | struct sw_flow_mask *mask; |
438 | struct sw_flow *flow; | 457 | struct sw_flow *flow; |
439 | 458 | ||
@@ -447,6 +466,14 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, | |||
447 | return NULL; | 466 | return NULL; |
448 | } | 467 | } |
449 | 468 | ||
469 | struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, | ||
470 | const struct sw_flow_key *key) | ||
471 | { | ||
472 | u32 __always_unused n_mask_hit; | ||
473 | |||
474 | return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit); | ||
475 | } | ||
476 | |||
450 | int ovs_flow_tbl_num_masks(const struct flow_table *table) | 477 | int ovs_flow_tbl_num_masks(const struct flow_table *table) |
451 | { | 478 | { |
452 | struct sw_flow_mask *mask; | 479 | struct sw_flow_mask *mask; |
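Annotation: the wrapper splits the lookup API by caller intent, and the switch to rcu_dereference_ovsl() widens the locking contract so lookups are legal under either rcu_read_lock (packet path) or ovs_mutex (netlink paths). A hypothetical call-site contrast — caller names assumed, not part of this diff:

    /* Fast path: also report how many masks were tried, which feeds
     * the megaflow mask-hit accounting. */
    flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);

    /* Netlink flow get/del: no accounting, no dummy argument. */
    flow = ovs_flow_tbl_lookup(&dp->table, &key);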
@@ -478,16 +505,11 @@ static struct sw_flow_mask *mask_alloc(void) | |||
478 | 505 | ||
479 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); | 506 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); |
480 | if (mask) | 507 | if (mask) |
481 | mask->ref_count = 0; | 508 | mask->ref_count = 1; |
482 | 509 | ||
483 | return mask; | 510 | return mask; |
484 | } | 511 | } |
485 | 512 | ||
486 | static void mask_add_ref(struct sw_flow_mask *mask) | ||
487 | { | ||
488 | mask->ref_count++; | ||
489 | } | ||
490 | |||
491 | static bool mask_equal(const struct sw_flow_mask *a, | 513 | static bool mask_equal(const struct sw_flow_mask *a, |
492 | const struct sw_flow_mask *b) | 514 | const struct sw_flow_mask *b) |
493 | { | 515 | { |
@@ -514,11 +536,7 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl, | |||
514 | return NULL; | 536 | return NULL; |
515 | } | 537 | } |
516 | 538 | ||
517 | /** | 539 | /* Add 'mask' into the mask list, if it is not already there. */ |
518 | * add a new mask into the mask list. | ||
519 | * The caller needs to make sure that 'mask' is not the same | ||
520 | * as any masks that are already on the list. | ||
521 | */ | ||
522 | static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, | 540 | static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, |
523 | struct sw_flow_mask *new) | 541 | struct sw_flow_mask *new) |
524 | { | 542 | { |
@@ -532,9 +550,11 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, | |||
532 | mask->key = new->key; | 550 | mask->key = new->key; |
533 | mask->range = new->range; | 551 | mask->range = new->range; |
534 | list_add_rcu(&mask->list, &tbl->mask_list); | 552 | list_add_rcu(&mask->list, &tbl->mask_list); |
553 | } else { | ||
554 | BUG_ON(!mask->ref_count); | ||
555 | mask->ref_count++; | ||
535 | } | 556 | } |
536 | 557 | ||
537 | mask_add_ref(mask); | ||
538 | flow->mask = mask; | 558 | flow->mask = mask; |
539 | return 0; | 559 | return 0; |
540 | } | 560 | } |
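Annotation: with mask_alloc() now returning ref_count == 1, the fresh-allocation branch needs no extra step and only the reuse branch takes a reference, which is what made mask_add_ref() redundant. The acquire side as a sketch, mirroring the release path in ovs_flow_free() above (mask_get is a hypothetical name):

    static struct sw_flow_mask *mask_get(struct sw_flow_mask *mask)
    {
            ASSERT_OVSL();              /* same ovs_mutex protection */
            BUG_ON(!mask->ref_count);   /* a listed mask is never at zero */
            mask->ref_count++;
            return mask;
    }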
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index fbe45d5ad07d..baaeb101924d 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h | |||
@@ -55,12 +55,12 @@ struct flow_table { | |||
55 | int ovs_flow_init(void); | 55 | int ovs_flow_init(void); |
56 | void ovs_flow_exit(void); | 56 | void ovs_flow_exit(void); |
57 | 57 | ||
58 | struct sw_flow *ovs_flow_alloc(void); | 58 | struct sw_flow *ovs_flow_alloc(bool percpu_stats); |
59 | void ovs_flow_free(struct sw_flow *, bool deferred); | 59 | void ovs_flow_free(struct sw_flow *, bool deferred); |
60 | 60 | ||
61 | int ovs_flow_tbl_init(struct flow_table *); | 61 | int ovs_flow_tbl_init(struct flow_table *); |
62 | int ovs_flow_tbl_count(struct flow_table *table); | 62 | int ovs_flow_tbl_count(struct flow_table *table); |
63 | void ovs_flow_tbl_destroy(struct flow_table *table); | 63 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred); |
64 | int ovs_flow_tbl_flush(struct flow_table *flow_table); | 64 | int ovs_flow_tbl_flush(struct flow_table *flow_table); |
65 | 65 | ||
66 | int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, | 66 | int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, |
@@ -69,9 +69,11 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow); | |||
69 | int ovs_flow_tbl_num_masks(const struct flow_table *table); | 69 | int ovs_flow_tbl_num_masks(const struct flow_table *table); |
70 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, | 70 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, |
71 | u32 *bucket, u32 *idx); | 71 | u32 *bucket, u32 *idx); |
72 | struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, | 72 | struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *, |
73 | const struct sw_flow_key *, | 73 | const struct sw_flow_key *, |
74 | u32 *n_mask_hit); | 74 | u32 *n_mask_hit); |
75 | struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, | ||
76 | const struct sw_flow_key *); | ||
75 | 77 | ||
76 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | 78 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, |
77 | struct sw_flow_match *match); | 79 | struct sw_flow_match *match); |
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index d830a95f03a4..208dd9a26dd1 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
@@ -33,6 +33,9 @@ | |||
33 | #include "vport.h" | 33 | #include "vport.h" |
34 | #include "vport-internal_dev.h" | 34 | #include "vport-internal_dev.h" |
35 | 35 | ||
36 | static void ovs_vport_record_error(struct vport *, | ||
37 | enum vport_err_type err_type); | ||
38 | |||
36 | /* List of statically compiled vport implementations. Don't forget to also | 39 | /* List of statically compiled vport implementations. Don't forget to also |
37 | * add yours to the list at the bottom of vport.h. */ | 40 | * add yours to the list at the bottom of vport.h. */ |
38 | static const struct vport_ops *vport_ops_list[] = { | 41 | static const struct vport_ops *vport_ops_list[] = { |
@@ -136,14 +139,14 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, | |||
136 | vport->ops = ops; | 139 | vport->ops = ops; |
137 | INIT_HLIST_NODE(&vport->dp_hash_node); | 140 | INIT_HLIST_NODE(&vport->dp_hash_node); |
138 | 141 | ||
139 | vport->percpu_stats = alloc_percpu(struct pcpu_tstats); | 142 | vport->percpu_stats = alloc_percpu(struct pcpu_sw_netstats); |
140 | if (!vport->percpu_stats) { | 143 | if (!vport->percpu_stats) { |
141 | kfree(vport); | 144 | kfree(vport); |
142 | return ERR_PTR(-ENOMEM); | 145 | return ERR_PTR(-ENOMEM); |
143 | } | 146 | } |
144 | 147 | ||
145 | for_each_possible_cpu(i) { | 148 | for_each_possible_cpu(i) { |
146 | struct pcpu_tstats *vport_stats; | 149 | struct pcpu_sw_netstats *vport_stats; |
147 | vport_stats = per_cpu_ptr(vport->percpu_stats, i); | 150 | vport_stats = per_cpu_ptr(vport->percpu_stats, i); |
148 | u64_stats_init(&vport_stats->syncp); | 151 | u64_stats_init(&vport_stats->syncp); |
149 | } | 152 | } |
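Annotation: pcpu_tstats was a tunnel-private name for a structure several drivers were open-coding; the vport now uses the generic one. For reference, the definition this code relies on, as it appeared in include/linux/netdevice.h of the same era (quoted from memory — treat the exact field order as approximate):

    struct pcpu_sw_netstats {
            u64                     rx_packets;
            u64                     rx_bytes;
            u64                     tx_packets;
            u64                     tx_bytes;
            struct u64_stats_sync   syncp;
    };

The u64_stats_init() call in the loop matters on 32-bit kernels, where syncp is a real seqcount that lockdep wants initialized; on 64-bit it compiles to nothing.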
@@ -275,8 +278,8 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) | |||
275 | spin_unlock_bh(&vport->stats_lock); | 278 | spin_unlock_bh(&vport->stats_lock); |
276 | 279 | ||
277 | for_each_possible_cpu(i) { | 280 | for_each_possible_cpu(i) { |
278 | const struct pcpu_tstats *percpu_stats; | 281 | const struct pcpu_sw_netstats *percpu_stats; |
279 | struct pcpu_tstats local_stats; | 282 | struct pcpu_sw_netstats local_stats; |
280 | unsigned int start; | 283 | unsigned int start; |
281 | 284 | ||
282 | percpu_stats = per_cpu_ptr(vport->percpu_stats, i); | 285 | percpu_stats = per_cpu_ptr(vport->percpu_stats, i); |
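Annotation: the loop body that follows these context lines (elided by the hunk) is the standard u64_stats snapshot idiom — the reader samples one CPU's counters and retries if the seqcount shows a writer raced with it. A sketch of that idiom, assuming the _bh fetch variants this BH-context path would use at the time:

    unsigned int start;

    do {
            start = u64_stats_fetch_begin_bh(&percpu_stats->syncp);
            local_stats = *percpu_stats;        /* one CPU's counters */
    } while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start));

    stats->rx_bytes   += local_stats.rx_bytes;
    stats->rx_packets += local_stats.rx_packets;
    stats->tx_bytes   += local_stats.tx_bytes;
    stats->tx_packets += local_stats.tx_packets;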
@@ -344,7 +347,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) | |||
344 | void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, | 347 | void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, |
345 | struct ovs_key_ipv4_tunnel *tun_key) | 348 | struct ovs_key_ipv4_tunnel *tun_key) |
346 | { | 349 | { |
347 | struct pcpu_tstats *stats; | 350 | struct pcpu_sw_netstats *stats; |
348 | 351 | ||
349 | stats = this_cpu_ptr(vport->percpu_stats); | 352 | stats = this_cpu_ptr(vport->percpu_stats); |
350 | u64_stats_update_begin(&stats->syncp); | 353 | u64_stats_update_begin(&stats->syncp); |
@@ -370,7 +373,7 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb) | |||
370 | int sent = vport->ops->send(vport, skb); | 373 | int sent = vport->ops->send(vport, skb); |
371 | 374 | ||
372 | if (likely(sent > 0)) { | 375 | if (likely(sent > 0)) { |
373 | struct pcpu_tstats *stats; | 376 | struct pcpu_sw_netstats *stats; |
374 | 377 | ||
375 | stats = this_cpu_ptr(vport->percpu_stats); | 378 | stats = this_cpu_ptr(vport->percpu_stats); |
376 | 379 | ||
@@ -396,7 +399,8 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb) | |||
396 | * If using the vport generic stats layer indicate that an error of the given | 399 | * If using the vport generic stats layer indicate that an error of the given |
397 | * type has occurred. | 400 | * type has occurred. |
398 | */ | 401 | */ |
399 | void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type) | 402 | static void ovs_vport_record_error(struct vport *vport, |
403 | enum vport_err_type err_type) | ||
400 | { | 404 | { |
401 | spin_lock(&vport->stats_lock); | 405 | spin_lock(&vport->stats_lock); |
402 | 406 | ||
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 1a9fbcec6e1b..d7e50a17396c 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h | |||
@@ -87,7 +87,7 @@ struct vport { | |||
87 | struct hlist_node dp_hash_node; | 87 | struct hlist_node dp_hash_node; |
88 | const struct vport_ops *ops; | 88 | const struct vport_ops *ops; |
89 | 89 | ||
90 | struct pcpu_tstats __percpu *percpu_stats; | 90 | struct pcpu_sw_netstats __percpu *percpu_stats; |
91 | 91 | ||
92 | spinlock_t stats_lock; | 92 | spinlock_t stats_lock; |
93 | struct vport_err_stats err_stats; | 93 | struct vport_err_stats err_stats; |
@@ -192,7 +192,6 @@ static inline struct vport *vport_from_priv(const void *priv) | |||
192 | 192 | ||
193 | void ovs_vport_receive(struct vport *, struct sk_buff *, | 193 | void ovs_vport_receive(struct vport *, struct sk_buff *, |
194 | struct ovs_key_ipv4_tunnel *); | 194 | struct ovs_key_ipv4_tunnel *); |
195 | void ovs_vport_record_error(struct vport *, enum vport_err_type err_type); | ||
196 | 195 | ||
197 | /* List of statically compiled vport implementations. Don't forget to also | 196 | /* List of statically compiled vport implementations. Don't forget to also |
198 | * add yours to the list at the top of vport.c. */ | 197 | * add yours to the list at the top of vport.c. */ |