author    Jarno Rajahalme <jrajahalme@nicira.com>    2014-05-05 12:54:49 -0400
committer Pravin B Shelar <pshelar@nicira.com>       2014-05-22 19:27:34 -0400
commit    1139e241ec436b9e9610c7a33ac5c6657f87fda1 (patch)
tree      55695454aef14f07009148a6919b52c04308152a /net/openvswitch/flow.c
parent    091b64868b43ed84334c6623ea6a08497529d4ff (diff)
openvswitch: Compact sw_flow_key.
Minimize padding in sw_flow_key and move 'tp' to the main struct. These changes simplify the code that accesses the transport port numbers and the TCP flags, and make sw_flow_key 8 bytes smaller on 64-bit systems (128 -> 120 bytes). They also make the keys for IPv4 packets fit in one cache line.

There is a valid concern about the safety of packing struct ovs_key_ipv4_tunnel, as it would be possible to take the address of the tun_id member as a __be64 *, which could result in unaligned access on some systems. However:

- sw_flow_key itself is 64-bit aligned, so the tun_id within is always 64-bit aligned.
- We never make arrays of ovs_key_ipv4_tunnel (which would force every second tun_key to be misaligned).
- We never take the address of the tun_id into a __be64 *.
- Wherever struct ovs_key_ipv4_tunnel is used outside the sw_flow_key, it is on the stack (in tunnel input functions), where the compiler has full control of the alignment.

Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
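As a rough illustration of the layout change, here is a simplified sketch; the struct names below are hypothetical and heavily abridged, and the real definitions live in net/openvswitch/flow.h with many more fields:

#include <linux/types.h>	/* __be16, __be32, u8 */
#include <linux/in6.h>		/* struct in6_addr */

/* Hypothetical, abridged sketch of the idea only; not the kernel
 * definitions. */

/* Before: each address family carried its own copy of 'tp'. */
struct sw_flow_key_before {
	struct { u8 proto; } ip;
	struct {
		struct { __be32 src, dst; } addr;
		struct { __be16 src, dst, flags; } tp;	/* IPv4-only copy */
	} ipv4;
	struct {
		struct { struct in6_addr src, dst; } addr;
		struct { __be16 src, dst, flags; } tp;	/* IPv6-only copy */
	} ipv6;
};

/* After: a single 'tp' sits in the main struct, so callers write
 * key->tp.src/dst/flags for any address family and the padding shrinks. */
struct sw_flow_key_after {
	struct { u8 proto; } ip;
	struct { __be16 src, dst, flags; } tp;		/* shared */
	struct { struct { __be32 src, dst; } addr; } ipv4;
	struct { struct { struct in6_addr src, dst; } addr; } ipv6;
};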
Diffstat (limited to 'net/openvswitch/flow.c')
-rw-r--r--	net/openvswitch/flow.c	44
1 file changed, 19 insertions(+), 25 deletions(-)
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index e0fc12bbeeb1..6d8d2da0a8ec 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -64,17 +64,11 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 {
 	struct flow_stats *stats;
-	__be16 tcp_flags = 0;
+	__be16 tcp_flags = flow->key.tp.flags;
 	int node = numa_node_id();
 
 	stats = rcu_dereference(flow->stats[node]);
 
-	if (likely(flow->key.ip.proto == IPPROTO_TCP)) {
-		if (likely(flow->key.eth.type == htons(ETH_P_IP)))
-			tcp_flags = flow->key.ipv4.tp.flags;
-		else if (likely(flow->key.eth.type == htons(ETH_P_IPV6)))
-			tcp_flags = flow->key.ipv6.tp.flags;
-	}
 	/* Check if already have node-specific stats. */
 	if (likely(stats)) {
 		spin_lock(&stats->lock);
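With 'tp' shared at the top level, the hunk above can read the flags with a single unconditional load. A minimal sketch of why that is safe, assuming (as the extract path below suggests) that the key is zero-initialized before parsing and only the TCP branches ever set the flags; the helper name here is hypothetical:

/* Minimal sketch, not the kernel code: non-TCP flows simply keep the
 * zero-initialized value, so no protocol check is needed. */
static inline __be16 flow_key_tcp_flags(const struct sw_flow *flow)
{
	return flow->key.tp.flags;	/* 0 unless a TCP header was parsed */
}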
@@ -357,8 +351,8 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
 	/* The ICMPv6 type and code fields use the 16-bit transport port
 	 * fields, so we need to store them in 16-bit network byte order.
 	 */
-	key->ipv6.tp.src = htons(icmp->icmp6_type);
-	key->ipv6.tp.dst = htons(icmp->icmp6_code);
+	key->tp.src = htons(icmp->icmp6_type);
+	key->tp.dst = htons(icmp->icmp6_code);
 
 	if (icmp->icmp6_code == 0 &&
 	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
@@ -520,21 +514,21 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
 	if (key->ip.proto == IPPROTO_TCP) {
 		if (tcphdr_ok(skb)) {
 			struct tcphdr *tcp = tcp_hdr(skb);
-			key->ipv4.tp.src = tcp->source;
-			key->ipv4.tp.dst = tcp->dest;
-			key->ipv4.tp.flags = TCP_FLAGS_BE16(tcp);
+			key->tp.src = tcp->source;
+			key->tp.dst = tcp->dest;
+			key->tp.flags = TCP_FLAGS_BE16(tcp);
 		}
 	} else if (key->ip.proto == IPPROTO_UDP) {
 		if (udphdr_ok(skb)) {
 			struct udphdr *udp = udp_hdr(skb);
-			key->ipv4.tp.src = udp->source;
-			key->ipv4.tp.dst = udp->dest;
+			key->tp.src = udp->source;
+			key->tp.dst = udp->dest;
 		}
 	} else if (key->ip.proto == IPPROTO_SCTP) {
 		if (sctphdr_ok(skb)) {
 			struct sctphdr *sctp = sctp_hdr(skb);
-			key->ipv4.tp.src = sctp->source;
-			key->ipv4.tp.dst = sctp->dest;
+			key->tp.src = sctp->source;
+			key->tp.dst = sctp->dest;
 		}
 	} else if (key->ip.proto == IPPROTO_ICMP) {
 		if (icmphdr_ok(skb)) {
@@ -542,8 +536,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
 			/* The ICMP type and code fields use the 16-bit
 			 * transport port fields, so we need to store
 			 * them in 16-bit network byte order. */
-			key->ipv4.tp.src = htons(icmp->type);
-			key->ipv4.tp.dst = htons(icmp->code);
+			key->tp.src = htons(icmp->type);
+			key->tp.dst = htons(icmp->code);
 		}
 	}
 
@@ -589,21 +583,21 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
 	if (key->ip.proto == NEXTHDR_TCP) {
 		if (tcphdr_ok(skb)) {
 			struct tcphdr *tcp = tcp_hdr(skb);
-			key->ipv6.tp.src = tcp->source;
-			key->ipv6.tp.dst = tcp->dest;
-			key->ipv6.tp.flags = TCP_FLAGS_BE16(tcp);
+			key->tp.src = tcp->source;
+			key->tp.dst = tcp->dest;
+			key->tp.flags = TCP_FLAGS_BE16(tcp);
 		}
 	} else if (key->ip.proto == NEXTHDR_UDP) {
 		if (udphdr_ok(skb)) {
 			struct udphdr *udp = udp_hdr(skb);
-			key->ipv6.tp.src = udp->source;
-			key->ipv6.tp.dst = udp->dest;
+			key->tp.src = udp->source;
+			key->tp.dst = udp->dest;
 		}
 	} else if (key->ip.proto == NEXTHDR_SCTP) {
 		if (sctphdr_ok(skb)) {
 			struct sctphdr *sctp = sctp_hdr(skb);
-			key->ipv6.tp.src = sctp->source;
-			key->ipv6.tp.dst = sctp->dest;
+			key->tp.src = sctp->source;
+			key->tp.dst = sctp->dest;
 		}
 	} else if (key->ip.proto == NEXTHDR_ICMP) {
 		if (icmp6hdr_ok(skb)) {