Diffstat (limited to 'net/openvswitch/flow.c')
-rw-r--r--	net/openvswitch/flow.c	168
1 file changed, 97 insertions, 71 deletions
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index dca3b1e2acf0..9d81d2c7bf82 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 			   const struct sk_buff *skb)
 {
-	struct flow_stats *stats;
+	struct sw_flow_stats *stats;
 	unsigned int cpu = smp_processor_id();
 	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
@@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 			if (likely(flow->stats_last_writer != -1) &&
 			    likely(!rcu_access_pointer(flow->stats[cpu]))) {
 				/* Try to allocate CPU-specific stats. */
-				struct flow_stats *new_stats;
+				struct sw_flow_stats *new_stats;
 
 				new_stats =
 					kmem_cache_alloc_node(flow_stats_cache,
@@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
 
 	/* We open code this to make sure cpu 0 is always considered */
 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
-		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
+		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
 
 		if (stats) {
 			/* Local CPU may write on non-local stats, so we must
@@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
 
 	/* We open code this to make sure cpu 0 is always considered */
 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
-		struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
+		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
 
 		if (stats) {
 			spin_lock_bh(&stats->lock);
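
Note: the four hunks above only rename the per-CPU statistics type from struct flow_stats to struct sw_flow_stats; the logic is untouched. For context, the record being renamed is declared in net/openvswitch/flow.h and looks roughly like the sketch below (comments paraphrased; exact layout may differ between kernel versions):

/* Rough sketch of the per-flow, per-CPU statistics record, based on
 * net/openvswitch/flow.h; treat field order and comments as approximate.
 */
#include <linux/types.h>
#include <linux/spinlock.h>

struct sw_flow_stats {
	u64 packet_count;	/* Number of packets matched. */
	u64 byte_count;		/* Number of bytes matched. */
	unsigned long used;	/* Last used time (in jiffies). */
	spinlock_t lock;	/* Lock for atomic stats update. */
	__be16 tcp_flags;	/* Union of seen TCP flags. */
};
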
@@ -523,78 +523,15 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
 }
 
 /**
- * key_extract - extracts a flow key from an Ethernet frame.
+ * key_extract_l3l4 - extracts L3/L4 header information.
  * @skb: sk_buff that contains the frame, with skb->data pointing to the
- * Ethernet header
+ * L3 header
  * @key: output flow key
  *
- * The caller must ensure that skb->len >= ETH_HLEN.
- *
- * Returns 0 if successful, otherwise a negative errno value.
- *
- * Initializes @skb header fields as follows:
- *
- *    - skb->mac_header: the L2 header.
- *
- *    - skb->network_header: just past the L2 header, or just past the
- *      VLAN header, to the first byte of the L2 payload.
- *
- *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
- *      on output, then just past the IP header, if one is present and
- *      of a correct length, otherwise the same as skb->network_header.
- *      For other key->eth.type values it is left untouched.
- *
- *    - skb->protocol: the type of the data starting at skb->network_header.
- *      Equals to key->eth.type.
  */
-static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
+static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
 {
 	int error;
-	struct ethhdr *eth;
-
-	/* Flags are always used as part of stats */
-	key->tp.flags = 0;
-
-	skb_reset_mac_header(skb);
-
-	/* Link layer. */
-	clear_vlan(key);
-	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
-		if (unlikely(eth_type_vlan(skb->protocol)))
-			return -EINVAL;
-
-		skb_reset_network_header(skb);
-		key->eth.type = skb->protocol;
-	} else {
-		eth = eth_hdr(skb);
-		ether_addr_copy(key->eth.src, eth->h_source);
-		ether_addr_copy(key->eth.dst, eth->h_dest);
-
-		__skb_pull(skb, 2 * ETH_ALEN);
-		/* We are going to push all headers that we pull, so no need to
-		 * update skb->csum here.
-		 */
-
-		if (unlikely(parse_vlan(skb, key)))
-			return -ENOMEM;
-
-		key->eth.type = parse_ethertype(skb);
-		if (unlikely(key->eth.type == htons(0)))
-			return -ENOMEM;
-
-		/* Multiple tagged packets need to retain TPID to satisfy
-		 * skb_vlan_pop(), which will later shift the ethertype into
-		 * skb->protocol.
-		 */
-		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
-			skb->protocol = key->eth.cvlan.tpid;
-		else
-			skb->protocol = key->eth.type;
-
-		skb_reset_network_header(skb);
-		__skb_push(skb, skb->data - skb_mac_header(skb));
-	}
-	skb_reset_mac_len(skb);
 
 	/* Network layer. */
 	if (key->eth.type == htons(ETH_P_IP)) {
@@ -623,6 +560,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 		offset = nh->frag_off & htons(IP_OFFSET);
 		if (offset) {
 			key->ip.frag = OVS_FRAG_TYPE_LATER;
+			memset(&key->tp, 0, sizeof(key->tp));
 			return 0;
 		}
 		if (nh->frag_off & htons(IP_MF) ||
@@ -740,8 +678,10 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 			return error;
 		}
 
-		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
+		if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
+			memset(&key->tp, 0, sizeof(key->tp));
 			return 0;
+		}
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
 			key->ip.frag = OVS_FRAG_TYPE_FIRST;
 
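
Note on the two memset() additions above: a later IP fragment (OVS_FRAG_TYPE_LATER) carries no transport header, so the L4 fields of the key are now cleared explicitly before the early return. This matters once key_extract_l3l4() can be called on its own to refresh an already-populated key, since the key may still hold transport data from an earlier extraction. For reference, sizeof(key->tp) covers roughly the following fields; this is a sketch of the anonymous tp member of struct sw_flow_key from net/openvswitch/flow.h, wrapped in a named struct only for illustration:

#include <linux/types.h>

/* Approximate shape of the transport-layer portion of the flow key that
 * the added memset() calls zero out; the struct tag is invented for this
 * sketch, the real member is an anonymous struct named "tp".
 */
struct tp_sketch {
	__be16 src;	/* TCP/UDP/SCTP source port. */
	__be16 dst;	/* TCP/UDP/SCTP destination port. */
	__be16 flags;	/* TCP flags. */
};
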
@@ -788,6 +728,92 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 	return 0;
 }
 
+/**
+ * key_extract - extracts a flow key from an Ethernet frame.
+ * @skb: sk_buff that contains the frame, with skb->data pointing to the
+ * Ethernet header
+ * @key: output flow key
+ *
+ * The caller must ensure that skb->len >= ETH_HLEN.
+ *
+ * Returns 0 if successful, otherwise a negative errno value.
+ *
+ * Initializes @skb header fields as follows:
+ *
+ *    - skb->mac_header: the L2 header.
+ *
+ *    - skb->network_header: just past the L2 header, or just past the
+ *      VLAN header, to the first byte of the L2 payload.
+ *
+ *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
+ *      on output, then just past the IP header, if one is present and
+ *      of a correct length, otherwise the same as skb->network_header.
+ *      For other key->eth.type values it is left untouched.
+ *
+ *    - skb->protocol: the type of the data starting at skb->network_header.
+ *      Equals to key->eth.type.
+ */
+static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
+{
+	struct ethhdr *eth;
+
+	/* Flags are always used as part of stats */
+	key->tp.flags = 0;
+
+	skb_reset_mac_header(skb);
+
+	/* Link layer. */
+	clear_vlan(key);
+	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
+		if (unlikely(eth_type_vlan(skb->protocol)))
+			return -EINVAL;
+
+		skb_reset_network_header(skb);
+		key->eth.type = skb->protocol;
+	} else {
+		eth = eth_hdr(skb);
+		ether_addr_copy(key->eth.src, eth->h_source);
+		ether_addr_copy(key->eth.dst, eth->h_dest);
+
+		__skb_pull(skb, 2 * ETH_ALEN);
+		/* We are going to push all headers that we pull, so no need to
+		 * update skb->csum here.
+		 */
+
+		if (unlikely(parse_vlan(skb, key)))
+			return -ENOMEM;
+
+		key->eth.type = parse_ethertype(skb);
+		if (unlikely(key->eth.type == htons(0)))
+			return -ENOMEM;
+
+		/* Multiple tagged packets need to retain TPID to satisfy
+		 * skb_vlan_pop(), which will later shift the ethertype into
+		 * skb->protocol.
+		 */
+		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
+			skb->protocol = key->eth.cvlan.tpid;
+		else
+			skb->protocol = key->eth.type;
+
+		skb_reset_network_header(skb);
+		__skb_push(skb, skb->data - skb_mac_header(skb));
+	}
+
+	skb_reset_mac_len(skb);
+
+	/* Fill out L3/L4 key info, if any */
+	return key_extract_l3l4(skb, key);
+}
+
+/* In the case of conntrack fragment handling it expects L3 headers,
+ * add a helper.
+ */
+int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
+{
+	return key_extract_l3l4(skb, key);
+}
+
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
 {
 	int res;
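
Taken together, these hunks split the old key_extract() into an L2 part (still named key_extract()) and a new key_extract_l3l4(), and expose the latter via ovs_flow_key_update_l3l4() for the conntrack fragment-handling path mentioned in the added comment. A hypothetical caller that has just reassembled IP fragments could refresh only the L3/L4 portion of an existing key along these lines (a sketch, not code from this diff; refresh_key_after_defrag() is an invented name):

#include <linux/skbuff.h>
#include "flow.h"

/* Hypothetical sketch: after defragmentation the skb once again carries a
 * complete L3/L4 header chain, but the flow key still describes the first
 * fragment.  Re-run only the L3/L4 extraction; the L2 fields of the key
 * and skb->mac_header set up earlier by key_extract() remain valid.
 */
static int refresh_key_after_defrag(struct sk_buff *skb,
				    struct sw_flow_key *key)
{
	/* skb->data must point at the L3 header, per the kernel-doc
	 * comment on key_extract_l3l4().
	 */
	return ovs_flow_key_update_l3l4(skb, key);
}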