author     stephen hemminger <shemminger@vyatta.com>   2012-09-25 07:02:48 -0400
committer  David S. Miller <davem@davemloft.net>       2012-09-27 18:12:37 -0400
commit     eccc1bb8d4b4cf68d3c9becb083fa94ada7d495c (patch)
tree       b0be7efd0c4a4eed26ffd63863dc372d3b1f2ca0
parent     b0558ef24a792906914fcad277f3befe2420e618 (diff)
tunnel: drop packet if ECN present with not-ECT
Linux tunnels were written before RFC 6040 and therefore never implemented
the corner case of ECN getting set in the outer header while the inner
header is not ready for it.

Section 4.2. Default Tunnel Egress Behaviour.

 o If the inner ECN field is Not-ECT, the decapsulator MUST NOT
   propagate any other ECN codepoint onwards. This is because the
   inner Not-ECT marking is set by transports that rely on dropped
   packets as an indication of congestion and would not understand or
   respond to any other ECN codepoint [RFC4774]. Specifically:

   * If the inner ECN field is Not-ECT and the outer ECN field is
     CE, the decapsulator MUST drop the packet.

   * If the inner ECN field is Not-ECT and the outer ECN field is
     Not-ECT, ECT(0), or ECT(1), the decapsulator MUST forward the
     outgoing packet with the ECN field cleared to Not-ECT.

This patch moves the ECN decapsulation logic out of the individual tunnels
into a common place.

It also adds logging to allow detecting broken systems that set ECN bits
incorrectly when tunneling (or an intermediate router might be changing
the header).

Overloads rx_frame_errors to keep track of ECN-related errors.

Thanks to Chris Wright who caught this while reviewing the new VXLAN
tunnel.

This code was tested by injecting faulty logic at the other end of the GRE
tunnel to send incorrectly encapsulated packets.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
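The Not-ECT row of Figure 4 reduces to a three-way outcome (forward, log and forward, or drop), which the new INET_ECN_decapsulate() helper below encodes as return values 0, 1 and 2. As an illustration only (the authoritative version is the helper added to include/net/inet_ecn.h in this patch, and the function name here is hypothetical), the rule can be written as a standalone sketch; the enum simply mirrors the existing INET_ECN_* codepoint values, a two-bit field under mask 0x3:

/* Illustration of the Not-ECT row of Figure 4 (RFC 6040, section 4.2).
 * Constants mirror the INET_ECN_* codepoints from include/net/inet_ecn.h.
 * Return: 0 = forward unchanged, 1 = forward but log the broken peer,
 *         2 = drop the packet.
 */
enum { NOT_ECT = 0, ECT_1 = 1, ECT_0 = 2, CE = 3, ECN_MASK = 3 };

static int decap_outcome_for_not_ect_inner(unsigned char outer)
{
        switch (outer & ECN_MASK) {
        case NOT_ECT:
                return 0;       /* nothing to propagate */
        case ECT_0:
        case ECT_1:
                return 1;       /* encapsulator set ECT although inner is Not-ECT */
        case CE:
                return 2;       /* congestion mark the inner transport cannot see */
        }
        return 0;
}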
-rw-r--r--  include/net/inet_ecn.h  76
-rw-r--r--  net/ipv4/ip_gre.c       38
-rw-r--r--  net/ipv4/ipip.c         42
-rw-r--r--  net/ipv6/ip6_gre.c      54
4 files changed, 147 insertions, 63 deletions
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 2fa14691869c..aab73757bc4d 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -15,6 +15,8 @@ enum {
         INET_ECN_MASK = 3,
 };

+extern int sysctl_tunnel_ecn_log;
+
 static inline int INET_ECN_is_ce(__u8 dsfield)
 {
         return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
@@ -145,4 +147,78 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
         return 0;
 }

+/*
+ * RFC 6040 4.2
+ *  To decapsulate the inner header at the tunnel egress, a compliant
+ *  tunnel egress MUST set the outgoing ECN field to the codepoint at the
+ *  intersection of the appropriate arriving inner header (row) and outer
+ *  header (column) in Figure 4
+ *
+ *      +---------+------------------------------------------------+
+ *      |Arriving |            Arriving Outer Header               |
+ *      |   Inner +---------+------------+------------+------------+
+ *      |  Header | Not-ECT |   ECT(0)   |   ECT(1)   |     CE     |
+ *      +---------+---------+------------+------------+------------+
+ *      | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)|
+ *      |  ECT(0) |  ECT(0) |   ECT(0)   |   ECT(1)   |     CE     |
+ *      |  ECT(1) |  ECT(1) | ECT(1) (!) |   ECT(1)   |     CE     |
+ *      |    CE   |    CE   |     CE     |   CE(!!!)  |     CE     |
+ *      +---------+---------+------------+------------+------------+
+ *
+ *             Figure 4: New IP in IP Decapsulation Behaviour
+ *
+ *  returns 0 on success
+ *          1 if something is broken and should be logged (!!! above)
+ *          2 if packet should be dropped
+ */
+static inline int INET_ECN_decapsulate(struct sk_buff *skb,
+                                       __u8 outer, __u8 inner)
+{
+        if (INET_ECN_is_not_ect(inner)) {
+                switch (outer & INET_ECN_MASK) {
+                case INET_ECN_NOT_ECT:
+                        return 0;
+                case INET_ECN_ECT_0:
+                case INET_ECN_ECT_1:
+                        return 1;
+                case INET_ECN_CE:
+                        return 2;
+                }
+        }
+
+        if (INET_ECN_is_ce(outer))
+                INET_ECN_set_ce(skb);
+
+        return 0;
+}
+
+static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
+                                     struct sk_buff *skb)
+{
+        __u8 inner;
+
+        if (skb->protocol == htons(ETH_P_IP))
+                inner = ip_hdr(skb)->tos;
+        else if (skb->protocol == htons(ETH_P_IPV6))
+                inner = ipv6_get_dsfield(ipv6_hdr(skb));
+        else
+                return 0;
+
+        return INET_ECN_decapsulate(skb, oiph->tos, inner);
+}
+
+static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
+                                      struct sk_buff *skb)
+{
+        __u8 inner;
+
+        if (skb->protocol == htons(ETH_P_IP))
+                inner = ip_hdr(skb)->tos;
+        else if (skb->protocol == htons(ETH_P_IPV6))
+                inner = ipv6_get_dsfield(ipv6_hdr(skb));
+        else
+                return 0;
+
+        return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
+}
 #endif
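The three receive paths changed below (ipgre_rcv(), ipip_rcv() and ip6gre_rcv()) all consume these return codes the same way. Condensed from those hunks, and using the driver-local variables (tunnel, iph, skb, err) visible there, the shared pattern is roughly:

        /* Condensed from the gre/ipip/ip6_gre hunks that follow: any non-zero
         * return means a broken encapsulator and is logged (rate limited);
         * only a return of 2 (inner Not-ECT, outer CE) drops the packet and
         * bumps rx_frame_errors, which get_stats64 now reports.
         */
        err = IP_ECN_decapsulate(iph, skb);     /* IP6_ECN_decapsulate() in ip6_gre */
        if (unlikely(err)) {
                if (log_ecn_error)
                        net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                             &iph->saddr, iph->tos);
                if (err > 1) {
                        ++tunnel->dev->stats.rx_frame_errors;
                        ++tunnel->dev->stats.rx_errors;
                        goto drop;
                }
        }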
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 1c012cb2cb94..ef0b861ce044 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -120,6 +120,10 @@
    Alexey Kuznetsov.
  */

+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 static int ipgre_tunnel_init(struct net_device *dev);
 static void ipgre_tunnel_setup(struct net_device *dev);
@@ -204,7 +208,9 @@ static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
         tot->rx_crc_errors = dev->stats.rx_crc_errors;
         tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
         tot->rx_length_errors = dev->stats.rx_length_errors;
+        tot->rx_frame_errors = dev->stats.rx_frame_errors;
         tot->rx_errors = dev->stats.rx_errors;
+
         tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
         tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
         tot->tx_dropped = dev->stats.tx_dropped;
@@ -587,17 +593,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
         t->err_time = jiffies;
 }

-static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
-{
-        if (INET_ECN_is_ce(iph->tos)) {
-                if (skb->protocol == htons(ETH_P_IP)) {
-                        IP_ECN_set_ce(ip_hdr(skb));
-                } else if (skb->protocol == htons(ETH_P_IPV6)) {
-                        IP6_ECN_set_ce(ipv6_hdr(skb));
-                }
-        }
-}
-
 static inline u8
 ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
 {
@@ -620,6 +615,7 @@ static int ipgre_rcv(struct sk_buff *skb)
         struct ip_tunnel *tunnel;
         int offset = 4;
         __be16 gre_proto;
+        int err;

         if (!pskb_may_pull(skb, 16))
                 goto drop;
@@ -723,17 +719,27 @@ static int ipgre_rcv(struct sk_buff *skb)
                         skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
                 }

+                __skb_tunnel_rx(skb, tunnel->dev);
+
+                skb_reset_network_header(skb);
+                err = IP_ECN_decapsulate(iph, skb);
+                if (unlikely(err)) {
+                        if (log_ecn_error)
+                                net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                                     &iph->saddr, iph->tos);
+                        if (err > 1) {
+                                ++tunnel->dev->stats.rx_frame_errors;
+                                ++tunnel->dev->stats.rx_errors;
+                                goto drop;
+                        }
+                }
+
                 tstats = this_cpu_ptr(tunnel->dev->tstats);
                 u64_stats_update_begin(&tstats->syncp);
                 tstats->rx_packets++;
                 tstats->rx_bytes += skb->len;
                 u64_stats_update_end(&tstats->syncp);

-                __skb_tunnel_rx(skb, tunnel->dev);
-
-                skb_reset_network_header(skb);
-                ipgre_ecn_decapsulate(iph, skb);
-
                 netif_rx(skb);

                 return 0;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 618bde867ac1..e15b45297c09 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -120,6 +120,10 @@
 #define HASH_SIZE  16
 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 static int ipip_net_id __read_mostly;
 struct ipip_net {
         struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
@@ -400,28 +404,18 @@ out:
         return err;
 }

-static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph,
-                                        struct sk_buff *skb)
-{
-        struct iphdr *inner_iph = ip_hdr(skb);
-
-        if (INET_ECN_is_ce(outer_iph->tos))
-                IP_ECN_set_ce(inner_iph);
-}
-
 static int ipip_rcv(struct sk_buff *skb)
 {
         struct ip_tunnel *tunnel;
         const struct iphdr *iph = ip_hdr(skb);
+        int err;

         tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
         if (tunnel != NULL) {
                 struct pcpu_tstats *tstats;

-                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-                        kfree_skb(skb);
-                        return 0;
-                }
+                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+                        goto drop;

                 secpath_reset(skb);

@@ -430,21 +424,35 @@ static int ipip_rcv(struct sk_buff *skb)
                 skb->protocol = htons(ETH_P_IP);
                 skb->pkt_type = PACKET_HOST;

+                __skb_tunnel_rx(skb, tunnel->dev);
+
+                err = IP_ECN_decapsulate(iph, skb);
+                if (unlikely(err)) {
+                        if (log_ecn_error)
+                                net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                                     &iph->saddr, iph->tos);
+                        if (err > 1) {
+                                ++tunnel->dev->stats.rx_frame_errors;
+                                ++tunnel->dev->stats.rx_errors;
+                                goto drop;
+                        }
+                }
+
                 tstats = this_cpu_ptr(tunnel->dev->tstats);
                 u64_stats_update_begin(&tstats->syncp);
                 tstats->rx_packets++;
                 tstats->rx_bytes += skb->len;
                 u64_stats_update_end(&tstats->syncp);

-                __skb_tunnel_rx(skb, tunnel->dev);
-
-                ipip_ecn_decapsulate(iph, skb);
-
                 netif_rx(skb);
                 return 0;
         }

         return -1;
+
+drop:
+        kfree_skb(skb);
+        return 0;
 }

 /*
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index b987d4db790f..613a16647741 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -56,6 +56,10 @@
 #include <net/ip6_tunnel.h>


+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
 #define IPV6_TCLASS_SHIFT 20

@@ -149,7 +153,9 @@ static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
         tot->rx_crc_errors = dev->stats.rx_crc_errors;
         tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
         tot->rx_length_errors = dev->stats.rx_length_errors;
+        tot->rx_frame_errors = dev->stats.rx_frame_errors;
         tot->rx_errors = dev->stats.rx_errors;
+
         tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
         tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
         tot->tx_dropped = dev->stats.tx_dropped;
@@ -489,28 +495,6 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
         t->err_time = jiffies;
 }

-static inline void ip6gre_ecn_decapsulate_ipv4(const struct ip6_tnl *t,
-                const struct ipv6hdr *ipv6h, struct sk_buff *skb)
-{
-        __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
-
-        if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
-                ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
-
-        if (INET_ECN_is_ce(dsfield))
-                IP_ECN_set_ce(ip_hdr(skb));
-}
-
-static inline void ip6gre_ecn_decapsulate_ipv6(const struct ip6_tnl *t,
-                const struct ipv6hdr *ipv6h, struct sk_buff *skb)
-{
-        if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
-                ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
-
-        if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
-                IP6_ECN_set_ce(ipv6_hdr(skb));
-}
-
 static int ip6gre_rcv(struct sk_buff *skb)
 {
         const struct ipv6hdr *ipv6h;
@@ -522,6 +506,7 @@ static int ip6gre_rcv(struct sk_buff *skb)
         struct ip6_tnl *tunnel;
         int offset = 4;
         __be16 gre_proto;
+        int err;

         if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
                 goto drop;
@@ -625,20 +610,29 @@ static int ip6gre_rcv(struct sk_buff *skb)
                         skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
                 }

+                __skb_tunnel_rx(skb, tunnel->dev);
+
+                skb_reset_network_header(skb);
+
+                err = IP6_ECN_decapsulate(ipv6h, skb);
+                if (unlikely(err)) {
+                        if (log_ecn_error)
+                                net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
+                                                     &ipv6h->saddr,
+                                                     ipv6_get_dsfield(ipv6h));
+                        if (err > 1) {
+                                ++tunnel->dev->stats.rx_frame_errors;
+                                ++tunnel->dev->stats.rx_errors;
+                                goto drop;
+                        }
+                }
+
                 tstats = this_cpu_ptr(tunnel->dev->tstats);
                 u64_stats_update_begin(&tstats->syncp);
                 tstats->rx_packets++;
                 tstats->rx_bytes += skb->len;
                 u64_stats_update_end(&tstats->syncp);

-                __skb_tunnel_rx(skb, tunnel->dev);
-
-                skb_reset_network_header(skb);
-                if (skb->protocol == htons(ETH_P_IP))
-                        ip6gre_ecn_decapsulate_ipv4(tunnel, ipv6h, skb);
-                else if (skb->protocol == htons(ETH_P_IPV6))
-                        ip6gre_ecn_decapsulate_ipv6(tunnel, ipv6h, skb);
-
                 netif_rx(skb);

                 return 0;