 include/linux/udp.h |  6 ++++++
 include/net/udp.h   | 45 +++++++++++++++++++++++++++++++++++++--------
 net/ipv4/udp.c      | 23 ++++++++++++++++++++++-
 net/ipv6/udp.c      | 24 +++++++++++++++++++++++-
 4 files changed, 88 insertions(+), 10 deletions(-)
diff --git a/include/linux/udp.h b/include/linux/udp.h
index e23d5024f42f..0a9c54e76305 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -132,6 +132,12 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
 	}
 }
 
+static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+{
+	return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
+	       skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
+}
+
 #define udp_portaddr_for_each_entry(__sk, list) \
 	hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
 
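The new helper fires only when a socket that has not opted in to UDP GRO receives an aggregated SKB_GSO_UDP_L4 packet. The gro_enabled flag it tests is set via the UDP_GRO socket option introduced by the same series; as a minimal user-space sketch (my illustration, not part of this patch):

/* Opting a socket in to UDP GRO sets the gro_enabled flag that
 * udp_unexpected_gso() tests, so aggregated packets reach it as-is
 * instead of being segmented. Assumes a kernel UAPI exposing UDP_GRO.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_GRO
#define UDP_GRO 104	/* from include/uapi/linux/udp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int one = 1;

	/* IPPROTO_UDP == SOL_UDP */
	if (fd < 0 || setsockopt(fd, IPPROTO_UDP, UDP_GRO, &one, sizeof(one)))
		perror("UDP_GRO");
	return 0;
}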
diff --git a/include/net/udp.h b/include/net/udp.h
index a496e441645e..eccca2325ee6 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -417,17 +417,24 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
 } while(0)
 
 #if IS_ENABLED(CONFIG_IPV6)
-#define __UDPX_INC_STATS(sk, field)					\
-do {									\
-	if ((sk)->sk_family == AF_INET)					\
-		__UDP_INC_STATS(sock_net(sk), field, 0);		\
-	else								\
-		__UDP6_INC_STATS(sock_net(sk), field, 0);		\
-} while (0)
+#define __UDPX_MIB(sk, ipv4)						\
+({									\
+	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+				 sock_net(sk)->mib.udp_statistics) :	\
+	       (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
+				 sock_net(sk)->mib.udp_stats_in6);	\
+})
 #else
-#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
+#define __UDPX_MIB(sk, ipv4)						\
+({									\
+	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
+			 sock_net(sk)->mib.udp_statistics;		\
+})
 #endif
 
+#define __UDPX_INC_STATS(sk, field)					\
+	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
+
 #ifdef CONFIG_PROC_FS
 struct udp_seq_afinfo {
 	sa_family_t family;
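The rewrite replaces the old do { } while (0) statement macro with a GNU C statement expression, so __UDPX_MIB() evaluates to the selected MIB table and can be passed straight into __SNMP_INC_STATS() or SNMP_ADD_STATS(). A standalone sketch of the pattern (hypothetical names, strings standing in for the MIB tables; needs gcc or clang, since ({ }) is a GNU extension):

#include <stdio.h>

/* same shape as __UDPX_MIB(): the ({ }) block is an expression whose
 * value is its last statement, unlike a do/while statement macro */
#define PICK_MIB(lite, v4)						\
({									\
	(v4) ? ((lite) ? "udplite_statistics" : "udp_statistics") :	\
	       ((lite) ? "udplite_stats_in6" : "udp_stats_in6");	\
})

int main(void)
{
	/* usable as a function argument, like __UDPX_MIB(sk, ipv4) */
	puts(PICK_MIB(0, 1));	/* -> udp_statistics */
	puts(PICK_MIB(1, 0));	/* -> udplite_stats_in6 */
	return 0;
}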
@@ -461,4 +468,26 @@ DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 void udpv6_encap_enable(void);
 #endif
 
+static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+					      struct sk_buff *skb, bool ipv4)
+{
+	struct sk_buff *segs;
+
+	/* the GSO CB lays after the UDP one, no need to save and restore any
+	 * CB fragment
+	 */
+	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+	if (unlikely(IS_ERR_OR_NULL(segs))) {
+		int segs_nr = skb_shinfo(skb)->gso_segs;
+
+		atomic_add(segs_nr, &sk->sk_drops);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	consume_skb(skb);
+	return segs;
+}
+
 #endif /* _UDP_H */
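udp_rcv_segment() has a strict ownership contract: on segmentation failure the original skb is charged for every packet it aggregated, both in sk_drops and in UDP_MIB_INERRORS, and then freed; on success it is consumed and the caller owns the returned list. A user-space model of that contract (the pkt type and do_segment() are hypothetical stand-ins, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	struct pkt *next;
	int gso_segs;	/* packets aggregated into this one */
};

static long sk_drops, mib_inerrors;

/* stand-in for __skb_gso_segment(): returns a list, or NULL on failure */
static struct pkt *do_segment(struct pkt *p, int fail)
{
	struct pkt *head = NULL;

	if (fail)
		return NULL;
	for (int i = 0; i < p->gso_segs; i++) {
		struct pkt *s = calloc(1, sizeof(*s));

		s->gso_segs = 1;
		s->next = head;
		head = s;
	}
	return head;
}

/* mirrors udp_rcv_segment(): error path charges all aggregated packets
 * before freeing the original; success path consumes the original */
static struct pkt *rcv_segment(struct pkt *p, int fail)
{
	struct pkt *segs = do_segment(p, fail);

	if (!segs) {				/* IS_ERR_OR_NULL() in-kernel */
		sk_drops += p->gso_segs;	/* atomic_add(&sk->sk_drops) */
		mib_inerrors += p->gso_segs;	/* SNMP_ADD_STATS() */
		free(p);			/* kfree_skb() */
		return NULL;
	}
	free(p);				/* consume_skb() */
	return segs;
}

int main(void)
{
	struct pkt *p = calloc(1, sizeof(*p));

	p->gso_segs = 4;
	rcv_segment(p, 1);	/* force the failure path */
	printf("drops=%ld in-errors=%ld\n", sk_drops, mib_inerrors); /* 4, 4 */
	return 0;
}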
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index dddc6fe90f51..3488650b90ac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1906,7 +1906,7 @@ EXPORT_SYMBOL(udp_encap_enable);
  * Note that in the success and error cases, the skb is assumed to
  * have either been requeued or freed.
  */
-static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int is_udplite = IS_UDPLITE(sk);
@@ -2009,6 +2009,27 @@ drop:
 	return -1;
 }
 
+static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff *next, *segs;
+	int ret;
+
+	if (likely(!udp_unexpected_gso(sk, skb)))
+		return udp_queue_rcv_one_skb(sk, skb);
+
+	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
+	__skb_push(skb, -skb_mac_offset(skb));
+	segs = udp_rcv_segment(sk, skb, true);
+	for (skb = segs; skb; skb = next) {
+		next = skb->next;
+		__skb_pull(skb, skb_transport_offset(skb));
+		ret = udp_queue_rcv_one_skb(sk, skb);
+		if (ret > 0)
+			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
+	}
+	return 0;
+}
+
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
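Two details of the new wrapper are worth isolating: the compile-time guarantee that the UDP control block fits below the GSO control block (so segmentation cannot clobber it, which is also why no CB save/restore is needed), and saving next before each handoff, since udp_queue_rcv_one_skb() may free or requeue the segment. A user-space sketch of both, with C11 _Static_assert in place of BUILD_BUG_ON() and hypothetical types:

#define CB_OFFSET 32	/* stands in for SKB_SGO_CB_OFFSET */

struct udp_cb {		/* stands in for struct udp_skb_cb */
	unsigned int partial_cov;
	short cscov;
};

/* BUILD_BUG_ON() equivalent: break the build, never the run */
_Static_assert(sizeof(struct udp_cb) <= CB_OFFSET,
	       "private CB must fit below the GSO CB");

struct pkt { struct pkt *next; };

static int handle_one(struct pkt *p)	/* may consume p */
{
	(void)p;
	return 0;
}

int main(void)
{
	struct pkt c = { 0 }, b = { &c }, a = { &b };
	struct pkt *p, *next;

	for (p = &a; p; p = next) {
		next = p->next;	/* p may be invalid after handle_one() */
		handle_one(p);
	}
	return 0;
}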
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4c79dc5329bc..c55698d19d68 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -554,7 +554,7 @@ void udpv6_encap_enable(void)
 }
 EXPORT_SYMBOL(udpv6_encap_enable);
 
-static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int is_udplite = IS_UDPLITE(sk);
@@ -637,6 +637,28 @@ drop:
 	return -1;
 }
 
+static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff *next, *segs;
+	int ret;
+
+	if (likely(!udp_unexpected_gso(sk, skb)))
+		return udpv6_queue_rcv_one_skb(sk, skb);
+
+	__skb_push(skb, -skb_mac_offset(skb));
+	segs = udp_rcv_segment(sk, skb, false);
+	for (skb = segs; skb; skb = next) {
+		next = skb->next;
+		__skb_pull(skb, skb_transport_offset(skb));
+
+		ret = udpv6_queue_rcv_one_skb(sk, skb);
+		if (ret > 0)
+			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
+						 true);
+	}
+	return 0;
+}
+
 static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
 				   __be16 loc_port, const struct in6_addr *loc_addr,
 				   __be16 rmt_port, const struct in6_addr *rmt_addr,
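Both the IPv4 and IPv6 loops push skb->data back to the MAC header before segmenting (skb_mac_offset() is negative once data sits at the UDP header, so -skb_mac_offset() is a positive push length) and then pull each resulting segment forward to its transport header. A toy model of that pointer bookkeeping, assuming a 14-byte Ethernet + 20-byte IPv4 layout (my illustration, not kernel code):

#include <assert.h>

int main(void)
{
	char raw[128];			/* frame, MAC header at raw[0] */
	char *head = raw;
	char *data = raw + 34;		/* data at the UDP header */
	int mac_offset = (int)(head - data);	/* skb_mac_offset(): -34 */

	data += mac_offset;	/* __skb_push(skb, -mac_offset): back to L2 */
	assert(data == head);

	data += 34;	/* per segment: __skb_pull(skb, transport offset) */
	assert(data == raw + 34);
	return 0;
}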