 drivers/net/geneve.c       |  28
 drivers/net/vxlan.c        |  30
 include/linux/netdevice.h  |  17
 include/linux/udp.h        |   8
 include/net/protocol.h     |   3
 include/net/route.h        |   7
 include/net/udp.h          |  11
 include/net/udp_tunnel.h   |   7
 include/net/vxlan.h        |   1
 net/ipv4/fou.c             |  48
 net/ipv4/udp.c             |  13
 net/ipv4/udp_offload.c     | 113
 net/ipv4/udp_tunnel.c      |   2
 net/ipv6/Makefile          |   5
 net/ipv6/af_inet6.c        |   8
 net/ipv6/ip6_offload.c     |   2
 net/ipv6/ip6_offload.h     |   3
 net/ipv6/udp.c             |  13
 net/ipv6/udp_offload.c     |  11
 19 files changed, 129 insertions(+), 201 deletions(-)
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index bc168894bda3..a9fbf17eb256 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -87,7 +87,6 @@ struct geneve_sock {
         struct socket *sock;
         struct rcu_head rcu;
         int refcnt;
-        struct udp_offload udp_offloads;
         struct hlist_head vni_list[VNI_HASH_SIZE];
         u32 flags;
 };
@@ -409,14 +408,6 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
         struct net *net = sock_net(sk);
         sa_family_t sa_family = geneve_get_sk_family(gs);
         __be16 port = inet_sk(sk)->inet_sport;
-        int err;
-
-        if (sa_family == AF_INET) {
-                err = udp_add_offload(sock_net(sk), &gs->udp_offloads);
-                if (err)
-                        pr_warn("geneve: udp_add_offload failed with status %d\n",
-                                err);
-        }
 
         rcu_read_lock();
         for_each_netdev_rcu(net, dev) {
@@ -432,9 +423,9 @@ static int geneve_hlen(struct genevehdr *gh)
         return sizeof(*gh) + gh->opt_len * 4;
 }
 
-static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
-                                           struct sk_buff *skb,
-                                           struct udp_offload *uoff)
+static struct sk_buff **geneve_gro_receive(struct sock *sk,
+                                           struct sk_buff **head,
+                                           struct sk_buff *skb)
 {
         struct sk_buff *p, **pp = NULL;
         struct genevehdr *gh, *gh2;
@@ -495,8 +486,8 @@ out:
         return pp;
 }
 
-static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
-                               struct udp_offload *uoff)
+static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
+                               int nhoff)
 {
         struct genevehdr *gh;
         struct packet_offload *ptype;
@@ -545,14 +536,14 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
                 INIT_HLIST_HEAD(&gs->vni_list[h]);
 
         /* Initialize the geneve udp offloads structure */
-        gs->udp_offloads.port = port;
-        gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
-        gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
         geneve_notify_add_rx_port(gs);
 
         /* Mark socket as an encapsulation socket */
+        memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
         tunnel_cfg.sk_user_data = gs;
         tunnel_cfg.encap_type = 1;
+        tunnel_cfg.gro_receive = geneve_gro_receive;
+        tunnel_cfg.gro_complete = geneve_gro_complete;
         tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
         tunnel_cfg.encap_destroy = NULL;
         setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
@@ -576,9 +567,6 @@ static void geneve_notify_del_rx_port(struct geneve_sock *gs)
         }
 
         rcu_read_unlock();
-
-        if (sa_family == AF_INET)
-                udp_del_offload(&gs->udp_offloads);
 }
 
 static void __geneve_sock_release(struct geneve_sock *gs)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 51cccddfe403..9f3634064c92 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -551,16 +551,15 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
         return vh;
 }
 
-static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
-                                          struct sk_buff *skb,
-                                          struct udp_offload *uoff)
+static struct sk_buff **vxlan_gro_receive(struct sock *sk,
+                                          struct sk_buff **head,
+                                          struct sk_buff *skb)
 {
         struct sk_buff *p, **pp = NULL;
         struct vxlanhdr *vh, *vh2;
         unsigned int hlen, off_vx;
         int flush = 1;
-        struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
-                                             udp_offloads);
+        struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
         __be32 flags;
         struct gro_remcsum grc;
 
@@ -613,8 +612,7 @@ out:
         return pp;
 }
 
-static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
-                              struct udp_offload *uoff)
+static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 {
         udp_tunnel_gro_complete(skb, nhoff);
 
@@ -629,13 +627,6 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
         struct net *net = sock_net(sk);
         sa_family_t sa_family = vxlan_get_sk_family(vs);
         __be16 port = inet_sk(sk)->inet_sport;
-        int err;
-
-        if (sa_family == AF_INET) {
-                err = udp_add_offload(net, &vs->udp_offloads);
-                if (err)
-                        pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
-        }
 
         rcu_read_lock();
         for_each_netdev_rcu(net, dev) {
@@ -662,9 +653,6 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
                                             port);
         }
         rcu_read_unlock();
-
-        if (sa_family == AF_INET)
-                udp_del_offload(&vs->udp_offloads);
 }
 
 /* Add new entry to forwarding table -- assumes lock held */
@@ -2752,21 +2740,19 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
         atomic_set(&vs->refcnt, 1);
         vs->flags = (flags & VXLAN_F_RCV_FLAGS);
 
-        /* Initialize the vxlan udp offloads structure */
-        vs->udp_offloads.port = port;
-        vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
-        vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
-
         spin_lock(&vn->sock_lock);
         hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
         vxlan_notify_add_rx_port(vs);
         spin_unlock(&vn->sock_lock);
 
         /* Mark socket as an encapsulation socket. */
+        memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
         tunnel_cfg.sk_user_data = vs;
         tunnel_cfg.encap_type = 1;
         tunnel_cfg.encap_rcv = vxlan_rcv;
         tunnel_cfg.encap_destroy = NULL;
+        tunnel_cfg.gro_receive = vxlan_gro_receive;
+        tunnel_cfg.gro_complete = vxlan_gro_complete;
 
         setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cb0d5d09c2e4..cb4e508b3f38 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2159,23 +2159,6 @@ struct packet_offload {
         struct list_head list;
 };
 
-struct udp_offload;
-
-struct udp_offload_callbacks {
-        struct sk_buff **(*gro_receive)(struct sk_buff **head,
-                                        struct sk_buff *skb,
-                                        struct udp_offload *uoff);
-        int (*gro_complete)(struct sk_buff *skb,
-                            int nhoff,
-                            struct udp_offload *uoff);
-};
-
-struct udp_offload {
-        __be16 port;
-        u8 ipproto;
-        struct udp_offload_callbacks callbacks;
-};
-
 /* often modified stats are per-CPU, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
         u64 rx_packets;
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 32342754643a..d1fd8cd39478 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -71,6 +71,14 @@ struct udp_sock {
          */
         int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
         void (*encap_destroy)(struct sock *sk);
+
+        /* GRO functions for UDP socket */
+        struct sk_buff ** (*gro_receive)(struct sock *sk,
+                                         struct sk_buff **head,
+                                         struct sk_buff *skb);
+        int (*gro_complete)(struct sock *sk,
+                            struct sk_buff *skb,
+                            int nhoff);
 };
 
 static inline struct udp_sock *udp_sk(const struct sock *sk)
diff --git a/include/net/protocol.h b/include/net/protocol.h
index da689f5432de..bf36ca34af7a 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -107,9 +107,6 @@ int inet_del_offload(const struct net_offload *prot, unsigned char num);
 void inet_register_protosw(struct inet_protosw *p);
 void inet_unregister_protosw(struct inet_protosw *p);
 
-int udp_add_offload(struct net *net, struct udp_offload *prot);
-void udp_del_offload(struct udp_offload *prot);
-
 #if IS_ENABLED(CONFIG_IPV6)
 int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
 int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
diff --git a/include/net/route.h b/include/net/route.h
index 9b0a523bb428..f4b11eee1754 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -322,10 +322,11 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
 
 static inline int inet_iif(const struct sk_buff *skb)
 {
-        int iif = skb_rtable(skb)->rt_iif;
+        struct rtable *rt = skb_rtable(skb);
+
+        if (rt && rt->rt_iif)
+                return rt->rt_iif;
 
-        if (iif)
-                return iif;
         return skb->skb_iif;
 }
 
diff --git a/include/net/udp.h b/include/net/udp.h
index a0b0da97164c..3c5a65e0946d 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -167,9 +167,12 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
                 UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
 }
 
+typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
+                                     __be16 dport);
+
 struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
-                                 struct udphdr *uh);
-int udp_gro_complete(struct sk_buff *skb, int nhoff);
+                                 struct udphdr *uh, udp_lookup_t lookup);
+int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
 
 static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
 {
@@ -269,6 +272,8 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
                                __be32 daddr, __be16 dport, int dif,
                                struct udp_table *tbl, struct sk_buff *skb);
+struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+                                 __be16 sport, __be16 dport);
 struct sock *udp6_lib_lookup(struct net *net,
                              const struct in6_addr *saddr, __be16 sport,
                              const struct in6_addr *daddr, __be16 dport,
@@ -278,6 +283,8 @@ struct sock *__udp6_lib_lookup(struct net *net,
                                const struct in6_addr *daddr, __be16 dport,
                                int dif, struct udp_table *tbl,
                                struct sk_buff *skb);
+struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+                                 __be16 sport, __be16 dport);
 
 /*
  * SNMP statistics for UDP and UDP-Lite
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index b83114077cee..2dcf1de948ac 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -64,6 +64,11 @@ static inline int udp_sock_create(struct net *net,
 
 typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
 typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
+typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
+                                                     struct sk_buff **head,
+                                                     struct sk_buff *skb);
+typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
+                                         int nhoff);
 
 struct udp_tunnel_sock_cfg {
         void *sk_user_data;     /* user data used by encap_rcv call back */
@@ -71,6 +76,8 @@ struct udp_tunnel_sock_cfg {
         __u8 encap_type;
         udp_tunnel_encap_rcv_t encap_rcv;
         udp_tunnel_encap_destroy_t encap_destroy;
+        udp_tunnel_gro_receive_t gro_receive;
+        udp_tunnel_gro_complete_t gro_complete;
 };
 
 /* Setup the given (UDP) sock to receive UDP encapsulated packets */
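For orientation, here is a minimal sketch (not part of this change) of how a UDP tunnel driver would register per-socket GRO handlers through the two udp_tunnel_sock_cfg fields added above. The foo_* names and struct foo_sock are hypothetical placeholders; the cfg fields, setup_udp_tunnel_sock() and rcu_dereference_sk_user_data() are the interfaces this patch touches, and the pattern mirrors the geneve and vxlan changes earlier in the diff.

#include <net/udp_tunnel.h>

/* Hypothetical per-socket state for the example tunnel. */
struct foo_sock {
        struct socket *sock;
};

/* Ordinary (non-GRO) receive path for the encapsulation; returning 0
 * tells the UDP stack the skb has been consumed.
 */
static int foo_encap_recv(struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}

static struct sk_buff **foo_gro_receive(struct sock *sk,
                                        struct sk_buff **head,
                                        struct sk_buff *skb)
{
        /* Per-socket state is reachable from sk, as vxlan_gro_receive()
         * does above; no global port list is consulted any more.
         */
        struct foo_sock *fs = rcu_dereference_sk_user_data(sk);

        (void)fs;
        /* ... match skb against held packets and return the list to flush ... */
        return NULL;
}

static int foo_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
        /* ... fix up the outer headers of the merged packet ... */
        return 0;
}

static void foo_setup_sock(struct net *net, struct socket *sock,
                           struct foo_sock *fs)
{
        struct udp_tunnel_sock_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.sk_user_data = fs;
        cfg.encap_type = 1;
        cfg.encap_rcv = foo_encap_recv;
        cfg.gro_receive = foo_gro_receive;
        cfg.gro_complete = foo_gro_complete;
        setup_udp_tunnel_sock(net, sock, &cfg);
}

With this in place, the GRO layer no longer walks a global udp_offload port list; it looks up the receiving socket and calls whatever gro_receive/gro_complete that socket registered (see net/ipv4/udp_offload.c below).
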
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index dcc6f4057115..2f168f0ea32c 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -189,7 +189,6 @@ struct vxlan_sock {
         struct rcu_head rcu;
         struct hlist_head vni_list[VNI_HASH_SIZE];
         atomic_t refcnt;
-        struct udp_offload udp_offloads;
         u32 flags;
 };
 
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 5a94aea280d3..5738b9771067 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -22,7 +22,6 @@ struct fou {
         u8 flags;
         __be16 port;
         u16 type;
-        struct udp_offload udp_offloads;
         struct list_head list;
         struct rcu_head rcu;
 };
@@ -186,13 +185,13 @@ drop:
         return 0;
 }
 
-static struct sk_buff **fou_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb,
-                                        struct udp_offload *uoff)
+static struct sk_buff **fou_gro_receive(struct sock *sk,
+                                        struct sk_buff **head,
+                                        struct sk_buff *skb)
 {
         const struct net_offload *ops;
         struct sk_buff **pp = NULL;
-        u8 proto = NAPI_GRO_CB(skb)->proto;
+        u8 proto = fou_from_sock(sk)->protocol;
         const struct net_offload **offloads;
 
         /* We can clear the encap_mark for FOU as we are essentially doing
@@ -217,11 +216,11 @@ out_unlock:
         return pp;
 }
 
-static int fou_gro_complete(struct sk_buff *skb, int nhoff,
-                            struct udp_offload *uoff)
+static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
+                            int nhoff)
 {
         const struct net_offload *ops;
-        u8 proto = NAPI_GRO_CB(skb)->proto;
+        u8 proto = fou_from_sock(sk)->protocol;
         int err = -ENOSYS;
         const struct net_offload **offloads;
 
@@ -264,9 +263,9 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
         return guehdr;
 }
 
-static struct sk_buff **gue_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb,
-                                        struct udp_offload *uoff)
+static struct sk_buff **gue_gro_receive(struct sock *sk,
+                                        struct sk_buff **head,
+                                        struct sk_buff *skb)
 {
         const struct net_offload **offloads;
         const struct net_offload *ops;
@@ -277,7 +276,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
         void *data;
         u16 doffset = 0;
         int flush = 1;
-        struct fou *fou = container_of(uoff, struct fou, udp_offloads);
+        struct fou *fou = fou_from_sock(sk);
         struct gro_remcsum grc;
 
         skb_gro_remcsum_init(&grc);
@@ -386,8 +385,7 @@ out:
         return pp;
 }
 
-static int gue_gro_complete(struct sk_buff *skb, int nhoff,
-                            struct udp_offload *uoff)
+static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 {
         const struct net_offload **offloads;
         struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
@@ -435,10 +433,7 @@ static int fou_add_to_port_list(struct net *net, struct fou *fou)
 static void fou_release(struct fou *fou)
 {
         struct socket *sock = fou->sock;
-        struct sock *sk = sock->sk;
 
-        if (sk->sk_family == AF_INET)
-                udp_del_offload(&fou->udp_offloads);
         list_del(&fou->list);
         udp_tunnel_sock_release(sock);
 
@@ -448,11 +443,9 @@ static void fou_release(struct fou *fou)
 static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
 {
         udp_sk(sk)->encap_rcv = fou_udp_recv;
-        fou->protocol = cfg->protocol;
-        fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
-        fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
-        fou->udp_offloads.port = cfg->udp_config.local_udp_port;
-        fou->udp_offloads.ipproto = cfg->protocol;
+        udp_sk(sk)->gro_receive = fou_gro_receive;
+        udp_sk(sk)->gro_complete = fou_gro_complete;
+        fou_from_sock(sk)->protocol = cfg->protocol;
 
         return 0;
 }
@@ -460,9 +453,8 @@ static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
 static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
 {
         udp_sk(sk)->encap_rcv = gue_udp_recv;
-        fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
-        fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
-        fou->udp_offloads.port = cfg->udp_config.local_udp_port;
+        udp_sk(sk)->gro_receive = gue_gro_receive;
+        udp_sk(sk)->gro_complete = gue_gro_complete;
 
         return 0;
 }
@@ -521,12 +513,6 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
 
         sk->sk_allocation = GFP_ATOMIC;
 
-        if (cfg->udp_config.family == AF_INET) {
-                err = udp_add_offload(net, &fou->udp_offloads);
-                if (err)
-                        goto error;
-        }
-
         err = fou_add_to_port_list(net, fou);
         if (err)
                 goto error;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d80312ddbb8a..3563788d064f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -604,6 +604,19 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
                                  udptable, skb);
 }
 
+struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+                                 __be16 sport, __be16 dport)
+{
+        const struct iphdr *iph = ip_hdr(skb);
+        const struct net_device *dev =
+            skb_dst(skb) ? skb_dst(skb)->dev : skb->dev;
+
+        return __udp4_lib_lookup(dev_net(dev), iph->saddr, sport,
+                                 iph->daddr, dport, inet_iif(skb),
+                                 &udp_table, skb);
+}
+EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
+
 /* Must be called under rcu_read_lock().
  * Does increment socket refcount.
  */
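Usage note (not part of the diff): the new helper is intended to be called with RCU held, which is how udp_gro_receive() and udp_gro_complete() below reach it through the udp_lookup_t pointer. A hypothetical caller, assuming the UDP header is already at udp_hdr(skb), might look like:

#include <linux/udp.h>
#include <net/udp.h>

/* Sketch only: look up the receiving UDP socket for a packet and check
 * whether it registered a per-socket gro_receive handler (the new
 * udp_sock fields from include/linux/udp.h above).
 */
static bool foo_sock_has_gro(struct sk_buff *skb)
{
        struct udphdr *uh = udp_hdr(skb);
        struct sock *sk;
        bool ret;

        rcu_read_lock();
        sk = udp4_lib_lookup_skb(skb, uh->source, uh->dest);
        ret = sk && udp_sk(sk)->gro_receive;
        rcu_read_unlock();

        return ret;
}
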
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0ed2dafb7cc4..6230cf4b0d2d 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -14,18 +14,6 @@
 #include <net/udp.h>
 #include <net/protocol.h>
 
-static DEFINE_SPINLOCK(udp_offload_lock);
-static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
-
-#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
-
-struct udp_offload_priv {
-        struct udp_offload *offload;
-        possible_net_t net;
-        struct rcu_head rcu;
-        struct udp_offload_priv __rcu *next;
-};
-
 static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
         netdev_features_t features,
         struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
@@ -179,6 +167,7 @@ out_unlock:
 
         return segs;
 }
+EXPORT_SYMBOL(skb_udp_tunnel_segment);
 
 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                          netdev_features_t features)
@@ -253,64 +242,14 @@ out:
         return segs;
 }
 
-int udp_add_offload(struct net *net, struct udp_offload *uo)
-{
-        struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
-
-        if (!new_offload)
-                return -ENOMEM;
-
-        write_pnet(&new_offload->net, net);
-        new_offload->offload = uo;
-
-        spin_lock(&udp_offload_lock);
-        new_offload->next = udp_offload_base;
-        rcu_assign_pointer(udp_offload_base, new_offload);
-        spin_unlock(&udp_offload_lock);
-
-        return 0;
-}
-EXPORT_SYMBOL(udp_add_offload);
-
-static void udp_offload_free_routine(struct rcu_head *head)
-{
-        struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);
-        kfree(ou_priv);
-}
-
-void udp_del_offload(struct udp_offload *uo)
-{
-        struct udp_offload_priv __rcu **head = &udp_offload_base;
-        struct udp_offload_priv *uo_priv;
-
-        spin_lock(&udp_offload_lock);
-
-        uo_priv = udp_deref_protected(*head);
-        for (; uo_priv != NULL;
-             uo_priv = udp_deref_protected(*head)) {
-                if (uo_priv->offload == uo) {
-                        rcu_assign_pointer(*head,
-                                           udp_deref_protected(uo_priv->next));
-                        goto unlock;
-                }
-                head = &uo_priv->next;
-        }
-        pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
-unlock:
-        spin_unlock(&udp_offload_lock);
-        if (uo_priv)
-                call_rcu(&uo_priv->rcu, udp_offload_free_routine);
-}
-EXPORT_SYMBOL(udp_del_offload);
-
 struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
-                                 struct udphdr *uh)
+                                 struct udphdr *uh, udp_lookup_t lookup)
 {
-        struct udp_offload_priv *uo_priv;
         struct sk_buff *p, **pp = NULL;
         struct udphdr *uh2;
         unsigned int off = skb_gro_offset(skb);
         int flush = 1;
+        struct sock *sk;
 
         if (NAPI_GRO_CB(skb)->encap_mark ||
             (skb->ip_summed != CHECKSUM_PARTIAL &&
@@ -322,13 +261,10 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
         NAPI_GRO_CB(skb)->encap_mark = 1;
 
         rcu_read_lock();
-        uo_priv = rcu_dereference(udp_offload_base);
-        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
-                if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
-                    uo_priv->offload->port == uh->dest &&
-                    uo_priv->offload->callbacks.gro_receive)
-                        goto unflush;
-        }
+        sk = (*lookup)(skb, uh->source, uh->dest);
+
+        if (sk && udp_sk(sk)->gro_receive)
+                goto unflush;
         goto out_unlock;
 
 unflush:
@@ -352,9 +288,7 @@ unflush:
 
         skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
         skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
-        NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-        pp = uo_priv->offload->callbacks.gro_receive(head, skb,
-                                                     uo_priv->offload);
+        pp = udp_sk(sk)->gro_receive(sk, head, skb);
 
 out_unlock:
         rcu_read_unlock();
@@ -362,6 +296,7 @@ out:
         NAPI_GRO_CB(skb)->flush |= flush;
         return pp;
 }
+EXPORT_SYMBOL(udp_gro_receive);
 
 static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
                                          struct sk_buff *skb)
@@ -383,39 +318,28 @@ static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
                                              inet_gro_compute_pseudo);
 skip:
         NAPI_GRO_CB(skb)->is_ipv6 = 0;
-        return udp_gro_receive(head, skb, uh);
+        return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
 
 flush:
         NAPI_GRO_CB(skb)->flush = 1;
         return NULL;
 }
 
-int udp_gro_complete(struct sk_buff *skb, int nhoff)
+int udp_gro_complete(struct sk_buff *skb, int nhoff,
+                     udp_lookup_t lookup)
 {
-        struct udp_offload_priv *uo_priv;
         __be16 newlen = htons(skb->len - nhoff);
         struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
         int err = -ENOSYS;
+        struct sock *sk;
 
         uh->len = newlen;
 
         rcu_read_lock();
-
-        uo_priv = rcu_dereference(udp_offload_base);
-        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
-                if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
-                    uo_priv->offload->port == uh->dest &&
-                    uo_priv->offload->callbacks.gro_complete)
-                        break;
-        }
-
-        if (uo_priv) {
-                NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-                err = uo_priv->offload->callbacks.gro_complete(skb,
-                                nhoff + sizeof(struct udphdr),
-                                uo_priv->offload);
-        }
-
+        sk = (*lookup)(skb, uh->source, uh->dest);
+        if (sk && udp_sk(sk)->gro_complete)
+                err = udp_sk(sk)->gro_complete(sk, skb,
+                                nhoff + sizeof(struct udphdr));
         rcu_read_unlock();
 
         if (skb->remcsum_offload)
@@ -426,6 +350,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
 
         return err;
 }
+EXPORT_SYMBOL(udp_gro_complete);
 
 static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
 {
@@ -440,7 +365,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
         }
 
-        return udp_gro_complete(skb, nhoff);
+        return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
 }
 
 static const struct net_offload udpv4_offload = {
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 96599d1a1318..47f12c73d959 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -69,6 +69,8 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
         udp_sk(sk)->encap_type = cfg->encap_type;
         udp_sk(sk)->encap_rcv = cfg->encap_rcv;
         udp_sk(sk)->encap_destroy = cfg->encap_destroy;
+        udp_sk(sk)->gro_receive = cfg->gro_receive;
+        udp_sk(sk)->gro_complete = cfg->gro_complete;
 
         udp_tunnel_encap_enable(sock);
 }
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 2fbd90bf8d33..5e9d6bf4aaca 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -8,9 +8,10 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
                 addrlabel.o \
                 route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
                 raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \
-                exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
+                exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o \
+                udp_offload.o
 
-ipv6-offload := ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o
+ipv6-offload := ip6_offload.o tcpv6_offload.o exthdrs_offload.o
 
 ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
 ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 2b78aad0d52f..bfa86f040c16 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -64,6 +64,8 @@
 #include <asm/uaccess.h>
 #include <linux/mroute6.h>
 
+#include "ip6_offload.h"
+
 MODULE_AUTHOR("Cast of dozens");
 MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
 MODULE_LICENSE("GPL");
@@ -959,6 +961,10 @@ static int __init inet6_init(void)
         if (err)
                 goto udplitev6_fail;
 
+        err = udpv6_offload_init();
+        if (err)
+                goto udpv6_offload_fail;
+
         err = tcpv6_init();
         if (err)
                 goto tcpv6_fail;
@@ -988,6 +994,8 @@ pingv6_fail:
 ipv6_packet_fail:
         tcpv6_exit();
 tcpv6_fail:
+        udpv6_offload_exit();
+udpv6_offload_fail:
         udplitev6_exit();
 udplitev6_fail:
         udpv6_exit();
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 82e9f3076028..204af2219471 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -325,8 +325,6 @@ static int __init ipv6_offload_init(void)
 
         if (tcpv6_offload_init() < 0)
                 pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
-        if (udp_offload_init() < 0)
-                pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
         if (ipv6_exthdrs_offload_init() < 0)
                 pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
 
diff --git a/net/ipv6/ip6_offload.h b/net/ipv6/ip6_offload.h
index 2e155c651b35..96b40e41ac53 100644
--- a/net/ipv6/ip6_offload.h
+++ b/net/ipv6/ip6_offload.h
@@ -12,7 +12,8 @@
 #define __ip6_offload_h
 
 int ipv6_exthdrs_offload_init(void);
-int udp_offload_init(void);
+int udpv6_offload_init(void);
+int udpv6_offload_exit(void);
 int tcpv6_offload_init(void);
 
 #endif
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 87bd7aff88b4..a050b70b9101 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -326,6 +326,19 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
                                  udptable, skb);
 }
 
+struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+                                 __be16 sport, __be16 dport)
+{
+        const struct ipv6hdr *iph = ipv6_hdr(skb);
+        const struct net_device *dev =
+            skb_dst(skb) ? skb_dst(skb)->dev : skb->dev;
+
+        return __udp6_lib_lookup(dev_net(dev), &iph->saddr, sport,
+                                 &iph->daddr, dport, inet6_iif(skb),
+                                 &udp_table, skb);
+}
+EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
+
 /* Must be called under rcu_read_lock().
  * Does increment socket refcount.
  */
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 2b0fbe6929e8..5429f6bcf047 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -153,7 +153,7 @@ static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
 
 skip:
         NAPI_GRO_CB(skb)->is_ipv6 = 1;
-        return udp_gro_receive(head, skb, uh);
+        return udp_gro_receive(head, skb, uh, udp6_lib_lookup_skb);
 
 flush:
         NAPI_GRO_CB(skb)->flush = 1;
@@ -173,7 +173,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
         }
 
-        return udp_gro_complete(skb, nhoff);
+        return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
 }
 
 static const struct net_offload udpv6_offload = {
@@ -184,7 +184,12 @@ static const struct net_offload udpv6_offload = {
         },
 };
 
-int __init udp_offload_init(void)
+int udpv6_offload_init(void)
 {
         return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
 }
+
+int udpv6_offload_exit(void)
+{
+        return inet6_del_offload(&udpv6_offload, IPPROTO_UDP);
+}