-rw-r--r--  include/linux/netdevice.h   10
-rw-r--r--  include/net/protocol.h      10
-rw-r--r--  net/core/dev.c              14
-rw-r--r--  net/ipv4/af_inet.c          42
-rw-r--r--  net/ipv6/ip6_offload.c      28
-rw-r--r--  net/ipv6/tcpv6_offload.c    10
-rw-r--r--  net/ipv6/udp_offload.c       6
7 files changed, 66 insertions(+), 54 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 61bc8483031f..e46c830c88d8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1515,15 +1515,19 @@ struct packet_type {
 	struct list_head	list;
 };
 
-struct packet_offload {
-	__be16			 type;	/* This is really htons(ether_type). */
+struct offload_callbacks {
 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 						netdev_features_t features);
 	int			(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
 						 struct sk_buff *skb);
 	int			(*gro_complete)(struct sk_buff *skb);
+};
+
+struct packet_offload {
+	__be16			 type;	/* This is really htons(ether_type). */
+	struct offload_callbacks callbacks;
 	struct list_head	list;
 };
 
 #include <linux/notifier.h>
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 2c90794c139d..047c0476c0a0 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -29,6 +29,7 @@
 #if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
+#include <linux/netdevice.h>
 
 /* This is one larger than the largest protocol value that can be
  * found in an ipv4 or ipv6 header. Since in both cases the protocol
@@ -63,13 +64,8 @@ struct inet6_protocol {
 #endif
 
 struct net_offload {
-	int			(*gso_send_check)(struct sk_buff *skb);
-	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
-						netdev_features_t features);
-	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
-						 struct sk_buff *skb);
-	int			(*gro_complete)(struct sk_buff *skb);
-	unsigned int		 flags;	/* Flags used by IPv6 for now */
+	struct offload_callbacks callbacks;
+	unsigned int		 flags;	/* Flags used by IPv6 for now */
 };
 /* This should be set for any extension header which is compatible with GSO. */
 #define INET6_PROTO_GSO_EXTHDR	0x1
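
The two header hunks above are the core of the patch: the four GSO/GRO handlers that were duplicated between struct packet_offload (keyed by ether_type) and struct net_offload (keyed by IP protocol number) now live once in the shared struct offload_callbacks, and callers reach them through the nested .callbacks member. Below is a minimal sketch of what an IPv4 protocol offload looks like after this change; the foo_* names and IPPROTO_FOO are hypothetical placeholders, and only the struct layout and the inet_add_offload() registration helper are existing kernel API:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/protocol.h>

/* Hypothetical GSO segment handler; a real one would split skb into
 * MSS-sized segments and return the resulting list. */
static struct sk_buff *foo_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* After this patch the handlers sit inside the nested .callbacks
 * initializer instead of being direct members of net_offload. */
static const struct net_offload foo_offload = {
	.callbacks = {
		.gso_segment = foo_gso_segment,
	},
};

static int __init foo_offload_init(void)
{
	/* IPPROTO_FOO stands in for a real protocol number; the entry
	 * lands in inet_offloads[] and is looked up through
	 * ops->callbacks.<handler> as shown in the af_inet.c hunks below. */
	return inet_add_offload(&foo_offload, IPPROTO_FOO);
}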
diff --git a/net/core/dev.c b/net/core/dev.c
index cf843a256cc6..cf105e886cca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2102,16 +2102,16 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, &offload_base, list) {
-		if (ptype->type == type && ptype->gso_segment) {
+		if (ptype->type == type && ptype->callbacks.gso_segment) {
 			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
-				err = ptype->gso_send_check(skb);
+				err = ptype->callbacks.gso_send_check(skb);
 				segs = ERR_PTR(err);
 				if (err || skb_gso_ok(skb, features))
 					break;
 				__skb_push(skb, (skb->data -
 						 skb_network_header(skb)));
 			}
-			segs = ptype->gso_segment(skb, features);
+			segs = ptype->callbacks.gso_segment(skb, features);
 			break;
 		}
 	}
@@ -3533,10 +3533,10 @@ static int napi_gro_complete(struct sk_buff *skb)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
-		if (ptype->type != type || !ptype->gro_complete)
+		if (ptype->type != type || !ptype->callbacks.gro_complete)
 			continue;
 
-		err = ptype->gro_complete(skb);
+		err = ptype->callbacks.gro_complete(skb);
 		break;
 	}
 	rcu_read_unlock();
@@ -3598,7 +3598,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
-		if (ptype->type != type || !ptype->gro_receive)
+		if (ptype->type != type || !ptype->callbacks.gro_receive)
 			continue;
 
 		skb_set_network_header(skb, skb_gro_offset(skb));
@@ -3608,7 +3608,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 
-		pp = ptype->gro_receive(&napi->gro_list, skb);
+		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
 		break;
 	}
 	rcu_read_unlock();
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9f2e7fd8bea8..d5e5a054123c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1276,8 +1276,8 @@ static int inet_gso_send_check(struct sk_buff *skb)
 
 	rcu_read_lock();
 	ops = rcu_dereference(inet_offloads[proto]);
-	if (likely(ops && ops->gso_send_check))
-		err = ops->gso_send_check(skb);
+	if (likely(ops && ops->callbacks.gso_send_check))
+		err = ops->callbacks.gso_send_check(skb);
 	rcu_read_unlock();
 
 out:
@@ -1326,8 +1326,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 
 	rcu_read_lock();
 	ops = rcu_dereference(inet_offloads[proto]);
-	if (likely(ops && ops->gso_segment))
-		segs = ops->gso_segment(skb, features);
+	if (likely(ops && ops->callbacks.gso_segment))
+		segs = ops->callbacks.gso_segment(skb, features);
 	rcu_read_unlock();
 
 	if (!segs || IS_ERR(segs))
@@ -1379,7 +1379,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 
 	rcu_read_lock();
 	ops = rcu_dereference(inet_offloads[proto]);
-	if (!ops || !ops->gro_receive)
+	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;
 
 	if (*(u8 *)iph != 0x45)
@@ -1420,7 +1420,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	pp = ops->gro_receive(head, skb);
+	pp = ops->callbacks.gro_receive(head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -1444,10 +1444,10 @@ static int inet_gro_complete(struct sk_buff *skb)
 
 	rcu_read_lock();
 	ops = rcu_dereference(inet_offloads[proto]);
-	if (WARN_ON(!ops || !ops->gro_complete))
+	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
 		goto out_unlock;
 
-	err = ops->gro_complete(skb);
+	err = ops->callbacks.gro_complete(skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -1563,10 +1563,12 @@ static const struct net_protocol tcp_protocol = {
 };
 
 static const struct net_offload tcp_offload = {
-	.gso_send_check = tcp_v4_gso_send_check,
-	.gso_segment = tcp_tso_segment,
-	.gro_receive = tcp4_gro_receive,
-	.gro_complete = tcp4_gro_complete,
+	.callbacks = {
+		.gso_send_check = tcp_v4_gso_send_check,
+		.gso_segment = tcp_tso_segment,
+		.gro_receive = tcp4_gro_receive,
+		.gro_complete = tcp4_gro_complete,
+	},
 };
 
 static const struct net_protocol udp_protocol = {
@@ -1577,8 +1579,10 @@ static const struct net_protocol udp_protocol = {
 };
 
 static const struct net_offload udp_offload = {
-	.gso_send_check = udp4_ufo_send_check,
-	.gso_segment = udp4_ufo_fragment,
+	.callbacks = {
+		.gso_send_check = udp4_ufo_send_check,
+		.gso_segment = udp4_ufo_fragment,
+	},
 };
 
 static const struct net_protocol icmp_protocol = {
@@ -1667,10 +1671,12 @@ static int ipv4_proc_init(void);
 
 static struct packet_offload ip_packet_offload __read_mostly = {
 	.type = cpu_to_be16(ETH_P_IP),
-	.gso_send_check = inet_gso_send_check,
-	.gso_segment = inet_gso_segment,
-	.gro_receive = inet_gro_receive,
-	.gro_complete = inet_gro_complete,
+	.callbacks = {
+		.gso_send_check = inet_gso_send_check,
+		.gso_segment = inet_gso_segment,
+		.gro_receive = inet_gro_receive,
+		.gro_complete = inet_gro_complete,
+	},
 };
 
 static int __init ipv4_offload_init(void)
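
The hunk above stops at the declaration of ipv4_offload_init(); its body is outside the diff context. As a hedged sketch of the registration side (the real function also registers the tcp_offload/udp_offload entries shown earlier via inet_add_offload()), hooking ip_packet_offload into the core comes down to one call onto the offload_base list that skb_gso_segment() and dev_gro_receive() walk in the net/core/dev.c hunks above:

/* Sketch only, not the file's actual init body. */
static int __init ip_packet_offload_register(void)
{
	dev_add_offload(&ip_packet_offload);	/* joins offload_base */
	return 0;
}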
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 63d79d9005bd..f26f0da7f095 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -70,9 +70,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
 	ops = rcu_dereference(inet6_offloads[
 		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
 
-	if (likely(ops && ops->gso_send_check)) {
+	if (likely(ops && ops->callbacks.gso_send_check)) {
 		skb_reset_transport_header(skb);
-		err = ops->gso_send_check(skb);
+		err = ops->callbacks.gso_send_check(skb);
 	}
 	rcu_read_unlock();
 
@@ -113,9 +113,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
 	rcu_read_lock();
 	ops = rcu_dereference(inet6_offloads[proto]);
-	if (likely(ops && ops->gso_segment)) {
+	if (likely(ops && ops->callbacks.gso_segment)) {
 		skb_reset_transport_header(skb);
-		segs = ops->gso_segment(skb, features);
+		segs = ops->callbacks.gso_segment(skb, features);
 	}
 	rcu_read_unlock();
 
@@ -173,7 +173,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 	rcu_read_lock();
 	proto = iph->nexthdr;
 	ops = rcu_dereference(inet6_offloads[proto]);
-	if (!ops || !ops->gro_receive) {
+	if (!ops || !ops->callbacks.gro_receive) {
 		__pskb_pull(skb, skb_gro_offset(skb));
 		proto = ipv6_gso_pull_exthdrs(skb, proto);
 		skb_gro_pull(skb, -skb_transport_offset(skb));
@@ -181,7 +181,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 		__skb_push(skb, skb_gro_offset(skb));
 
 		ops = rcu_dereference(inet6_offloads[proto]);
-		if (!ops || !ops->gro_receive)
+		if (!ops || !ops->callbacks.gro_receive)
 			goto out_unlock;
 
 		iph = ipv6_hdr(skb);
@@ -220,7 +220,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 	csum = skb->csum;
 	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
 
-	pp = ops->gro_receive(head, skb);
+	pp = ops->callbacks.gro_receive(head, skb);
 
 	skb->csum = csum;
 
@@ -244,10 +244,10 @@ static int ipv6_gro_complete(struct sk_buff *skb)
 
 	rcu_read_lock();
 	ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]);
-	if (WARN_ON(!ops || !ops->gro_complete))
+	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
 		goto out_unlock;
 
-	err = ops->gro_complete(skb);
+	err = ops->callbacks.gro_complete(skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -257,10 +257,12 @@ out_unlock:
 
 static struct packet_offload ipv6_packet_offload __read_mostly = {
 	.type = cpu_to_be16(ETH_P_IPV6),
-	.gso_send_check = ipv6_gso_send_check,
-	.gso_segment = ipv6_gso_segment,
-	.gro_receive = ipv6_gro_receive,
-	.gro_complete = ipv6_gro_complete,
+	.callbacks = {
+		.gso_send_check = ipv6_gso_send_check,
+		.gso_segment = ipv6_gso_segment,
+		.gro_receive = ipv6_gro_receive,
+		.gro_complete = ipv6_gro_complete,
+	},
 };
 
 static int __init ipv6_offload_init(void)
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 3a27fe685c8e..2ec6bf6a0aa0 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -81,10 +81,12 @@ static int tcp6_gro_complete(struct sk_buff *skb)
 }
 
 static const struct net_offload tcpv6_offload = {
-	.gso_send_check = tcp_v6_gso_send_check,
-	.gso_segment = tcp_tso_segment,
-	.gro_receive = tcp6_gro_receive,
-	.gro_complete = tcp6_gro_complete,
+	.callbacks = {
+		.gso_send_check = tcp_v6_gso_send_check,
+		.gso_segment = tcp_tso_segment,
+		.gro_receive = tcp6_gro_receive,
+		.gro_complete = tcp6_gro_complete,
+	},
 };
 
 int __init tcpv6_offload_init(void)
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 979e4ab63a8b..8e01c44a987c 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -107,8 +107,10 @@ out:
 	return segs;
 }
 static const struct net_offload udpv6_offload = {
-	.gso_send_check = udp6_ufo_send_check,
-	.gso_segment = udp6_ufo_fragment,
+	.callbacks = {
+		.gso_send_check = udp6_ufo_send_check,
+		.gso_segment = udp6_ufo_fragment,
+	},
 };
 
 int __init udp_offload_init(void)