Patch: consolidate the duplicated checksum-setup logic from drivers/net/xen-netback
and drivers/net/xen-netfront into a common core helper, skb_checksum_setup(),
in net/core/skbuff.c (declared in include/linux/skbuff.h).
-rw-r--r--drivers/net/xen-netback/netback.c260
-rw-r--r--drivers/net/xen-netfront.c48
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--net/core/skbuff.c273
 4 files changed, 281 insertions(+), 302 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 27385639b6e5..6b62c3eb8e18 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -39,7 +39,6 @@
39#include <linux/udp.h> 39#include <linux/udp.h>
40 40
41#include <net/tcp.h> 41#include <net/tcp.h>
42#include <net/ip6_checksum.h>
43 42
44#include <xen/xen.h> 43#include <xen/xen.h>
45#include <xen/events.h> 44#include <xen/events.h>
@@ -1051,257 +1050,9 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
1051 return 0; 1050 return 0;
1052} 1051}
1053 1052
1054static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
1055 unsigned int max)
1056{
1057 if (skb_headlen(skb) >= len)
1058 return 0;
1059
1060 /* If we need to pullup then pullup to the max, so we
1061 * won't need to do it again.
1062 */
1063 if (max > skb->len)
1064 max = skb->len;
1065
1066 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
1067 return -ENOMEM;
1068
1069 if (skb_headlen(skb) < len)
1070 return -EPROTO;
1071
1072 return 0;
1073}
1074
1075/* This value should be large enough to cover a tagged ethernet header plus
1076 * maximally sized IP and TCP or UDP headers.
1077 */
1078#define MAX_IP_HDR_LEN 128
1079
1080static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
1081 int recalculate_partial_csum)
1082{
1083 unsigned int off;
1084 bool fragment;
1085 int err;
1086
1087 fragment = false;
1088
1089 err = maybe_pull_tail(skb,
1090 sizeof(struct iphdr),
1091 MAX_IP_HDR_LEN);
1092 if (err < 0)
1093 goto out;
1094
1095 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
1096 fragment = true;
1097
1098 off = ip_hdrlen(skb);
1099
1100 err = -EPROTO;
1101
1102 if (fragment)
1103 goto out;
1104
1105 switch (ip_hdr(skb)->protocol) {
1106 case IPPROTO_TCP:
1107 err = maybe_pull_tail(skb,
1108 off + sizeof(struct tcphdr),
1109 MAX_IP_HDR_LEN);
1110 if (err < 0)
1111 goto out;
1112
1113 if (!skb_partial_csum_set(skb, off,
1114 offsetof(struct tcphdr, check))) {
1115 err = -EPROTO;
1116 goto out;
1117 }
1118
1119 if (recalculate_partial_csum)
1120 tcp_hdr(skb)->check =
1121 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1122 ip_hdr(skb)->daddr,
1123 skb->len - off,
1124 IPPROTO_TCP, 0);
1125 break;
1126 case IPPROTO_UDP:
1127 err = maybe_pull_tail(skb,
1128 off + sizeof(struct udphdr),
1129 MAX_IP_HDR_LEN);
1130 if (err < 0)
1131 goto out;
1132
1133 if (!skb_partial_csum_set(skb, off,
1134 offsetof(struct udphdr, check))) {
1135 err = -EPROTO;
1136 goto out;
1137 }
1138
1139 if (recalculate_partial_csum)
1140 udp_hdr(skb)->check =
1141 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1142 ip_hdr(skb)->daddr,
1143 skb->len - off,
1144 IPPROTO_UDP, 0);
1145 break;
1146 default:
1147 goto out;
1148 }
1149
1150 err = 0;
1151
1152out:
1153 return err;
1154}
1155
1156/* This value should be large enough to cover a tagged ethernet header plus
1157 * an IPv6 header, all options, and a maximal TCP or UDP header.
1158 */
1159#define MAX_IPV6_HDR_LEN 256
1160
1161#define OPT_HDR(type, skb, off) \
1162 (type *)(skb_network_header(skb) + (off))
1163
1164static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
1165 int recalculate_partial_csum)
1166{
1167 int err;
1168 u8 nexthdr;
1169 unsigned int off;
1170 unsigned int len;
1171 bool fragment;
1172 bool done;
1173
1174 fragment = false;
1175 done = false;
1176
1177 off = sizeof(struct ipv6hdr);
1178
1179 err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
1180 if (err < 0)
1181 goto out;
1182
1183 nexthdr = ipv6_hdr(skb)->nexthdr;
1184
1185 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
1186 while (off <= len && !done) {
1187 switch (nexthdr) {
1188 case IPPROTO_DSTOPTS:
1189 case IPPROTO_HOPOPTS:
1190 case IPPROTO_ROUTING: {
1191 struct ipv6_opt_hdr *hp;
1192
1193 err = maybe_pull_tail(skb,
1194 off +
1195 sizeof(struct ipv6_opt_hdr),
1196 MAX_IPV6_HDR_LEN);
1197 if (err < 0)
1198 goto out;
1199
1200 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
1201 nexthdr = hp->nexthdr;
1202 off += ipv6_optlen(hp);
1203 break;
1204 }
1205 case IPPROTO_AH: {
1206 struct ip_auth_hdr *hp;
1207
1208 err = maybe_pull_tail(skb,
1209 off +
1210 sizeof(struct ip_auth_hdr),
1211 MAX_IPV6_HDR_LEN);
1212 if (err < 0)
1213 goto out;
1214
1215 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
1216 nexthdr = hp->nexthdr;
1217 off += ipv6_authlen(hp);
1218 break;
1219 }
1220 case IPPROTO_FRAGMENT: {
1221 struct frag_hdr *hp;
1222
1223 err = maybe_pull_tail(skb,
1224 off +
1225 sizeof(struct frag_hdr),
1226 MAX_IPV6_HDR_LEN);
1227 if (err < 0)
1228 goto out;
1229
1230 hp = OPT_HDR(struct frag_hdr, skb, off);
1231
1232 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
1233 fragment = true;
1234
1235 nexthdr = hp->nexthdr;
1236 off += sizeof(struct frag_hdr);
1237 break;
1238 }
1239 default:
1240 done = true;
1241 break;
1242 }
1243 }
1244
1245 err = -EPROTO;
1246
1247 if (!done || fragment)
1248 goto out;
1249
1250 switch (nexthdr) {
1251 case IPPROTO_TCP:
1252 err = maybe_pull_tail(skb,
1253 off + sizeof(struct tcphdr),
1254 MAX_IPV6_HDR_LEN);
1255 if (err < 0)
1256 goto out;
1257
1258 if (!skb_partial_csum_set(skb, off,
1259 offsetof(struct tcphdr, check))) {
1260 err = -EPROTO;
1261 goto out;
1262 }
1263
1264 if (recalculate_partial_csum)
1265 tcp_hdr(skb)->check =
1266 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1267 &ipv6_hdr(skb)->daddr,
1268 skb->len - off,
1269 IPPROTO_TCP, 0);
1270 break;
1271 case IPPROTO_UDP:
1272 err = maybe_pull_tail(skb,
1273 off + sizeof(struct udphdr),
1274 MAX_IPV6_HDR_LEN);
1275 if (err < 0)
1276 goto out;
1277
1278 if (!skb_partial_csum_set(skb, off,
1279 offsetof(struct udphdr, check))) {
1280 err = -EPROTO;
1281 goto out;
1282 }
1283
1284 if (recalculate_partial_csum)
1285 udp_hdr(skb)->check =
1286 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1287 &ipv6_hdr(skb)->daddr,
1288 skb->len - off,
1289 IPPROTO_UDP, 0);
1290 break;
1291 default:
1292 goto out;
1293 }
1294
1295 err = 0;
1296
1297out:
1298 return err;
1299}
1300
1301static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) 1053static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1302{ 1054{
1303 int err = -EPROTO; 1055 bool recalculate_partial_csum = false;
1304 int recalculate_partial_csum = 0;
1305 1056
1306 /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy 1057 /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1307 * peers can fail to set NETRXF_csum_blank when sending a GSO 1058 * peers can fail to set NETRXF_csum_blank when sending a GSO
@@ -1311,19 +1062,14 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1311 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { 1062 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1312 vif->rx_gso_checksum_fixup++; 1063 vif->rx_gso_checksum_fixup++;
1313 skb->ip_summed = CHECKSUM_PARTIAL; 1064 skb->ip_summed = CHECKSUM_PARTIAL;
1314 recalculate_partial_csum = 1; 1065 recalculate_partial_csum = true;
1315 } 1066 }
1316 1067
1317 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ 1068 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1318 if (skb->ip_summed != CHECKSUM_PARTIAL) 1069 if (skb->ip_summed != CHECKSUM_PARTIAL)
1319 return 0; 1070 return 0;
1320 1071
1321 if (skb->protocol == htons(ETH_P_IP)) 1072 return skb_checksum_setup(skb, recalculate_partial_csum);
1322 err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
1323 else if (skb->protocol == htons(ETH_P_IPV6))
1324 err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
1325
1326 return err;
1327} 1073}
1328 1074
1329static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) 1075static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e59acb1daa23..c41537b577a4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -859,9 +859,7 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
859 859
860static int checksum_setup(struct net_device *dev, struct sk_buff *skb) 860static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
861{ 861{
862 struct iphdr *iph; 862 bool recalculate_partial_csum = false;
863 int err = -EPROTO;
864 int recalculate_partial_csum = 0;
865 863
866 /* 864 /*
867 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy 865 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
@@ -873,54 +871,14 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
873 struct netfront_info *np = netdev_priv(dev); 871 struct netfront_info *np = netdev_priv(dev);
874 np->rx_gso_checksum_fixup++; 872 np->rx_gso_checksum_fixup++;
875 skb->ip_summed = CHECKSUM_PARTIAL; 873 skb->ip_summed = CHECKSUM_PARTIAL;
876 recalculate_partial_csum = 1; 874 recalculate_partial_csum = true;
877 } 875 }
878 876
879 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ 877 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
880 if (skb->ip_summed != CHECKSUM_PARTIAL) 878 if (skb->ip_summed != CHECKSUM_PARTIAL)
881 return 0; 879 return 0;
882 880
883 if (skb->protocol != htons(ETH_P_IP)) 881 return skb_checksum_setup(skb, recalculate_partial_csum);
884 goto out;
885
886 iph = (void *)skb->data;
887
888 switch (iph->protocol) {
889 case IPPROTO_TCP:
890 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
891 offsetof(struct tcphdr, check)))
892 goto out;
893
894 if (recalculate_partial_csum) {
895 struct tcphdr *tcph = tcp_hdr(skb);
896 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
897 skb->len - iph->ihl*4,
898 IPPROTO_TCP, 0);
899 }
900 break;
901 case IPPROTO_UDP:
902 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
903 offsetof(struct udphdr, check)))
904 goto out;
905
906 if (recalculate_partial_csum) {
907 struct udphdr *udph = udp_hdr(skb);
908 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
909 skb->len - iph->ihl*4,
910 IPPROTO_UDP, 0);
911 }
912 break;
913 default:
914 if (net_ratelimit())
915 pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
916 iph->protocol);
917 goto out;
918 }
919
920 err = 0;
921
922out:
923 return err;
924} 882}
925 883
926static int handle_incoming_queue(struct net_device *dev, 884static int handle_incoming_queue(struct net_device *dev,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d97f2d07d02b..48b760505cb6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2893,6 +2893,8 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
2893 2893
2894bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 2894bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2895 2895
2896int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
2897
2896u32 __skb_get_poff(const struct sk_buff *skb); 2898u32 __skb_get_poff(const struct sk_buff *skb);
2897 2899
2898/** 2900/**
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1d641e781f85..15057d29b010 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -65,6 +65,7 @@
65#include <net/dst.h> 65#include <net/dst.h>
66#include <net/sock.h> 66#include <net/sock.h>
67#include <net/checksum.h> 67#include <net/checksum.h>
68#include <net/ip6_checksum.h>
68#include <net/xfrm.h> 69#include <net/xfrm.h>
69 70
70#include <asm/uaccess.h> 71#include <asm/uaccess.h>
@@ -3549,6 +3550,278 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3549} 3550}
3550EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3551EXPORT_SYMBOL_GPL(skb_partial_csum_set);
3551 3552
3553static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
3554 unsigned int max)
3555{
3556 if (skb_headlen(skb) >= len)
3557 return 0;
3558
3559 /* If we need to pullup then pullup to the max, so we
3560 * won't need to do it again.
3561 */
3562 if (max > skb->len)
3563 max = skb->len;
3564
3565 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
3566 return -ENOMEM;
3567
3568 if (skb_headlen(skb) < len)
3569 return -EPROTO;
3570
3571 return 0;
3572}
3573
3574/* This value should be large enough to cover a tagged ethernet header plus
3575 * maximally sized IP and TCP or UDP headers.
3576 */
3577#define MAX_IP_HDR_LEN 128
3578
3579static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate)
3580{
3581 unsigned int off;
3582 bool fragment;
3583 int err;
3584
3585 fragment = false;
3586
3587 err = skb_maybe_pull_tail(skb,
3588 sizeof(struct iphdr),
3589 MAX_IP_HDR_LEN);
3590 if (err < 0)
3591 goto out;
3592
3593 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
3594 fragment = true;
3595
3596 off = ip_hdrlen(skb);
3597
3598 err = -EPROTO;
3599
3600 if (fragment)
3601 goto out;
3602
3603 switch (ip_hdr(skb)->protocol) {
3604 case IPPROTO_TCP:
3605 err = skb_maybe_pull_tail(skb,
3606 off + sizeof(struct tcphdr),
3607 MAX_IP_HDR_LEN);
3608 if (err < 0)
3609 goto out;
3610
3611 if (!skb_partial_csum_set(skb, off,
3612 offsetof(struct tcphdr, check))) {
3613 err = -EPROTO;
3614 goto out;
3615 }
3616
3617 if (recalculate)
3618 tcp_hdr(skb)->check =
3619 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3620 ip_hdr(skb)->daddr,
3621 skb->len - off,
3622 IPPROTO_TCP, 0);
3623 break;
3624 case IPPROTO_UDP:
3625 err = skb_maybe_pull_tail(skb,
3626 off + sizeof(struct udphdr),
3627 MAX_IP_HDR_LEN);
3628 if (err < 0)
3629 goto out;
3630
3631 if (!skb_partial_csum_set(skb, off,
3632 offsetof(struct udphdr, check))) {
3633 err = -EPROTO;
3634 goto out;
3635 }
3636
3637 if (recalculate)
3638 udp_hdr(skb)->check =
3639 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3640 ip_hdr(skb)->daddr,
3641 skb->len - off,
3642 IPPROTO_UDP, 0);
3643 break;
3644 default:
3645 goto out;
3646 }
3647
3648 err = 0;
3649
3650out:
3651 return err;
3652}
3653
3654/* This value should be large enough to cover a tagged ethernet header plus
3655 * an IPv6 header, all options, and a maximal TCP or UDP header.
3656 */
3657#define MAX_IPV6_HDR_LEN 256
3658
3659#define OPT_HDR(type, skb, off) \
3660 (type *)(skb_network_header(skb) + (off))
3661
3662static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
3663{
3664 int err;
3665 u8 nexthdr;
3666 unsigned int off;
3667 unsigned int len;
3668 bool fragment;
3669 bool done;
3670
3671 fragment = false;
3672 done = false;
3673
3674 off = sizeof(struct ipv6hdr);
3675
3676 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
3677 if (err < 0)
3678 goto out;
3679
3680 nexthdr = ipv6_hdr(skb)->nexthdr;
3681
3682 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
3683 while (off <= len && !done) {
3684 switch (nexthdr) {
3685 case IPPROTO_DSTOPTS:
3686 case IPPROTO_HOPOPTS:
3687 case IPPROTO_ROUTING: {
3688 struct ipv6_opt_hdr *hp;
3689
3690 err = skb_maybe_pull_tail(skb,
3691 off +
3692 sizeof(struct ipv6_opt_hdr),
3693 MAX_IPV6_HDR_LEN);
3694 if (err < 0)
3695 goto out;
3696
3697 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
3698 nexthdr = hp->nexthdr;
3699 off += ipv6_optlen(hp);
3700 break;
3701 }
3702 case IPPROTO_AH: {
3703 struct ip_auth_hdr *hp;
3704
3705 err = skb_maybe_pull_tail(skb,
3706 off +
3707 sizeof(struct ip_auth_hdr),
3708 MAX_IPV6_HDR_LEN);
3709 if (err < 0)
3710 goto out;
3711
3712 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
3713 nexthdr = hp->nexthdr;
3714 off += ipv6_authlen(hp);
3715 break;
3716 }
3717 case IPPROTO_FRAGMENT: {
3718 struct frag_hdr *hp;
3719
3720 err = skb_maybe_pull_tail(skb,
3721 off +
3722 sizeof(struct frag_hdr),
3723 MAX_IPV6_HDR_LEN);
3724 if (err < 0)
3725 goto out;
3726
3727 hp = OPT_HDR(struct frag_hdr, skb, off);
3728
3729 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
3730 fragment = true;
3731
3732 nexthdr = hp->nexthdr;
3733 off += sizeof(struct frag_hdr);
3734 break;
3735 }
3736 default:
3737 done = true;
3738 break;
3739 }
3740 }
3741
3742 err = -EPROTO;
3743
3744 if (!done || fragment)
3745 goto out;
3746
3747 switch (nexthdr) {
3748 case IPPROTO_TCP:
3749 err = skb_maybe_pull_tail(skb,
3750 off + sizeof(struct tcphdr),
3751 MAX_IPV6_HDR_LEN);
3752 if (err < 0)
3753 goto out;
3754
3755 if (!skb_partial_csum_set(skb, off,
3756 offsetof(struct tcphdr, check))) {
3757 err = -EPROTO;
3758 goto out;
3759 }
3760
3761 if (recalculate)
3762 tcp_hdr(skb)->check =
3763 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3764 &ipv6_hdr(skb)->daddr,
3765 skb->len - off,
3766 IPPROTO_TCP, 0);
3767 break;
3768 case IPPROTO_UDP:
3769 err = skb_maybe_pull_tail(skb,
3770 off + sizeof(struct udphdr),
3771 MAX_IPV6_HDR_LEN);
3772 if (err < 0)
3773 goto out;
3774
3775 if (!skb_partial_csum_set(skb, off,
3776 offsetof(struct udphdr, check))) {
3777 err = -EPROTO;
3778 goto out;
3779 }
3780
3781 if (recalculate)
3782 udp_hdr(skb)->check =
3783 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3784 &ipv6_hdr(skb)->daddr,
3785 skb->len - off,
3786 IPPROTO_UDP, 0);
3787 break;
3788 default:
3789 goto out;
3790 }
3791
3792 err = 0;
3793
3794out:
3795 return err;
3796}
3797
3798/**
3799 * skb_checksum_setup - set up partial checksum offset
3800 * @skb: the skb to set up
3801 * @recalculate: if true the pseudo-header checksum will be recalculated
3802 */
3803int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
3804{
3805 int err;
3806
3807 switch (skb->protocol) {
3808 case htons(ETH_P_IP):
3809 err = skb_checksum_setup_ip(skb, recalculate);
3810 break;
3811
3812 case htons(ETH_P_IPV6):
3813 err = skb_checksum_setup_ipv6(skb, recalculate);
3814 break;
3815
3816 default:
3817 err = -EPROTO;
3818 break;
3819 }
3820
3821 return err;
3822}
3823EXPORT_SYMBOL(skb_checksum_setup);
3824
3552void __skb_warn_lro_forwarding(const struct sk_buff *skb) 3825void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3553{ 3826{
3554 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 3827 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",