author    Linus Torvalds <torvalds@linux-foundation.org>  2018-03-05 14:29:24 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-03-05 14:29:24 -0500
commit    547046141f44dba075207fd343e3e032e129c9ac
tree      3979961d838def5efa9f3835d19e05d60b3b4d88
parent    661e50bc853209e41a5c14a290ca4decc43cbfd1
parent    a7f0fb1bfb66ded5d556d6723d691b77a7146b6f
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Use an appropriate TSQ pacing shift in mac80211, from Toke
    Høiland-Jørgensen.

 2) Just like ipv4's ip_route_me_harder(), we have to use skb_to_full_sk
    in ip6_route_me_harder, from Eric Dumazet.

 3) Fix several shutdown races and similar other problems in l2tp, from
    James Chapman.

 4) Handle missing XDP flush properly in tuntap, for real this time.
    From Jason Wang.

 5) Out-of-bounds access in powerpc ebpf tailcalls, from Daniel
    Borkmann.

 6) Fix phy_resume() locking, from Andrew Lunn.

 7) IFLA_MTU values are ignored on newlink for some tunnel types, fix
    from Xin Long.

 8) Revert F-RTO middle box workarounds, they only handle one dimension
    of the problem. From Yuchung Cheng.

 9) Fix socket refcounting in RDS, from Ka-Cheong Poon.

10) Don't allow ppp unit registration to an unregistered channel, from
    Guillaume Nault.

11) Various hv_netvsc fixes from Stephen Hemminger.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (98 commits)
  hv_netvsc: propagate rx filters to VF
  hv_netvsc: filter multicast/broadcast
  hv_netvsc: defer queue selection to VF
  hv_netvsc: use napi_schedule_irqoff
  hv_netvsc: fix race in napi poll when rescheduling
  hv_netvsc: cancel subchannel setup before halting device
  hv_netvsc: fix error unwind handling if vmbus_open fails
  hv_netvsc: only wake transmit queue if link is up
  hv_netvsc: avoid retry on send during shutdown
  virtio-net: re enable XDP_REDIRECT for mergeable buffer
  ppp: prevent unregistered channels from connecting to PPP units
  tc-testing: skbmod: fix match value of ethertype
  mlxsw: spectrum_switchdev: Check success of FDB add operation
  net: make skb_gso_*_seglen functions private
  net: xfrm: use skb_gso_validate_network_len() to check gso sizes
  net: sched: tbf: handle GSO_BY_FRAGS case in enqueue
  net: rename skb_gso_validate_mtu -> skb_gso_validate_network_len
  rds: Incorrect reference counting in TCP socket creation
  net: ethtool: don't ignore return from driver get_fecparam method
  vrf: check forwarding on the original netdevice when generating ICMP dest unreachable
  ...
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,ravb.txt  1
-rw-r--r--  arch/arm/mach-orion5x/Kconfig  3
-rw-r--r--  arch/arm/mach-orion5x/dns323-setup.c  53
-rw-r--r--  arch/arm/mach-orion5x/tsx09-common.c  49
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c  1
-rw-r--r--  drivers/bluetooth/btusb.c  25
-rw-r--r--  drivers/bluetooth/hci_bcm.c  7
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c  7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c  83
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c  29
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c  11
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h  11
-rw-r--r--  drivers/net/hyperv/netvsc.c  33
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c  62
-rw-r--r--  drivers/net/hyperv/rndis_filter.c  23
-rw-r--r--  drivers/net/phy/phy.c  2
-rw-r--r--  drivers/net/phy/phy_device.c  18
-rw-r--r--  drivers/net/ppp/ppp_generic.c  9
-rw-r--r--  drivers/net/tun.c  22
-rw-r--r--  drivers/net/usb/cdc_ether.c  6
-rw-r--r--  drivers/net/usb/r8152.c  2
-rw-r--r--  drivers/net/virtio_net.c  62
-rw-r--r--  drivers/net/wan/hdlc_ppp.c  5
-rw-r--r--  drivers/s390/net/qeth_core_main.c  29
-rw-r--r--  drivers/s390/net/qeth_l3.h  34
-rw-r--r--  drivers/s390/net/qeth_l3_main.c  123
-rw-r--r--  include/linux/phy.h  1
-rw-r--r--  include/linux/skbuff.h  35
-rw-r--r--  include/net/devlink.h  18
-rw-r--r--  kernel/bpf/verifier.c  42
-rw-r--r--  lib/test_bpf.c  4
-rw-r--r--  net/batman-adv/bat_iv_ogm.c  26
-rw-r--r--  net/batman-adv/bat_v.c  2
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c  22
-rw-r--r--  net/batman-adv/fragmentation.c  3
-rw-r--r--  net/batman-adv/hard-interface.c  9
-rw-r--r--  net/batman-adv/originator.c  4
-rw-r--r--  net/batman-adv/originator.h  4
-rw-r--r--  net/batman-adv/soft-interface.c  8
-rw-r--r--  net/batman-adv/types.h  11
-rw-r--r--  net/bridge/br_netfilter_hooks.c  4
-rw-r--r--  net/bridge/br_vlan.c  2
-rw-r--r--  net/bridge/netfilter/ebt_among.c  21
-rw-r--r--  net/bridge/netfilter/ebtables.c  40
-rw-r--r--  net/core/dev.c  14
-rw-r--r--  net/core/devlink.c  46
-rw-r--r--  net/core/ethtool.c  5
-rw-r--r--  net/core/skbuff.c  48
-rw-r--r--  net/ipv4/ip_forward.c  2
-rw-r--r--  net/ipv4/ip_gre.c  5
-rw-r--r--  net/ipv4/ip_output.c  2
-rw-r--r--  net/ipv4/ip_tunnel.c  13
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c  15
-rw-r--r--  net/ipv4/netfilter/nf_flow_table_ipv4.c  3
-rw-r--r--  net/ipv4/route.c  18
-rw-r--r--  net/ipv4/tcp_illinois.c  2
-rw-r--r--  net/ipv4/tcp_input.c  24
-rw-r--r--  net/ipv4/xfrm4_output.c  3
-rw-r--r--  net/ipv6/ip6_output.c  2
-rw-r--r--  net/ipv6/ip6_tunnel.c  12
-rw-r--r--  net/ipv6/netfilter.c  9
-rw-r--r--  net/ipv6/netfilter/ip6t_rpfilter.c  4
-rw-r--r--  net/ipv6/netfilter/nf_flow_table_ipv6.c  2
-rw-r--r--  net/ipv6/netfilter/nf_nat_l3proto_ipv6.c  4
-rw-r--r--  net/ipv6/netfilter/nft_fib_ipv6.c  12
-rw-r--r--  net/ipv6/sit.c  7
-rw-r--r--  net/ipv6/xfrm6_output.c  2
-rw-r--r--  net/l2tp/l2tp_core.c  142
-rw-r--r--  net/l2tp/l2tp_core.h  23
-rw-r--r--  net/l2tp/l2tp_ip.c  10
-rw-r--r--  net/l2tp/l2tp_ip6.c  8
-rw-r--r--  net/l2tp/l2tp_ppp.c  60
-rw-r--r--  net/mac80211/rx.c  2
-rw-r--r--  net/mac80211/tx.c  8
-rw-r--r--  net/mpls/af_mpls.c  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c  2
-rw-r--r--  net/netfilter/nf_tables_api.c  25
-rw-r--r--  net/qrtr/smd.c  1
-rw-r--r--  net/rds/tcp_listen.c  14
-rw-r--r--  net/sched/sch_tbf.c  3
-rw-r--r--  net/smc/af_smc.c  4
-rw-r--r--  net/smc/smc_cdc.c  2
-rw-r--r--  net/smc/smc_core.c  3
-rw-r--r--  net/smc/smc_llc.c  2
-rw-r--r--  net/tipc/group.c  1
-rw-r--r--  net/tipc/socket.c  1
-rw-r--r--  net/tls/tls_main.c  52
-rw-r--r--  net/wireless/Kconfig  13
-rw-r--r--  net/xfrm/xfrm_device.c  2
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c  58
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json  2
95 files changed, 1033 insertions, 658 deletions
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index c902261893b9..92fd4b2f17b2 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -18,6 +18,7 @@ Required properties:
       - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
       - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
       - "renesas,etheravb-r8a77970" for the R8A77970 SoC.
+      - "renesas,etheravb-r8a77980" for the R8A77980 SoC.
       - "renesas,etheravb-r8a77995" for the R8A77995 SoC.
       - "renesas,etheravb-rcar-gen3" as a fallback for the above
 		R-Car Gen3 devices.
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index 2a7bb6ccdcb7..a810f4dd34b1 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO
 
 config MACH_DNS323
 	bool "D-Link DNS-323"
-	select GENERIC_NET_UTILS
 	select I2C_BOARDINFO if I2C
 	help
 	  Say 'Y' here if you want your kernel to support the
@@ -66,7 +65,6 @@ config MACH_DNS323
 
 config MACH_TS209
 	bool "QNAP TS-109/TS-209"
-	select GENERIC_NET_UTILS
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  QNAP TS-109/TS-209 platform.
@@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL
 
 config MACH_TS409
 	bool "QNAP TS-409"
-	select GENERIC_NET_UTILS
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  QNAP TS-409 platform.
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index cd483bfb5ca8..d13344b2ddcd 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = {
 	.phy_addr = MV643XX_ETH_PHY_ADDR(8),
 };
 
+/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
+ * functions be kept somewhere?
+ */
+static int __init dns323_parse_hex_nibble(char n)
+{
+	if (n >= '0' && n <= '9')
+		return n - '0';
+
+	if (n >= 'A' && n <= 'F')
+		return n - 'A' + 10;
+
+	if (n >= 'a' && n <= 'f')
+		return n - 'a' + 10;
+
+	return -1;
+}
+
+static int __init dns323_parse_hex_byte(const char *b)
+{
+	int hi;
+	int lo;
+
+	hi = dns323_parse_hex_nibble(b[0]);
+	lo = dns323_parse_hex_nibble(b[1]);
+
+	if (hi < 0 || lo < 0)
+		return -1;
+
+	return (hi << 4) | lo;
+}
+
 static int __init dns323_read_mac_addr(void)
 {
 	u_int8_t addr[6];
-	void __iomem *mac_page;
+	int i;
+	char *mac_page;
 
 	/* MAC address is stored as a regular ol' string in /dev/mtdblock4
 	 * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).
@@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void)
 	if (!mac_page)
 		return -ENOMEM;
 
-	if (!mac_pton((__force const char *) mac_page, addr))
-		goto error_fail;
+	/* Sanity check the string we're looking at */
+	for (i = 0; i < 5; i++) {
+		if (*(mac_page + (i * 3) + 2) != ':') {
+			goto error_fail;
+		}
+	}
+
+	for (i = 0; i < 6; i++) {
+		int byte;
+
+		byte = dns323_parse_hex_byte(mac_page + (i * 3));
+		if (byte < 0) {
+			goto error_fail;
+		}
+
+		addr[i] = byte;
+	}
 
 	iounmap(mac_page);
 	printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
index 89774985d380..905d4f2dd0b8 100644
--- a/arch/arm/mach-orion5x/tsx09-common.c
+++ b/arch/arm/mach-orion5x/tsx09-common.c
@@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
 	.phy_addr = MV643XX_ETH_PHY_ADDR(8),
 };
 
+static int __init qnap_tsx09_parse_hex_nibble(char n)
+{
+	if (n >= '0' && n <= '9')
+		return n - '0';
+
+	if (n >= 'A' && n <= 'F')
+		return n - 'A' + 10;
+
+	if (n >= 'a' && n <= 'f')
+		return n - 'a' + 10;
+
+	return -1;
+}
+
+static int __init qnap_tsx09_parse_hex_byte(const char *b)
+{
+	int hi;
+	int lo;
+
+	hi = qnap_tsx09_parse_hex_nibble(b[0]);
+	lo = qnap_tsx09_parse_hex_nibble(b[1]);
+
+	if (hi < 0 || lo < 0)
+		return -1;
+
+	return (hi << 4) | lo;
+}
+
 static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
 {
 	u_int8_t addr[6];
+	int i;
 
-	if (!mac_pton(addr_str, addr))
-		return -1;
+	for (i = 0; i < 6; i++) {
+		int byte;
+
+		/*
+		 * Enforce "xx:xx:xx:xx:xx:xx\n" format.
+		 */
+		if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
+			return -1;
+
+		byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
+		if (byte < 0)
+			return -1;
+		addr[i] = byte;
+	}
 
 	printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
 
@@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
 	unsigned long addr;
 
 	for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
-		void __iomem *nor_page;
+		char *nor_page;
 		int ret = 0;
 
 		nor_page = ioremap(addr, 1024);
 		if (nor_page != NULL) {
-			ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
+			ret = qnap_tsx09_check_mac_addr(nor_page);
 			iounmap(nor_page);
 		}
 
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 0a34b0cec7b7..0ef3d9580e98 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -240,6 +240,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
 	 * goto out;
 	 */
 	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
 	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
 	PPC_BCC(COND_GE, out);
 
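
A minimal C sketch of the out-of-bounds problem this one-instruction fix
addresses (hypothetical names, not code from the patch): the BPF tail-call
index is a 32-bit value held in a 64-bit register, and PPC_CMPLW compares
only the low word, so stale upper bits can survive the bounds check and
reach the 64-bit pointer arithmetic unless the index is zero-extended
first, which is what the added PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31)
does.

	/* Illustrative model only; "slots" and "tail_call_slot" are
	 * hypothetical, not part of the JIT. */
	void *tail_call_slot(void **slots, u32 max_entries, u64 idx_reg)
	{
		/* 32-bit compare, as PPC_CMPLW does */
		if ((u32)idx_reg >= max_entries)
			return NULL;		/* check can pass... */

		/* ...while the full register still carries stale upper
		 * bits; the added RLWINM zero-extends, like this cast:
		 */
		idx_reg = (u32)idx_reg;
		return slots[idx_reg];		/* now provably in bounds */
	}
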
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2a55380ad730..60bf04b8f103 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,6 +21,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>
@@ -379,6 +380,21 @@ static const struct usb_device_id blacklist_table[] = {
 	{ }	/* Terminating entry */
 };
 
+/* The Bluetooth USB module build into some devices needs to be reset on resume,
+ * this is a problem with the platform (likely shutting off all power) not with
+ * the module itself. So we use a DMI list to match known broken platforms.
+ */
+static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+	{
+		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+		},
+	},
+	{}
+};
+
 #define BTUSB_MAX_ISOC_FRAMES	10
 
 #define BTUSB_INTR_RUNNING	0
@@ -2945,6 +2961,9 @@ static int btusb_probe(struct usb_interface *intf,
 	hdev->send = btusb_send_frame;
 	hdev->notify = btusb_notify;
 
+	if (dmi_check_system(btusb_needs_reset_resume_table))
+		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+
 #ifdef CONFIG_PM
 	err = btusb_config_oob_wake(hdev);
 	if (err)
@@ -3031,12 +3050,6 @@ static int btusb_probe(struct usb_interface *intf,
 	if (id->driver_info & BTUSB_QCA_ROME) {
 		data->setup_on_usb = btusb_setup_qca;
 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
-
-		/* QCA Rome devices lose their updated firmware over suspend,
-		 * but the USB hub doesn't notice any status change.
-		 * explicitly request a device reset on resume.
-		 */
-		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 	}
 
 #ifdef CONFIG_BT_HCIBTUSB_RTL
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 0438a64b8185..6314dfb02969 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -922,12 +922,13 @@ static int bcm_get_resources(struct bcm_device *dev)
 
 	dev->clk = devm_clk_get(dev->dev, NULL);
 
-	dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
-					    GPIOD_OUT_LOW);
+	dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
+						     GPIOD_OUT_LOW);
 	if (IS_ERR(dev->device_wakeup))
 		return PTR_ERR(dev->device_wakeup);
 
-	dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
+	dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
+						GPIOD_OUT_LOW);
 	if (IS_ERR(dev->shutdown))
 		return PTR_ERR(dev->shutdown);
 
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f5c87bd35fa1..f27f9bae1a4a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3063,9 +3063,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 	if (ndev->features & NETIF_F_RXCSUM)
 		gfar_rx_checksum(skb, fcb);
 
-	/* Tell the skb what kind of packet this is */
-	skb->protocol = eth_type_trans(skb, ndev);
-
 	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 	 * Even if vlan rx accel is disabled, on some chips
 	 * RXFCB_VLN is pseudo randomly set.
@@ -3136,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			continue;
 		}
 
+		gfar_process_frame(ndev, skb);
+
 		/* Increment the number of packets */
 		total_pkts++;
 		total_bytes += skb->len;
 
 		skb_record_rx_queue(skb, rx_queue->qindex);
 
-		gfar_process_frame(ndev, skb);
+		skb->protocol = eth_type_trans(skb, ndev);
 
 		/* Send the packet up the stack */
 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0da5aa2c8aba..9fc063af233c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1888,6 +1888,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 					      ixgbe_rx_pg_size(rx_ring),
 					      DMA_FROM_DEVICE,
 					      IXGBE_RX_DMA_ATTR);
+	} else if (ring_uses_build_skb(rx_ring)) {
+		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      IXGBE_CB(skb)->dma,
+					      offset,
+					      skb_headlen(skb),
+					      DMA_FROM_DEVICE);
 	} else {
 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index f6963b0b4a55..122506daa586 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
 	MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
 	MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
 	MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
-	MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8),
-	MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2),
-	MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6),
-	MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
-	MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
-	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
-	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8),
-	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8),
-	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),
 	MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
 	MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+	MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
+	MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
+	MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+	MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
+	MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
+	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
+	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
+	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
+	MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
 };
 
-#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38
+#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
 
 struct mlxsw_afk_element_inst { /* element instance in actual block */
 	const struct mlxsw_afk_element_info *info;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3dcc58d61506..c7e941aecc2a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1459,6 +1459,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 	}
 
 	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+	mlxsw_sp_port_vlan->ref_count = 1;
 	mlxsw_sp_port_vlan->vid = vid;
 	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
 
@@ -1486,8 +1487,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
-	if (mlxsw_sp_port_vlan)
+	if (mlxsw_sp_port_vlan) {
+		mlxsw_sp_port_vlan->ref_count++;
 		return mlxsw_sp_port_vlan;
+	}
 
 	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
 }
@@ -1496,6 +1499,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
 {
 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
 
+	if (--mlxsw_sp_port_vlan->ref_count != 0)
+		return;
+
 	if (mlxsw_sp_port_vlan->bridge_port)
 		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
 	else if (fid)
@@ -4207,13 +4213,12 @@ static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
 	.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
 };
 
-static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
-static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
-static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
-static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
-
 static void
-mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
+				      struct devlink_resource_size_params *kvd_size_params,
+				      struct devlink_resource_size_params *linear_size_params,
+				      struct devlink_resource_size_params *hash_double_size_params,
+				      struct devlink_resource_size_params *hash_single_size_params)
 {
 	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
 						 KVD_SINGLE_MIN_SIZE);
@@ -4222,37 +4227,35 @@
 	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
 	u32 linear_size_min = 0;
 
-	/* KVD top resource */
-	mlxsw_sp_kvd_size_params.size_min = kvd_size;
-	mlxsw_sp_kvd_size_params.size_max = kvd_size;
-	mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-	/* Linear part init */
-	mlxsw_sp_linear_size_params.size_min = linear_size_min;
-	mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
-					       double_size_min;
-	mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-	/* Hash double part init */
-	mlxsw_sp_hash_double_size_params.size_min = double_size_min;
-	mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
-						    linear_size_min;
-	mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-	/* Hash single part init */
-	mlxsw_sp_hash_single_size_params.size_min = single_size_min;
-	mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
-						    linear_size_min;
-	mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-	mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
+	devlink_resource_size_params_init(linear_size_params, linear_size_min,
+					  kvd_size - single_size_min -
+					  double_size_min,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
+	devlink_resource_size_params_init(hash_double_size_params,
+					  double_size_min,
+					  kvd_size - single_size_min -
+					  linear_size_min,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
+	devlink_resource_size_params_init(hash_single_size_params,
+					  single_size_min,
+					  kvd_size - double_size_min -
+					  linear_size_min,
+					  MLXSW_SP_KVD_GRANULARITY,
+					  DEVLINK_RESOURCE_UNIT_ENTRY);
 }
 
 static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
+	struct devlink_resource_size_params hash_single_size_params;
+	struct devlink_resource_size_params hash_double_size_params;
+	struct devlink_resource_size_params linear_size_params;
+	struct devlink_resource_size_params kvd_size_params;
 	u32 kvd_size, single_size, double_size, linear_size;
 	const struct mlxsw_config_profile *profile;
 	int err;
@@ -4261,13 +4264,17 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
 		return -EIO;
 
-	mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
+					      &linear_size_params,
+					      &hash_double_size_params,
+					      &hash_single_size_params);
+
 	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
 					true, kvd_size,
 					MLXSW_SP_RESOURCE_KVD,
 					DEVLINK_RESOURCE_ID_PARENT_TOP,
-					&mlxsw_sp_kvd_size_params,
+					&kvd_size_params,
 					&mlxsw_sp_resource_kvd_ops);
 	if (err)
 		return err;
@@ -4277,7 +4284,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 					false, linear_size,
 					MLXSW_SP_RESOURCE_KVD_LINEAR,
 					MLXSW_SP_RESOURCE_KVD,
-					&mlxsw_sp_linear_size_params,
+					&linear_size_params,
 					&mlxsw_sp_resource_kvd_linear_ops);
 	if (err)
 		return err;
@@ -4291,7 +4298,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 					false, double_size,
 					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
 					MLXSW_SP_RESOURCE_KVD,
-					&mlxsw_sp_hash_double_size_params,
+					&hash_double_size_params,
 					&mlxsw_sp_resource_kvd_hash_double_ops);
 	if (err)
 		return err;
@@ -4301,7 +4308,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 					false, single_size,
 					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
 					MLXSW_SP_RESOURCE_KVD,
-					&mlxsw_sp_hash_single_size_params,
+					&hash_single_size_params,
 					&mlxsw_sp_resource_kvd_hash_single_ops);
 	if (err)
 		return err;
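
The ref_count hunks above add plain get/put reference counting to
mlxsw_sp_port_vlan: a repeated _get() of an existing VID now bumps a
counter instead of creating a duplicate, and only the final _put() tears
the entry down. A condensed sketch of the pattern (hypothetical types and
helpers; the real code runs under RTNL, which is why a bare unsigned int
suffices instead of refcount_t):

	struct entry {
		unsigned int ref_count;		/* starts at 1 on create */
		u16 vid;
	};

	static struct entry *entry_get(struct table *t, u16 vid)
	{
		struct entry *e = entry_find(t, vid);

		if (e) {
			e->ref_count++;		/* reuse existing entry */
			return e;
		}
		return entry_create(t, vid);	/* sets ref_count = 1 */
	}

	static void entry_put(struct entry *e)
	{
		if (--e->ref_count != 0)
			return;			/* other users remain */
		entry_destroy(e);
	}
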
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index bdd8f94a452c..4ec1ca3c96c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -211,6 +211,7 @@ struct mlxsw_sp_port_vlan {
 	struct list_head list;
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	struct mlxsw_sp_fid *fid;
+	unsigned int ref_count;
 	u16 vid;
 	struct mlxsw_sp_bridge_port *bridge_port;
 	struct list_head bridge_vlan_node;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index bbd238e50f05..54262af4e98f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
 	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
 	[MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
 	[MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
 };
 
 static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
 	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
-	[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
 };
 
 static const int *mlxsw_sp_packet_type_sfgc_types[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 593ad31be749..161bcdc012f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				     bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 			      mac, fid, action, local_port);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
 
+out:
+	kfree(sfd_pl);
 	return err;
 }
 
@@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 				       bool adding, bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
 				  lag_vid, lag_id);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
 
+out:
+	kfree(sfd_pl);
 	return err;
 }
 
@@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 				u16 fid, u16 mid_idx, bool adding)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
+
+out:
 	kfree(sfd_pl);
 	return err;
 }
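
All three SFD helpers above gain the same write-then-verify shape: record
how many records were packed into the payload, issue the register write,
and re-read the count afterwards, since the device rewrites the payload
with the number of records it actually processed. A short count means the
FDB operation silently failed, which is now surfaced as -EBUSY. A
condensed sketch of the pattern (hypothetical names, not the driver's
API):

	static int write_and_verify(struct device_ctx *dc, char *pl)
	{
		u8 want = pl_num_rec_get(pl);	/* records we packed */
		int err = reg_write(dc, pl);	/* device updates pl */

		if (err)
			return err;
		/* fewer processed than packed: entry not installed */
		return pl_num_rec_get(pl) == want ? 0 : -EBUSY;
	}
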
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 92dcf8717fc6..14c839bb09e7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -439,6 +439,17 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
 		     enum_index);
 }
 
+static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
+			     int enum_index)
+{
+	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
+{
+	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
 	return mdp->reg_offset == sh_eth_offset_gigabit;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a6753ccba711..e5fe70134690 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -567,15 +567,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
 	return mdp->tsu_addr + mdp->reg_offset[enum_index];
 }
 
-static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
-				    int enum_index)
-{
-	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
-static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
-{
-	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
 #endif	/* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 17e529af79dc..0265d703eb03 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev,
 	if (unlikely(!net_device || net_device->destroy))
 		return -ENODEV;
 
-	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
-	 * here before the negotiation with the host is finished and
-	 * send_section_map may not be allocated yet.
-	 */
-	if (unlikely(!net_device->send_section_map))
-		return -EAGAIN;
-
 	nvchan = &net_device->chan_table[packet->q_idx];
 	packet->send_buf_index = NETVSC_INVALID_INDEX;
 	packet->cp_partial = false;
@@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev,
 	/* Send control message directly without accessing msd (Multi-Send
 	 * Data) field which may be changed during data packet processing.
 	 */
-	if (!skb) {
-		cur_send = packet;
-		goto send_now;
-	}
+	if (!skb)
+		return netvsc_send_pkt(device, packet, net_device, pb, skb);
 
 	/* batch packets in send buffer if possible */
 	msdp = &nvchan->msd;
@@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev,
 		}
 	}
 
-send_now:
 	if (cur_send)
 		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
 
@@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 	if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
 	    work_done < budget &&
 	    napi_complete_done(napi, work_done) &&
-	    hv_end_read(&channel->inbound)) {
+	    hv_end_read(&channel->inbound) &&
+	    napi_schedule_prep(napi)) {
 		hv_begin_read(&channel->inbound);
-		napi_reschedule(napi);
+		__napi_schedule(napi);
 	}
 
 	/* Driver may overshoot since multiple packets per descriptor */
@@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context)
 		/* disable interupts from host */
 		hv_begin_read(rbi);
 
-		__napi_schedule(&nvchan->napi);
+		__napi_schedule_irqoff(&nvchan->napi);
 	}
 }
 
@@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 			 netvsc_channel_cb, net_device->chan_table);
 
 	if (ret != 0) {
-		netif_napi_del(&net_device->chan_table[0].napi);
 		netdev_err(ndev, "unable to open channel: %d\n", ret);
 		goto cleanup;
 	}
@@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 
 	napi_enable(&net_device->chan_table[0].napi);
 
-	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
-	 * populated.
-	 */
-	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
-
 	/* Connect with the NetVsp */
 	ret = netvsc_connect_vsp(device, net_device, device_info);
 	if (ret != 0) {
@@ -1319,6 +1304,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 		goto close;
 	}
 
+	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
+	 * populated.
+	 */
+	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
+
 	return net_device;
 
 close:
@@ -1329,6 +1319,7 @@ close:
 	vmbus_close(device->channel);
 
 cleanup:
+	netif_napi_del(&net_device->chan_table[0].napi);
 	free_netvsc_device(&net_device->rcu);
 
 	return ERR_PTR(ret);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c5584c2d440e..cdb78eefab67 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -66,10 +66,36 @@ static int debug = -1;
 module_param(debug, int, S_IRUGO);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-static void netvsc_set_multicast_list(struct net_device *net)
+static void netvsc_change_rx_flags(struct net_device *net, int change)
 {
-	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+	struct net_device_context *ndev_ctx = netdev_priv(net);
+	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+	int inc;
+
+	if (!vf_netdev)
+		return;
+
+	if (change & IFF_PROMISC) {
+		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
+		dev_set_promiscuity(vf_netdev, inc);
+	}
+
+	if (change & IFF_ALLMULTI) {
+		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
+		dev_set_allmulti(vf_netdev, inc);
+	}
+}
+
+static void netvsc_set_rx_mode(struct net_device *net)
+{
+	struct net_device_context *ndev_ctx = netdev_priv(net);
+	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
+
+	if (vf_netdev) {
+		dev_uc_sync(vf_netdev, net);
+		dev_mc_sync(vf_netdev, net);
+	}
 
 	rndis_filter_update(nvdev);
 }
@@ -91,12 +117,11 @@ static int netvsc_open(struct net_device *net)
 		return ret;
 	}
 
-	netif_tx_wake_all_queues(net);
-
 	rdev = nvdev->extension;
-
-	if (!rdev->link_state)
+	if (!rdev->link_state) {
 		netif_carrier_on(net);
+		netif_tx_wake_all_queues(net);
+	}
 
 	if (vf_netdev) {
 		/* Setting synthetic device up transparently sets
@@ -299,8 +324,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	rcu_read_lock();
 	vf_netdev = rcu_dereference(ndc->vf_netdev);
 	if (vf_netdev) {
-		txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
-		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
+
+		if (vf_ops->ndo_select_queue)
+			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
+						       accel_priv, fallback);
+		else
+			txq = fallback(vf_netdev, skb);
+
+		/* Record the queue selected by VF so that it can be
+		 * used for common case where VF has more queues than
+		 * the synthetic device.
+		 */
+		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
 	} else {
 		txq = netvsc_pick_tx(ndev, skb);
 	}
@@ -1576,7 +1612,8 @@ static const struct net_device_ops device_ops = {
 	.ndo_open =			netvsc_open,
 	.ndo_stop =			netvsc_close,
 	.ndo_start_xmit =		netvsc_start_xmit,
-	.ndo_set_rx_mode =		netvsc_set_multicast_list,
+	.ndo_change_rx_flags =		netvsc_change_rx_flags,
+	.ndo_set_rx_mode =		netvsc_set_rx_mode,
 	.ndo_change_mtu =		netvsc_change_mtu,
 	.ndo_validate_addr =		eth_validate_addr,
 	.ndo_set_mac_address =		netvsc_set_mac_addr,
@@ -1807,6 +1844,11 @@ static void __netvsc_vf_setup(struct net_device *ndev,
 		netdev_warn(vf_netdev,
 			    "unable to change mtu to %u\n", ndev->mtu);
 
+	/* set multicast etc flags on VF */
+	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
+	dev_uc_sync(vf_netdev, ndev);
+	dev_mc_sync(vf_netdev, ndev);
+
 	if (netif_running(ndev)) {
 		ret = dev_open(vf_netdev);
 		if (ret)
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c3ca191fea7f..8927c483c217 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -854,15 +854,19 @@ static void rndis_set_multicast(struct work_struct *w)
 {
 	struct rndis_device *rdev
 		= container_of(w, struct rndis_device, mcast_work);
+	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
+	unsigned int flags = rdev->ndev->flags;
 
-	if (rdev->ndev->flags & IFF_PROMISC)
-		rndis_filter_set_packet_filter(rdev,
-					       NDIS_PACKET_TYPE_PROMISCUOUS);
-	else
-		rndis_filter_set_packet_filter(rdev,
-					       NDIS_PACKET_TYPE_BROADCAST |
-					       NDIS_PACKET_TYPE_ALL_MULTICAST |
-					       NDIS_PACKET_TYPE_DIRECTED);
+	if (flags & IFF_PROMISC) {
+		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
+	} else {
+		if (flags & IFF_ALLMULTI)
+			flags |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+		if (flags & IFF_BROADCAST)
+			flags |= NDIS_PACKET_TYPE_BROADCAST;
+	}
+
+	rndis_filter_set_packet_filter(rdev, filter);
 }
 
 void rndis_filter_update(struct netvsc_device *nvdev)
@@ -1340,6 +1344,9 @@ void rndis_filter_device_remove(struct hv_device *dev,
 {
 	struct rndis_device *rndis_dev = net_dev->extension;
 
+	/* Don't try and setup sub channels if about to halt */
+	cancel_work_sync(&net_dev->subchan_work);
+
 	/* Halt and release the rndis device */
 	rndis_filter_halt_device(rndis_dev);
 
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e3e29c2b028b..a6f924fee584 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -819,7 +819,7 @@ void phy_start(struct phy_device *phydev)
 		break;
 	case PHY_HALTED:
 		/* if phy was suspended, bring the physical link up again */
-		phy_resume(phydev);
+		__phy_resume(phydev);
 
 		/* make sure interrupts are re-enabled for the PHY */
 		if (phy_interrupt_is_valid(phydev)) {
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index d39ae77707ef..478405e544cc 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev)
 	if (!mdio_bus_phy_may_suspend(phydev))
 		goto no_resume;
 
-	mutex_lock(&phydev->lock);
 	ret = phy_resume(phydev);
-	mutex_unlock(&phydev->lock);
 	if (ret < 0)
 		return ret;
 
@@ -1041,9 +1039,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	if (err)
 		goto error;
 
-	mutex_lock(&phydev->lock);
 	phy_resume(phydev);
-	mutex_unlock(&phydev->lock);
 	phy_led_triggers_register(phydev);
 
 	return err;
@@ -1172,7 +1168,7 @@ int phy_suspend(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_suspend);
 
-int phy_resume(struct phy_device *phydev)
+int __phy_resume(struct phy_device *phydev)
 {
 	struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
 	int ret = 0;
@@ -1189,6 +1185,18 @@ int phy_resume(struct phy_device *phydev)
 
 	return ret;
 }
+EXPORT_SYMBOL(__phy_resume);
+
+int phy_resume(struct phy_device *phydev)
+{
+	int ret;
+
+	mutex_lock(&phydev->lock);
+	ret = __phy_resume(phydev);
+	mutex_unlock(&phydev->lock);
+
+	return ret;
+}
 EXPORT_SYMBOL(phy_resume);
 
 int phy_loopback(struct phy_device *phydev, bool enable)
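
The phy_resume() change above is the usual kernel split between a locked
wrapper and a double-underscore worker: paths that already hold
phydev->lock (such as the PHY state machine in phy_start()) call
__phy_resume() directly, while everyone else goes through phy_resume(),
which takes the mutex itself. A generic sketch of the convention
(hypothetical names):

	/* Worker: caller must already hold obj->lock */
	static int __do_resume(struct obj *o)
	{
		lockdep_assert_held(&o->lock);	/* documents the contract */
		return hw_resume(o);
	}

	/* Public entry point: acquires and releases the lock */
	int do_resume(struct obj *o)
	{
		int ret;

		mutex_lock(&o->lock);
		ret = __do_resume(o);
		mutex_unlock(&o->lock);
		return ret;
	}
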
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 255a5def56e9..fa2a9bdd1866 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3161,6 +3161,15 @@ ppp_connect_channel(struct channel *pch, int unit)
 		goto outl;
 
 	ppp_lock(ppp);
+	spin_lock_bh(&pch->downl);
+	if (!pch->chan) {
+		/* Don't connect unregistered channels */
+		spin_unlock_bh(&pch->downl);
+		ppp_unlock(ppp);
+		ret = -ENOTCONN;
+		goto outl;
+	}
+	spin_unlock_bh(&pch->downl);
 	if (pch->file.hdrlen > ppp->file.hdrlen)
 		ppp->file.hdrlen = pch->file.hdrlen;
 	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b52258c327d2..7433bb2e4451 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -181,7 +181,6 @@ struct tun_file {
181 struct tun_struct *detached; 181 struct tun_struct *detached;
182 struct ptr_ring tx_ring; 182 struct ptr_ring tx_ring;
 	struct xdp_rxq_info xdp_rxq;
-	int xdp_pending_pkts;
 };
 
 struct tun_flow_entry {
@@ -1643,6 +1642,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	else
 		*skb_xdp = 0;
 
+	preempt_disable();
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(tun->xdp_prog);
 	if (xdp_prog && !*skb_xdp) {
@@ -1662,11 +1662,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		case XDP_REDIRECT:
 			get_page(alloc_frag->page);
 			alloc_frag->offset += buflen;
-			++tfile->xdp_pending_pkts;
 			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
+			xdp_do_flush_map();
 			if (err)
 				goto err_redirect;
 			rcu_read_unlock();
+			preempt_enable();
 			return NULL;
 		case XDP_TX:
 			xdp_xmit = true;
@@ -1688,6 +1689,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	skb = build_skb(buf, buflen);
 	if (!skb) {
 		rcu_read_unlock();
+		preempt_enable();
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1700,10 +1702,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		skb->dev = tun->dev;
 		generic_xdp_tx(skb, xdp_prog);
 		rcu_read_unlock();
+		preempt_enable();
 		return NULL;
 	}
 
 	rcu_read_unlock();
+	preempt_enable();
 
 	return skb;
 
@@ -1711,6 +1715,7 @@ err_redirect:
 	put_page(alloc_frag->page);
err_xdp:
 	rcu_read_unlock();
+	preempt_enable();
 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
 	return NULL;
 }
@@ -1984,11 +1989,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	result = tun_get_user(tun, tfile, NULL, from,
 			      file->f_flags & O_NONBLOCK, false);
 
-	if (tfile->xdp_pending_pkts) {
-		tfile->xdp_pending_pkts = 0;
-		xdp_do_flush_map();
-	}
-
 	tun_put(tun);
 	return result;
 }
@@ -2325,13 +2325,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
 			   m->msg_flags & MSG_DONTWAIT,
 			   m->msg_flags & MSG_MORE);
-
-	if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
-	    !(m->msg_flags & MSG_MORE)) {
-		tfile->xdp_pending_pkts = 0;
-		xdp_do_flush_map();
-	}
-
 	tun_put(tun);
 	return ret;
 }
@@ -3163,7 +3156,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 
 	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-	tfile->xdp_pending_pkts = 0;
 
 	return 0;
 }
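Note: the tun change above drops the deferred xdp_pending_pkts bookkeeping and instead flushes every XDP_REDIRECT immediately, inside a preemption-disabled section, because both the redirect and its flush operate on per-CPU state and must not migrate between CPUs in between. A minimal sketch of the resulting critical section (not the literal driver code; prog, xdp and err stand in for the driver's locals):

    /* Sketch only: the ordering the fix establishes. */
    preempt_disable();                 /* pin the per-CPU redirect state */
    rcu_read_lock();                   /* xdp_prog is RCU-protected */
    prog = rcu_dereference(tun->xdp_prog);
    if (prog) {
            err = xdp_do_redirect(tun->dev, &xdp, prog);
            xdp_do_flush_map();        /* flush before leaving this CPU */
    }
    rcu_read_unlock();
    preempt_enable();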
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 05dca3e5c93d..fff4b13eece2 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -896,6 +896,12 @@ static const struct usb_device_id products[] = {
 			USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
 }, {
+	/* Cinterion PLS8 modem by GEMALTO */
+	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
+}, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			   USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 958b2e8b90f6..86f7196f9d91 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1794,7 +1794,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 
 		tx_data += len;
 		agg->skb_len += len;
-		agg->skb_num++;
+		agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
 
 		dev_kfree_skb_any(skb);
 
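Note: skb_shinfo(skb)->gso_segs is zero for a non-GSO skb, so the `?: 1` fallback counts a plain skb as one wire packet while a GSO skb contributes its real segment count to the aggregation statistics. A hedged helper sketch of the idiom (skb_wire_packets is a hypothetical name, not a kernel API):

    /* Sketch: wire packets represented by one skb; gso_segs is 0 for
     * non-GSO skbs, hence the ?: 1 fallback.
     */
    static unsigned int skb_wire_packets(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_segs ?: 1;
    }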
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9bb9e562b893..23374603e4d9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -504,6 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	page_off += *len;
 
 	while (--*num_buf) {
+		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 		unsigned int buflen;
 		void *buf;
 		int off;
@@ -518,7 +519,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 		/* guard against a misconfigured or uncooperative backend that
 		 * is sending packet larger than the MTU.
 		 */
-		if ((page_off + buflen) > PAGE_SIZE) {
+		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
 			put_page(p);
 			goto err_buf;
 		}
@@ -690,6 +691,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	unsigned int truesize;
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
 	bool sent;
+	int err;
 
 	head_skb = NULL;
 
@@ -701,7 +703,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		void *data;
 		u32 act;
 
-		/* This happens when rx buffer size is underestimated */
+		/* This happens when rx buffer size is underestimated
+		 * or headroom is not enough because of the buffer
+		 * was refilled before XDP is set. This should only
+		 * happen for the first several packets, so we don't
+		 * care much about its performance.
+		 */
 		if (unlikely(num_buf > 1 ||
 			     headroom < virtnet_get_headroom(vi))) {
 			/* linearize data for XDP */
@@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
-		if (act != XDP_PASS)
-			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
-
 		switch (act) {
 		case XDP_PASS:
 			/* recalculate offset to account for any header
@@ -770,6 +774,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 				goto err_xdp;
 			rcu_read_unlock();
 			goto xdp_xmit;
+		case XDP_REDIRECT:
+			err = xdp_do_redirect(dev, &xdp, xdp_prog);
+			if (err) {
+				if (unlikely(xdp_page != page))
+					put_page(xdp_page);
+				goto err_xdp;
+			}
+			*xdp_xmit = true;
+			if (unlikely(xdp_page != page))
+				goto err_xdp;
+			rcu_read_unlock();
+			goto xdp_xmit;
 		default:
 			bpf_warn_invalid_xdp_action(act);
 		case XDP_ABORTED:
@@ -1013,13 +1029,18 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
 }
 
 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
-					  struct ewma_pkt_len *avg_pkt_len)
+					  struct ewma_pkt_len *avg_pkt_len,
+					  unsigned int room)
 {
 	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	unsigned int len;
 
-	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
+	if (room)
+		return PAGE_SIZE - room;
+
+	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
 				rq->min_buf_len, PAGE_SIZE - hdr_len);
+
 	return ALIGN(len, L1_CACHE_BYTES);
 }
 
@@ -1028,21 +1049,27 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 {
 	struct page_frag *alloc_frag = &rq->alloc_frag;
 	unsigned int headroom = virtnet_get_headroom(vi);
+	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
 	char *buf;
 	void *ctx;
 	int err;
 	unsigned int len, hole;
 
-	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len);
-	if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp)))
+	/* Extra tailroom is needed to satisfy XDP's assumption. This
+	 * means rx frags coalescing won't work, but consider we've
+	 * disabled GSO for XDP, it won't be a big issue.
+	 */
+	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
+	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
 		return -ENOMEM;
 
 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
 	buf += headroom; /* advance address leaving hole at front of pkt */
 	get_page(alloc_frag->page);
-	alloc_frag->offset += len + headroom;
+	alloc_frag->offset += len + room;
 	hole = alloc_frag->size - alloc_frag->offset;
-	if (hole < len + headroom) {
+	if (hole < len + room) {
 		/* To avoid internal fragmentation, if there is very likely not
 		 * enough space for another buffer, add the remaining space to
 		 * the current buffer.
@@ -2185,8 +2212,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	}
 
 	/* Make sure NAPI is not using any XDP TX queues for RX. */
-	for (i = 0; i < vi->max_queue_pairs; i++)
-		napi_disable(&vi->rq[i].napi);
+	if (netif_running(dev))
+		for (i = 0; i < vi->max_queue_pairs; i++)
+			napi_disable(&vi->rq[i].napi);
 
 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
 	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
@@ -2205,7 +2233,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		}
 		if (old_prog)
 			bpf_prog_put(old_prog);
-		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+		if (netif_running(dev))
+			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
 	}
 
 	return 0;
@@ -2576,12 +2605,15 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
 {
 	struct virtnet_info *vi = netdev_priv(queue->dev);
 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
+	unsigned int headroom = virtnet_get_headroom(vi);
+	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
 	struct ewma_pkt_len *avg;
 
 	BUG_ON(queue_index >= vi->max_queue_pairs);
 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
 	return sprintf(buf, "%u\n",
		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
					     SKB_DATA_ALIGN(headroom + tailroom)));
 }
 
static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
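Note: with an XDP program attached, every mergeable receive buffer must reserve headroom in front and skb_shared_info tailroom behind, so the usable length shrinks to the page size minus that aligned reservation. A sketch of the budget the hunks above implement (illustrative only; actual numbers depend on SKB_DATA_ALIGN and kernel config):

    /* Sketch: receive-buffer budget when XDP is active. */
    unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
    unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
    unsigned int len = PAGE_SIZE - room;    /* what the device may fill */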
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index afeca6bcdade..ab8b3cbbb205 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t)
 			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
 				     0, NULL);
 			proto->restart_counter--;
-		} else
+		} else if (netif_carrier_ok(proto->dev))
+			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+				     0, NULL);
+		else
 			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
 				     0, NULL);
 		break;
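Note: once the restart counter is exhausted, the timer no longer declares the negotiation dead unconditionally; a link that still has carrier gets TO_GOOD and keeps waiting, and only a carrier-less link gets TO_BAD. A simplified sketch of the decision (comments only; the surrounding retransmit logic is elided):

    /* Sketch: timeout handling after the change. */
    if (proto->restart_counter > 0) {
            /* retransmit and decrement, as before */
    } else if (netif_carrier_ok(proto->dev)) {
            /* carrier up: treat the timeout as recoverable (TO_GOOD) */
    } else {
            /* no carrier: give up (TO_BAD) */
    }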
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index ca72f3311004..c8b308cfabf1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2134,24 +2134,25 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 	}
 	reply->callback = reply_cb;
 	reply->param = reply_param;
-	if (card->state == CARD_STATE_DOWN)
-		reply->seqno = QETH_IDX_COMMAND_SEQNO;
-	else
-		reply->seqno = card->seqno.ipa++;
+
 	init_waitqueue_head(&reply->wait_q);
-	spin_lock_irqsave(&card->lock, flags);
-	list_add_tail(&reply->list, &card->cmd_waiter_list);
-	spin_unlock_irqrestore(&card->lock, flags);
 
 	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
-	qeth_prepare_control_data(card, len, iob);
 
 	if (IS_IPA(iob->data)) {
 		cmd = __ipa_cmd(iob);
+		cmd->hdr.seqno = card->seqno.ipa++;
+		reply->seqno = cmd->hdr.seqno;
 		event_timeout = QETH_IPA_TIMEOUT;
 	} else {
+		reply->seqno = QETH_IDX_COMMAND_SEQNO;
 		event_timeout = QETH_TIMEOUT;
 	}
+	qeth_prepare_control_data(card, len, iob);
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->cmd_waiter_list);
+	spin_unlock_irqrestore(&card->lock, flags);
 
 	timeout = jiffies + event_timeout;
 
@@ -2933,7 +2934,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
 	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
 	cmd->hdr.command = command;
 	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
-	cmd->hdr.seqno = card->seqno.ipa;
+	/* cmd->hdr.seqno is set by qeth_send_control_data() */
 	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
 	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
 	if (card->options.layer2)
@@ -3898,10 +3899,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 int qeth_get_elements_no(struct qeth_card *card,
 		     struct sk_buff *skb, int extra_elems, int data_offset)
 {
-	int elements = qeth_get_elements_for_range(
-				(addr_t)skb->data + data_offset,
-				(addr_t)skb->data + skb_headlen(skb)) +
-			qeth_get_elements_for_frags(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+	addr_t start = (addr_t)skb->data + data_offset;
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
 
 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
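Note: the element accounting now skips the linear part entirely when data_offset already consumes the whole headlen, so an empty range no longer inflates the count; the TSO variant in qeth_l3_main.c below gets the same guard. The pattern, restated as a sketch (addr_t is the driver's address type; variables as in the hunk):

    /* Sketch: only a non-empty linear range contributes elements. */
    addr_t start = (addr_t)skb->data + data_offset;
    addr_t end = (addr_t)skb->data + skb_headlen(skb);

    if (start != end)   /* an empty range must not count as one element */
            elements += qeth_get_elements_for_range(start, end);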
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index bdd45f4dcace..498fe9af2cdb 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -40,8 +40,40 @@ struct qeth_ipaddr {
 			unsigned int pfxlen;
 		} a6;
 	} u;
-
 };
+
+static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
+					 struct qeth_ipaddr *a2)
+{
+	if (a1->proto != a2->proto)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
+	return a1->u.a4.addr == a2->u.a4.addr;
+}
+
+static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
+					  struct qeth_ipaddr *a2)
+{
+	/* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
+	 * so 'proto' and 'addr' match for sure.
+	 *
+	 * For ucast:
+	 * - 'mac' is always 0.
+	 * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
+	 *   values are required to avoid mixups in takeover eligibility.
+	 *
+	 * For mcast,
+	 * - 'mac' is mapped from the IP, and thus always matches.
+	 * - 'mask'/'pfxlen' is always 0.
+	 */
+	if (a1->type != a2->type)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
+	return a1->u.a4.mask == a2->u.a4.mask;
+}
+
 static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
 {
 	u64 ret = 0;
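Note: matching is now split into a cheap by-IP test and a stricter full test, which lets callers tell "nothing registered for this IP" apart from "same IP with a conflicting registration". The intended pattern, sketched with a hypothetical caller (card and query stand in for the real arguments):

    /* Sketch: find by IP first, then verify the remaining fields. */
    addr = qeth_l3_find_addr_by_ip(card, query);
    if (!addr) {
            /* no entry for this IP at all */
    } else if (qeth_l3_addr_match_all(addr, query)) {
            /* same logical address: e.g. bump the refcount */
    } else {
            /* same IP, conflicting type/mask: reject, e.g. -EADDRINUSE */
    }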
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b0c888e86cd4..962a04b68dd2 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -67,6 +67,24 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
 		qeth_l3_ipaddr6_to_string(addr, buf);
 }
 
+static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
+						   struct qeth_ipaddr *query)
+{
+	u64 key = qeth_l3_ipaddr_hash(query);
+	struct qeth_ipaddr *addr;
+
+	if (query->is_multicast) {
+		hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
+			if (qeth_l3_addr_match_ip(addr, query))
+				return addr;
+	} else {
+		hash_for_each_possible(card->ip_htable, addr, hnode, key)
+			if (qeth_l3_addr_match_ip(addr, query))
+				return addr;
+	}
+	return NULL;
+}
+
 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
 {
 	int i, j;
@@ -120,34 +138,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
 	return rc;
 }
 
-inline int
-qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
-{
-	return addr1->proto == addr2->proto &&
-	       !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
-	       ether_addr_equal_64bits(addr1->mac, addr2->mac);
-}
-
-static struct qeth_ipaddr *
-qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
-{
-	struct qeth_ipaddr *addr;
-
-	if (tmp_addr->is_multicast) {
-		hash_for_each_possible(card->ip_mc_htable, addr,
-				       hnode, qeth_l3_ipaddr_hash(tmp_addr))
-			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
-				return addr;
-	} else {
-		hash_for_each_possible(card->ip_htable, addr,
-				       hnode, qeth_l3_ipaddr_hash(tmp_addr))
-			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
-				return addr;
-	}
-
-	return NULL;
-}
-
 int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
 	int rc = 0;
@@ -162,23 +152,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}
 
-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
-	if (!addr)
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
 		return -ENOENT;
 
 	addr->ref_counter--;
-	if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
-				      addr->type == QETH_IP_TYPE_RXIP))
+	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
 		return rc;
 	if (addr->in_progress)
 		return -EINPROGRESS;
 
-	if (!qeth_card_hw_is_reachable(card)) {
-		addr->disp_flag = QETH_DISP_ADDR_DELETE;
-		return 0;
-	}
-
-	rc = qeth_l3_deregister_addr_entry(card, addr);
+	if (qeth_card_hw_is_reachable(card))
+		rc = qeth_l3_deregister_addr_entry(card, addr);
 
 	hash_del(&addr->hnode);
 	kfree(addr);
@@ -190,6 +175,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
 	int rc = 0;
 	struct qeth_ipaddr *addr;
+	char buf[40];
 
 	QETH_CARD_TEXT(card, 4, "addip");
 
@@ -200,8 +186,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}
 
-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
-	if (!addr) {
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (addr) {
+		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
+			return -EADDRINUSE;
+		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
+			addr->ref_counter++;
+			return 0;
+		}
+		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
+					 buf);
+		dev_warn(&card->gdev->dev,
+			 "Registering IP address %s failed\n", buf);
+		return -EADDRINUSE;
+	} else {
 		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
 		if (!addr)
 			return -ENOMEM;
@@ -241,19 +239,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		    (rc == IPA_RC_LAN_OFFLINE)) {
 			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			if (addr->ref_counter < 1) {
-				qeth_l3_delete_ip(card, addr);
+				qeth_l3_deregister_addr_entry(card, addr);
+				hash_del(&addr->hnode);
 				kfree(addr);
 			}
 		} else {
 			hash_del(&addr->hnode);
 			kfree(addr);
 		}
-	} else {
-		if (addr->type == QETH_IP_TYPE_NORMAL ||
-		    addr->type == QETH_IP_TYPE_RXIP)
-			addr->ref_counter++;
 	}
-
 	return rc;
 }
 
@@ -321,11 +315,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
 	spin_lock_bh(&card->ip_lock);
 
 	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
-		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-			qeth_l3_deregister_addr_entry(card, addr);
-			hash_del(&addr->hnode);
-			kfree(addr);
-		} else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
 			if (addr->proto == QETH_PROT_IPV4) {
 				addr->in_progress = 1;
 				spin_unlock_bh(&card->ip_lock);
@@ -643,12 +633,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		rc = qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
@@ -713,12 +698,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		rc = qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
@@ -1239,8 +1219,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 		tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
 		tmp->is_multicast = 1;
 
-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 		} else {
 			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1319,8 +1300,9 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
 		       sizeof(struct in6_addr));
 		tmp->is_multicast = 1;
 
-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			continue;
 		}
@@ -2450,11 +2432,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
 			struct sk_buff *skb, int extra_elems)
 {
-	addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
-	int elements = qeth_get_elements_for_range(
-				tcpdptr,
-				(addr_t)skb->data + skb_headlen(skb)) +
-				qeth_get_elements_for_frags(skb);
+	addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
 
 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 5a0c3e53e7c2..d7069539f351 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -924,6 +924,7 @@ void phy_device_remove(struct phy_device *phydev);
 int phy_init_hw(struct phy_device *phydev);
 int phy_suspend(struct phy_device *phydev);
 int phy_resume(struct phy_device *phydev);
+int __phy_resume(struct phy_device *phydev);
 int phy_loopback(struct phy_device *phydev, bool enable);
 struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
 			      phy_interface_t interface);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c1e66bdcf583..ddf77cf4ff2d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3285,8 +3285,7 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
@@ -4104,38 +4103,6 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
 	return !skb->head_frag || skb_cloned(skb);
 }
 
-/**
- * skb_gso_network_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_network_seglen is used to determine the real size of the
- * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
- *
- * The MAC/L2 header is not accounted for.
- */
-static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
-{
-	unsigned int hdr_len = skb_transport_header(skb) -
-			       skb_network_header(skb);
-	return hdr_len + skb_gso_transport_seglen(skb);
-}
-
-/**
- * skb_gso_mac_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_mac_seglen is used to determine the real size of the
- * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
- * headers (TCP/UDP).
- */
-static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
-{
-	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
-	return hdr_len + skb_gso_transport_seglen(skb);
-}
-
 /* Local Checksum Offload.
  * Compute outer checksum based on the assumption that the
  * inner checksum will be offloaded later.
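Note: callers used to compute a per-segment length with the removed helpers and compare it against an MTU themselves; they now ask the validator directly, which also copes with the GSO_BY_FRAGS case where no single segment size exists. A sketch of the caller-side migration (too_big is a hypothetical label in the calling function):

    /* Old pattern (removed helpers):
     *     if (skb_gso_network_seglen(skb) > mtu) goto too_big;
     * New pattern:
     */
    if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
            goto too_big;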
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 6545b03e97f7..4de35ed12bcc 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -257,6 +257,18 @@ struct devlink_resource_size_params {
 	enum devlink_resource_unit unit;
 };
 
+static inline void
+devlink_resource_size_params_init(struct devlink_resource_size_params *size_params,
+				  u64 size_min, u64 size_max,
+				  u64 size_granularity,
+				  enum devlink_resource_unit unit)
+{
+	size_params->size_min = size_min;
+	size_params->size_max = size_max;
+	size_params->size_granularity = size_granularity;
+	size_params->unit = unit;
+}
+
 /**
  * struct devlink_resource - devlink resource
  * @name: name of the resource
@@ -278,7 +290,7 @@ struct devlink_resource {
 	u64 size_new;
 	bool size_valid;
 	struct devlink_resource *parent;
-	struct devlink_resource_size_params *size_params;
+	struct devlink_resource_size_params size_params;
 	struct list_head list;
 	struct list_head resource_list;
 	const struct devlink_resource_ops *resource_ops;
@@ -402,7 +414,7 @@ int devlink_resource_register(struct devlink *devlink,
 			      u64 resource_size,
 			      u64 resource_id,
 			      u64 parent_resource_id,
-			      struct devlink_resource_size_params *size_params,
+			      const struct devlink_resource_size_params *size_params,
 			      const struct devlink_resource_ops *resource_ops);
 void devlink_resources_unregister(struct devlink *devlink,
 				  struct devlink_resource *resource);
@@ -556,7 +568,7 @@
 			  u64 resource_size,
 			  u64 resource_id,
 			  u64 parent_resource_id,
-			  struct devlink_resource_size_params *size_params,
+			  const struct devlink_resource_size_params *size_params,
 			  const struct devlink_resource_ops *resource_ops)
 {
 	return 0;
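Note: with size_params now embedded in struct devlink_resource by value, a driver fills a short-lived stack copy through the new initializer instead of keeping a long-lived object alive behind a pointer. A hypothetical driver-side use, with illustrative limits:

    struct devlink_resource_size_params params;

    /* min 0, max 1M entries, granularity 1 (illustrative values) */
    devlink_resource_size_params_init(&params, 0, 1 << 20, 1,
                                      DEVLINK_RESOURCE_UNIT_ENTRY);
    /* ...then pass &params to devlink_resource_register(), which now
     * copies the parameters into the resource rather than storing the
     * pointer.
     */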
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5fb69a85d967..c6eff108aa99 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1356,6 +1356,13 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
 	return reg->type == PTR_TO_CTX;
 }
 
+static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
+{
+	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+	return type_is_pkt_pointer(reg->type);
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
 				   const struct bpf_reg_state *reg,
 				   int off, int size, bool strict)
@@ -1416,10 +1423,10 @@ static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
 }
 
 static int check_ptr_alignment(struct bpf_verifier_env *env,
-			       const struct bpf_reg_state *reg,
-			       int off, int size)
+			       const struct bpf_reg_state *reg, int off,
+			       int size, bool strict_alignment_once)
 {
-	bool strict = env->strict_alignment;
+	bool strict = env->strict_alignment || strict_alignment_once;
 	const char *pointer_desc = "";
 
 	switch (reg->type) {
@@ -1576,9 +1583,9 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
  * if t==write && value_regno==-1, some unknown value is stored into memory
  * if t==read && value_regno==-1, don't care what we read from memory
 */
-static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
-			    int bpf_size, enum bpf_access_type t,
-			    int value_regno)
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
+			    int off, int bpf_size, enum bpf_access_type t,
+			    int value_regno, bool strict_alignment_once)
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = regs + regno;
@@ -1590,7 +1597,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		return size;
 
 	/* alignment checks will add in reg->off themselves */
-	err = check_ptr_alignment(env, reg, off, size);
+	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
 	if (err)
 		return err;
 
@@ -1735,21 +1742,23 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 		return -EACCES;
 	}
 
-	if (is_ctx_reg(env, insn->dst_reg)) {
-		verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
-			insn->dst_reg);
+	if (is_ctx_reg(env, insn->dst_reg) ||
+	    is_pkt_reg(env, insn->dst_reg)) {
+		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+			insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
+			"context" : "packet");
 		return -EACCES;
 	}
 
 	/* check whether atomic_add can read the memory */
 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-			       BPF_SIZE(insn->code), BPF_READ, -1);
+			       BPF_SIZE(insn->code), BPF_READ, -1, true);
 	if (err)
 		return err;
 
 	/* check whether atomic_add can write into the same memory */
 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-				BPF_SIZE(insn->code), BPF_WRITE, -1);
+				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }
 
 /* when register 'regno' is passed into function that will read 'access_size'
@@ -2388,7 +2397,8 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 	 * is inferred from register state.
	 */
 	for (i = 0; i < meta.access_size; i++) {
-		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
+		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
+				       BPF_WRITE, -1, false);
 		if (err)
 			return err;
 	}
@@ -4632,7 +4642,7 @@ static int do_check(struct bpf_verifier_env *env)
 		 */
 		err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
 				       BPF_SIZE(insn->code), BPF_READ,
-				       insn->dst_reg);
+				       insn->dst_reg, false);
 		if (err)
 			return err;
 
@@ -4684,7 +4694,7 @@
 		/* check that memory (dst_reg + off) is writeable */
 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 				       BPF_SIZE(insn->code), BPF_WRITE,
-				       insn->src_reg);
+				       insn->src_reg, false);
 		if (err)
 			return err;
 
@@ -4719,7 +4729,7 @@
 		/* check that memory (dst_reg + off) is writeable */
 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 				       BPF_SIZE(insn->code), BPF_WRITE,
-				       -1);
+				       -1, false);
 		if (err)
 			return err;
 
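Note: BPF_XADD both reads and writes its destination, and an unaligned atomic on packet memory would be unsafe regardless of the architecture-wide setting, so the xadd path forces strict alignment while ordinary loads and stores keep honoring env->strict_alignment. The call shape, as used in the hunks above:

    /* xadd: force strict alignment for both halves of the access */
    err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                           BPF_SIZE(insn->code), BPF_READ, -1, true);

    /* ordinary load: fall back to the global strict_alignment flag */
    err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
                           BPF_SIZE(insn->code), BPF_READ,
                           insn->dst_reg, false);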
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index b4e22345963f..2efb213716fa 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -24,10 +24,11 @@
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 
 /* General test specific settings */
 #define MAX_SUBTESTS	3
-#define MAX_TESTRUNS	10000
+#define MAX_TESTRUNS	1000
 #define MAX_DATA	128
 #define MAX_INSNS	512
 #define MAX_K		0xffffFFFF
@@ -6582,6 +6583,7 @@ static __init int test_bpf(void)
 		struct bpf_prog *fp;
 		int err;
 
+		cond_resched();
 		if (exclude_test(i))
 			continue;
 
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 79e326383726..99abeadf416e 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -157,7 +157,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
  * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
-				     int max_if_num)
+				     unsigned int max_if_num)
 {
 	void *data_ptr;
 	size_t old_size;
@@ -201,7 +201,8 @@ unlock:
  */
 static void
 batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
-				   int max_if_num, int del_if_num)
+				   unsigned int max_if_num,
+				   unsigned int del_if_num)
 {
 	size_t chunk_size;
 	size_t if_offset;
@@ -239,7 +240,8 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
  */
 static void
 batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
-				       int max_if_num, int del_if_num)
+				       unsigned int max_if_num,
+				       unsigned int del_if_num)
 {
 	size_t if_offset;
 	void *data_ptr;
@@ -276,7 +278,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
  * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
-				     int max_if_num, int del_if_num)
+				     unsigned int max_if_num,
+				     unsigned int del_if_num)
 {
 	spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 
@@ -311,7 +314,8 @@ static struct batadv_orig_node *
 batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
 {
 	struct batadv_orig_node *orig_node;
-	int size, hash_added;
+	int hash_added;
+	size_t size;
 
 	orig_node = batadv_orig_hash_find(bat_priv, addr);
 	if (orig_node)
@@ -893,7 +897,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
 	u32 i;
 	size_t word_index;
 	u8 *w;
-	int if_num;
+	unsigned int if_num;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1023,7 +1027,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
 	struct batadv_neigh_node *tmp_neigh_node = NULL;
 	struct batadv_neigh_node *router = NULL;
 	struct batadv_orig_node *orig_node_tmp;
-	int if_num;
+	unsigned int if_num;
 	u8 sum_orig, sum_neigh;
 	u8 *neigh_addr;
 	u8 tq_avg;
@@ -1182,7 +1186,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
 	u8 total_count;
 	u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
 	unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
-	int if_num;
+	unsigned int if_num;
 	unsigned int tq_asym_penalty, inv_asym_penalty;
 	unsigned int combined_tq;
 	unsigned int tq_iface_penalty;
@@ -1702,9 +1706,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
 
 	if (is_my_orig) {
 		unsigned long *word;
-		int offset;
+		size_t offset;
 		s32 bit_pos;
-		s16 if_num;
+		unsigned int if_num;
 		u8 *weight;
 
 		orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
@@ -2729,7 +2733,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	struct batadv_neigh_node *router;
 	struct batadv_gw_node *curr_gw;
-	int ret = -EINVAL;
+	int ret = 0;
 	void *hdr;
 
 	router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 27e165ac9302..c74f81341dab 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -928,7 +928,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	struct batadv_neigh_node *router;
 	struct batadv_gw_node *curr_gw;
-	int ret = -EINVAL;
+	int ret = 0;
 	void *hdr;
 
 	router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index fad47853ad3c..b1a08374088b 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -2161,22 +2161,25 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 {
 	struct batadv_bla_claim *claim;
 	int idx = 0;
+	int ret = 0;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(claim, head, hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
-		if (batadv_bla_claim_dump_entry(msg, portid, seq,
-						primary_if, claim)) {
+
+		ret = batadv_bla_claim_dump_entry(msg, portid, seq,
+						  primary_if, claim);
+		if (ret) {
 			*idx_skip = idx - 1;
 			goto unlock;
 		}
 	}
 
-	*idx_skip = idx;
+	*idx_skip = 0;
unlock:
 	rcu_read_unlock();
-	return 0;
+	return ret;
 }
 
 /**
@@ -2391,22 +2394,25 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 	int idx = 0;
+	int ret = 0;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
-		if (batadv_bla_backbone_dump_entry(msg, portid, seq,
-						   primary_if, backbone_gw)) {
+
+		ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
+						     primary_if, backbone_gw);
+		if (ret) {
 			*idx_skip = idx - 1;
 			goto unlock;
 		}
 	}
 
-	*idx_skip = idx;
+	*idx_skip = 0;
unlock:
 	rcu_read_unlock();
-	return 0;
+	return ret;
 }
 
 /**
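Note: the bucket walkers used to return 0 even when an entry did not fit into the netlink message and left *idx_skip at the bucket's full count, so a resumed dump could silently skip entries. They now propagate the entry's error and reset *idx_skip once a bucket completes. The corrected contract, sketched with generic placeholders (entry and dump_entry stand for the per-type claim/backbone variants):

    hlist_for_each_entry_rcu(entry, head, hash_entry) {
            if (idx++ < *idx_skip)
                    continue;
            ret = dump_entry(msg, portid, seq, primary_if, entry);
            if (ret) {
                    *idx_skip = idx - 1;    /* resume at the failed entry */
                    goto unlock;
            }
    }
    *idx_skip = 0;                          /* bucket fully dumped */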
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 22dde42fd80e..5afe641ee4b0 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -288,7 +288,8 @@ batadv_frag_merge_packets(struct hlist_head *chain)
 	/* Move the existing MAC header to just before the payload. (Override
 	 * the fragment header.)
 	 */
-	skb_pull_rcsum(skb_out, hdr_size);
+	skb_pull(skb_out, hdr_size);
+	skb_out->ip_summed = CHECKSUM_NONE;
 	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
 	skb_set_mac_header(skb_out, -ETH_HLEN);
 	skb_reset_network_header(skb_out);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 5f186bff284a..68b54a39c51d 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -763,6 +763,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 	hard_iface->soft_iface = soft_iface;
 	bat_priv = netdev_priv(hard_iface->soft_iface);
 
+	if (bat_priv->num_ifaces >= UINT_MAX) {
+		ret = -ENOSPC;
+		goto err_dev;
+	}
+
 	ret = netdev_master_upper_dev_link(hard_iface->net_dev,
 					   soft_iface, NULL, NULL, NULL);
 	if (ret)
@@ -876,7 +881,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
 	batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
 
 	/* nobody uses this interface anymore */
-	if (!bat_priv->num_ifaces) {
+	if (bat_priv->num_ifaces == 0) {
 		batadv_gw_check_client_stop(bat_priv);
 
 		if (autodel == BATADV_IF_CLEANUP_AUTO)
@@ -912,7 +917,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
 	if (ret)
 		goto free_if;
 
-	hard_iface->if_num = -1;
+	hard_iface->if_num = 0;
 	hard_iface->net_dev = net_dev;
 	hard_iface->soft_iface = NULL;
 	hard_iface->if_status = BATADV_IF_NOT_IN_USE;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 58a7d9274435..74782426bb77 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1569,7 +1569,7 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
  * Return: 0 on success or negative error number in case of failure
  */
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
-			    int max_if_num)
+			    unsigned int max_if_num)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_algo_ops *bao = bat_priv->algo_ops;
@@ -1611,7 +1611,7 @@ err:
  * Return: 0 on success or negative error number in case of failure
  */
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
-			    int max_if_num)
+			    unsigned int max_if_num)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 8e543a3cdc6c..15d896b2de6f 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -73,9 +73,9 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
-			    int max_if_num);
+			    unsigned int max_if_num);
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
-			    int max_if_num);
+			    unsigned int max_if_num);
 struct batadv_orig_node_vlan *
 batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
 			  unsigned short vid);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 900c5ce21cd4..367a81fb785f 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -459,13 +459,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
 
 	/* skb->dev & skb->pkt_type are set here */
 	skb->protocol = eth_type_trans(skb, soft_iface);
-
-	/* should not be necessary anymore as we use skb_pull_rcsum()
-	 * TODO: please verify this and remove this TODO
-	 * -- Dec 21st 2009, Simon Wunderlich
-	 */
-
-	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
 	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index bb1578410e0c..a5aa6d61f4e2 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -167,7 +167,7 @@ struct batadv_hard_iface {
 	struct list_head list;
 
 	/** @if_num: identificator of the interface */
-	s16 if_num;
+	unsigned int if_num;
 
 	/** @if_status: status of the interface for batman-adv */
 	char if_status;
@@ -1596,7 +1596,7 @@ struct batadv_priv {
 	atomic_t batman_queue_left;
 
 	/** @num_ifaces: number of interfaces assigned to this mesh interface */
-	char num_ifaces;
+	unsigned int num_ifaces;
 
 	/** @mesh_obj: kobject for sysfs mesh subdirectory */
 	struct kobject *mesh_obj;
@@ -2186,15 +2186,16 @@ struct batadv_algo_orig_ops {
	 * orig_node due to a new hard-interface being added into the mesh
	 * (optional)
	 */
-	int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
+	int (*add_if)(struct batadv_orig_node *orig_node,
+		      unsigned int max_if_num);
 
	/**
	 * @del_if: ask the routing algorithm to apply the needed changes to the
	 * orig_node due to an hard-interface being removed from the mesh
	 * (optional)
	 */
-	int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
-		      int del_if_num);
+	int (*del_if)(struct batadv_orig_node *orig_node,
+		      unsigned int max_if_num, unsigned int del_if_num);
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
	/** @print: print the originator table (optional) */
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 27f1d4f2114a..9b16eaf33819 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -214,7 +214,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 
 	iph = ip_hdr(skb);
 	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
-		goto inhdr_error;
+		goto csum_error;
 
 	len = ntohs(iph->tot_len);
 	if (skb->len < len) {
@@ -236,6 +236,8 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 	 */
 	return 0;
 
+csum_error:
+	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
 	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
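Note: error labels fall through here, so a failed header checksum now bumps the specific IPSTATS_MIB_CSUMERRORS counter and then, via the existing inhdr_error label, IPSTATS_MIB_INHDRERRORS as well, while other malformed headers keep hitting only the latter. The shape of the idiom, sketched:

    /* Sketch: fall-through keeps a single exit path while the more
     * specific counter is incremented first.
     */
    csum_error:
            __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
            /* falls through */
    inhdr_error:
            __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);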
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 51935270c651..9896f4975353 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -168,6 +168,8 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
168 masterv = br_vlan_find(vg, vid); 168 masterv = br_vlan_find(vg, vid);
169 if (WARN_ON(!masterv)) 169 if (WARN_ON(!masterv))
170 return NULL; 170 return NULL;
171 refcount_set(&masterv->refcnt, 1);
172 return masterv;
171 } 173 }
172 refcount_inc(&masterv->refcnt); 174 refcount_inc(&masterv->refcnt);
173 175
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index ce7152a12bd8..c5afb4232ecb 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
172 return true; 172 return true;
173} 173}
174 174
175static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
176{
177 return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
178}
179
175static int ebt_among_mt_check(const struct xt_mtchk_param *par) 180static int ebt_among_mt_check(const struct xt_mtchk_param *par)
176{ 181{
177 const struct ebt_among_info *info = par->matchinfo; 182 const struct ebt_among_info *info = par->matchinfo;
178 const struct ebt_entry_match *em = 183 const struct ebt_entry_match *em =
179 container_of(par->matchinfo, const struct ebt_entry_match, data); 184 container_of(par->matchinfo, const struct ebt_entry_match, data);
180 int expected_length = sizeof(struct ebt_among_info); 185 unsigned int expected_length = sizeof(struct ebt_among_info);
181 const struct ebt_mac_wormhash *wh_dst, *wh_src; 186 const struct ebt_mac_wormhash *wh_dst, *wh_src;
182 int err; 187 int err;
183 188
189 if (expected_length > em->match_size)
190 return -EINVAL;
191
184 wh_dst = ebt_among_wh_dst(info); 192 wh_dst = ebt_among_wh_dst(info);
185 wh_src = ebt_among_wh_src(info); 193 if (poolsize_invalid(wh_dst))
194 return -EINVAL;
195
186 expected_length += ebt_mac_wormhash_size(wh_dst); 196 expected_length += ebt_mac_wormhash_size(wh_dst);
197 if (expected_length > em->match_size)
198 return -EINVAL;
199
200 wh_src = ebt_among_wh_src(info);
201 if (poolsize_invalid(wh_src))
202 return -EINVAL;
203
187 expected_length += ebt_mac_wormhash_size(wh_src); 204 expected_length += ebt_mac_wormhash_size(wh_src);
188 205
189 if (em->match_size != EBT_ALIGN(expected_length)) { 206 if (em->match_size != EBT_ALIGN(expected_length)) {
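ebt_among's match info is a fixed header followed by two variable-length wormhashes, so the fix validates incrementally: confirm the fixed part fits in match_size, bound each poolsize against INT_MAX / sizeof(tuple) to rule out multiplication overflow, and re-check the running expected length before trusting the next part. A hedged sketch of the same discipline with simplified stand-in structures (the layouts here are made up; the real ones live in the ebtables headers):

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins, not the real ebt_among layout. */
struct tuple    { unsigned char mac[6]; unsigned int ip; };
struct wormhash { unsigned int poolsize; /* poolsize tuples follow */ };
struct info     { int flags; /* wormhashes follow */ };

static int poolsize_invalid(const struct wormhash *w)
{
	/* reject poolsizes whose tuple array would overflow an int */
	return w && w->poolsize >= INT_MAX / sizeof(struct tuple);
}

static size_t wormhash_size(const struct wormhash *w)
{
	return w ? sizeof(*w) + w->poolsize * sizeof(struct tuple) : 0;
}

/* Incremental validation: after each variable-length part, re-check the
 * running expected length against the caller-declared match_size before
 * reading anything beyond it. */
static int check_match(const struct wormhash *dst,
		       const struct wormhash *src, size_t match_size)
{
	size_t expected = sizeof(struct info);

	if (expected > match_size)
		return -1;
	if (poolsize_invalid(dst))
		return -1;
	expected += wormhash_size(dst);
	if (expected > match_size)
		return -1;
	if (poolsize_invalid(src))
		return -1;
	expected += wormhash_size(src);
	return expected == match_size ? 0 : -1;
}

int main(void)
{
	struct wormhash w = { .poolsize = 2 };
	size_t good = sizeof(struct info) + 2 * wormhash_size(&w);

	printf("ok=%d short=%d\n",
	       check_match(&w, &w, good),
	       check_match(&w, &w, sizeof(struct info)));
	return 0;
}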
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 02c4b409d317..254ef9f49567 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1641,7 +1641,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1641 int off = ebt_compat_match_offset(match, m->match_size); 1641 int off = ebt_compat_match_offset(match, m->match_size);
1642 compat_uint_t msize = m->match_size - off; 1642 compat_uint_t msize = m->match_size - off;
1643 1643
1644 BUG_ON(off >= m->match_size); 1644 if (WARN_ON(off >= m->match_size))
1645 return -EINVAL;
1645 1646
1646 if (copy_to_user(cm->u.name, match->name, 1647 if (copy_to_user(cm->u.name, match->name,
1647 strlen(match->name) + 1) || put_user(msize, &cm->match_size)) 1648 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
@@ -1671,7 +1672,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
1671 int off = xt_compat_target_offset(target); 1672 int off = xt_compat_target_offset(target);
1672 compat_uint_t tsize = t->target_size - off; 1673 compat_uint_t tsize = t->target_size - off;
1673 1674
1674 BUG_ON(off >= t->target_size); 1675 if (WARN_ON(off >= t->target_size))
1676 return -EINVAL;
1675 1677
1676 if (copy_to_user(cm->u.name, target->name, 1678 if (copy_to_user(cm->u.name, target->name,
1677 strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) 1679 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
@@ -1902,7 +1904,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state,
1902 if (state->buf_kern_start == NULL) 1904 if (state->buf_kern_start == NULL)
1903 goto count_only; 1905 goto count_only;
1904 1906
1905 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len); 1907 if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
1908 return -EINVAL;
1906 1909
1907 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); 1910 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1908 1911
@@ -1915,7 +1918,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1915{ 1918{
1916 char *b = state->buf_kern_start; 1919 char *b = state->buf_kern_start;
1917 1920
1918 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len); 1921 if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
1922 return -EINVAL;
1919 1923
1920 if (b != NULL && sz > 0) 1924 if (b != NULL && sz > 0)
1921 memset(b + state->buf_kern_offset, 0, sz); 1925 memset(b + state->buf_kern_offset, 0, sz);
@@ -1992,8 +1996,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1992 pad = XT_ALIGN(size_kern) - size_kern; 1996 pad = XT_ALIGN(size_kern) - size_kern;
1993 1997
1994 if (pad > 0 && dst) { 1998 if (pad > 0 && dst) {
1995 BUG_ON(state->buf_kern_len <= pad); 1999 if (WARN_ON(state->buf_kern_len <= pad))
1996 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad); 2000 return -EINVAL;
2001 if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
2002 return -EINVAL;
1997 memset(dst + size_kern, 0, pad); 2003 memset(dst + size_kern, 0, pad);
1998 } 2004 }
1999 return off + match_size; 2005 return off + match_size;
@@ -2043,7 +2049,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
2043 if (ret < 0) 2049 if (ret < 0)
2044 return ret; 2050 return ret;
2045 2051
2046 BUG_ON(ret < match32->match_size); 2052 if (WARN_ON(ret < match32->match_size))
2053 return -EINVAL;
2047 growth += ret - match32->match_size; 2054 growth += ret - match32->match_size;
2048 growth += ebt_compat_entry_padsize(); 2055 growth += ebt_compat_entry_padsize();
2049 2056
@@ -2053,7 +2060,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
2053 if (match_kern) 2060 if (match_kern)
2054 match_kern->match_size = ret; 2061 match_kern->match_size = ret;
2055 2062
2056 WARN_ON(type == EBT_COMPAT_TARGET && size_left); 2063 if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
2064 return -EINVAL;
2065
2057 match32 = (struct compat_ebt_entry_mwt *) buf; 2066 match32 = (struct compat_ebt_entry_mwt *) buf;
2058 } 2067 }
2059 2068
@@ -2109,6 +2118,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2109 * 2118 *
2110 * offsets are relative to beginning of struct ebt_entry (i.e., 0). 2119 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2111 */ 2120 */
2121 for (i = 0; i < 4 ; ++i) {
2122 if (offsets[i] >= *total)
2123 return -EINVAL;
2124 if (i == 0)
2125 continue;
2126 if (offsets[i-1] > offsets[i])
2127 return -EINVAL;
2128 }
2129
2112 for (i = 0, j = 1 ; j < 4 ; j++, i++) { 2130 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2113 struct compat_ebt_entry_mwt *match32; 2131 struct compat_ebt_entry_mwt *match32;
2114 unsigned int size; 2132 unsigned int size;
@@ -2140,7 +2158,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2140 2158
2141 startoff = state->buf_user_offset - startoff; 2159 startoff = state->buf_user_offset - startoff;
2142 2160
2143 BUG_ON(*total < startoff); 2161 if (WARN_ON(*total < startoff))
2162 return -EINVAL;
2144 *total -= startoff; 2163 *total -= startoff;
2145 return 0; 2164 return 0;
2146} 2165}
@@ -2267,7 +2286,8 @@ static int compat_do_replace(struct net *net, void __user *user,
2267 state.buf_kern_len = size64; 2286 state.buf_kern_len = size64;
2268 2287
2269 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2288 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2270 BUG_ON(ret < 0); /* parses same data again */ 2289 if (WARN_ON(ret < 0))
2290 goto out_unlock;
2271 2291
2272 vfree(entries_tmp); 2292 vfree(entries_tmp);
2273 tmp.entries_size = size64; 2293 tmp.entries_size = size64;
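Every hunk in this file applies one conversion: a BUG_ON() on a size derived from user-supplied compat data becomes WARN_ON() plus an -EINVAL return, so malformed input logs a warning and fails the syscall instead of crashing the machine. A minimal userspace analogue of the pattern (warn_on() and copy_match() are made-up stand-ins, not the kernel API):

#include <stdio.h>

#define EINVAL 22

/* Illustrative stand-in for WARN_ON(): report the condition and return
 * it, instead of aborting the way BUG_ON() halts the kernel. */
static int warn_on(int cond, const char *what)
{
	if (cond)
		fprintf(stderr, "WARNING: %s\n", what);
	return cond;
}

static int copy_match(unsigned int off, unsigned int match_size)
{
	/* Old style: BUG_ON(off >= match_size) would crash.
	 * New style: warn once and reject the input. */
	if (warn_on(off >= match_size, "off >= match_size"))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("good=%d bad=%d\n", copy_match(4, 16), copy_match(16, 8));
	return 0;
}

The behavioral point is the return path: after the warning, control unwinds normally through the error code rather than halting.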
diff --git a/net/core/dev.c b/net/core/dev.c
index d4362befe7e2..2cedf520cb28 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6396,6 +6396,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
6396 .linking = true, 6396 .linking = true,
6397 .upper_info = upper_info, 6397 .upper_info = upper_info,
6398 }; 6398 };
6399 struct net_device *master_dev;
6399 int ret = 0; 6400 int ret = 0;
6400 6401
6401 ASSERT_RTNL(); 6402 ASSERT_RTNL();
@@ -6407,11 +6408,14 @@ static int __netdev_upper_dev_link(struct net_device *dev,
6407 if (netdev_has_upper_dev(upper_dev, dev)) 6408 if (netdev_has_upper_dev(upper_dev, dev))
6408 return -EBUSY; 6409 return -EBUSY;
6409 6410
6410 if (netdev_has_upper_dev(dev, upper_dev)) 6411 if (!master) {
6411 return -EEXIST; 6412 if (netdev_has_upper_dev(dev, upper_dev))
6412 6413 return -EEXIST;
6413 if (master && netdev_master_upper_dev_get(dev)) 6414 } else {
6414 return -EBUSY; 6415 master_dev = netdev_master_upper_dev_get(dev);
6416 if (master_dev)
6417 return master_dev == upper_dev ? -EEXIST : -EBUSY;
6418 }
6415 6419
6416 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 6420 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
6417 &changeupper_info.info); 6421 &changeupper_info.info);
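The rewritten check distinguishes relinking the same master (-EEXIST, effectively a repeat of an existing state) from attaching a second master (-EBUSY). A toy sketch of that decision, assuming a single master pointer per device; the names are illustrative, not the netdev API:

#include <stdio.h>

#define EEXIST 17
#define EBUSY  16

/* Illustrative model only: one optional master per device. */
struct dev { struct dev *master; };

static int link_master(struct dev *dev, struct dev *upper)
{
	struct dev *cur = dev->master;

	if (cur)
		return cur == upper ? -EEXIST : -EBUSY;
	dev->master = upper;
	return 0;
}

int main(void)
{
	struct dev a = {0}, m1 = {0}, m2 = {0};
	int r1 = link_master(&a, &m1);	/* 0: linked */
	int r2 = link_master(&a, &m1);	/* -EEXIST: same master again */
	int r3 = link_master(&a, &m2);	/* -EBUSY: different master */

	printf("%d %d %d\n", r1, r2, r3);
	return 0;
}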
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 18d385ed8237..2f2307d94787 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1695,10 +1695,11 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
1695 goto nla_put_failure; 1695 goto nla_put_failure;
1696 1696
1697 if (table->resource_valid) { 1697 if (table->resource_valid) {
1698 nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, 1698 if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
1699 table->resource_id, DEVLINK_ATTR_PAD); 1699 table->resource_id, DEVLINK_ATTR_PAD) ||
1700 nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS, 1700 nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
1701 table->resource_units, DEVLINK_ATTR_PAD); 1701 table->resource_units, DEVLINK_ATTR_PAD))
1702 goto nla_put_failure;
1702 } 1703 }
1703 if (devlink_dpipe_matches_put(table, skb)) 1704 if (devlink_dpipe_matches_put(table, skb))
1704 goto nla_put_failure; 1705 goto nla_put_failure;
@@ -2332,7 +2333,7 @@ devlink_resource_validate_children(struct devlink_resource *resource)
2332 list_for_each_entry(child_resource, &resource->resource_list, list) 2333 list_for_each_entry(child_resource, &resource->resource_list, list)
2333 parts_size += child_resource->size_new; 2334 parts_size += child_resource->size_new;
2334 2335
2335 if (parts_size > resource->size) 2336 if (parts_size > resource->size_new)
2336 size_valid = false; 2337 size_valid = false;
2337out: 2338out:
2338 resource->size_valid = size_valid; 2339 resource->size_valid = size_valid;
@@ -2372,20 +2373,22 @@ static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
2372 return 0; 2373 return 0;
2373} 2374}
2374 2375
2375static void 2376static int
2376devlink_resource_size_params_put(struct devlink_resource *resource, 2377devlink_resource_size_params_put(struct devlink_resource *resource,
2377 struct sk_buff *skb) 2378 struct sk_buff *skb)
2378{ 2379{
2379 struct devlink_resource_size_params *size_params; 2380 struct devlink_resource_size_params *size_params;
2380 2381
2381 size_params = resource->size_params; 2382 size_params = &resource->size_params;
2382 nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN, 2383 if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
2383 size_params->size_granularity, DEVLINK_ATTR_PAD); 2384 size_params->size_granularity, DEVLINK_ATTR_PAD) ||
2384 nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX, 2385 nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
2385 size_params->size_max, DEVLINK_ATTR_PAD); 2386 size_params->size_max, DEVLINK_ATTR_PAD) ||
2386 nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN, 2387 nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
2387 size_params->size_min, DEVLINK_ATTR_PAD); 2388 size_params->size_min, DEVLINK_ATTR_PAD) ||
2388 nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit); 2389 nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
2390 return -EMSGSIZE;
2391 return 0;
2389} 2392}
2390 2393
2391static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb, 2394static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
@@ -2409,10 +2412,12 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
2409 nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW, 2412 nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
2410 resource->size_new, DEVLINK_ATTR_PAD); 2413 resource->size_new, DEVLINK_ATTR_PAD);
2411 if (resource->resource_ops && resource->resource_ops->occ_get) 2414 if (resource->resource_ops && resource->resource_ops->occ_get)
2412 nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC, 2415 if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
2413 resource->resource_ops->occ_get(devlink), 2416 resource->resource_ops->occ_get(devlink),
2414 DEVLINK_ATTR_PAD); 2417 DEVLINK_ATTR_PAD))
2415 devlink_resource_size_params_put(resource, skb); 2418 goto nla_put_failure;
2419 if (devlink_resource_size_params_put(resource, skb))
2420 goto nla_put_failure;
2416 if (list_empty(&resource->resource_list)) 2421 if (list_empty(&resource->resource_list))
2417 goto out; 2422 goto out;
2418 2423
@@ -3151,7 +3156,7 @@ int devlink_resource_register(struct devlink *devlink,
3151 u64 resource_size, 3156 u64 resource_size,
3152 u64 resource_id, 3157 u64 resource_id,
3153 u64 parent_resource_id, 3158 u64 parent_resource_id,
3154 struct devlink_resource_size_params *size_params, 3159 const struct devlink_resource_size_params *size_params,
3155 const struct devlink_resource_ops *resource_ops) 3160 const struct devlink_resource_ops *resource_ops)
3156{ 3161{
3157 struct devlink_resource *resource; 3162 struct devlink_resource *resource;
@@ -3194,7 +3199,8 @@ int devlink_resource_register(struct devlink *devlink,
3194 resource->id = resource_id; 3199 resource->id = resource_id;
3195 resource->resource_ops = resource_ops; 3200 resource->resource_ops = resource_ops;
3196 resource->size_valid = true; 3201 resource->size_valid = true;
3197 resource->size_params = size_params; 3202 memcpy(&resource->size_params, size_params,
3203 sizeof(resource->size_params));
3198 INIT_LIST_HEAD(&resource->resource_list); 3204 INIT_LIST_HEAD(&resource->resource_list);
3199 list_add_tail(&resource->list, resource_list); 3205 list_add_tail(&resource->list, resource_list);
3200out: 3206out:
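The devlink fixes all check the return value of each nla_put_u64_64bit() and jump to nla_put_failure on the first miss, instead of emitting a silently truncated netlink message; size_params is also copied by value so callers may pass a stack variable. A small sketch of the checked-put chain, where put_attr() is a made-up stand-in for the nla_put helpers:

#include <stdio.h>

#define EMSGSIZE 90

/* Illustrative stand-in for nla_put_*: fails when the message buffer
 * runs out of room. */
static int put_attr(unsigned int *room, unsigned int len)
{
	if (*room < len)
		return -1;
	*room -= len;
	return 0;
}

/* The fixed pattern: every put is checked, and the first failure
 * propagates instead of producing a truncated message. */
static int put_size_params(unsigned int *room)
{
	if (put_attr(room, 8) ||	/* size granularity */
	    put_attr(room, 8) ||	/* size max */
	    put_attr(room, 8) ||	/* size min */
	    put_attr(room, 1))		/* unit */
		return -EMSGSIZE;
	return 0;
}

int main(void)
{
	unsigned int big = 64, small = 10;

	printf("big=%d small=%d\n", put_size_params(&big),
	       put_size_params(&small));
	return 0;
}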
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 494e6a5d7306..3f89c76d5c24 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -2520,11 +2520,14 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
2520static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) 2520static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr)
2521{ 2521{
2522 struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM }; 2522 struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM };
2523 int rc;
2523 2524
2524 if (!dev->ethtool_ops->get_fecparam) 2525 if (!dev->ethtool_ops->get_fecparam)
2525 return -EOPNOTSUPP; 2526 return -EOPNOTSUPP;
2526 2527
2527 dev->ethtool_ops->get_fecparam(dev, &fecparam); 2528 rc = dev->ethtool_ops->get_fecparam(dev, &fecparam);
2529 if (rc)
2530 return rc;
2528 2531
2529 if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) 2532 if (copy_to_user(useraddr, &fecparam, sizeof(fecparam)))
2530 return -EFAULT; 2533 return -EFAULT;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 09bd89c90a71..0bb0d8877954 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4891,7 +4891,7 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
4891 * 4891 *
4892 * The MAC/L2 or network (IP, IPv6) headers are not accounted for. 4892 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4893 */ 4893 */
4894unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 4894static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4895{ 4895{
4896 const struct skb_shared_info *shinfo = skb_shinfo(skb); 4896 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4897 unsigned int thlen = 0; 4897 unsigned int thlen = 0;
@@ -4913,7 +4913,40 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4913 */ 4913 */
4914 return thlen + shinfo->gso_size; 4914 return thlen + shinfo->gso_size;
4915} 4915}
4916EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); 4916
4917/**
4918 * skb_gso_network_seglen - Return length of individual segments of a gso packet
4919 *
4920 * @skb: GSO skb
4921 *
4922 * skb_gso_network_seglen is used to determine the real size of the
4923 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
4924 *
4925 * The MAC/L2 header is not accounted for.
4926 */
4927static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4928{
4929 unsigned int hdr_len = skb_transport_header(skb) -
4930 skb_network_header(skb);
4931
4932 return hdr_len + skb_gso_transport_seglen(skb);
4933}
4934
4935/**
4936 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
4937 *
4938 * @skb: GSO skb
4939 *
4940 * skb_gso_mac_seglen is used to determine the real size of the
4941 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
4942 * headers (TCP/UDP).
4943 */
4944static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
4945{
4946 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
4947
4948 return hdr_len + skb_gso_transport_seglen(skb);
4949}
4917 4950
4918/** 4951/**
4919 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS 4952 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
@@ -4955,19 +4988,20 @@ static inline bool skb_gso_size_check(const struct sk_buff *skb,
4955} 4988}
4956 4989
4957/** 4990/**
4958 * skb_gso_validate_mtu - Return in case such skb fits a given MTU 4991 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
4959 * 4992 *
4960 * @skb: GSO skb 4993 * @skb: GSO skb
4961 * @mtu: MTU to validate against 4994 * @mtu: MTU to validate against
4962 * 4995 *
4963 * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU 4996 * skb_gso_validate_network_len validates if a given skb will fit a
4964 * once split. 4997 * wanted MTU once split. It considers L3 headers, L4 headers, and the
4998 * payload.
4965 */ 4999 */
4966bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) 5000bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
4967{ 5001{
4968 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); 5002 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
4969} 5003}
4970EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); 5004EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
4971 5005
4972/** 5006/**
4973 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? 5007 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
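The three helpers differ only in where the per-segment measurement starts: skb_gso_transport_seglen() counts the L4 header plus gso_size, the network variant adds the L3 header span, and the MAC variant adds L2 as well. A sketch of the arithmetic with made-up offsets standing in for the skb header pointers:

#include <stdio.h>

/* Illustrative model only: fixed offsets instead of skb header pointers. */
struct pkt {
	unsigned int mac_off, net_off, trans_off;	/* header offsets */
	unsigned int gso_size;		/* payload bytes per segment */
	unsigned int thlen;		/* transport header length */
};

static unsigned int transport_seglen(const struct pkt *p)
{
	return p->thlen + p->gso_size;
}

static unsigned int network_seglen(const struct pkt *p)
{
	/* L3 + L4 headers plus payload; MAC header excluded */
	return (p->trans_off - p->net_off) + transport_seglen(p);
}

static unsigned int mac_seglen(const struct pkt *p)
{
	/* everything from the MAC header onward */
	return (p->trans_off - p->mac_off) + transport_seglen(p);
}

int main(void)
{
	struct pkt p = { .mac_off = 0, .net_off = 14, .trans_off = 34,
			 .gso_size = 1448, .thlen = 20 };

	printf("transport=%u network=%u mac=%u\n",
	       transport_seglen(&p), network_seglen(&p), mac_seglen(&p));
	return 0;
}

With a 14-byte Ethernet header, 20-byte IPv4 header, 20-byte TCP header and gso_size 1448, the sketch prints 1468, 1488 and 1502.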
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 2dd21c3281a1..b54b948b0596 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -55,7 +55,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
55 if (skb->ignore_df) 55 if (skb->ignore_df)
56 return false; 56 return false;
57 57
58 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) 58 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
59 return false; 59 return false;
60 60
61 return true; 61 return true;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 45d97e9b2759..0901de42ed85 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -970,9 +970,6 @@ static void __gre_tunnel_init(struct net_device *dev)
970 970
971 t_hlen = tunnel->hlen + sizeof(struct iphdr); 971 t_hlen = tunnel->hlen + sizeof(struct iphdr);
972 972
973 dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
974 dev->mtu = ETH_DATA_LEN - t_hlen - 4;
975
976 dev->features |= GRE_FEATURES; 973 dev->features |= GRE_FEATURES;
977 dev->hw_features |= GRE_FEATURES; 974 dev->hw_features |= GRE_FEATURES;
978 975
@@ -1290,8 +1287,6 @@ static int erspan_tunnel_init(struct net_device *dev)
1290 erspan_hdr_len(tunnel->erspan_ver); 1287 erspan_hdr_len(tunnel->erspan_ver);
1291 t_hlen = tunnel->hlen + sizeof(struct iphdr); 1288 t_hlen = tunnel->hlen + sizeof(struct iphdr);
1292 1289
1293 dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
1294 dev->mtu = ETH_DATA_LEN - t_hlen - 4;
1295 dev->features |= GRE_FEATURES; 1290 dev->features |= GRE_FEATURES;
1296 dev->hw_features |= GRE_FEATURES; 1291 dev->hw_features |= GRE_FEATURES;
1297 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1292 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e8e675be60ec..66340ab750e6 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -248,7 +248,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
248 248
249 /* common case: seglen is <= mtu 249 /* common case: seglen is <= mtu
250 */ 250 */
251 if (skb_gso_validate_mtu(skb, mtu)) 251 if (skb_gso_validate_network_len(skb, mtu))
252 return ip_finish_output2(net, sk, skb); 252 return ip_finish_output2(net, sk, skb);
253 253
254 /* Slowpath - GSO segment length exceeds the egress MTU. 254 /* Slowpath - GSO segment length exceeds the egress MTU.
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index d786a8441bce..6d21068f9b55 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -710,16 +710,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
710 } 710 }
711 } 711 }
712 712
713 if (tunnel->fwmark) { 713 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
714 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, 714 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
715 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, 715 tunnel->fwmark);
716 tunnel->fwmark);
717 }
718 else {
719 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
720 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
721 skb->mark);
722 }
723 716
724 if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) 717 if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
725 goto tx_error; 718 goto tx_error;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 4b02ab39ebc5..8a8ae61cea71 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -232,7 +232,6 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
232 c->hash_mode = i->hash_mode; 232 c->hash_mode = i->hash_mode;
233 c->hash_initval = i->hash_initval; 233 c->hash_initval = i->hash_initval;
234 refcount_set(&c->refcount, 1); 234 refcount_set(&c->refcount, 1);
235 refcount_set(&c->entries, 1);
236 235
237 spin_lock_bh(&cn->lock); 236 spin_lock_bh(&cn->lock);
238 if (__clusterip_config_find(net, ip)) { 237 if (__clusterip_config_find(net, ip)) {
@@ -263,8 +262,10 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
263 262
264 c->notifier.notifier_call = clusterip_netdev_event; 263 c->notifier.notifier_call = clusterip_netdev_event;
265 err = register_netdevice_notifier(&c->notifier); 264 err = register_netdevice_notifier(&c->notifier);
266 if (!err) 265 if (!err) {
266 refcount_set(&c->entries, 1);
267 return c; 267 return c;
268 }
268 269
269#ifdef CONFIG_PROC_FS 270#ifdef CONFIG_PROC_FS
270 proc_remove(c->pde); 271 proc_remove(c->pde);
@@ -273,7 +274,7 @@ err:
273 spin_lock_bh(&cn->lock); 274 spin_lock_bh(&cn->lock);
274 list_del_rcu(&c->list); 275 list_del_rcu(&c->list);
275 spin_unlock_bh(&cn->lock); 276 spin_unlock_bh(&cn->lock);
276 kfree(c); 277 clusterip_config_put(c);
277 278
278 return ERR_PTR(err); 279 return ERR_PTR(err);
279} 280}
@@ -496,12 +497,15 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
496 return PTR_ERR(config); 497 return PTR_ERR(config);
497 } 498 }
498 } 499 }
499 cipinfo->config = config;
500 500
501 ret = nf_ct_netns_get(par->net, par->family); 501 ret = nf_ct_netns_get(par->net, par->family);
502 if (ret < 0) 502 if (ret < 0) {
503 pr_info("cannot load conntrack support for proto=%u\n", 503 pr_info("cannot load conntrack support for proto=%u\n",
504 par->family); 504 par->family);
505 clusterip_config_entry_put(par->net, config);
506 clusterip_config_put(config);
507 return ret;
508 }
505 509
506 if (!par->net->xt.clusterip_deprecated_warning) { 510 if (!par->net->xt.clusterip_deprecated_warning) {
507 pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, " 511 pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
@@ -509,6 +513,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
509 par->net->xt.clusterip_deprecated_warning = true; 513 par->net->xt.clusterip_deprecated_warning = true;
510 } 514 }
511 515
516 cipinfo->config = config;
512 return ret; 517 return ret;
513} 518}
514 519
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
index 25d2975da156..0cd46bffa469 100644
--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -111,6 +111,7 @@ static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
111 default: 111 default:
112 return -1; 112 return -1;
113 } 113 }
114 csum_replace4(&iph->check, addr, new_addr);
114 115
115 return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); 116 return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
116} 117}
@@ -185,7 +186,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
185 if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) 186 if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)
186 return false; 187 return false;
187 188
188 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) 189 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
189 return false; 190 return false;
190 191
191 return true; 192 return true;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a4f44d815a61..860b3fd2f54b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -128,10 +128,11 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
128static int ip_rt_error_cost __read_mostly = HZ; 128static int ip_rt_error_cost __read_mostly = HZ;
129static int ip_rt_error_burst __read_mostly = 5 * HZ; 129static int ip_rt_error_burst __read_mostly = 5 * HZ;
130static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; 130static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
131static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 131static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
132static int ip_rt_min_advmss __read_mostly = 256; 132static int ip_rt_min_advmss __read_mostly = 256;
133 133
134static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; 134static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
135
135/* 136/*
136 * Interface to generic destination cache. 137 * Interface to generic destination cache.
137 */ 138 */
@@ -930,14 +931,23 @@ out_put_peer:
930 931
931static int ip_error(struct sk_buff *skb) 932static int ip_error(struct sk_buff *skb)
932{ 933{
933 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
934 struct rtable *rt = skb_rtable(skb); 934 struct rtable *rt = skb_rtable(skb);
935 struct net_device *dev = skb->dev;
936 struct in_device *in_dev;
935 struct inet_peer *peer; 937 struct inet_peer *peer;
936 unsigned long now; 938 unsigned long now;
937 struct net *net; 939 struct net *net;
938 bool send; 940 bool send;
939 int code; 941 int code;
940 942
943 if (netif_is_l3_master(skb->dev)) {
944 dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
945 if (!dev)
946 goto out;
947 }
948
949 in_dev = __in_dev_get_rcu(dev);
950
941 /* IP on this device is disabled. */ 951 /* IP on this device is disabled. */
942 if (!in_dev) 952 if (!in_dev)
943 goto out; 953 goto out;
@@ -2818,6 +2828,7 @@ void ip_rt_multicast_event(struct in_device *in_dev)
2818static int ip_rt_gc_interval __read_mostly = 60 * HZ; 2828static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2819static int ip_rt_gc_min_interval __read_mostly = HZ / 2; 2829static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2820static int ip_rt_gc_elasticity __read_mostly = 8; 2830static int ip_rt_gc_elasticity __read_mostly = 8;
2831static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
2821 2832
2822static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, 2833static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
2823 void __user *buffer, 2834 void __user *buffer,
@@ -2933,7 +2944,8 @@ static struct ctl_table ipv4_route_table[] = {
2933 .data = &ip_rt_min_pmtu, 2944 .data = &ip_rt_min_pmtu,
2934 .maxlen = sizeof(int), 2945 .maxlen = sizeof(int),
2935 .mode = 0644, 2946 .mode = 0644,
2936 .proc_handler = proc_dointvec, 2947 .proc_handler = proc_dointvec_minmax,
2948 .extra1 = &ip_min_valid_pmtu,
2937 }, 2949 },
2938 { 2950 {
2939 .procname = "min_adv_mss", 2951 .procname = "min_adv_mss",
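Switching ip_rt_min_pmtu to proc_dointvec_minmax with extra1 pointing at IPV4_MIN_MTU means a sysctl write below 68 bytes is rejected rather than stored, closing off bogus PMTU floors. A userspace sketch of the enforced bound (set_min_pmtu() is a made-up stand-in for the proc handler):

#include <stdio.h>

#define IPV4_MIN_MTU 68	/* RFC 791 minimum IPv4 datagram size */

/* Illustrative only: models the minmax check, not the sysctl machinery. */
static unsigned int ip_rt_min_pmtu = 512 + 20 + 20;

static int set_min_pmtu(unsigned int val)
{
	if (val < IPV4_MIN_MTU)
		return -1;	/* out of range: the write fails */
	ip_rt_min_pmtu = val;
	return 0;
}

int main(void)
{
	printf("set 50 -> %d, pmtu=%u\n", set_min_pmtu(50), ip_rt_min_pmtu);
	printf("set 552 -> %d, pmtu=%u\n", set_min_pmtu(552), ip_rt_min_pmtu);
	return 0;
}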
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 7c843578f233..faddf4f9a707 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -6,7 +6,7 @@
6 * The algorithm is described in: 6 * The algorithm is described in:
7 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm 7 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
8 * for High-Speed Networks" 8 * for High-Speed Networks"
9 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf 9 * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
10 * 10 *
11 * Implemented from description in paper and ns-2 simulation. 11 * Implemented from description in paper and ns-2 simulation.
12 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> 12 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 575d3c1fb6e8..9a1b3c1c1c14 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1971,11 +1971,6 @@ void tcp_enter_loss(struct sock *sk)
1971 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous 1971 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
1972 * loss recovery is underway except recurring timeout(s) on 1972 * loss recovery is underway except recurring timeout(s) on
1973 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing 1973 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
1974 *
1975 * In theory F-RTO can be used repeatedly during loss recovery.
1976 * In practice this interacts badly with broken middle-boxes that
1977 * falsely raise the receive window, which results in repeated
1978 * timeouts and stop-and-go behavior.
1979 */ 1974 */
1980 tp->frto = net->ipv4.sysctl_tcp_frto && 1975 tp->frto = net->ipv4.sysctl_tcp_frto &&
1981 (new_recovery || icsk->icsk_retransmits) && 1976 (new_recovery || icsk->icsk_retransmits) &&
@@ -2631,18 +2626,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
2631 tcp_try_undo_loss(sk, false)) 2626 tcp_try_undo_loss(sk, false))
2632 return; 2627 return;
2633 2628
2634 /* The ACK (s)acks some never-retransmitted data meaning not all
2635 * the data packets before the timeout were lost. Therefore we
2636 * undo the congestion window and state. This is essentially
2637 * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
2638 * a retransmitted skb is permanently marked, we can apply such an
2639 * operation even if F-RTO was not used.
2640 */
2641 if ((flag & FLAG_ORIG_SACK_ACKED) &&
2642 tcp_try_undo_loss(sk, tp->undo_marker))
2643 return;
2644
2645 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ 2629 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
2630 /* Step 3.b. A timeout is spurious if not all data are
2631 * lost, i.e., never-retransmitted data are (s)acked.
2632 */
2633 if ((flag & FLAG_ORIG_SACK_ACKED) &&
2634 tcp_try_undo_loss(sk, true))
2635 return;
2636
2646 if (after(tp->snd_nxt, tp->high_seq)) { 2637 if (after(tp->snd_nxt, tp->high_seq)) {
2647 if (flag & FLAG_DATA_SACKED || is_dupack) 2638 if (flag & FLAG_DATA_SACKED || is_dupack)
2648 tp->frto = 0; /* Step 3.a. loss was real */ 2639 tp->frto = 0; /* Step 3.a. loss was real */
@@ -4001,6 +3992,7 @@ void tcp_reset(struct sock *sk)
4001 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 3992 /* This barrier is coupled with smp_rmb() in tcp_poll() */
4002 smp_wmb(); 3993 smp_wmb();
4003 3994
3995 tcp_write_queue_purge(sk);
4004 tcp_done(sk); 3996 tcp_done(sk);
4005 3997
4006 if (!sock_flag(sk, SOCK_DEAD)) 3998 if (!sock_flag(sk, SOCK_DEAD))
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 94b8702603bc..be980c195fc5 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -30,7 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
30 30
31 mtu = dst_mtu(skb_dst(skb)); 31 mtu = dst_mtu(skb_dst(skb));
32 if ((!skb_is_gso(skb) && skb->len > mtu) || 32 if ((!skb_is_gso(skb) && skb->len > mtu) ||
33 (skb_is_gso(skb) && skb_gso_network_seglen(skb) > ip_skb_dst_mtu(skb->sk, skb))) { 33 (skb_is_gso(skb) &&
34 !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
34 skb->protocol = htons(ETH_P_IP); 35 skb->protocol = htons(ETH_P_IP);
35 36
36 if (skb->sk) 37 if (skb->sk)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 997c7f19ad62..a8a919520090 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -412,7 +412,7 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
412 if (skb->ignore_df) 412 if (skb->ignore_df)
413 return false; 413 return false;
414 414
415 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) 415 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
416 return false; 416 return false;
417 417
418 return true; 418 return true;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4b15fe928278..6e0f21eed88a 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1982,14 +1982,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1982{ 1982{
1983 struct net *net = dev_net(dev); 1983 struct net *net = dev_net(dev);
1984 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1984 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1985 struct ip6_tnl *nt, *t;
1986 struct ip_tunnel_encap ipencap; 1985 struct ip_tunnel_encap ipencap;
1986 struct ip6_tnl *nt, *t;
1987 int err;
1987 1988
1988 nt = netdev_priv(dev); 1989 nt = netdev_priv(dev);
1989 1990
1990 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) { 1991 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
1991 int err = ip6_tnl_encap_setup(nt, &ipencap); 1992 err = ip6_tnl_encap_setup(nt, &ipencap);
1992
1993 if (err < 0) 1993 if (err < 0)
1994 return err; 1994 return err;
1995 } 1995 }
@@ -2005,7 +2005,11 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
2005 return -EEXIST; 2005 return -EEXIST;
2006 } 2006 }
2007 2007
2008 return ip6_tnl_create2(dev); 2008 err = ip6_tnl_create2(dev);
2009 if (!err && tb[IFLA_MTU])
2010 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2011
2012 return err;
2009} 2013}
2010 2014
2011static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], 2015static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index d95ceca7ff8f..531d6957af36 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -21,18 +21,19 @@
21int ip6_route_me_harder(struct net *net, struct sk_buff *skb) 21int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
22{ 22{
23 const struct ipv6hdr *iph = ipv6_hdr(skb); 23 const struct ipv6hdr *iph = ipv6_hdr(skb);
24 struct sock *sk = sk_to_full_sk(skb->sk);
24 unsigned int hh_len; 25 unsigned int hh_len;
25 struct dst_entry *dst; 26 struct dst_entry *dst;
26 struct flowi6 fl6 = { 27 struct flowi6 fl6 = {
27 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, 28 .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
28 .flowi6_mark = skb->mark, 29 .flowi6_mark = skb->mark,
29 .flowi6_uid = sock_net_uid(net, skb->sk), 30 .flowi6_uid = sock_net_uid(net, sk),
30 .daddr = iph->daddr, 31 .daddr = iph->daddr,
31 .saddr = iph->saddr, 32 .saddr = iph->saddr,
32 }; 33 };
33 int err; 34 int err;
34 35
35 dst = ip6_route_output(net, skb->sk, &fl6); 36 dst = ip6_route_output(net, sk, &fl6);
36 err = dst->error; 37 err = dst->error;
37 if (err) { 38 if (err) {
38 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 39 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
@@ -50,7 +51,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
50 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && 51 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
51 xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { 52 xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
52 skb_dst_set(skb, NULL); 53 skb_dst_set(skb, NULL);
53 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0); 54 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
54 if (IS_ERR(dst)) 55 if (IS_ERR(dst))
55 return PTR_ERR(dst); 56 return PTR_ERR(dst);
56 skb_dst_set(skb, dst); 57 skb_dst_set(skb, dst);
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index 94deb69bbbda..91ed25a24b79 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -48,10 +48,6 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
48 } 48 }
49 49
50 fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; 50 fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
51 if ((flags & XT_RPFILTER_LOOSE) == 0) {
52 fl6.flowi6_oif = dev->ifindex;
53 lookup_flags |= RT6_LOOKUP_F_IFACE;
54 }
55 51
56 rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags); 52 rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
57 if (rt->dst.error) 53 if (rt->dst.error)
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
index d346705d6ee6..207cb35569b1 100644
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -178,7 +178,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
178 if (skb->len <= mtu) 178 if (skb->len <= mtu)
179 return false; 179 return false;
180 180
181 if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) 181 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
182 return false; 182 return false;
183 183
184 return true; 184 return true;
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index bed57ee65f7b..6b7f075f811f 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
99 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, 99 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
100 target, maniptype)) 100 target, maniptype))
101 return false; 101 return false;
102
103 /* must reload, offset might have changed */
104 ipv6h = (void *)skb->data + iphdroff;
105
102manip_addr: 106manip_addr:
103 if (maniptype == NF_NAT_MANIP_SRC) 107 if (maniptype == NF_NAT_MANIP_SRC)
104 ipv6h->saddr = target->src.u3.in6; 108 ipv6h->saddr = target->src.u3.in6;
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index cc5174c7254c..62fc84d7bdff 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -180,7 +180,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
180 } 180 }
181 181
182 *dest = 0; 182 *dest = 0;
183 again:
184 rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags); 183 rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags);
185 if (rt->dst.error) 184 if (rt->dst.error)
186 goto put_rt_err; 185 goto put_rt_err;
@@ -189,15 +188,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
189 if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL)) 188 if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
190 goto put_rt_err; 189 goto put_rt_err;
191 190
192 if (oif && oif != rt->rt6i_idev->dev) { 191 if (oif && oif != rt->rt6i_idev->dev)
193 /* multipath route? Try again with F_IFACE */ 192 goto put_rt_err;
194 if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) {
195 lookup_flags |= RT6_LOOKUP_F_IFACE;
196 fl6.flowi6_oif = oif->ifindex;
197 ip6_rt_put(rt);
198 goto again;
199 }
200 }
201 193
202 switch (priv->result) { 194 switch (priv->result) {
203 case NFT_FIB_RESULT_OIF: 195 case NFT_FIB_RESULT_OIF:
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3a1775a62973..0195598f7bb5 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1578,6 +1578,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
1578 if (err < 0) 1578 if (err < 0)
1579 return err; 1579 return err;
1580 1580
1581 if (tb[IFLA_MTU]) {
1582 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
1583
1584 if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
1585 dev->mtu = mtu;
1586 }
1587
1581#ifdef CONFIG_IPV6_SIT_6RD 1588#ifdef CONFIG_IPV6_SIT_6RD
1582 if (ipip6_netlink_6rd_parms(data, &ip6rd)) 1589 if (ipip6_netlink_6rd_parms(data, &ip6rd))
1583 err = ipip6_tunnel_update_6rd(nt, &ip6rd); 1590 err = ipip6_tunnel_update_6rd(nt, &ip6rd);
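The sit hunk applies a user-supplied IFLA_MTU on newlink only when it lies within the tunnel's valid range, from IPV6_MIN_MTU up to 0xFFF8 minus the link-layer header; anything else is ignored and the default kept. A sketch of that acceptance test (apply_mtu() is illustrative, not a kernel helper):

#include <stdio.h>

#define IPV6_MIN_MTU 1280

/* Illustrative only: out-of-range requests keep the current MTU. */
static unsigned int apply_mtu(unsigned int cur, unsigned int requested,
			      unsigned int hard_header_len)
{
	if (requested >= IPV6_MIN_MTU &&
	    requested <= 0xFFF8 - hard_header_len)
		return requested;
	return cur;
}

int main(void)
{
	printf("%u %u\n", apply_mtu(1480, 9000, 14),	/* accepted */
	       apply_mtu(1480, 100, 14));		/* ignored */
	return 0;
}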
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8ae87d4ec5ff..5959ce9620eb 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -82,7 +82,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
82 82
83 if ((!skb_is_gso(skb) && skb->len > mtu) || 83 if ((!skb_is_gso(skb) && skb->len > mtu) ||
84 (skb_is_gso(skb) && 84 (skb_is_gso(skb) &&
85 skb_gso_network_seglen(skb) > ip6_skb_dst_mtu(skb))) { 85 !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
86 skb->dev = dst->dev; 86 skb->dev = dst->dev;
87 skb->protocol = htons(ETH_P_IPV6); 87 skb->protocol = htons(ETH_P_IPV6);
88 88
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 194a7483bb93..83421c6f0bef 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -136,51 +136,6 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
136 136
137} 137}
138 138
139/* Lookup the tunnel socket, possibly involving the fs code if the socket is
140 * owned by userspace. A struct sock returned from this function must be
141 * released using l2tp_tunnel_sock_put once you're done with it.
142 */
143static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
144{
145 int err = 0;
146 struct socket *sock = NULL;
147 struct sock *sk = NULL;
148
149 if (!tunnel)
150 goto out;
151
152 if (tunnel->fd >= 0) {
153 /* Socket is owned by userspace, who might be in the process
154 * of closing it. Look the socket up using the fd to ensure
155 * consistency.
156 */
157 sock = sockfd_lookup(tunnel->fd, &err);
158 if (sock)
159 sk = sock->sk;
160 } else {
161 /* Socket is owned by kernelspace */
162 sk = tunnel->sock;
163 sock_hold(sk);
164 }
165
166out:
167 return sk;
168}
169
170/* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup */
171static void l2tp_tunnel_sock_put(struct sock *sk)
172{
173 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
174 if (tunnel) {
175 if (tunnel->fd >= 0) {
176 /* Socket is owned by userspace */
177 sockfd_put(sk->sk_socket);
178 }
179 sock_put(sk);
180 }
181 sock_put(sk);
182}
183
184/* Session hash list. 139/* Session hash list.
185 * The session_id SHOULD be random according to RFC2661, but several 140 * The session_id SHOULD be random according to RFC2661, but several
186 * L2TP implementations (Cisco and Microsoft) use incrementing 141 * L2TP implementations (Cisco and Microsoft) use incrementing
@@ -193,6 +148,13 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
193 return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; 148 return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
194} 149}
195 150
151void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
152{
153 sock_put(tunnel->sock);
154 /* the tunnel is freed in the socket destructor */
155}
156EXPORT_SYMBOL(l2tp_tunnel_free);
157
196/* Lookup a tunnel. A new reference is held on the returned tunnel. */ 158/* Lookup a tunnel. A new reference is held on the returned tunnel. */
197struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) 159struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
198{ 160{
@@ -345,13 +307,11 @@ int l2tp_session_register(struct l2tp_session *session,
345 } 307 }
346 308
347 l2tp_tunnel_inc_refcount(tunnel); 309 l2tp_tunnel_inc_refcount(tunnel);
348 sock_hold(tunnel->sock);
349 hlist_add_head_rcu(&session->global_hlist, g_head); 310 hlist_add_head_rcu(&session->global_hlist, g_head);
350 311
351 spin_unlock_bh(&pn->l2tp_session_hlist_lock); 312 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
352 } else { 313 } else {
353 l2tp_tunnel_inc_refcount(tunnel); 314 l2tp_tunnel_inc_refcount(tunnel);
354 sock_hold(tunnel->sock);
355 } 315 }
356 316
357 hlist_add_head(&session->hlist, head); 317 hlist_add_head(&session->hlist, head);
@@ -969,7 +929,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
969{ 929{
970 struct l2tp_tunnel *tunnel; 930 struct l2tp_tunnel *tunnel;
971 931
972 tunnel = l2tp_sock_to_tunnel(sk); 932 tunnel = l2tp_tunnel(sk);
973 if (tunnel == NULL) 933 if (tunnel == NULL)
974 goto pass_up; 934 goto pass_up;
975 935
@@ -977,13 +937,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
977 tunnel->name, skb->len); 937 tunnel->name, skb->len);
978 938
979 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) 939 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
980 goto pass_up_put; 940 goto pass_up;
981 941
982 sock_put(sk);
983 return 0; 942 return 0;
984 943
985pass_up_put:
986 sock_put(sk);
987pass_up: 944pass_up:
988 return 1; 945 return 1;
989} 946}
@@ -1207,14 +1164,12 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1207static void l2tp_tunnel_destruct(struct sock *sk) 1164static void l2tp_tunnel_destruct(struct sock *sk)
1208{ 1165{
1209 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); 1166 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1210 struct l2tp_net *pn;
1211 1167
1212 if (tunnel == NULL) 1168 if (tunnel == NULL)
1213 goto end; 1169 goto end;
1214 1170
1215 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); 1171 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1216 1172
1217
1218 /* Disable udp encapsulation */ 1173 /* Disable udp encapsulation */
1219 switch (tunnel->encap) { 1174 switch (tunnel->encap) {
1220 case L2TP_ENCAPTYPE_UDP: 1175 case L2TP_ENCAPTYPE_UDP:
@@ -1231,18 +1186,11 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1231 sk->sk_destruct = tunnel->old_sk_destruct; 1186 sk->sk_destruct = tunnel->old_sk_destruct;
1232 sk->sk_user_data = NULL; 1187 sk->sk_user_data = NULL;
1233 1188
1234 /* Remove the tunnel struct from the tunnel list */
1235 pn = l2tp_pernet(tunnel->l2tp_net);
1236 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1237 list_del_rcu(&tunnel->list);
1238 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1239
1240 tunnel->sock = NULL;
1241 l2tp_tunnel_dec_refcount(tunnel);
1242
1243 /* Call the original destructor */ 1189 /* Call the original destructor */
1244 if (sk->sk_destruct) 1190 if (sk->sk_destruct)
1245 (*sk->sk_destruct)(sk); 1191 (*sk->sk_destruct)(sk);
1192
1193 kfree_rcu(tunnel, rcu);
1246end: 1194end:
1247 return; 1195 return;
1248} 1196}
@@ -1303,49 +1251,43 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1303/* Tunnel socket destroy hook for UDP encapsulation */ 1251/* Tunnel socket destroy hook for UDP encapsulation */
1304static void l2tp_udp_encap_destroy(struct sock *sk) 1252static void l2tp_udp_encap_destroy(struct sock *sk)
1305{ 1253{
1306 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 1254 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1307 if (tunnel) { 1255
1308 l2tp_tunnel_closeall(tunnel); 1256 if (tunnel)
1309 sock_put(sk); 1257 l2tp_tunnel_delete(tunnel);
1310 }
1311} 1258}
1312 1259
1313/* Workqueue tunnel deletion function */ 1260/* Workqueue tunnel deletion function */
1314static void l2tp_tunnel_del_work(struct work_struct *work) 1261static void l2tp_tunnel_del_work(struct work_struct *work)
1315{ 1262{
1316 struct l2tp_tunnel *tunnel = NULL; 1263 struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
1317 struct socket *sock = NULL; 1264 del_work);
1318 struct sock *sk = NULL; 1265 struct sock *sk = tunnel->sock;
1319 1266 struct socket *sock = sk->sk_socket;
1320 tunnel = container_of(work, struct l2tp_tunnel, del_work); 1267 struct l2tp_net *pn;
1321 1268
1322 l2tp_tunnel_closeall(tunnel); 1269 l2tp_tunnel_closeall(tunnel);
1323 1270
1324 sk = l2tp_tunnel_sock_lookup(tunnel); 1271 /* If the tunnel socket was created within the kernel, use
1325 if (!sk)
1326 goto out;
1327
1328 sock = sk->sk_socket;
1329
1330 /* If the tunnel socket was created by userspace, then go through the
1331 * inet layer to shut the socket down, and let userspace close it.
1332 * Otherwise, if we created the socket directly within the kernel, use
1333 * the sk API to release it here. 1272 * the sk API to release it here.
1334 * In either case the tunnel resources are freed in the socket
1335 * destructor when the tunnel socket goes away.
1336 */ 1273 */
1337 if (tunnel->fd >= 0) { 1274 if (tunnel->fd < 0) {
1338 if (sock)
1339 inet_shutdown(sock, 2);
1340 } else {
1341 if (sock) { 1275 if (sock) {
1342 kernel_sock_shutdown(sock, SHUT_RDWR); 1276 kernel_sock_shutdown(sock, SHUT_RDWR);
1343 sock_release(sock); 1277 sock_release(sock);
1344 } 1278 }
1345 } 1279 }
1346 1280
1347 l2tp_tunnel_sock_put(sk); 1281 /* Remove the tunnel struct from the tunnel list */
1348out: 1282 pn = l2tp_pernet(tunnel->l2tp_net);
1283 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1284 list_del_rcu(&tunnel->list);
1285 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1286
1287 /* drop initial ref */
1288 l2tp_tunnel_dec_refcount(tunnel);
1289
1290 /* drop workqueue ref */
1349 l2tp_tunnel_dec_refcount(tunnel); 1291 l2tp_tunnel_dec_refcount(tunnel);
1350} 1292}
1351 1293
@@ -1598,13 +1540,22 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1598 sk->sk_user_data = tunnel; 1540 sk->sk_user_data = tunnel;
1599 } 1541 }
1600 1542
1543 /* Bump the reference count. The tunnel context is deleted
1544 * only when this drops to zero. A reference is also held on
1545 * the tunnel socket to ensure that it is not released while
1546 * the tunnel is extant. Must be done before sk_destruct is
1547 * set.
1548 */
1549 refcount_set(&tunnel->ref_count, 1);
1550 sock_hold(sk);
1551 tunnel->sock = sk;
1552 tunnel->fd = fd;
1553
1601 /* Hook on the tunnel socket destructor so that we can cleanup 1554 /* Hook on the tunnel socket destructor so that we can cleanup
1602 * if the tunnel socket goes away. 1555 * if the tunnel socket goes away.
1603 */ 1556 */
1604 tunnel->old_sk_destruct = sk->sk_destruct; 1557 tunnel->old_sk_destruct = sk->sk_destruct;
1605 sk->sk_destruct = &l2tp_tunnel_destruct; 1558 sk->sk_destruct = &l2tp_tunnel_destruct;
1606 tunnel->sock = sk;
1607 tunnel->fd = fd;
1608 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); 1559 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1609 1560
1610 sk->sk_allocation = GFP_ATOMIC; 1561 sk->sk_allocation = GFP_ATOMIC;
@@ -1614,11 +1565,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1614 1565
1615 /* Add tunnel to our list */ 1566 /* Add tunnel to our list */
1616 INIT_LIST_HEAD(&tunnel->list); 1567 INIT_LIST_HEAD(&tunnel->list);
1617
1618 /* Bump the reference count. The tunnel context is deleted
1619 * only when this drops to zero. Must be done before list insertion
1620 */
1621 refcount_set(&tunnel->ref_count, 1);
1622 spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1568 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1623 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); 1569 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1624 spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1570 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1659,8 +1605,6 @@ void l2tp_session_free(struct l2tp_session *session)
1659 1605
1660 if (tunnel) { 1606 if (tunnel) {
1661 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); 1607 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1662 sock_put(tunnel->sock);
1663 session->tunnel = NULL;
1664 l2tp_tunnel_dec_refcount(tunnel); 1608 l2tp_tunnel_dec_refcount(tunnel);
1665 } 1609 }
1666 1610
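The net effect of the l2tp rework: the tunnel takes its initial refcount and a hold on its socket at creation, before sk_destruct is hooked, and that socket hold is dropped only from l2tp_tunnel_free() once the last tunnel reference goes away, so no path can observe a live tunnel whose socket has already been released. A toy model of that lifetime rule (plain counters stand in for the kernel's refcount_t and sock APIs):

#include <stdio.h>

/* Illustrative model only; names and counters are simplified. */
struct sock_s   { int refs; };
struct tunnel_s { int refs; struct sock_s *sk; };

static void sock_put_(struct sock_s *sk)
{
	if (--sk->refs == 0)
		printf("socket destroyed\n");
}

static void tunnel_free(struct tunnel_s *t)
{
	sock_put_(t->sk);	/* drop the tunnel's hold on the socket */
	printf("tunnel freed\n");
}

static void tunnel_put(struct tunnel_s *t)
{
	if (--t->refs == 0)
		tunnel_free(t);
}

int main(void)
{
	struct sock_s sk = { .refs = 1 };		/* creator's ref */
	struct tunnel_s t = { .refs = 1, .sk = &sk };

	sk.refs++;	/* sock_hold() taken at tunnel creation */
	sock_put_(&sk);	/* creator drops its own socket ref: socket lives on */
	tunnel_put(&t);	/* last tunnel ref: tunnel_free() releases the socket */
	return 0;
}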
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9bbee90e9963..a1aa9550f04e 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -214,27 +214,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
214 return &session->priv[0]; 214 return &session->priv[0];
215} 215}
216 216
217static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
218{
219 struct l2tp_tunnel *tunnel;
220
221 if (sk == NULL)
222 return NULL;
223
224 sock_hold(sk);
225 tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
226 if (tunnel == NULL) {
227 sock_put(sk);
228 goto out;
229 }
230
231 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
232
233out:
234 return tunnel;
235}
236
237struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); 217struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
218void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
238 219
239struct l2tp_session *l2tp_session_get(const struct net *net, 220struct l2tp_session *l2tp_session_get(const struct net *net,
240 struct l2tp_tunnel *tunnel, 221 struct l2tp_tunnel *tunnel,
@@ -283,7 +264,7 @@ static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
283static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) 264static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
284{ 265{
285 if (refcount_dec_and_test(&tunnel->ref_count)) 266 if (refcount_dec_and_test(&tunnel->ref_count))
286 kfree_rcu(tunnel, rcu); 267 l2tp_tunnel_free(tunnel);
287} 268}
288 269
289/* Session reference counts. Incremented when code obtains a reference 270/* Session reference counts. Incremented when code obtains a reference
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index ff61124fdf59..3428fba6f2b7 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -234,17 +234,13 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
234static void l2tp_ip_destroy_sock(struct sock *sk) 234static void l2tp_ip_destroy_sock(struct sock *sk)
235{ 235{
236 struct sk_buff *skb; 236 struct sk_buff *skb;
237 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 237 struct l2tp_tunnel *tunnel = sk->sk_user_data;
238 238
239 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) 239 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
240 kfree_skb(skb); 240 kfree_skb(skb);
241 241
242 if (tunnel) { 242 if (tunnel)
243 l2tp_tunnel_closeall(tunnel); 243 l2tp_tunnel_delete(tunnel);
244 sock_put(sk);
245 }
246
247 sk_refcnt_debug_dec(sk);
248} 244}
249 245
250static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) 246static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 192344688c06..6f009eaa5fbe 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -248,16 +248,14 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
 
 static void l2tp_ip6_destroy_sock(struct sock *sk)
 {
-	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+	struct l2tp_tunnel *tunnel = sk->sk_user_data;
 
 	lock_sock(sk);
 	ip6_flush_pending_frames(sk);
 	release_sock(sk);
 
-	if (tunnel) {
-		l2tp_tunnel_closeall(tunnel);
-		sock_put(sk);
-	}
+	if (tunnel)
+		l2tp_tunnel_delete(tunnel);
 
 	inet6_destroy_sock(sk);
 }
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 59f246d7b290..3b02f24ea9ec 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -416,20 +416,28 @@ abort:
  * Session (and tunnel control) socket create/destroy.
  *****************************************************************************/
 
+static void pppol2tp_put_sk(struct rcu_head *head)
+{
+	struct pppol2tp_session *ps;
+
+	ps = container_of(head, typeof(*ps), rcu);
+	sock_put(ps->__sk);
+}
+
 /* Called by l2tp_core when a session socket is being closed.
  */
 static void pppol2tp_session_close(struct l2tp_session *session)
 {
-	struct sock *sk;
-
-	BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+	struct pppol2tp_session *ps;
 
-	sk = pppol2tp_session_get_sock(session);
-	if (sk) {
-		if (sk->sk_socket)
-			inet_shutdown(sk->sk_socket, SEND_SHUTDOWN);
-		sock_put(sk);
-	}
+	ps = l2tp_session_priv(session);
+	mutex_lock(&ps->sk_lock);
+	ps->__sk = rcu_dereference_protected(ps->sk,
+					     lockdep_is_held(&ps->sk_lock));
+	RCU_INIT_POINTER(ps->sk, NULL);
+	if (ps->__sk)
+		call_rcu(&ps->rcu, pppol2tp_put_sk);
+	mutex_unlock(&ps->sk_lock);
 }
 
 /* Really kill the session socket. (Called from sock_put() if
@@ -449,14 +457,6 @@ static void pppol2tp_session_destruct(struct sock *sk)
 	}
 }
 
-static void pppol2tp_put_sk(struct rcu_head *head)
-{
-	struct pppol2tp_session *ps;
-
-	ps = container_of(head, typeof(*ps), rcu);
-	sock_put(ps->__sk);
-}
-
 /* Called when the PPPoX socket (session) is closed.
  */
 static int pppol2tp_release(struct socket *sock)
@@ -480,26 +480,17 @@ static int pppol2tp_release(struct socket *sock)
 	sock_orphan(sk);
 	sock->sk = NULL;
 
+	/* If the socket is associated with a session,
+	 * l2tp_session_delete will call pppol2tp_session_close which
+	 * will drop the session's ref on the socket.
+	 */
 	session = pppol2tp_sock_to_session(sk);
-
-	if (session != NULL) {
-		struct pppol2tp_session *ps;
-
+	if (session) {
 		l2tp_session_delete(session);
-
-		ps = l2tp_session_priv(session);
-		mutex_lock(&ps->sk_lock);
-		ps->__sk = rcu_dereference_protected(ps->sk,
-						     lockdep_is_held(&ps->sk_lock));
-		RCU_INIT_POINTER(ps->sk, NULL);
-		mutex_unlock(&ps->sk_lock);
-		call_rcu(&ps->rcu, pppol2tp_put_sk);
-
-		/* Rely on the sock_put() call at the end of the function for
-		 * dropping the reference held by pppol2tp_sock_to_session().
-		 * The last reference will be dropped by pppol2tp_put_sk().
-		 */
+		/* drop the ref obtained by pppol2tp_sock_to_session */
+		sock_put(sk);
 	}
+
 	release_sock(sk);
 
 	/* This will delete the session context via
@@ -796,6 +787,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 
 out_no_ppp:
 	/* This is how we get the session context from the socket. */
+	sock_hold(sk);
 	sk->sk_user_data = session;
 	rcu_assign_pointer(ps->sk, sk);
 	mutex_unlock(&ps->sk_lock);
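The pppol2tp changes follow a retract-then-defer shape: the published ps->sk pointer is cleared under ps->sk_lock, and the final sock_put() runs from an RCU callback so concurrent lookups can finish first. A compressed userspace sketch of that shape, assuming a hypothetical deferred_put() in place of call_rcu() (the kernel additionally waits out a grace period before the callback runs):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sock_stub { int refs; };

struct session {
	pthread_mutex_t sk_lock;
	struct sock_stub *sk;	/* published; readers dereference it */
	struct sock_stub *__sk;	/* stashed copy for the deferred put */
};

static void sock_stub_put(struct sock_stub *s)
{
	if (--s->refs == 0) {
		printf("socket freed\n");
		free(s);
	}
}

/* stands in for call_rcu(): the kernel runs this after a grace period */
static void deferred_put(struct session *ps)
{
	if (ps->__sk)
		sock_stub_put(ps->__sk);
}

static void session_close(struct session *ps)
{
	pthread_mutex_lock(&ps->sk_lock);
	ps->__sk = ps->sk;	/* snapshot the current socket, if any */
	ps->sk = NULL;		/* retract: new lookups now see NULL */
	pthread_mutex_unlock(&ps->sk_lock);
	deferred_put(ps);
}

int main(void)
{
	struct session ps = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };

	ps.sk = calloc(1, sizeof(*ps.sk));
	ps.sk->refs = 1;	/* ref held on behalf of the session */
	session_close(&ps);
	session_close(&ps);	/* second close finds NULL and is a no-op */
	return 0;
}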
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fd580614085b..56fe16b07538 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3921,7 +3921,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
 	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
 					      IEEE80211_FCTL_TODS)) !=
 	    fast_rx->expected_ds_bits)
-		goto drop;
+		return false;
 
 	/* assign the key to drop unencrypted frames (later)
 	 * and strip the IV/MIC if necessary
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 25904af38839..69722504e3e1 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3574,6 +3574,14 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 	if (!IS_ERR_OR_NULL(sta)) {
 		struct ieee80211_fast_tx *fast_tx;
 
+		/* We need a bit of data queued to build aggregates properly, so
+		 * instruct the TCP stack to allow more than a single ms of data
+		 * to be queued in the stack. The value is a bit-shift of 1
+		 * second, so 8 is ~4ms of queued data. Only affects local TCP
+		 * sockets.
+		 */
+		sk_pacing_shift_update(skb->sk, 8);
+
 		fast_tx = rcu_dereference(sta->fast_tx);
 
 		if (fast_tx &&
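The ~4ms figure in the new comment comes straight from the shift arithmetic: the pacing shift bounds locally queued TCP data to roughly one second right-shifted by the shift value, at the socket's current pacing rate. A quick standalone check of that arithmetic (the 100 Mbit/s rate is an assumed example):

#include <stdio.h>

int main(void)
{
	unsigned int shift = 8;
	double window_ms = 1000.0 / (1u << shift);	/* ~3.9 ms */
	unsigned long rate_bps = 100 * 1000 * 1000;	/* assumed rate */
	double bytes = rate_bps / 8.0 * window_ms / 1000.0;

	printf("queue window: %.1f ms, ~%.0f bytes at 100 Mbit/s\n",
	       window_ms, bytes);
	return 0;
}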
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index e545a3c9365f..7a4de6d618b1 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -122,7 +122,7 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->len <= mtu)
 		return false;
 
-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 		return false;
 
 	return true;
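skb_gso_validate_network_len() answers a per-segment question: an over-MTU GSO skb may still pass if every packet that resegmentation would emit fits the MTU at the network layer. A rough userspace sketch of that check, with illustrative gso_size/hdr_len parameters rather than real skb fields:

#include <stdbool.h>
#include <stdio.h>

/* gso_size: payload per emitted segment; hdr_len: L3+L4 header bytes */
static bool gso_fits_mtu(unsigned int gso_size, unsigned int hdr_len,
			 unsigned int mtu)
{
	/* each emitted segment is headers plus at most gso_size payload */
	return hdr_len + gso_size <= mtu;
}

int main(void)
{
	/* a 64 KB super-packet is fine as long as 52 + 1448 <= 1500 */
	printf("%s\n", gso_fits_mtu(1448, 52, 1500) ? "pass" : "drop");
	return 0;
}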
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 3e17d32b629d..58d5d05aec24 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -260,7 +260,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
 	buf_len = strlen(buf);
 
 	ct = nf_ct_get(skb, &ctinfo);
-	if (ct && (ct->status & IPS_NAT_MASK)) {
+	if (ct) {
 		bool mangled;
 
 		/* If mangling fails this function will return 0
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 8b9fe30de0cd..558593e6a0a3 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5037,9 +5037,9 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	const struct nf_flowtable_type *type;
+	struct nft_flowtable *flowtable, *ft;
 	u8 genmask = nft_genmask_next(net);
 	int family = nfmsg->nfgen_family;
-	struct nft_flowtable *flowtable;
 	struct nft_table *table;
 	struct nft_ctx ctx;
 	int err, i, k;
@@ -5099,6 +5099,22 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 		goto err3;
 
 	for (i = 0; i < flowtable->ops_len; i++) {
+		if (!flowtable->ops[i].dev)
+			continue;
+
+		list_for_each_entry(ft, &table->flowtables, list) {
+			for (k = 0; k < ft->ops_len; k++) {
+				if (!ft->ops[k].dev)
+					continue;
+
+				if (flowtable->ops[i].dev == ft->ops[k].dev &&
+				    flowtable->ops[i].pf == ft->ops[k].pf) {
+					err = -EBUSY;
+					goto err4;
+				}
+			}
+		}
+
 		err = nf_register_net_hook(net, &flowtable->ops[i]);
 		if (err < 0)
 			goto err4;
@@ -5120,7 +5136,7 @@ err5:
 	i = flowtable->ops_len;
 err4:
 	for (k = i - 1; k >= 0; k--)
-		nf_unregister_net_hook(net, &flowtable->ops[i]);
+		nf_unregister_net_hook(net, &flowtable->ops[k]);
 
 	kfree(flowtable->ops);
 err3:
@@ -5145,6 +5161,11 @@ static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
 	struct nft_table *table;
 	struct nft_ctx ctx;
 
+	if (!nla[NFTA_FLOWTABLE_TABLE] ||
+	    (!nla[NFTA_FLOWTABLE_NAME] &&
+	     !nla[NFTA_FLOWTABLE_HANDLE]))
+		return -EINVAL;
+
 	table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
 				       family, genmask);
 	if (IS_ERR(table))
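Two of the hunks above touch the same unwind path: on failure at index i, err4 must walk k from i-1 down to 0 and unregister ops[k], and the one-character fix replaces ops[i] with ops[k]. A minimal sketch of the register-or-unwind-in-reverse idiom (the hook functions and failure point are made up for illustration):

#include <stdio.h>

#define N 4

static int register_hook(int idx)
{
	return idx == 2 ? -1 : 0;	/* simulate failure on the third hook */
}

static void unregister_hook(int idx)
{
	printf("unregistered %d\n", idx);
}

int main(void)
{
	int i, k;

	for (i = 0; i < N; i++) {
		if (register_hook(i) < 0)
			goto unwind;
	}
	return 0;

unwind:
	/* undo only what succeeded, newest first: ops[k], not ops[i] */
	for (k = i - 1; k >= 0; k--)
		unregister_hook(k);
	return 1;
}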
diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c
index 50615d5efac1..9cf089b9754e 100644
--- a/net/qrtr/smd.c
+++ b/net/qrtr/smd.c
@@ -114,5 +114,6 @@ static struct rpmsg_driver qcom_smd_qrtr_driver = {
 
 module_rpmsg_driver(qcom_smd_qrtr_driver);
 
+MODULE_ALIAS("rpmsg:IPCRTR");
 MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
 MODULE_LICENSE("GPL v2");
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index c061d6eb465d..22571189f21e 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -142,12 +142,20 @@ int rds_tcp_accept_one(struct socket *sock)
 	if (ret)
 		goto out;
 
-	new_sock->type = sock->type;
-	new_sock->ops = sock->ops;
 	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
 	if (ret < 0)
 		goto out;
 
+	/* sock_create_lite() does not get a hold on the owner module so we
+	 * need to do it here. Note that sock_release() uses sock->ops to
+	 * determine if it needs to decrement the reference count. So set
+	 * sock->ops after calling accept() in case that fails. And there's
+	 * no need to do try_module_get() as the listener should have a hold
+	 * already.
+	 */
+	new_sock->ops = sock->ops;
+	__module_get(new_sock->ops->owner);
+
 	ret = rds_tcp_keepalive(new_sock);
 	if (ret < 0)
 		goto out;
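The new comment encodes an invariant worth spelling out: whoever points sock->ops at a module's ops table owes that module one reference, because sock_release() drops exactly one via ops->owner. A toy stand-in (not the kernel API) showing the get paired with the release-side put:

#include <assert.h>
#include <stdio.h>

struct module { int refs; };
struct proto_ops_stub { struct module *owner; };
struct socket_stub { const struct proto_ops_stub *ops; };

static void module_get(struct module *m) { m->refs++; }
static void module_put(struct module *m) { m->refs--; }

static void socket_stub_release(struct socket_stub *s)
{
	if (s->ops)
		module_put(s->ops->owner);	/* pairs with the get below */
	s->ops = NULL;
}

int main(void)
{
	struct module owner = { .refs = 1 };	/* the listener's hold */
	const struct proto_ops_stub ops = { .owner = &owner };
	struct socket_stub accepted = { 0 };

	/* set ops only after accept succeeded, then take the reference */
	accepted.ops = &ops;
	module_get(&owner);

	socket_stub_release(&accepted);
	assert(owner.refs == 1);	/* balanced: only the listener's hold */
	printf("owner refs: %d\n", owner.refs);
	return 0;
}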
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 229172d509cc..03225a8df973 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -188,7 +188,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	int ret;
 
 	if (qdisc_pkt_len(skb) > q->max_size) {
-		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
+		if (skb_is_gso(skb) &&
+		    skb_gso_validate_mac_len(skb, q->max_size))
 			return tbf_segment(skb, sch, to_free);
 		return qdisc_drop(skb, sch, to_free);
 	}
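The tbf change swaps a direct skb_gso_mac_seglen() comparison for skb_gso_validate_mac_len(), which also copes with GSO_BY_FRAGS, the 0xffff sentinel used when segment sizes come from the frag list rather than gso_size, so a header-plus-gso_size computation is meaningless. A simplified sketch of why the naive arithmetic is not enough (field names are illustrative; the real helper walks the frag list in the sentinel case rather than failing):

#include <stdbool.h>
#include <stdio.h>

#define GSO_BY_FRAGS 0xffff

static bool gso_validate_mac_len(unsigned int gso_size,
				 unsigned int mac_hdr_len, unsigned int max)
{
	if (gso_size == GSO_BY_FRAGS)
		return false;	/* per-fragment sizes; be conservative here */
	return mac_hdr_len + gso_size <= max;
}

int main(void)
{
	printf("%d\n", gso_validate_mac_len(1448, 66, 1514));		/* 1 */
	printf("%d\n", gso_validate_mac_len(GSO_BY_FRAGS, 66, 1514));	/* 0 */
	return 0;
}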
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index da1a5cdefd13..8cc97834d4f6 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1406,8 +1406,10 @@ static int smc_create(struct net *net, struct socket *sock, int protocol,
 	smc->use_fallback = false; /* assume rdma capability first */
 	rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
 			      IPPROTO_TCP, &smc->clcsock);
-	if (rc)
+	if (rc) {
 		sk_common_release(sk);
+		goto out;
+	}
 	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
 	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
 
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 3cd086e5bd28..b42395d24cba 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -269,7 +269,7 @@ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
 
 	if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
 		return; /* short message */
-	if (cdc->len != sizeof(*cdc))
+	if (cdc->len != SMC_WR_TX_SIZE)
 		return; /* invalid message */
 	smc_cdc_msg_recv(cdc, link, wc->wr_id);
 }
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2424c7100aaf..645dd226177b 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -177,6 +177,7 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
 
 	lnk = &lgr->lnk[SMC_SINGLE_LINK];
 	/* initialize link */
+	lnk->link_id = SMC_SINGLE_LINK;
 	lnk->smcibdev = smcibdev;
 	lnk->ibport = ibport;
 	lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
@@ -465,7 +466,7 @@ create:
 		rc = smc_link_determine_gid(conn->lgr);
 	}
 	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
-	conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg);
+	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&conn->acurs_lock);
 #endif
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 92fe4cc8c82c..b4aa4fcedb96 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -92,7 +92,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[],
 	memcpy(confllc->sender_mac, mac, ETH_ALEN);
 	memcpy(confllc->sender_gid, gid, SMC_GID_SIZE);
 	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
-	/* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */
+	confllc->link_num = link->link_id;
 	memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
 	confllc->max_links = SMC_LINKS_PER_LGR_MAX;
 	/* send llc message */
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 122162a31816..04e516d18054 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -189,6 +189,7 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid,
 	grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
 	grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
 	grp->open = group_is_open;
+	*grp->open = false;
 	filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
 	if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
 				    filter, &grp->subid))
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b0323ec7971e..7dfa9fc99ec3 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -473,6 +473,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 	sk->sk_write_space = tipc_write_space;
 	sk->sk_destruct = tipc_sock_destruct;
 	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+	tsk->group_is_open = true;
 	atomic_set(&tsk->dupl_rcvcnt, 0);
 
 	/* Start out with safe limits until we receive an advertised window */
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e9b4b53ab53e..d824d548447e 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -46,16 +46,26 @@ MODULE_DESCRIPTION("Transport Layer Security Support");
 MODULE_LICENSE("Dual BSD/GPL");
 
 enum {
+	TLSV4,
+	TLSV6,
+	TLS_NUM_PROTS,
+};
+
+enum {
 	TLS_BASE_TX,
 	TLS_SW_TX,
 	TLS_NUM_CONFIG,
 };
 
-static struct proto tls_prots[TLS_NUM_CONFIG];
+static struct proto *saved_tcpv6_prot;
+static DEFINE_MUTEX(tcpv6_prot_mutex);
+static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG];
 
 static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
 {
-	sk->sk_prot = &tls_prots[ctx->tx_conf];
+	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+
+	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf];
 }
 
 int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -453,8 +463,21 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
 	return do_tls_setsockopt(sk, optname, optval, optlen);
 }
 
+static void build_protos(struct proto *prot, struct proto *base)
+{
+	prot[TLS_BASE_TX] = *base;
+	prot[TLS_BASE_TX].setsockopt = tls_setsockopt;
+	prot[TLS_BASE_TX].getsockopt = tls_getsockopt;
+	prot[TLS_BASE_TX].close = tls_sk_proto_close;
+
+	prot[TLS_SW_TX] = prot[TLS_BASE_TX];
+	prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg;
+	prot[TLS_SW_TX].sendpage = tls_sw_sendpage;
+}
+
 static int tls_init(struct sock *sk)
 {
+	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tls_context *ctx;
 	int rc = 0;
@@ -479,6 +502,17 @@ static int tls_init(struct sock *sk)
 	ctx->getsockopt = sk->sk_prot->getsockopt;
 	ctx->sk_proto_close = sk->sk_prot->close;
 
+	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
+	if (ip_ver == TLSV6 &&
+	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+		mutex_lock(&tcpv6_prot_mutex);
+		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+			build_protos(tls_prots[TLSV6], sk->sk_prot);
+			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+		}
+		mutex_unlock(&tcpv6_prot_mutex);
+	}
+
 	ctx->tx_conf = TLS_BASE_TX;
 	update_sk_prot(sk, ctx);
 out:
@@ -493,21 +527,9 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
 	.init = tls_init,
 };
 
-static void build_protos(struct proto *prot, struct proto *base)
-{
-	prot[TLS_BASE_TX] = *base;
-	prot[TLS_BASE_TX].setsockopt = tls_setsockopt;
-	prot[TLS_BASE_TX].getsockopt = tls_getsockopt;
-	prot[TLS_BASE_TX].close = tls_sk_proto_close;
-
-	prot[TLS_SW_TX] = prot[TLS_BASE_TX];
-	prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg;
-	prot[TLS_SW_TX].sendpage = tls_sw_sendpage;
-}
-
 static int __init tls_register(void)
 {
-	build_protos(tls_prots, &tcp_prot);
+	build_protos(tls_prots[TLSV4], &tcp_prot);
 
 	tcp_register_ulp(&tcp_tls_ulp_ops);
 
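The IPv6 path above is classic double-checked locking: a lock-free acquire-load of saved_tcpv6_prot on the fast path, and a mutex-guarded rebuild plus release-store when the base proto has changed. A userspace sketch of the same shape in C11 atomics (ensure_built() and the names around it are stand-ins, not the kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct proto { const char *name; };

static struct proto tcpv6_proto = { "tcpv6" };
static _Atomic(struct proto *) saved_base;
static struct proto built;	/* stands in for the per-family table */
static pthread_mutex_t build_lock = PTHREAD_MUTEX_INITIALIZER;

static void ensure_built(struct proto *base)
{
	/* fast path: already built for this base proto */
	if (atomic_load_explicit(&saved_base, memory_order_acquire) == base)
		return;

	pthread_mutex_lock(&build_lock);
	if (atomic_load_explicit(&saved_base, memory_order_relaxed) != base) {
		built = *base;	/* build_protos() analogue */
		atomic_store_explicit(&saved_base, base,
				      memory_order_release);
	}
	pthread_mutex_unlock(&build_lock);
}

int main(void)
{
	ensure_built(&tcpv6_proto);	/* slow path builds once */
	ensure_built(&tcpv6_proto);	/* fast path skips the mutex */
	printf("built from %s\n", built.name);
	return 0;
}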
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 1abcc4fc4df1..41722046b937 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -34,9 +34,10 @@ config CFG80211
 
 	  When built as a module it will be called cfg80211.
 
+if CFG80211
+
 config NL80211_TESTMODE
 	bool "nl80211 testmode command"
-	depends on CFG80211
 	help
 	  The nl80211 testmode command helps implementing things like
 	  factory calibration or validation tools for wireless chips.
@@ -51,7 +52,6 @@ config NL80211_TESTMODE
 
 config CFG80211_DEVELOPER_WARNINGS
 	bool "enable developer warnings"
-	depends on CFG80211
 	default n
 	help
 	  This option enables some additional warnings that help
@@ -68,7 +68,7 @@ config CFG80211_DEVELOPER_WARNINGS
 
 config CFG80211_CERTIFICATION_ONUS
 	bool "cfg80211 certification onus"
-	depends on CFG80211 && EXPERT
+	depends on EXPERT
 	default n
 	---help---
 	  You should disable this option unless you are both capable
@@ -159,7 +159,6 @@ config CFG80211_REG_RELAX_NO_IR
 
 config CFG80211_DEFAULT_PS
 	bool "enable powersave by default"
-	depends on CFG80211
 	default y
 	help
 	  This option enables powersave mode by default.
@@ -170,7 +169,6 @@ config CFG80211_DEFAULT_PS
 
 config CFG80211_DEBUGFS
 	bool "cfg80211 DebugFS entries"
-	depends on CFG80211
 	depends on DEBUG_FS
 	---help---
 	  You can enable this if you want debugfs entries for cfg80211.
@@ -180,7 +178,6 @@ config CFG80211_DEBUGFS
 config CFG80211_CRDA_SUPPORT
 	bool "support CRDA" if EXPERT
 	default y
-	depends on CFG80211
 	help
 	  You should enable this option unless you know for sure you have no
 	  need for it, for example when using internal regdb (above) or the
@@ -190,7 +187,6 @@ config CFG80211_CRDA_SUPPORT
 
 config CFG80211_WEXT
 	bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT
-	depends on CFG80211
 	select WEXT_CORE
 	default y if CFG80211_WEXT_EXPORT
 	help
@@ -199,11 +195,12 @@ config CFG80211_WEXT
 
 config CFG80211_WEXT_EXPORT
 	bool
-	depends on CFG80211
 	help
 	  Drivers should select this option if they require cfg80211's
 	  wext compatibility symbols to be exported.
 
+endif # CFG80211
+
 config LIB80211
 	tristate
 	default n
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 8e70291e586a..e87d6c4dd5b6 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -217,7 +217,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 		if (skb->len <= mtu)
 			goto ok;
 
-		if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 			goto ok;
 	}
 
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index c73592fa3d41..437c0b1c9d21 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -11163,6 +11163,64 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
 	},
+	{
+		"xadd/w check unaligned stack",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "misaligned stack access off",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"xadd/w check unaligned map",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = REJECT,
+		.errstr = "misaligned value access off",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"xadd/w check unaligned pkt",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct xdp_md, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct xdp_md, data_end)),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 99),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
+			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
+			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "BPF_XADD stores into R2 packet",
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
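Of the three new tests, the stack and map cases exercise the verifier's alignment rule for a 4-byte BPF_XADD, while the packet case is rejected outright because XADD into packet memory is never allowed. The alignment part reduces to offset-modulo-width; a quick standalone check:

#include <stdio.h>

static int is_aligned(long off, unsigned int width)
{
	/* cast keeps the modulo signed, so negative stack offsets work */
	return off % (long)width == 0;
}

int main(void)
{
	printf("stack -8 w=4: %d\n", is_aligned(-8, 4));	/* ok */
	printf("stack -7 w=4: %d\n", is_aligned(-7, 4));	/* rejected */
	printf("map   +3 w=4: %d\n", is_aligned(3, 4));	/* rejected */
	return 0;
}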
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
index e34075059c26..90bba48c3f07 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
@@ -315,7 +315,7 @@
315 "cmdUnderTest": "$TC actions ls action skbmod", 315 "cmdUnderTest": "$TC actions ls action skbmod",
316 "expExitCode": "0", 316 "expExitCode": "0",
317 "verifyCmd": "$TC actions get action skbmod index 4", 317 "verifyCmd": "$TC actions get action skbmod index 4",
318 "matchPattern": "action order [0-9]*: skbmod pipe set etype 0x0031", 318 "matchPattern": "action order [0-9]*: skbmod pipe set etype 0x31",
319 "matchCount": "1", 319 "matchCount": "1",
320 "teardown": [ 320 "teardown": [
321 "$TC actions flush action skbmod" 321 "$TC actions flush action skbmod"