aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-07-18 22:32:54 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-07-18 22:32:54 -0400
commit024ddc0ce1049298bd3cae60ae45d9c5f0fb8b9c (patch)
tree18b44ede17871ded93577c317f3afe3db122ba33
parent05df204549c510c7c56e58d25098c448998a0cd5 (diff)
parente56b8ce363a36fb7b74b80aaa5cc9084f2c908b4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: "Lots of fixes, here goes: 1) NULL deref in qtnfmac, from Gustavo A. R. Silva. 2) Kernel oops when fw download fails in rtlwifi, from Ping-Ke Shih. 3) Lost completion messages in AF_XDP, from Magnus Karlsson. 4) Correct bogus self-assignment in rhashtable, from Rishabh Bhatnagar. 5) Fix regression in ipv6 route append handling, from David Ahern. 6) Fix masking in __set_phy_supported(), from Heiner Kallweit. 7) Missing module owner set in x_tables icmp, from Florian Westphal. 8) liquidio's timeouts are HZ dependent, fix from Nicholas Mc Guire. 9) Link setting fixes for sh_eth and ravb, from Vladimir Zapolskiy. 10) Fix NULL deref when using chains in act_csum, from Davide Caratti. 11) XDP_REDIRECT needs to check if the interface is up and whether the MTU is sufficient. From Toshiaki Makita. 12) Net diag can do a double free when killing TCP_NEW_SYN_RECV connections, from Lorenzo Colitti. 13) nf_defrag in ipv6 can unnecessarily hold onto dst entries for a full minute, delaying device unregister. From Eric Dumazet. 14) Update MAC entries in the correct order in ixgbe, from Alexander Duyck. 15) Don't leave partially mangled bpf program in jit_subprogs, from Daniel Borkmann. 16) Fix pfmemalloc SKB state propagation, from Stefano Brivio. 17) Fix ACK handling in DCTCP congestion control, from Yuchung Cheng. 18) Use after free in tun XDP_TX, from Toshiaki Makita. 19) Stale ipv6 header pointer in ipv6 gre code, from Prashant Bhole. 20) Don't reuse remainder of RX page when XDP is set in mlx4, from Saeed Mahameed. 21) Fix window probe handling of TCP repair sockets, from Stefan Baranoff. 22) Missing socket locking in smc_ioctl(), from Ursula Braun. 23) IPV6_ILA needs DST_CACHE, from Arnd Bergmann. 24) Spectre v1 fix in cxgb3, from Gustavo A. R. Silva. 25) Two spots in ipv6 do a rol32() on a hash value but ignore the result. 
Fixes from Colin Ian King" * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (176 commits) tcp: identify cryptic messages as TCP seq # bugs ptp: fix missing break in switch hv_netvsc: Fix napi reschedule while receive completion is busy MAINTAINERS: Drop inactive Vitaly Bordug's email net: cavium: Add fine-granular dependencies on PCI net: qca_spi: Fix log level if probe fails net: qca_spi: Make sure the QCA7000 reset is triggered net: qca_spi: Avoid packet drop during initial sync ipv6: fix useless rol32 call on hash ipv6: sr: fix useless rol32 call on hash net: sched: Using NULL instead of plain integer net: usb: asix: replace mii_nway_restart in resume path net: cxgb3_main: fix potential Spectre v1 lib/rhashtable: consider param->min_size when setting initial table size net/smc: reset recv timeout after clc handshake net/smc: add error handling for get_user() net/smc: optimize consumer cursor updates net/nfc: Avoid stalls when nfc_alloc_send_skb() returned NULL. ipv6: ila: select CONFIG_DST_CACHE net: usb: rtl8150: demote allmulti message to dev_dbg() ...
-rw-r--r--Documentation/networking/bonding.txt2
-rw-r--r--Documentation/networking/e100.rst27
-rw-r--r--Documentation/networking/e1000.rst187
-rw-r--r--MAINTAINERS7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c47
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/cadence/macb.h11
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c36
-rw-r--r--drivers/net/ethernet/cavium/Kconfig12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c5
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c35
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c43
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c48
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c39
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c21
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c93
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c94
-rw-r--r--drivers/net/ethernet/sfc/ef10.c30
-rw-r--r--drivers/net/ethernet/sfc/efx.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c1
-rw-r--r--drivers/net/hyperv/netvsc.c17
-rw-r--r--drivers/net/hyperv/rndis_filter.c1
-rw-r--r--drivers/net/ieee802154/adf7242.c34
-rw-r--r--drivers/net/ieee802154/at86rf230.c15
-rw-r--r--drivers/net/ieee802154/fakelb.c2
-rw-r--r--drivers/net/ieee802154/mcr20a.c3
-rw-r--r--drivers/net/phy/marvell.c54
-rw-r--r--drivers/net/phy/phy_device.c7
-rw-r--r--drivers/net/phy/sfp-bus.c35
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/asix_devices.c4
-rw-r--r--drivers/net/usb/lan78xx.c5
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/smsc75xx.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c7
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c2
-rw-r--r--drivers/ptp/ptp_chardev.c1
-rw-r--r--include/linux/bpf-cgroup.h1
-rw-r--r--include/linux/filter.h6
-rw-r--r--include/linux/fsl/guts.h1
-rw-r--r--include/linux/if_bridge.h4
-rw-r--r--include/linux/igmp.h2
-rw-r--r--include/linux/marvell_phy.h2
-rw-r--r--include/linux/skbuff.h10
-rw-r--r--include/net/ip6_route.h6
-rw-r--r--include/net/ipv6.h13
-rw-r--r--include/net/netfilter/nf_tables_core.h6
-rw-r--r--include/net/netfilter/nf_tproxy.h4
-rw-r--r--include/net/tc_act/tc_csum.h1
-rw-r--r--include/net/tc_act/tc_tunnel_key.h1
-rw-r--r--include/net/tcp.h6
-rw-r--r--include/net/xdp_sock.h4
-rw-r--r--include/uapi/linux/ethtool.h2
-rw-r--r--include/uapi/linux/tcp.h4
-rw-r--r--kernel/bpf/btf.c30
-rw-r--r--kernel/bpf/devmap.c7
-rw-r--r--kernel/bpf/hashtab.c16
-rw-r--r--kernel/bpf/sockmap.c43
-rw-r--r--kernel/bpf/syscall.c4
-rw-r--r--kernel/bpf/verifier.c11
-rw-r--r--lib/rhashtable.c27
-rw-r--r--net/batman-adv/bat_iv_ogm.c4
-rw-r--r--net/batman-adv/bat_v.c4
-rw-r--r--net/batman-adv/debugfs.c40
-rw-r--r--net/batman-adv/debugfs.h11
-rw-r--r--net/batman-adv/hard-interface.c37
-rw-r--r--net/batman-adv/translation-table.c7
-rw-r--r--net/bpf/test_run.c17
-rw-r--r--net/core/filter.c149
-rw-r--r--net/core/gen_stats.c16
-rw-r--r--net/core/skbuff.c1
-rw-r--r--net/dns_resolver/dns_key.c28
-rw-r--r--net/ieee802154/6lowpan/core.c6
-rw-r--r--net/ipv4/fib_frontend.c1
-rw-r--r--net/ipv4/igmp.c58
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/ip_sockglue.c4
-rw-r--r--net/ipv4/netfilter/ip_tables.c1
-rw-r--r--net/ipv4/netfilter/nf_tproxy_ipv4.c18
-rw-r--r--net/ipv4/sysctl_net_ipv4.c5
-rw-r--r--net/ipv4/tcp.c16
-rw-r--r--net/ipv4/tcp_dctcp.c31
-rw-r--r--net/ipv4/tcp_ipv4.c23
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv6/Kconfig1
-rw-r--r--net/ipv6/calipso.c9
-rw-r--r--net/ipv6/exthdrs.c111
-rw-r--r--net/ipv6/ip6_fib.c156
-rw-r--r--net/ipv6/ip6_gre.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c32
-rw-r--r--net/ipv6/mcast.c64
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c1
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/netfilter/nf_tproxy_ipv6.c18
-rw-r--r--net/ipv6/route.c10
-rw-r--r--net/ipv6/seg6_iptunnel.c2
-rw-r--r--net/netfilter/Kconfig25
-rw-r--r--net/netfilter/Makefile7
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_tables_set_core.c28
-rw-r--r--net/netfilter/nft_compat.c13
-rw-r--r--net/netfilter/nft_set_bitmap.c19
-rw-r--r--net/netfilter/nft_set_hash.c29
-rw-r--r--net/netfilter/nft_set_rbtree.c19
-rw-r--r--net/netfilter/xt_TPROXY.c8
-rw-r--r--net/nfc/llcp_commands.c9
-rw-r--r--net/nsh/nsh.c2
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/qrtr/qrtr.c13
-rw-r--r--net/sched/act_csum.c6
-rw-r--r--net/sched/act_tunnel_key.c6
-rw-r--r--net/sched/cls_api.c4
-rw-r--r--net/sched/sch_fq_codel.c25
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/smc/af_smc.c37
-rw-r--r--net/smc/smc_clc.c3
-rw-r--r--net/smc/smc_close.c2
-rw-r--r--net/smc/smc_tx.c12
-rw-r--r--net/tipc/discover.c18
-rw-r--r--net/tipc/net.c17
-rw-r--r--net/tipc/node.c7
-rw-r--r--net/tls/tls_sw.c7
-rw-r--r--net/xdp/xsk.c30
-rw-r--r--net/xdp/xsk_queue.h9
-rw-r--r--samples/bpf/.gitignore49
-rw-r--r--samples/bpf/parse_varlen.c6
-rw-r--r--samples/bpf/test_overhead_user.c19
-rw-r--r--samples/bpf/trace_event_user.c27
-rwxr-xr-xsamples/bpf/xdp2skb_meta.sh6
-rw-r--r--samples/bpf/xdpsock_user.c2
-rwxr-xr-xscripts/tags.sh1
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c23
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh41
-rwxr-xr-xtools/testing/selftests/net/udpgso_bench.sh3
171 files changed, 1739 insertions, 1128 deletions
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index c13214d073a4..d3e5dd26db12 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -1490,7 +1490,7 @@ To remove an ARP target:
1490 1490
1491To configure the interval between learning packet transmits: 1491To configure the interval between learning packet transmits:
1492# echo 12 > /sys/class/net/bond0/bonding/lp_interval 1492# echo 12 > /sys/class/net/bond0/bonding/lp_interval
1493 NOTE: the lp_inteval is the number of seconds between instances where 1493 NOTE: the lp_interval is the number of seconds between instances where
1494the bonding driver sends learning packets to each slaves peer switch. The 1494the bonding driver sends learning packets to each slaves peer switch. The
1495default interval is 1 second. 1495default interval is 1 second.
1496 1496
diff --git a/Documentation/networking/e100.rst b/Documentation/networking/e100.rst
index 9708f5fa76de..f81111eba9c5 100644
--- a/Documentation/networking/e100.rst
+++ b/Documentation/networking/e100.rst
@@ -47,41 +47,45 @@ Driver Configuration Parameters
47The default value for each parameter is generally the recommended setting, 47The default value for each parameter is generally the recommended setting,
48unless otherwise noted. 48unless otherwise noted.
49 49
50Rx Descriptors: Number of receive descriptors. A receive descriptor is a data 50Rx Descriptors:
51 Number of receive descriptors. A receive descriptor is a data
51 structure that describes a receive buffer and its attributes to the network 52 structure that describes a receive buffer and its attributes to the network
52 controller. The data in the descriptor is used by the controller to write 53 controller. The data in the descriptor is used by the controller to write
53 data from the controller to host memory. In the 3.x.x driver the valid range 54 data from the controller to host memory. In the 3.x.x driver the valid range
54 for this parameter is 64-256. The default value is 256. This parameter can be 55 for this parameter is 64-256. The default value is 256. This parameter can be
55 changed using the command:: 56 changed using the command::
56 57
57 ethtool -G eth? rx n 58 ethtool -G eth? rx n
58 59
59 Where n is the number of desired Rx descriptors. 60 Where n is the number of desired Rx descriptors.
60 61
61Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data 62Tx Descriptors:
63 Number of transmit descriptors. A transmit descriptor is a data
62 structure that describes a transmit buffer and its attributes to the network 64 structure that describes a transmit buffer and its attributes to the network
63 controller. The data in the descriptor is used by the controller to read 65 controller. The data in the descriptor is used by the controller to read
64 data from the host memory to the controller. In the 3.x.x driver the valid 66 data from the host memory to the controller. In the 3.x.x driver the valid
65 range for this parameter is 64-256. The default value is 128. This parameter 67 range for this parameter is 64-256. The default value is 128. This parameter
66 can be changed using the command:: 68 can be changed using the command::
67 69
68 ethtool -G eth? tx n 70 ethtool -G eth? tx n
69 71
70 Where n is the number of desired Tx descriptors. 72 Where n is the number of desired Tx descriptors.
71 73
72Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by 74Speed/Duplex:
75 The driver auto-negotiates the link speed and duplex settings by
73 default. The ethtool utility can be used as follows to force speed/duplex.:: 76 default. The ethtool utility can be used as follows to force speed/duplex.::
74 77
75 ethtool -s eth? autoneg off speed {10|100} duplex {full|half} 78 ethtool -s eth? autoneg off speed {10|100} duplex {full|half}
76 79
77 NOTE: setting the speed/duplex to incorrect values will cause the link to 80 NOTE: setting the speed/duplex to incorrect values will cause the link to
78 fail. 81 fail.
79 82
80Event Log Message Level: The driver uses the message level flag to log events 83Event Log Message Level:
84 The driver uses the message level flag to log events
81 to syslog. The message level can be set at driver load time. It can also be 85 to syslog. The message level can be set at driver load time. It can also be
82 set using the command:: 86 set using the command::
83 87
84 ethtool -s eth? msglvl n 88 ethtool -s eth? msglvl n
85 89
86 90
87Additional Configurations 91Additional Configurations
@@ -92,7 +96,7 @@ Configuring the Driver on Different Distributions
92 96
93Configuring a network driver to load properly when the system is started 97Configuring a network driver to load properly when the system is started
94is distribution dependent. Typically, the configuration process involves 98is distribution dependent. Typically, the configuration process involves
95adding an alias line to /etc/modprobe.d/*.conf as well as editing other 99adding an alias line to `/etc/modprobe.d/*.conf` as well as editing other
96system startup scripts and/or configuration files. Many popular Linux 100system startup scripts and/or configuration files. Many popular Linux
97distributions ship with tools to make these changes for you. To learn 101distributions ship with tools to make these changes for you. To learn
98the proper way to configure a network device for your system, refer to 102the proper way to configure a network device for your system, refer to
@@ -160,7 +164,10 @@ This results in unbalanced receive traffic.
160If you have multiple interfaces in a server, either turn on ARP 164If you have multiple interfaces in a server, either turn on ARP
161filtering by 165filtering by
162 166
163(1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter 167(1) entering::
168
169 echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
170
164 (this only works if your kernel's version is higher than 2.4.5), or 171 (this only works if your kernel's version is higher than 2.4.5), or
165 172
166(2) installing the interfaces in separate broadcast domains (either 173(2) installing the interfaces in separate broadcast domains (either
diff --git a/Documentation/networking/e1000.rst b/Documentation/networking/e1000.rst
index 144b87eef153..f10dd4086921 100644
--- a/Documentation/networking/e1000.rst
+++ b/Documentation/networking/e1000.rst
@@ -34,7 +34,8 @@ Command Line Parameters
34The default value for each parameter is generally the recommended setting, 34The default value for each parameter is generally the recommended setting,
35unless otherwise noted. 35unless otherwise noted.
36 36
37NOTES: For more information about the AutoNeg, Duplex, and Speed 37NOTES:
38 For more information about the AutoNeg, Duplex, and Speed
38 parameters, see the "Speed and Duplex Configuration" section in 39 parameters, see the "Speed and Duplex Configuration" section in
39 this document. 40 this document.
40 41
@@ -45,22 +46,27 @@ NOTES: For more information about the AutoNeg, Duplex, and Speed
45 46
46AutoNeg 47AutoNeg
47------- 48-------
49
48(Supported only on adapters with copper connections) 50(Supported only on adapters with copper connections)
49Valid Range: 0x01-0x0F, 0x20-0x2F 51
50Default Value: 0x2F 52:Valid Range: 0x01-0x0F, 0x20-0x2F
53:Default Value: 0x2F
51 54
52This parameter is a bit-mask that specifies the speed and duplex settings 55This parameter is a bit-mask that specifies the speed and duplex settings
53advertised by the adapter. When this parameter is used, the Speed and 56advertised by the adapter. When this parameter is used, the Speed and
54Duplex parameters must not be specified. 57Duplex parameters must not be specified.
55 58
56NOTE: Refer to the Speed and Duplex section of this readme for more 59NOTE:
60 Refer to the Speed and Duplex section of this readme for more
57 information on the AutoNeg parameter. 61 information on the AutoNeg parameter.
58 62
59Duplex 63Duplex
60------ 64------
65
61(Supported only on adapters with copper connections) 66(Supported only on adapters with copper connections)
62Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full) 67
63Default Value: 0 68:Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full)
69:Default Value: 0
64 70
65This defines the direction in which data is allowed to flow. Can be 71This defines the direction in which data is allowed to flow. Can be
66either one or two-directional. If both Duplex and the link partner are 72either one or two-directional. If both Duplex and the link partner are
@@ -70,18 +76,22 @@ duplex.
70 76
71FlowControl 77FlowControl
72----------- 78-----------
73Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) 79
74Default Value: Reads flow control settings from the EEPROM 80:Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
81:Default Value: Reads flow control settings from the EEPROM
75 82
76This parameter controls the automatic generation(Tx) and response(Rx) 83This parameter controls the automatic generation(Tx) and response(Rx)
77to Ethernet PAUSE frames. 84to Ethernet PAUSE frames.
78 85
79InterruptThrottleRate 86InterruptThrottleRate
80--------------------- 87---------------------
88
81(not supported on Intel(R) 82542, 82543 or 82544-based adapters) 89(not supported on Intel(R) 82542, 82543 or 82544-based adapters)
82Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative, 90
83 4=simplified balancing) 91:Valid Range:
84Default Value: 3 92 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
93 4=simplified balancing)
94:Default Value: 3
85 95
86The driver can limit the amount of interrupts per second that the adapter 96The driver can limit the amount of interrupts per second that the adapter
87will generate for incoming packets. It does this by writing a value to the 97will generate for incoming packets. It does this by writing a value to the
@@ -135,13 +145,15 @@ Setting InterruptThrottleRate to 0 turns off any interrupt moderation
135and may improve small packet latency, but is generally not suitable 145and may improve small packet latency, but is generally not suitable
136for bulk throughput traffic. 146for bulk throughput traffic.
137 147
138NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and 148NOTE:
149 InterruptThrottleRate takes precedence over the TxAbsIntDelay and
139 RxAbsIntDelay parameters. In other words, minimizing the receive 150 RxAbsIntDelay parameters. In other words, minimizing the receive
140 and/or transmit absolute delays does not force the controller to 151 and/or transmit absolute delays does not force the controller to
141 generate more interrupts than what the Interrupt Throttle Rate 152 generate more interrupts than what the Interrupt Throttle Rate
142 allows. 153 allows.
143 154
144CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection 155CAUTION:
156 If you are using the Intel(R) PRO/1000 CT Network Connection
145 (controller 82547), setting InterruptThrottleRate to a value 157 (controller 82547), setting InterruptThrottleRate to a value
146 greater than 75,000, may hang (stop transmitting) adapters 158 greater than 75,000, may hang (stop transmitting) adapters
147 under certain network conditions. If this occurs a NETDEV 159 under certain network conditions. If this occurs a NETDEV
@@ -151,7 +163,8 @@ CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection
151 hang, ensure that InterruptThrottleRate is set no greater 163 hang, ensure that InterruptThrottleRate is set no greater
152 than 75,000 and is not set to 0. 164 than 75,000 and is not set to 0.
153 165
154NOTE: When e1000 is loaded with default settings and multiple adapters 166NOTE:
167 When e1000 is loaded with default settings and multiple adapters
155 are in use simultaneously, the CPU utilization may increase non- 168 are in use simultaneously, the CPU utilization may increase non-
156 linearly. In order to limit the CPU utilization without impacting 169 linearly. In order to limit the CPU utilization without impacting
157 the overall throughput, we recommend that you load the driver as 170 the overall throughput, we recommend that you load the driver as
@@ -168,9 +181,11 @@ NOTE: When e1000 is loaded with default settings and multiple adapters
168 181
169RxDescriptors 182RxDescriptors
170------------- 183-------------
171Valid Range: 48-256 for 82542 and 82543-based adapters 184
172 48-4096 for all other supported adapters 185:Valid Range:
173Default Value: 256 186 - 48-256 for 82542 and 82543-based adapters
187 - 48-4096 for all other supported adapters
188:Default Value: 256
174 189
175This value specifies the number of receive buffer descriptors allocated 190This value specifies the number of receive buffer descriptors allocated
176by the driver. Increasing this value allows the driver to buffer more 191by the driver. Increasing this value allows the driver to buffer more
@@ -180,15 +195,17 @@ Each descriptor is 16 bytes. A receive buffer is also allocated for each
180descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending 195descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
181on the MTU setting. The maximum MTU size is 16110. 196on the MTU setting. The maximum MTU size is 16110.
182 197
183NOTE: MTU designates the frame size. It only needs to be set for Jumbo 198NOTE:
199 MTU designates the frame size. It only needs to be set for Jumbo
184 Frames. Depending on the available system resources, the request 200 Frames. Depending on the available system resources, the request
185 for a higher number of receive descriptors may be denied. In this 201 for a higher number of receive descriptors may be denied. In this
186 case, use a lower number. 202 case, use a lower number.
187 203
188RxIntDelay 204RxIntDelay
189---------- 205----------
190Valid Range: 0-65535 (0=off) 206
191Default Value: 0 207:Valid Range: 0-65535 (0=off)
208:Default Value: 0
192 209
193This value delays the generation of receive interrupts in units of 1.024 210This value delays the generation of receive interrupts in units of 1.024
194microseconds. Receive interrupt reduction can improve CPU efficiency if 211microseconds. Receive interrupt reduction can improve CPU efficiency if
@@ -198,7 +215,8 @@ of TCP traffic. If the system is reporting dropped receives, this value
198may be set too high, causing the driver to run out of available receive 215may be set too high, causing the driver to run out of available receive
199descriptors. 216descriptors.
200 217
201CAUTION: When setting RxIntDelay to a value other than 0, adapters may 218CAUTION:
219 When setting RxIntDelay to a value other than 0, adapters may
202 hang (stop transmitting) under certain network conditions. If 220 hang (stop transmitting) under certain network conditions. If
203 this occurs a NETDEV WATCHDOG message is logged in the system 221 this occurs a NETDEV WATCHDOG message is logged in the system
204 event log. In addition, the controller is automatically reset, 222 event log. In addition, the controller is automatically reset,
@@ -207,9 +225,11 @@ CAUTION: When setting RxIntDelay to a value other than 0, adapters may
207 225
208RxAbsIntDelay 226RxAbsIntDelay
209------------- 227-------------
228
210(This parameter is supported only on 82540, 82545 and later adapters.) 229(This parameter is supported only on 82540, 82545 and later adapters.)
211Valid Range: 0-65535 (0=off) 230
212Default Value: 128 231:Valid Range: 0-65535 (0=off)
232:Default Value: 128
213 233
214This value, in units of 1.024 microseconds, limits the delay in which a 234This value, in units of 1.024 microseconds, limits the delay in which a
215receive interrupt is generated. Useful only if RxIntDelay is non-zero, 235receive interrupt is generated. Useful only if RxIntDelay is non-zero,
@@ -220,9 +240,11 @@ conditions.
220 240
221Speed 241Speed
222----- 242-----
243
223(This parameter is supported only on adapters with copper connections.) 244(This parameter is supported only on adapters with copper connections.)
224Valid Settings: 0, 10, 100, 1000 245
225Default Value: 0 (auto-negotiate at all supported speeds) 246:Valid Settings: 0, 10, 100, 1000
247:Default Value: 0 (auto-negotiate at all supported speeds)
226 248
227Speed forces the line speed to the specified value in megabits per second 249Speed forces the line speed to the specified value in megabits per second
228(Mbps). If this parameter is not specified or is set to 0 and the link 250(Mbps). If this parameter is not specified or is set to 0 and the link
@@ -231,22 +253,26 @@ speed. Duplex should also be set when Speed is set to either 10 or 100.
231 253
232TxDescriptors 254TxDescriptors
233------------- 255-------------
234Valid Range: 48-256 for 82542 and 82543-based adapters 256
235 48-4096 for all other supported adapters 257:Valid Range:
236Default Value: 256 258 - 48-256 for 82542 and 82543-based adapters
259 - 48-4096 for all other supported adapters
260:Default Value: 256
237 261
238This value is the number of transmit descriptors allocated by the driver. 262This value is the number of transmit descriptors allocated by the driver.
239Increasing this value allows the driver to queue more transmits. Each 263Increasing this value allows the driver to queue more transmits. Each
240descriptor is 16 bytes. 264descriptor is 16 bytes.
241 265
242NOTE: Depending on the available system resources, the request for a 266NOTE:
267 Depending on the available system resources, the request for a
243 higher number of transmit descriptors may be denied. In this case, 268 higher number of transmit descriptors may be denied. In this case,
244 use a lower number. 269 use a lower number.
245 270
246TxIntDelay 271TxIntDelay
247---------- 272----------
248Valid Range: 0-65535 (0=off) 273
249Default Value: 8 274:Valid Range: 0-65535 (0=off)
275:Default Value: 8
250 276
251This value delays the generation of transmit interrupts in units of 277This value delays the generation of transmit interrupts in units of
2521.024 microseconds. Transmit interrupt reduction can improve CPU 2781.024 microseconds. Transmit interrupt reduction can improve CPU
@@ -256,9 +282,11 @@ causing the driver to run out of available transmit descriptors.
256 282
257TxAbsIntDelay 283TxAbsIntDelay
258------------- 284-------------
285
259(This parameter is supported only on 82540, 82545 and later adapters.) 286(This parameter is supported only on 82540, 82545 and later adapters.)
260Valid Range: 0-65535 (0=off) 287
261Default Value: 32 288:Valid Range: 0-65535 (0=off)
289:Default Value: 32
262 290
263This value, in units of 1.024 microseconds, limits the delay in which a 291This value, in units of 1.024 microseconds, limits the delay in which a
264transmit interrupt is generated. Useful only if TxIntDelay is non-zero, 292transmit interrupt is generated. Useful only if TxIntDelay is non-zero,
@@ -269,18 +297,21 @@ network conditions.
269 297
270XsumRX 298XsumRX
271------ 299------
300
272(This parameter is NOT supported on the 82542-based adapter.) 301(This parameter is NOT supported on the 82542-based adapter.)
273Valid Range: 0-1 302
274Default Value: 1 303:Valid Range: 0-1
304:Default Value: 1
275 305
276A value of '1' indicates that the driver should enable IP checksum 306A value of '1' indicates that the driver should enable IP checksum
277offload for received packets (both UDP and TCP) to the adapter hardware. 307offload for received packets (both UDP and TCP) to the adapter hardware.
278 308
279Copybreak 309Copybreak
280--------- 310---------
281Valid Range: 0-xxxxxxx (0=off) 311
282Default Value: 256 312:Valid Range: 0-xxxxxxx (0=off)
283Usage: modprobe e1000.ko copybreak=128 313:Default Value: 256
314:Usage: modprobe e1000.ko copybreak=128
284 315
285Driver copies all packets below or equaling this size to a fresh RX 316Driver copies all packets below or equaling this size to a fresh RX
286buffer before handing it up the stack. 317buffer before handing it up the stack.
@@ -292,8 +323,9 @@ it is also available during runtime at
292 323
293SmartPowerDownEnable 324SmartPowerDownEnable
294-------------------- 325--------------------
295Valid Range: 0-1 326
296Default Value: 0 (disabled) 327:Valid Range: 0-1
328:Default Value: 0 (disabled)
297 329
298Allows PHY to turn off in lower power states. The user can turn off 330Allows PHY to turn off in lower power states. The user can turn off
299this parameter in supported chipsets. 331this parameter in supported chipsets.
@@ -309,14 +341,14 @@ fiber interface board only links at 1000 Mbps full-duplex.
309 341
310For copper-based boards, the keywords interact as follows: 342For copper-based boards, the keywords interact as follows:
311 343
312 The default operation is auto-negotiate. The board advertises all 344- The default operation is auto-negotiate. The board advertises all
313 supported speed and duplex combinations, and it links at the highest 345 supported speed and duplex combinations, and it links at the highest
314 common speed and duplex mode IF the link partner is set to auto-negotiate. 346 common speed and duplex mode IF the link partner is set to auto-negotiate.
315 347
316 If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps 348- If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
317 is advertised (The 1000BaseT spec requires auto-negotiation.) 349 is advertised (The 1000BaseT spec requires auto-negotiation.)
318 350
319 If Speed = 10 or 100, then both Speed and Duplex should be set. Auto- 351- If Speed = 10 or 100, then both Speed and Duplex should be set. Auto-
320 negotiation is disabled, and the AutoNeg parameter is ignored. Partner 352 negotiation is disabled, and the AutoNeg parameter is ignored. Partner
321 SHOULD also be forced. 353 SHOULD also be forced.
322 354
@@ -328,13 +360,15 @@ process.
328The parameter may be specified as either a decimal or hexadecimal value as 360The parameter may be specified as either a decimal or hexadecimal value as
329determined by the bitmap below. 361determined by the bitmap below.
330 362
363============== ====== ====== ======= ======= ====== ====== ======= ======
331Bit position 7 6 5 4 3 2 1 0 364Bit position 7 6 5 4 3 2 1 0
332Decimal Value 128 64 32 16 8 4 2 1 365Decimal Value 128 64 32 16 8 4 2 1
333Hex value 80 40 20 10 8 4 2 1 366Hex value 80 40 20 10 8 4 2 1
334Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 367Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10
335Duplex Full Full Half Full Half 368Duplex Full Full Half Full Half
369============== ====== ====== ======= ======= ====== ====== ======= ======
336 370
337Some examples of using AutoNeg: 371Some examples of using AutoNeg::
338 372
339 modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half) 373 modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half)
340 modprobe e1000 AutoNeg=1 (Same as above) 374 modprobe e1000 AutoNeg=1 (Same as above)
@@ -357,56 +391,59 @@ Additional Configurations
357 391
358Jumbo Frames 392Jumbo Frames
359------------ 393------------
360Jumbo Frames support is enabled by changing the MTU to a value larger 394
361than the default of 1500. Use the ifconfig command to increase the MTU 395 Jumbo Frames support is enabled by changing the MTU to a value larger than
362size. For example:: 396 the default of 1500. Use the ifconfig command to increase the MTU size.
397 For example::
363 398
364 ifconfig eth<x> mtu 9000 up 399 ifconfig eth<x> mtu 9000 up
365 400
366This setting is not saved across reboots. It can be made permanent if 401 This setting is not saved across reboots. It can be made permanent if
367you add:: 402 you add::
368 403
369 MTU=9000 404 MTU=9000
370 405
371to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example 406 to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example
372applies to the Red Hat distributions; other distributions may store this 407 applies to the Red Hat distributions; other distributions may store this
373setting in a different location. 408 setting in a different location.
409
410Notes:
411 Degradation in throughput performance may be observed in some Jumbo frames
412 environments. If this is observed, increasing the application's socket buffer
413 size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
414 See the specific application manual and /usr/src/linux*/Documentation/
415 networking/ip-sysctl.txt for more details.
374 416
375Notes: Degradation in throughput performance may be observed in some 417 - The maximum MTU setting for Jumbo Frames is 16110. This value coincides
376Jumbo frames environments. If this is observed, increasing the 418 with the maximum Jumbo Frames size of 16128.
377application's socket buffer size and/or increasing the
378/proc/sys/net/ipv4/tcp_*mem entry values may help. See the specific
379application manual and /usr/src/linux*/Documentation/
380networking/ip-sysctl.txt for more details.
381 419
382- The maximum MTU setting for Jumbo Frames is 16110. This value 420 - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
383 coincides with the maximum Jumbo Frames size of 16128. 421 poor performance or loss of link.
384 422
385- Using Jumbo frames at 10 or 100 Mbps is not supported and may result 423 - Adapters based on the Intel(R) 82542 and 82573V/E controller do not
386 in poor performance or loss of link. 424 support Jumbo Frames. These correspond to the following product names::
387 425
388- Adapters based on the Intel(R) 82542 and 82573V/E controller do not 426 Intel(R) PRO/1000 Gigabit Server Adapter
389 support Jumbo Frames. These correspond to the following product names: 427 Intel(R) PRO/1000 PM Network Connection
390 Intel(R) PRO/1000 Gigabit Server Adapter Intel(R) PRO/1000 PM Network
391 Connection
392 428
393ethtool 429ethtool
394------- 430-------
395The driver utilizes the ethtool interface for driver configuration and
396diagnostics, as well as displaying statistical information. The ethtool
397version 1.6 or later is required for this functionality.
398 431
399The latest release of ethtool can be found from 432 The driver utilizes the ethtool interface for driver configuration and
400https://www.kernel.org/pub/software/network/ethtool/ 433 diagnostics, as well as displaying statistical information. The ethtool
434 version 1.6 or later is required for this functionality.
435
436 The latest release of ethtool can be found from
437 https://www.kernel.org/pub/software/network/ethtool/
401 438
402Enabling Wake on LAN* (WoL) 439Enabling Wake on LAN* (WoL)
403--------------------------- 440---------------------------
404WoL is configured through the ethtool* utility.
405 441
406WoL will be enabled on the system during the next shut down or reboot. 442 WoL is configured through the ethtool* utility.
407For this driver version, in order to enable WoL, the e1000 driver must be
408loaded when shutting down or rebooting the system.
409 443
444 WoL will be enabled on the system during the next shut down or reboot.
445 For this driver version, in order to enable WoL, the e1000 driver must be
446 loaded when shutting down or rebooting the system.
410 447
411Support 448Support
412======= 449=======
diff --git a/MAINTAINERS b/MAINTAINERS
index 192d7f73fd01..1505c8ea8e7b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2523,7 +2523,7 @@ S: Supported
2523F: drivers/scsi/esas2r 2523F: drivers/scsi/esas2r
2524 2524
2525ATUSB IEEE 802.15.4 RADIO DRIVER 2525ATUSB IEEE 802.15.4 RADIO DRIVER
2526M: Stefan Schmidt <stefan@osg.samsung.com> 2526M: Stefan Schmidt <stefan@datenfreihafen.org>
2527L: linux-wpan@vger.kernel.org 2527L: linux-wpan@vger.kernel.org
2528S: Maintained 2528S: Maintained
2529F: drivers/net/ieee802154/atusb.c 2529F: drivers/net/ieee802154/atusb.c
@@ -5790,7 +5790,6 @@ F: include/linux/fsl/
5790 5790
5791FREESCALE SOC FS_ENET DRIVER 5791FREESCALE SOC FS_ENET DRIVER
5792M: Pantelis Antoniou <pantelis.antoniou@gmail.com> 5792M: Pantelis Antoniou <pantelis.antoniou@gmail.com>
5793M: Vitaly Bordug <vbordug@ru.mvista.com>
5794L: linuxppc-dev@lists.ozlabs.org 5793L: linuxppc-dev@lists.ozlabs.org
5795L: netdev@vger.kernel.org 5794L: netdev@vger.kernel.org
5796S: Maintained 5795S: Maintained
@@ -6909,7 +6908,7 @@ F: drivers/clk/clk-versaclock5.c
6909 6908
6910IEEE 802.15.4 SUBSYSTEM 6909IEEE 802.15.4 SUBSYSTEM
6911M: Alexander Aring <alex.aring@gmail.com> 6910M: Alexander Aring <alex.aring@gmail.com>
6912M: Stefan Schmidt <stefan@osg.samsung.com> 6911M: Stefan Schmidt <stefan@datenfreihafen.org>
6913L: linux-wpan@vger.kernel.org 6912L: linux-wpan@vger.kernel.org
6914W: http://wpan.cakelab.org/ 6913W: http://wpan.cakelab.org/
6915T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git 6914T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
@@ -8629,7 +8628,7 @@ MARVELL MWIFIEX WIRELESS DRIVER
8629M: Amitkumar Karwar <amitkarwar@gmail.com> 8628M: Amitkumar Karwar <amitkarwar@gmail.com>
8630M: Nishant Sarmukadam <nishants@marvell.com> 8629M: Nishant Sarmukadam <nishants@marvell.com>
8631M: Ganapathi Bhat <gbhat@marvell.com> 8630M: Ganapathi Bhat <gbhat@marvell.com>
8632M: Xinming Hu <huxm@marvell.com> 8631M: Xinming Hu <huxinming820@gmail.com>
8633L: linux-wireless@vger.kernel.org 8632L: linux-wireless@vger.kernel.org
8634S: Maintained 8633S: Maintained
8635F: drivers/net/wireless/marvell/mwifiex/ 8634F: drivers/net/wireless/marvell/mwifiex/
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index fc7383106946..91eb8910b1c9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -63,8 +63,6 @@
63 63
64#define AQ_CFG_NAPI_WEIGHT 64U 64#define AQ_CFG_NAPI_WEIGHT 64U
65 65
66#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
67
68/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ 66/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
69 67
70#define AQ_NIC_FC_OFF 0U 68#define AQ_NIC_FC_OFF 0U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a2d416b24ffc..2c6ebd91a9f2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -98,6 +98,8 @@ struct aq_stats_s {
98#define AQ_HW_MEDIA_TYPE_TP 1U 98#define AQ_HW_MEDIA_TYPE_TP 1U
99#define AQ_HW_MEDIA_TYPE_FIBRE 2U 99#define AQ_HW_MEDIA_TYPE_FIBRE 2U
100 100
101#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
102
101struct aq_hw_s { 103struct aq_hw_s {
102 atomic_t flags; 104 atomic_t flags;
103 u8 rbl_enabled:1; 105 u8 rbl_enabled:1;
@@ -177,7 +179,7 @@ struct aq_hw_ops {
177 unsigned int packet_filter); 179 unsigned int packet_filter);
178 180
179 int (*hw_multicast_list_set)(struct aq_hw_s *self, 181 int (*hw_multicast_list_set)(struct aq_hw_s *self,
180 u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX] 182 u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
181 [ETH_ALEN], 183 [ETH_ALEN],
182 u32 count); 184 u32 count);
183 185
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index ba5fe8c4125d..e3ae29e523f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -135,17 +135,10 @@ err_exit:
135static void aq_ndev_set_multicast_settings(struct net_device *ndev) 135static void aq_ndev_set_multicast_settings(struct net_device *ndev)
136{ 136{
137 struct aq_nic_s *aq_nic = netdev_priv(ndev); 137 struct aq_nic_s *aq_nic = netdev_priv(ndev);
138 int err = 0;
139 138
140 err = aq_nic_set_packet_filter(aq_nic, ndev->flags); 139 aq_nic_set_packet_filter(aq_nic, ndev->flags);
141 if (err < 0)
142 return;
143 140
144 if (netdev_mc_count(ndev)) { 141 aq_nic_set_multicast_list(aq_nic, ndev);
145 err = aq_nic_set_multicast_list(aq_nic, ndev);
146 if (err < 0)
147 return;
148 }
149} 142}
150 143
151static const struct net_device_ops aq_ndev_ops = { 144static const struct net_device_ops aq_ndev_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 1a1a6380c128..7a22d0257e04 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -563,34 +563,41 @@ err_exit:
563 563
564int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) 564int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
565{ 565{
566 unsigned int packet_filter = self->packet_filter;
566 struct netdev_hw_addr *ha = NULL; 567 struct netdev_hw_addr *ha = NULL;
567 unsigned int i = 0U; 568 unsigned int i = 0U;
568 569
569 self->mc_list.count = 0U; 570 self->mc_list.count = 0;
570 571 if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
571 netdev_for_each_mc_addr(ha, ndev) { 572 packet_filter |= IFF_PROMISC;
572 ether_addr_copy(self->mc_list.ar[i++], ha->addr); 573 } else {
573 ++self->mc_list.count; 574 netdev_for_each_uc_addr(ha, ndev) {
575 ether_addr_copy(self->mc_list.ar[i++], ha->addr);
574 576
575 if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) 577 if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
576 break; 578 break;
579 }
577 } 580 }
578 581
579 if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) { 582 if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
580 /* Number of filters is too big: atlantic does not support this. 583 packet_filter |= IFF_ALLMULTI;
581 * Force all multi filter to support this.
582 * With this we disable all UC filters and setup "all pass"
583 * multicast mask
584 */
585 self->packet_filter |= IFF_ALLMULTI;
586 self->aq_nic_cfg.mc_list_count = 0;
587 return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
588 self->packet_filter);
589 } else { 584 } else {
590 return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, 585 netdev_for_each_mc_addr(ha, ndev) {
591 self->mc_list.ar, 586 ether_addr_copy(self->mc_list.ar[i++], ha->addr);
592 self->mc_list.count); 587
588 if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
589 break;
590 }
591 }
592
593 if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
594 packet_filter |= IFF_MULTICAST;
595 self->mc_list.count = i;
596 self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
597 self->mc_list.ar,
598 self->mc_list.count);
593 } 599 }
600 return aq_nic_set_packet_filter(self, packet_filter);
594} 601}
595 602
596int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) 603int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index faa533a0ec47..fecfc401f95d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -75,7 +75,7 @@ struct aq_nic_s {
75 struct aq_hw_link_status_s link_status; 75 struct aq_hw_link_status_s link_status;
76 struct { 76 struct {
77 u32 count; 77 u32 count;
78 u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; 78 u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
79 } mc_list; 79 } mc_list;
80 80
81 struct pci_dev *pdev; 81 struct pci_dev *pdev;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 67e2f9fb9402..8cc6abadc03b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
765 765
766static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, 766static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
767 u8 ar_mac 767 u8 ar_mac
768 [AQ_CFG_MULTICAST_ADDRESS_MAX] 768 [AQ_HW_MULTICAST_ADDRESS_MAX]
769 [ETH_ALEN], 769 [ETH_ALEN],
770 u32 count) 770 u32 count)
771{ 771{
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 819f6bcf9b4e..956860a69797 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
784 784
785static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, 785static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
786 u8 ar_mac 786 u8 ar_mac
787 [AQ_CFG_MULTICAST_ADDRESS_MAX] 787 [AQ_HW_MULTICAST_ADDRESS_MAX]
788 [ETH_ALEN], 788 [ETH_ALEN],
789 u32 count) 789 u32 count)
790{ 790{
@@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
812 812
813 hw_atl_rpfl2_uc_flr_en_set(self, 813 hw_atl_rpfl2_uc_flr_en_set(self,
814 (self->aq_nic_cfg->is_mc_list_enabled), 814 (self->aq_nic_cfg->is_mc_list_enabled),
815 HW_ATL_B0_MAC_MIN + i); 815 HW_ATL_B0_MAC_MIN + i);
816 } 816 }
817 817
818 err = aq_hw_err_from_flags(self); 818 err = aq_hw_err_from_flags(self);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index d5fca2e5a9bc..a1f60f89e059 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_device *dev)
1946 if (!priv->is_lite) 1946 if (!priv->is_lite)
1947 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); 1947 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
1948 else 1948 else
1949 priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & 1949 priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
1950 GIB_FCS_STRIP); 1950 GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
1951 1951
1952 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 1952 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1953 0, priv->phy_interface); 1953 0, priv->phy_interface);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index d6e5d0cbf3a3..cf440b91fd04 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -278,7 +278,8 @@ struct bcm_rsb {
278#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) 278#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT)
279#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) 279#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT)
280#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) 280#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT)
281#define GIB_FCS_STRIP (1 << 6) 281#define GIB_FCS_STRIP_SHIFT 6
282#define GIB_FCS_STRIP (1 << GIB_FCS_STRIP_SHIFT)
282#define GIB_LCL_LOOP_EN (1 << 7) 283#define GIB_LCL_LOOP_EN (1 << 7)
283#define GIB_LCL_LOOP_TXEN (1 << 8) 284#define GIB_LCL_LOOP_TXEN (1 << 8)
284#define GIB_RMT_LOOP_EN (1 << 9) 285#define GIB_RMT_LOOP_EN (1 << 9)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 176fc9f4d7de..4394c1162be4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5712,7 +5712,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
5712 } 5712 }
5713 vnic->uc_filter_count = 1; 5713 vnic->uc_filter_count = 1;
5714 5714
5715 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 5715 vnic->rx_mask = 0;
5716 if (bp->dev->flags & IFF_BROADCAST)
5717 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
5716 5718
5717 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 5719 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
5718 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 5720 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
@@ -5917,7 +5919,7 @@ unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
5917 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 5919 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
5918} 5920}
5919 5921
5920void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 5922static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
5921{ 5923{
5922 bp->hw_resc.max_irqs = max_irqs; 5924 bp->hw_resc.max_irqs = max_irqs;
5923} 5925}
@@ -6888,7 +6890,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6888 rc = bnxt_request_irq(bp); 6890 rc = bnxt_request_irq(bp);
6889 if (rc) { 6891 if (rc) {
6890 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 6892 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
6891 goto open_err; 6893 goto open_err_irq;
6892 } 6894 }
6893 } 6895 }
6894 6896
@@ -6928,6 +6930,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6928open_err: 6930open_err:
6929 bnxt_debug_dev_exit(bp); 6931 bnxt_debug_dev_exit(bp);
6930 bnxt_disable_napi(bp); 6932 bnxt_disable_napi(bp);
6933
6934open_err_irq:
6931 bnxt_del_napi(bp); 6935 bnxt_del_napi(bp);
6932 6936
6933open_err_free_mem: 6937open_err_free_mem:
@@ -7214,13 +7218,16 @@ static void bnxt_set_rx_mode(struct net_device *dev)
7214 7218
7215 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 7219 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
7216 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 7220 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
7217 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); 7221 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
7222 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
7218 7223
7219 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 7224 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7220 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 7225 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7221 7226
7222 uc_update = bnxt_uc_list_updated(bp); 7227 uc_update = bnxt_uc_list_updated(bp);
7223 7228
7229 if (dev->flags & IFF_BROADCAST)
7230 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7224 if (dev->flags & IFF_ALLMULTI) { 7231 if (dev->flags & IFF_ALLMULTI) {
7225 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 7232 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7226 vnic->mc_list_count = 0; 7233 vnic->mc_list_count = 0;
@@ -8502,11 +8509,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
8502 int rx, tx, cp; 8509 int rx, tx, cp;
8503 8510
8504 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 8511 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
8512 *max_rx = rx;
8513 *max_tx = tx;
8505 if (!rx || !tx || !cp) 8514 if (!rx || !tx || !cp)
8506 return -ENOMEM; 8515 return -ENOMEM;
8507 8516
8508 *max_rx = rx;
8509 *max_tx = tx;
8510 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 8517 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
8511} 8518}
8512 8519
@@ -8520,8 +8527,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
8520 /* Not enough rings, try disabling agg rings. */ 8527 /* Not enough rings, try disabling agg rings. */
8521 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 8528 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8522 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 8529 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
8523 if (rc) 8530 if (rc) {
8531 /* set BNXT_FLAG_AGG_RINGS back for consistency */
8532 bp->flags |= BNXT_FLAG_AGG_RINGS;
8524 return rc; 8533 return rc;
8534 }
8525 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 8535 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8526 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 8536 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
8527 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 8537 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9b14eb610b9f..91575ef97c8c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1470,7 +1470,6 @@ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
1470unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); 1470unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
1471void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); 1471void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
1472unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); 1472unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
1473void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
1474int bnxt_get_avail_msix(struct bnxt *bp, int num); 1473int bnxt_get_avail_msix(struct bnxt *bp, int num);
1475int bnxt_reserve_rings(struct bnxt *bp); 1474int bnxt_reserve_rings(struct bnxt *bp);
1476void bnxt_tx_disable(struct bnxt *bp); 1475void bnxt_tx_disable(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 795f45024c20..491bd40a254d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -27,6 +27,15 @@
27#define BNXT_FID_INVALID 0xffff 27#define BNXT_FID_INVALID 0xffff
28#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT)) 28#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
29 29
30#define is_vlan_pcp_wildcarded(vlan_tci_mask) \
31 ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
32#define is_vlan_pcp_exactmatch(vlan_tci_mask) \
33 ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
34#define is_vlan_pcp_zero(vlan_tci) \
35 ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
36#define is_vid_exactmatch(vlan_tci_mask) \
37 ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
38
30/* Return the dst fid of the func for flow forwarding 39/* Return the dst fid of the func for flow forwarding
31 * For PFs: src_fid is the fid of the PF 40 * For PFs: src_fid is the fid of the PF
32 * For VF-reps: src_fid the fid of the VF 41 * For VF-reps: src_fid the fid of the VF
@@ -389,6 +398,21 @@ static bool is_exactmatch(void *mask, int len)
389 return true; 398 return true;
390} 399}
391 400
401static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
402 __be16 vlan_tci)
403{
404 /* VLAN priority must be either exactly zero or fully wildcarded and
405 * VLAN id must be exact match.
406 */
407 if (is_vid_exactmatch(vlan_tci_mask) &&
408 ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
409 is_vlan_pcp_zero(vlan_tci)) ||
410 is_vlan_pcp_wildcarded(vlan_tci_mask)))
411 return true;
412
413 return false;
414}
415
392static bool bits_set(void *key, int len) 416static bool bits_set(void *key, int len)
393{ 417{
394 const u8 *p = key; 418 const u8 *p = key;
@@ -803,9 +827,9 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
803 /* Currently VLAN fields cannot be partial wildcard */ 827 /* Currently VLAN fields cannot be partial wildcard */
804 if (bits_set(&flow->l2_key.inner_vlan_tci, 828 if (bits_set(&flow->l2_key.inner_vlan_tci,
805 sizeof(flow->l2_key.inner_vlan_tci)) && 829 sizeof(flow->l2_key.inner_vlan_tci)) &&
806 !is_exactmatch(&flow->l2_mask.inner_vlan_tci, 830 !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
807 sizeof(flow->l2_mask.inner_vlan_tci))) { 831 flow->l2_key.inner_vlan_tci)) {
808 netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n"); 832 netdev_info(bp->dev, "Unsupported VLAN TCI\n");
809 return false; 833 return false;
810 } 834 }
811 if (bits_set(&flow->l2_key.inner_vlan_tpid, 835 if (bits_set(&flow->l2_key.inner_vlan_tpid,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 347e4f946eb2..840f6e505f73 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
169 edev->ulp_tbl[ulp_id].msix_requested = avail_msix; 169 edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
170 } 170 }
171 bnxt_fill_msix_vecs(bp, ent); 171 bnxt_fill_msix_vecs(bp, ent);
172 bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
173 bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); 172 bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
174 edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; 173 edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
175 return avail_msix; 174 return avail_msix;
@@ -192,7 +191,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
192 msix_requested = edev->ulp_tbl[ulp_id].msix_requested; 191 msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
193 bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); 192 bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
194 edev->ulp_tbl[ulp_id].msix_requested = 0; 193 edev->ulp_tbl[ulp_id].msix_requested = 0;
195 bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
196 edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; 194 edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
197 if (netif_running(dev)) { 195 if (netif_running(dev)) {
198 bnxt_close_nic(bp, true, false); 196 bnxt_close_nic(bp, true, false);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3be87efdc93d..aa1374d0af93 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6,11 +6,15 @@
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation. 7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited. 8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
9 * 11 *
10 * Firmware is: 12 * Firmware is:
11 * Derived from proprietary unpublished source code, 13 * Derived from proprietary unpublished source code,
12 * Copyright (C) 2000-2016 Broadcom Corporation. 14 * Copyright (C) 2000-2016 Broadcom Corporation.
13 * Copyright (C) 2016-2017 Broadcom Ltd. 15 * Copyright (C) 2016-2017 Broadcom Ltd.
16 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17 * refers to Broadcom Inc. and/or its subsidiaries.
14 * 18 *
15 * Permission is hereby granted for the distribution of this firmware 19 * Permission is hereby granted for the distribution of this firmware
16 * data in hexadecimal or equivalent format, provided this copyright 20 * data in hexadecimal or equivalent format, provided this copyright
@@ -9290,6 +9294,15 @@ static int tg3_chip_reset(struct tg3 *tp)
9290 9294
9291 tg3_restore_clk(tp); 9295 tg3_restore_clk(tp);
9292 9296
9297 /* Increase the core clock speed to fix tx timeout issue for 5762
9298 * with 100Mbps link speed.
9299 */
9300 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9301 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9302 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9303 TG3_CPMU_MAC_ORIDE_ENABLE);
9304 }
9305
9293 /* Reprobe ASF enable state. */ 9306 /* Reprobe ASF enable state. */
9294 tg3_flag_clear(tp, ENABLE_ASF); 9307 tg3_flag_clear(tp, ENABLE_ASF);
9295 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9308 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1d61aa3efda1..a772a33b685c 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -7,6 +7,8 @@
7 * Copyright (C) 2004 Sun Microsystems Inc. 7 * Copyright (C) 2004 Sun Microsystems Inc.
8 * Copyright (C) 2007-2016 Broadcom Corporation. 8 * Copyright (C) 2007-2016 Broadcom Corporation.
9 * Copyright (C) 2016-2017 Broadcom Limited. 9 * Copyright (C) 2016-2017 Broadcom Limited.
10 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
11 * refers to Broadcom Inc. and/or its subsidiaries.
10 */ 12 */
11 13
12#ifndef _T3_H 14#ifndef _T3_H
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 86659823b259..3d45f4c92cf6 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -166,6 +166,7 @@
166#define GEM_DCFG6 0x0294 /* Design Config 6 */ 166#define GEM_DCFG6 0x0294 /* Design Config 6 */
167#define GEM_DCFG7 0x0298 /* Design Config 7 */ 167#define GEM_DCFG7 0x0298 /* Design Config 7 */
168#define GEM_DCFG8 0x029C /* Design Config 8 */ 168#define GEM_DCFG8 0x029C /* Design Config 8 */
169#define GEM_DCFG10 0x02A4 /* Design Config 10 */
169 170
170#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */ 171#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
171#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */ 172#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
@@ -490,6 +491,12 @@
490#define GEM_SCR2CMP_OFFSET 0 491#define GEM_SCR2CMP_OFFSET 0
491#define GEM_SCR2CMP_SIZE 8 492#define GEM_SCR2CMP_SIZE 8
492 493
494/* Bitfields in DCFG10 */
495#define GEM_TXBD_RDBUFF_OFFSET 12
496#define GEM_TXBD_RDBUFF_SIZE 4
497#define GEM_RXBD_RDBUFF_OFFSET 8
498#define GEM_RXBD_RDBUFF_SIZE 4
499
493/* Bitfields in TISUBN */ 500/* Bitfields in TISUBN */
494#define GEM_SUBNSINCR_OFFSET 0 501#define GEM_SUBNSINCR_OFFSET 0
495#define GEM_SUBNSINCR_SIZE 16 502#define GEM_SUBNSINCR_SIZE 16
@@ -635,6 +642,7 @@
635#define MACB_CAPS_USRIO_DISABLED 0x00000010 642#define MACB_CAPS_USRIO_DISABLED 0x00000010
636#define MACB_CAPS_JUMBO 0x00000020 643#define MACB_CAPS_JUMBO 0x00000020
637#define MACB_CAPS_GEM_HAS_PTP 0x00000040 644#define MACB_CAPS_GEM_HAS_PTP 0x00000040
645#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
638#define MACB_CAPS_FIFO_MODE 0x10000000 646#define MACB_CAPS_FIFO_MODE 0x10000000
639#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 647#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
640#define MACB_CAPS_SG_DISABLED 0x40000000 648#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -1203,6 +1211,9 @@ struct macb {
1203 unsigned int max_tuples; 1211 unsigned int max_tuples;
1204 1212
1205 struct tasklet_struct hresp_err_tasklet; 1213 struct tasklet_struct hresp_err_tasklet;
1214
1215 int rx_bd_rd_prefetch;
1216 int tx_bd_rd_prefetch;
1206}; 1217};
1207 1218
1208#ifdef CONFIG_MACB_USE_HWSTAMP 1219#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 96cc03a6d942..a6c911bb5ce2 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1811,23 +1811,25 @@ static void macb_free_consistent(struct macb *bp)
1811{ 1811{
1812 struct macb_queue *queue; 1812 struct macb_queue *queue;
1813 unsigned int q; 1813 unsigned int q;
1814 int size;
1814 1815
1815 queue = &bp->queues[0];
1816 bp->macbgem_ops.mog_free_rx_buffers(bp); 1816 bp->macbgem_ops.mog_free_rx_buffers(bp);
1817 if (queue->rx_ring) {
1818 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
1819 queue->rx_ring, queue->rx_ring_dma);
1820 queue->rx_ring = NULL;
1821 }
1822 1817
1823 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1818 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1824 kfree(queue->tx_skb); 1819 kfree(queue->tx_skb);
1825 queue->tx_skb = NULL; 1820 queue->tx_skb = NULL;
1826 if (queue->tx_ring) { 1821 if (queue->tx_ring) {
1827 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp), 1822 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
1823 dma_free_coherent(&bp->pdev->dev, size,
1828 queue->tx_ring, queue->tx_ring_dma); 1824 queue->tx_ring, queue->tx_ring_dma);
1829 queue->tx_ring = NULL; 1825 queue->tx_ring = NULL;
1830 } 1826 }
1827 if (queue->rx_ring) {
1828 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
1829 dma_free_coherent(&bp->pdev->dev, size,
1830 queue->rx_ring, queue->rx_ring_dma);
1831 queue->rx_ring = NULL;
1832 }
1831 } 1833 }
1832} 1834}
1833 1835
@@ -1874,7 +1876,7 @@ static int macb_alloc_consistent(struct macb *bp)
1874 int size; 1876 int size;
1875 1877
1876 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1878 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1877 size = TX_RING_BYTES(bp); 1879 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
1878 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1880 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1879 &queue->tx_ring_dma, 1881 &queue->tx_ring_dma,
1880 GFP_KERNEL); 1882 GFP_KERNEL);
@@ -1890,7 +1892,7 @@ static int macb_alloc_consistent(struct macb *bp)
1890 if (!queue->tx_skb) 1892 if (!queue->tx_skb)
1891 goto out_err; 1893 goto out_err;
1892 1894
1893 size = RX_RING_BYTES(bp); 1895 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
1894 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1896 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1895 &queue->rx_ring_dma, GFP_KERNEL); 1897 &queue->rx_ring_dma, GFP_KERNEL);
1896 if (!queue->rx_ring) 1898 if (!queue->rx_ring)
@@ -3797,7 +3799,7 @@ static const struct macb_config np4_config = {
3797static const struct macb_config zynqmp_config = { 3799static const struct macb_config zynqmp_config = {
3798 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 3800 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3799 MACB_CAPS_JUMBO | 3801 MACB_CAPS_JUMBO |
3800 MACB_CAPS_GEM_HAS_PTP, 3802 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
3801 .dma_burst_length = 16, 3803 .dma_burst_length = 16,
3802 .clk_init = macb_clk_init, 3804 .clk_init = macb_clk_init,
3803 .init = macb_init, 3805 .init = macb_init,
@@ -3858,7 +3860,7 @@ static int macb_probe(struct platform_device *pdev)
3858 void __iomem *mem; 3860 void __iomem *mem;
3859 const char *mac; 3861 const char *mac;
3860 struct macb *bp; 3862 struct macb *bp;
3861 int err; 3863 int err, val;
3862 3864
3863 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3865 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3864 mem = devm_ioremap_resource(&pdev->dev, regs); 3866 mem = devm_ioremap_resource(&pdev->dev, regs);
@@ -3947,6 +3949,18 @@ static int macb_probe(struct platform_device *pdev)
3947 else 3949 else
3948 dev->max_mtu = ETH_DATA_LEN; 3950 dev->max_mtu = ETH_DATA_LEN;
3949 3951
3952 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
3953 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
3954 if (val)
3955 bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
3956 macb_dma_desc_get_size(bp);
3957
3958 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
3959 if (val)
3960 bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
3961 macb_dma_desc_get_size(bp);
3962 }
3963
3950 mac = of_get_mac_address(np); 3964 mac = of_get_mac_address(np);
3951 if (mac) { 3965 if (mac) {
3952 ether_addr_copy(bp->dev->dev_addr, mac); 3966 ether_addr_copy(bp->dev->dev_addr, mac);
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 043e3c11c42b..92d88c5f76fb 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -15,7 +15,7 @@ if NET_VENDOR_CAVIUM
15 15
16config THUNDER_NIC_PF 16config THUNDER_NIC_PF
17 tristate "Thunder Physical function driver" 17 tristate "Thunder Physical function driver"
18 depends on 64BIT 18 depends on 64BIT && PCI
19 select THUNDER_NIC_BGX 19 select THUNDER_NIC_BGX
20 ---help--- 20 ---help---
21 This driver supports Thunder's NIC physical function. 21 This driver supports Thunder's NIC physical function.
@@ -28,13 +28,13 @@ config THUNDER_NIC_PF
28config THUNDER_NIC_VF 28config THUNDER_NIC_VF
29 tristate "Thunder Virtual function driver" 29 tristate "Thunder Virtual function driver"
30 imply CAVIUM_PTP 30 imply CAVIUM_PTP
31 depends on 64BIT 31 depends on 64BIT && PCI
32 ---help--- 32 ---help---
33 This driver supports Thunder's NIC virtual function 33 This driver supports Thunder's NIC virtual function
34 34
35config THUNDER_NIC_BGX 35config THUNDER_NIC_BGX
36 tristate "Thunder MAC interface driver (BGX)" 36 tristate "Thunder MAC interface driver (BGX)"
37 depends on 64BIT 37 depends on 64BIT && PCI
38 select PHYLIB 38 select PHYLIB
39 select MDIO_THUNDER 39 select MDIO_THUNDER
40 select THUNDER_NIC_RGX 40 select THUNDER_NIC_RGX
@@ -44,7 +44,7 @@ config THUNDER_NIC_BGX
44 44
45config THUNDER_NIC_RGX 45config THUNDER_NIC_RGX
46 tristate "Thunder MAC interface driver (RGX)" 46 tristate "Thunder MAC interface driver (RGX)"
47 depends on 64BIT 47 depends on 64BIT && PCI
48 select PHYLIB 48 select PHYLIB
49 select MDIO_THUNDER 49 select MDIO_THUNDER
50 ---help--- 50 ---help---
@@ -53,7 +53,7 @@ config THUNDER_NIC_RGX
53 53
54config CAVIUM_PTP 54config CAVIUM_PTP
55 tristate "Cavium PTP coprocessor as PTP clock" 55 tristate "Cavium PTP coprocessor as PTP clock"
56 depends on 64BIT 56 depends on 64BIT && PCI
57 imply PTP_1588_CLOCK 57 imply PTP_1588_CLOCK
58 default y 58 default y
59 ---help--- 59 ---help---
@@ -65,7 +65,7 @@ config CAVIUM_PTP
65 65
66config LIQUIDIO 66config LIQUIDIO
67 tristate "Cavium LiquidIO support" 67 tristate "Cavium LiquidIO support"
68 depends on 64BIT 68 depends on 64BIT && PCI
69 depends on MAY_USE_DEVLINK 69 depends on MAY_USE_DEVLINK
70 imply PTP_1588_CLOCK 70 imply PTP_1588_CLOCK
71 select FW_LOADER 71 select FW_LOADER
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8a815bb57177..7e8454d3b1ad 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -91,6 +91,9 @@ static int octeon_console_debug_enabled(u32 console)
91 */ 91 */
92#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 92#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
93 93
94/* time to wait for possible in-flight requests in milliseconds */
95#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
96
94struct lio_trusted_vf_ctx { 97struct lio_trusted_vf_ctx {
95 struct completion complete; 98 struct completion complete;
96 int status; 99 int status;
@@ -259,7 +262,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
259 force_io_queues_off(oct); 262 force_io_queues_off(oct);
260 263
261 /* To allow for in-flight requests */ 264 /* To allow for in-flight requests */
262 schedule_timeout_uninterruptible(100); 265 schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
263 266
264 if (wait_for_pending_requests(oct)) 267 if (wait_for_pending_requests(oct))
265 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 268 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 3f6afb54a5eb..bb43ddb7539e 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
643static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) 643static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
644{ 644{
645 struct octeon_mgmt *p = netdev_priv(netdev); 645 struct octeon_mgmt *p = netdev_priv(netdev);
646 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; 646 int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
647 647
648 netdev->mtu = new_mtu; 648 netdev->mtu = new_mtu;
649 649
650 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); 650 /* HW lifts the limit if the frame is VLAN tagged
651 * (+4 bytes per each tag, up to two tags)
652 */
653 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
654 /* Set the hardware to truncate packets larger than the MTU. The jabber
655 * register must be set to a multiple of 8 bytes, so round up. JABBER is
656 * an unconditional limit, so we need to account for two possible VLAN
657 * tags.
658 */
651 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, 659 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
652 (size_without_fcs + 7) & 0xfff8); 660 (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
653 661
654 return 0; 662 return 0;
655} 663}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 7b795edd9d3a..a19172dbe6be 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -51,6 +51,7 @@
51#include <linux/sched.h> 51#include <linux/sched.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/uaccess.h> 53#include <linux/uaccess.h>
54#include <linux/nospec.h>
54 55
55#include "common.h" 56#include "common.h"
56#include "cxgb3_ioctl.h" 57#include "cxgb3_ioctl.h"
@@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2268 2269
2269 if (t.qset_idx >= nqsets) 2270 if (t.qset_idx >= nqsets)
2270 return -EINVAL; 2271 return -EINVAL;
2272 t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2271 2273
2272 q = &adapter->params.sge.qset[q1 + t.qset_idx]; 2274 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2273 t.rspq_size = q->rspq_size; 2275 t.rspq_size = q->rspq_size;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 974a868a4824..3720c3e11ebb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -8702,7 +8702,7 @@ static int t4_get_flash_params(struct adapter *adap)
8702 }; 8702 };
8703 8703
8704 unsigned int part, manufacturer; 8704 unsigned int part, manufacturer;
8705 unsigned int density, size; 8705 unsigned int density, size = 0;
8706 u32 flashid = 0; 8706 u32 flashid = 0;
8707 int ret; 8707 int ret;
8708 8708
@@ -8772,11 +8772,6 @@ static int t4_get_flash_params(struct adapter *adap)
8772 case 0x22: /* 256MB */ 8772 case 0x22: /* 256MB */
8773 size = 1 << 28; 8773 size = 1 << 28;
8774 break; 8774 break;
8775
8776 default:
8777 dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
8778 flashid, density);
8779 return -EINVAL;
8780 } 8775 }
8781 break; 8776 break;
8782 } 8777 }
@@ -8792,10 +8787,6 @@ static int t4_get_flash_params(struct adapter *adap)
8792 case 0x17: /* 64MB */ 8787 case 0x17: /* 64MB */
8793 size = 1 << 26; 8788 size = 1 << 26;
8794 break; 8789 break;
8795 default:
8796 dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n",
8797 flashid, density);
8798 return -EINVAL;
8799 } 8790 }
8800 break; 8791 break;
8801 } 8792 }
@@ -8811,10 +8802,6 @@ static int t4_get_flash_params(struct adapter *adap)
8811 case 0x18: /* 16MB */ 8802 case 0x18: /* 16MB */
8812 size = 1 << 24; 8803 size = 1 << 24;
8813 break; 8804 break;
8814 default:
8815 dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
8816 flashid, density);
8817 return -EINVAL;
8818 } 8805 }
8819 break; 8806 break;
8820 } 8807 }
@@ -8830,17 +8817,21 @@ static int t4_get_flash_params(struct adapter *adap)
8830 case 0x18: /* 16MB */ 8817 case 0x18: /* 16MB */
8831 size = 1 << 24; 8818 size = 1 << 24;
8832 break; 8819 break;
8833 default:
8834 dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
8835 flashid, density);
8836 return -EINVAL;
8837 } 8820 }
8838 break; 8821 break;
8839 } 8822 }
8840 default: 8823 }
8841 dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n", 8824
8842 flashid); 8825 /* If we didn't recognize the FLASH part, that's no real issue: the
8843 return -EINVAL; 8826 * Hardware/Software contract says that Hardware will _*ALWAYS*_
8827 * use a FLASH part which is at least 4MB in size and has 64KB
8828 * sectors. The unrecognized FLASH part is likely to be much larger
8829 * than 4MB, but that's all we really need.
8830 */
8831 if (size == 0) {
8832 dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
8833 flashid);
8834 size = 1 << 22;
8844 } 8835 }
8845 8836
8846 /* Store decoded Flash size and fall through into vetting code. */ 8837 /* Store decoded Flash size and fall through into vetting code. */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index d0e196bff081..ffe7acbeaa22 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -329,7 +329,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
329 return; 329 return;
330 330
331failure: 331failure:
332 dev_info(dev, "replenish pools failure\n"); 332 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
333 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
333 pool->free_map[pool->next_free] = index; 334 pool->free_map[pool->next_free] = index;
334 pool->rx_buff[index].skb = NULL; 335 pool->rx_buff[index].skb = NULL;
335 336
@@ -1617,7 +1618,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1617 &tx_crq); 1618 &tx_crq);
1618 } 1619 }
1619 if (lpar_rc != H_SUCCESS) { 1620 if (lpar_rc != H_SUCCESS) {
1620 dev_err(dev, "tx failed with code %ld\n", lpar_rc); 1621 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1622 dev_err_ratelimited(dev, "tx: send failed\n");
1621 dev_kfree_skb_any(skb); 1623 dev_kfree_skb_any(skb);
1622 tx_buff->skb = NULL; 1624 tx_buff->skb = NULL;
1623 1625
@@ -1825,8 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1825 1827
1826 rc = ibmvnic_login(netdev); 1828 rc = ibmvnic_login(netdev);
1827 if (rc) { 1829 if (rc) {
1828 adapter->state = VNIC_PROBED; 1830 adapter->state = reset_state;
1829 return 0; 1831 return rc;
1830 } 1832 }
1831 1833
1832 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || 1834 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
@@ -3204,6 +3206,25 @@ static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3204 return crq; 3206 return crq;
3205} 3207}
3206 3208
3209static void print_subcrq_error(struct device *dev, int rc, const char *func)
3210{
3211 switch (rc) {
3212 case H_PARAMETER:
3213 dev_warn_ratelimited(dev,
3214 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3215 func, rc);
3216 break;
3217 case H_CLOSED:
3218 dev_warn_ratelimited(dev,
3219 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3220 func, rc);
3221 break;
3222 default:
3223 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3224 break;
3225 }
3226}
3227
3207static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, 3228static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3208 union sub_crq *sub_crq) 3229 union sub_crq *sub_crq)
3209{ 3230{
@@ -3230,11 +3251,8 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3230 cpu_to_be64(u64_crq[2]), 3251 cpu_to_be64(u64_crq[2]),
3231 cpu_to_be64(u64_crq[3])); 3252 cpu_to_be64(u64_crq[3]));
3232 3253
3233 if (rc) { 3254 if (rc)
3234 if (rc == H_CLOSED) 3255 print_subcrq_error(dev, rc, __func__);
3235 dev_warn(dev, "CRQ Queue closed\n");
3236 dev_err(dev, "Send error (rc=%d)\n", rc);
3237 }
3238 3256
3239 return rc; 3257 return rc;
3240} 3258}
@@ -3252,11 +3270,8 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3252 cpu_to_be64(remote_handle), 3270 cpu_to_be64(remote_handle),
3253 ioba, num_entries); 3271 ioba, num_entries);
3254 3272
3255 if (rc) { 3273 if (rc)
3256 if (rc == H_CLOSED) 3274 print_subcrq_error(dev, rc, __func__);
3257 dev_warn(dev, "CRQ Queue closed\n");
3258 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
3259 }
3260 3275
3261 return rc; 3276 return rc;
3262} 3277}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3f5c350716bb..0bd1294ba517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1871,7 +1871,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1871 if (enable_addr != 0) 1871 if (enable_addr != 0)
1872 rar_high |= IXGBE_RAH_AV; 1872 rar_high |= IXGBE_RAH_AV;
1873 1873
1874 /* Record lower 32 bits of MAC address and then make
1875 * sure that write is flushed to hardware before writing
1876 * the upper 16 bits and setting the valid bit.
1877 */
1874 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1878 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1879 IXGBE_WRITE_FLUSH(hw);
1875 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1880 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1876 1881
1877 return 0; 1882 return 0;
@@ -1903,8 +1908,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1903 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1908 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1904 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 1909 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1905 1910
1906 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); 1911 /* Clear the address valid bit and upper 16 bits of the address
1912 * before clearing the lower bits. This way we aren't updating
1913 * a live filter.
1914 */
1907 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1915 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1916 IXGBE_WRITE_FLUSH(hw);
1917 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1908 1918
1909 /* clear VMDq pool/queue selection for this RAR */ 1919 /* clear VMDq pool/queue selection for this RAR */
1910 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); 1920 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index c116f459945d..da4322e4daed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -839,7 +839,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
839 } 839 }
840 840
841 itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; 841 itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
842 if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) { 842 if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
843 netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", 843 netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
844 __func__, itd->sa_idx, xs->xso.offload_handle); 844 __func__, itd->sa_idx, xs->xso.offload_handle);
845 return 0; 845 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9f54ccbddea7..3360f7b9ee73 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
474{ 474{
475 const struct mlx4_en_frag_info *frag_info = priv->frag_info; 475 const struct mlx4_en_frag_info *frag_info = priv->frag_info;
476 unsigned int truesize = 0; 476 unsigned int truesize = 0;
477 bool release = true;
477 int nr, frag_size; 478 int nr, frag_size;
478 struct page *page; 479 struct page *page;
479 dma_addr_t dma; 480 dma_addr_t dma;
480 bool release;
481 481
482 /* Collect used fragments while replacing them in the HW descriptors */ 482 /* Collect used fragments while replacing them in the HW descriptors */
483 for (nr = 0;; frags++) { 483 for (nr = 0;; frags++) {
@@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
500 release = page_count(page) != 1 || 500 release = page_count(page) != 1 ||
501 page_is_pfmemalloc(page) || 501 page_is_pfmemalloc(page) ||
502 page_to_nid(page) != numa_mem_id(); 502 page_to_nid(page) != numa_mem_id();
503 } else { 503 } else if (!priv->rx_headroom) {
504 /* rx_headroom for non XDP setup is always 0.
505 * When XDP is set, the above condition will
506 * guarantee page is always released.
507 */
504 u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES); 508 u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
505 509
506 frags->page_offset += sz_align; 510 frags->page_offset += sz_align;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 6aaaf3d9ba31..77b2adb29341 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4756,6 +4756,12 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4756 kfree(mlxsw_sp_rt6); 4756 kfree(mlxsw_sp_rt6);
4757} 4757}
4758 4758
4759static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
4760{
4761 /* RTF_CACHE routes are ignored */
4762 return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4763}
4764
4759static struct fib6_info * 4765static struct fib6_info *
4760mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) 4766mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4761{ 4767{
@@ -4765,11 +4771,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4765 4771
4766static struct mlxsw_sp_fib6_entry * 4772static struct mlxsw_sp_fib6_entry *
4767mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, 4773mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
4768 const struct fib6_info *nrt, bool append) 4774 const struct fib6_info *nrt, bool replace)
4769{ 4775{
4770 struct mlxsw_sp_fib6_entry *fib6_entry; 4776 struct mlxsw_sp_fib6_entry *fib6_entry;
4771 4777
4772 if (!append) 4778 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
4773 return NULL; 4779 return NULL;
4774 4780
4775 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { 4781 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
@@ -4784,7 +4790,8 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
4784 break; 4790 break;
4785 if (rt->fib6_metric < nrt->fib6_metric) 4791 if (rt->fib6_metric < nrt->fib6_metric)
4786 continue; 4792 continue;
4787 if (rt->fib6_metric == nrt->fib6_metric) 4793 if (rt->fib6_metric == nrt->fib6_metric &&
4794 mlxsw_sp_fib6_rt_can_mp(rt))
4788 return fib6_entry; 4795 return fib6_entry;
4789 if (rt->fib6_metric > nrt->fib6_metric) 4796 if (rt->fib6_metric > nrt->fib6_metric)
4790 break; 4797 break;
@@ -5163,7 +5170,7 @@ static struct mlxsw_sp_fib6_entry *
5163mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, 5170mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
5164 const struct fib6_info *nrt, bool replace) 5171 const struct fib6_info *nrt, bool replace)
5165{ 5172{
5166 struct mlxsw_sp_fib6_entry *fib6_entry; 5173 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
5167 5174
5168 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { 5175 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5169 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); 5176 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
@@ -5172,13 +5179,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
5172 continue; 5179 continue;
5173 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) 5180 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
5174 break; 5181 break;
5175 if (replace && rt->fib6_metric == nrt->fib6_metric) 5182 if (replace && rt->fib6_metric == nrt->fib6_metric) {
5176 return fib6_entry; 5183 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
5184 mlxsw_sp_fib6_rt_can_mp(nrt))
5185 return fib6_entry;
5186 if (mlxsw_sp_fib6_rt_can_mp(nrt))
5187 fallback = fallback ?: fib6_entry;
5188 }
5177 if (rt->fib6_metric > nrt->fib6_metric) 5189 if (rt->fib6_metric > nrt->fib6_metric)
5178 return fib6_entry; 5190 return fallback ?: fib6_entry;
5179 } 5191 }
5180 5192
5181 return NULL; 5193 return fallback;
5182} 5194}
5183 5195
5184static int 5196static int
@@ -5304,8 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5304} 5316}
5305 5317
5306static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, 5318static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5307 struct fib6_info *rt, bool replace, 5319 struct fib6_info *rt, bool replace)
5308 bool append)
5309{ 5320{
5310 struct mlxsw_sp_fib6_entry *fib6_entry; 5321 struct mlxsw_sp_fib6_entry *fib6_entry;
5311 struct mlxsw_sp_fib_node *fib_node; 5322 struct mlxsw_sp_fib_node *fib_node;
@@ -5331,7 +5342,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5331 /* Before creating a new entry, try to append route to an existing 5342 /* Before creating a new entry, try to append route to an existing
5332 * multipath entry. 5343 * multipath entry.
5333 */ 5344 */
5334 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append); 5345 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
5335 if (fib6_entry) { 5346 if (fib6_entry) {
5336 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); 5347 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5337 if (err) 5348 if (err)
@@ -5339,14 +5350,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5339 return 0; 5350 return 0;
5340 } 5351 }
5341 5352
5342 /* We received an append event, yet did not find any route to
5343 * append to.
5344 */
5345 if (WARN_ON(append)) {
5346 err = -EINVAL;
5347 goto err_fib6_entry_append;
5348 }
5349
5350 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); 5353 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5351 if (IS_ERR(fib6_entry)) { 5354 if (IS_ERR(fib6_entry)) {
5352 err = PTR_ERR(fib6_entry); 5355 err = PTR_ERR(fib6_entry);
@@ -5364,7 +5367,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5364err_fib6_node_entry_link: 5367err_fib6_node_entry_link:
5365 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); 5368 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5366err_fib6_entry_create: 5369err_fib6_entry_create:
5367err_fib6_entry_append:
5368err_fib6_entry_nexthop_add: 5370err_fib6_entry_nexthop_add:
5369 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5371 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5370 return err; 5372 return err;
@@ -5715,7 +5717,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5715 struct mlxsw_sp_fib_event_work *fib_work = 5717 struct mlxsw_sp_fib_event_work *fib_work =
5716 container_of(work, struct mlxsw_sp_fib_event_work, work); 5718 container_of(work, struct mlxsw_sp_fib_event_work, work);
5717 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; 5719 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5718 bool replace, append; 5720 bool replace;
5719 int err; 5721 int err;
5720 5722
5721 rtnl_lock(); 5723 rtnl_lock();
@@ -5726,10 +5728,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5726 case FIB_EVENT_ENTRY_APPEND: /* fall through */ 5728 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5727 case FIB_EVENT_ENTRY_ADD: 5729 case FIB_EVENT_ENTRY_ADD:
5728 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; 5730 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5729 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5730 err = mlxsw_sp_router_fib6_add(mlxsw_sp, 5731 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
5731 fib_work->fen6_info.rt, replace, 5732 fib_work->fen6_info.rt, replace);
5732 append);
5733 if (err) 5733 if (err)
5734 mlxsw_sp_router_fib_abort(mlxsw_sp); 5734 mlxsw_sp_router_fib_abort(mlxsw_sp);
5735 mlxsw_sp_rt6_release(fib_work->fen6_info.rt); 5735 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00db3401b898..1dfaccd151f0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -502,6 +502,7 @@ enum BAR_ID {
502struct qed_nvm_image_info { 502struct qed_nvm_image_info {
503 u32 num_images; 503 u32 num_images;
504 struct bist_nvm_image_att *image_att; 504 struct bist_nvm_image_att *image_att;
505 bool valid;
505}; 506};
506 507
507#define DRV_MODULE_VERSION \ 508#define DRV_MODULE_VERSION \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index a14e48489029..4340c4c90bcb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -6723,7 +6723,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
6723 format_idx = header & MFW_TRACE_EVENTID_MASK; 6723 format_idx = header & MFW_TRACE_EVENTID_MASK;
6724 6724
6725 /* Skip message if its index doesn't exist in the meta data */ 6725 /* Skip message if its index doesn't exist in the meta data */
6726 if (format_idx > s_mcp_trace_meta.formats_num) { 6726 if (format_idx >= s_mcp_trace_meta.formats_num) {
6727 u8 format_size = 6727 u8 format_size =
6728 (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >> 6728 (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
6729 MFW_TRACE_PRM_SIZE_SHIFT); 6729 MFW_TRACE_PRM_SIZE_SHIFT);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 0cbc74d6ca8b..758a9a5127fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -371,7 +371,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
371 goto err2; 371 goto err2;
372 } 372 }
373 373
374 DP_INFO(cdev, "qed_probe completed successffuly\n"); 374 DP_INFO(cdev, "qed_probe completed successfully\n");
375 375
376 return cdev; 376 return cdev;
377 377
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 4e0b443c9519..9d9e533bccdc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -592,6 +592,9 @@ int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
592 *o_mcp_resp = mb_params.mcp_resp; 592 *o_mcp_resp = mb_params.mcp_resp;
593 *o_mcp_param = mb_params.mcp_param; 593 *o_mcp_param = mb_params.mcp_param;
594 594
595 /* nvm_info needs to be updated */
596 p_hwfn->nvm_info.valid = false;
597
595 return 0; 598 return 0;
596} 599}
597 600
@@ -2555,11 +2558,14 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
2555 2558
2556int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) 2559int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
2557{ 2560{
2558 struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info; 2561 struct qed_nvm_image_info nvm_info;
2559 struct qed_ptt *p_ptt; 2562 struct qed_ptt *p_ptt;
2560 int rc; 2563 int rc;
2561 u32 i; 2564 u32 i;
2562 2565
2566 if (p_hwfn->nvm_info.valid)
2567 return 0;
2568
2563 p_ptt = qed_ptt_acquire(p_hwfn); 2569 p_ptt = qed_ptt_acquire(p_hwfn);
2564 if (!p_ptt) { 2570 if (!p_ptt) {
2565 DP_ERR(p_hwfn, "failed to acquire ptt\n"); 2571 DP_ERR(p_hwfn, "failed to acquire ptt\n");
@@ -2567,29 +2573,29 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
2567 } 2573 }
2568 2574
2569 /* Acquire from MFW the amount of available images */ 2575 /* Acquire from MFW the amount of available images */
2570 nvm_info->num_images = 0; 2576 nvm_info.num_images = 0;
2571 rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, 2577 rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
2572 p_ptt, &nvm_info->num_images); 2578 p_ptt, &nvm_info.num_images);
2573 if (rc == -EOPNOTSUPP) { 2579 if (rc == -EOPNOTSUPP) {
2574 DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n"); 2580 DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
2575 goto out; 2581 goto out;
2576 } else if (rc || !nvm_info->num_images) { 2582 } else if (rc || !nvm_info.num_images) {
2577 DP_ERR(p_hwfn, "Failed getting number of images\n"); 2583 DP_ERR(p_hwfn, "Failed getting number of images\n");
2578 goto err0; 2584 goto err0;
2579 } 2585 }
2580 2586
2581 nvm_info->image_att = kmalloc_array(nvm_info->num_images, 2587 nvm_info.image_att = kmalloc_array(nvm_info.num_images,
2582 sizeof(struct bist_nvm_image_att), 2588 sizeof(struct bist_nvm_image_att),
2583 GFP_KERNEL); 2589 GFP_KERNEL);
2584 if (!nvm_info->image_att) { 2590 if (!nvm_info.image_att) {
2585 rc = -ENOMEM; 2591 rc = -ENOMEM;
2586 goto err0; 2592 goto err0;
2587 } 2593 }
2588 2594
2589 /* Iterate over images and get their attributes */ 2595 /* Iterate over images and get their attributes */
2590 for (i = 0; i < nvm_info->num_images; i++) { 2596 for (i = 0; i < nvm_info.num_images; i++) {
2591 rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, 2597 rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
2592 &nvm_info->image_att[i], i); 2598 &nvm_info.image_att[i], i);
2593 if (rc) { 2599 if (rc) {
2594 DP_ERR(p_hwfn, 2600 DP_ERR(p_hwfn,
2595 "Failed getting image index %d attributes\n", i); 2601 "Failed getting image index %d attributes\n", i);
@@ -2597,14 +2603,22 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
2597 } 2603 }
2598 2604
2599 DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i, 2605 DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
2600 nvm_info->image_att[i].len); 2606 nvm_info.image_att[i].len);
2601 } 2607 }
2602out: 2608out:
2609 /* Update hwfn's nvm_info */
2610 if (nvm_info.num_images) {
2611 p_hwfn->nvm_info.num_images = nvm_info.num_images;
2612 kfree(p_hwfn->nvm_info.image_att);
2613 p_hwfn->nvm_info.image_att = nvm_info.image_att;
2614 p_hwfn->nvm_info.valid = true;
2615 }
2616
2603 qed_ptt_release(p_hwfn, p_ptt); 2617 qed_ptt_release(p_hwfn, p_ptt);
2604 return 0; 2618 return 0;
2605 2619
2606err1: 2620err1:
2607 kfree(nvm_info->image_att); 2621 kfree(nvm_info.image_att);
2608err0: 2622err0:
2609 qed_ptt_release(p_hwfn, p_ptt); 2623 qed_ptt_release(p_hwfn, p_ptt);
2610 return rc; 2624 return rc;
@@ -2641,6 +2655,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
2641 return -EINVAL; 2655 return -EINVAL;
2642 } 2656 }
2643 2657
2658 qed_mcp_nvm_info_populate(p_hwfn);
2644 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 2659 for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
2645 if (type == p_hwfn->nvm_info.image_att[i].image_type) 2660 if (type == p_hwfn->nvm_info.image_att[i].image_type)
2646 break; 2661 break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 891f03a7a33d..8d7b9bb910f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
1128 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 1128 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
1129 1129
1130 ret = kstrtoul(buf, 16, &data); 1130 ret = kstrtoul(buf, 16, &data);
1131 if (ret)
1132 return ret;
1131 1133
1132 switch (data) { 1134 switch (data) {
1133 case QLC_83XX_FLASH_SECTOR_ERASE_CMD: 1135 case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5803cd6db406..206f0266463e 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -658,7 +658,7 @@ qcaspi_netdev_open(struct net_device *dev)
658 return ret; 658 return ret;
659 } 659 }
660 660
661 netif_start_queue(qca->net_dev); 661 /* SPI thread takes care of TX queue */
662 662
663 return 0; 663 return 0;
664} 664}
@@ -760,6 +760,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
760 qca->net_dev->stats.tx_errors++; 760 qca->net_dev->stats.tx_errors++;
761 /* Trigger tx queue flush and QCA7000 reset */ 761 /* Trigger tx queue flush and QCA7000 reset */
762 qca->sync = QCASPI_SYNC_UNKNOWN; 762 qca->sync = QCASPI_SYNC_UNKNOWN;
763
764 if (qca->spi_thread)
765 wake_up_process(qca->spi_thread);
763} 766}
764 767
765static int 768static int
@@ -878,22 +881,22 @@ qca_spi_probe(struct spi_device *spi)
878 881
879 if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || 882 if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
880 (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { 883 (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
881 dev_info(&spi->dev, "Invalid clkspeed: %d\n", 884 dev_err(&spi->dev, "Invalid clkspeed: %d\n",
882 qcaspi_clkspeed); 885 qcaspi_clkspeed);
883 return -EINVAL; 886 return -EINVAL;
884 } 887 }
885 888
886 if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) || 889 if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
887 (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) { 890 (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
888 dev_info(&spi->dev, "Invalid burst len: %d\n", 891 dev_err(&spi->dev, "Invalid burst len: %d\n",
889 qcaspi_burst_len); 892 qcaspi_burst_len);
890 return -EINVAL; 893 return -EINVAL;
891 } 894 }
892 895
893 if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) || 896 if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
894 (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) { 897 (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
895 dev_info(&spi->dev, "Invalid pluggable: %d\n", 898 dev_err(&spi->dev, "Invalid pluggable: %d\n",
896 qcaspi_pluggable); 899 qcaspi_pluggable);
897 return -EINVAL; 900 return -EINVAL;
898 } 901 }
899 902
@@ -955,8 +958,8 @@ qca_spi_probe(struct spi_device *spi)
955 } 958 }
956 959
957 if (register_netdev(qcaspi_devs)) { 960 if (register_netdev(qcaspi_devs)) {
958 dev_info(&spi->dev, "Unable to register net device %s\n", 961 dev_err(&spi->dev, "Unable to register net device %s\n",
959 qcaspi_devs->name); 962 qcaspi_devs->name);
960 free_netdev(qcaspi_devs); 963 free_netdev(qcaspi_devs);
961 return -EFAULT; 964 return -EFAULT;
962 } 965 }
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f4cae2be0fda..a3f69901ac87 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7789,6 +7789,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7789 NETIF_F_HW_VLAN_CTAG_RX; 7789 NETIF_F_HW_VLAN_CTAG_RX;
7790 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 7790 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
7791 NETIF_F_HIGHDMA; 7791 NETIF_F_HIGHDMA;
7792 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7792 7793
7793 tp->cp_cmd |= RxChkSum | RxVlan; 7794 tp->cp_cmd |= RxChkSum | RxVlan;
7794 7795
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 68f122140966..0d811c02ff34 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -980,6 +980,13 @@ static void ravb_adjust_link(struct net_device *ndev)
980 struct ravb_private *priv = netdev_priv(ndev); 980 struct ravb_private *priv = netdev_priv(ndev);
981 struct phy_device *phydev = ndev->phydev; 981 struct phy_device *phydev = ndev->phydev;
982 bool new_state = false; 982 bool new_state = false;
983 unsigned long flags;
984
985 spin_lock_irqsave(&priv->lock, flags);
986
987 /* Disable TX and RX right over here, if E-MAC change is ignored */
988 if (priv->no_avb_link)
989 ravb_rcv_snd_disable(ndev);
983 990
984 if (phydev->link) { 991 if (phydev->link) {
985 if (phydev->duplex != priv->duplex) { 992 if (phydev->duplex != priv->duplex) {
@@ -997,18 +1004,21 @@ static void ravb_adjust_link(struct net_device *ndev)
997 ravb_modify(ndev, ECMR, ECMR_TXF, 0); 1004 ravb_modify(ndev, ECMR, ECMR_TXF, 0);
998 new_state = true; 1005 new_state = true;
999 priv->link = phydev->link; 1006 priv->link = phydev->link;
1000 if (priv->no_avb_link)
1001 ravb_rcv_snd_enable(ndev);
1002 } 1007 }
1003 } else if (priv->link) { 1008 } else if (priv->link) {
1004 new_state = true; 1009 new_state = true;
1005 priv->link = 0; 1010 priv->link = 0;
1006 priv->speed = 0; 1011 priv->speed = 0;
1007 priv->duplex = -1; 1012 priv->duplex = -1;
1008 if (priv->no_avb_link)
1009 ravb_rcv_snd_disable(ndev);
1010 } 1013 }
1011 1014
1015 /* Enable TX and RX right over here, if E-MAC change is ignored */
1016 if (priv->no_avb_link && phydev->link)
1017 ravb_rcv_snd_enable(ndev);
1018
1019 mmiowb();
1020 spin_unlock_irqrestore(&priv->lock, flags);
1021
1012 if (new_state && netif_msg_link(priv)) 1022 if (new_state && netif_msg_link(priv))
1013 phy_print_status(phydev); 1023 phy_print_status(phydev);
1014} 1024}
@@ -1096,75 +1106,6 @@ static int ravb_phy_start(struct net_device *ndev)
1096 return 0; 1106 return 0;
1097} 1107}
1098 1108
1099static int ravb_get_link_ksettings(struct net_device *ndev,
1100 struct ethtool_link_ksettings *cmd)
1101{
1102 struct ravb_private *priv = netdev_priv(ndev);
1103 unsigned long flags;
1104
1105 if (!ndev->phydev)
1106 return -ENODEV;
1107
1108 spin_lock_irqsave(&priv->lock, flags);
1109 phy_ethtool_ksettings_get(ndev->phydev, cmd);
1110 spin_unlock_irqrestore(&priv->lock, flags);
1111
1112 return 0;
1113}
1114
1115static int ravb_set_link_ksettings(struct net_device *ndev,
1116 const struct ethtool_link_ksettings *cmd)
1117{
1118 struct ravb_private *priv = netdev_priv(ndev);
1119 unsigned long flags;
1120 int error;
1121
1122 if (!ndev->phydev)
1123 return -ENODEV;
1124
1125 spin_lock_irqsave(&priv->lock, flags);
1126
1127 /* Disable TX and RX */
1128 ravb_rcv_snd_disable(ndev);
1129
1130 error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
1131 if (error)
1132 goto error_exit;
1133
1134 if (cmd->base.duplex == DUPLEX_FULL)
1135 priv->duplex = 1;
1136 else
1137 priv->duplex = 0;
1138
1139 ravb_set_duplex(ndev);
1140
1141error_exit:
1142 mdelay(1);
1143
1144 /* Enable TX and RX */
1145 ravb_rcv_snd_enable(ndev);
1146
1147 mmiowb();
1148 spin_unlock_irqrestore(&priv->lock, flags);
1149
1150 return error;
1151}
1152
1153static int ravb_nway_reset(struct net_device *ndev)
1154{
1155 struct ravb_private *priv = netdev_priv(ndev);
1156 int error = -ENODEV;
1157 unsigned long flags;
1158
1159 if (ndev->phydev) {
1160 spin_lock_irqsave(&priv->lock, flags);
1161 error = phy_start_aneg(ndev->phydev);
1162 spin_unlock_irqrestore(&priv->lock, flags);
1163 }
1164
1165 return error;
1166}
1167
1168static u32 ravb_get_msglevel(struct net_device *ndev) 1109static u32 ravb_get_msglevel(struct net_device *ndev)
1169{ 1110{
1170 struct ravb_private *priv = netdev_priv(ndev); 1111 struct ravb_private *priv = netdev_priv(ndev);
@@ -1377,7 +1318,7 @@ static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1377} 1318}
1378 1319
1379static const struct ethtool_ops ravb_ethtool_ops = { 1320static const struct ethtool_ops ravb_ethtool_ops = {
1380 .nway_reset = ravb_nway_reset, 1321 .nway_reset = phy_ethtool_nway_reset,
1381 .get_msglevel = ravb_get_msglevel, 1322 .get_msglevel = ravb_get_msglevel,
1382 .set_msglevel = ravb_set_msglevel, 1323 .set_msglevel = ravb_set_msglevel,
1383 .get_link = ethtool_op_get_link, 1324 .get_link = ethtool_op_get_link,
@@ -1387,8 +1328,8 @@ static const struct ethtool_ops ravb_ethtool_ops = {
1387 .get_ringparam = ravb_get_ringparam, 1328 .get_ringparam = ravb_get_ringparam,
1388 .set_ringparam = ravb_set_ringparam, 1329 .set_ringparam = ravb_set_ringparam,
1389 .get_ts_info = ravb_get_ts_info, 1330 .get_ts_info = ravb_get_ts_info,
1390 .get_link_ksettings = ravb_get_link_ksettings, 1331 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1391 .set_link_ksettings = ravb_set_link_ksettings, 1332 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1392 .get_wol = ravb_get_wol, 1333 .get_wol = ravb_get_wol,
1393 .set_wol = ravb_set_wol, 1334 .set_wol = ravb_set_wol,
1394}; 1335};
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e9007b613f17..5614fd231bbe 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1927,8 +1927,15 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1927{ 1927{
1928 struct sh_eth_private *mdp = netdev_priv(ndev); 1928 struct sh_eth_private *mdp = netdev_priv(ndev);
1929 struct phy_device *phydev = ndev->phydev; 1929 struct phy_device *phydev = ndev->phydev;
1930 unsigned long flags;
1930 int new_state = 0; 1931 int new_state = 0;
1931 1932
1933 spin_lock_irqsave(&mdp->lock, flags);
1934
1935 /* Disable TX and RX right over here, if E-MAC change is ignored */
1936 if (mdp->cd->no_psr || mdp->no_ether_link)
1937 sh_eth_rcv_snd_disable(ndev);
1938
1932 if (phydev->link) { 1939 if (phydev->link) {
1933 if (phydev->duplex != mdp->duplex) { 1940 if (phydev->duplex != mdp->duplex) {
1934 new_state = 1; 1941 new_state = 1;
@@ -1947,18 +1954,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1947 sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); 1954 sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
1948 new_state = 1; 1955 new_state = 1;
1949 mdp->link = phydev->link; 1956 mdp->link = phydev->link;
1950 if (mdp->cd->no_psr || mdp->no_ether_link)
1951 sh_eth_rcv_snd_enable(ndev);
1952 } 1957 }
1953 } else if (mdp->link) { 1958 } else if (mdp->link) {
1954 new_state = 1; 1959 new_state = 1;
1955 mdp->link = 0; 1960 mdp->link = 0;
1956 mdp->speed = 0; 1961 mdp->speed = 0;
1957 mdp->duplex = -1; 1962 mdp->duplex = -1;
1958 if (mdp->cd->no_psr || mdp->no_ether_link)
1959 sh_eth_rcv_snd_disable(ndev);
1960 } 1963 }
1961 1964
1965 /* Enable TX and RX right over here, if E-MAC change is ignored */
1966 if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
1967 sh_eth_rcv_snd_enable(ndev);
1968
1969 mmiowb();
1970 spin_unlock_irqrestore(&mdp->lock, flags);
1971
1962 if (new_state && netif_msg_link(mdp)) 1972 if (new_state && netif_msg_link(mdp))
1963 phy_print_status(phydev); 1973 phy_print_status(phydev);
1964} 1974}
@@ -2030,60 +2040,6 @@ static int sh_eth_phy_start(struct net_device *ndev)
2030 return 0; 2040 return 0;
2031} 2041}
2032 2042
2033static int sh_eth_get_link_ksettings(struct net_device *ndev,
2034 struct ethtool_link_ksettings *cmd)
2035{
2036 struct sh_eth_private *mdp = netdev_priv(ndev);
2037 unsigned long flags;
2038
2039 if (!ndev->phydev)
2040 return -ENODEV;
2041
2042 spin_lock_irqsave(&mdp->lock, flags);
2043 phy_ethtool_ksettings_get(ndev->phydev, cmd);
2044 spin_unlock_irqrestore(&mdp->lock, flags);
2045
2046 return 0;
2047}
2048
2049static int sh_eth_set_link_ksettings(struct net_device *ndev,
2050 const struct ethtool_link_ksettings *cmd)
2051{
2052 struct sh_eth_private *mdp = netdev_priv(ndev);
2053 unsigned long flags;
2054 int ret;
2055
2056 if (!ndev->phydev)
2057 return -ENODEV;
2058
2059 spin_lock_irqsave(&mdp->lock, flags);
2060
2061 /* disable tx and rx */
2062 sh_eth_rcv_snd_disable(ndev);
2063
2064 ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
2065 if (ret)
2066 goto error_exit;
2067
2068 if (cmd->base.duplex == DUPLEX_FULL)
2069 mdp->duplex = 1;
2070 else
2071 mdp->duplex = 0;
2072
2073 if (mdp->cd->set_duplex)
2074 mdp->cd->set_duplex(ndev);
2075
2076error_exit:
2077 mdelay(1);
2078
2079 /* enable tx and rx */
2080 sh_eth_rcv_snd_enable(ndev);
2081
2082 spin_unlock_irqrestore(&mdp->lock, flags);
2083
2084 return ret;
2085}
2086
2087/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the 2043/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
2088 * version must be bumped as well. Just adding registers up to that 2044 * version must be bumped as well. Just adding registers up to that
2089 * limit is fine, as long as the existing register indices don't 2045 * limit is fine, as long as the existing register indices don't
@@ -2263,22 +2219,6 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2263 pm_runtime_put_sync(&mdp->pdev->dev); 2219 pm_runtime_put_sync(&mdp->pdev->dev);
2264} 2220}
2265 2221
2266static int sh_eth_nway_reset(struct net_device *ndev)
2267{
2268 struct sh_eth_private *mdp = netdev_priv(ndev);
2269 unsigned long flags;
2270 int ret;
2271
2272 if (!ndev->phydev)
2273 return -ENODEV;
2274
2275 spin_lock_irqsave(&mdp->lock, flags);
2276 ret = phy_start_aneg(ndev->phydev);
2277 spin_unlock_irqrestore(&mdp->lock, flags);
2278
2279 return ret;
2280}
2281
2282static u32 sh_eth_get_msglevel(struct net_device *ndev) 2222static u32 sh_eth_get_msglevel(struct net_device *ndev)
2283{ 2223{
2284 struct sh_eth_private *mdp = netdev_priv(ndev); 2224 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2429,7 +2369,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2429static const struct ethtool_ops sh_eth_ethtool_ops = { 2369static const struct ethtool_ops sh_eth_ethtool_ops = {
2430 .get_regs_len = sh_eth_get_regs_len, 2370 .get_regs_len = sh_eth_get_regs_len,
2431 .get_regs = sh_eth_get_regs, 2371 .get_regs = sh_eth_get_regs,
2432 .nway_reset = sh_eth_nway_reset, 2372 .nway_reset = phy_ethtool_nway_reset,
2433 .get_msglevel = sh_eth_get_msglevel, 2373 .get_msglevel = sh_eth_get_msglevel,
2434 .set_msglevel = sh_eth_set_msglevel, 2374 .set_msglevel = sh_eth_set_msglevel,
2435 .get_link = ethtool_op_get_link, 2375 .get_link = ethtool_op_get_link,
@@ -2438,8 +2378,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
2438 .get_sset_count = sh_eth_get_sset_count, 2378 .get_sset_count = sh_eth_get_sset_count,
2439 .get_ringparam = sh_eth_get_ringparam, 2379 .get_ringparam = sh_eth_get_ringparam,
2440 .set_ringparam = sh_eth_set_ringparam, 2380 .set_ringparam = sh_eth_set_ringparam,
2441 .get_link_ksettings = sh_eth_get_link_ksettings, 2381 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2442 .set_link_ksettings = sh_eth_set_link_ksettings, 2382 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2443 .get_wol = sh_eth_get_wol, 2383 .get_wol = sh_eth_get_wol,
2444 .set_wol = sh_eth_set_wol, 2384 .set_wol = sh_eth_set_wol,
2445}; 2385};
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 23f0785c0573..7eeac3d6cfe8 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4288,9 +4288,9 @@ static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
4288 return -EPROTONOSUPPORT; 4288 return -EPROTONOSUPPORT;
4289} 4289}
4290 4290
4291static s32 efx_ef10_filter_insert(struct efx_nic *efx, 4291static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
4292 struct efx_filter_spec *spec, 4292 struct efx_filter_spec *spec,
4293 bool replace_equal) 4293 bool replace_equal)
4294{ 4294{
4295 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4295 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
4296 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4296 struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -4307,7 +4307,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4307 bool is_mc_recip; 4307 bool is_mc_recip;
4308 s32 rc; 4308 s32 rc;
4309 4309
4310 down_read(&efx->filter_sem); 4310 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
4311 table = efx->filter_state; 4311 table = efx->filter_state;
4312 down_write(&table->lock); 4312 down_write(&table->lock);
4313 4313
@@ -4498,10 +4498,22 @@ out_unlock:
4498 if (rss_locked) 4498 if (rss_locked)
4499 mutex_unlock(&efx->rss_lock); 4499 mutex_unlock(&efx->rss_lock);
4500 up_write(&table->lock); 4500 up_write(&table->lock);
4501 up_read(&efx->filter_sem);
4502 return rc; 4501 return rc;
4503} 4502}
4504 4503
4504static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4505 struct efx_filter_spec *spec,
4506 bool replace_equal)
4507{
4508 s32 ret;
4509
4510 down_read(&efx->filter_sem);
4511 ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
4512 up_read(&efx->filter_sem);
4513
4514 return ret;
4515}
4516
4505static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) 4517static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
4506{ 4518{
4507 /* no need to do anything here on EF10 */ 4519 /* no need to do anything here on EF10 */
@@ -5285,7 +5297,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5285 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); 5297 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
5286 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5298 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5287 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); 5299 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
5288 rc = efx_ef10_filter_insert(efx, &spec, true); 5300 rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5289 if (rc < 0) { 5301 if (rc < 0) {
5290 if (rollback) { 5302 if (rollback) {
5291 netif_info(efx, drv, efx->net_dev, 5303 netif_info(efx, drv, efx->net_dev,
@@ -5314,7 +5326,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5314 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5326 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5315 eth_broadcast_addr(baddr); 5327 eth_broadcast_addr(baddr);
5316 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 5328 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5317 rc = efx_ef10_filter_insert(efx, &spec, true); 5329 rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5318 if (rc < 0) { 5330 if (rc < 0) {
5319 netif_warn(efx, drv, efx->net_dev, 5331 netif_warn(efx, drv, efx->net_dev,
5320 "Broadcast filter insert failed rc=%d\n", rc); 5332 "Broadcast filter insert failed rc=%d\n", rc);
@@ -5370,7 +5382,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5370 if (vlan->vid != EFX_FILTER_VID_UNSPEC) 5382 if (vlan->vid != EFX_FILTER_VID_UNSPEC)
5371 efx_filter_set_eth_local(&spec, vlan->vid, NULL); 5383 efx_filter_set_eth_local(&spec, vlan->vid, NULL);
5372 5384
5373 rc = efx_ef10_filter_insert(efx, &spec, true); 5385 rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5374 if (rc < 0) { 5386 if (rc < 0) {
5375 const char *um = multicast ? "Multicast" : "Unicast"; 5387 const char *um = multicast ? "Multicast" : "Unicast";
5376 const char *encap_name = ""; 5388 const char *encap_name = "";
@@ -5430,7 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5430 filter_flags, 0); 5442 filter_flags, 0);
5431 eth_broadcast_addr(baddr); 5443 eth_broadcast_addr(baddr);
5432 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 5444 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5433 rc = efx_ef10_filter_insert(efx, &spec, true); 5445 rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5434 if (rc < 0) { 5446 if (rc < 0) {
5435 netif_warn(efx, drv, efx->net_dev, 5447 netif_warn(efx, drv, efx->net_dev,
5436 "Broadcast filter insert failed rc=%d\n", 5448 "Broadcast filter insert failed rc=%d\n",
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 570ec72266f3..ce3a177081a8 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1871,12 +1871,6 @@ static void efx_remove_filters(struct efx_nic *efx)
1871 up_write(&efx->filter_sem); 1871 up_write(&efx->filter_sem);
1872} 1872}
1873 1873
1874static void efx_restore_filters(struct efx_nic *efx)
1875{
1876 down_read(&efx->filter_sem);
1877 efx->type->filter_table_restore(efx);
1878 up_read(&efx->filter_sem);
1879}
1880 1874
1881/************************************************************************** 1875/**************************************************************************
1882 * 1876 *
@@ -2688,6 +2682,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2688 efx_disable_interrupts(efx); 2682 efx_disable_interrupts(efx);
2689 2683
2690 mutex_lock(&efx->mac_lock); 2684 mutex_lock(&efx->mac_lock);
2685 down_write(&efx->filter_sem);
2691 mutex_lock(&efx->rss_lock); 2686 mutex_lock(&efx->rss_lock);
2692 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && 2687 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2693 method != RESET_TYPE_DATAPATH) 2688 method != RESET_TYPE_DATAPATH)
@@ -2745,9 +2740,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2745 if (efx->type->rx_restore_rss_contexts) 2740 if (efx->type->rx_restore_rss_contexts)
2746 efx->type->rx_restore_rss_contexts(efx); 2741 efx->type->rx_restore_rss_contexts(efx);
2747 mutex_unlock(&efx->rss_lock); 2742 mutex_unlock(&efx->rss_lock);
2748 down_read(&efx->filter_sem); 2743 efx->type->filter_table_restore(efx);
2749 efx_restore_filters(efx); 2744 up_write(&efx->filter_sem);
2750 up_read(&efx->filter_sem);
2751 if (efx->type->sriov_reset) 2745 if (efx->type->sriov_reset)
2752 efx->type->sriov_reset(efx); 2746 efx->type->sriov_reset(efx);
2753 2747
@@ -2764,6 +2758,7 @@ fail:
2764 efx->port_initialized = false; 2758 efx->port_initialized = false;
2765 2759
2766 mutex_unlock(&efx->rss_lock); 2760 mutex_unlock(&efx->rss_lock);
2761 up_write(&efx->filter_sem);
2767 mutex_unlock(&efx->mac_lock); 2762 mutex_unlock(&efx->mac_lock);
2768 2763
2769 return rc; 2764 return rc;
@@ -3473,7 +3468,9 @@ static int efx_pci_probe_main(struct efx_nic *efx)
3473 3468
3474 efx_init_napi(efx); 3469 efx_init_napi(efx);
3475 3470
3471 down_write(&efx->filter_sem);
3476 rc = efx->type->init(efx); 3472 rc = efx->type->init(efx);
3473 up_write(&efx->filter_sem);
3477 if (rc) { 3474 if (rc) {
3478 netif_err(efx, probe, efx->net_dev, 3475 netif_err(efx, probe, efx->net_dev,
3479 "failed to initialise NIC\n"); 3476 "failed to initialise NIC\n");
@@ -3765,7 +3762,9 @@ static int efx_pm_resume(struct device *dev)
3765 rc = efx->type->reset(efx, RESET_TYPE_ALL); 3762 rc = efx->type->reset(efx, RESET_TYPE_ALL);
3766 if (rc) 3763 if (rc)
3767 return rc; 3764 return rc;
3765 down_write(&efx->filter_sem);
3768 rc = efx->type->init(efx); 3766 rc = efx->type->init(efx);
3767 up_write(&efx->filter_sem);
3769 if (rc) 3768 if (rc)
3770 return rc; 3769 return rc;
3771 rc = efx_pm_thaw(dev); 3770 rc = efx_pm_thaw(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 2e6e2a96b4f2..f9a61f90cfbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -37,7 +37,7 @@
37 * is done in the "stmmac files" 37 * is done in the "stmmac files"
38 */ 38 */
39 39
40/* struct emac_variant - Descrive dwmac-sun8i hardware variant 40/* struct emac_variant - Describe dwmac-sun8i hardware variant
41 * @default_syscon_value: The default value of the EMAC register in syscon 41 * @default_syscon_value: The default value of the EMAC register in syscon
42 * This value is used for disabling properly EMAC 42 * This value is used for disabling properly EMAC
43 * and used as a good starting value in case of the 43 * and used as a good starting value in case of the
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6d141f3931eb..72da77b94ecd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -94,7 +94,6 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
94/** 94/**
95 * stmmac_axi_setup - parse DT parameters for programming the AXI register 95 * stmmac_axi_setup - parse DT parameters for programming the AXI register
96 * @pdev: platform device 96 * @pdev: platform device
97 * @priv: driver private struct.
98 * Description: 97 * Description:
99 * if required, from device-tree the AXI internal register can be tuned 98 * if required, from device-tree the AXI internal register can be tuned
100 * by using platform parameters. 99 * by using platform parameters.
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8e9d0ee1572b..31c3d77b4733 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1274,6 +1274,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
1274 struct hv_device *device = netvsc_channel_to_device(channel); 1274 struct hv_device *device = netvsc_channel_to_device(channel);
1275 struct net_device *ndev = hv_get_drvdata(device); 1275 struct net_device *ndev = hv_get_drvdata(device);
1276 int work_done = 0; 1276 int work_done = 0;
1277 int ret;
1277 1278
1278 /* If starting a new interval */ 1279 /* If starting a new interval */
1279 if (!nvchan->desc) 1280 if (!nvchan->desc)
@@ -1285,16 +1286,18 @@ int netvsc_poll(struct napi_struct *napi, int budget)
1285 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); 1286 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1286 } 1287 }
1287 1288
1288 /* If send of pending receive completions suceeded 1289 /* Send any pending receive completions */
1289 * and did not exhaust NAPI budget this time 1290 ret = send_recv_completions(ndev, net_device, nvchan);
1290 * and not doing busy poll 1291
1292 /* If it did not exhaust NAPI budget this time
1293 * and not doing busy poll
1291 * then re-enable host interrupts 1294 * then re-enable host interrupts
1292 * and reschedule if ring is not empty. 1295 * and reschedule if ring is not empty
1296 * or sending receive completion failed.
1293 */ 1297 */
1294 if (send_recv_completions(ndev, net_device, nvchan) == 0 && 1298 if (work_done < budget &&
1295 work_done < budget &&
1296 napi_complete_done(napi, work_done) && 1299 napi_complete_done(napi, work_done) &&
1297 hv_end_read(&channel->inbound) && 1300 (ret || hv_end_read(&channel->inbound)) &&
1298 napi_schedule_prep(napi)) { 1301 napi_schedule_prep(napi)) {
1299 hv_begin_read(&channel->inbound); 1302 hv_begin_read(&channel->inbound);
1300 __napi_schedule(napi); 1303 __napi_schedule(napi);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 9b4e3c3787e5..408ece27131c 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1338,6 +1338,7 @@ out:
1338 /* setting up multiple channels failed */ 1338 /* setting up multiple channels failed */
1339 net_device->max_chn = 1; 1339 net_device->max_chn = 1;
1340 net_device->num_chn = 1; 1340 net_device->num_chn = 1;
1341 return 0;
1341 1342
1342err_dev_remv: 1343err_dev_remv:
1343 rndis_filter_device_remove(dev, net_device); 1344 rndis_filter_device_remove(dev, net_device);
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 64f1b1e77bc0..23a52b9293f3 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -275,6 +275,8 @@ struct adf7242_local {
275 struct spi_message stat_msg; 275 struct spi_message stat_msg;
276 struct spi_transfer stat_xfer; 276 struct spi_transfer stat_xfer;
277 struct dentry *debugfs_root; 277 struct dentry *debugfs_root;
278 struct delayed_work work;
279 struct workqueue_struct *wqueue;
278 unsigned long flags; 280 unsigned long flags;
279 int tx_stat; 281 int tx_stat;
280 bool promiscuous; 282 bool promiscuous;
@@ -575,10 +577,26 @@ static int adf7242_cmd_rx(struct adf7242_local *lp)
575 /* Wait until the ACK is sent */ 577 /* Wait until the ACK is sent */
576 adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); 578 adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
577 adf7242_clear_irqstat(lp); 579 adf7242_clear_irqstat(lp);
580 mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
578 581
579 return adf7242_cmd(lp, CMD_RC_RX); 582 return adf7242_cmd(lp, CMD_RC_RX);
580} 583}
581 584
585static void adf7242_rx_cal_work(struct work_struct *work)
586{
587 struct adf7242_local *lp =
588 container_of(work, struct adf7242_local, work.work);
589
590 /* Reissuing RC_RX every 400ms - to adjust for offset
591 * drift in receiver (datasheet page 61, OCL section)
592 */
593
594 if (!test_bit(FLAG_XMIT, &lp->flags)) {
595 adf7242_cmd(lp, CMD_RC_PHY_RDY);
596 adf7242_cmd_rx(lp);
597 }
598}
599
582static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm) 600static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
583{ 601{
584 struct adf7242_local *lp = hw->priv; 602 struct adf7242_local *lp = hw->priv;
@@ -686,7 +704,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
686 enable_irq(lp->spi->irq); 704 enable_irq(lp->spi->irq);
687 set_bit(FLAG_START, &lp->flags); 705 set_bit(FLAG_START, &lp->flags);
688 706
689 return adf7242_cmd(lp, CMD_RC_RX); 707 return adf7242_cmd_rx(lp);
690} 708}
691 709
692static void adf7242_stop(struct ieee802154_hw *hw) 710static void adf7242_stop(struct ieee802154_hw *hw)
@@ -694,6 +712,7 @@ static void adf7242_stop(struct ieee802154_hw *hw)
694 struct adf7242_local *lp = hw->priv; 712 struct adf7242_local *lp = hw->priv;
695 713
696 disable_irq(lp->spi->irq); 714 disable_irq(lp->spi->irq);
715 cancel_delayed_work_sync(&lp->work);
697 adf7242_cmd(lp, CMD_RC_IDLE); 716 adf7242_cmd(lp, CMD_RC_IDLE);
698 clear_bit(FLAG_START, &lp->flags); 717 clear_bit(FLAG_START, &lp->flags);
699 adf7242_clear_irqstat(lp); 718 adf7242_clear_irqstat(lp);
@@ -719,7 +738,10 @@ static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
719 adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8); 738 adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8);
720 adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16); 739 adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16);
721 740
722 return adf7242_cmd(lp, CMD_RC_RX); 741 if (test_bit(FLAG_START, &lp->flags))
742 return adf7242_cmd_rx(lp);
743 else
744 return adf7242_cmd(lp, CMD_RC_PHY_RDY);
723} 745}
724 746
725static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw, 747static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw,
@@ -814,6 +836,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
814 /* ensure existing instances of the IRQ handler have completed */ 836 /* ensure existing instances of the IRQ handler have completed */
815 disable_irq(lp->spi->irq); 837 disable_irq(lp->spi->irq);
816 set_bit(FLAG_XMIT, &lp->flags); 838 set_bit(FLAG_XMIT, &lp->flags);
839 cancel_delayed_work_sync(&lp->work);
817 reinit_completion(&lp->tx_complete); 840 reinit_completion(&lp->tx_complete);
818 adf7242_cmd(lp, CMD_RC_PHY_RDY); 841 adf7242_cmd(lp, CMD_RC_PHY_RDY);
819 adf7242_clear_irqstat(lp); 842 adf7242_clear_irqstat(lp);
@@ -952,6 +975,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
952 unsigned int xmit; 975 unsigned int xmit;
953 u8 irq1; 976 u8 irq1;
954 977
978 mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
955 adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1); 979 adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
956 980
957 if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA))) 981 if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
@@ -1241,6 +1265,9 @@ static int adf7242_probe(struct spi_device *spi)
1241 spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg); 1265 spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg);
1242 1266
1243 spi_set_drvdata(spi, lp); 1267 spi_set_drvdata(spi, lp);
1268 INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
1269 lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
1270 WQ_MEM_RECLAIM);
1244 1271
1245 ret = adf7242_hw_init(lp); 1272 ret = adf7242_hw_init(lp);
1246 if (ret) 1273 if (ret)
@@ -1284,6 +1311,9 @@ static int adf7242_remove(struct spi_device *spi)
1284 if (!IS_ERR_OR_NULL(lp->debugfs_root)) 1311 if (!IS_ERR_OR_NULL(lp->debugfs_root))
1285 debugfs_remove_recursive(lp->debugfs_root); 1312 debugfs_remove_recursive(lp->debugfs_root);
1286 1313
1314 cancel_delayed_work_sync(&lp->work);
1315 destroy_workqueue(lp->wqueue);
1316
1287 ieee802154_unregister_hw(lp->hw); 1317 ieee802154_unregister_hw(lp->hw);
1288 mutex_destroy(&lp->bmux); 1318 mutex_destroy(&lp->bmux);
1289 ieee802154_free_hw(lp->hw); 1319 ieee802154_free_hw(lp->hw);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 77abedf0b524..3d9e91579866 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
940static int 940static int
941at86rf230_ed(struct ieee802154_hw *hw, u8 *level) 941at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
942{ 942{
943 BUG_ON(!level); 943 WARN_ON(!level);
944 *level = 0xbe; 944 *level = 0xbe;
945 return 0; 945 return 0;
946} 946}
@@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
1121 if (changed & IEEE802154_AFILT_SADDR_CHANGED) { 1121 if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
1122 u16 addr = le16_to_cpu(filt->short_addr); 1122 u16 addr = le16_to_cpu(filt->short_addr);
1123 1123
1124 dev_vdbg(&lp->spi->dev, 1124 dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
1125 "at86rf230_set_hw_addr_filt called for saddr\n");
1126 __at86rf230_write(lp, RG_SHORT_ADDR_0, addr); 1125 __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
1127 __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8); 1126 __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
1128 } 1127 }
@@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
1130 if (changed & IEEE802154_AFILT_PANID_CHANGED) { 1129 if (changed & IEEE802154_AFILT_PANID_CHANGED) {
1131 u16 pan = le16_to_cpu(filt->pan_id); 1130 u16 pan = le16_to_cpu(filt->pan_id);
1132 1131
1133 dev_vdbg(&lp->spi->dev, 1132 dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
1134 "at86rf230_set_hw_addr_filt called for pan id\n");
1135 __at86rf230_write(lp, RG_PAN_ID_0, pan); 1133 __at86rf230_write(lp, RG_PAN_ID_0, pan);
1136 __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8); 1134 __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
1137 } 1135 }
@@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
1140 u8 i, addr[8]; 1138 u8 i, addr[8];
1141 1139
1142 memcpy(addr, &filt->ieee_addr, 8); 1140 memcpy(addr, &filt->ieee_addr, 8);
1143 dev_vdbg(&lp->spi->dev, 1141 dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
1144 "at86rf230_set_hw_addr_filt called for IEEE addr\n");
1145 for (i = 0; i < 8; i++) 1142 for (i = 0; i < 8; i++)
1146 __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]); 1143 __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
1147 } 1144 }
1148 1145
1149 if (changed & IEEE802154_AFILT_PANC_CHANGED) { 1146 if (changed & IEEE802154_AFILT_PANC_CHANGED) {
1150 dev_vdbg(&lp->spi->dev, 1147 dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
1151 "at86rf230_set_hw_addr_filt called for panc change\n");
1152 if (filt->pan_coord) 1148 if (filt->pan_coord)
1153 at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1); 1149 at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
1154 else 1150 else
@@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
1252 return at86rf230_write_subreg(lp, SR_CCA_MODE, val); 1248 return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
1253} 1249}
1254 1250
1255
1256static int 1251static int
1257at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) 1252at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
1258{ 1253{
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 0d673f7682ee..176395e4b7bb 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -49,7 +49,7 @@ struct fakelb_phy {
49 49
50static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) 50static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
51{ 51{
52 BUG_ON(!level); 52 WARN_ON(!level);
53 *level = 0xbe; 53 *level = 0xbe;
54 54
55 return 0; 55 return 0;
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index de0d7f28a181..e428277781ac 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -15,10 +15,11 @@
15 */ 15 */
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/gpio.h> 18#include <linux/gpio/consumer.h>
19#include <linux/spi/spi.h> 19#include <linux/spi/spi.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/irq.h>
22#include <linux/skbuff.h> 23#include <linux/skbuff.h>
23#include <linux/of_gpio.h> 24#include <linux/of_gpio.h>
24#include <linux/regmap.h> 25#include <linux/regmap.h>
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b8f57e9b9379..1cd439bdf608 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -130,8 +130,9 @@
130#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) 130#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
131#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) 131#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
132 132
133#define MII_88E1121_PHY_LED_CTRL 16 133#define MII_PHY_LED_CTRL 16
134#define MII_88E1121_PHY_LED_DEF 0x0030 134#define MII_88E1121_PHY_LED_DEF 0x0030
135#define MII_88E1510_PHY_LED_DEF 0x1177
135 136
136#define MII_M1011_PHY_STATUS 0x11 137#define MII_M1011_PHY_STATUS 0x11
137#define MII_M1011_PHY_STATUS_1000 0x8000 138#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -632,8 +633,40 @@ error:
632 return err; 633 return err;
633} 634}
634 635
636static void marvell_config_led(struct phy_device *phydev)
637{
638 u16 def_config;
639 int err;
640
641 switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) {
642 /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
643 case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R):
644 case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S):
645 def_config = MII_88E1121_PHY_LED_DEF;
646 break;
647 /* Default PHY LED config:
648 * LED[0] .. 1000Mbps Link
649 * LED[1] .. 100Mbps Link
650 * LED[2] .. Blink, Activity
651 */
652 case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
653 def_config = MII_88E1510_PHY_LED_DEF;
654 break;
655 default:
656 return;
657 }
658
659 err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL,
660 def_config);
661 if (err < 0)
662 pr_warn("Fail to config marvell phy LED.\n");
663}
664
635static int marvell_config_init(struct phy_device *phydev) 665static int marvell_config_init(struct phy_device *phydev)
636{ 666{
667 /* Set defalut LED */
668 marvell_config_led(phydev);
669
637 /* Set registers from marvell,reg-init DT property */ 670 /* Set registers from marvell,reg-init DT property */
638 return marvell_of_reg_init(phydev); 671 return marvell_of_reg_init(phydev);
639} 672}
@@ -813,21 +846,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
813 return genphy_soft_reset(phydev); 846 return genphy_soft_reset(phydev);
814} 847}
815 848
816static int m88e1121_config_init(struct phy_device *phydev)
817{
818 int err;
819
820 /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
821 err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
822 MII_88E1121_PHY_LED_CTRL,
823 MII_88E1121_PHY_LED_DEF);
824 if (err < 0)
825 return err;
826
827 /* Set marvell,reg-init configuration from device tree */
828 return marvell_config_init(phydev);
829}
830
831static int m88e1318_config_init(struct phy_device *phydev) 849static int m88e1318_config_init(struct phy_device *phydev)
832{ 850{
833 if (phy_interrupt_is_valid(phydev)) { 851 if (phy_interrupt_is_valid(phydev)) {
@@ -841,7 +859,7 @@ static int m88e1318_config_init(struct phy_device *phydev)
841 return err; 859 return err;
842 } 860 }
843 861
844 return m88e1121_config_init(phydev); 862 return marvell_config_init(phydev);
845} 863}
846 864
847static int m88e1510_config_init(struct phy_device *phydev) 865static int m88e1510_config_init(struct phy_device *phydev)
@@ -2087,7 +2105,7 @@ static struct phy_driver marvell_drivers[] = {
2087 .features = PHY_GBIT_FEATURES, 2105 .features = PHY_GBIT_FEATURES,
2088 .flags = PHY_HAS_INTERRUPT, 2106 .flags = PHY_HAS_INTERRUPT,
2089 .probe = &m88e1121_probe, 2107 .probe = &m88e1121_probe,
2090 .config_init = &m88e1121_config_init, 2108 .config_init = &marvell_config_init,
2091 .config_aneg = &m88e1121_config_aneg, 2109 .config_aneg = &m88e1121_config_aneg,
2092 .read_status = &marvell_read_status, 2110 .read_status = &marvell_read_status,
2093 .ack_interrupt = &marvell_ack_interrupt, 2111 .ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bd0f339f69fd..b9f5f40a7ac1 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1724,11 +1724,8 @@ EXPORT_SYMBOL(genphy_loopback);
1724 1724
1725static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) 1725static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
1726{ 1726{
1727 /* The default values for phydev->supported are provided by the PHY 1727 phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
1728 * driver "features" member, we want to reset to sane defaults first 1728 PHY_10BT_FEATURES);
1729 * before supporting higher speeds.
1730 */
1731 phydev->supported &= PHY_DEFAULT_FEATURES;
1732 1729
1733 switch (max_speed) { 1730 switch (max_speed) {
1734 default: 1731 default:
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index d437f4f5ed52..740655261e5b 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -349,7 +349,6 @@ static int sfp_register_bus(struct sfp_bus *bus)
349 } 349 }
350 if (bus->started) 350 if (bus->started)
351 bus->socket_ops->start(bus->sfp); 351 bus->socket_ops->start(bus->sfp);
352 bus->netdev->sfp_bus = bus;
353 bus->registered = true; 352 bus->registered = true;
354 return 0; 353 return 0;
355} 354}
@@ -364,7 +363,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
364 if (bus->phydev && ops && ops->disconnect_phy) 363 if (bus->phydev && ops && ops->disconnect_phy)
365 ops->disconnect_phy(bus->upstream); 364 ops->disconnect_phy(bus->upstream);
366 } 365 }
367 bus->netdev->sfp_bus = NULL;
368 bus->registered = false; 366 bus->registered = false;
369} 367}
370 368
@@ -436,6 +434,14 @@ void sfp_upstream_stop(struct sfp_bus *bus)
436} 434}
437EXPORT_SYMBOL_GPL(sfp_upstream_stop); 435EXPORT_SYMBOL_GPL(sfp_upstream_stop);
438 436
437static void sfp_upstream_clear(struct sfp_bus *bus)
438{
439 bus->upstream_ops = NULL;
440 bus->upstream = NULL;
441 bus->netdev->sfp_bus = NULL;
442 bus->netdev = NULL;
443}
444
439/** 445/**
440 * sfp_register_upstream() - Register the neighbouring device 446 * sfp_register_upstream() - Register the neighbouring device
441 * @fwnode: firmware node for the SFP bus 447 * @fwnode: firmware node for the SFP bus
@@ -461,9 +467,13 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
461 bus->upstream_ops = ops; 467 bus->upstream_ops = ops;
462 bus->upstream = upstream; 468 bus->upstream = upstream;
463 bus->netdev = ndev; 469 bus->netdev = ndev;
470 ndev->sfp_bus = bus;
464 471
465 if (bus->sfp) 472 if (bus->sfp) {
466 ret = sfp_register_bus(bus); 473 ret = sfp_register_bus(bus);
474 if (ret)
475 sfp_upstream_clear(bus);
476 }
467 rtnl_unlock(); 477 rtnl_unlock();
468 } 478 }
469 479
@@ -488,8 +498,7 @@ void sfp_unregister_upstream(struct sfp_bus *bus)
488 rtnl_lock(); 498 rtnl_lock();
489 if (bus->sfp) 499 if (bus->sfp)
490 sfp_unregister_bus(bus); 500 sfp_unregister_bus(bus);
491 bus->upstream = NULL; 501 sfp_upstream_clear(bus);
492 bus->netdev = NULL;
493 rtnl_unlock(); 502 rtnl_unlock();
494 503
495 sfp_bus_put(bus); 504 sfp_bus_put(bus);
@@ -561,6 +570,13 @@ void sfp_module_remove(struct sfp_bus *bus)
561} 570}
562EXPORT_SYMBOL_GPL(sfp_module_remove); 571EXPORT_SYMBOL_GPL(sfp_module_remove);
563 572
573static void sfp_socket_clear(struct sfp_bus *bus)
574{
575 bus->sfp_dev = NULL;
576 bus->sfp = NULL;
577 bus->socket_ops = NULL;
578}
579
564struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, 580struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
565 const struct sfp_socket_ops *ops) 581 const struct sfp_socket_ops *ops)
566{ 582{
@@ -573,8 +589,11 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
573 bus->sfp = sfp; 589 bus->sfp = sfp;
574 bus->socket_ops = ops; 590 bus->socket_ops = ops;
575 591
576 if (bus->netdev) 592 if (bus->netdev) {
577 ret = sfp_register_bus(bus); 593 ret = sfp_register_bus(bus);
594 if (ret)
595 sfp_socket_clear(bus);
596 }
578 rtnl_unlock(); 597 rtnl_unlock();
579 } 598 }
580 599
@@ -592,9 +611,7 @@ void sfp_unregister_socket(struct sfp_bus *bus)
592 rtnl_lock(); 611 rtnl_lock();
593 if (bus->netdev) 612 if (bus->netdev)
594 sfp_unregister_bus(bus); 613 sfp_unregister_bus(bus);
595 bus->sfp_dev = NULL; 614 sfp_socket_clear(bus);
596 bus->sfp = NULL;
597 bus->socket_ops = NULL;
598 rtnl_unlock(); 615 rtnl_unlock();
599 616
600 sfp_bus_put(bus); 617 sfp_bus_put(bus);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a192a017cc68..f5727baac84a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1688,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1688 case XDP_TX: 1688 case XDP_TX:
1689 get_page(alloc_frag->page); 1689 get_page(alloc_frag->page);
1690 alloc_frag->offset += buflen; 1690 alloc_frag->offset += buflen;
1691 if (tun_xdp_tx(tun->dev, &xdp)) 1691 if (tun_xdp_tx(tun->dev, &xdp) < 0)
1692 goto err_redirect; 1692 goto err_redirect;
1693 rcu_read_unlock(); 1693 rcu_read_unlock();
1694 local_bh_enable(); 1694 local_bh_enable();
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 3d4f7959dabb..b1b3d8f7e67d 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev)
642 priv->presvd_phy_advertise); 642 priv->presvd_phy_advertise);
643 643
644 /* Restore BMCR */ 644 /* Restore BMCR */
645 if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
646 priv->presvd_phy_bmcr |= BMCR_ANRESTART;
647
645 asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR, 648 asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
646 priv->presvd_phy_bmcr); 649 priv->presvd_phy_bmcr);
647 650
648 mii_nway_restart(&dev->mii);
649 priv->presvd_phy_advertise = 0; 651 priv->presvd_phy_advertise = 0;
650 priv->presvd_phy_bmcr = 0; 652 priv->presvd_phy_bmcr = 0;
651 } 653 }
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 2e4130746c40..ed10d49eb5e0 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3344,6 +3344,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
3344 pkt_cnt = 0; 3344 pkt_cnt = 0;
3345 count = 0; 3345 count = 0;
3346 length = 0; 3346 length = 0;
3347 spin_lock_irqsave(&tqp->lock, flags);
3347 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { 3348 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3348 if (skb_is_gso(skb)) { 3349 if (skb_is_gso(skb)) {
3349 if (pkt_cnt) { 3350 if (pkt_cnt) {
@@ -3352,7 +3353,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
3352 } 3353 }
3353 count = 1; 3354 count = 1;
3354 length = skb->len - TX_OVERHEAD; 3355 length = skb->len - TX_OVERHEAD;
3355 skb2 = skb_dequeue(tqp); 3356 __skb_unlink(skb, tqp);
3357 spin_unlock_irqrestore(&tqp->lock, flags);
3356 goto gso_skb; 3358 goto gso_skb;
3357 } 3359 }
3358 3360
@@ -3361,6 +3363,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
3361 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); 3363 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3362 pkt_cnt++; 3364 pkt_cnt++;
3363 } 3365 }
3366 spin_unlock_irqrestore(&tqp->lock, flags);
3364 3367
3365 /* copy to a single skb */ 3368 /* copy to a single skb */
3366 skb = alloc_skb(skb_totallen, GFP_ATOMIC); 3369 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8fac8e132c5b..38502809420b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1253,6 +1253,7 @@ static const struct usb_device_id products[] = {
1253 {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ 1253 {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
1254 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ 1254 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
1255 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ 1255 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
1256 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
1256 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ 1257 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
1257 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ 1258 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
1258 1259
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 5f565bd574da..48ba80a8ca5c 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
681 (netdev->flags & IFF_ALLMULTI)) { 681 (netdev->flags & IFF_ALLMULTI)) {
682 rx_creg &= 0xfffe; 682 rx_creg &= 0xfffe;
683 rx_creg |= 0x0002; 683 rx_creg |= 0x0002;
684 dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); 684 dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
685 } else { 685 } else {
686 /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ 686 /* ~RX_MULTICAST, ~RX_PROMISCUOUS */
687 rx_creg &= 0x00fc; 687 rx_creg &= 0x00fc;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 7a6a1fe79309..05553d252446 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -82,6 +82,9 @@ static bool turbo_mode = true;
82module_param(turbo_mode, bool, 0644); 82module_param(turbo_mode, bool, 0644);
83MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); 83MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
84 84
85static int smsc75xx_link_ok_nopm(struct usbnet *dev);
86static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
87
85static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, 88static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
86 u32 *data, int in_pm) 89 u32 *data, int in_pm)
87{ 90{
@@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
852 return -EIO; 855 return -EIO;
853 } 856 }
854 857
858 /* phy workaround for gig link */
859 smsc75xx_phy_gig_workaround(dev);
860
855 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, 861 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
856 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | 862 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
857 ADVERTISE_PAUSE_ASYM); 863 ADVERTISE_PAUSE_ASYM);
@@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
987 return -EIO; 993 return -EIO;
988} 994}
989 995
996static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
997{
998 struct mii_if_info *mii = &dev->mii;
999 int ret = 0, timeout = 0;
1000 u32 buf, link_up = 0;
1001
1002 /* Set the phy in Gig loopback */
1003 smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
1004
1005 /* Wait for the link up */
1006 do {
1007 link_up = smsc75xx_link_ok_nopm(dev);
1008 usleep_range(10000, 20000);
1009 timeout++;
1010 } while ((!link_up) && (timeout < 1000));
1011
1012 if (timeout >= 1000) {
1013 netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
1014 return -EIO;
1015 }
1016
1017 /* phy reset */
1018 ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
1019 if (ret < 0) {
1020 netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
1021 return ret;
1022 }
1023
1024 buf |= PMT_CTL_PHY_RST;
1025
1026 ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
1027 if (ret < 0) {
1028 netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
1029 return ret;
1030 }
1031
1032 timeout = 0;
1033 do {
1034 usleep_range(10000, 20000);
1035 ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
1036 if (ret < 0) {
1037 netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
1038 ret);
1039 return ret;
1040 }
1041 timeout++;
1042 } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
1043
1044 if (timeout >= 100) {
1045 netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
1046 return -EIO;
1047 }
1048
1049 return 0;
1050}
1051
990static int smsc75xx_reset(struct usbnet *dev) 1052static int smsc75xx_reset(struct usbnet *dev)
991{ 1053{
992 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 1054 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e9c2fb318c03..836e0a47b94a 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6058,8 +6058,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
6058 ath10k_mac_max_vht_nss(vht_mcs_mask))); 6058 ath10k_mac_max_vht_nss(vht_mcs_mask)));
6059 6059
6060 if (changed & IEEE80211_RC_BW_CHANGED) { 6060 if (changed & IEEE80211_RC_BW_CHANGED) {
6061 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", 6061 enum wmi_phy_mode mode;
6062 sta->addr, bw); 6062
6063 mode = chan_to_phymode(&def);
6064 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
6065 sta->addr, bw, mode);
6066
6067 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6068 WMI_PEER_PHYMODE, mode);
6069 if (err) {
6070 ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
6071 sta->addr, mode, err);
6072 goto exit;
6073 }
6063 6074
6064 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6075 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6065 WMI_PEER_CHAN_WIDTH, bw); 6076 WMI_PEER_CHAN_WIDTH, bw);
@@ -6100,6 +6111,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
6100 sta->addr); 6111 sta->addr);
6101 } 6112 }
6102 6113
6114exit:
6103 mutex_unlock(&ar->conf_mutex); 6115 mutex_unlock(&ar->conf_mutex);
6104} 6116}
6105 6117
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index b48db54e9865..d68afb65402a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -6144,6 +6144,7 @@ enum wmi_peer_param {
6144 WMI_PEER_NSS = 0x5, 6144 WMI_PEER_NSS = 0x5,
6145 WMI_PEER_USE_4ADDR = 0x6, 6145 WMI_PEER_USE_4ADDR = 0x6,
6146 WMI_PEER_DEBUG = 0xa, 6146 WMI_PEER_DEBUG = 0xa,
6147 WMI_PEER_PHYMODE = 0xd,
6147 WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */ 6148 WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
6148}; 6149};
6149 6150
diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c
index 1279064a3b71..51a038022c8b 100644
--- a/drivers/net/wireless/ath/wcn36xx/testmode.c
+++ b/drivers/net/wireless/ath/wcn36xx/testmode.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (c) 2018, The Linux Foundation. All rights reserved. 2 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index c99a191e8d69..a907d7b065fa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4296,6 +4296,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
4296 brcmf_dbg(TRACE, "Enter\n"); 4296 brcmf_dbg(TRACE, "Enter\n");
4297 4297
4298 if (bus) { 4298 if (bus) {
4299 /* Stop watchdog task */
4300 if (bus->watchdog_tsk) {
4301 send_sig(SIGTERM, bus->watchdog_tsk, 1);
4302 kthread_stop(bus->watchdog_tsk);
4303 bus->watchdog_tsk = NULL;
4304 }
4305
4299 /* De-register interrupt handler */ 4306 /* De-register interrupt handler */
4300 brcmf_sdiod_intr_unregister(bus->sdiodev); 4307 brcmf_sdiod_intr_unregister(bus->sdiodev);
4301 4308
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 6e3cf9817730..88f4c89f89ba 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -644,11 +644,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
644 MWIFIEX_FUNC_SHUTDOWN); 644 MWIFIEX_FUNC_SHUTDOWN);
645 } 645 }
646 646
647 if (adapter->workqueue)
648 flush_workqueue(adapter->workqueue);
649
650 mwifiex_usb_free(card);
651
652 mwifiex_dbg(adapter, FATAL, 647 mwifiex_dbg(adapter, FATAL,
653 "%s: removing card\n", __func__); 648 "%s: removing card\n", __func__);
654 mwifiex_remove_card(adapter); 649 mwifiex_remove_card(adapter);
@@ -1356,6 +1351,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
1356{ 1351{
1357 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; 1352 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
1358 1353
1354 mwifiex_usb_free(card);
1355
1359 mwifiex_usb_cleanup_tx_aggr(adapter); 1356 mwifiex_usb_cleanup_tx_aggr(adapter);
1360 1357
1361 card->adapter = NULL; 1358 card->adapter = NULL;
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 9d2f9a776ef1..b804abd464ae 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -986,13 +986,15 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev)
986 */ 986 */
987 spin_lock_bh(&dev->con_mon_lock); 987 spin_lock_bh(&dev->con_mon_lock);
988 avg_rssi = ewma_rssi_read(&dev->avg_rssi); 988 avg_rssi = ewma_rssi_read(&dev->avg_rssi);
989 WARN_ON_ONCE(avg_rssi == 0); 989 spin_unlock_bh(&dev->con_mon_lock);
990 if (avg_rssi == 0)
991 return;
992
990 avg_rssi = -avg_rssi; 993 avg_rssi = -avg_rssi;
991 if (avg_rssi <= -70) 994 if (avg_rssi <= -70)
992 val -= 0x20; 995 val -= 0x20;
993 else if (avg_rssi <= -60) 996 else if (avg_rssi <= -60)
994 val -= 0x10; 997 val -= 0x10;
995 spin_unlock_bh(&dev->con_mon_lock);
996 998
997 if (val != mt7601u_bbp_rr(dev, 66)) 999 if (val != mt7601u_bbp_rr(dev, 66))
998 mt7601u_bbp_wr(dev, 66, val); 1000 mt7601u_bbp_wr(dev, 66, val);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 220e2b710208..ae0ca8006849 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -654,8 +654,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
654 vif = qtnf_mac_get_base_vif(mac); 654 vif = qtnf_mac_get_base_vif(mac);
655 if (!vif) { 655 if (!vif) {
656 pr_err("MAC%u: primary VIF is not configured\n", mac->macid); 656 pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
657 ret = -EFAULT; 657 return -EFAULT;
658 goto out;
659 } 658 }
660 659
661 if (vif->wdev.iftype != NL80211_IFTYPE_STATION) { 660 if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 39c817eddd78..54c9f6ab0c8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -484,18 +484,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
484 484
485} 485}
486 486
487void rtl_deinit_deferred_work(struct ieee80211_hw *hw) 487void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
488{ 488{
489 struct rtl_priv *rtlpriv = rtl_priv(hw); 489 struct rtl_priv *rtlpriv = rtl_priv(hw);
490 490
491 del_timer_sync(&rtlpriv->works.watchdog_timer); 491 del_timer_sync(&rtlpriv->works.watchdog_timer);
492 492
493 cancel_delayed_work(&rtlpriv->works.watchdog_wq); 493 cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq);
494 cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); 494 if (ips_wq)
495 cancel_delayed_work(&rtlpriv->works.ps_work); 495 cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
496 cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); 496 else
497 cancel_delayed_work(&rtlpriv->works.fwevt_wq); 497 cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
498 cancel_delayed_work(&rtlpriv->works.c2hcmd_wq); 498 cancel_delayed_work_sync(&rtlpriv->works.ps_work);
499 cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq);
500 cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq);
501 cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq);
499} 502}
500EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work); 503EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
501 504
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index 912f205779c3..a7ae40eaa3cd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
121void rtl_deinit_rfkill(struct ieee80211_hw *hw); 121void rtl_deinit_rfkill(struct ieee80211_hw *hw);
122 122
123void rtl_watch_dog_timer_callback(struct timer_list *t); 123void rtl_watch_dog_timer_callback(struct timer_list *t);
124void rtl_deinit_deferred_work(struct ieee80211_hw *hw); 124void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq);
125 125
126bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); 126bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
127int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, 127int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index cfea57efa7f4..4bf7967590ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -130,7 +130,6 @@ found_alt:
130 firmware->size); 130 firmware->size);
131 rtlpriv->rtlhal.wowlan_fwsize = firmware->size; 131 rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
132 } 132 }
133 rtlpriv->rtlhal.fwsize = firmware->size;
134 release_firmware(firmware); 133 release_firmware(firmware);
135} 134}
136 135
@@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
196 /* reset sec info */ 195 /* reset sec info */
197 rtl_cam_reset_sec_info(hw); 196 rtl_cam_reset_sec_info(hw);
198 197
199 rtl_deinit_deferred_work(hw); 198 rtl_deinit_deferred_work(hw, false);
200 } 199 }
201 rtlpriv->intf_ops->adapter_stop(hw); 200 rtlpriv->intf_ops->adapter_stop(hw);
202 201
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index ae13bcfb3bf0..5d1fda16fc8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2377,7 +2377,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
2377 ieee80211_unregister_hw(hw); 2377 ieee80211_unregister_hw(hw);
2378 rtlmac->mac80211_registered = 0; 2378 rtlmac->mac80211_registered = 0;
2379 } else { 2379 } else {
2380 rtl_deinit_deferred_work(hw); 2380 rtl_deinit_deferred_work(hw, false);
2381 rtlpriv->intf_ops->adapter_stop(hw); 2381 rtlpriv->intf_ops->adapter_stop(hw);
2382 } 2382 }
2383 rtlpriv->cfg->ops->disable_interrupt(hw); 2383 rtlpriv->cfg->ops->disable_interrupt(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 71af24e2e051..479a4cfc245d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -71,7 +71,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
71 struct rtl_priv *rtlpriv = rtl_priv(hw); 71 struct rtl_priv *rtlpriv = rtl_priv(hw);
72 72
73 /*<1> Stop all timer */ 73 /*<1> Stop all timer */
74 rtl_deinit_deferred_work(hw); 74 rtl_deinit_deferred_work(hw, true);
75 75
76 /*<2> Disable Interrupt */ 76 /*<2> Disable Interrupt */
77 rtlpriv->cfg->ops->disable_interrupt(hw); 77 rtlpriv->cfg->ops->disable_interrupt(hw);
@@ -292,7 +292,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
292 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 292 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
293 enum rf_pwrstate rtstate; 293 enum rf_pwrstate rtstate;
294 294
295 cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); 295 cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
296 296
297 mutex_lock(&rtlpriv->locks.ips_mutex); 297 mutex_lock(&rtlpriv->locks.ips_mutex);
298 if (ppsc->inactiveps) { 298 if (ppsc->inactiveps) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index f9faffc498bc..2ac5004d7a40 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1132,7 +1132,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
1132 ieee80211_unregister_hw(hw); 1132 ieee80211_unregister_hw(hw);
1133 rtlmac->mac80211_registered = 0; 1133 rtlmac->mac80211_registered = 0;
1134 } else { 1134 } else {
1135 rtl_deinit_deferred_work(hw); 1135 rtl_deinit_deferred_work(hw, false);
1136 rtlpriv->intf_ops->adapter_stop(hw); 1136 rtlpriv->intf_ops->adapter_stop(hw);
1137 } 1137 }
1138 /*deinit rfkill */ 1138 /*deinit rfkill */
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 547dbdac9d54..01b0e2bb3319 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
89 case PTP_PF_PHYSYNC: 89 case PTP_PF_PHYSYNC:
90 if (chan != 0) 90 if (chan != 0)
91 return -EINVAL; 91 return -EINVAL;
92 break;
92 default: 93 default:
93 return -EINVAL; 94 return -EINVAL;
94 } 95 }
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 79795c5fa7c3..d50c2f0a655a 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -2,6 +2,7 @@
2#ifndef _BPF_CGROUP_H 2#ifndef _BPF_CGROUP_H
3#define _BPF_CGROUP_H 3#define _BPF_CGROUP_H
4 4
5#include <linux/errno.h>
5#include <linux/jump_label.h> 6#include <linux/jump_label.h>
6#include <uapi/linux/bpf.h> 7#include <uapi/linux/bpf.h>
7 8
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 300baad62c88..c73dd7396886 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -765,8 +765,8 @@ static inline bool bpf_dump_raw_ok(void)
765struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 765struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
766 const struct bpf_insn *patch, u32 len); 766 const struct bpf_insn *patch, u32 len);
767 767
768static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, 768static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
769 struct net_device *fwd) 769 unsigned int pktlen)
770{ 770{
771 unsigned int len; 771 unsigned int len;
772 772
@@ -774,7 +774,7 @@ static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb,
774 return -ENETDOWN; 774 return -ENETDOWN;
775 775
776 len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; 776 len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
777 if (skb->len > len) 777 if (pktlen > len)
778 return -EMSGSIZE; 778 return -EMSGSIZE;
779 779
780 return 0; 780 return 0;
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 3efa3b861d44..941b11811f85 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -16,6 +16,7 @@
16#define __FSL_GUTS_H__ 16#define __FSL_GUTS_H__
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/io.h>
19 20
20/** 21/**
21 * Global Utility Registers. 22 * Global Utility Registers.
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 7843b98e1c6e..c20c7e197d07 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev)
105 105
106static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) 106static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
107{ 107{
108 return -1; 108 return -EINVAL;
109} 109}
110 110
111static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, 111static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
112 struct bridge_vlan_info *p_vinfo) 112 struct bridge_vlan_info *p_vinfo)
113{ 113{
114 return -1; 114 return -EINVAL;
115} 115}
116#endif 116#endif
117 117
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index f8231854b5d6..119f53941c12 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -109,6 +109,8 @@ struct ip_mc_list {
109extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); 109extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
110extern int igmp_rcv(struct sk_buff *); 110extern int igmp_rcv(struct sk_buff *);
111extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); 111extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
112extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
113 unsigned int mode);
112extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); 114extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
113extern void ip_mc_drop_socket(struct sock *sk); 115extern void ip_mc_drop_socket(struct sock *sk);
114extern int ip_mc_source(int add, int omode, struct sock *sk, 116extern int ip_mc_source(int add, int omode, struct sock *sk,
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 4f5f8c21e283..1eb6f244588d 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -27,6 +27,8 @@
27 */ 27 */
28#define MARVELL_PHY_ID_88E6390 0x01410f90 28#define MARVELL_PHY_ID_88E6390 0x01410f90
29 29
30#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
31
30/* struct phy_device dev_flags definitions */ 32/* struct phy_device dev_flags definitions */
31#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 33#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
32#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 34#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 164cdedf6012..610a201126ee 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
630 * @hash: the packet hash 630 * @hash: the packet hash
631 * @queue_mapping: Queue mapping for multiqueue devices 631 * @queue_mapping: Queue mapping for multiqueue devices
632 * @xmit_more: More SKBs are pending for this queue 632 * @xmit_more: More SKBs are pending for this queue
633 * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
633 * @ndisc_nodetype: router type (from link layer) 634 * @ndisc_nodetype: router type (from link layer)
634 * @ooo_okay: allow the mapping of a socket to a queue to be changed 635 * @ooo_okay: allow the mapping of a socket to a queue to be changed
635 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport 636 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -735,7 +736,7 @@ struct sk_buff {
735 peeked:1, 736 peeked:1,
736 head_frag:1, 737 head_frag:1,
737 xmit_more:1, 738 xmit_more:1,
738 __unused:1; /* one bit hole */ 739 pfmemalloc:1;
739 740
740 /* fields enclosed in headers_start/headers_end are copied 741 /* fields enclosed in headers_start/headers_end are copied
741 * using a single memcpy() in __copy_skb_header() 742 * using a single memcpy() in __copy_skb_header()
@@ -754,31 +755,30 @@ struct sk_buff {
754 755
755 __u8 __pkt_type_offset[0]; 756 __u8 __pkt_type_offset[0];
756 __u8 pkt_type:3; 757 __u8 pkt_type:3;
757 __u8 pfmemalloc:1;
758 __u8 ignore_df:1; 758 __u8 ignore_df:1;
759
760 __u8 nf_trace:1; 759 __u8 nf_trace:1;
761 __u8 ip_summed:2; 760 __u8 ip_summed:2;
762 __u8 ooo_okay:1; 761 __u8 ooo_okay:1;
762
763 __u8 l4_hash:1; 763 __u8 l4_hash:1;
764 __u8 sw_hash:1; 764 __u8 sw_hash:1;
765 __u8 wifi_acked_valid:1; 765 __u8 wifi_acked_valid:1;
766 __u8 wifi_acked:1; 766 __u8 wifi_acked:1;
767
768 __u8 no_fcs:1; 767 __u8 no_fcs:1;
769 /* Indicates the inner headers are valid in the skbuff. */ 768 /* Indicates the inner headers are valid in the skbuff. */
770 __u8 encapsulation:1; 769 __u8 encapsulation:1;
771 __u8 encap_hdr_csum:1; 770 __u8 encap_hdr_csum:1;
772 __u8 csum_valid:1; 771 __u8 csum_valid:1;
772
773 __u8 csum_complete_sw:1; 773 __u8 csum_complete_sw:1;
774 __u8 csum_level:2; 774 __u8 csum_level:2;
775 __u8 csum_not_inet:1; 775 __u8 csum_not_inet:1;
776
777 __u8 dst_pending_confirm:1; 776 __u8 dst_pending_confirm:1;
778#ifdef CONFIG_IPV6_NDISC_NODETYPE 777#ifdef CONFIG_IPV6_NDISC_NODETYPE
779 __u8 ndisc_nodetype:2; 778 __u8 ndisc_nodetype:2;
780#endif 779#endif
781 __u8 ipvs_property:1; 780 __u8 ipvs_property:1;
781
782 __u8 inner_protocol_type:1; 782 __u8 inner_protocol_type:1;
783 __u8 remcsum_offload:1; 783 __u8 remcsum_offload:1;
784#ifdef CONFIG_NET_SWITCHDEV 784#ifdef CONFIG_NET_SWITCHDEV
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 59656fc580df..7b9c82de11cc 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
66 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); 66 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
67} 67}
68 68
69static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
70{
71 return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
72 RTF_GATEWAY;
73}
74
69void ip6_route_input(struct sk_buff *skb); 75void ip6_route_input(struct sk_buff *skb);
70struct dst_entry *ip6_route_input_lookup(struct net *net, 76struct dst_entry *ip6_route_input_lookup(struct net *net,
71 struct net_device *dev, 77 struct net_device *dev,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 16475c269749..8f73be494503 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -355,14 +355,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
355struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, 355struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
356 struct ipv6_txoptions *opt, 356 struct ipv6_txoptions *opt,
357 int newtype, 357 int newtype,
358 struct ipv6_opt_hdr __user *newopt, 358 struct ipv6_opt_hdr *newopt);
359 int newoptlen);
360struct ipv6_txoptions *
361ipv6_renew_options_kern(struct sock *sk,
362 struct ipv6_txoptions *opt,
363 int newtype,
364 struct ipv6_opt_hdr *newopt,
365 int newoptlen);
366struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, 359struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
367 struct ipv6_txoptions *opt); 360 struct ipv6_txoptions *opt);
368 361
@@ -830,7 +823,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
830 * to minimize possbility that any useful information to an 823 * to minimize possbility that any useful information to an
831 * attacker is leaked. Only lower 20 bits are relevant. 824 * attacker is leaked. Only lower 20 bits are relevant.
832 */ 825 */
833 rol32(hash, 16); 826 hash = rol32(hash, 16);
834 827
835 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; 828 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
836 829
@@ -1107,6 +1100,8 @@ void ipv6_sysctl_unregister(void);
1107 1100
1108int ipv6_sock_mc_join(struct sock *sk, int ifindex, 1101int ipv6_sock_mc_join(struct sock *sk, int ifindex,
1109 const struct in6_addr *addr); 1102 const struct in6_addr *addr);
1103int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
1104 const struct in6_addr *addr, unsigned int mode);
1110int ipv6_sock_mc_drop(struct sock *sk, int ifindex, 1105int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
1111 const struct in6_addr *addr); 1106 const struct in6_addr *addr);
1112#endif /* _NET_IPV6_H */ 1107#endif /* _NET_IPV6_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index e0c0c2558ec4..a05134507e7b 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -65,4 +65,10 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
65extern struct static_key_false nft_counters_enabled; 65extern struct static_key_false nft_counters_enabled;
66extern struct static_key_false nft_trace_enabled; 66extern struct static_key_false nft_trace_enabled;
67 67
68extern struct nft_set_type nft_set_rhash_type;
69extern struct nft_set_type nft_set_hash_type;
70extern struct nft_set_type nft_set_hash_fast_type;
71extern struct nft_set_type nft_set_rbtree_type;
72extern struct nft_set_type nft_set_bitmap_type;
73
68#endif /* _NET_NF_TABLES_CORE_H */ 74#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index 9754a50ecde9..4cc64c8446eb 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -64,7 +64,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
64 * belonging to established connections going through that one. 64 * belonging to established connections going through that one.
65 */ 65 */
66struct sock * 66struct sock *
67nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, 67nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
68 const u8 protocol, 68 const u8 protocol,
69 const __be32 saddr, const __be32 daddr, 69 const __be32 saddr, const __be32 daddr,
70 const __be16 sport, const __be16 dport, 70 const __be16 sport, const __be16 dport,
@@ -103,7 +103,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
103 struct sock *sk); 103 struct sock *sk);
104 104
105struct sock * 105struct sock *
106nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, 106nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
107 const u8 protocol, 107 const u8 protocol,
108 const struct in6_addr *saddr, const struct in6_addr *daddr, 108 const struct in6_addr *saddr, const struct in6_addr *daddr,
109 const __be16 sport, const __be16 dport, 109 const __be16 sport, const __be16 dport,
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 9470fd7e4350..32d2454c0479 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -7,7 +7,6 @@
7#include <linux/tc_act/tc_csum.h> 7#include <linux/tc_act/tc_csum.h>
8 8
9struct tcf_csum_params { 9struct tcf_csum_params {
10 int action;
11 u32 update_flags; 10 u32 update_flags;
12 struct rcu_head rcu; 11 struct rcu_head rcu;
13}; 12};
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index efef0b4b1b2b..46b8c7f1c8d5 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -18,7 +18,6 @@
18struct tcf_tunnel_key_params { 18struct tcf_tunnel_key_params {
19 struct rcu_head rcu; 19 struct rcu_head rcu;
20 int tcft_action; 20 int tcft_action;
21 int action;
22 struct metadata_dst *tcft_enc_metadata; 21 struct metadata_dst *tcft_enc_metadata;
23}; 22};
24 23
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 800582b5dd54..3482d13d655b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -828,6 +828,10 @@ struct tcp_skb_cb {
828 828
829#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) 829#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
830 830
831static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
832{
833 TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
834}
831 835
832#if IS_ENABLED(CONFIG_IPV6) 836#if IS_ENABLED(CONFIG_IPV6)
833/* This is the variant of inet6_iif() that must be used by TCP, 837/* This is the variant of inet6_iif() that must be used by TCP,
@@ -908,8 +912,6 @@ enum tcp_ca_event {
908 CA_EVENT_LOSS, /* loss timeout */ 912 CA_EVENT_LOSS, /* loss timeout */
909 CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ 913 CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
910 CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ 914 CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
911 CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
912 CA_EVENT_NON_DELAYED_ACK,
913}; 915};
914 916
915/* Information about inbound ACK, passed to cong_ops->in_ack_event() */ 917/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 9fe472f2ac95..7161856bcf9c 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -60,6 +60,10 @@ struct xdp_sock {
60 bool zc; 60 bool zc;
61 /* Protects multiple processes in the control path */ 61 /* Protects multiple processes in the control path */
62 struct mutex mutex; 62 struct mutex mutex;
63 /* Mutual exclusion of NAPI TX thread and sendmsg error paths
64 * in the SKB destructor callback.
65 */
66 spinlock_t tx_completion_lock;
63 u64 rx_dropped; 67 u64 rx_dropped;
64}; 68};
65 69
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 4ca65b56084f..7363f18e65a5 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -226,7 +226,7 @@ enum tunable_id {
226 ETHTOOL_TX_COPYBREAK, 226 ETHTOOL_TX_COPYBREAK,
227 ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */ 227 ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
228 /* 228 /*
229 * Add your fresh new tubale attribute above and remember to update 229 * Add your fresh new tunable attribute above and remember to update
230 * tunable_strings[] in net/core/ethtool.c 230 * tunable_strings[] in net/core/ethtool.c
231 */ 231 */
232 __ETHTOOL_TUNABLE_COUNT, 232 __ETHTOOL_TUNABLE_COUNT,
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 29eb659aa77a..e3f6ed8a7064 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -127,6 +127,10 @@ enum {
127 127
128#define TCP_CM_INQ TCP_INQ 128#define TCP_CM_INQ TCP_INQ
129 129
130#define TCP_REPAIR_ON 1
131#define TCP_REPAIR_OFF 0
132#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
133
130struct tcp_repair_opt { 134struct tcp_repair_opt {
131 __u32 opt_code; 135 __u32 opt_code;
132 __u32 opt_val; 136 __u32 opt_val;
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 2d49d18b793a..e016ac3afa24 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -991,16 +991,13 @@ static void btf_int_bits_seq_show(const struct btf *btf,
991 void *data, u8 bits_offset, 991 void *data, u8 bits_offset,
992 struct seq_file *m) 992 struct seq_file *m)
993{ 993{
994 u16 left_shift_bits, right_shift_bits;
994 u32 int_data = btf_type_int(t); 995 u32 int_data = btf_type_int(t);
995 u16 nr_bits = BTF_INT_BITS(int_data); 996 u16 nr_bits = BTF_INT_BITS(int_data);
996 u16 total_bits_offset; 997 u16 total_bits_offset;
997 u16 nr_copy_bytes; 998 u16 nr_copy_bytes;
998 u16 nr_copy_bits; 999 u16 nr_copy_bits;
999 u8 nr_upper_bits; 1000 u64 print_num;
1000 union {
1001 u64 u64_num;
1002 u8 u8_nums[8];
1003 } print_num;
1004 1001
1005 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 1002 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1006 data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 1003 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
@@ -1008,21 +1005,20 @@ static void btf_int_bits_seq_show(const struct btf *btf,
1008 nr_copy_bits = nr_bits + bits_offset; 1005 nr_copy_bits = nr_bits + bits_offset;
1009 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); 1006 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1010 1007
1011 print_num.u64_num = 0; 1008 print_num = 0;
1012 memcpy(&print_num.u64_num, data, nr_copy_bytes); 1009 memcpy(&print_num, data, nr_copy_bytes);
1013 1010
1014 /* Ditch the higher order bits */ 1011#ifdef __BIG_ENDIAN_BITFIELD
1015 nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits); 1012 left_shift_bits = bits_offset;
1016 if (nr_upper_bits) { 1013#else
1017 /* We need to mask out some bits of the upper byte. */ 1014 left_shift_bits = BITS_PER_U64 - nr_copy_bits;
1018 u8 mask = (1 << nr_upper_bits) - 1; 1015#endif
1016 right_shift_bits = BITS_PER_U64 - nr_bits;
1019 1017
1020 print_num.u8_nums[nr_copy_bytes - 1] &= mask; 1018 print_num <<= left_shift_bits;
1021 } 1019 print_num >>= right_shift_bits;
1022
1023 print_num.u64_num >>= bits_offset;
1024 1020
1025 seq_printf(m, "0x%llx", print_num.u64_num); 1021 seq_printf(m, "0x%llx", print_num);
1026} 1022}
1027 1023
1028static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, 1024static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 642c97f6d1b8..d361fc1e3bf3 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -334,10 +334,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
334{ 334{
335 struct net_device *dev = dst->dev; 335 struct net_device *dev = dst->dev;
336 struct xdp_frame *xdpf; 336 struct xdp_frame *xdpf;
337 int err;
337 338
338 if (!dev->netdev_ops->ndo_xdp_xmit) 339 if (!dev->netdev_ops->ndo_xdp_xmit)
339 return -EOPNOTSUPP; 340 return -EOPNOTSUPP;
340 341
342 err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
343 if (unlikely(err))
344 return err;
345
341 xdpf = convert_to_xdp_frame(xdp); 346 xdpf = convert_to_xdp_frame(xdp);
342 if (unlikely(!xdpf)) 347 if (unlikely(!xdpf))
343 return -EOVERFLOW; 348 return -EOVERFLOW;
@@ -350,7 +355,7 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
350{ 355{
351 int err; 356 int err;
352 357
353 err = __xdp_generic_ok_fwd_dev(skb, dst->dev); 358 err = xdp_ok_fwd_dev(dst->dev, skb->len);
354 if (unlikely(err)) 359 if (unlikely(err))
355 return err; 360 return err;
356 skb->dev = dst->dev; 361 skb->dev = dst->dev;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ca2198a6d22..513d9dfcf4ee 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -747,13 +747,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
747 * old element will be freed immediately. 747 * old element will be freed immediately.
748 * Otherwise return an error 748 * Otherwise return an error
749 */ 749 */
750 atomic_dec(&htab->count); 750 l_new = ERR_PTR(-E2BIG);
751 return ERR_PTR(-E2BIG); 751 goto dec_count;
752 } 752 }
753 l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, 753 l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
754 htab->map.numa_node); 754 htab->map.numa_node);
755 if (!l_new) 755 if (!l_new) {
756 return ERR_PTR(-ENOMEM); 756 l_new = ERR_PTR(-ENOMEM);
757 goto dec_count;
758 }
757 } 759 }
758 760
759 memcpy(l_new->key, key, key_size); 761 memcpy(l_new->key, key, key_size);
@@ -766,7 +768,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
766 GFP_ATOMIC | __GFP_NOWARN); 768 GFP_ATOMIC | __GFP_NOWARN);
767 if (!pptr) { 769 if (!pptr) {
768 kfree(l_new); 770 kfree(l_new);
769 return ERR_PTR(-ENOMEM); 771 l_new = ERR_PTR(-ENOMEM);
772 goto dec_count;
770 } 773 }
771 } 774 }
772 775
@@ -780,6 +783,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
780 783
781 l_new->hash = hash; 784 l_new->hash = hash;
782 return l_new; 785 return l_new;
786dec_count:
787 atomic_dec(&htab->count);
788 return l_new;
783} 789}
784 790
785static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, 791static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index cf7b6a6dbd1f..98fb7938beea 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -312,10 +312,12 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
312 struct smap_psock *psock; 312 struct smap_psock *psock;
313 struct sock *osk; 313 struct sock *osk;
314 314
315 lock_sock(sk);
315 rcu_read_lock(); 316 rcu_read_lock();
316 psock = smap_psock_sk(sk); 317 psock = smap_psock_sk(sk);
317 if (unlikely(!psock)) { 318 if (unlikely(!psock)) {
318 rcu_read_unlock(); 319 rcu_read_unlock();
320 release_sock(sk);
319 return sk->sk_prot->close(sk, timeout); 321 return sk->sk_prot->close(sk, timeout);
320 } 322 }
321 323
@@ -371,6 +373,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
371 e = psock_map_pop(sk, psock); 373 e = psock_map_pop(sk, psock);
372 } 374 }
373 rcu_read_unlock(); 375 rcu_read_unlock();
376 release_sock(sk);
374 close_fun(sk, timeout); 377 close_fun(sk, timeout);
375} 378}
376 379
@@ -568,7 +571,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
568 while (sg[i].length) { 571 while (sg[i].length) {
569 free += sg[i].length; 572 free += sg[i].length;
570 sk_mem_uncharge(sk, sg[i].length); 573 sk_mem_uncharge(sk, sg[i].length);
571 put_page(sg_page(&sg[i])); 574 if (!md->skb)
575 put_page(sg_page(&sg[i]));
572 sg[i].length = 0; 576 sg[i].length = 0;
573 sg[i].page_link = 0; 577 sg[i].page_link = 0;
574 sg[i].offset = 0; 578 sg[i].offset = 0;
@@ -577,6 +581,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
577 if (i == MAX_SKB_FRAGS) 581 if (i == MAX_SKB_FRAGS)
578 i = 0; 582 i = 0;
579 } 583 }
584 if (md->skb)
585 consume_skb(md->skb);
580 586
581 return free; 587 return free;
582} 588}
@@ -1230,7 +1236,7 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
1230 */ 1236 */
1231 TCP_SKB_CB(skb)->bpf.sk_redir = NULL; 1237 TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
1232 skb->sk = psock->sock; 1238 skb->sk = psock->sock;
1233 bpf_compute_data_pointers(skb); 1239 bpf_compute_data_end_sk_skb(skb);
1234 preempt_disable(); 1240 preempt_disable();
1235 rc = (*prog->bpf_func)(skb, prog->insnsi); 1241 rc = (*prog->bpf_func)(skb, prog->insnsi);
1236 preempt_enable(); 1242 preempt_enable();
@@ -1485,7 +1491,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
1485 * any socket yet. 1491 * any socket yet.
1486 */ 1492 */
1487 skb->sk = psock->sock; 1493 skb->sk = psock->sock;
1488 bpf_compute_data_pointers(skb); 1494 bpf_compute_data_end_sk_skb(skb);
1489 rc = (*prog->bpf_func)(skb, prog->insnsi); 1495 rc = (*prog->bpf_func)(skb, prog->insnsi);
1490 skb->sk = NULL; 1496 skb->sk = NULL;
1491 rcu_read_unlock(); 1497 rcu_read_unlock();
@@ -1896,7 +1902,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
1896 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN); 1902 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
1897 if (!e) { 1903 if (!e) {
1898 err = -ENOMEM; 1904 err = -ENOMEM;
1899 goto out_progs; 1905 goto out_free;
1900 } 1906 }
1901 } 1907 }
1902 1908
@@ -2069,7 +2075,13 @@ static int sock_map_update_elem(struct bpf_map *map,
2069 return -EOPNOTSUPP; 2075 return -EOPNOTSUPP;
2070 } 2076 }
2071 2077
2078 lock_sock(skops.sk);
2079 preempt_disable();
2080 rcu_read_lock();
2072 err = sock_map_ctx_update_elem(&skops, map, key, flags); 2081 err = sock_map_ctx_update_elem(&skops, map, key, flags);
2082 rcu_read_unlock();
2083 preempt_enable();
2084 release_sock(skops.sk);
2073 fput(socket->file); 2085 fput(socket->file);
2074 return err; 2086 return err;
2075} 2087}
@@ -2342,7 +2354,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
2342 if (err) 2354 if (err)
2343 goto err; 2355 goto err;
2344 2356
2345 /* bpf_map_update_elem() can be called in_irq() */ 2357 /* psock is valid here because otherwise above *ctx_update_elem would
2358 * have thrown an error. It is safe to skip error check.
2359 */
2360 psock = smap_psock_sk(sock);
2346 raw_spin_lock_bh(&b->lock); 2361 raw_spin_lock_bh(&b->lock);
2347 l_old = lookup_elem_raw(head, hash, key, key_size); 2362 l_old = lookup_elem_raw(head, hash, key, key_size);
2348 if (l_old && map_flags == BPF_NOEXIST) { 2363 if (l_old && map_flags == BPF_NOEXIST) {
@@ -2360,12 +2375,6 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
2360 goto bucket_err; 2375 goto bucket_err;
2361 } 2376 }
2362 2377
2363 psock = smap_psock_sk(sock);
2364 if (unlikely(!psock)) {
2365 err = -EINVAL;
2366 goto bucket_err;
2367 }
2368
2369 rcu_assign_pointer(e->hash_link, l_new); 2378 rcu_assign_pointer(e->hash_link, l_new);
2370 rcu_assign_pointer(e->htab, 2379 rcu_assign_pointer(e->htab,
2371 container_of(map, struct bpf_htab, map)); 2380 container_of(map, struct bpf_htab, map));
@@ -2388,12 +2397,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
2388 raw_spin_unlock_bh(&b->lock); 2397 raw_spin_unlock_bh(&b->lock);
2389 return 0; 2398 return 0;
2390bucket_err: 2399bucket_err:
2400 smap_release_sock(psock, sock);
2391 raw_spin_unlock_bh(&b->lock); 2401 raw_spin_unlock_bh(&b->lock);
2392err: 2402err:
2393 kfree(e); 2403 kfree(e);
2394 psock = smap_psock_sk(sock);
2395 if (psock)
2396 smap_release_sock(psock, sock);
2397 return err; 2404 return err;
2398} 2405}
2399 2406
@@ -2415,7 +2422,13 @@ static int sock_hash_update_elem(struct bpf_map *map,
2415 return -EINVAL; 2422 return -EINVAL;
2416 } 2423 }
2417 2424
2425 lock_sock(skops.sk);
2426 preempt_disable();
2427 rcu_read_lock();
2418 err = sock_hash_ctx_update_elem(&skops, map, key, flags); 2428 err = sock_hash_ctx_update_elem(&skops, map, key, flags);
2429 rcu_read_unlock();
2430 preempt_enable();
2431 release_sock(skops.sk);
2419 fput(socket->file); 2432 fput(socket->file);
2420 return err; 2433 return err;
2421} 2434}
@@ -2472,10 +2485,8 @@ struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
2472 b = __select_bucket(htab, hash); 2485 b = __select_bucket(htab, hash);
2473 head = &b->head; 2486 head = &b->head;
2474 2487
2475 raw_spin_lock_bh(&b->lock);
2476 l = lookup_elem_raw(head, hash, key, key_size); 2488 l = lookup_elem_raw(head, hash, key, key_size);
2477 sk = l ? l->sk : NULL; 2489 sk = l ? l->sk : NULL;
2478 raw_spin_unlock_bh(&b->lock);
2479 return sk; 2490 return sk;
2480} 2491}
2481 2492
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index d10ecd78105f..a31a1ba0f8ea 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -735,7 +735,9 @@ static int map_update_elem(union bpf_attr *attr)
735 if (bpf_map_is_dev_bound(map)) { 735 if (bpf_map_is_dev_bound(map)) {
736 err = bpf_map_offload_update_elem(map, key, value, attr->flags); 736 err = bpf_map_offload_update_elem(map, key, value, attr->flags);
737 goto out; 737 goto out;
738 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP) { 738 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
739 map->map_type == BPF_MAP_TYPE_SOCKHASH ||
740 map->map_type == BPF_MAP_TYPE_SOCKMAP) {
739 err = map->ops->map_update_elem(map, key, value, attr->flags); 741 err = map->ops->map_update_elem(map, key, value, attr->flags);
740 goto out; 742 goto out;
741 } 743 }
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9e2bf834f13a..63aaac52a265 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5430,6 +5430,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
5430 if (insn->code != (BPF_JMP | BPF_CALL) || 5430 if (insn->code != (BPF_JMP | BPF_CALL) ||
5431 insn->src_reg != BPF_PSEUDO_CALL) 5431 insn->src_reg != BPF_PSEUDO_CALL)
5432 continue; 5432 continue;
5433 /* Upon error here we cannot fall back to interpreter but
5434 * need a hard reject of the program. Thus -EFAULT is
5435 * propagated in any case.
5436 */
5433 subprog = find_subprog(env, i + insn->imm + 1); 5437 subprog = find_subprog(env, i + insn->imm + 1);
5434 if (subprog < 0) { 5438 if (subprog < 0) {
5435 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 5439 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
@@ -5450,7 +5454,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
5450 5454
5451 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); 5455 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
5452 if (!func) 5456 if (!func)
5453 return -ENOMEM; 5457 goto out_undo_insn;
5454 5458
5455 for (i = 0; i < env->subprog_cnt; i++) { 5459 for (i = 0; i < env->subprog_cnt; i++) {
5456 subprog_start = subprog_end; 5460 subprog_start = subprog_end;
@@ -5515,7 +5519,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
5515 tmp = bpf_int_jit_compile(func[i]); 5519 tmp = bpf_int_jit_compile(func[i]);
5516 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { 5520 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
5517 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); 5521 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
5518 err = -EFAULT; 5522 err = -ENOTSUPP;
5519 goto out_free; 5523 goto out_free;
5520 } 5524 }
5521 cond_resched(); 5525 cond_resched();
@@ -5552,6 +5556,7 @@ out_free:
5552 if (func[i]) 5556 if (func[i])
5553 bpf_jit_free(func[i]); 5557 bpf_jit_free(func[i]);
5554 kfree(func); 5558 kfree(func);
5559out_undo_insn:
5555 /* cleanup main prog to be interpreted */ 5560 /* cleanup main prog to be interpreted */
5556 prog->jit_requested = 0; 5561 prog->jit_requested = 0;
5557 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 5562 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
@@ -5578,6 +5583,8 @@ static int fixup_call_args(struct bpf_verifier_env *env)
5578 err = jit_subprogs(env); 5583 err = jit_subprogs(env);
5579 if (err == 0) 5584 if (err == 0)
5580 return 0; 5585 return 0;
5586 if (err == -EFAULT)
5587 return err;
5581 } 5588 }
5582#ifndef CONFIG_BPF_JIT_ALWAYS_ON 5589#ifndef CONFIG_BPF_JIT_ALWAYS_ON
5583 for (i = 0; i < prog->len; i++, insn++) { 5590 for (i = 0; i < prog->len; i++, insn++) {
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9427b5766134..e5c8586cf717 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -774,7 +774,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
774 skip++; 774 skip++;
775 if (list == iter->list) { 775 if (list == iter->list) {
776 iter->p = p; 776 iter->p = p;
777 skip = skip; 777 iter->skip = skip;
778 goto found; 778 goto found;
779 } 779 }
780 } 780 }
@@ -964,8 +964,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
964 964
965static size_t rounded_hashtable_size(const struct rhashtable_params *params) 965static size_t rounded_hashtable_size(const struct rhashtable_params *params)
966{ 966{
967 return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), 967 size_t retsize;
968 (unsigned long)params->min_size); 968
969 if (params->nelem_hint)
970 retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
971 (unsigned long)params->min_size);
972 else
973 retsize = max(HASH_DEFAULT_SIZE,
974 (unsigned long)params->min_size);
975
976 return retsize;
969} 977}
970 978
971static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) 979static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
@@ -1022,8 +1030,6 @@ int rhashtable_init(struct rhashtable *ht,
1022 struct bucket_table *tbl; 1030 struct bucket_table *tbl;
1023 size_t size; 1031 size_t size;
1024 1032
1025 size = HASH_DEFAULT_SIZE;
1026
1027 if ((!params->key_len && !params->obj_hashfn) || 1033 if ((!params->key_len && !params->obj_hashfn) ||
1028 (params->obj_hashfn && !params->obj_cmpfn)) 1034 (params->obj_hashfn && !params->obj_cmpfn))
1029 return -EINVAL; 1035 return -EINVAL;
@@ -1050,8 +1056,7 @@ int rhashtable_init(struct rhashtable *ht,
1050 1056
1051 ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); 1057 ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1052 1058
1053 if (params->nelem_hint) 1059 size = rounded_hashtable_size(&ht->p);
1054 size = rounded_hashtable_size(&ht->p);
1055 1060
1056 if (params->locks_mul) 1061 if (params->locks_mul)
1057 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); 1062 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
@@ -1143,13 +1148,14 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
1143 void (*free_fn)(void *ptr, void *arg), 1148 void (*free_fn)(void *ptr, void *arg),
1144 void *arg) 1149 void *arg)
1145{ 1150{
1146 struct bucket_table *tbl; 1151 struct bucket_table *tbl, *next_tbl;
1147 unsigned int i; 1152 unsigned int i;
1148 1153
1149 cancel_work_sync(&ht->run_work); 1154 cancel_work_sync(&ht->run_work);
1150 1155
1151 mutex_lock(&ht->mutex); 1156 mutex_lock(&ht->mutex);
1152 tbl = rht_dereference(ht->tbl, ht); 1157 tbl = rht_dereference(ht->tbl, ht);
1158restart:
1153 if (free_fn) { 1159 if (free_fn) {
1154 for (i = 0; i < tbl->size; i++) { 1160 for (i = 0; i < tbl->size; i++) {
1155 struct rhash_head *pos, *next; 1161 struct rhash_head *pos, *next;
@@ -1166,7 +1172,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
1166 } 1172 }
1167 } 1173 }
1168 1174
1175 next_tbl = rht_dereference(tbl->future_tbl, ht);
1169 bucket_table_free(tbl); 1176 bucket_table_free(tbl);
1177 if (next_tbl) {
1178 tbl = next_tbl;
1179 goto restart;
1180 }
1170 mutex_unlock(&ht->mutex); 1181 mutex_unlock(&ht->mutex);
1171} 1182}
1172EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy); 1183EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index be09a9883825..73bf6a93a3cf 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2732,7 +2732,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
2732{ 2732{
2733 struct batadv_neigh_ifinfo *router_ifinfo = NULL; 2733 struct batadv_neigh_ifinfo *router_ifinfo = NULL;
2734 struct batadv_neigh_node *router; 2734 struct batadv_neigh_node *router;
2735 struct batadv_gw_node *curr_gw; 2735 struct batadv_gw_node *curr_gw = NULL;
2736 int ret = 0; 2736 int ret = 0;
2737 void *hdr; 2737 void *hdr;
2738 2738
@@ -2780,6 +2780,8 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
2780 ret = 0; 2780 ret = 0;
2781 2781
2782out: 2782out:
2783 if (curr_gw)
2784 batadv_gw_node_put(curr_gw);
2783 if (router_ifinfo) 2785 if (router_ifinfo)
2784 batadv_neigh_ifinfo_put(router_ifinfo); 2786 batadv_neigh_ifinfo_put(router_ifinfo);
2785 if (router) 2787 if (router)
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index ec93337ee259..6baec4e68898 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -927,7 +927,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
927{ 927{
928 struct batadv_neigh_ifinfo *router_ifinfo = NULL; 928 struct batadv_neigh_ifinfo *router_ifinfo = NULL;
929 struct batadv_neigh_node *router; 929 struct batadv_neigh_node *router;
930 struct batadv_gw_node *curr_gw; 930 struct batadv_gw_node *curr_gw = NULL;
931 int ret = 0; 931 int ret = 0;
932 void *hdr; 932 void *hdr;
933 933
@@ -995,6 +995,8 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
995 ret = 0; 995 ret = 0;
996 996
997out: 997out:
998 if (curr_gw)
999 batadv_gw_node_put(curr_gw);
998 if (router_ifinfo) 1000 if (router_ifinfo)
999 batadv_neigh_ifinfo_put(router_ifinfo); 1001 batadv_neigh_ifinfo_put(router_ifinfo);
1000 if (router) 1002 if (router)
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 4229b01ac7b5..87479c60670e 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -19,6 +19,7 @@
19#include "debugfs.h" 19#include "debugfs.h"
20#include "main.h" 20#include "main.h"
21 21
22#include <linux/dcache.h>
22#include <linux/debugfs.h> 23#include <linux/debugfs.h>
23#include <linux/err.h> 24#include <linux/err.h>
24#include <linux/errno.h> 25#include <linux/errno.h>
@@ -344,6 +345,25 @@ out:
344} 345}
345 346
346/** 347/**
348 * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif
349 * @hard_iface: hard interface which was renamed
350 */
351void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
352{
353 const char *name = hard_iface->net_dev->name;
354 struct dentry *dir;
355 struct dentry *d;
356
357 dir = hard_iface->debug_dir;
358 if (!dir)
359 return;
360
361 d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
362 if (!d)
363 pr_err("Can't rename debugfs dir to %s\n", name);
364}
365
366/**
347 * batadv_debugfs_del_hardif() - delete the base directory for a hard interface 367 * batadv_debugfs_del_hardif() - delete the base directory for a hard interface
348 * in debugfs. 368 * in debugfs.
349 * @hard_iface: hard interface which is deleted. 369 * @hard_iface: hard interface which is deleted.
@@ -414,6 +434,26 @@ out:
414} 434}
415 435
416/** 436/**
437 * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif
438 * @dev: net_device which was renamed
439 */
440void batadv_debugfs_rename_meshif(struct net_device *dev)
441{
442 struct batadv_priv *bat_priv = netdev_priv(dev);
443 const char *name = dev->name;
444 struct dentry *dir;
445 struct dentry *d;
446
447 dir = bat_priv->debug_dir;
448 if (!dir)
449 return;
450
451 d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
452 if (!d)
453 pr_err("Can't rename debugfs dir to %s\n", name);
454}
455
456/**
417 * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries 457 * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
418 * @dev: netdev struct of the soft interface 458 * @dev: netdev struct of the soft interface
419 */ 459 */
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 37b069698b04..08a592ffbee5 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -30,8 +30,10 @@ struct net_device;
30void batadv_debugfs_init(void); 30void batadv_debugfs_init(void);
31void batadv_debugfs_destroy(void); 31void batadv_debugfs_destroy(void);
32int batadv_debugfs_add_meshif(struct net_device *dev); 32int batadv_debugfs_add_meshif(struct net_device *dev);
33void batadv_debugfs_rename_meshif(struct net_device *dev);
33void batadv_debugfs_del_meshif(struct net_device *dev); 34void batadv_debugfs_del_meshif(struct net_device *dev);
34int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface); 35int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
36void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface);
35void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface); 37void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
36 38
37#else 39#else
@@ -49,6 +51,10 @@ static inline int batadv_debugfs_add_meshif(struct net_device *dev)
49 return 0; 51 return 0;
50} 52}
51 53
54static inline void batadv_debugfs_rename_meshif(struct net_device *dev)
55{
56}
57
52static inline void batadv_debugfs_del_meshif(struct net_device *dev) 58static inline void batadv_debugfs_del_meshif(struct net_device *dev)
53{ 59{
54} 60}
@@ -60,6 +66,11 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
60} 66}
61 67
62static inline 68static inline
69void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
70{
71}
72
73static inline
63void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface) 74void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
64{ 75{
65} 76}
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index c405d15befd6..2f0d42f2f913 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -989,6 +989,32 @@ void batadv_hardif_remove_interfaces(void)
989 rtnl_unlock(); 989 rtnl_unlock();
990} 990}
991 991
992/**
993 * batadv_hard_if_event_softif() - Handle events for soft interfaces
994 * @event: NETDEV_* event to handle
995 * @net_dev: net_device which generated an event
996 *
997 * Return: NOTIFY_* result
998 */
999static int batadv_hard_if_event_softif(unsigned long event,
1000 struct net_device *net_dev)
1001{
1002 struct batadv_priv *bat_priv;
1003
1004 switch (event) {
1005 case NETDEV_REGISTER:
1006 batadv_sysfs_add_meshif(net_dev);
1007 bat_priv = netdev_priv(net_dev);
1008 batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
1009 break;
1010 case NETDEV_CHANGENAME:
1011 batadv_debugfs_rename_meshif(net_dev);
1012 break;
1013 }
1014
1015 return NOTIFY_DONE;
1016}
1017
992static int batadv_hard_if_event(struct notifier_block *this, 1018static int batadv_hard_if_event(struct notifier_block *this,
993 unsigned long event, void *ptr) 1019 unsigned long event, void *ptr)
994{ 1020{
@@ -997,12 +1023,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
997 struct batadv_hard_iface *primary_if = NULL; 1023 struct batadv_hard_iface *primary_if = NULL;
998 struct batadv_priv *bat_priv; 1024 struct batadv_priv *bat_priv;
999 1025
1000 if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) { 1026 if (batadv_softif_is_valid(net_dev))
1001 batadv_sysfs_add_meshif(net_dev); 1027 return batadv_hard_if_event_softif(event, net_dev);
1002 bat_priv = netdev_priv(net_dev);
1003 batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
1004 return NOTIFY_DONE;
1005 }
1006 1028
1007 hard_iface = batadv_hardif_get_by_netdev(net_dev); 1029 hard_iface = batadv_hardif_get_by_netdev(net_dev);
1008 if (!hard_iface && (event == NETDEV_REGISTER || 1030 if (!hard_iface && (event == NETDEV_REGISTER ||
@@ -1051,6 +1073,9 @@ static int batadv_hard_if_event(struct notifier_block *this,
1051 if (batadv_is_wifi_hardif(hard_iface)) 1073 if (batadv_is_wifi_hardif(hard_iface))
1052 hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS; 1074 hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
1053 break; 1075 break;
1076 case NETDEV_CHANGENAME:
1077 batadv_debugfs_rename_hardif(hard_iface);
1078 break;
1054 default: 1079 default:
1055 break; 1080 break;
1056 } 1081 }
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 3986551397ca..12a2b7d21376 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1705,7 +1705,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
1705 ether_addr_copy(common->addr, tt_addr); 1705 ether_addr_copy(common->addr, tt_addr);
1706 common->vid = vid; 1706 common->vid = vid;
1707 1707
1708 common->flags = flags; 1708 if (!is_multicast_ether_addr(common->addr))
1709 common->flags = flags & (~BATADV_TT_SYNC_MASK);
1710
1709 tt_global_entry->roam_at = 0; 1711 tt_global_entry->roam_at = 0;
1710 /* node must store current time in case of roaming. This is 1712 /* node must store current time in case of roaming. This is
1711 * needed to purge this entry out on timeout (if nobody claims 1713 * needed to purge this entry out on timeout (if nobody claims
@@ -1768,7 +1770,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
1768 * TT_CLIENT_TEMP, therefore they have to be copied in the 1770 * TT_CLIENT_TEMP, therefore they have to be copied in the
1769 * client entry 1771 * client entry
1770 */ 1772 */
1771 common->flags |= flags & (~BATADV_TT_SYNC_MASK); 1773 if (!is_multicast_ether_addr(common->addr))
1774 common->flags |= flags & (~BATADV_TT_SYNC_MASK);
1772 1775
1773 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only 1776 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
1774 * one originator left in the list and we previously received a 1777 * one originator left in the list and we previously received a
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 68c3578343b4..22a78eedf4b1 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
96 u32 size = kattr->test.data_size_in; 96 u32 size = kattr->test.data_size_in;
97 u32 repeat = kattr->test.repeat; 97 u32 repeat = kattr->test.repeat;
98 u32 retval, duration; 98 u32 retval, duration;
99 int hh_len = ETH_HLEN;
99 struct sk_buff *skb; 100 struct sk_buff *skb;
100 void *data; 101 void *data;
101 int ret; 102 int ret;
@@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
131 skb_reset_network_header(skb); 132 skb_reset_network_header(skb);
132 133
133 if (is_l2) 134 if (is_l2)
134 __skb_push(skb, ETH_HLEN); 135 __skb_push(skb, hh_len);
135 if (is_direct_pkt_access) 136 if (is_direct_pkt_access)
136 bpf_compute_data_pointers(skb); 137 bpf_compute_data_pointers(skb);
137 retval = bpf_test_run(prog, skb, repeat, &duration); 138 retval = bpf_test_run(prog, skb, repeat, &duration);
138 if (!is_l2) 139 if (!is_l2) {
139 __skb_push(skb, ETH_HLEN); 140 if (skb_headroom(skb) < hh_len) {
141 int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
142
143 if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
144 kfree_skb(skb);
145 return -ENOMEM;
146 }
147 }
148 memset(__skb_push(skb, hh_len), 0, hh_len);
149 }
150
140 size = skb->len; 151 size = skb->len;
141 /* bpf program can never convert linear skb to non-linear */ 152 /* bpf program can never convert linear skb to non-linear */
142 if (WARN_ON_ONCE(skb_is_nonlinear(skb))) 153 if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
diff --git a/net/core/filter.c b/net/core/filter.c
index 0ca6907d7efe..06da770f543f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -459,11 +459,21 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
459 (!unaligned_ok && offset >= 0 && 459 (!unaligned_ok && offset >= 0 &&
460 offset + ip_align >= 0 && 460 offset + ip_align >= 0 &&
461 offset + ip_align % size == 0))) { 461 offset + ip_align % size == 0))) {
462 bool ldx_off_ok = offset <= S16_MAX;
463
462 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); 464 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
463 *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); 465 *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
464 *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian); 466 *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
465 *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D, 467 size, 2 + endian + (!ldx_off_ok * 2));
466 offset); 468 if (ldx_off_ok) {
469 *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
470 BPF_REG_D, offset);
471 } else {
472 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
473 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
474 *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
475 BPF_REG_TMP, 0);
476 }
467 if (endian) 477 if (endian)
468 *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); 478 *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
469 *insn++ = BPF_JMP_A(8); 479 *insn++ = BPF_JMP_A(8);
@@ -1762,6 +1772,37 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
1762 .arg2_type = ARG_ANYTHING, 1772 .arg2_type = ARG_ANYTHING,
1763}; 1773};
1764 1774
1775static inline int sk_skb_try_make_writable(struct sk_buff *skb,
1776 unsigned int write_len)
1777{
1778 int err = __bpf_try_make_writable(skb, write_len);
1779
1780 bpf_compute_data_end_sk_skb(skb);
1781 return err;
1782}
1783
1784BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
1785{
1786 /* Idea is the following: should the needed direct read/write
1787 * test fail during runtime, we can pull in more data and redo
1788 * again, since implicitly, we invalidate previous checks here.
1789 *
1790 * Or, since we know how much we need to make read/writeable,
1791 * this can be done once at the program beginning for direct
1792 * access case. By this we overcome limitations of only current
1793 * headroom being accessible.
1794 */
1795 return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
1796}
1797
1798static const struct bpf_func_proto sk_skb_pull_data_proto = {
1799 .func = sk_skb_pull_data,
1800 .gpl_only = false,
1801 .ret_type = RET_INTEGER,
1802 .arg1_type = ARG_PTR_TO_CTX,
1803 .arg2_type = ARG_ANYTHING,
1804};
1805
1765BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, 1806BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
1766 u64, from, u64, to, u64, flags) 1807 u64, from, u64, to, u64, flags)
1767{ 1808{
@@ -2779,7 +2820,8 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2779 2820
2780static u32 __bpf_skb_max_len(const struct sk_buff *skb) 2821static u32 __bpf_skb_max_len(const struct sk_buff *skb)
2781{ 2822{
2782 return skb->dev->mtu + skb->dev->hard_header_len; 2823 return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
2824 SKB_MAX_ALLOC;
2783} 2825}
2784 2826
2785static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff) 2827static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
@@ -2863,8 +2905,8 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
2863 return __skb_trim_rcsum(skb, new_len); 2905 return __skb_trim_rcsum(skb, new_len);
2864} 2906}
2865 2907
2866BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, 2908static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
2867 u64, flags) 2909 u64 flags)
2868{ 2910{
2869 u32 max_len = __bpf_skb_max_len(skb); 2911 u32 max_len = __bpf_skb_max_len(skb);
2870 u32 min_len = __bpf_skb_min_len(skb); 2912 u32 min_len = __bpf_skb_min_len(skb);
@@ -2900,6 +2942,13 @@ BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
2900 if (!ret && skb_is_gso(skb)) 2942 if (!ret && skb_is_gso(skb))
2901 skb_gso_reset(skb); 2943 skb_gso_reset(skb);
2902 } 2944 }
2945 return ret;
2946}
2947
2948BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
2949 u64, flags)
2950{
2951 int ret = __bpf_skb_change_tail(skb, new_len, flags);
2903 2952
2904 bpf_compute_data_pointers(skb); 2953 bpf_compute_data_pointers(skb);
2905 return ret; 2954 return ret;
@@ -2914,9 +2963,27 @@ static const struct bpf_func_proto bpf_skb_change_tail_proto = {
2914 .arg3_type = ARG_ANYTHING, 2963 .arg3_type = ARG_ANYTHING,
2915}; 2964};
2916 2965
2917BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, 2966BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
2918 u64, flags) 2967 u64, flags)
2919{ 2968{
2969 int ret = __bpf_skb_change_tail(skb, new_len, flags);
2970
2971 bpf_compute_data_end_sk_skb(skb);
2972 return ret;
2973}
2974
2975static const struct bpf_func_proto sk_skb_change_tail_proto = {
2976 .func = sk_skb_change_tail,
2977 .gpl_only = false,
2978 .ret_type = RET_INTEGER,
2979 .arg1_type = ARG_PTR_TO_CTX,
2980 .arg2_type = ARG_ANYTHING,
2981 .arg3_type = ARG_ANYTHING,
2982};
2983
2984static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
2985 u64 flags)
2986{
2920 u32 max_len = __bpf_skb_max_len(skb); 2987 u32 max_len = __bpf_skb_max_len(skb);
2921 u32 new_len = skb->len + head_room; 2988 u32 new_len = skb->len + head_room;
2922 int ret; 2989 int ret;
@@ -2941,8 +3008,16 @@ BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
2941 skb_reset_mac_header(skb); 3008 skb_reset_mac_header(skb);
2942 } 3009 }
2943 3010
3011 return ret;
3012}
3013
3014BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
3015 u64, flags)
3016{
3017 int ret = __bpf_skb_change_head(skb, head_room, flags);
3018
2944 bpf_compute_data_pointers(skb); 3019 bpf_compute_data_pointers(skb);
2945 return 0; 3020 return ret;
2946} 3021}
2947 3022
2948static const struct bpf_func_proto bpf_skb_change_head_proto = { 3023static const struct bpf_func_proto bpf_skb_change_head_proto = {
@@ -2954,6 +3029,23 @@ static const struct bpf_func_proto bpf_skb_change_head_proto = {
2954 .arg3_type = ARG_ANYTHING, 3029 .arg3_type = ARG_ANYTHING,
2955}; 3030};
2956 3031
3032BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
3033 u64, flags)
3034{
3035 int ret = __bpf_skb_change_head(skb, head_room, flags);
3036
3037 bpf_compute_data_end_sk_skb(skb);
3038 return ret;
3039}
3040
3041static const struct bpf_func_proto sk_skb_change_head_proto = {
3042 .func = sk_skb_change_head,
3043 .gpl_only = false,
3044 .ret_type = RET_INTEGER,
3045 .arg1_type = ARG_PTR_TO_CTX,
3046 .arg2_type = ARG_ANYTHING,
3047 .arg3_type = ARG_ANYTHING,
3048};
2957static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) 3049static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
2958{ 3050{
2959 return xdp_data_meta_unsupported(xdp) ? 0 : 3051 return xdp_data_meta_unsupported(xdp) ? 0 :
@@ -3046,12 +3138,16 @@ static int __bpf_tx_xdp(struct net_device *dev,
3046 u32 index) 3138 u32 index)
3047{ 3139{
3048 struct xdp_frame *xdpf; 3140 struct xdp_frame *xdpf;
3049 int sent; 3141 int err, sent;
3050 3142
3051 if (!dev->netdev_ops->ndo_xdp_xmit) { 3143 if (!dev->netdev_ops->ndo_xdp_xmit) {
3052 return -EOPNOTSUPP; 3144 return -EOPNOTSUPP;
3053 } 3145 }
3054 3146
3147 err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
3148 if (unlikely(err))
3149 return err;
3150
3055 xdpf = convert_to_xdp_frame(xdp); 3151 xdpf = convert_to_xdp_frame(xdp);
3056 if (unlikely(!xdpf)) 3152 if (unlikely(!xdpf))
3057 return -EOVERFLOW; 3153 return -EOVERFLOW;
@@ -3285,7 +3381,8 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
3285 goto err; 3381 goto err;
3286 } 3382 }
3287 3383
3288 if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd)))) 3384 err = xdp_ok_fwd_dev(fwd, skb->len);
3385 if (unlikely(err))
3289 goto err; 3386 goto err;
3290 3387
3291 skb->dev = fwd; 3388 skb->dev = fwd;
@@ -4439,10 +4536,10 @@ static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
4439 .arg4_type = ARG_CONST_SIZE 4536 .arg4_type = ARG_CONST_SIZE
4440}; 4537};
4441 4538
4539#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4442BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, 4540BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
4443 const void *, from, u32, len) 4541 const void *, from, u32, len)
4444{ 4542{
4445#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4446 struct seg6_bpf_srh_state *srh_state = 4543 struct seg6_bpf_srh_state *srh_state =
4447 this_cpu_ptr(&seg6_bpf_srh_states); 4544 this_cpu_ptr(&seg6_bpf_srh_states);
4448 void *srh_tlvs, *srh_end, *ptr; 4545 void *srh_tlvs, *srh_end, *ptr;
@@ -4468,9 +4565,6 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
4468 4565
4469 memcpy(skb->data + offset, from, len); 4566 memcpy(skb->data + offset, from, len);
4470 return 0; 4567 return 0;
4471#else /* CONFIG_IPV6_SEG6_BPF */
4472 return -EOPNOTSUPP;
4473#endif
4474} 4568}
4475 4569
4476static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { 4570static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
@@ -4486,7 +4580,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
4486BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, 4580BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
4487 u32, action, void *, param, u32, param_len) 4581 u32, action, void *, param, u32, param_len)
4488{ 4582{
4489#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4490 struct seg6_bpf_srh_state *srh_state = 4583 struct seg6_bpf_srh_state *srh_state =
4491 this_cpu_ptr(&seg6_bpf_srh_states); 4584 this_cpu_ptr(&seg6_bpf_srh_states);
4492 struct ipv6_sr_hdr *srh; 4585 struct ipv6_sr_hdr *srh;
@@ -4534,9 +4627,6 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
4534 default: 4627 default:
4535 return -EINVAL; 4628 return -EINVAL;
4536 } 4629 }
4537#else /* CONFIG_IPV6_SEG6_BPF */
4538 return -EOPNOTSUPP;
4539#endif
4540} 4630}
4541 4631
4542static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { 4632static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
@@ -4552,7 +4642,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
4552BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, 4642BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
4553 s32, len) 4643 s32, len)
4554{ 4644{
4555#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4556 struct seg6_bpf_srh_state *srh_state = 4645 struct seg6_bpf_srh_state *srh_state =
4557 this_cpu_ptr(&seg6_bpf_srh_states); 4646 this_cpu_ptr(&seg6_bpf_srh_states);
4558 void *srh_end, *srh_tlvs, *ptr; 4647 void *srh_end, *srh_tlvs, *ptr;
@@ -4596,9 +4685,6 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
4596 srh_state->hdrlen += len; 4685 srh_state->hdrlen += len;
4597 srh_state->valid = 0; 4686 srh_state->valid = 0;
4598 return 0; 4687 return 0;
4599#else /* CONFIG_IPV6_SEG6_BPF */
4600 return -EOPNOTSUPP;
4601#endif
4602} 4688}
4603 4689
4604static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { 4690static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
@@ -4609,6 +4695,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
4609 .arg2_type = ARG_ANYTHING, 4695 .arg2_type = ARG_ANYTHING,
4610 .arg3_type = ARG_ANYTHING, 4696 .arg3_type = ARG_ANYTHING,
4611}; 4697};
4698#endif /* CONFIG_IPV6_SEG6_BPF */
4612 4699
4613bool bpf_helper_changes_pkt_data(void *func) 4700bool bpf_helper_changes_pkt_data(void *func)
4614{ 4701{
@@ -4617,9 +4704,12 @@ bool bpf_helper_changes_pkt_data(void *func)
4617 func == bpf_skb_store_bytes || 4704 func == bpf_skb_store_bytes ||
4618 func == bpf_skb_change_proto || 4705 func == bpf_skb_change_proto ||
4619 func == bpf_skb_change_head || 4706 func == bpf_skb_change_head ||
4707 func == sk_skb_change_head ||
4620 func == bpf_skb_change_tail || 4708 func == bpf_skb_change_tail ||
4709 func == sk_skb_change_tail ||
4621 func == bpf_skb_adjust_room || 4710 func == bpf_skb_adjust_room ||
4622 func == bpf_skb_pull_data || 4711 func == bpf_skb_pull_data ||
4712 func == sk_skb_pull_data ||
4623 func == bpf_clone_redirect || 4713 func == bpf_clone_redirect ||
4624 func == bpf_l3_csum_replace || 4714 func == bpf_l3_csum_replace ||
4625 func == bpf_l4_csum_replace || 4715 func == bpf_l4_csum_replace ||
@@ -4627,11 +4717,12 @@ bool bpf_helper_changes_pkt_data(void *func)
4627 func == bpf_xdp_adjust_meta || 4717 func == bpf_xdp_adjust_meta ||
4628 func == bpf_msg_pull_data || 4718 func == bpf_msg_pull_data ||
4629 func == bpf_xdp_adjust_tail || 4719 func == bpf_xdp_adjust_tail ||
4630 func == bpf_lwt_push_encap || 4720#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4631 func == bpf_lwt_seg6_store_bytes || 4721 func == bpf_lwt_seg6_store_bytes ||
4632 func == bpf_lwt_seg6_adjust_srh || 4722 func == bpf_lwt_seg6_adjust_srh ||
4633 func == bpf_lwt_seg6_action 4723 func == bpf_lwt_seg6_action ||
4634 ) 4724#endif
4725 func == bpf_lwt_push_encap)
4635 return true; 4726 return true;
4636 4727
4637 return false; 4728 return false;
@@ -4871,11 +4962,11 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4871 case BPF_FUNC_skb_load_bytes: 4962 case BPF_FUNC_skb_load_bytes:
4872 return &bpf_skb_load_bytes_proto; 4963 return &bpf_skb_load_bytes_proto;
4873 case BPF_FUNC_skb_pull_data: 4964 case BPF_FUNC_skb_pull_data:
4874 return &bpf_skb_pull_data_proto; 4965 return &sk_skb_pull_data_proto;
4875 case BPF_FUNC_skb_change_tail: 4966 case BPF_FUNC_skb_change_tail:
4876 return &bpf_skb_change_tail_proto; 4967 return &sk_skb_change_tail_proto;
4877 case BPF_FUNC_skb_change_head: 4968 case BPF_FUNC_skb_change_head:
4878 return &bpf_skb_change_head_proto; 4969 return &sk_skb_change_head_proto;
4879 case BPF_FUNC_get_socket_cookie: 4970 case BPF_FUNC_get_socket_cookie:
4880 return &bpf_get_socket_cookie_proto; 4971 return &bpf_get_socket_cookie_proto;
4881 case BPF_FUNC_get_socket_uid: 4972 case BPF_FUNC_get_socket_uid:
@@ -4966,12 +5057,14 @@ static const struct bpf_func_proto *
4966lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5057lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4967{ 5058{
4968 switch (func_id) { 5059 switch (func_id) {
5060#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4969 case BPF_FUNC_lwt_seg6_store_bytes: 5061 case BPF_FUNC_lwt_seg6_store_bytes:
4970 return &bpf_lwt_seg6_store_bytes_proto; 5062 return &bpf_lwt_seg6_store_bytes_proto;
4971 case BPF_FUNC_lwt_seg6_action: 5063 case BPF_FUNC_lwt_seg6_action:
4972 return &bpf_lwt_seg6_action_proto; 5064 return &bpf_lwt_seg6_action_proto;
4973 case BPF_FUNC_lwt_seg6_adjust_srh: 5065 case BPF_FUNC_lwt_seg6_adjust_srh:
4974 return &bpf_lwt_seg6_adjust_srh_proto; 5066 return &bpf_lwt_seg6_adjust_srh_proto;
5067#endif
4975 default: 5068 default:
4976 return lwt_out_func_proto(func_id, prog); 5069 return lwt_out_func_proto(func_id, prog);
4977 } 5070 }
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index b2b2323bdc84..188d693cb251 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
77 d->lock = lock; 77 d->lock = lock;
78 spin_lock_bh(lock); 78 spin_lock_bh(lock);
79 } 79 }
80 if (d->tail) 80 if (d->tail) {
81 return gnet_stats_copy(d, type, NULL, 0, padattr); 81 int ret = gnet_stats_copy(d, type, NULL, 0, padattr);
82
83 /* The initial attribute added in gnet_stats_copy() may be
84 * preceded by a padding attribute, in which case d->tail will
85 * end up pointing at the padding instead of the real attribute.
86 * Fix this so gnet_stats_finish_copy() adjusts the length of
87 * the right attribute.
88 */
89 if (ret == 0 && d->tail->nla_type == padattr)
90 d->tail = (struct nlattr *)((char *)d->tail +
91 NLA_ALIGN(d->tail->nla_len));
92 return ret;
93 }
82 94
83 return 0; 95 return 0;
84} 96}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index eba8dae22c25..8e51f8555e11 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -858,6 +858,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
858 n->cloned = 1; 858 n->cloned = 1;
859 n->nohdr = 0; 859 n->nohdr = 0;
860 n->peeked = 0; 860 n->peeked = 0;
861 C(pfmemalloc);
861 n->destructor = NULL; 862 n->destructor = NULL;
862 C(tail); 863 C(tail);
863 C(end); 864 C(end);
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 40c851693f77..0c9478b91fa5 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -86,35 +86,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
86 opt++; 86 opt++;
87 kdebug("options: '%s'", opt); 87 kdebug("options: '%s'", opt);
88 do { 88 do {
89 int opt_len, opt_nlen;
89 const char *eq; 90 const char *eq;
90 int opt_len, opt_nlen, opt_vlen, tmp; 91 char optval[128];
91 92
92 next_opt = memchr(opt, '#', end - opt) ?: end; 93 next_opt = memchr(opt, '#', end - opt) ?: end;
93 opt_len = next_opt - opt; 94 opt_len = next_opt - opt;
94 if (opt_len <= 0 || opt_len > 128) { 95 if (opt_len <= 0 || opt_len > sizeof(optval)) {
95 pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", 96 pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
96 opt_len); 97 opt_len);
97 return -EINVAL; 98 return -EINVAL;
98 } 99 }
99 100
100 eq = memchr(opt, '=', opt_len) ?: end; 101 eq = memchr(opt, '=', opt_len);
101 opt_nlen = eq - opt; 102 if (eq) {
102 eq++; 103 opt_nlen = eq - opt;
103 opt_vlen = next_opt - eq; /* will be -1 if no value */ 104 eq++;
105 memcpy(optval, eq, next_opt - eq);
106 optval[next_opt - eq] = '\0';
107 } else {
108 opt_nlen = opt_len;
109 optval[0] = '\0';
110 }
104 111
105 tmp = opt_vlen >= 0 ? opt_vlen : 0; 112 kdebug("option '%*.*s' val '%s'",
106 kdebug("option '%*.*s' val '%*.*s'", 113 opt_nlen, opt_nlen, opt, optval);
107 opt_nlen, opt_nlen, opt, tmp, tmp, eq);
108 114
109 /* see if it's an error number representing a DNS error 115 /* see if it's an error number representing a DNS error
110 * that's to be recorded as the result in this key */ 116 * that's to be recorded as the result in this key */
111 if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && 117 if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
112 memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { 118 memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
113 kdebug("dns error number option"); 119 kdebug("dns error number option");
114 if (opt_vlen <= 0)
115 goto bad_option_value;
116 120
117 ret = kstrtoul(eq, 10, &derrno); 121 ret = kstrtoul(optval, 10, &derrno);
118 if (ret < 0) 122 if (ret < 0)
119 goto bad_option_value; 123 goto bad_option_value;
120 124
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 275449b0d633..3297e7fa9945 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -90,12 +90,18 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
90 return 0; 90 return 0;
91} 91}
92 92
93static int lowpan_get_iflink(const struct net_device *dev)
94{
95 return lowpan_802154_dev(dev)->wdev->ifindex;
96}
97
93static const struct net_device_ops lowpan_netdev_ops = { 98static const struct net_device_ops lowpan_netdev_ops = {
94 .ndo_init = lowpan_dev_init, 99 .ndo_init = lowpan_dev_init,
95 .ndo_start_xmit = lowpan_xmit, 100 .ndo_start_xmit = lowpan_xmit,
96 .ndo_open = lowpan_open, 101 .ndo_open = lowpan_open,
97 .ndo_stop = lowpan_stop, 102 .ndo_stop = lowpan_stop,
98 .ndo_neigh_construct = lowpan_neigh_construct, 103 .ndo_neigh_construct = lowpan_neigh_construct,
104 .ndo_get_iflink = lowpan_get_iflink,
99}; 105};
100 106
101static void lowpan_setup(struct net_device *ldev) 107static void lowpan_setup(struct net_device *ldev)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index b21833651394..e46cdd310e5f 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -300,6 +300,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
300 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { 300 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
301 struct flowi4 fl4 = { 301 struct flowi4 fl4 = {
302 .flowi4_iif = LOOPBACK_IFINDEX, 302 .flowi4_iif = LOOPBACK_IFINDEX,
303 .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
303 .daddr = ip_hdr(skb)->saddr, 304 .daddr = ip_hdr(skb)->saddr,
304 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), 305 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
305 .flowi4_scope = scope, 306 .flowi4_scope = scope,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 85b617b655bc..b3c899a630a0 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1200,13 +1200,14 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1200 spin_lock_bh(&im->lock); 1200 spin_lock_bh(&im->lock);
1201 if (pmc) { 1201 if (pmc) {
1202 im->interface = pmc->interface; 1202 im->interface = pmc->interface;
1203 im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
1204 im->sfmode = pmc->sfmode; 1203 im->sfmode = pmc->sfmode;
1205 if (pmc->sfmode == MCAST_INCLUDE) { 1204 if (pmc->sfmode == MCAST_INCLUDE) {
1206 im->tomb = pmc->tomb; 1205 im->tomb = pmc->tomb;
1207 im->sources = pmc->sources; 1206 im->sources = pmc->sources;
1208 for (psf = im->sources; psf; psf = psf->sf_next) 1207 for (psf = im->sources; psf; psf = psf->sf_next)
1209 psf->sf_crcount = im->crcount; 1208 psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
1209 } else {
1210 im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
1210 } 1211 }
1211 in_dev_put(pmc->interface); 1212 in_dev_put(pmc->interface);
1212 kfree(pmc); 1213 kfree(pmc);
@@ -1288,7 +1289,7 @@ static void igmp_group_dropped(struct ip_mc_list *im)
1288#endif 1289#endif
1289} 1290}
1290 1291
1291static void igmp_group_added(struct ip_mc_list *im) 1292static void igmp_group_added(struct ip_mc_list *im, unsigned int mode)
1292{ 1293{
1293 struct in_device *in_dev = im->interface; 1294 struct in_device *in_dev = im->interface;
1294#ifdef CONFIG_IP_MULTICAST 1295#ifdef CONFIG_IP_MULTICAST
@@ -1316,7 +1317,13 @@ static void igmp_group_added(struct ip_mc_list *im)
1316 } 1317 }
1317 /* else, v3 */ 1318 /* else, v3 */
1318 1319
1319 im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; 1320 /* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should
1321 * not send filter-mode change record as the mode should be from
1322 * IN() to IN(A).
1323 */
1324 if (mode == MCAST_EXCLUDE)
1325 im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
1326
1320 igmp_ifc_event(in_dev); 1327 igmp_ifc_event(in_dev);
1321#endif 1328#endif
1322} 1329}
@@ -1381,8 +1388,7 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
1381/* 1388/*
1382 * A socket has joined a multicast group on device dev. 1389 * A socket has joined a multicast group on device dev.
1383 */ 1390 */
1384 1391void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode)
1385void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1386{ 1392{
1387 struct ip_mc_list *im; 1393 struct ip_mc_list *im;
1388#ifdef CONFIG_IP_MULTICAST 1394#ifdef CONFIG_IP_MULTICAST
@@ -1394,7 +1400,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1394 for_each_pmc_rtnl(in_dev, im) { 1400 for_each_pmc_rtnl(in_dev, im) {
1395 if (im->multiaddr == addr) { 1401 if (im->multiaddr == addr) {
1396 im->users++; 1402 im->users++;
1397 ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0); 1403 ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
1398 goto out; 1404 goto out;
1399 } 1405 }
1400 } 1406 }
@@ -1408,8 +1414,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1408 in_dev_hold(in_dev); 1414 in_dev_hold(in_dev);
1409 im->multiaddr = addr; 1415 im->multiaddr = addr;
1410 /* initial mode is (EX, empty) */ 1416 /* initial mode is (EX, empty) */
1411 im->sfmode = MCAST_EXCLUDE; 1417 im->sfmode = mode;
1412 im->sfcount[MCAST_EXCLUDE] = 1; 1418 im->sfcount[mode] = 1;
1413 refcount_set(&im->refcnt, 1); 1419 refcount_set(&im->refcnt, 1);
1414 spin_lock_init(&im->lock); 1420 spin_lock_init(&im->lock);
1415#ifdef CONFIG_IP_MULTICAST 1421#ifdef CONFIG_IP_MULTICAST
@@ -1426,12 +1432,17 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1426#ifdef CONFIG_IP_MULTICAST 1432#ifdef CONFIG_IP_MULTICAST
1427 igmpv3_del_delrec(in_dev, im); 1433 igmpv3_del_delrec(in_dev, im);
1428#endif 1434#endif
1429 igmp_group_added(im); 1435 igmp_group_added(im, mode);
1430 if (!in_dev->dead) 1436 if (!in_dev->dead)
1431 ip_rt_multicast_event(in_dev); 1437 ip_rt_multicast_event(in_dev);
1432out: 1438out:
1433 return; 1439 return;
1434} 1440}
1441
1442void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1443{
1444 __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
1445}
1435EXPORT_SYMBOL(ip_mc_inc_group); 1446EXPORT_SYMBOL(ip_mc_inc_group);
1436 1447
1437static int ip_mc_check_iphdr(struct sk_buff *skb) 1448static int ip_mc_check_iphdr(struct sk_buff *skb)
@@ -1688,7 +1699,7 @@ void ip_mc_remap(struct in_device *in_dev)
1688#ifdef CONFIG_IP_MULTICAST 1699#ifdef CONFIG_IP_MULTICAST
1689 igmpv3_del_delrec(in_dev, pmc); 1700 igmpv3_del_delrec(in_dev, pmc);
1690#endif 1701#endif
1691 igmp_group_added(pmc); 1702 igmp_group_added(pmc, pmc->sfmode);
1692 } 1703 }
1693} 1704}
1694 1705
@@ -1751,7 +1762,7 @@ void ip_mc_up(struct in_device *in_dev)
1751#ifdef CONFIG_IP_MULTICAST 1762#ifdef CONFIG_IP_MULTICAST
1752 igmpv3_del_delrec(in_dev, pmc); 1763 igmpv3_del_delrec(in_dev, pmc);
1753#endif 1764#endif
1754 igmp_group_added(pmc); 1765 igmp_group_added(pmc, pmc->sfmode);
1755 } 1766 }
1756} 1767}
1757 1768
@@ -2130,8 +2141,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
2130 2141
2131/* Join a multicast group 2142/* Join a multicast group
2132 */ 2143 */
2133 2144static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
2134int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) 2145 unsigned int mode)
2135{ 2146{
2136 __be32 addr = imr->imr_multiaddr.s_addr; 2147 __be32 addr = imr->imr_multiaddr.s_addr;
2137 struct ip_mc_socklist *iml, *i; 2148 struct ip_mc_socklist *iml, *i;
@@ -2172,15 +2183,30 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
2172 memcpy(&iml->multi, imr, sizeof(*imr)); 2183 memcpy(&iml->multi, imr, sizeof(*imr));
2173 iml->next_rcu = inet->mc_list; 2184 iml->next_rcu = inet->mc_list;
2174 iml->sflist = NULL; 2185 iml->sflist = NULL;
2175 iml->sfmode = MCAST_EXCLUDE; 2186 iml->sfmode = mode;
2176 rcu_assign_pointer(inet->mc_list, iml); 2187 rcu_assign_pointer(inet->mc_list, iml);
2177 ip_mc_inc_group(in_dev, addr); 2188 __ip_mc_inc_group(in_dev, addr, mode);
2178 err = 0; 2189 err = 0;
2179done: 2190done:
2180 return err; 2191 return err;
2181} 2192}
2193
2194/* Join ASM (Any-Source Multicast) group
2195 */
2196int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
2197{
2198 return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
2199}
2182EXPORT_SYMBOL(ip_mc_join_group); 2200EXPORT_SYMBOL(ip_mc_join_group);
2183 2201
2202/* Join SSM (Source-Specific Multicast) group
2203 */
2204int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
2205 unsigned int mode)
2206{
2207 return __ip_mc_join_group(sk, imr, mode);
2208}
2209
2184static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, 2210static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
2185 struct in_device *in_dev) 2211 struct in_device *in_dev)
2186{ 2212{
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index c9e35b81d093..1e4cf3ab560f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
90 90
91void inet_frags_exit_net(struct netns_frags *nf) 91void inet_frags_exit_net(struct netns_frags *nf)
92{ 92{
93 nf->low_thresh = 0; /* prevent creation of new frags */ 93 nf->high_thresh = 0; /* prevent creation of new frags */
94 94
95 rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL); 95 rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
96} 96}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fc32fdbeefa6..64c76dcf7386 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -984,7 +984,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
984 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr; 984 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
985 mreq.imr_address.s_addr = mreqs.imr_interface; 985 mreq.imr_address.s_addr = mreqs.imr_interface;
986 mreq.imr_ifindex = 0; 986 mreq.imr_ifindex = 0;
987 err = ip_mc_join_group(sk, &mreq); 987 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
988 if (err && err != -EADDRINUSE) 988 if (err && err != -EADDRINUSE)
989 break; 989 break;
990 omode = MCAST_INCLUDE; 990 omode = MCAST_INCLUDE;
@@ -1061,7 +1061,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
1061 mreq.imr_multiaddr = psin->sin_addr; 1061 mreq.imr_multiaddr = psin->sin_addr;
1062 mreq.imr_address.s_addr = 0; 1062 mreq.imr_address.s_addr = 0;
1063 mreq.imr_ifindex = greqs.gsr_interface; 1063 mreq.imr_ifindex = greqs.gsr_interface;
1064 err = ip_mc_join_group(sk, &mreq); 1064 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
1065 if (err && err != -EADDRINUSE) 1065 if (err && err != -EADDRINUSE)
1066 break; 1066 break;
1067 greqs.gsr_interface = mreq.imr_ifindex; 1067 greqs.gsr_interface = mreq.imr_ifindex;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index ca0dad90803a..e77872c93c20 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1898,6 +1898,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = {
1898 .checkentry = icmp_checkentry, 1898 .checkentry = icmp_checkentry,
1899 .proto = IPPROTO_ICMP, 1899 .proto = IPPROTO_ICMP,
1900 .family = NFPROTO_IPV4, 1900 .family = NFPROTO_IPV4,
1901 .me = THIS_MODULE,
1901 }, 1902 },
1902}; 1903};
1903 1904
diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
index 805e83ec3ad9..164714104965 100644
--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
+++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
@@ -37,7 +37,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
37 * to a listener socket if there's one */ 37 * to a listener socket if there's one */
38 struct sock *sk2; 38 struct sock *sk2;
39 39
40 sk2 = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol, 40 sk2 = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
41 iph->saddr, laddr ? laddr : iph->daddr, 41 iph->saddr, laddr ? laddr : iph->daddr,
42 hp->source, lport ? lport : hp->dest, 42 hp->source, lport ? lport : hp->dest,
43 skb->dev, NF_TPROXY_LOOKUP_LISTENER); 43 skb->dev, NF_TPROXY_LOOKUP_LISTENER);
@@ -71,7 +71,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
71EXPORT_SYMBOL_GPL(nf_tproxy_laddr4); 71EXPORT_SYMBOL_GPL(nf_tproxy_laddr4);
72 72
73struct sock * 73struct sock *
74nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, 74nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
75 const u8 protocol, 75 const u8 protocol,
76 const __be32 saddr, const __be32 daddr, 76 const __be32 saddr, const __be32 daddr,
77 const __be16 sport, const __be16 dport, 77 const __be16 sport, const __be16 dport,
@@ -79,16 +79,21 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
79 const enum nf_tproxy_lookup_t lookup_type) 79 const enum nf_tproxy_lookup_t lookup_type)
80{ 80{
81 struct sock *sk; 81 struct sock *sk;
82 struct tcphdr *tcph;
83 82
84 switch (protocol) { 83 switch (protocol) {
85 case IPPROTO_TCP: 84 case IPPROTO_TCP: {
85 struct tcphdr _hdr, *hp;
86
87 hp = skb_header_pointer(skb, ip_hdrlen(skb),
88 sizeof(struct tcphdr), &_hdr);
89 if (hp == NULL)
90 return NULL;
91
86 switch (lookup_type) { 92 switch (lookup_type) {
87 case NF_TPROXY_LOOKUP_LISTENER: 93 case NF_TPROXY_LOOKUP_LISTENER:
88 tcph = hp;
89 sk = inet_lookup_listener(net, &tcp_hashinfo, skb, 94 sk = inet_lookup_listener(net, &tcp_hashinfo, skb,
90 ip_hdrlen(skb) + 95 ip_hdrlen(skb) +
91 __tcp_hdrlen(tcph), 96 __tcp_hdrlen(hp),
92 saddr, sport, 97 saddr, sport,
93 daddr, dport, 98 daddr, dport,
94 in->ifindex, 0); 99 in->ifindex, 0);
@@ -110,6 +115,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
110 BUG(); 115 BUG();
111 } 116 }
112 break; 117 break;
118 }
113 case IPPROTO_UDP: 119 case IPPROTO_UDP:
114 sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, 120 sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
115 in->ifindex); 121 in->ifindex);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index af0a857d8352..5fa335fd3852 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -189,8 +189,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
189 if (write && ret == 0) { 189 if (write && ret == 0) {
190 low = make_kgid(user_ns, urange[0]); 190 low = make_kgid(user_ns, urange[0]);
191 high = make_kgid(user_ns, urange[1]); 191 high = make_kgid(user_ns, urange[1]);
192 if (!gid_valid(low) || !gid_valid(high) || 192 if (!gid_valid(low) || !gid_valid(high))
193 (urange[1] < urange[0]) || gid_lt(high, low)) { 193 return -EINVAL;
194 if (urange[1] < urange[0] || gid_lt(high, low)) {
194 low = make_kgid(&init_user_ns, 1); 195 low = make_kgid(&init_user_ns, 1);
195 high = make_kgid(&init_user_ns, 0); 196 high = make_kgid(&init_user_ns, 0);
196 } 197 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e7b53d2a971f..4491faf83f4f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1998,7 +1998,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1998 * shouldn't happen. 1998 * shouldn't happen.
1999 */ 1999 */
2000 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 2000 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
2001 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", 2001 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
2002 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 2002 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
2003 flags)) 2003 flags))
2004 break; 2004 break;
@@ -2013,7 +2013,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
2013 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2013 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2014 goto found_fin_ok; 2014 goto found_fin_ok;
2015 WARN(!(flags & MSG_PEEK), 2015 WARN(!(flags & MSG_PEEK),
2016 "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", 2016 "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
2017 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 2017 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
2018 } 2018 }
2019 2019
@@ -2562,6 +2562,8 @@ int tcp_disconnect(struct sock *sk, int flags)
2562 2562
2563 tcp_clear_xmit_timers(sk); 2563 tcp_clear_xmit_timers(sk);
2564 __skb_queue_purge(&sk->sk_receive_queue); 2564 __skb_queue_purge(&sk->sk_receive_queue);
2565 tp->copied_seq = tp->rcv_nxt;
2566 tp->urg_data = 0;
2565 tcp_write_queue_purge(sk); 2567 tcp_write_queue_purge(sk);
2566 tcp_fastopen_active_disable_ofo_check(sk); 2568 tcp_fastopen_active_disable_ofo_check(sk);
2567 skb_rbtree_purge(&tp->out_of_order_queue); 2569 skb_rbtree_purge(&tp->out_of_order_queue);
@@ -2821,14 +2823,17 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2821 case TCP_REPAIR: 2823 case TCP_REPAIR:
2822 if (!tcp_can_repair_sock(sk)) 2824 if (!tcp_can_repair_sock(sk))
2823 err = -EPERM; 2825 err = -EPERM;
2824 else if (val == 1) { 2826 else if (val == TCP_REPAIR_ON) {
2825 tp->repair = 1; 2827 tp->repair = 1;
2826 sk->sk_reuse = SK_FORCE_REUSE; 2828 sk->sk_reuse = SK_FORCE_REUSE;
2827 tp->repair_queue = TCP_NO_QUEUE; 2829 tp->repair_queue = TCP_NO_QUEUE;
2828 } else if (val == 0) { 2830 } else if (val == TCP_REPAIR_OFF) {
2829 tp->repair = 0; 2831 tp->repair = 0;
2830 sk->sk_reuse = SK_NO_REUSE; 2832 sk->sk_reuse = SK_NO_REUSE;
2831 tcp_send_window_probe(sk); 2833 tcp_send_window_probe(sk);
2834 } else if (val == TCP_REPAIR_OFF_NO_WP) {
2835 tp->repair = 0;
2836 sk->sk_reuse = SK_NO_REUSE;
2832 } else 2837 } else
2833 err = -EINVAL; 2838 err = -EINVAL;
2834 2839
@@ -3720,8 +3725,7 @@ int tcp_abort(struct sock *sk, int err)
3720 struct request_sock *req = inet_reqsk(sk); 3725 struct request_sock *req = inet_reqsk(sk);
3721 3726
3722 local_bh_disable(); 3727 local_bh_disable();
3723 inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, 3728 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
3724 req);
3725 local_bh_enable(); 3729 local_bh_enable();
3726 return 0; 3730 return 0;
3727 } 3731 }
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 5f5e5936760e..5869f89ca656 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -55,7 +55,6 @@ struct dctcp {
55 u32 dctcp_alpha; 55 u32 dctcp_alpha;
56 u32 next_seq; 56 u32 next_seq;
57 u32 ce_state; 57 u32 ce_state;
58 u32 delayed_ack_reserved;
59 u32 loss_cwnd; 58 u32 loss_cwnd;
60}; 59};
61 60
@@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk)
96 95
97 ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); 96 ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
98 97
99 ca->delayed_ack_reserved = 0;
100 ca->loss_cwnd = 0; 98 ca->loss_cwnd = 0;
101 ca->ce_state = 0; 99 ca->ce_state = 0;
102 100
@@ -134,7 +132,8 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
134 /* State has changed from CE=0 to CE=1 and delayed 132 /* State has changed from CE=0 to CE=1 and delayed
135 * ACK has not sent yet. 133 * ACK has not sent yet.
136 */ 134 */
137 if (!ca->ce_state && ca->delayed_ack_reserved) { 135 if (!ca->ce_state &&
136 inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
138 u32 tmp_rcv_nxt; 137 u32 tmp_rcv_nxt;
139 138
140 /* Save current rcv_nxt. */ 139 /* Save current rcv_nxt. */
@@ -164,7 +163,8 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
164 /* State has changed from CE=1 to CE=0 and delayed 163 /* State has changed from CE=1 to CE=0 and delayed
165 * ACK has not sent yet. 164 * ACK has not sent yet.
166 */ 165 */
167 if (ca->ce_state && ca->delayed_ack_reserved) { 166 if (ca->ce_state &&
167 inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
168 u32 tmp_rcv_nxt; 168 u32 tmp_rcv_nxt;
169 169
170 /* Save current rcv_nxt. */ 170 /* Save current rcv_nxt. */
@@ -248,25 +248,6 @@ static void dctcp_state(struct sock *sk, u8 new_state)
248 } 248 }
249} 249}
250 250
251static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev)
252{
253 struct dctcp *ca = inet_csk_ca(sk);
254
255 switch (ev) {
256 case CA_EVENT_DELAYED_ACK:
257 if (!ca->delayed_ack_reserved)
258 ca->delayed_ack_reserved = 1;
259 break;
260 case CA_EVENT_NON_DELAYED_ACK:
261 if (ca->delayed_ack_reserved)
262 ca->delayed_ack_reserved = 0;
263 break;
264 default:
265 /* Don't care for the rest. */
266 break;
267 }
268}
269
270static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) 251static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
271{ 252{
272 switch (ev) { 253 switch (ev) {
@@ -276,10 +257,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
276 case CA_EVENT_ECN_NO_CE: 257 case CA_EVENT_ECN_NO_CE:
277 dctcp_ce_state_1_to_0(sk); 258 dctcp_ce_state_1_to_0(sk);
278 break; 259 break;
279 case CA_EVENT_DELAYED_ACK:
280 case CA_EVENT_NON_DELAYED_ACK:
281 dctcp_update_ack_reserved(sk, ev);
282 break;
283 default: 260 default:
284 /* Don't care for the rest. */ 261 /* Don't care for the rest. */
285 break; 262 break;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bea17f1e8302..3b2711e33e4c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -156,11 +156,24 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
156 */ 156 */
157 if (tcptw->tw_ts_recent_stamp && 157 if (tcptw->tw_ts_recent_stamp &&
158 (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { 158 (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
159 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; 159 /* In case of repair and re-using TIME-WAIT sockets we still
160 if (tp->write_seq == 0) 160 * want to be sure that it is safe as above but honor the
161 tp->write_seq = 1; 161 * sequence numbers and time stamps set as part of the repair
162 tp->rx_opt.ts_recent = tcptw->tw_ts_recent; 162 * process.
163 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; 163 *
164 * Without this check re-using a TIME-WAIT socket with TCP
165 * repair would accumulate a -1 on the repair assigned
166 * sequence number. The first time it is reused the sequence
167 * is -1, the second time -2, etc. This fixes that issue
168 * without appearing to create any others.
169 */
170 if (likely(!tp->repair)) {
171 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
172 if (tp->write_seq == 0)
173 tp->write_seq = 1;
174 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
175 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
176 }
164 sock_hold(sktw); 177 sock_hold(sktw);
165 return 1; 178 return 1;
166 } 179 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8e08b409c71e..00e5a300ddb9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3523,8 +3523,6 @@ void tcp_send_delayed_ack(struct sock *sk)
3523 int ato = icsk->icsk_ack.ato; 3523 int ato = icsk->icsk_ack.ato;
3524 unsigned long timeout; 3524 unsigned long timeout;
3525 3525
3526 tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
3527
3528 if (ato > TCP_DELACK_MIN) { 3526 if (ato > TCP_DELACK_MIN) {
3529 const struct tcp_sock *tp = tcp_sk(sk); 3527 const struct tcp_sock *tp = tcp_sk(sk);
3530 int max_ato = HZ / 2; 3528 int max_ato = HZ / 2;
@@ -3581,8 +3579,6 @@ void tcp_send_ack(struct sock *sk)
3581 if (sk->sk_state == TCP_CLOSE) 3579 if (sk->sk_state == TCP_CLOSE)
3582 return; 3580 return;
3583 3581
3584 tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
3585
3586 /* We are not putting this on the write queue, so 3582 /* We are not putting this on the write queue, so
3587 * tcp_transmit_skb() will set the ownership to this 3583 * tcp_transmit_skb() will set the ownership to this
3588 * sock. 3584 * sock.
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 0eff75525da1..b3885ca22d6f 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -108,6 +108,7 @@ config IPV6_MIP6
108config IPV6_ILA 108config IPV6_ILA
109 tristate "IPv6: Identifier Locator Addressing (ILA)" 109 tristate "IPv6: Identifier Locator Addressing (ILA)"
110 depends on NETFILTER 110 depends on NETFILTER
111 select DST_CACHE
111 select LWTUNNEL 112 select LWTUNNEL
112 ---help--- 113 ---help---
113 Support for IPv6 Identifier Locator Addressing (ILA). 114 Support for IPv6 Identifier Locator Addressing (ILA).
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 1323b9679cf7..1c0bb9fb76e6 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -799,8 +799,7 @@ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop)
799{ 799{
800 struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts; 800 struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts;
801 801
802 txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS, 802 txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop);
803 hop, hop ? ipv6_optlen(hop) : 0);
804 txopt_put(old); 803 txopt_put(old);
805 if (IS_ERR(txopts)) 804 if (IS_ERR(txopts))
806 return PTR_ERR(txopts); 805 return PTR_ERR(txopts);
@@ -1222,8 +1221,7 @@ static int calipso_req_setattr(struct request_sock *req,
1222 if (IS_ERR(new)) 1221 if (IS_ERR(new))
1223 return PTR_ERR(new); 1222 return PTR_ERR(new);
1224 1223
1225 txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, 1224 txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
1226 new, new ? ipv6_optlen(new) : 0);
1227 1225
1228 kfree(new); 1226 kfree(new);
1229 1227
@@ -1260,8 +1258,7 @@ static void calipso_req_delattr(struct request_sock *req)
1260 if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new)) 1258 if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new))
1261 return; /* Nothing to do */ 1259 return; /* Nothing to do */
1262 1260
1263 txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, 1261 txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
1264 new, new ? ipv6_optlen(new) : 0);
1265 1262
1266 if (!IS_ERR(txopts)) { 1263 if (!IS_ERR(txopts)) {
1267 txopts = xchg(&req_inet->ipv6_opt, txopts); 1264 txopts = xchg(&req_inet->ipv6_opt, txopts);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 5bc2bf3733ab..20291c2036fc 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -1015,29 +1015,21 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
1015} 1015}
1016EXPORT_SYMBOL_GPL(ipv6_dup_options); 1016EXPORT_SYMBOL_GPL(ipv6_dup_options);
1017 1017
1018static int ipv6_renew_option(void *ohdr, 1018static void ipv6_renew_option(int renewtype,
1019 struct ipv6_opt_hdr __user *newopt, int newoptlen, 1019 struct ipv6_opt_hdr **dest,
1020 int inherit, 1020 struct ipv6_opt_hdr *old,
1021 struct ipv6_opt_hdr **hdr, 1021 struct ipv6_opt_hdr *new,
1022 char **p) 1022 int newtype, char **p)
1023{ 1023{
1024 if (inherit) { 1024 struct ipv6_opt_hdr *src;
1025 if (ohdr) { 1025
1026 memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr)); 1026 src = (renewtype == newtype ? new : old);
1027 *hdr = (struct ipv6_opt_hdr *)*p; 1027 if (!src)
1028 *p += CMSG_ALIGN(ipv6_optlen(*hdr)); 1028 return;
1029 } 1029
1030 } else { 1030 memcpy(*p, src, ipv6_optlen(src));
1031 if (newopt) { 1031 *dest = (struct ipv6_opt_hdr *)*p;
1032 if (copy_from_user(*p, newopt, newoptlen)) 1032 *p += CMSG_ALIGN(ipv6_optlen(*dest));
1033 return -EFAULT;
1034 *hdr = (struct ipv6_opt_hdr *)*p;
1035 if (ipv6_optlen(*hdr) > newoptlen)
1036 return -EINVAL;
1037 *p += CMSG_ALIGN(newoptlen);
1038 }
1039 }
1040 return 0;
1041} 1033}
1042 1034
1043/** 1035/**
@@ -1063,13 +1055,11 @@ static int ipv6_renew_option(void *ohdr,
1063 */ 1055 */
1064struct ipv6_txoptions * 1056struct ipv6_txoptions *
1065ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, 1057ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
1066 int newtype, 1058 int newtype, struct ipv6_opt_hdr *newopt)
1067 struct ipv6_opt_hdr __user *newopt, int newoptlen)
1068{ 1059{
1069 int tot_len = 0; 1060 int tot_len = 0;
1070 char *p; 1061 char *p;
1071 struct ipv6_txoptions *opt2; 1062 struct ipv6_txoptions *opt2;
1072 int err;
1073 1063
1074 if (opt) { 1064 if (opt) {
1075 if (newtype != IPV6_HOPOPTS && opt->hopopt) 1065 if (newtype != IPV6_HOPOPTS && opt->hopopt)
@@ -1082,8 +1072,8 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
1082 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt)); 1072 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
1083 } 1073 }
1084 1074
1085 if (newopt && newoptlen) 1075 if (newopt)
1086 tot_len += CMSG_ALIGN(newoptlen); 1076 tot_len += CMSG_ALIGN(ipv6_optlen(newopt));
1087 1077
1088 if (!tot_len) 1078 if (!tot_len)
1089 return NULL; 1079 return NULL;
@@ -1098,29 +1088,19 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
1098 opt2->tot_len = tot_len; 1088 opt2->tot_len = tot_len;
1099 p = (char *)(opt2 + 1); 1089 p = (char *)(opt2 + 1);
1100 1090
1101 err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen, 1091 ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
1102 newtype != IPV6_HOPOPTS, 1092 (opt ? opt->hopopt : NULL),
1103 &opt2->hopopt, &p); 1093 newopt, newtype, &p);
1104 if (err) 1094 ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
1105 goto out; 1095 (opt ? opt->dst0opt : NULL),
1106 1096 newopt, newtype, &p);
1107 err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen, 1097 ipv6_renew_option(IPV6_RTHDR,
1108 newtype != IPV6_RTHDRDSTOPTS, 1098 (struct ipv6_opt_hdr **)&opt2->srcrt,
1109 &opt2->dst0opt, &p); 1099 (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
1110 if (err) 1100 newopt, newtype, &p);
1111 goto out; 1101 ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
1112 1102 (opt ? opt->dst1opt : NULL),
1113 err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen, 1103 newopt, newtype, &p);
1114 newtype != IPV6_RTHDR,
1115 (struct ipv6_opt_hdr **)&opt2->srcrt, &p);
1116 if (err)
1117 goto out;
1118
1119 err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
1120 newtype != IPV6_DSTOPTS,
1121 &opt2->dst1opt, &p);
1122 if (err)
1123 goto out;
1124 1104
1125 opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) + 1105 opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
1126 (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) + 1106 (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
@@ -1128,37 +1108,6 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
1128 opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0); 1108 opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
1129 1109
1130 return opt2; 1110 return opt2;
1131out:
1132 sock_kfree_s(sk, opt2, opt2->tot_len);
1133 return ERR_PTR(err);
1134}
1135
1136/**
1137 * ipv6_renew_options_kern - replace a specific ext hdr with a new one.
1138 *
1139 * @sk: sock from which to allocate memory
1140 * @opt: original options
1141 * @newtype: option type to replace in @opt
1142 * @newopt: new option of type @newtype to replace (kernel-mem)
1143 * @newoptlen: length of @newopt
1144 *
1145 * See ipv6_renew_options(). The difference is that @newopt is
1146 * kernel memory, rather than user memory.
1147 */
1148struct ipv6_txoptions *
1149ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt,
1150 int newtype, struct ipv6_opt_hdr *newopt,
1151 int newoptlen)
1152{
1153 struct ipv6_txoptions *ret_val;
1154 const mm_segment_t old_fs = get_fs();
1155
1156 set_fs(KERNEL_DS);
1157 ret_val = ipv6_renew_options(sk, opt, newtype,
1158 (struct ipv6_opt_hdr __user *)newopt,
1159 newoptlen);
1160 set_fs(old_fs);
1161 return ret_val;
1162} 1111}
1163 1112
1164struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, 1113struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1fb2f3118d60..d212738e9d10 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -935,20 +935,19 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
935{ 935{
936 struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, 936 struct fib6_info *leaf = rcu_dereference_protected(fn->leaf,
937 lockdep_is_held(&rt->fib6_table->tb6_lock)); 937 lockdep_is_held(&rt->fib6_table->tb6_lock));
938 enum fib_event_type event = FIB_EVENT_ENTRY_ADD; 938 struct fib6_info *iter = NULL;
939 struct fib6_info *iter = NULL, *match = NULL;
940 struct fib6_info __rcu **ins; 939 struct fib6_info __rcu **ins;
940 struct fib6_info __rcu **fallback_ins = NULL;
941 int replace = (info->nlh && 941 int replace = (info->nlh &&
942 (info->nlh->nlmsg_flags & NLM_F_REPLACE)); 942 (info->nlh->nlmsg_flags & NLM_F_REPLACE));
943 int append = (info->nlh &&
944 (info->nlh->nlmsg_flags & NLM_F_APPEND));
945 int add = (!info->nlh || 943 int add = (!info->nlh ||
946 (info->nlh->nlmsg_flags & NLM_F_CREATE)); 944 (info->nlh->nlmsg_flags & NLM_F_CREATE));
947 int found = 0; 945 int found = 0;
946 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
948 u16 nlflags = NLM_F_EXCL; 947 u16 nlflags = NLM_F_EXCL;
949 int err; 948 int err;
950 949
951 if (append) 950 if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND))
952 nlflags |= NLM_F_APPEND; 951 nlflags |= NLM_F_APPEND;
953 952
954 ins = &fn->leaf; 953 ins = &fn->leaf;
@@ -970,8 +969,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
970 969
971 nlflags &= ~NLM_F_EXCL; 970 nlflags &= ~NLM_F_EXCL;
972 if (replace) { 971 if (replace) {
973 found++; 972 if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
974 break; 973 found++;
974 break;
975 }
976 if (rt_can_ecmp)
977 fallback_ins = fallback_ins ?: ins;
978 goto next_iter;
975 } 979 }
976 980
977 if (rt6_duplicate_nexthop(iter, rt)) { 981 if (rt6_duplicate_nexthop(iter, rt)) {
@@ -986,51 +990,71 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
986 fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu); 990 fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
987 return -EEXIST; 991 return -EEXIST;
988 } 992 }
989 993 /* If we have the same destination and the same metric,
990 /* first route that matches */ 994 * but not the same gateway, then the route we try to
991 if (!match) 995 * add is sibling to this route, increment our counter
992 match = iter; 996 * of siblings, and later we will add our route to the
997 * list.
998 * Only static routes (which don't have flag
999 * RTF_EXPIRES) are used for ECMPv6.
1000 *
1001 * To avoid long list, we only had siblings if the
1002 * route have a gateway.
1003 */
1004 if (rt_can_ecmp &&
1005 rt6_qualify_for_ecmp(iter))
1006 rt->fib6_nsiblings++;
993 } 1007 }
994 1008
995 if (iter->fib6_metric > rt->fib6_metric) 1009 if (iter->fib6_metric > rt->fib6_metric)
996 break; 1010 break;
997 1011
1012next_iter:
998 ins = &iter->fib6_next; 1013 ins = &iter->fib6_next;
999 } 1014 }
1000 1015
1016 if (fallback_ins && !found) {
1017 /* No ECMP-able route found, replace first non-ECMP one */
1018 ins = fallback_ins;
1019 iter = rcu_dereference_protected(*ins,
1020 lockdep_is_held(&rt->fib6_table->tb6_lock));
1021 found++;
1022 }
1023
1001 /* Reset round-robin state, if necessary */ 1024 /* Reset round-robin state, if necessary */
1002 if (ins == &fn->leaf) 1025 if (ins == &fn->leaf)
1003 fn->rr_ptr = NULL; 1026 fn->rr_ptr = NULL;
1004 1027
1005 /* Link this route to others same route. */ 1028 /* Link this route to others same route. */
1006 if (append && match) { 1029 if (rt->fib6_nsiblings) {
1030 unsigned int fib6_nsiblings;
1007 struct fib6_info *sibling, *temp_sibling; 1031 struct fib6_info *sibling, *temp_sibling;
1008 1032
1009 if (rt->fib6_flags & RTF_REJECT) { 1033 /* Find the first route that have the same metric */
1010 NL_SET_ERR_MSG(extack, 1034 sibling = leaf;
1011 "Can not append a REJECT route"); 1035 while (sibling) {
1012 return -EINVAL; 1036 if (sibling->fib6_metric == rt->fib6_metric &&
1013 } else if (match->fib6_flags & RTF_REJECT) { 1037 rt6_qualify_for_ecmp(sibling)) {
1014 NL_SET_ERR_MSG(extack, 1038 list_add_tail(&rt->fib6_siblings,
1015 "Can not append to a REJECT route"); 1039 &sibling->fib6_siblings);
1016 return -EINVAL; 1040 break;
1041 }
1042 sibling = rcu_dereference_protected(sibling->fib6_next,
1043 lockdep_is_held(&rt->fib6_table->tb6_lock));
1017 } 1044 }
1018 event = FIB_EVENT_ENTRY_APPEND;
1019 rt->fib6_nsiblings = match->fib6_nsiblings;
1020 list_add_tail(&rt->fib6_siblings, &match->fib6_siblings);
1021 match->fib6_nsiblings++;
1022
1023 /* For each sibling in the list, increment the counter of 1045 /* For each sibling in the list, increment the counter of
1024 * siblings. BUG() if counters does not match, list of siblings 1046 * siblings. BUG() if counters does not match, list of siblings
1025 * is broken! 1047 * is broken!
1026 */ 1048 */
1049 fib6_nsiblings = 0;
1027 list_for_each_entry_safe(sibling, temp_sibling, 1050 list_for_each_entry_safe(sibling, temp_sibling,
1028 &match->fib6_siblings, fib6_siblings) { 1051 &rt->fib6_siblings, fib6_siblings) {
1029 sibling->fib6_nsiblings++; 1052 sibling->fib6_nsiblings++;
1030 BUG_ON(sibling->fib6_nsiblings != match->fib6_nsiblings); 1053 BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
1054 fib6_nsiblings++;
1031 } 1055 }
1032 1056 BUG_ON(fib6_nsiblings != rt->fib6_nsiblings);
1033 rt6_multipath_rebalance(match); 1057 rt6_multipath_rebalance(temp_sibling);
1034 } 1058 }
1035 1059
1036 /* 1060 /*
@@ -1043,8 +1067,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
1043add: 1067add:
1044 nlflags |= NLM_F_CREATE; 1068 nlflags |= NLM_F_CREATE;
1045 1069
1046 err = call_fib6_entry_notifiers(info->nl_net, event, rt, 1070 err = call_fib6_entry_notifiers(info->nl_net,
1047 extack); 1071 FIB_EVENT_ENTRY_ADD,
1072 rt, extack);
1048 if (err) 1073 if (err)
1049 return err; 1074 return err;
1050 1075
@@ -1062,7 +1087,7 @@ add:
1062 } 1087 }
1063 1088
1064 } else { 1089 } else {
1065 struct fib6_info *tmp; 1090 int nsiblings;
1066 1091
1067 if (!found) { 1092 if (!found) {
1068 if (add) 1093 if (add)
@@ -1077,57 +1102,48 @@ add:
1077 if (err) 1102 if (err)
1078 return err; 1103 return err;
1079 1104
1080 /* if route being replaced has siblings, set tmp to
1081 * last one, otherwise tmp is current route. this is
1082 * used to set fib6_next for new route
1083 */
1084 if (iter->fib6_nsiblings)
1085 tmp = list_last_entry(&iter->fib6_siblings,
1086 struct fib6_info,
1087 fib6_siblings);
1088 else
1089 tmp = iter;
1090
1091 /* insert new route */
1092 atomic_inc(&rt->fib6_ref); 1105 atomic_inc(&rt->fib6_ref);
1093 rcu_assign_pointer(rt->fib6_node, fn); 1106 rcu_assign_pointer(rt->fib6_node, fn);
1094 rt->fib6_next = tmp->fib6_next; 1107 rt->fib6_next = iter->fib6_next;
1095 rcu_assign_pointer(*ins, rt); 1108 rcu_assign_pointer(*ins, rt);
1096
1097 if (!info->skip_notify) 1109 if (!info->skip_notify)
1098 inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); 1110 inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
1099 if (!(fn->fn_flags & RTN_RTINFO)) { 1111 if (!(fn->fn_flags & RTN_RTINFO)) {
1100 info->nl_net->ipv6.rt6_stats->fib_route_nodes++; 1112 info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
1101 fn->fn_flags |= RTN_RTINFO; 1113 fn->fn_flags |= RTN_RTINFO;
1102 } 1114 }
1115 nsiblings = iter->fib6_nsiblings;
1116 iter->fib6_node = NULL;
1117 fib6_purge_rt(iter, fn, info->nl_net);
1118 if (rcu_access_pointer(fn->rr_ptr) == iter)
1119 fn->rr_ptr = NULL;
1120 fib6_info_release(iter);
1103 1121
1104 /* delete old route */ 1122 if (nsiblings) {
1105 rt = iter;
1106
1107 if (rt->fib6_nsiblings) {
1108 struct fib6_info *tmp;
1109
1110 /* Replacing an ECMP route, remove all siblings */ 1123 /* Replacing an ECMP route, remove all siblings */
1111 list_for_each_entry_safe(iter, tmp, &rt->fib6_siblings, 1124 ins = &rt->fib6_next;
1112 fib6_siblings) { 1125 iter = rcu_dereference_protected(*ins,
1113 iter->fib6_node = NULL; 1126 lockdep_is_held(&rt->fib6_table->tb6_lock));
1114 fib6_purge_rt(iter, fn, info->nl_net); 1127 while (iter) {
1115 if (rcu_access_pointer(fn->rr_ptr) == iter) 1128 if (iter->fib6_metric > rt->fib6_metric)
1116 fn->rr_ptr = NULL; 1129 break;
1117 fib6_info_release(iter); 1130 if (rt6_qualify_for_ecmp(iter)) {
1118 1131 *ins = iter->fib6_next;
1119 rt->fib6_nsiblings--; 1132 iter->fib6_node = NULL;
1120 info->nl_net->ipv6.rt6_stats->fib_rt_entries--; 1133 fib6_purge_rt(iter, fn, info->nl_net);
1134 if (rcu_access_pointer(fn->rr_ptr) == iter)
1135 fn->rr_ptr = NULL;
1136 fib6_info_release(iter);
1137 nsiblings--;
1138 info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
1139 } else {
1140 ins = &iter->fib6_next;
1141 }
1142 iter = rcu_dereference_protected(*ins,
1143 lockdep_is_held(&rt->fib6_table->tb6_lock));
1121 } 1144 }
1145 WARN_ON(nsiblings != 0);
1122 } 1146 }
1123
1124 WARN_ON(rt->fib6_nsiblings != 0);
1125
1126 rt->fib6_node = NULL;
1127 fib6_purge_rt(rt, fn, info->nl_net);
1128 if (rcu_access_pointer(fn->rr_ptr) == rt)
1129 fn->rr_ptr = NULL;
1130 fib6_info_release(rt);
1131 } 1147 }
1132 1148
1133 return 0; 1149 return 0;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c8cf2fdbb13b..cd2cfb04e5d8 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -927,7 +927,6 @@ tx_err:
927static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, 927static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
928 struct net_device *dev) 928 struct net_device *dev)
929{ 929{
930 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
931 struct ip6_tnl *t = netdev_priv(dev); 930 struct ip6_tnl *t = netdev_priv(dev);
932 struct dst_entry *dst = skb_dst(skb); 931 struct dst_entry *dst = skb_dst(skb);
933 struct net_device_stats *stats; 932 struct net_device_stats *stats;
@@ -1010,6 +1009,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
1010 goto tx_err; 1009 goto tx_err;
1011 } 1010 }
1012 } else { 1011 } else {
1012 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1013
1013 switch (skb->protocol) { 1014 switch (skb->protocol) {
1014 case htons(ETH_P_IP): 1015 case htons(ETH_P_IP):
1015 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1016 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 4d780c7f0130..568ca4187cd1 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -398,6 +398,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
398 case IPV6_DSTOPTS: 398 case IPV6_DSTOPTS:
399 { 399 {
400 struct ipv6_txoptions *opt; 400 struct ipv6_txoptions *opt;
401 struct ipv6_opt_hdr *new = NULL;
402
403 /* hop-by-hop / destination options are privileged option */
404 retv = -EPERM;
405 if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
406 break;
401 407
402 /* remove any sticky options header with a zero option 408 /* remove any sticky options header with a zero option
403 * length, per RFC3542. 409 * length, per RFC3542.
@@ -409,17 +415,22 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
409 else if (optlen < sizeof(struct ipv6_opt_hdr) || 415 else if (optlen < sizeof(struct ipv6_opt_hdr) ||
410 optlen & 0x7 || optlen > 8 * 255) 416 optlen & 0x7 || optlen > 8 * 255)
411 goto e_inval; 417 goto e_inval;
412 418 else {
413 /* hop-by-hop / destination options are privileged option */ 419 new = memdup_user(optval, optlen);
414 retv = -EPERM; 420 if (IS_ERR(new)) {
415 if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) 421 retv = PTR_ERR(new);
416 break; 422 break;
423 }
424 if (unlikely(ipv6_optlen(new) > optlen)) {
425 kfree(new);
426 goto e_inval;
427 }
428 }
417 429
418 opt = rcu_dereference_protected(np->opt, 430 opt = rcu_dereference_protected(np->opt,
419 lockdep_sock_is_held(sk)); 431 lockdep_sock_is_held(sk));
420 opt = ipv6_renew_options(sk, opt, optname, 432 opt = ipv6_renew_options(sk, opt, optname, new);
421 (struct ipv6_opt_hdr __user *)optval, 433 kfree(new);
422 optlen);
423 if (IS_ERR(opt)) { 434 if (IS_ERR(opt)) {
424 retv = PTR_ERR(opt); 435 retv = PTR_ERR(opt);
425 break; 436 break;
@@ -718,8 +729,9 @@ done:
718 struct sockaddr_in6 *psin6; 729 struct sockaddr_in6 *psin6;
719 730
720 psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; 731 psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
721 retv = ipv6_sock_mc_join(sk, greqs.gsr_interface, 732 retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface,
722 &psin6->sin6_addr); 733 &psin6->sin6_addr,
734 MCAST_INCLUDE);
723 /* prior join w/ different source is ok */ 735 /* prior join w/ different source is ok */
724 if (retv && retv != -EADDRINUSE) 736 if (retv && retv != -EADDRINUSE)
725 break; 737 break;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index c0c74088f2af..2699be7202be 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -95,6 +95,8 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
95 int delta); 95 int delta);
96static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, 96static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
97 struct inet6_dev *idev); 97 struct inet6_dev *idev);
98static int __ipv6_dev_mc_inc(struct net_device *dev,
99 const struct in6_addr *addr, unsigned int mode);
98 100
99#define MLD_QRV_DEFAULT 2 101#define MLD_QRV_DEFAULT 2
100/* RFC3810, 9.2. Query Interval */ 102/* RFC3810, 9.2. Query Interval */
@@ -132,7 +134,8 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
132 return iv > 0 ? iv : 1; 134 return iv > 0 ? iv : 1;
133} 135}
134 136
135int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) 137static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
138 const struct in6_addr *addr, unsigned int mode)
136{ 139{
137 struct net_device *dev = NULL; 140 struct net_device *dev = NULL;
138 struct ipv6_mc_socklist *mc_lst; 141 struct ipv6_mc_socklist *mc_lst;
@@ -179,7 +182,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
179 } 182 }
180 183
181 mc_lst->ifindex = dev->ifindex; 184 mc_lst->ifindex = dev->ifindex;
182 mc_lst->sfmode = MCAST_EXCLUDE; 185 mc_lst->sfmode = mode;
183 rwlock_init(&mc_lst->sflock); 186 rwlock_init(&mc_lst->sflock);
184 mc_lst->sflist = NULL; 187 mc_lst->sflist = NULL;
185 188
@@ -187,7 +190,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
187 * now add/increase the group membership on the device 190 * now add/increase the group membership on the device
188 */ 191 */
189 192
190 err = ipv6_dev_mc_inc(dev, addr); 193 err = __ipv6_dev_mc_inc(dev, addr, mode);
191 194
192 if (err) { 195 if (err) {
193 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 196 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
@@ -199,8 +202,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
199 202
200 return 0; 203 return 0;
201} 204}
205
206int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
207{
208 return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
209}
202EXPORT_SYMBOL(ipv6_sock_mc_join); 210EXPORT_SYMBOL(ipv6_sock_mc_join);
203 211
212int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
213 const struct in6_addr *addr, unsigned int mode)
214{
215 return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
216}
217
204/* 218/*
205 * socket leave on multicast group 219 * socket leave on multicast group
206 */ 220 */
@@ -646,7 +660,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
646 return rv; 660 return rv;
647} 661}
648 662
649static void igmp6_group_added(struct ifmcaddr6 *mc) 663static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode)
650{ 664{
651 struct net_device *dev = mc->idev->dev; 665 struct net_device *dev = mc->idev->dev;
652 char buf[MAX_ADDR_LEN]; 666 char buf[MAX_ADDR_LEN];
@@ -672,7 +686,13 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
672 } 686 }
673 /* else v2 */ 687 /* else v2 */
674 688
675 mc->mca_crcount = mc->idev->mc_qrv; 689 /* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
690 * should not send filter-mode change record as the mode
691 * should be from IN() to IN(A).
692 */
693 if (mode == MCAST_EXCLUDE)
694 mc->mca_crcount = mc->idev->mc_qrv;
695
676 mld_ifc_event(mc->idev); 696 mld_ifc_event(mc->idev);
677} 697}
678 698
@@ -770,13 +790,14 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
770 spin_lock_bh(&im->mca_lock); 790 spin_lock_bh(&im->mca_lock);
771 if (pmc) { 791 if (pmc) {
772 im->idev = pmc->idev; 792 im->idev = pmc->idev;
773 im->mca_crcount = idev->mc_qrv;
774 im->mca_sfmode = pmc->mca_sfmode; 793 im->mca_sfmode = pmc->mca_sfmode;
775 if (pmc->mca_sfmode == MCAST_INCLUDE) { 794 if (pmc->mca_sfmode == MCAST_INCLUDE) {
776 im->mca_tomb = pmc->mca_tomb; 795 im->mca_tomb = pmc->mca_tomb;
777 im->mca_sources = pmc->mca_sources; 796 im->mca_sources = pmc->mca_sources;
778 for (psf = im->mca_sources; psf; psf = psf->sf_next) 797 for (psf = im->mca_sources; psf; psf = psf->sf_next)
779 psf->sf_crcount = im->mca_crcount; 798 psf->sf_crcount = idev->mc_qrv;
799 } else {
800 im->mca_crcount = idev->mc_qrv;
780 } 801 }
781 in6_dev_put(pmc->idev); 802 in6_dev_put(pmc->idev);
782 kfree(pmc); 803 kfree(pmc);
@@ -831,7 +852,8 @@ static void ma_put(struct ifmcaddr6 *mc)
831} 852}
832 853
833static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, 854static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
834 const struct in6_addr *addr) 855 const struct in6_addr *addr,
856 unsigned int mode)
835{ 857{
836 struct ifmcaddr6 *mc; 858 struct ifmcaddr6 *mc;
837 859
@@ -849,9 +871,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
849 refcount_set(&mc->mca_refcnt, 1); 871 refcount_set(&mc->mca_refcnt, 1);
850 spin_lock_init(&mc->mca_lock); 872 spin_lock_init(&mc->mca_lock);
851 873
852 /* initial mode is (EX, empty) */ 874 mc->mca_sfmode = mode;
853 mc->mca_sfmode = MCAST_EXCLUDE; 875 mc->mca_sfcount[mode] = 1;
854 mc->mca_sfcount[MCAST_EXCLUDE] = 1;
855 876
856 if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) || 877 if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
857 IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) 878 IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
@@ -863,7 +884,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
863/* 884/*
864 * device multicast group inc (add if not found) 885 * device multicast group inc (add if not found)
865 */ 886 */
866int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) 887static int __ipv6_dev_mc_inc(struct net_device *dev,
888 const struct in6_addr *addr, unsigned int mode)
867{ 889{
868 struct ifmcaddr6 *mc; 890 struct ifmcaddr6 *mc;
869 struct inet6_dev *idev; 891 struct inet6_dev *idev;
@@ -887,14 +909,13 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
887 if (ipv6_addr_equal(&mc->mca_addr, addr)) { 909 if (ipv6_addr_equal(&mc->mca_addr, addr)) {
888 mc->mca_users++; 910 mc->mca_users++;
889 write_unlock_bh(&idev->lock); 911 write_unlock_bh(&idev->lock);
890 ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0, 912 ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
891 NULL, 0);
892 in6_dev_put(idev); 913 in6_dev_put(idev);
893 return 0; 914 return 0;
894 } 915 }
895 } 916 }
896 917
897 mc = mca_alloc(idev, addr); 918 mc = mca_alloc(idev, addr, mode);
898 if (!mc) { 919 if (!mc) {
899 write_unlock_bh(&idev->lock); 920 write_unlock_bh(&idev->lock);
900 in6_dev_put(idev); 921 in6_dev_put(idev);
@@ -911,11 +932,16 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
911 write_unlock_bh(&idev->lock); 932 write_unlock_bh(&idev->lock);
912 933
913 mld_del_delrec(idev, mc); 934 mld_del_delrec(idev, mc);
914 igmp6_group_added(mc); 935 igmp6_group_added(mc, mode);
915 ma_put(mc); 936 ma_put(mc);
916 return 0; 937 return 0;
917} 938}
918 939
940int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
941{
942 return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
943}
944
919/* 945/*
920 * device multicast group del 946 * device multicast group del
921 */ 947 */
@@ -1751,7 +1777,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1751 1777
1752 psf_next = psf->sf_next; 1778 psf_next = psf->sf_next;
1753 1779
1754 if (!is_in(pmc, psf, type, gdeleted, sdeleted)) { 1780 if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
1755 psf_prev = psf; 1781 psf_prev = psf;
1756 continue; 1782 continue;
1757 } 1783 }
@@ -2066,7 +2092,7 @@ static void mld_send_initial_cr(struct inet6_dev *idev)
2066 if (pmc->mca_sfcount[MCAST_EXCLUDE]) 2092 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2067 type = MLD2_CHANGE_TO_EXCLUDE; 2093 type = MLD2_CHANGE_TO_EXCLUDE;
2068 else 2094 else
2069 type = MLD2_CHANGE_TO_INCLUDE; 2095 type = MLD2_ALLOW_NEW_SOURCES;
2070 skb = add_grec(skb, pmc, type, 0, 0, 1); 2096 skb = add_grec(skb, pmc, type, 0, 0, 1);
2071 spin_unlock_bh(&pmc->mca_lock); 2097 spin_unlock_bh(&pmc->mca_lock);
2072 } 2098 }
@@ -2546,7 +2572,7 @@ void ipv6_mc_up(struct inet6_dev *idev)
2546 ipv6_mc_reset(idev); 2572 ipv6_mc_reset(idev);
2547 for (i = idev->mc_list; i; i = i->next) { 2573 for (i = idev->mc_list; i; i = i->next) {
2548 mld_del_delrec(idev, i); 2574 mld_del_delrec(idev, i);
2549 igmp6_group_added(i); 2575 igmp6_group_added(i, i->mca_sfmode);
2550 } 2576 }
2551 read_unlock_bh(&idev->lock); 2577 read_unlock_bh(&idev->lock);
2552} 2578}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index e640d2f3c55c..0ec273997d1d 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -811,7 +811,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
811 return; 811 return;
812 } 812 }
813 } 813 }
814 if (ndopts.nd_opts_nonce) 814 if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1)
815 memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6); 815 memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6);
816 816
817 inc = ipv6_addr_is_multicast(daddr); 817 inc = ipv6_addr_is_multicast(daddr);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 7eab959734bc..daf2e9e9193d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1909,6 +1909,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = {
1909 .checkentry = icmp6_checkentry, 1909 .checkentry = icmp6_checkentry,
1910 .proto = IPPROTO_ICMPV6, 1910 .proto = IPPROTO_ICMPV6,
1911 .family = NFPROTO_IPV6, 1911 .family = NFPROTO_IPV6,
1912 .me = THIS_MODULE,
1912 }, 1913 },
1913}; 1914};
1914 1915
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index a452d99c9f52..e4d9e6976d3c 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -585,6 +585,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
585 fq->q.meat == fq->q.len && 585 fq->q.meat == fq->q.len &&
586 nf_ct_frag6_reasm(fq, skb, dev)) 586 nf_ct_frag6_reasm(fq, skb, dev))
587 ret = 0; 587 ret = 0;
588 else
589 skb_dst_drop(skb);
588 590
589out_unlock: 591out_unlock:
590 spin_unlock_bh(&fq->q.lock); 592 spin_unlock_bh(&fq->q.lock);
diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c
index bf1d6c421e3b..5dfd33af6451 100644
--- a/net/ipv6/netfilter/nf_tproxy_ipv6.c
+++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c
@@ -55,7 +55,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
55 * to a listener socket if there's one */ 55 * to a listener socket if there's one */
56 struct sock *sk2; 56 struct sock *sk2;
57 57
58 sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, hp, tproto, 58 sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, tproto,
59 &iph->saddr, 59 &iph->saddr,
60 nf_tproxy_laddr6(skb, laddr, &iph->daddr), 60 nf_tproxy_laddr6(skb, laddr, &iph->daddr),
61 hp->source, 61 hp->source,
@@ -72,7 +72,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
72EXPORT_SYMBOL_GPL(nf_tproxy_handle_time_wait6); 72EXPORT_SYMBOL_GPL(nf_tproxy_handle_time_wait6);
73 73
74struct sock * 74struct sock *
75nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, 75nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
76 const u8 protocol, 76 const u8 protocol,
77 const struct in6_addr *saddr, const struct in6_addr *daddr, 77 const struct in6_addr *saddr, const struct in6_addr *daddr,
78 const __be16 sport, const __be16 dport, 78 const __be16 sport, const __be16 dport,
@@ -80,15 +80,20 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
80 const enum nf_tproxy_lookup_t lookup_type) 80 const enum nf_tproxy_lookup_t lookup_type)
81{ 81{
82 struct sock *sk; 82 struct sock *sk;
83 struct tcphdr *tcph;
84 83
85 switch (protocol) { 84 switch (protocol) {
86 case IPPROTO_TCP: 85 case IPPROTO_TCP: {
86 struct tcphdr _hdr, *hp;
87
88 hp = skb_header_pointer(skb, thoff,
89 sizeof(struct tcphdr), &_hdr);
90 if (hp == NULL)
91 return NULL;
92
87 switch (lookup_type) { 93 switch (lookup_type) {
88 case NF_TPROXY_LOOKUP_LISTENER: 94 case NF_TPROXY_LOOKUP_LISTENER:
89 tcph = hp;
90 sk = inet6_lookup_listener(net, &tcp_hashinfo, skb, 95 sk = inet6_lookup_listener(net, &tcp_hashinfo, skb,
91 thoff + __tcp_hdrlen(tcph), 96 thoff + __tcp_hdrlen(hp),
92 saddr, sport, 97 saddr, sport,
93 daddr, ntohs(dport), 98 daddr, ntohs(dport),
94 in->ifindex, 0); 99 in->ifindex, 0);
@@ -110,6 +115,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
110 BUG(); 115 BUG();
111 } 116 }
112 break; 117 break;
118 }
113 case IPPROTO_UDP: 119 case IPPROTO_UDP:
114 sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, 120 sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
115 in->ifindex); 121 in->ifindex);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 86a0e4333d42..2ce0bd17de4f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3842,7 +3842,7 @@ static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
3842 lockdep_is_held(&rt->fib6_table->tb6_lock)); 3842 lockdep_is_held(&rt->fib6_table->tb6_lock));
3843 while (iter) { 3843 while (iter) {
3844 if (iter->fib6_metric == rt->fib6_metric && 3844 if (iter->fib6_metric == rt->fib6_metric &&
3845 iter->fib6_nsiblings) 3845 rt6_qualify_for_ecmp(iter))
3846 return iter; 3846 return iter;
3847 iter = rcu_dereference_protected(iter->fib6_next, 3847 iter = rcu_dereference_protected(iter->fib6_next,
3848 lockdep_is_held(&rt->fib6_table->tb6_lock)); 3848 lockdep_is_held(&rt->fib6_table->tb6_lock));
@@ -4388,6 +4388,13 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
4388 rt = NULL; 4388 rt = NULL;
4389 goto cleanup; 4389 goto cleanup;
4390 } 4390 }
4391 if (!rt6_qualify_for_ecmp(rt)) {
4392 err = -EINVAL;
4393 NL_SET_ERR_MSG(extack,
4394 "Device only routes can not be added for IPv6 using the multipath API.");
4395 fib6_info_release(rt);
4396 goto cleanup;
4397 }
4391 4398
4392 rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1; 4399 rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;
4393 4400
@@ -4439,7 +4446,6 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
4439 */ 4446 */
4440 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | 4447 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
4441 NLM_F_REPLACE); 4448 NLM_F_REPLACE);
4442 cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_APPEND;
4443 nhn++; 4449 nhn++;
4444 } 4450 }
4445 4451
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 19ccf0dc996c..a8854dd3e9c5 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -101,7 +101,7 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
101 101
102 if (do_flowlabel > 0) { 102 if (do_flowlabel > 0) {
103 hash = skb_get_hash(skb); 103 hash = skb_get_hash(skb);
104 rol32(hash, 16); 104 hash = rol32(hash, 16);
105 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; 105 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
106 } else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) { 106 } else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) {
107 flowlabel = ip6_flowlabel(inner_hdr); 107 flowlabel = ip6_flowlabel(inner_hdr);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index dbd7d1fad277..f0a1c536ef15 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -460,6 +460,13 @@ config NF_TABLES
460 460
461if NF_TABLES 461if NF_TABLES
462 462
463config NF_TABLES_SET
464 tristate "Netfilter nf_tables set infrastructure"
465 help
466 This option enables the nf_tables set infrastructure that allows to
467 look up for elements in a set and to build one-way mappings between
468 matchings and actions.
469
463config NF_TABLES_INET 470config NF_TABLES_INET
464 depends on IPV6 471 depends on IPV6
465 select NF_TABLES_IPV4 472 select NF_TABLES_IPV4
@@ -493,24 +500,6 @@ config NFT_FLOW_OFFLOAD
493 This option adds the "flow_offload" expression that you can use to 500 This option adds the "flow_offload" expression that you can use to
494 choose what flows are placed into the hardware. 501 choose what flows are placed into the hardware.
495 502
496config NFT_SET_RBTREE
497 tristate "Netfilter nf_tables rbtree set module"
498 help
499 This option adds the "rbtree" set type (Red Black tree) that is used
500 to build interval-based sets.
501
502config NFT_SET_HASH
503 tristate "Netfilter nf_tables hash set module"
504 help
505 This option adds the "hash" set type that is used to build one-way
506 mappings between matchings and actions.
507
508config NFT_SET_BITMAP
509 tristate "Netfilter nf_tables bitmap set module"
510 help
511 This option adds the "bitmap" set type that is used to build sets
512 whose keys are smaller or equal to 16 bits.
513
514config NFT_COUNTER 503config NFT_COUNTER
515 tristate "Netfilter nf_tables counter module" 504 tristate "Netfilter nf_tables counter module"
516 help 505 help
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 44449389e527..8a76dced974d 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -78,7 +78,11 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
78 nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \ 78 nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
79 nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o 79 nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o
80 80
81nf_tables_set-objs := nf_tables_set_core.o \
82 nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o
83
81obj-$(CONFIG_NF_TABLES) += nf_tables.o 84obj-$(CONFIG_NF_TABLES) += nf_tables.o
85obj-$(CONFIG_NF_TABLES_SET) += nf_tables_set.o
82obj-$(CONFIG_NFT_COMPAT) += nft_compat.o 86obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
83obj-$(CONFIG_NFT_CONNLIMIT) += nft_connlimit.o 87obj-$(CONFIG_NFT_CONNLIMIT) += nft_connlimit.o
84obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o 88obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o
@@ -91,9 +95,6 @@ obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
91obj-$(CONFIG_NFT_QUOTA) += nft_quota.o 95obj-$(CONFIG_NFT_QUOTA) += nft_quota.o
92obj-$(CONFIG_NFT_REJECT) += nft_reject.o 96obj-$(CONFIG_NFT_REJECT) += nft_reject.o
93obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o 97obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
94obj-$(CONFIG_NFT_SET_RBTREE) += nft_set_rbtree.o
95obj-$(CONFIG_NFT_SET_HASH) += nft_set_hash.o
96obj-$(CONFIG_NFT_SET_BITMAP) += nft_set_bitmap.o
97obj-$(CONFIG_NFT_COUNTER) += nft_counter.o 98obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
98obj-$(CONFIG_NFT_LOG) += nft_log.o 99obj-$(CONFIG_NFT_LOG) += nft_log.o
99obj-$(CONFIG_NFT_MASQ) += nft_masq.o 100obj-$(CONFIG_NFT_MASQ) += nft_masq.o
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3465da2a98bd..3d5280425027 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2043,7 +2043,7 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
2043 return -EOPNOTSUPP; 2043 return -EOPNOTSUPP;
2044 2044
2045 /* On boot, we can set this without any fancy locking. */ 2045 /* On boot, we can set this without any fancy locking. */
2046 if (!nf_conntrack_htable_size) 2046 if (!nf_conntrack_hash)
2047 return param_set_uint(val, kp); 2047 return param_set_uint(val, kp);
2048 2048
2049 rc = kstrtouint(val, 0, &hashsize); 2049 rc = kstrtouint(val, 0, &hashsize);
diff --git a/net/netfilter/nf_tables_set_core.c b/net/netfilter/nf_tables_set_core.c
new file mode 100644
index 000000000000..814789644bd3
--- /dev/null
+++ b/net/netfilter/nf_tables_set_core.c
@@ -0,0 +1,28 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#include <net/netfilter/nf_tables_core.h>
3
4static int __init nf_tables_set_module_init(void)
5{
6 nft_register_set(&nft_set_hash_fast_type);
7 nft_register_set(&nft_set_hash_type);
8 nft_register_set(&nft_set_rhash_type);
9 nft_register_set(&nft_set_bitmap_type);
10 nft_register_set(&nft_set_rbtree_type);
11
12 return 0;
13}
14
15static void __exit nf_tables_set_module_exit(void)
16{
17 nft_unregister_set(&nft_set_rbtree_type);
18 nft_unregister_set(&nft_set_bitmap_type);
19 nft_unregister_set(&nft_set_rhash_type);
20 nft_unregister_set(&nft_set_hash_type);
21 nft_unregister_set(&nft_set_hash_fast_type);
22}
23
24module_init(nf_tables_set_module_init);
25module_exit(nf_tables_set_module_exit);
26
27MODULE_LICENSE("GPL");
28MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 8d1ff654e5af..32535eea51b2 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -832,10 +832,18 @@ nft_target_select_ops(const struct nft_ctx *ctx,
832 rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV])); 832 rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
833 family = ctx->family; 833 family = ctx->family;
834 834
835 if (strcmp(tg_name, XT_ERROR_TARGET) == 0 ||
836 strcmp(tg_name, XT_STANDARD_TARGET) == 0 ||
837 strcmp(tg_name, "standard") == 0)
838 return ERR_PTR(-EINVAL);
839
835 /* Re-use the existing target if it's already loaded. */ 840 /* Re-use the existing target if it's already loaded. */
836 list_for_each_entry(nft_target, &nft_target_list, head) { 841 list_for_each_entry(nft_target, &nft_target_list, head) {
837 struct xt_target *target = nft_target->ops.data; 842 struct xt_target *target = nft_target->ops.data;
838 843
844 if (!target->target)
845 continue;
846
839 if (nft_target_cmp(target, tg_name, rev, family)) 847 if (nft_target_cmp(target, tg_name, rev, family))
840 return &nft_target->ops; 848 return &nft_target->ops;
841 } 849 }
@@ -844,6 +852,11 @@ nft_target_select_ops(const struct nft_ctx *ctx,
844 if (IS_ERR(target)) 852 if (IS_ERR(target))
845 return ERR_PTR(-ENOENT); 853 return ERR_PTR(-ENOENT);
846 854
855 if (!target->target) {
856 err = -EINVAL;
857 goto err;
858 }
859
847 if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) { 860 if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) {
848 err = -EINVAL; 861 err = -EINVAL;
849 goto err; 862 goto err;
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index d6626e01c7ee..128bc16f52dd 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -296,7 +296,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
296 return true; 296 return true;
297} 297}
298 298
299static struct nft_set_type nft_bitmap_type __read_mostly = { 299struct nft_set_type nft_set_bitmap_type __read_mostly = {
300 .owner = THIS_MODULE, 300 .owner = THIS_MODULE,
301 .ops = { 301 .ops = {
302 .privsize = nft_bitmap_privsize, 302 .privsize = nft_bitmap_privsize,
@@ -314,20 +314,3 @@ static struct nft_set_type nft_bitmap_type __read_mostly = {
314 .get = nft_bitmap_get, 314 .get = nft_bitmap_get,
315 }, 315 },
316}; 316};
317
318static int __init nft_bitmap_module_init(void)
319{
320 return nft_register_set(&nft_bitmap_type);
321}
322
323static void __exit nft_bitmap_module_exit(void)
324{
325 nft_unregister_set(&nft_bitmap_type);
326}
327
328module_init(nft_bitmap_module_init);
329module_exit(nft_bitmap_module_exit);
330
331MODULE_LICENSE("GPL");
332MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
333MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 6f9a1365a09f..72ef35b51cac 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -654,7 +654,7 @@ static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features
654 return true; 654 return true;
655} 655}
656 656
657static struct nft_set_type nft_rhash_type __read_mostly = { 657struct nft_set_type nft_set_rhash_type __read_mostly = {
658 .owner = THIS_MODULE, 658 .owner = THIS_MODULE,
659 .features = NFT_SET_MAP | NFT_SET_OBJECT | 659 .features = NFT_SET_MAP | NFT_SET_OBJECT |
660 NFT_SET_TIMEOUT | NFT_SET_EVAL, 660 NFT_SET_TIMEOUT | NFT_SET_EVAL,
@@ -677,7 +677,7 @@ static struct nft_set_type nft_rhash_type __read_mostly = {
677 }, 677 },
678}; 678};
679 679
680static struct nft_set_type nft_hash_type __read_mostly = { 680struct nft_set_type nft_set_hash_type __read_mostly = {
681 .owner = THIS_MODULE, 681 .owner = THIS_MODULE,
682 .features = NFT_SET_MAP | NFT_SET_OBJECT, 682 .features = NFT_SET_MAP | NFT_SET_OBJECT,
683 .ops = { 683 .ops = {
@@ -697,7 +697,7 @@ static struct nft_set_type nft_hash_type __read_mostly = {
697 }, 697 },
698}; 698};
699 699
700static struct nft_set_type nft_hash_fast_type __read_mostly = { 700struct nft_set_type nft_set_hash_fast_type __read_mostly = {
701 .owner = THIS_MODULE, 701 .owner = THIS_MODULE,
702 .features = NFT_SET_MAP | NFT_SET_OBJECT, 702 .features = NFT_SET_MAP | NFT_SET_OBJECT,
703 .ops = { 703 .ops = {
@@ -716,26 +716,3 @@ static struct nft_set_type nft_hash_fast_type __read_mostly = {
716 .get = nft_hash_get, 716 .get = nft_hash_get,
717 }, 717 },
718}; 718};
719
720static int __init nft_hash_module_init(void)
721{
722 if (nft_register_set(&nft_hash_fast_type) ||
723 nft_register_set(&nft_hash_type) ||
724 nft_register_set(&nft_rhash_type))
725 return 1;
726 return 0;
727}
728
729static void __exit nft_hash_module_exit(void)
730{
731 nft_unregister_set(&nft_rhash_type);
732 nft_unregister_set(&nft_hash_type);
733 nft_unregister_set(&nft_hash_fast_type);
734}
735
736module_init(nft_hash_module_init);
737module_exit(nft_hash_module_exit);
738
739MODULE_LICENSE("GPL");
740MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
741MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 7f3a9a211034..1f8f257cb518 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -462,7 +462,7 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
462 return true; 462 return true;
463} 463}
464 464
465static struct nft_set_type nft_rbtree_type __read_mostly = { 465struct nft_set_type nft_set_rbtree_type __read_mostly = {
466 .owner = THIS_MODULE, 466 .owner = THIS_MODULE,
467 .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT, 467 .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
468 .ops = { 468 .ops = {
@@ -481,20 +481,3 @@ static struct nft_set_type nft_rbtree_type __read_mostly = {
481 .get = nft_rbtree_get, 481 .get = nft_rbtree_get,
482 }, 482 },
483}; 483};
484
485static int __init nft_rbtree_module_init(void)
486{
487 return nft_register_set(&nft_rbtree_type);
488}
489
490static void __exit nft_rbtree_module_exit(void)
491{
492 nft_unregister_set(&nft_rbtree_type);
493}
494
495module_init(nft_rbtree_module_init);
496module_exit(nft_rbtree_module_exit);
497
498MODULE_LICENSE("GPL");
499MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
500MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 58fce4e749a9..d76550a8b642 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -61,7 +61,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
61 * addresses, this happens if the redirect already happened 61 * addresses, this happens if the redirect already happened
62 * and the current packet belongs to an already established 62 * and the current packet belongs to an already established
63 * connection */ 63 * connection */
64 sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol, 64 sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
65 iph->saddr, iph->daddr, 65 iph->saddr, iph->daddr,
66 hp->source, hp->dest, 66 hp->source, hp->dest,
67 skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED); 67 skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
@@ -77,7 +77,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
77 else if (!sk) 77 else if (!sk)
78 /* no, there's no established connection, check if 78 /* no, there's no established connection, check if
79 * there's a listener on the redirected addr/port */ 79 * there's a listener on the redirected addr/port */
80 sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol, 80 sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
81 iph->saddr, laddr, 81 iph->saddr, laddr,
82 hp->source, lport, 82 hp->source, lport,
83 skb->dev, NF_TPROXY_LOOKUP_LISTENER); 83 skb->dev, NF_TPROXY_LOOKUP_LISTENER);
@@ -150,7 +150,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
150 * addresses, this happens if the redirect already happened 150 * addresses, this happens if the redirect already happened
151 * and the current packet belongs to an already established 151 * and the current packet belongs to an already established
152 * connection */ 152 * connection */
153 sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp, tproto, 153 sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto,
154 &iph->saddr, &iph->daddr, 154 &iph->saddr, &iph->daddr,
155 hp->source, hp->dest, 155 hp->source, hp->dest,
156 xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED); 156 xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED);
@@ -171,7 +171,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
171 else if (!sk) 171 else if (!sk)
172 /* no there's no established connection, check if 172 /* no there's no established connection, check if
173 * there's a listener on the redirected addr/port */ 173 * there's a listener on the redirected addr/port */
174 sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp, 174 sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff,
175 tproto, &iph->saddr, laddr, 175 tproto, &iph->saddr, laddr,
176 hp->source, lport, 176 hp->source, lport,
177 xt_in(par), NF_TPROXY_LOOKUP_LISTENER); 177 xt_in(par), NF_TPROXY_LOOKUP_LISTENER);
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 2ceefa183cee..6a196e438b6c 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
752 pr_debug("Fragment %zd bytes remaining %zd", 752 pr_debug("Fragment %zd bytes remaining %zd",
753 frag_len, remaining_len); 753 frag_len, remaining_len);
754 754
755 pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, 755 pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
756 frag_len + LLCP_HEADER_SIZE, &err); 756 frag_len + LLCP_HEADER_SIZE, &err);
757 if (pdu == NULL) { 757 if (pdu == NULL) {
758 pr_err("Could not allocate PDU\n"); 758 pr_err("Could not allocate PDU (error=%d)\n", err);
759 continue; 759 len -= remaining_len;
760 if (len == 0)
761 len = err;
762 break;
760 } 763 }
761 764
762 pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI); 765 pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
index 9696ef96b719..1a30e165eeb4 100644
--- a/net/nsh/nsh.c
+++ b/net/nsh/nsh.c
@@ -104,7 +104,7 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
104 __skb_pull(skb, nsh_len); 104 __skb_pull(skb, nsh_len);
105 105
106 skb_reset_mac_header(skb); 106 skb_reset_mac_header(skb);
107 skb_reset_mac_len(skb); 107 skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
108 skb->protocol = proto; 108 skb->protocol = proto;
109 109
110 features &= NETIF_F_SG; 110 features &= NETIF_F_SG;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 57634bc3da74..9b27d0cd766d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2878,6 +2878,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2878 goto out_free; 2878 goto out_free;
2879 } else if (reserve) { 2879 } else if (reserve) {
2880 skb_reserve(skb, -reserve); 2880 skb_reserve(skb, -reserve);
2881 if (len < reserve)
2882 skb_reset_network_header(skb);
2881 } 2883 }
2882 2884
2883 /* Returns -EFAULT on error */ 2885 /* Returns -EFAULT on error */
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 2aa07b547b16..86e1e37eb4e8 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -191,8 +191,13 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
191 hdr->type = cpu_to_le32(type); 191 hdr->type = cpu_to_le32(type);
192 hdr->src_node_id = cpu_to_le32(from->sq_node); 192 hdr->src_node_id = cpu_to_le32(from->sq_node);
193 hdr->src_port_id = cpu_to_le32(from->sq_port); 193 hdr->src_port_id = cpu_to_le32(from->sq_port);
194 hdr->dst_node_id = cpu_to_le32(to->sq_node); 194 if (to->sq_port == QRTR_PORT_CTRL) {
195 hdr->dst_port_id = cpu_to_le32(to->sq_port); 195 hdr->dst_node_id = cpu_to_le32(node->nid);
196 hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
197 } else {
198 hdr->dst_node_id = cpu_to_le32(to->sq_node);
199 hdr->dst_port_id = cpu_to_le32(to->sq_port);
200 }
196 201
197 hdr->size = cpu_to_le32(len); 202 hdr->size = cpu_to_le32(len);
198 hdr->confirm_rx = 0; 203 hdr->confirm_rx = 0;
@@ -764,6 +769,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
764 node = NULL; 769 node = NULL;
765 if (addr->sq_node == QRTR_NODE_BCAST) { 770 if (addr->sq_node == QRTR_NODE_BCAST) {
766 enqueue_fn = qrtr_bcast_enqueue; 771 enqueue_fn = qrtr_bcast_enqueue;
772 if (addr->sq_port != QRTR_PORT_CTRL) {
773 release_sock(sk);
774 return -ENOTCONN;
775 }
767 } else if (addr->sq_node == ipc->us.sq_node) { 776 } else if (addr->sq_node == ipc->us.sq_node) {
768 enqueue_fn = qrtr_local_enqueue; 777 enqueue_fn = qrtr_local_enqueue;
769 } else { 778 } else {
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 526a8e491626..6e7124e57918 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -91,7 +91,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
91 } 91 }
92 params_old = rtnl_dereference(p->params); 92 params_old = rtnl_dereference(p->params);
93 93
94 params_new->action = parm->action; 94 p->tcf_action = parm->action;
95 params_new->update_flags = parm->update_flags; 95 params_new->update_flags = parm->update_flags;
96 rcu_assign_pointer(p->params, params_new); 96 rcu_assign_pointer(p->params, params_new);
97 if (params_old) 97 if (params_old)
@@ -561,7 +561,7 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
561 tcf_lastuse_update(&p->tcf_tm); 561 tcf_lastuse_update(&p->tcf_tm);
562 bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb); 562 bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
563 563
564 action = params->action; 564 action = READ_ONCE(p->tcf_action);
565 if (unlikely(action == TC_ACT_SHOT)) 565 if (unlikely(action == TC_ACT_SHOT))
566 goto drop_stats; 566 goto drop_stats;
567 567
@@ -599,11 +599,11 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
599 .index = p->tcf_index, 599 .index = p->tcf_index,
600 .refcnt = p->tcf_refcnt - ref, 600 .refcnt = p->tcf_refcnt - ref,
601 .bindcnt = p->tcf_bindcnt - bind, 601 .bindcnt = p->tcf_bindcnt - bind,
602 .action = p->tcf_action,
602 }; 603 };
603 struct tcf_t t; 604 struct tcf_t t;
604 605
605 params = rtnl_dereference(p->params); 606 params = rtnl_dereference(p->params);
606 opt.action = params->action;
607 opt.update_flags = params->update_flags; 607 opt.update_flags = params->update_flags;
608 608
609 if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) 609 if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 626dac81a48a..9bc6c2ae98a5 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -36,7 +36,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
36 36
37 tcf_lastuse_update(&t->tcf_tm); 37 tcf_lastuse_update(&t->tcf_tm);
38 bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb); 38 bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
39 action = params->action; 39 action = READ_ONCE(t->tcf_action);
40 40
41 switch (params->tcft_action) { 41 switch (params->tcft_action) {
42 case TCA_TUNNEL_KEY_ACT_RELEASE: 42 case TCA_TUNNEL_KEY_ACT_RELEASE:
@@ -182,7 +182,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
182 182
183 params_old = rtnl_dereference(t->params); 183 params_old = rtnl_dereference(t->params);
184 184
185 params_new->action = parm->action; 185 t->tcf_action = parm->action;
186 params_new->tcft_action = parm->t_action; 186 params_new->tcft_action = parm->t_action;
187 params_new->tcft_enc_metadata = metadata; 187 params_new->tcft_enc_metadata = metadata;
188 188
@@ -254,13 +254,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
254 .index = t->tcf_index, 254 .index = t->tcf_index,
255 .refcnt = t->tcf_refcnt - ref, 255 .refcnt = t->tcf_refcnt - ref,
256 .bindcnt = t->tcf_bindcnt - bind, 256 .bindcnt = t->tcf_bindcnt - bind,
257 .action = t->tcf_action,
257 }; 258 };
258 struct tcf_t tm; 259 struct tcf_t tm;
259 260
260 params = rtnl_dereference(t->params); 261 params = rtnl_dereference(t->params);
261 262
262 opt.t_action = params->tcft_action; 263 opt.t_action = params->tcft_action;
263 opt.action = params->action;
264 264
265 if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt)) 265 if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
266 goto nla_put_failure; 266 goto nla_put_failure;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index cdc3c87c53e6..f74513a7c7a8 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1053,7 +1053,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1053 for (tp = rtnl_dereference(chain->filter_chain); 1053 for (tp = rtnl_dereference(chain->filter_chain);
1054 tp; tp = rtnl_dereference(tp->next)) 1054 tp; tp = rtnl_dereference(tp->next))
1055 tfilter_notify(net, oskb, n, tp, block, 1055 tfilter_notify(net, oskb, n, tp, block,
1056 q, parent, 0, event, false); 1056 q, parent, NULL, event, false);
1057} 1057}
1058 1058
1059static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 1059static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
@@ -1444,7 +1444,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
1444 memset(&cb->args[1], 0, 1444 memset(&cb->args[1], 0,
1445 sizeof(cb->args) - sizeof(cb->args[0])); 1445 sizeof(cb->args) - sizeof(cb->args[0]));
1446 if (cb->args[1] == 0) { 1446 if (cb->args[1] == 0) {
1447 if (tcf_fill_node(net, skb, tp, block, q, parent, 0, 1447 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
1448 NETLINK_CB(cb->skb).portid, 1448 NETLINK_CB(cb->skb).portid,
1449 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1449 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1450 RTM_NEWTFILTER) <= 0) 1450 RTM_NEWTFILTER) <= 0)
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index cd2e0e342fb6..6c0a9d5dbf94 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -479,24 +479,28 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
479 q->cparams.mtu = psched_mtu(qdisc_dev(sch)); 479 q->cparams.mtu = psched_mtu(qdisc_dev(sch));
480 480
481 if (opt) { 481 if (opt) {
482 int err = fq_codel_change(sch, opt, extack); 482 err = fq_codel_change(sch, opt, extack);
483 if (err) 483 if (err)
484 return err; 484 goto init_failure;
485 } 485 }
486 486
487 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); 487 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
488 if (err) 488 if (err)
489 return err; 489 goto init_failure;
490 490
491 if (!q->flows) { 491 if (!q->flows) {
492 q->flows = kvcalloc(q->flows_cnt, 492 q->flows = kvcalloc(q->flows_cnt,
493 sizeof(struct fq_codel_flow), 493 sizeof(struct fq_codel_flow),
494 GFP_KERNEL); 494 GFP_KERNEL);
495 if (!q->flows) 495 if (!q->flows) {
496 return -ENOMEM; 496 err = -ENOMEM;
497 goto init_failure;
498 }
497 q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL); 499 q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
498 if (!q->backlogs) 500 if (!q->backlogs) {
499 return -ENOMEM; 501 err = -ENOMEM;
502 goto alloc_failure;
503 }
500 for (i = 0; i < q->flows_cnt; i++) { 504 for (i = 0; i < q->flows_cnt; i++) {
501 struct fq_codel_flow *flow = q->flows + i; 505 struct fq_codel_flow *flow = q->flows + i;
502 506
@@ -509,6 +513,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
509 else 513 else
510 sch->flags &= ~TCQ_F_CAN_BYPASS; 514 sch->flags &= ~TCQ_F_CAN_BYPASS;
511 return 0; 515 return 0;
516
517alloc_failure:
518 kvfree(q->flows);
519 q->flows = NULL;
520init_failure:
521 q->flows_cnt = 0;
522 return err;
512} 523}
513 524
514static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) 525static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 445b7ef61677..12cac85da994 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -282,7 +282,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
282 282
283 if (dst) { 283 if (dst) {
284 /* Re-fetch, as under layers may have a higher minimum size */ 284 /* Re-fetch, as under layers may have a higher minimum size */
285 pmtu = SCTP_TRUNC4(dst_mtu(dst)); 285 pmtu = sctp_dst_mtu(dst);
286 change = t->pathmtu != pmtu; 286 change = t->pathmtu != pmtu;
287 } 287 }
288 t->pathmtu = pmtu; 288 t->pathmtu = pmtu;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 3c1405df936c..05e4ffe5aabd 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -147,7 +147,8 @@ static int smc_release(struct socket *sock)
147 smc->clcsock = NULL; 147 smc->clcsock = NULL;
148 } 148 }
149 if (smc->use_fallback) { 149 if (smc->use_fallback) {
150 sock_put(sk); /* passive closing */ 150 if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
151 sock_put(sk); /* passive closing */
151 sk->sk_state = SMC_CLOSED; 152 sk->sk_state = SMC_CLOSED;
152 sk->sk_state_change(sk); 153 sk->sk_state_change(sk);
153 } 154 }
@@ -417,12 +418,18 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
417{ 418{
418 int rc; 419 int rc;
419 420
420 if (reason_code < 0) /* error, fallback is not possible */ 421 if (reason_code < 0) { /* error, fallback is not possible */
422 if (smc->sk.sk_state == SMC_INIT)
423 sock_put(&smc->sk); /* passive closing */
421 return reason_code; 424 return reason_code;
425 }
422 if (reason_code != SMC_CLC_DECL_REPLY) { 426 if (reason_code != SMC_CLC_DECL_REPLY) {
423 rc = smc_clc_send_decline(smc, reason_code); 427 rc = smc_clc_send_decline(smc, reason_code);
424 if (rc < 0) 428 if (rc < 0) {
429 if (smc->sk.sk_state == SMC_INIT)
430 sock_put(&smc->sk); /* passive closing */
425 return rc; 431 return rc;
432 }
426 } 433 }
427 return smc_connect_fallback(smc); 434 return smc_connect_fallback(smc);
428} 435}
@@ -435,8 +442,6 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code,
435 smc_lgr_forget(smc->conn.lgr); 442 smc_lgr_forget(smc->conn.lgr);
436 mutex_unlock(&smc_create_lgr_pending); 443 mutex_unlock(&smc_create_lgr_pending);
437 smc_conn_free(&smc->conn); 444 smc_conn_free(&smc->conn);
438 if (reason_code < 0 && smc->sk.sk_state == SMC_INIT)
439 sock_put(&smc->sk); /* passive closing */
440 return reason_code; 445 return reason_code;
441} 446}
442 447
@@ -1452,7 +1457,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
1452 1457
1453 if (optlen < sizeof(int)) 1458 if (optlen < sizeof(int))
1454 return -EINVAL; 1459 return -EINVAL;
1455 get_user(val, (int __user *)optval); 1460 if (get_user(val, (int __user *)optval))
1461 return -EFAULT;
1456 1462
1457 lock_sock(sk); 1463 lock_sock(sk);
1458 switch (optname) { 1464 switch (optname) {
@@ -1520,10 +1526,13 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
1520 return -EBADF; 1526 return -EBADF;
1521 return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); 1527 return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
1522 } 1528 }
1529 lock_sock(&smc->sk);
1523 switch (cmd) { 1530 switch (cmd) {
1524 case SIOCINQ: /* same as FIONREAD */ 1531 case SIOCINQ: /* same as FIONREAD */
1525 if (smc->sk.sk_state == SMC_LISTEN) 1532 if (smc->sk.sk_state == SMC_LISTEN) {
1533 release_sock(&smc->sk);
1526 return -EINVAL; 1534 return -EINVAL;
1535 }
1527 if (smc->sk.sk_state == SMC_INIT || 1536 if (smc->sk.sk_state == SMC_INIT ||
1528 smc->sk.sk_state == SMC_CLOSED) 1537 smc->sk.sk_state == SMC_CLOSED)
1529 answ = 0; 1538 answ = 0;
@@ -1532,8 +1541,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
1532 break; 1541 break;
1533 case SIOCOUTQ: 1542 case SIOCOUTQ:
1534 /* output queue size (not send + not acked) */ 1543 /* output queue size (not send + not acked) */
1535 if (smc->sk.sk_state == SMC_LISTEN) 1544 if (smc->sk.sk_state == SMC_LISTEN) {
1545 release_sock(&smc->sk);
1536 return -EINVAL; 1546 return -EINVAL;
1547 }
1537 if (smc->sk.sk_state == SMC_INIT || 1548 if (smc->sk.sk_state == SMC_INIT ||
1538 smc->sk.sk_state == SMC_CLOSED) 1549 smc->sk.sk_state == SMC_CLOSED)
1539 answ = 0; 1550 answ = 0;
@@ -1543,8 +1554,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
1543 break; 1554 break;
1544 case SIOCOUTQNSD: 1555 case SIOCOUTQNSD:
1545 /* output queue size (not send only) */ 1556 /* output queue size (not send only) */
1546 if (smc->sk.sk_state == SMC_LISTEN) 1557 if (smc->sk.sk_state == SMC_LISTEN) {
1558 release_sock(&smc->sk);
1547 return -EINVAL; 1559 return -EINVAL;
1560 }
1548 if (smc->sk.sk_state == SMC_INIT || 1561 if (smc->sk.sk_state == SMC_INIT ||
1549 smc->sk.sk_state == SMC_CLOSED) 1562 smc->sk.sk_state == SMC_CLOSED)
1550 answ = 0; 1563 answ = 0;
@@ -1552,8 +1565,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
1552 answ = smc_tx_prepared_sends(&smc->conn); 1565 answ = smc_tx_prepared_sends(&smc->conn);
1553 break; 1566 break;
1554 case SIOCATMARK: 1567 case SIOCATMARK:
1555 if (smc->sk.sk_state == SMC_LISTEN) 1568 if (smc->sk.sk_state == SMC_LISTEN) {
1569 release_sock(&smc->sk);
1556 return -EINVAL; 1570 return -EINVAL;
1571 }
1557 if (smc->sk.sk_state == SMC_INIT || 1572 if (smc->sk.sk_state == SMC_INIT ||
1558 smc->sk.sk_state == SMC_CLOSED) { 1573 smc->sk.sk_state == SMC_CLOSED) {
1559 answ = 0; 1574 answ = 0;
@@ -1569,8 +1584,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
1569 } 1584 }
1570 break; 1585 break;
1571 default: 1586 default:
1587 release_sock(&smc->sk);
1572 return -ENOIOCTLCMD; 1588 return -ENOIOCTLCMD;
1573 } 1589 }
1590 release_sock(&smc->sk);
1574 1591
1575 return put_user(answ, (int __user *)arg); 1592 return put_user(answ, (int __user *)arg);
1576} 1593}
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 717449b1da0b..ae5d168653ce 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -250,6 +250,7 @@ out:
250int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, 250int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
251 u8 expected_type) 251 u8 expected_type)
252{ 252{
253 long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
253 struct sock *clc_sk = smc->clcsock->sk; 254 struct sock *clc_sk = smc->clcsock->sk;
254 struct smc_clc_msg_hdr *clcm = buf; 255 struct smc_clc_msg_hdr *clcm = buf;
255 struct msghdr msg = {NULL, 0}; 256 struct msghdr msg = {NULL, 0};
@@ -306,7 +307,6 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
306 memset(&msg, 0, sizeof(struct msghdr)); 307 memset(&msg, 0, sizeof(struct msghdr));
307 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen); 308 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
308 krflags = MSG_WAITALL; 309 krflags = MSG_WAITALL;
309 smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
310 len = sock_recvmsg(smc->clcsock, &msg, krflags); 310 len = sock_recvmsg(smc->clcsock, &msg, krflags);
311 if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) { 311 if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
312 smc->sk.sk_err = EPROTO; 312 smc->sk.sk_err = EPROTO;
@@ -322,6 +322,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
322 } 322 }
323 323
324out: 324out:
325 smc->clcsock->sk->sk_rcvtimeo = rcvtimeo;
325 return reason_code; 326 return reason_code;
326} 327}
327 328
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index fa41d9881741..ac961dfb1ea1 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -107,6 +107,8 @@ static void smc_close_active_abort(struct smc_sock *smc)
107 } 107 }
108 switch (sk->sk_state) { 108 switch (sk->sk_state) {
109 case SMC_INIT: 109 case SMC_INIT:
110 sk->sk_state = SMC_PEERABORTWAIT;
111 break;
110 case SMC_ACTIVE: 112 case SMC_ACTIVE:
111 sk->sk_state = SMC_PEERABORTWAIT; 113 sk->sk_state = SMC_PEERABORTWAIT;
112 release_sock(sk); 114 release_sock(sk);
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index cee666400752..f82886b7d1d8 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -495,7 +495,8 @@ out:
495 495
496void smc_tx_consumer_update(struct smc_connection *conn, bool force) 496void smc_tx_consumer_update(struct smc_connection *conn, bool force)
497{ 497{
498 union smc_host_cursor cfed, cons; 498 union smc_host_cursor cfed, cons, prod;
499 int sender_free = conn->rmb_desc->len;
499 int to_confirm; 500 int to_confirm;
500 501
501 smc_curs_write(&cons, 502 smc_curs_write(&cons,
@@ -505,11 +506,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
505 smc_curs_read(&conn->rx_curs_confirmed, conn), 506 smc_curs_read(&conn->rx_curs_confirmed, conn),
506 conn); 507 conn);
507 to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons); 508 to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
509 if (to_confirm > conn->rmbe_update_limit) {
510 smc_curs_write(&prod,
511 smc_curs_read(&conn->local_rx_ctrl.prod, conn),
512 conn);
513 sender_free = conn->rmb_desc->len -
514 smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
515 }
508 516
509 if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || 517 if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
510 force || 518 force ||
511 ((to_confirm > conn->rmbe_update_limit) && 519 ((to_confirm > conn->rmbe_update_limit) &&
512 ((to_confirm > (conn->rmb_desc->len / 2)) || 520 ((sender_free <= (conn->rmb_desc->len / 2)) ||
513 conn->local_rx_ctrl.prod_flags.write_blocked))) { 521 conn->local_rx_ctrl.prod_flags.write_blocked))) {
514 if ((smc_cdc_get_slot_and_msg_send(conn) < 0) && 522 if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
515 conn->alert_token_local) { /* connection healthy */ 523 conn->alert_token_local) { /* connection healthy */
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 9f666e0650e2..2830709957bd 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -133,6 +133,8 @@ static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,
133} 133}
134 134
135/* tipc_disc_addr_trial(): - handle an address uniqueness trial from peer 135/* tipc_disc_addr_trial(): - handle an address uniqueness trial from peer
136 * Returns true if message should be dropped by caller, i.e., if it is a
137 * trial message or we are inside trial period. Otherwise false.
136 */ 138 */
137static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, 139static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
138 struct tipc_media_addr *maddr, 140 struct tipc_media_addr *maddr,
@@ -168,8 +170,9 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
168 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); 170 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
169 } 171 }
170 172
173 /* Accept regular link requests/responses only after trial period */
171 if (mtyp != DSC_TRIAL_MSG) 174 if (mtyp != DSC_TRIAL_MSG)
172 return false; 175 return trial;
173 176
174 sugg_addr = tipc_node_try_addr(net, peer_id, src); 177 sugg_addr = tipc_node_try_addr(net, peer_id, src);
175 if (sugg_addr) 178 if (sugg_addr)
@@ -284,7 +287,6 @@ static void tipc_disc_timeout(struct timer_list *t)
284{ 287{
285 struct tipc_discoverer *d = from_timer(d, t, timer); 288 struct tipc_discoverer *d = from_timer(d, t, timer);
286 struct tipc_net *tn = tipc_net(d->net); 289 struct tipc_net *tn = tipc_net(d->net);
287 u32 self = tipc_own_addr(d->net);
288 struct tipc_media_addr maddr; 290 struct tipc_media_addr maddr;
289 struct sk_buff *skb = NULL; 291 struct sk_buff *skb = NULL;
290 struct net *net = d->net; 292 struct net *net = d->net;
@@ -298,12 +300,14 @@ static void tipc_disc_timeout(struct timer_list *t)
298 goto exit; 300 goto exit;
299 } 301 }
300 302
301 /* Did we just leave the address trial period ? */ 303 /* Trial period over ? */
302 if (!self && !time_before(jiffies, tn->addr_trial_end)) { 304 if (!time_before(jiffies, tn->addr_trial_end)) {
303 self = tn->trial_addr; 305 /* Did we just leave it ? */
304 tipc_net_finalize(net, self); 306 if (!tipc_own_addr(net))
305 msg_set_prevnode(buf_msg(d->skb), self); 307 tipc_net_finalize(net, tn->trial_addr);
308
306 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); 309 msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
310 msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
307 } 311 }
308 312
309 /* Adjust timeout interval according to discovery phase */ 313 /* Adjust timeout interval according to discovery phase */
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 4fbaa0464405..a7f6964c3a4b 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -121,12 +121,17 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
121 121
122void tipc_net_finalize(struct net *net, u32 addr) 122void tipc_net_finalize(struct net *net, u32 addr)
123{ 123{
124 tipc_set_node_addr(net, addr); 124 struct tipc_net *tn = tipc_net(net);
125 smp_mb(); 125
126 tipc_named_reinit(net); 126 spin_lock_bh(&tn->node_list_lock);
127 tipc_sk_reinit(net); 127 if (!tipc_own_addr(net)) {
128 tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, 128 tipc_set_node_addr(net, addr);
129 TIPC_CLUSTER_SCOPE, 0, addr); 129 tipc_named_reinit(net);
130 tipc_sk_reinit(net);
131 tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
132 TIPC_CLUSTER_SCOPE, 0, addr);
133 }
134 spin_unlock_bh(&tn->node_list_lock);
130} 135}
131 136
132void tipc_net_stop(struct net *net) 137void tipc_net_stop(struct net *net)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 6a44eb812baf..0453bd451ce8 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -797,6 +797,7 @@ static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
797} 797}
798 798
799/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not 799/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
800 * Returns suggested address if any, otherwise 0
800 */ 801 */
801u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr) 802u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
802{ 803{
@@ -819,12 +820,14 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
819 if (n) { 820 if (n) {
820 addr = n->addr; 821 addr = n->addr;
821 tipc_node_put(n); 822 tipc_node_put(n);
823 return addr;
822 } 824 }
823 /* Even this node may be in trial phase */ 825
826 /* Even this node may be in conflict */
824 if (tn->trial_addr == addr) 827 if (tn->trial_addr == addr)
825 return tipc_node_suggest_addr(net, addr); 828 return tipc_node_suggest_addr(net, addr);
826 829
827 return addr; 830 return 0;
828} 831}
829 832
830void tipc_node_check_dest(struct net *net, u32 addr, 833void tipc_node_check_dest(struct net *net, u32 addr,
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d2380548f8f6..4618f1c31137 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -440,7 +440,7 @@ alloc_encrypted:
440 ret = tls_push_record(sk, msg->msg_flags, record_type); 440 ret = tls_push_record(sk, msg->msg_flags, record_type);
441 if (!ret) 441 if (!ret)
442 continue; 442 continue;
443 if (ret == -EAGAIN) 443 if (ret < 0)
444 goto send_end; 444 goto send_end;
445 445
446 copied -= try_to_copy; 446 copied -= try_to_copy;
@@ -701,6 +701,10 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
701 nsg = skb_to_sgvec(skb, &sgin[1], 701 nsg = skb_to_sgvec(skb, &sgin[1],
702 rxm->offset + tls_ctx->rx.prepend_size, 702 rxm->offset + tls_ctx->rx.prepend_size,
703 rxm->full_len - tls_ctx->rx.prepend_size); 703 rxm->full_len - tls_ctx->rx.prepend_size);
704 if (nsg < 0) {
705 ret = nsg;
706 goto out;
707 }
704 708
705 tls_make_aad(ctx->rx_aad_ciphertext, 709 tls_make_aad(ctx->rx_aad_ciphertext,
706 rxm->full_len - tls_ctx->rx.overhead_size, 710 rxm->full_len - tls_ctx->rx.overhead_size,
@@ -712,6 +716,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
712 rxm->full_len - tls_ctx->rx.overhead_size, 716 rxm->full_len - tls_ctx->rx.overhead_size,
713 skb, sk->sk_allocation); 717 skb, sk->sk_allocation);
714 718
719out:
715 if (sgin != &sgin_arr[0]) 720 if (sgin != &sgin_arr[0])
716 kfree(sgin); 721 kfree(sgin);
717 722
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 59fb7d3c36a3..72335c2e8108 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -199,8 +199,11 @@ static void xsk_destruct_skb(struct sk_buff *skb)
199{ 199{
200 u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg; 200 u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
201 struct xdp_sock *xs = xdp_sk(skb->sk); 201 struct xdp_sock *xs = xdp_sk(skb->sk);
202 unsigned long flags;
202 203
204 spin_lock_irqsave(&xs->tx_completion_lock, flags);
203 WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr)); 205 WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
206 spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
204 207
205 sock_wfree(skb); 208 sock_wfree(skb);
206} 209}
@@ -215,9 +218,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
215 struct sk_buff *skb; 218 struct sk_buff *skb;
216 int err = 0; 219 int err = 0;
217 220
218 if (unlikely(!xs->tx))
219 return -ENOBUFS;
220
221 mutex_lock(&xs->mutex); 221 mutex_lock(&xs->mutex);
222 222
223 while (xskq_peek_desc(xs->tx, &desc)) { 223 while (xskq_peek_desc(xs->tx, &desc)) {
@@ -230,22 +230,13 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
230 goto out; 230 goto out;
231 } 231 }
232 232
233 if (xskq_reserve_addr(xs->umem->cq)) { 233 if (xskq_reserve_addr(xs->umem->cq))
234 err = -EAGAIN;
235 goto out;
236 }
237
238 len = desc.len;
239 if (unlikely(len > xs->dev->mtu)) {
240 err = -EMSGSIZE;
241 goto out; 234 goto out;
242 }
243 235
244 if (xs->queue_id >= xs->dev->real_num_tx_queues) { 236 if (xs->queue_id >= xs->dev->real_num_tx_queues)
245 err = -ENXIO;
246 goto out; 237 goto out;
247 }
248 238
239 len = desc.len;
249 skb = sock_alloc_send_skb(sk, len, 1, &err); 240 skb = sock_alloc_send_skb(sk, len, 1, &err);
250 if (unlikely(!skb)) { 241 if (unlikely(!skb)) {
251 err = -EAGAIN; 242 err = -EAGAIN;
@@ -268,15 +259,15 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
268 skb->destructor = xsk_destruct_skb; 259 skb->destructor = xsk_destruct_skb;
269 260
270 err = dev_direct_xmit(skb, xs->queue_id); 261 err = dev_direct_xmit(skb, xs->queue_id);
262 xskq_discard_desc(xs->tx);
271 /* Ignore NET_XMIT_CN as packet might have been sent */ 263 /* Ignore NET_XMIT_CN as packet might have been sent */
272 if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) { 264 if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
273 err = -EAGAIN; 265 /* SKB completed but not sent */
274 /* SKB consumed by dev_direct_xmit() */ 266 err = -EBUSY;
275 goto out; 267 goto out;
276 } 268 }
277 269
278 sent_frame = true; 270 sent_frame = true;
279 xskq_discard_desc(xs->tx);
280 } 271 }
281 272
282out: 273out:
@@ -297,6 +288,8 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
297 return -ENXIO; 288 return -ENXIO;
298 if (unlikely(!(xs->dev->flags & IFF_UP))) 289 if (unlikely(!(xs->dev->flags & IFF_UP)))
299 return -ENETDOWN; 290 return -ENETDOWN;
291 if (unlikely(!xs->tx))
292 return -ENOBUFS;
300 if (need_wait) 293 if (need_wait)
301 return -EOPNOTSUPP; 294 return -EOPNOTSUPP;
302 295
@@ -755,6 +748,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
755 748
756 xs = xdp_sk(sk); 749 xs = xdp_sk(sk);
757 mutex_init(&xs->mutex); 750 mutex_init(&xs->mutex);
751 spin_lock_init(&xs->tx_completion_lock);
758 752
759 local_bh_disable(); 753 local_bh_disable();
760 sock_prot_inuse_add(net, &xsk_proto, 1); 754 sock_prot_inuse_add(net, &xsk_proto, 1);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index ef6a6f0ec949..52ecaf770642 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -62,14 +62,9 @@ static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
62 return (entries > dcnt) ? dcnt : entries; 62 return (entries > dcnt) ? dcnt : entries;
63} 63}
64 64
65static inline u32 xskq_nb_free_lazy(struct xsk_queue *q, u32 producer)
66{
67 return q->nentries - (producer - q->cons_tail);
68}
69
70static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt) 65static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
71{ 66{
72 u32 free_entries = xskq_nb_free_lazy(q, producer); 67 u32 free_entries = q->nentries - (producer - q->cons_tail);
73 68
74 if (free_entries >= dcnt) 69 if (free_entries >= dcnt)
75 return free_entries; 70 return free_entries;
@@ -129,7 +124,7 @@ static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
129{ 124{
130 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; 125 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
131 126
132 if (xskq_nb_free(q, q->prod_tail, LAZY_UPDATE_THRESHOLD) == 0) 127 if (xskq_nb_free(q, q->prod_tail, 1) == 0)
133 return -ENOSPC; 128 return -ENOSPC;
134 129
135 ring->desc[q->prod_tail++ & q->ring_mask] = addr; 130 ring->desc[q->prod_tail++ & q->ring_mask] = addr;
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
new file mode 100644
index 000000000000..8ae4940025f8
--- /dev/null
+++ b/samples/bpf/.gitignore
@@ -0,0 +1,49 @@
1cpustat
2fds_example
3lathist
4load_sock_ops
5lwt_len_hist
6map_perf_test
7offwaketime
8per_socket_stats_example
9sampleip
10sock_example
11sockex1
12sockex2
13sockex3
14spintest
15syscall_nrs.h
16syscall_tp
17task_fd_query
18tc_l2_redirect
19test_cgrp2_array_pin
20test_cgrp2_attach
21test_cgrp2_attach2
22test_cgrp2_sock
23test_cgrp2_sock2
24test_current_task_under_cgroup
25test_lru_dist
26test_map_in_map
27test_overhead
28test_probe_write_user
29trace_event
30trace_output
31tracex1
32tracex2
33tracex3
34tracex4
35tracex5
36tracex6
37tracex7
38xdp1
39xdp2
40xdp_adjust_tail
41xdp_fwd
42xdp_monitor
43xdp_redirect
44xdp_redirect_cpu
45xdp_redirect_map
46xdp_router_ipv4
47xdp_rxq_info
48xdp_tx_iptunnel
49xdpsock
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
index 95c16324760c..0b6f22feb2c9 100644
--- a/samples/bpf/parse_varlen.c
+++ b/samples/bpf/parse_varlen.c
@@ -6,6 +6,7 @@
6 */ 6 */
7#define KBUILD_MODNAME "foo" 7#define KBUILD_MODNAME "foo"
8#include <linux/if_ether.h> 8#include <linux/if_ether.h>
9#include <linux/if_vlan.h>
9#include <linux/ip.h> 10#include <linux/ip.h>
10#include <linux/ipv6.h> 11#include <linux/ipv6.h>
11#include <linux/in.h> 12#include <linux/in.h>
@@ -108,11 +109,6 @@ static int parse_ipv6(void *data, uint64_t nh_off, void *data_end)
108 return 0; 109 return 0;
109} 110}
110 111
111struct vlan_hdr {
112 uint16_t h_vlan_TCI;
113 uint16_t h_vlan_encapsulated_proto;
114};
115
116SEC("varlen") 112SEC("varlen")
117int handle_ingress(struct __sk_buff *skb) 113int handle_ingress(struct __sk_buff *skb)
118{ 114{
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
index 6caf47afa635..9d6dcaa9db92 100644
--- a/samples/bpf/test_overhead_user.c
+++ b/samples/bpf/test_overhead_user.c
@@ -6,6 +6,7 @@
6 */ 6 */
7#define _GNU_SOURCE 7#define _GNU_SOURCE
8#include <sched.h> 8#include <sched.h>
9#include <errno.h>
9#include <stdio.h> 10#include <stdio.h>
10#include <sys/types.h> 11#include <sys/types.h>
11#include <asm/unistd.h> 12#include <asm/unistd.h>
@@ -44,8 +45,13 @@ static void test_task_rename(int cpu)
44 exit(1); 45 exit(1);
45 } 46 }
46 start_time = time_get_ns(); 47 start_time = time_get_ns();
47 for (i = 0; i < MAX_CNT; i++) 48 for (i = 0; i < MAX_CNT; i++) {
48 write(fd, buf, sizeof(buf)); 49 if (write(fd, buf, sizeof(buf)) < 0) {
50 printf("task rename failed: %s\n", strerror(errno));
51 close(fd);
52 return;
53 }
54 }
49 printf("task_rename:%d: %lld events per sec\n", 55 printf("task_rename:%d: %lld events per sec\n",
50 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); 56 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
51 close(fd); 57 close(fd);
@@ -63,8 +69,13 @@ static void test_urandom_read(int cpu)
63 exit(1); 69 exit(1);
64 } 70 }
65 start_time = time_get_ns(); 71 start_time = time_get_ns();
66 for (i = 0; i < MAX_CNT; i++) 72 for (i = 0; i < MAX_CNT; i++) {
67 read(fd, buf, sizeof(buf)); 73 if (read(fd, buf, sizeof(buf)) < 0) {
74 printf("failed to read from /dev/urandom: %s\n", strerror(errno));
75 close(fd);
76 return;
77 }
78 }
68 printf("urandom_read:%d: %lld events per sec\n", 79 printf("urandom_read:%d: %lld events per sec\n",
69 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); 80 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
70 close(fd); 81 close(fd);
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index 1fa1becfa641..d08046ab81f0 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -122,6 +122,16 @@ static void print_stacks(void)
122 } 122 }
123} 123}
124 124
125static inline int generate_load(void)
126{
127 if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
128 printf("failed to generate some load with dd: %s\n", strerror(errno));
129 return -1;
130 }
131
132 return 0;
133}
134
125static void test_perf_event_all_cpu(struct perf_event_attr *attr) 135static void test_perf_event_all_cpu(struct perf_event_attr *attr)
126{ 136{
127 int nr_cpus = sysconf(_SC_NPROCESSORS_CONF); 137 int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
@@ -142,7 +152,11 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
142 assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0); 152 assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
143 assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0); 153 assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
144 } 154 }
145 system("dd if=/dev/zero of=/dev/null count=5000k status=none"); 155
156 if (generate_load() < 0) {
157 error = 1;
158 goto all_cpu_err;
159 }
146 print_stacks(); 160 print_stacks();
147all_cpu_err: 161all_cpu_err:
148 for (i--; i >= 0; i--) { 162 for (i--; i >= 0; i--) {
@@ -156,7 +170,7 @@ all_cpu_err:
156 170
157static void test_perf_event_task(struct perf_event_attr *attr) 171static void test_perf_event_task(struct perf_event_attr *attr)
158{ 172{
159 int pmu_fd; 173 int pmu_fd, error = 0;
160 174
161 /* per task perf event, enable inherit so the "dd ..." command can be traced properly. 175 /* per task perf event, enable inherit so the "dd ..." command can be traced properly.
162 * Enabling inherit will cause bpf_perf_prog_read_time helper failure. 176 * Enabling inherit will cause bpf_perf_prog_read_time helper failure.
@@ -171,10 +185,17 @@ static void test_perf_event_task(struct perf_event_attr *attr)
171 } 185 }
172 assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0); 186 assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
173 assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0); 187 assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);
174 system("dd if=/dev/zero of=/dev/null count=5000k status=none"); 188
189 if (generate_load() < 0) {
190 error = 1;
191 goto err;
192 }
175 print_stacks(); 193 print_stacks();
194err:
176 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); 195 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
177 close(pmu_fd); 196 close(pmu_fd);
197 if (error)
198 int_exit(0);
178} 199}
179 200
180static void test_bpf_perf_event(void) 201static void test_bpf_perf_event(void)
diff --git a/samples/bpf/xdp2skb_meta.sh b/samples/bpf/xdp2skb_meta.sh
index b9c9549c4c27..4bde9d066c46 100755
--- a/samples/bpf/xdp2skb_meta.sh
+++ b/samples/bpf/xdp2skb_meta.sh
@@ -16,8 +16,8 @@
16BPF_FILE=xdp2skb_meta_kern.o 16BPF_FILE=xdp2skb_meta_kern.o
17DIR=$(dirname $0) 17DIR=$(dirname $0)
18 18
19export TC=/usr/sbin/tc 19[ -z "$TC" ] && TC=tc
20export IP=/usr/sbin/ip 20[ -z "$IP" ] && IP=ip
21 21
22function usage() { 22function usage() {
23 echo "" 23 echo ""
@@ -53,7 +53,7 @@ function _call_cmd() {
53 local allow_fail="$2" 53 local allow_fail="$2"
54 shift 2 54 shift 2
55 if [[ -n "$VERBOSE" ]]; then 55 if [[ -n "$VERBOSE" ]]; then
56 echo "$(basename $cmd) $@" 56 echo "$cmd $@"
57 fi 57 fi
58 if [[ -n "$DRYRUN" ]]; then 58 if [[ -n "$DRYRUN" ]]; then
59 return 59 return
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index d69c8d78d3fd..5904b1543831 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -729,7 +729,7 @@ static void kick_tx(int fd)
729 int ret; 729 int ret;
730 730
731 ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0); 731 ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
732 if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN) 732 if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
733 return; 733 return;
734 lassert(0); 734 lassert(0);
735} 735}
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 412a70cce558..26de7d5aa5c8 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -152,6 +152,7 @@ regex_asm=(
152) 152)
153regex_c=( 153regex_c=(
154 '/^SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/sys_\1/' 154 '/^SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/sys_\1/'
155 '/^BPF_CALL_[0-9](\([[:alnum:]_]*\).*/\1/'
155 '/^COMPAT_SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/compat_sys_\1/' 156 '/^COMPAT_SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/compat_sys_\1/'
156 '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1/' 157 '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1/'
157 '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1_rcuidle/' 158 '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1_rcuidle/'
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 2ecd27b670d7..f5f7bcc96046 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -4975,6 +4975,24 @@ static struct bpf_test tests[] = {
4975 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 4975 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4976 }, 4976 },
4977 { 4977 {
4978 "make headroom for LWT_XMIT",
4979 .insns = {
4980 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4981 BPF_MOV64_IMM(BPF_REG_2, 34),
4982 BPF_MOV64_IMM(BPF_REG_3, 0),
4983 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
4984 /* split for s390 to succeed */
4985 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4986 BPF_MOV64_IMM(BPF_REG_2, 42),
4987 BPF_MOV64_IMM(BPF_REG_3, 0),
4988 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
4989 BPF_MOV64_IMM(BPF_REG_0, 0),
4990 BPF_EXIT_INSN(),
4991 },
4992 .result = ACCEPT,
4993 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4994 },
4995 {
4978 "invalid access of tc_classid for LWT_IN", 4996 "invalid access of tc_classid for LWT_IN",
4979 .insns = { 4997 .insns = {
4980 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 4998 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
@@ -12554,8 +12572,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
12554 } 12572 }
12555 12573
12556 if (fd_prog >= 0) { 12574 if (fd_prog >= 0) {
12575 __u8 tmp[TEST_DATA_LEN << 2];
12576 __u32 size_tmp = sizeof(tmp);
12577
12557 err = bpf_prog_test_run(fd_prog, 1, test->data, 12578 err = bpf_prog_test_run(fd_prog, 1, test->data,
12558 sizeof(test->data), NULL, NULL, 12579 sizeof(test->data), tmp, &size_tmp,
12559 &retval, NULL); 12580 &retval, NULL);
12560 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) { 12581 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
12561 printf("Unexpected bpf_prog_test_run error\n"); 12582 printf("Unexpected bpf_prog_test_run error\n");
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 78245d60d8bc..0f45633bd634 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -740,13 +740,6 @@ ipv6_rt_add()
740 run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64" 740 run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
741 log_test $? 2 "Attempt to add duplicate route - reject route" 741 log_test $? 2 "Attempt to add duplicate route - reject route"
742 742
743 # iproute2 prepend only sets NLM_F_CREATE
744 # - adds a new route; does NOT convert existing route to ECMP
745 add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
746 run_cmd "$IP -6 ro prepend 2001:db8:104::/64 via 2001:db8:103::2"
747 check_route6 "2001:db8:104::/64 via 2001:db8:101::2 dev veth1 metric 1024 2001:db8:104::/64 via 2001:db8:103::2 dev veth3 metric 1024"
748 log_test $? 0 "Add new route for existing prefix (w/o NLM_F_EXCL)"
749
750 # route append with same prefix adds a new route 743 # route append with same prefix adds a new route
751 # - iproute2 sets NLM_F_CREATE | NLM_F_APPEND 744 # - iproute2 sets NLM_F_CREATE | NLM_F_APPEND
752 add_route6 "2001:db8:104::/64" "via 2001:db8:101::2" 745 add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
@@ -754,27 +747,6 @@ ipv6_rt_add()
754 check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" 747 check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
755 log_test $? 0 "Append nexthop to existing route - gw" 748 log_test $? 0 "Append nexthop to existing route - gw"
756 749
757 add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
758 run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3"
759 check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop dev veth3 weight 1"
760 log_test $? 0 "Append nexthop to existing route - dev only"
761
762 # multipath route can not have a nexthop that is a reject route
763 add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
764 run_cmd "$IP -6 ro append unreachable 2001:db8:104::/64"
765 log_test $? 2 "Append nexthop to existing route - reject route"
766
767 # reject route can not be converted to multipath route
768 run_cmd "$IP -6 ro flush 2001:db8:104::/64"
769 run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
770 run_cmd "$IP -6 ro append 2001:db8:104::/64 via 2001:db8:103::2"
771 log_test $? 2 "Append nexthop to existing reject route - gw"
772
773 run_cmd "$IP -6 ro flush 2001:db8:104::/64"
774 run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
775 run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3"
776 log_test $? 2 "Append nexthop to existing reject route - dev only"
777
778 # insert mpath directly 750 # insert mpath directly
779 add_route6 "2001:db8:104::/64" "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" 751 add_route6 "2001:db8:104::/64" "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
780 check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" 752 check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
@@ -819,13 +791,6 @@ ipv6_rt_replace_single()
819 check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::3 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" 791 check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::3 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
820 log_test $? 0 "Single path with multipath" 792 log_test $? 0 "Single path with multipath"
821 793
822 # single path with reject
823 #
824 add_initial_route6 "nexthop via 2001:db8:101::2"
825 run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64"
826 check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024"
827 log_test $? 0 "Single path with reject route"
828
829 # single path with single path using MULTIPATH attribute 794 # single path with single path using MULTIPATH attribute
830 # 795 #
831 add_initial_route6 "via 2001:db8:101::2" 796 add_initial_route6 "via 2001:db8:101::2"
@@ -873,12 +838,6 @@ ipv6_rt_replace_mpath()
873 check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024" 838 check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024"
874 log_test $? 0 "Multipath with single path via multipath attribute" 839 log_test $? 0 "Multipath with single path via multipath attribute"
875 840
876 # multipath with reject
877 add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
878 run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64"
879 check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024"
880 log_test $? 0 "Multipath with reject route"
881
882 # route replace fails - invalid nexthop 1 841 # route replace fails - invalid nexthop 1
883 add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" 842 add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
884 run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3" 843 run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3"
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index 792fa4d0285e..850767befa47 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -35,9 +35,6 @@ run_udp() {
35 35
36 echo "udp gso" 36 echo "udp gso"
37 run_in_netns ${args} -S 37 run_in_netns ${args} -S
38
39 echo "udp gso zerocopy"
40 run_in_netns ${args} -S -z
41} 38}
42 39
43run_tcp() { 40run_tcp() {