aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/networking/dsa/dsa.txt10
-rw-r--r--Documentation/networking/switchdev.txt10
-rw-r--r--MAINTAINERS17
-rw-r--r--drivers/net/bonding/bond_main.c35
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c149
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c128
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c15
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c17
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/phy/marvell10g.c6
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--include/linux/phy.h8
-rw-r--r--include/linux/virtio_net.h14
-rw-r--r--include/net/phonet/pep.h5
-rw-r--r--include/net/xfrm.h12
-rw-r--r--kernel/bpf/lpm_trie.c1
-rw-r--r--net/bpf/test_run.c45
-rw-r--r--net/bridge/br_multicast.c9
-rw-r--r--net/compat.c6
-rw-r--r--net/dsa/port.c7
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/ip_gre.c33
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/fou6.c2
-rw-r--r--net/ipv6/ip6_gre.c39
-rw-r--r--net/ipv6/route.c32
-rw-r--r--net/ipv6/udp.c12
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/key/af_key.c42
-rw-r--r--net/mac80211/main.c4
-rw-r--r--net/mac80211/rx.c7
-rw-r--r--net/phonet/pep.c32
-rw-r--r--net/sctp/transport.c3
-rw-r--r--net/smc/smc.h6
-rw-r--r--net/tipc/socket.c11
-rw-r--r--net/unix/af_unix.c57
-rw-r--r--net/unix/diag.c3
-rw-r--r--net/x25/af_x25.c13
-rw-r--r--net/xdp/xsk.c16
-rw-r--r--net/xfrm/xfrm_interface.c4
-rw-r--r--net/xfrm/xfrm_policy.c4
-rw-r--r--net/xfrm/xfrm_state.c30
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--security/lsm_audit.c10
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c10
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh1
64 files changed, 568 insertions, 420 deletions
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 25170ad7d25b..101f2b2c69ad 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -533,16 +533,12 @@ Bridge VLAN filtering
533 function that the driver has to call for each VLAN the given port is a member 533 function that the driver has to call for each VLAN the given port is a member
534 of. A switchdev object is used to carry the VID and bridge flags. 534 of. A switchdev object is used to carry the VID and bridge flags.
535 535
536- port_fdb_prepare: bridge layer function invoked when the bridge prepares the
537 installation of a Forwarding Database entry. If the operation is not
538 supported, this function should return -EOPNOTSUPP to inform the bridge code
539 to fallback to a software implementation. No hardware setup must be done in
540 this function. See port_fdb_add for this and details.
541
542- port_fdb_add: bridge layer function invoked when the bridge wants to install a 536- port_fdb_add: bridge layer function invoked when the bridge wants to install a
543 Forwarding Database entry, the switch hardware should be programmed with the 537 Forwarding Database entry, the switch hardware should be programmed with the
544 specified address in the specified VLAN Id in the forwarding database 538 specified address in the specified VLAN Id in the forwarding database
545 associated with this VLAN ID 539 associated with this VLAN ID. If the operation is not supported, this
540 function should return -EOPNOTSUPP to inform the bridge code to fallback to
541 a software implementation.
546 542
547Note: VLAN ID 0 corresponds to the port private database, which, in the context 543Note: VLAN ID 0 corresponds to the port private database, which, in the context
548of DSA, would be the its port-based VLAN, used by the associated bridge device. 544of DSA, would be the its port-based VLAN, used by the associated bridge device.
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 82236a17b5e6..97b7ca8b9b86 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -92,11 +92,11 @@ device.
92Switch ID 92Switch ID
93^^^^^^^^^ 93^^^^^^^^^
94 94
95The switchdev driver must implement the switchdev op switchdev_port_attr_get 95The switchdev driver must implement the net_device operation
96for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same 96ndo_get_port_parent_id for each port netdev, returning the same physical ID for
97physical ID for each port of a switch. The ID must be unique between switches 97each port of a switch. The ID must be unique between switches on the same
98on the same system. The ID does not need to be unique between switches on 98system. The ID does not need to be unique between switches on different
99different systems. 99systems.
100 100
101The switch ID is used to locate ports on a switch and to know if aggregated 101The switch ID is used to locate ports on a switch and to know if aggregated
102ports belong to the same switch. 102ports belong to the same switch.
diff --git a/MAINTAINERS b/MAINTAINERS
index e6e17d8c5aae..dce5c099f43c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2851,7 +2851,7 @@ R: Martin KaFai Lau <kafai@fb.com>
2851R: Song Liu <songliubraving@fb.com> 2851R: Song Liu <songliubraving@fb.com>
2852R: Yonghong Song <yhs@fb.com> 2852R: Yonghong Song <yhs@fb.com>
2853L: netdev@vger.kernel.org 2853L: netdev@vger.kernel.org
2854L: linux-kernel@vger.kernel.org 2854L: bpf@vger.kernel.org
2855T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git 2855T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
2856T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git 2856T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
2857Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 2857Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
@@ -2881,6 +2881,7 @@ N: bpf
2881BPF JIT for ARM 2881BPF JIT for ARM
2882M: Shubham Bansal <illusionist.neo@gmail.com> 2882M: Shubham Bansal <illusionist.neo@gmail.com>
2883L: netdev@vger.kernel.org 2883L: netdev@vger.kernel.org
2884L: bpf@vger.kernel.org
2884S: Maintained 2885S: Maintained
2885F: arch/arm/net/ 2886F: arch/arm/net/
2886 2887
@@ -2889,18 +2890,21 @@ M: Daniel Borkmann <daniel@iogearbox.net>
2889M: Alexei Starovoitov <ast@kernel.org> 2890M: Alexei Starovoitov <ast@kernel.org>
2890M: Zi Shen Lim <zlim.lnx@gmail.com> 2891M: Zi Shen Lim <zlim.lnx@gmail.com>
2891L: netdev@vger.kernel.org 2892L: netdev@vger.kernel.org
2893L: bpf@vger.kernel.org
2892S: Supported 2894S: Supported
2893F: arch/arm64/net/ 2895F: arch/arm64/net/
2894 2896
2895BPF JIT for MIPS (32-BIT AND 64-BIT) 2897BPF JIT for MIPS (32-BIT AND 64-BIT)
2896M: Paul Burton <paul.burton@mips.com> 2898M: Paul Burton <paul.burton@mips.com>
2897L: netdev@vger.kernel.org 2899L: netdev@vger.kernel.org
2900L: bpf@vger.kernel.org
2898S: Maintained 2901S: Maintained
2899F: arch/mips/net/ 2902F: arch/mips/net/
2900 2903
2901BPF JIT for NFP NICs 2904BPF JIT for NFP NICs
2902M: Jakub Kicinski <jakub.kicinski@netronome.com> 2905M: Jakub Kicinski <jakub.kicinski@netronome.com>
2903L: netdev@vger.kernel.org 2906L: netdev@vger.kernel.org
2907L: bpf@vger.kernel.org
2904S: Supported 2908S: Supported
2905F: drivers/net/ethernet/netronome/nfp/bpf/ 2909F: drivers/net/ethernet/netronome/nfp/bpf/
2906 2910
@@ -2908,6 +2912,7 @@ BPF JIT for POWERPC (32-BIT AND 64-BIT)
2908M: Naveen N. Rao <naveen.n.rao@linux.ibm.com> 2912M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
2909M: Sandipan Das <sandipan@linux.ibm.com> 2913M: Sandipan Das <sandipan@linux.ibm.com>
2910L: netdev@vger.kernel.org 2914L: netdev@vger.kernel.org
2915L: bpf@vger.kernel.org
2911S: Maintained 2916S: Maintained
2912F: arch/powerpc/net/ 2917F: arch/powerpc/net/
2913 2918
@@ -2915,6 +2920,7 @@ BPF JIT for S390
2915M: Martin Schwidefsky <schwidefsky@de.ibm.com> 2920M: Martin Schwidefsky <schwidefsky@de.ibm.com>
2916M: Heiko Carstens <heiko.carstens@de.ibm.com> 2921M: Heiko Carstens <heiko.carstens@de.ibm.com>
2917L: netdev@vger.kernel.org 2922L: netdev@vger.kernel.org
2923L: bpf@vger.kernel.org
2918S: Maintained 2924S: Maintained
2919F: arch/s390/net/ 2925F: arch/s390/net/
2920X: arch/s390/net/pnet.c 2926X: arch/s390/net/pnet.c
@@ -2922,12 +2928,14 @@ X: arch/s390/net/pnet.c
2922BPF JIT for SPARC (32-BIT AND 64-BIT) 2928BPF JIT for SPARC (32-BIT AND 64-BIT)
2923M: David S. Miller <davem@davemloft.net> 2929M: David S. Miller <davem@davemloft.net>
2924L: netdev@vger.kernel.org 2930L: netdev@vger.kernel.org
2931L: bpf@vger.kernel.org
2925S: Maintained 2932S: Maintained
2926F: arch/sparc/net/ 2933F: arch/sparc/net/
2927 2934
2928BPF JIT for X86 32-BIT 2935BPF JIT for X86 32-BIT
2929M: Wang YanQing <udknight@gmail.com> 2936M: Wang YanQing <udknight@gmail.com>
2930L: netdev@vger.kernel.org 2937L: netdev@vger.kernel.org
2938L: bpf@vger.kernel.org
2931S: Maintained 2939S: Maintained
2932F: arch/x86/net/bpf_jit_comp32.c 2940F: arch/x86/net/bpf_jit_comp32.c
2933 2941
@@ -2935,6 +2943,7 @@ BPF JIT for X86 64-BIT
2935M: Alexei Starovoitov <ast@kernel.org> 2943M: Alexei Starovoitov <ast@kernel.org>
2936M: Daniel Borkmann <daniel@iogearbox.net> 2944M: Daniel Borkmann <daniel@iogearbox.net>
2937L: netdev@vger.kernel.org 2945L: netdev@vger.kernel.org
2946L: bpf@vger.kernel.org
2938S: Supported 2947S: Supported
2939F: arch/x86/net/ 2948F: arch/x86/net/
2940X: arch/x86/net/bpf_jit_comp32.c 2949X: arch/x86/net/bpf_jit_comp32.c
@@ -3389,9 +3398,8 @@ F: Documentation/media/v4l-drivers/cafe_ccic*
3389F: drivers/media/platform/marvell-ccic/ 3398F: drivers/media/platform/marvell-ccic/
3390 3399
3391CAIF NETWORK LAYER 3400CAIF NETWORK LAYER
3392M: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
3393L: netdev@vger.kernel.org 3401L: netdev@vger.kernel.org
3394S: Supported 3402S: Orphan
3395F: Documentation/networking/caif/ 3403F: Documentation/networking/caif/
3396F: drivers/net/caif/ 3404F: drivers/net/caif/
3397F: include/uapi/linux/caif/ 3405F: include/uapi/linux/caif/
@@ -8486,6 +8494,7 @@ L7 BPF FRAMEWORK
8486M: John Fastabend <john.fastabend@gmail.com> 8494M: John Fastabend <john.fastabend@gmail.com>
8487M: Daniel Borkmann <daniel@iogearbox.net> 8495M: Daniel Borkmann <daniel@iogearbox.net>
8488L: netdev@vger.kernel.org 8496L: netdev@vger.kernel.org
8497L: bpf@vger.kernel.org
8489S: Maintained 8498S: Maintained
8490F: include/linux/skmsg.h 8499F: include/linux/skmsg.h
8491F: net/core/skmsg.c 8500F: net/core/skmsg.c
@@ -16713,6 +16722,7 @@ M: Jesper Dangaard Brouer <hawk@kernel.org>
16713M: John Fastabend <john.fastabend@gmail.com> 16722M: John Fastabend <john.fastabend@gmail.com>
16714L: netdev@vger.kernel.org 16723L: netdev@vger.kernel.org
16715L: xdp-newbies@vger.kernel.org 16724L: xdp-newbies@vger.kernel.org
16725L: bpf@vger.kernel.org
16716S: Supported 16726S: Supported
16717F: net/core/xdp.c 16727F: net/core/xdp.c
16718F: include/net/xdp.h 16728F: include/net/xdp.h
@@ -16726,6 +16736,7 @@ XDP SOCKETS (AF_XDP)
16726M: Björn Töpel <bjorn.topel@intel.com> 16736M: Björn Töpel <bjorn.topel@intel.com>
16727M: Magnus Karlsson <magnus.karlsson@intel.com> 16737M: Magnus Karlsson <magnus.karlsson@intel.com>
16728L: netdev@vger.kernel.org 16738L: netdev@vger.kernel.org
16739L: bpf@vger.kernel.org
16729S: Maintained 16740S: Maintained
16730F: kernel/bpf/xskmap.c 16741F: kernel/bpf/xskmap.c
16731F: net/xdp/ 16742F: net/xdp/
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 485462d3087f..537c90c8eb0a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1183,29 +1183,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1183 } 1183 }
1184 } 1184 }
1185 1185
1186 /* Link-local multicast packets should be passed to the 1186 /*
1187 * stack on the link they arrive as well as pass them to the 1187 * For packets determined by bond_should_deliver_exact_match() call to
1188 * bond-master device. These packets are mostly usable when 1188 * be suppressed we want to make an exception for link-local packets.
1189 * stack receives it with the link on which they arrive 1189 * This is necessary for e.g. LLDP daemons to be able to monitor
1190 * (e.g. LLDP) they also must be available on master. Some of 1190 * inactive slave links without being forced to bind to them
1191 * the use cases include (but are not limited to): LLDP agents 1191 * explicitly.
1192 * that must be able to operate both on enslaved interfaces as 1192 *
1193 * well as on bonds themselves; linux bridges that must be able 1193 * At the same time, packets that are passed to the bonding master
1194 * to process/pass BPDUs from attached bonds when any kind of 1194 * (including link-local ones) can have their originating interface
1195 * STP version is enabled on the network. 1195 * determined via PACKET_ORIGDEV socket option.
1196 */ 1196 */
1197 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) { 1197 if (bond_should_deliver_exact_match(skb, slave, bond)) {
1198 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1198 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
1199 1199 return RX_HANDLER_PASS;
1200 if (nskb) {
1201 nskb->dev = bond->dev;
1202 nskb->queue_mapping = 0;
1203 netif_rx(nskb);
1204 }
1205 return RX_HANDLER_PASS;
1206 }
1207 if (bond_should_deliver_exact_match(skb, slave, bond))
1208 return RX_HANDLER_EXACT; 1200 return RX_HANDLER_EXACT;
1201 }
1209 1202
1210 skb->dev = bond->dev; 1203 skb->dev = bond->dev;
1211 1204
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bb41becb6609..31ff1e0d1baa 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1335{ 1335{
1336 struct net_device *netdev; 1336 struct net_device *netdev;
1337 struct atl2_adapter *adapter; 1337 struct atl2_adapter *adapter;
1338 static int cards_found; 1338 static int cards_found = 0;
1339 unsigned long mmio_start; 1339 unsigned long mmio_start;
1340 int mmio_len; 1340 int mmio_len;
1341 int err; 1341 int err;
1342 1342
1343 cards_found = 0;
1344
1345 err = pci_enable_device(pdev); 1343 err = pci_enable_device(pdev);
1346 if (err) 1344 if (err)
1347 return err; 1345 return err;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8bc7e495b027..d95730c6e0f2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3903,7 +3903,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3903 if (len) 3903 if (len)
3904 break; 3904 break;
3905 /* on first few passes, just barely sleep */ 3905 /* on first few passes, just barely sleep */
3906 if (i < DFLT_HWRM_CMD_TIMEOUT) 3906 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3907 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 3907 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3908 HWRM_SHORT_MAX_TIMEOUT); 3908 HWRM_SHORT_MAX_TIMEOUT);
3909 else 3909 else
@@ -3926,7 +3926,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3926 dma_rmb(); 3926 dma_rmb();
3927 if (*valid) 3927 if (*valid)
3928 break; 3928 break;
3929 udelay(1); 3929 usleep_range(1, 5);
3930 } 3930 }
3931 3931
3932 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 3932 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a451796deefe..2fb653e0048d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -582,7 +582,7 @@ struct nqe_cn {
582 (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ 582 (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
583 ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) 583 ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
584 584
585#define HWRM_VALID_BIT_DELAY_USEC 20 585#define HWRM_VALID_BIT_DELAY_USEC 150
586 586
587#define BNXT_HWRM_CHNL_CHIMP 0 587#define BNXT_HWRM_CHNL_CHIMP 0
588#define BNXT_HWRM_CHNL_KONG 1 588#define BNXT_HWRM_CHNL_KONG 1
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index f4d81765221e..62636c1ed141 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -271,7 +271,7 @@ struct xcast_addr_list {
271}; 271};
272 272
273struct nicvf_work { 273struct nicvf_work {
274 struct delayed_work work; 274 struct work_struct work;
275 u8 mode; 275 u8 mode;
276 struct xcast_addr_list *mc; 276 struct xcast_addr_list *mc;
277}; 277};
@@ -327,7 +327,11 @@ struct nicvf {
327 struct nicvf_work rx_mode_work; 327 struct nicvf_work rx_mode_work;
328 /* spinlock to protect workqueue arguments from concurrent access */ 328 /* spinlock to protect workqueue arguments from concurrent access */
329 spinlock_t rx_mode_wq_lock; 329 spinlock_t rx_mode_wq_lock;
330 330 /* workqueue for handling kernel ndo_set_rx_mode() calls */
331 struct workqueue_struct *nicvf_rx_mode_wq;
332 /* mutex to protect VF's mailbox contents from concurrent access */
333 struct mutex rx_mode_mtx;
334 struct delayed_work link_change_work;
331 /* PTP timestamp */ 335 /* PTP timestamp */
332 struct cavium_ptp *ptp_clock; 336 struct cavium_ptp *ptp_clock;
333 /* Inbound timestamping is on */ 337 /* Inbound timestamping is on */
@@ -575,10 +579,8 @@ struct set_ptp {
575 579
576struct xcast { 580struct xcast {
577 u8 msg; 581 u8 msg;
578 union { 582 u8 mode;
579 u8 mode; 583 u64 mac:48;
580 u64 mac;
581 } data;
582}; 584};
583 585
584/* 128 bit shared memory between PF and each VF */ 586/* 128 bit shared memory between PF and each VF */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6c8dcb65ff03..c90252829ed3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -57,14 +57,8 @@ struct nicpf {
57#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) 57#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
58#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) 58#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
59 u8 *vf_lmac_map; 59 u8 *vf_lmac_map;
60 struct delayed_work dwork;
61 struct workqueue_struct *check_link;
62 u8 *link;
63 u8 *duplex;
64 u32 *speed;
65 u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; 60 u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
66 u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; 61 u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
67 bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
68 62
69 /* MSI-X */ 63 /* MSI-X */
70 u8 num_vec; 64 u8 num_vec;
@@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
929 nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val); 923 nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
930} 924}
931 925
926/* Get BGX LMAC link status and update corresponding VF
927 * if there is a change, valid only if internal L2 switch
928 * is not present otherwise VF link is always treated as up
929 */
930static void nic_link_status_get(struct nicpf *nic, u8 vf)
931{
932 union nic_mbx mbx = {};
933 struct bgx_link_status link;
934 u8 bgx, lmac;
935
936 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
937
938 /* Get BGX, LMAC indices for the VF */
939 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
940 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
941
942 /* Get interface link status */
943 bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
944
945 /* Send a mbox message to VF with current link status */
946 mbx.link_status.link_up = link.link_up;
947 mbx.link_status.duplex = link.duplex;
948 mbx.link_status.speed = link.speed;
949 mbx.link_status.mac_type = link.mac_type;
950
951 /* reply with link status */
952 nic_send_msg_to_vf(nic, vf, &mbx);
953}
954
932/* Interrupt handler to handle mailbox messages from VFs */ 955/* Interrupt handler to handle mailbox messages from VFs */
933static void nic_handle_mbx_intr(struct nicpf *nic, int vf) 956static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
934{ 957{
@@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
941 int i; 964 int i;
942 int ret = 0; 965 int ret = 0;
943 966
944 nic->mbx_lock[vf] = true;
945
946 mbx_addr = nic_get_mbx_addr(vf); 967 mbx_addr = nic_get_mbx_addr(vf);
947 mbx_data = (u64 *)&mbx; 968 mbx_data = (u64 *)&mbx;
948 969
@@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
957 switch (mbx.msg.msg) { 978 switch (mbx.msg.msg) {
958 case NIC_MBOX_MSG_READY: 979 case NIC_MBOX_MSG_READY:
959 nic_mbx_send_ready(nic, vf); 980 nic_mbx_send_ready(nic, vf);
960 if (vf < nic->num_vf_en) { 981 return;
961 nic->link[vf] = 0;
962 nic->duplex[vf] = 0;
963 nic->speed[vf] = 0;
964 }
965 goto unlock;
966 case NIC_MBOX_MSG_QS_CFG: 982 case NIC_MBOX_MSG_QS_CFG:
967 reg_addr = NIC_PF_QSET_0_127_CFG | 983 reg_addr = NIC_PF_QSET_0_127_CFG |
968 (mbx.qs.num << NIC_QS_ID_SHIFT); 984 (mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1031 break; 1047 break;
1032 case NIC_MBOX_MSG_RSS_SIZE: 1048 case NIC_MBOX_MSG_RSS_SIZE:
1033 nic_send_rss_size(nic, vf); 1049 nic_send_rss_size(nic, vf);
1034 goto unlock; 1050 return;
1035 case NIC_MBOX_MSG_RSS_CFG: 1051 case NIC_MBOX_MSG_RSS_CFG:
1036 case NIC_MBOX_MSG_RSS_CFG_CONT: 1052 case NIC_MBOX_MSG_RSS_CFG_CONT:
1037 nic_config_rss(nic, &mbx.rss_cfg); 1053 nic_config_rss(nic, &mbx.rss_cfg);
@@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1039 case NIC_MBOX_MSG_CFG_DONE: 1055 case NIC_MBOX_MSG_CFG_DONE:
1040 /* Last message of VF config msg sequence */ 1056 /* Last message of VF config msg sequence */
1041 nic_enable_vf(nic, vf, true); 1057 nic_enable_vf(nic, vf, true);
1042 goto unlock; 1058 break;
1043 case NIC_MBOX_MSG_SHUTDOWN: 1059 case NIC_MBOX_MSG_SHUTDOWN:
1044 /* First msg in VF teardown sequence */ 1060 /* First msg in VF teardown sequence */
1045 if (vf >= nic->num_vf_en) 1061 if (vf >= nic->num_vf_en)
@@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1049 break; 1065 break;
1050 case NIC_MBOX_MSG_ALLOC_SQS: 1066 case NIC_MBOX_MSG_ALLOC_SQS:
1051 nic_alloc_sqs(nic, &mbx.sqs_alloc); 1067 nic_alloc_sqs(nic, &mbx.sqs_alloc);
1052 goto unlock; 1068 return;
1053 case NIC_MBOX_MSG_NICVF_PTR: 1069 case NIC_MBOX_MSG_NICVF_PTR:
1054 nic->nicvf[vf] = mbx.nicvf.nicvf; 1070 nic->nicvf[vf] = mbx.nicvf.nicvf;
1055 break; 1071 break;
1056 case NIC_MBOX_MSG_PNICVF_PTR: 1072 case NIC_MBOX_MSG_PNICVF_PTR:
1057 nic_send_pnicvf(nic, vf); 1073 nic_send_pnicvf(nic, vf);
1058 goto unlock; 1074 return;
1059 case NIC_MBOX_MSG_SNICVF_PTR: 1075 case NIC_MBOX_MSG_SNICVF_PTR:
1060 nic_send_snicvf(nic, &mbx.nicvf); 1076 nic_send_snicvf(nic, &mbx.nicvf);
1061 goto unlock; 1077 return;
1062 case NIC_MBOX_MSG_BGX_STATS: 1078 case NIC_MBOX_MSG_BGX_STATS:
1063 nic_get_bgx_stats(nic, &mbx.bgx_stats); 1079 nic_get_bgx_stats(nic, &mbx.bgx_stats);
1064 goto unlock; 1080 return;
1065 case NIC_MBOX_MSG_LOOPBACK: 1081 case NIC_MBOX_MSG_LOOPBACK:
1066 ret = nic_config_loopback(nic, &mbx.lbk); 1082 ret = nic_config_loopback(nic, &mbx.lbk);
1067 break; 1083 break;
@@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1070 break; 1086 break;
1071 case NIC_MBOX_MSG_PFC: 1087 case NIC_MBOX_MSG_PFC:
1072 nic_pause_frame(nic, vf, &mbx.pfc); 1088 nic_pause_frame(nic, vf, &mbx.pfc);
1073 goto unlock; 1089 return;
1074 case NIC_MBOX_MSG_PTP_CFG: 1090 case NIC_MBOX_MSG_PTP_CFG:
1075 nic_config_timestamp(nic, vf, &mbx.ptp); 1091 nic_config_timestamp(nic, vf, &mbx.ptp);
1076 break; 1092 break;
@@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1094 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1110 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1095 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1111 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1096 bgx_set_dmac_cam_filter(nic->node, bgx, lmac, 1112 bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
1097 mbx.xcast.data.mac, 1113 mbx.xcast.mac,
1098 vf < NIC_VF_PER_MBX_REG ? vf : 1114 vf < NIC_VF_PER_MBX_REG ? vf :
1099 vf - NIC_VF_PER_MBX_REG); 1115 vf - NIC_VF_PER_MBX_REG);
1100 break; 1116 break;
@@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1106 } 1122 }
1107 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1123 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1108 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1124 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1109 bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode); 1125 bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
1110 break; 1126 break;
1127 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
1128 if (vf >= nic->num_vf_en) {
1129 ret = -1; /* NACK */
1130 break;
1131 }
1132 nic_link_status_get(nic, vf);
1133 return;
1111 default: 1134 default:
1112 dev_err(&nic->pdev->dev, 1135 dev_err(&nic->pdev->dev,
1113 "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); 1136 "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
1121 mbx.msg.msg, vf); 1144 mbx.msg.msg, vf);
1122 nic_mbx_send_nack(nic, vf); 1145 nic_mbx_send_nack(nic, vf);
1123 } 1146 }
1124unlock:
1125 nic->mbx_lock[vf] = false;
1126} 1147}
1127 1148
1128static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) 1149static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
@@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
1270 return 0; 1291 return 0;
1271} 1292}
1272 1293
1273/* Poll for BGX LMAC link status and update corresponding VF
1274 * if there is a change, valid only if internal L2 switch
1275 * is not present otherwise VF link is always treated as up
1276 */
1277static void nic_poll_for_link(struct work_struct *work)
1278{
1279 union nic_mbx mbx = {};
1280 struct nicpf *nic;
1281 struct bgx_link_status link;
1282 u8 vf, bgx, lmac;
1283
1284 nic = container_of(work, struct nicpf, dwork.work);
1285
1286 mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
1287
1288 for (vf = 0; vf < nic->num_vf_en; vf++) {
1289 /* Poll only if VF is UP */
1290 if (!nic->vf_enabled[vf])
1291 continue;
1292
1293 /* Get BGX, LMAC indices for the VF */
1294 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1295 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
1296 /* Get interface link status */
1297 bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
1298
1299 /* Inform VF only if link status changed */
1300 if (nic->link[vf] == link.link_up)
1301 continue;
1302
1303 if (!nic->mbx_lock[vf]) {
1304 nic->link[vf] = link.link_up;
1305 nic->duplex[vf] = link.duplex;
1306 nic->speed[vf] = link.speed;
1307
1308 /* Send a mbox message to VF with current link status */
1309 mbx.link_status.link_up = link.link_up;
1310 mbx.link_status.duplex = link.duplex;
1311 mbx.link_status.speed = link.speed;
1312 mbx.link_status.mac_type = link.mac_type;
1313 nic_send_msg_to_vf(nic, vf, &mbx);
1314 }
1315 }
1316 queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
1317}
1318
1319static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1294static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1320{ 1295{
1321 struct device *dev = &pdev->dev; 1296 struct device *dev = &pdev->dev;
@@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1384 if (!nic->vf_lmac_map) 1359 if (!nic->vf_lmac_map)
1385 goto err_release_regions; 1360 goto err_release_regions;
1386 1361
1387 nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
1388 if (!nic->link)
1389 goto err_release_regions;
1390
1391 nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
1392 if (!nic->duplex)
1393 goto err_release_regions;
1394
1395 nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL);
1396 if (!nic->speed)
1397 goto err_release_regions;
1398
1399 /* Initialize hardware */ 1362 /* Initialize hardware */
1400 nic_init_hw(nic); 1363 nic_init_hw(nic);
1401 1364
@@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1411 if (err) 1374 if (err)
1412 goto err_unregister_interrupts; 1375 goto err_unregister_interrupts;
1413 1376
1414 /* Register a physical link status poll fn() */
1415 nic->check_link = alloc_workqueue("check_link_status",
1416 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1417 if (!nic->check_link) {
1418 err = -ENOMEM;
1419 goto err_disable_sriov;
1420 }
1421
1422 INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
1423 queue_delayed_work(nic->check_link, &nic->dwork, 0);
1424
1425 return 0; 1377 return 0;
1426 1378
1427err_disable_sriov:
1428 if (nic->flags & NIC_SRIOV_ENABLED)
1429 pci_disable_sriov(pdev);
1430err_unregister_interrupts: 1379err_unregister_interrupts:
1431 nic_unregister_interrupts(nic); 1380 nic_unregister_interrupts(nic);
1432err_release_regions: 1381err_release_regions:
@@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev)
1447 if (nic->flags & NIC_SRIOV_ENABLED) 1396 if (nic->flags & NIC_SRIOV_ENABLED)
1448 pci_disable_sriov(pdev); 1397 pci_disable_sriov(pdev);
1449 1398
1450 if (nic->check_link) {
1451 /* Destroy work Queue */
1452 cancel_delayed_work_sync(&nic->dwork);
1453 destroy_workqueue(nic->check_link);
1454 }
1455
1456 nic_unregister_interrupts(nic); 1399 nic_unregister_interrupts(nic);
1457 pci_release_regions(pdev); 1400 pci_release_regions(pdev);
1458 1401
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..503cfadff4ac 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
68MODULE_PARM_DESC(cpi_alg, 68MODULE_PARM_DESC(cpi_alg,
69 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); 69 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
70 70
71/* workqueue for handling kernel ndo_set_rx_mode() calls */
72static struct workqueue_struct *nicvf_rx_mode_wq;
73
74static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) 71static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
75{ 72{
76 if (nic->sqs_mode) 73 if (nic->sqs_mode)
@@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
127{ 124{
128 int timeout = NIC_MBOX_MSG_TIMEOUT; 125 int timeout = NIC_MBOX_MSG_TIMEOUT;
129 int sleep = 10; 126 int sleep = 10;
127 int ret = 0;
128
129 mutex_lock(&nic->rx_mode_mtx);
130 130
131 nic->pf_acked = false; 131 nic->pf_acked = false;
132 nic->pf_nacked = false; 132 nic->pf_nacked = false;
@@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
139 netdev_err(nic->netdev, 139 netdev_err(nic->netdev,
140 "PF NACK to mbox msg 0x%02x from VF%d\n", 140 "PF NACK to mbox msg 0x%02x from VF%d\n",
141 (mbx->msg.msg & 0xFF), nic->vf_id); 141 (mbx->msg.msg & 0xFF), nic->vf_id);
142 return -EINVAL; 142 ret = -EINVAL;
143 break;
143 } 144 }
144 msleep(sleep); 145 msleep(sleep);
145 if (nic->pf_acked) 146 if (nic->pf_acked)
@@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
149 netdev_err(nic->netdev, 150 netdev_err(nic->netdev,
150 "PF didn't ACK to mbox msg 0x%02x from VF%d\n", 151 "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
151 (mbx->msg.msg & 0xFF), nic->vf_id); 152 (mbx->msg.msg & 0xFF), nic->vf_id);
152 return -EBUSY; 153 ret = -EBUSY;
154 break;
153 } 155 }
154 } 156 }
155 return 0; 157 mutex_unlock(&nic->rx_mode_mtx);
158 return ret;
156} 159}
157 160
 158/* Checks if VF is able to communicate with PF 161/* Checks if VF is able to communicate with PF
@@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
172 return 1; 175 return 1;
173} 176}
174 177
178static void nicvf_send_cfg_done(struct nicvf *nic)
179{
180 union nic_mbx mbx = {};
181
182 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
183 if (nicvf_send_msg_to_pf(nic, &mbx)) {
184 netdev_err(nic->netdev,
185 "PF didn't respond to CFG DONE msg\n");
186 }
187}
188
175static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) 189static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
176{ 190{
177 if (bgx->rx) 191 if (bgx->rx)
@@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
228 break; 242 break;
229 case NIC_MBOX_MSG_BGX_LINK_CHANGE: 243 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
230 nic->pf_acked = true; 244 nic->pf_acked = true;
231 nic->link_up = mbx.link_status.link_up; 245 if (nic->link_up != mbx.link_status.link_up) {
232 nic->duplex = mbx.link_status.duplex; 246 nic->link_up = mbx.link_status.link_up;
233 nic->speed = mbx.link_status.speed; 247 nic->duplex = mbx.link_status.duplex;
234 nic->mac_type = mbx.link_status.mac_type; 248 nic->speed = mbx.link_status.speed;
235 if (nic->link_up) { 249 nic->mac_type = mbx.link_status.mac_type;
236 netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n", 250 if (nic->link_up) {
237 nic->speed, 251 netdev_info(nic->netdev,
238 nic->duplex == DUPLEX_FULL ? 252 "Link is Up %d Mbps %s duplex\n",
239 "Full" : "Half"); 253 nic->speed,
240 netif_carrier_on(nic->netdev); 254 nic->duplex == DUPLEX_FULL ?
241 netif_tx_start_all_queues(nic->netdev); 255 "Full" : "Half");
242 } else { 256 netif_carrier_on(nic->netdev);
243 netdev_info(nic->netdev, "Link is Down\n"); 257 netif_tx_start_all_queues(nic->netdev);
244 netif_carrier_off(nic->netdev); 258 } else {
245 netif_tx_stop_all_queues(nic->netdev); 259 netdev_info(nic->netdev, "Link is Down\n");
260 netif_carrier_off(nic->netdev);
261 netif_tx_stop_all_queues(nic->netdev);
262 }
246 } 263 }
247 break; 264 break;
248 case NIC_MBOX_MSG_ALLOC_SQS: 265 case NIC_MBOX_MSG_ALLOC_SQS:
@@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
1311 struct nicvf_cq_poll *cq_poll = NULL; 1328 struct nicvf_cq_poll *cq_poll = NULL;
1312 union nic_mbx mbx = {}; 1329 union nic_mbx mbx = {};
1313 1330
1331 cancel_delayed_work_sync(&nic->link_change_work);
1332
1333 /* wait till all queued set_rx_mode tasks completes */
1334 drain_workqueue(nic->nicvf_rx_mode_wq);
1335
1314 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; 1336 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1315 nicvf_send_msg_to_pf(nic, &mbx); 1337 nicvf_send_msg_to_pf(nic, &mbx);
1316 1338
@@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
1410 return nicvf_send_msg_to_pf(nic, &mbx); 1432 return nicvf_send_msg_to_pf(nic, &mbx);
1411} 1433}
1412 1434
1435static void nicvf_link_status_check_task(struct work_struct *work_arg)
1436{
1437 struct nicvf *nic = container_of(work_arg,
1438 struct nicvf,
1439 link_change_work.work);
1440 union nic_mbx mbx = {};
1441 mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
1442 nicvf_send_msg_to_pf(nic, &mbx);
1443 queue_delayed_work(nic->nicvf_rx_mode_wq,
1444 &nic->link_change_work, 2 * HZ);
1445}
1446
1413int nicvf_open(struct net_device *netdev) 1447int nicvf_open(struct net_device *netdev)
1414{ 1448{
1415 int cpu, err, qidx; 1449 int cpu, err, qidx;
1416 struct nicvf *nic = netdev_priv(netdev); 1450 struct nicvf *nic = netdev_priv(netdev);
1417 struct queue_set *qs = nic->qs; 1451 struct queue_set *qs = nic->qs;
1418 struct nicvf_cq_poll *cq_poll = NULL; 1452 struct nicvf_cq_poll *cq_poll = NULL;
1419 union nic_mbx mbx = {}; 1453
1454 /* wait till all queued set_rx_mode tasks completes if any */
1455 drain_workqueue(nic->nicvf_rx_mode_wq);
1420 1456
1421 netif_carrier_off(netdev); 1457 netif_carrier_off(netdev);
1422 1458
@@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev)
1512 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1548 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
1513 1549
1514 /* Send VF config done msg to PF */ 1550 /* Send VF config done msg to PF */
1515 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; 1551 nicvf_send_cfg_done(nic);
1516 nicvf_write_to_mbx(nic, &mbx); 1552
1553 INIT_DELAYED_WORK(&nic->link_change_work,
1554 nicvf_link_status_check_task);
1555 queue_delayed_work(nic->nicvf_rx_mode_wq,
1556 &nic->link_change_work, 0);
1517 1557
1518 return 0; 1558 return 0;
1519cleanup: 1559cleanup:
@@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
1941 1981
1942 /* flush DMAC filters and reset RX mode */ 1982 /* flush DMAC filters and reset RX mode */
1943 mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; 1983 mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
1944 nicvf_send_msg_to_pf(nic, &mbx); 1984 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
1985 goto free_mc;
1945 1986
1946 if (mode & BGX_XCAST_MCAST_FILTER) { 1987 if (mode & BGX_XCAST_MCAST_FILTER) {
1947 /* once enabling filtering, we need to signal to PF to add 1988 /* once enabling filtering, we need to signal to PF to add
 1948 * its own LMAC to the filter to accept packets for it. 1989 * its own LMAC to the filter to accept packets for it.
1949 */ 1990 */
1950 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; 1991 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1951 mbx.xcast.data.mac = 0; 1992 mbx.xcast.mac = 0;
1952 nicvf_send_msg_to_pf(nic, &mbx); 1993 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
1994 goto free_mc;
1953 } 1995 }
1954 1996
1955 /* check if we have any specific MACs to be added to PF DMAC filter */ 1997 /* check if we have any specific MACs to be added to PF DMAC filter */
@@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
1957 /* now go through kernel list of MACs and add them one by one */ 1999 /* now go through kernel list of MACs and add them one by one */
1958 for (idx = 0; idx < mc_addrs->count; idx++) { 2000 for (idx = 0; idx < mc_addrs->count; idx++) {
1959 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; 2001 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1960 mbx.xcast.data.mac = mc_addrs->mc[idx]; 2002 mbx.xcast.mac = mc_addrs->mc[idx];
1961 nicvf_send_msg_to_pf(nic, &mbx); 2003 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
2004 goto free_mc;
1962 } 2005 }
1963 kfree(mc_addrs);
1964 } 2006 }
1965 2007
1966 /* and finally set rx mode for PF accordingly */ 2008 /* and finally set rx mode for PF accordingly */
1967 mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; 2009 mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
1968 mbx.xcast.data.mode = mode; 2010 mbx.xcast.mode = mode;
1969 2011
1970 nicvf_send_msg_to_pf(nic, &mbx); 2012 nicvf_send_msg_to_pf(nic, &mbx);
2013free_mc:
2014 kfree(mc_addrs);
1971} 2015}
1972 2016
1973static void nicvf_set_rx_mode_task(struct work_struct *work_arg) 2017static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
1974{ 2018{
1975 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, 2019 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
1976 work.work); 2020 work);
1977 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); 2021 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
1978 u8 mode; 2022 u8 mode;
1979 struct xcast_addr_list *mc; 2023 struct xcast_addr_list *mc;
@@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
2030 kfree(nic->rx_mode_work.mc); 2074 kfree(nic->rx_mode_work.mc);
2031 nic->rx_mode_work.mc = mc_list; 2075 nic->rx_mode_work.mc = mc_list;
2032 nic->rx_mode_work.mode = mode; 2076 nic->rx_mode_work.mode = mode;
2033 queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0); 2077 queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
2034 spin_unlock(&nic->rx_mode_wq_lock); 2078 spin_unlock(&nic->rx_mode_wq_lock);
2035} 2079}
2036 2080
@@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2187 2231
2188 INIT_WORK(&nic->reset_task, nicvf_reset_task); 2232 INIT_WORK(&nic->reset_task, nicvf_reset_task);
2189 2233
2190 INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); 2234 nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
2235 WQ_MEM_RECLAIM,
2236 nic->vf_id);
2237 INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
2191 spin_lock_init(&nic->rx_mode_wq_lock); 2238 spin_lock_init(&nic->rx_mode_wq_lock);
2239 mutex_init(&nic->rx_mode_mtx);
2192 2240
2193 err = register_netdev(netdev); 2241 err = register_netdev(netdev);
2194 if (err) { 2242 if (err) {
@@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev)
2228 nic = netdev_priv(netdev); 2276 nic = netdev_priv(netdev);
2229 pnetdev = nic->pnicvf->netdev; 2277 pnetdev = nic->pnicvf->netdev;
2230 2278
2231 cancel_delayed_work_sync(&nic->rx_mode_work.work);
2232
2233 /* Check if this Qset is assigned to different VF. 2279 /* Check if this Qset is assigned to different VF.
2234 * If yes, clean primary and all secondary Qsets. 2280 * If yes, clean primary and all secondary Qsets.
2235 */ 2281 */
2236 if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) 2282 if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
2237 unregister_netdev(pnetdev); 2283 unregister_netdev(pnetdev);
2284 if (nic->nicvf_rx_mode_wq) {
2285 destroy_workqueue(nic->nicvf_rx_mode_wq);
2286 nic->nicvf_rx_mode_wq = NULL;
2287 }
2238 nicvf_unregister_interrupts(nic); 2288 nicvf_unregister_interrupts(nic);
2239 pci_set_drvdata(pdev, NULL); 2289 pci_set_drvdata(pdev, NULL);
2240 if (nic->drv_stats) 2290 if (nic->drv_stats)
@@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = {
2261static int __init nicvf_init_module(void) 2311static int __init nicvf_init_module(void)
2262{ 2312{
2263 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); 2313 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
2264 nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
2265 WQ_MEM_RECLAIM);
2266 return pci_register_driver(&nicvf_driver); 2314 return pci_register_driver(&nicvf_driver);
2267} 2315}
2268 2316
2269static void __exit nicvf_cleanup_module(void) 2317static void __exit nicvf_cleanup_module(void)
2270{ 2318{
2271 if (nicvf_rx_mode_wq) {
2272 destroy_workqueue(nicvf_rx_mode_wq);
2273 nicvf_rx_mode_wq = NULL;
2274 }
2275 pci_unregister_driver(&nicvf_driver); 2319 pci_unregister_driver(&nicvf_driver);
2276} 2320}
2277 2321
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index e337da6ba2a4..673c57b8023f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx)
1217 1217
1218 /* Disable MAC steering (NCSI traffic) */ 1218 /* Disable MAC steering (NCSI traffic) */
1219 for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) 1219 for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
1220 bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); 1220 bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
1221} 1221}
1222 1222
1223static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) 1223static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cbdd20b9ee6f..5cbc54e9eb19 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -60,7 +60,7 @@
60#define RX_DMACX_CAM_EN BIT_ULL(48) 60#define RX_DMACX_CAM_EN BIT_ULL(48)
61#define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49) 61#define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49)
62#define RX_DMAC_COUNT 32 62#define RX_DMAC_COUNT 32
63#define BGX_CMR_RX_STREERING 0x300 63#define BGX_CMR_RX_STEERING 0x300
64#define RX_TRAFFIC_STEER_RULE_COUNT 8 64#define RX_TRAFFIC_STEER_RULE_COUNT 8
65#define BGX_CMR_CHAN_MSK_AND 0x450 65#define BGX_CMR_CHAN_MSK_AND 0x450
66#define BGX_CMR_BIST_STATUS 0x460 66#define BGX_CMR_BIST_STATUS 0x460
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f52e2c46e6a7..e4ff531db14a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
3289 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : 3289 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3290 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 3290 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3291 if (!ok) { 3291 if (!ok) {
3292 /* Log this in case the user has forgotten to give the kernel
3293 * any buffers, even later in the application.
3294 */
3292 dev_info(&vsi->back->pdev->dev, 3295 dev_info(&vsi->back->pdev->dev,
3293 "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", 3296 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3294 ring->xsk_umem ? "UMEM enabled " : "", 3297 ring->xsk_umem ? "UMEM enabled " : "",
3295 ring->queue_index, pf_q); 3298 ring->queue_index, pf_q);
3296 } 3299 }
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi)
6725 6728
6726 for (i = 0; i < vsi->num_queue_pairs; i++) { 6729 for (i = 0; i < vsi->num_queue_pairs; i++) {
6727 i40e_clean_tx_ring(vsi->tx_rings[i]); 6730 i40e_clean_tx_ring(vsi->tx_rings[i]);
6728 if (i40e_enabled_xdp_vsi(vsi)) 6731 if (i40e_enabled_xdp_vsi(vsi)) {
6732 /* Make sure that in-progress ndo_xdp_xmit
6733 * calls are completed.
6734 */
6735 synchronize_rcu();
6729 i40e_clean_tx_ring(vsi->xdp_rings[i]); 6736 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6737 }
6730 i40e_clean_rx_ring(vsi->rx_rings[i]); 6738 i40e_clean_rx_ring(vsi->rx_rings[i]);
6731 } 6739 }
6732 6740
@@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
11895 if (old_prog) 11903 if (old_prog)
11896 bpf_prog_put(old_prog); 11904 bpf_prog_put(old_prog);
11897 11905
11906 /* Kick start the NAPI context if there is an AF_XDP socket open
 11907 * on that queue id. This is so that receiving will start.
11908 */
11909 if (need_reset && prog)
11910 for (i = 0; i < vsi->num_queue_pairs; i++)
11911 if (vsi->xdp_rings[i]->xsk_umem)
11912 (void)i40e_xsk_async_xmit(vsi->netdev, i);
11913
11898 return 0; 11914 return 0;
11899} 11915}
11900 11916
@@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
11955static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) 11971static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
11956{ 11972{
11957 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); 11973 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
11958 if (i40e_enabled_xdp_vsi(vsi)) 11974 if (i40e_enabled_xdp_vsi(vsi)) {
11975 /* Make sure that in-progress ndo_xdp_xmit calls are
11976 * completed.
11977 */
11978 synchronize_rcu();
11959 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); 11979 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
11980 }
11960 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); 11981 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
11961} 11982}
11962 11983
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a7e14e98889f..6c97667d20ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3709 struct i40e_netdev_priv *np = netdev_priv(dev); 3709 struct i40e_netdev_priv *np = netdev_priv(dev);
3710 unsigned int queue_index = smp_processor_id(); 3710 unsigned int queue_index = smp_processor_id();
3711 struct i40e_vsi *vsi = np->vsi; 3711 struct i40e_vsi *vsi = np->vsi;
3712 struct i40e_pf *pf = vsi->back;
3712 struct i40e_ring *xdp_ring; 3713 struct i40e_ring *xdp_ring;
3713 int drops = 0; 3714 int drops = 0;
3714 int i; 3715 int i;
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3716 if (test_bit(__I40E_VSI_DOWN, vsi->state)) 3717 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3717 return -ENETDOWN; 3718 return -ENETDOWN;
3718 3719
3719 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) 3720 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
3721 test_bit(__I40E_CONFIG_BUSY, pf->state))
3720 return -ENXIO; 3722 return -ENXIO;
3721 3723
3722 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 3724 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 870cf654e436..3827f16e6923 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
183 err = i40e_queue_pair_enable(vsi, qid); 183 err = i40e_queue_pair_enable(vsi, qid);
184 if (err) 184 if (err)
185 return err; 185 return err;
186
187 /* Kick start the NAPI context so that receiving will start */
188 err = i40e_xsk_async_xmit(vsi->netdev, qid);
189 if (err)
190 return err;
186 } 191 }
187 192
188 return 0; 193 return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index daff8183534b..cb35d8202572 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3953 else 3953 else
3954 mrqc = IXGBE_MRQC_VMDQRSS64EN; 3954 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3955 3955
3956 /* Enable L3/L4 for Tx Switched packets */ 3956 /* Enable L3/L4 for Tx Switched packets only for X550,
3957 mrqc |= IXGBE_MRQC_L3L4TXSWEN; 3957 * older devices do not support this feature
3958 */
3959 if (hw->mac.type >= ixgbe_mac_X550)
3960 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3958 } else { 3961 } else {
3959 if (tcs > 4) 3962 if (tcs > 4)
3960 mrqc = IXGBE_MRQC_RTRSS8TCEN; 3963 mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10225 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 10228 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10226 struct ixgbe_adapter *adapter = netdev_priv(dev); 10229 struct ixgbe_adapter *adapter = netdev_priv(dev);
10227 struct bpf_prog *old_prog; 10230 struct bpf_prog *old_prog;
10231 bool need_reset;
10228 10232
10229 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 10233 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10230 return -EINVAL; 10234 return -EINVAL;
@@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10247 return -ENOMEM; 10251 return -ENOMEM;
10248 10252
10249 old_prog = xchg(&adapter->xdp_prog, prog); 10253 old_prog = xchg(&adapter->xdp_prog, prog);
10254 need_reset = (!!prog != !!old_prog);
10250 10255
10251 /* If transitioning XDP modes reconfigure rings */ 10256 /* If transitioning XDP modes reconfigure rings */
10252 if (!!prog != !!old_prog) { 10257 if (need_reset) {
10253 int err = ixgbe_setup_tc(dev, adapter->hw_tcs); 10258 int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10254 10259
10255 if (err) { 10260 if (err) {
@@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10265 if (old_prog) 10270 if (old_prog)
10266 bpf_prog_put(old_prog); 10271 bpf_prog_put(old_prog);
10267 10272
10273 /* Kick start the NAPI context if there is an AF_XDP socket open
 10274 * on that queue id. This is so that receiving will start.
10275 */
10276 if (need_reset && prog)
10277 for (i = 0; i < adapter->num_rx_queues; i++)
10278 if (adapter->xdp_ring[i]->xsk_umem)
10279 (void)ixgbe_xsk_async_xmit(adapter->netdev, i);
10280
10268 return 0; 10281 return 0;
10269} 10282}
10270 10283
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 65c3e2c979d4..36a8879536a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
144 ixgbe_txrx_ring_disable(adapter, qid); 144 ixgbe_txrx_ring_disable(adapter, qid);
145 145
146 err = ixgbe_add_xsk_umem(adapter, umem, qid); 146 err = ixgbe_add_xsk_umem(adapter, umem, qid);
147 if (err)
148 return err;
147 149
148 if (if_running) 150 if (if_running) {
149 ixgbe_txrx_ring_enable(adapter, qid); 151 ixgbe_txrx_ring_enable(adapter, qid);
150 152
151 return err; 153 /* Kick start the NAPI context so that receiving will start */
154 err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
155 if (err)
156 return err;
157 }
158
159 return 0;
152} 160}
153 161
154static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) 162static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
@@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
634 dma_addr_t dma; 642 dma_addr_t dma;
635 643
636 while (budget-- > 0) { 644 while (budget-- > 0) {
637 if (unlikely(!ixgbe_desc_unused(xdp_ring))) { 645 if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
646 !netif_carrier_ok(xdp_ring->netdev)) {
638 work_done = false; 647 work_done = false;
639 break; 648 break;
640 } 649 }
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d4568eb2297..8433fb9c3eee 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2146,7 +2146,7 @@ err_drop_frame:
2146 if (unlikely(!skb)) 2146 if (unlikely(!skb))
2147 goto err_drop_frame_ret_pool; 2147 goto err_drop_frame_ret_pool;
2148 2148
2149 dma_sync_single_range_for_cpu(dev->dev.parent, 2149 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2150 rx_desc->buf_phys_addr, 2150 rx_desc->buf_phys_addr,
2151 MVNETA_MH_SIZE + NET_SKB_PAD, 2151 MVNETA_MH_SIZE + NET_SKB_PAD,
2152 rx_bytes, 2152 rx_bytes,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e23ca90289f7..0a868c829b90 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1291 1291
1292static int 1292static int
1293wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 1293wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1294 enum alu_op alu_op, bool skip) 1294 enum alu_op alu_op)
1295{ 1295{
1296 const struct bpf_insn *insn = &meta->insn; 1296 const struct bpf_insn *insn = &meta->insn;
1297 1297
1298 if (skip) {
1299 meta->skip = true;
1300 return 0;
1301 }
1302
1303 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); 1298 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
1304 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); 1299 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
1305 1300
@@ -2309,7 +2304,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2309 2304
2310static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2305static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2311{ 2306{
2312 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); 2307 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
2313} 2308}
2314 2309
2315static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2310static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2319,7 +2314,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2319 2314
2320static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2315static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2321{ 2316{
2322 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); 2317 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
2323} 2318}
2324 2319
2325static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2320static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2329,7 +2324,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2329 2324
2330static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2325static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2331{ 2326{
2332 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); 2327 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
2333} 2328}
2334 2329
2335static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2330static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2339,7 +2334,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2339 2334
2340static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2335static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2341{ 2336{
2342 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); 2337 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
2343} 2338}
2344 2339
2345static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2340static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2349,7 +2344,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2349 2344
2350static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2345static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2351{ 2346{
2352 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); 2347 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
2353} 2348}
2354 2349
2355static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2350static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 7cdac77d0c68..07e41c42bcf5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -499,6 +499,8 @@ static int ipvlan_nl_changelink(struct net_device *dev,
499 499
500 if (!data) 500 if (!data)
501 return 0; 501 return 0;
502 if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
503 return -EPERM;
502 504
503 if (data[IFLA_IPVLAN_MODE]) { 505 if (data[IFLA_IPVLAN_MODE]) {
504 u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); 506 u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -601,6 +603,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
601 struct ipvl_dev *tmp = netdev_priv(phy_dev); 603 struct ipvl_dev *tmp = netdev_priv(phy_dev);
602 604
603 phy_dev = tmp->phy_dev; 605 phy_dev = tmp->phy_dev;
606 if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
607 return -EPERM;
604 } else if (!netif_is_ipvlan_port(phy_dev)) { 608 } else if (!netif_is_ipvlan_port(phy_dev)) {
605 /* Exit early if the underlying link is invalid or busy */ 609 /* Exit early if the underlying link is invalid or busy */
606 if (phy_dev->type != ARPHRD_ETHER || 610 if (phy_dev->type != ARPHRD_ETHER ||
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 82ab6ed3b74e..6bac602094bd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -26,6 +26,8 @@
26#include <linux/marvell_phy.h> 26#include <linux/marvell_phy.h>
27#include <linux/phy.h> 27#include <linux/phy.h>
28 28
29#define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK 0x01e0
30
29enum { 31enum {
30 MV_PCS_BASE_T = 0x0000, 32 MV_PCS_BASE_T = 0x0000,
31 MV_PCS_BASE_R = 0x1000, 33 MV_PCS_BASE_R = 0x1000,
@@ -386,8 +388,10 @@ static int mv3310_config_aneg(struct phy_device *phydev)
386 else 388 else
387 reg = 0; 389 reg = 0;
388 390
391 /* Make sure we clear unsupported 2.5G/5G advertising */
389 ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, 392 ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
390 MDIO_AN_10GBT_CTRL_ADV10G, reg); 393 MDIO_AN_10GBT_CTRL_ADV10G |
394 MDIO_AN_10GBT_CTRL_ADV_NBT_MASK, reg);
391 if (ret < 0) 395 if (ret < 0)
392 return ret; 396 return ret;
393 if (ret > 0) 397 if (ret > 0)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 66b9cfe692fc..7368616286ae 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -379,7 +379,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
379 err = device_register(&bus->dev); 379 err = device_register(&bus->dev);
380 if (err) { 380 if (err) {
381 pr_err("mii_bus %s failed to register\n", bus->id); 381 pr_err("mii_bus %s failed to register\n", bus->id);
382 put_device(&bus->dev);
383 return -EINVAL; 382 return -EINVAL;
384 } 383 }
385 384
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index c6010fb1aa0f..cb4a23041a94 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -282,6 +282,13 @@ static struct phy_driver realtek_drvs[] = {
282 .name = "RTL8366RB Gigabit Ethernet", 282 .name = "RTL8366RB Gigabit Ethernet",
283 .features = PHY_GBIT_FEATURES, 283 .features = PHY_GBIT_FEATURES,
284 .config_init = &rtl8366rb_config_init, 284 .config_init = &rtl8366rb_config_init,
285 /* These interrupts are handled by the irq controller
286 * embedded inside the RTL8366RB, they get unmasked when the
287 * irq is requested and ACKed by reading the status register,
288 * which is done by the irqchip code.
289 */
290 .ack_interrupt = genphy_no_ack_interrupt,
291 .config_intr = genphy_no_config_intr,
285 .suspend = genphy_suspend, 292 .suspend = genphy_suspend,
286 .resume = genphy_resume, 293 .resume = genphy_resume,
287 }, 294 },
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 958f1cf67282..6ce3f666d142 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1256,7 +1256,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1256 list_add_tail_rcu(&port->list, &team->port_list); 1256 list_add_tail_rcu(&port->list, &team->port_list);
1257 team_port_enable(team, port); 1257 team_port_enable(team, port);
1258 __team_compute_features(team); 1258 __team_compute_features(team);
1259 __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); 1259 __team_port_change_port_added(port, !!netif_oper_up(port_dev));
1260 __team_options_change_check(team); 1260 __team_options_change_check(team);
1261 1261
1262 netdev_info(dev, "Port device %s added\n", portname); 1262 netdev_info(dev, "Port device %s added\n", portname);
@@ -2915,7 +2915,7 @@ static int team_device_event(struct notifier_block *unused,
2915 2915
2916 switch (event) { 2916 switch (event) {
2917 case NETDEV_UP: 2917 case NETDEV_UP:
2918 if (netif_carrier_ok(dev)) 2918 if (netif_oper_up(dev))
2919 team_port_change_check(port, true); 2919 team_port_change_check(port, true);
2920 break; 2920 break;
2921 case NETDEV_DOWN: 2921 case NETDEV_DOWN:
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ada6baf8847a..86c8c64fbb0f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1179,7 +1179,7 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
1179 } else { 1179 } else {
1180 /* test for RTL8153-BND and RTL8153-BD */ 1180 /* test for RTL8153-BND and RTL8153-BD */
1181 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); 1181 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
1182 if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK)) { 1182 if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
1183 netif_dbg(tp, probe, tp->netdev, 1183 netif_dbg(tp, probe, tp->netdev,
1184 "Invalid variant for MAC pass through\n"); 1184 "Invalid variant for MAC pass through\n");
1185 return -ENODEV; 1185 return -ENODEV;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 95909e262ba4..7c1430ed0244 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev)
1273 1273
1274 /* default to no qdisc; user can add if desired */ 1274 /* default to no qdisc; user can add if desired */
1275 dev->priv_flags |= IFF_NO_QUEUE; 1275 dev->priv_flags |= IFF_NO_QUEUE;
1276
1277 dev->min_mtu = 0;
1278 dev->max_mtu = 0;
1276} 1279}
1277 1280
1278static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], 1281static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 320edcac4699..6359053bd0c7 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3554,7 +3554,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
3554 goto out_err; 3554 goto out_err;
3555 } 3555 }
3556 3556
3557 genlmsg_reply(skb, info); 3557 res = genlmsg_reply(skb, info);
3558 break; 3558 break;
3559 } 3559 }
3560 3560
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 127fcc9c3778..333b56d8f746 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -992,6 +992,14 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
992{ 992{
993 return 0; 993 return 0;
994} 994}
995static inline int genphy_no_ack_interrupt(struct phy_device *phydev)
996{
997 return 0;
998}
999static inline int genphy_no_config_intr(struct phy_device *phydev)
1000{
1001 return 0;
1002}
995int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, 1003int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
996 u16 regnum); 1004 u16 regnum);
997int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, 1005int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 71f2394abbf7..e0348cb0a1dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -61,10 +61,20 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
61 /* gso packets without NEEDS_CSUM do not set transport_offset. 61 /* gso packets without NEEDS_CSUM do not set transport_offset.
62 * probe and drop if does not match one of the above types. 62 * probe and drop if does not match one of the above types.
63 */ 63 */
64 if (gso_type) { 64 if (gso_type && skb->network_header) {
65 if (!skb->protocol)
66 virtio_net_hdr_set_proto(skb, hdr);
67retry:
65 skb_probe_transport_header(skb, -1); 68 skb_probe_transport_header(skb, -1);
66 if (!skb_transport_header_was_set(skb)) 69 if (!skb_transport_header_was_set(skb)) {
70 /* UFO does not specify ipv4 or 6: try both */
71 if (gso_type & SKB_GSO_UDP &&
72 skb->protocol == htons(ETH_P_IP)) {
73 skb->protocol = htons(ETH_P_IPV6);
74 goto retry;
75 }
67 return -EINVAL; 76 return -EINVAL;
77 }
68 } 78 }
69 } 79 }
70 80
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index b669fe6dbc3b..98f31c7ea23d 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -63,10 +63,11 @@ struct pnpipehdr {
63 u8 state_after_reset; /* reset request */ 63 u8 state_after_reset; /* reset request */
64 u8 error_code; /* any response */ 64 u8 error_code; /* any response */
65 u8 pep_type; /* status indication */ 65 u8 pep_type; /* status indication */
66 u8 data[1]; 66 u8 data0; /* anything else */
67 }; 67 };
68 u8 data[];
68}; 69};
69#define other_pep_type data[1] 70#define other_pep_type data[0]
70 71
71static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb) 72static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
72{ 73{
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 7298a53b9702..85386becbaea 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -853,7 +853,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
853 xfrm_pol_put(pols[i]); 853 xfrm_pol_put(pols[i]);
854} 854}
855 855
856void __xfrm_state_destroy(struct xfrm_state *); 856void __xfrm_state_destroy(struct xfrm_state *, bool);
857 857
858static inline void __xfrm_state_put(struct xfrm_state *x) 858static inline void __xfrm_state_put(struct xfrm_state *x)
859{ 859{
@@ -863,7 +863,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x)
863static inline void xfrm_state_put(struct xfrm_state *x) 863static inline void xfrm_state_put(struct xfrm_state *x)
864{ 864{
865 if (refcount_dec_and_test(&x->refcnt)) 865 if (refcount_dec_and_test(&x->refcnt))
866 __xfrm_state_destroy(x); 866 __xfrm_state_destroy(x, false);
867}
868
869static inline void xfrm_state_put_sync(struct xfrm_state *x)
870{
871 if (refcount_dec_and_test(&x->refcnt))
872 __xfrm_state_destroy(x, true);
867} 873}
868 874
869static inline void xfrm_state_hold(struct xfrm_state *x) 875static inline void xfrm_state_hold(struct xfrm_state *x)
@@ -1590,7 +1596,7 @@ struct xfrmk_spdinfo {
1590 1596
1591struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); 1597struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1592int xfrm_state_delete(struct xfrm_state *x); 1598int xfrm_state_delete(struct xfrm_state *x);
1593int xfrm_state_flush(struct net *net, u8 proto, bool task_valid); 1599int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1594int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); 1600int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1595void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); 1601void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1596void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); 1602void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index abf1002080df..93a5cbbde421 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -471,6 +471,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
471 } 471 }
472 472
473 if (!node || node->prefixlen != key->prefixlen || 473 if (!node || node->prefixlen != key->prefixlen ||
474 node->prefixlen != matchlen ||
474 (node->flags & LPM_TREE_NODE_FLAG_IM)) { 475 (node->flags & LPM_TREE_NODE_FLAG_IM)) {
475 ret = -ENOENT; 476 ret = -ENOENT;
476 goto out; 477 goto out;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..e31e1b20f7f4 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -13,27 +13,13 @@
13#include <net/sock.h> 13#include <net/sock.h>
14#include <net/tcp.h> 14#include <net/tcp.h>
15 15
16static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, 16static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
17 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) 17 u32 *retval, u32 *time)
18{
19 u32 ret;
20
21 preempt_disable();
22 rcu_read_lock();
23 bpf_cgroup_storage_set(storage);
24 ret = BPF_PROG_RUN(prog, ctx);
25 rcu_read_unlock();
26 preempt_enable();
27
28 return ret;
29}
30
31static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
32 u32 *time)
33{ 18{
34 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; 19 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
35 enum bpf_cgroup_storage_type stype; 20 enum bpf_cgroup_storage_type stype;
36 u64 time_start, time_spent = 0; 21 u64 time_start, time_spent = 0;
22 int ret = 0;
37 u32 i; 23 u32 i;
38 24
39 for_each_cgroup_storage_type(stype) { 25 for_each_cgroup_storage_type(stype) {
@@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
48 34
49 if (!repeat) 35 if (!repeat)
50 repeat = 1; 36 repeat = 1;
37
38 rcu_read_lock();
39 preempt_disable();
51 time_start = ktime_get_ns(); 40 time_start = ktime_get_ns();
52 for (i = 0; i < repeat; i++) { 41 for (i = 0; i < repeat; i++) {
53 *ret = bpf_test_run_one(prog, ctx, storage); 42 bpf_cgroup_storage_set(storage);
43 *retval = BPF_PROG_RUN(prog, ctx);
44
45 if (signal_pending(current)) {
46 ret = -EINTR;
47 break;
48 }
49
54 if (need_resched()) { 50 if (need_resched()) {
55 if (signal_pending(current))
56 break;
57 time_spent += ktime_get_ns() - time_start; 51 time_spent += ktime_get_ns() - time_start;
52 preempt_enable();
53 rcu_read_unlock();
54
58 cond_resched(); 55 cond_resched();
56
57 rcu_read_lock();
58 preempt_disable();
59 time_start = ktime_get_ns(); 59 time_start = ktime_get_ns();
60 } 60 }
61 } 61 }
62 time_spent += ktime_get_ns() - time_start; 62 time_spent += ktime_get_ns() - time_start;
63 preempt_enable();
64 rcu_read_unlock();
65
63 do_div(time_spent, repeat); 66 do_div(time_spent, repeat);
64 *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; 67 *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
65 68
66 for_each_cgroup_storage_type(stype) 69 for_each_cgroup_storage_type(stype)
67 bpf_cgroup_storage_free(storage[stype]); 70 bpf_cgroup_storage_free(storage[stype]);
68 71
69 return 0; 72 return ret;
70} 73}
71 74
72static int bpf_test_finish(const union bpf_attr *kattr, 75static int bpf_test_finish(const union bpf_attr *kattr,
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3aeff0895669..ac92b2eb32b1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1204,14 +1204,7 @@ static void br_multicast_query_received(struct net_bridge *br,
1204 return; 1204 return;
1205 1205
1206 br_multicast_update_query_timer(br, query, max_delay); 1206 br_multicast_update_query_timer(br, query, max_delay);
1207 1207 br_multicast_mark_router(br, port);
1208 /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
1209 * the arrival port for IGMP Queries where the source address
1210 * is 0.0.0.0 should not be added to router port list.
1211 */
1212 if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
1213 saddr->proto == htons(ETH_P_IPV6))
1214 br_multicast_mark_router(br, port);
1215} 1208}
1216 1209
1217static void br_ip4_multicast_query(struct net_bridge *br, 1210static void br_ip4_multicast_query(struct net_bridge *br,
diff --git a/net/compat.c b/net/compat.c
index 959d1c51826d..3d348198004f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -388,8 +388,12 @@ static int __compat_sys_setsockopt(int fd, int level, int optname,
388 char __user *optval, unsigned int optlen) 388 char __user *optval, unsigned int optlen)
389{ 389{
390 int err; 390 int err;
391 struct socket *sock = sockfd_lookup(fd, &err); 391 struct socket *sock;
392
393 if (optlen > INT_MAX)
394 return -EINVAL;
392 395
396 sock = sockfd_lookup(fd, &err);
393 if (sock) { 397 if (sock) {
394 err = security_socket_setsockopt(sock, level, optname); 398 err = security_socket_setsockopt(sock, level, optname);
395 if (err) { 399 if (err) {
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 2d7e01b23572..2a2a878b5ce3 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
69 69
70int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) 70int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
71{ 71{
72 u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING;
73 struct dsa_switch *ds = dp->ds; 72 struct dsa_switch *ds = dp->ds;
74 int port = dp->index; 73 int port = dp->index;
75 int err; 74 int err;
@@ -80,7 +79,8 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
80 return err; 79 return err;
81 } 80 }
82 81
83 dsa_port_set_state_now(dp, stp_state); 82 if (!dp->bridge_dev)
83 dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
84 84
85 return 0; 85 return 0;
86} 86}
@@ -90,7 +90,8 @@ void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy)
90 struct dsa_switch *ds = dp->ds; 90 struct dsa_switch *ds = dp->ds;
91 int port = dp->index; 91 int port = dp->index;
92 92
93 dsa_port_set_state_now(dp, BR_STATE_DISABLED); 93 if (!dp->bridge_dev)
94 dsa_port_set_state_now(dp, BR_STATE_DISABLED);
94 95
95 if (ds->ops->port_disable) 96 if (ds->ops->port_disable)
96 ds->ops->port_disable(ds, port, phy); 97 ds->ops->port_disable(ds, port, phy);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 5459f41fc26f..10e809b296ec 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
328 skb->len += tailen; 328 skb->len += tailen;
329 skb->data_len += tailen; 329 skb->data_len += tailen;
330 skb->truesize += tailen; 330 skb->truesize += tailen;
331 if (sk) 331 if (sk && sk_fullsock(sk))
332 refcount_add(tailen, &sk->sk_wmem_alloc); 332 refcount_add(tailen, &sk->sk_wmem_alloc);
333 333
334 goto out; 334 goto out;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 3978f807fa8b..6ae89f2b541b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1457,9 +1457,23 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1457 struct ip_tunnel_parm *p = &t->parms; 1457 struct ip_tunnel_parm *p = &t->parms;
1458 __be16 o_flags = p->o_flags; 1458 __be16 o_flags = p->o_flags;
1459 1459
1460 if ((t->erspan_ver == 1 || t->erspan_ver == 2) && 1460 if (t->erspan_ver == 1 || t->erspan_ver == 2) {
1461 !t->collect_md) 1461 if (!t->collect_md)
1462 o_flags |= TUNNEL_KEY; 1462 o_flags |= TUNNEL_KEY;
1463
1464 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1465 goto nla_put_failure;
1466
1467 if (t->erspan_ver == 1) {
1468 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1469 goto nla_put_failure;
1470 } else {
1471 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1472 goto nla_put_failure;
1473 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1474 goto nla_put_failure;
1475 }
1476 }
1463 1477
1464 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 1478 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1465 nla_put_be16(skb, IFLA_GRE_IFLAGS, 1479 nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -1495,19 +1509,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1495 goto nla_put_failure; 1509 goto nla_put_failure;
1496 } 1510 }
1497 1511
1498 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1499 goto nla_put_failure;
1500
1501 if (t->erspan_ver == 1) {
1502 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1503 goto nla_put_failure;
1504 } else if (t->erspan_ver == 2) {
1505 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1506 goto nla_put_failure;
1507 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1508 goto nla_put_failure;
1509 }
1510
1511 return 0; 1512 return 0;
1512 1513
1513nla_put_failure: 1514nla_put_failure:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 730bc44dbad9..ccc78f3a4b60 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2347,6 +2347,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2347 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ 2347 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2348 skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; 2348 skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2349 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); 2349 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2350 tcp_init_tso_segs(skb, mss_now);
2350 goto repair; /* Skip network transmission */ 2351 goto repair; /* Skip network transmission */
2351 } 2352 }
2352 2353
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5c3cd5d84a6f..372fdc5381a9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
562 562
563 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { 563 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
564 int (*handler)(struct sk_buff *skb, u32 info); 564 int (*handler)(struct sk_buff *skb, u32 info);
565 const struct ip_tunnel_encap_ops *encap;
565 566
566 if (!iptun_encaps[i]) 567 encap = rcu_dereference(iptun_encaps[i]);
568 if (!encap)
567 continue; 569 continue;
568 handler = rcu_dereference(iptun_encaps[i]->err_handler); 570 handler = encap->err_handler;
569 if (handler && !handler(skb, info)) 571 if (handler && !handler(skb, info))
570 return 0; 572 return 0;
571 } 573 }
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5afe9f83374d..239d4a65ad6e 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -296,7 +296,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
296 skb->len += tailen; 296 skb->len += tailen;
297 skb->data_len += tailen; 297 skb->data_len += tailen;
298 skb->truesize += tailen; 298 skb->truesize += tailen;
299 if (sk) 299 if (sk && sk_fullsock(sk))
300 refcount_add(tailen, &sk->sk_wmem_alloc); 300 refcount_add(tailen, &sk->sk_wmem_alloc);
301 301
302 goto out; 302 goto out;
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index b858bd5280bf..867474abe269 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -72,7 +72,7 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
72 72
73static int gue6_err_proto_handler(int proto, struct sk_buff *skb, 73static int gue6_err_proto_handler(int proto, struct sk_buff *skb,
74 struct inet6_skb_parm *opt, 74 struct inet6_skb_parm *opt,
75 u8 type, u8 code, int offset, u32 info) 75 u8 type, u8 code, int offset, __be32 info)
76{ 76{
77 const struct inet6_protocol *ipprot; 77 const struct inet6_protocol *ipprot;
78 78
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 43890898b0b5..26f25b6e2833 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1722,6 +1722,9 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1722static void ip6erspan_set_version(struct nlattr *data[], 1722static void ip6erspan_set_version(struct nlattr *data[],
1723 struct __ip6_tnl_parm *parms) 1723 struct __ip6_tnl_parm *parms)
1724{ 1724{
1725 if (!data)
1726 return;
1727
1725 parms->erspan_ver = 1; 1728 parms->erspan_ver = 1;
1726 if (data[IFLA_GRE_ERSPAN_VER]) 1729 if (data[IFLA_GRE_ERSPAN_VER])
1727 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); 1730 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
@@ -2104,9 +2107,23 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2104 struct __ip6_tnl_parm *p = &t->parms; 2107 struct __ip6_tnl_parm *p = &t->parms;
2105 __be16 o_flags = p->o_flags; 2108 __be16 o_flags = p->o_flags;
2106 2109
2107 if ((p->erspan_ver == 1 || p->erspan_ver == 2) && 2110 if (p->erspan_ver == 1 || p->erspan_ver == 2) {
2108 !p->collect_md) 2111 if (!p->collect_md)
2109 o_flags |= TUNNEL_KEY; 2112 o_flags |= TUNNEL_KEY;
2113
2114 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
2115 goto nla_put_failure;
2116
2117 if (p->erspan_ver == 1) {
2118 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2119 goto nla_put_failure;
2120 } else {
2121 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
2122 goto nla_put_failure;
2123 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
2124 goto nla_put_failure;
2125 }
2126 }
2110 2127
2111 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 2128 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
2112 nla_put_be16(skb, IFLA_GRE_IFLAGS, 2129 nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -2121,8 +2138,7 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2121 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || 2138 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
2122 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || 2139 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
2123 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) || 2140 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
2124 nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) || 2141 nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
2125 nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2126 goto nla_put_failure; 2142 goto nla_put_failure;
2127 2143
2128 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, 2144 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
@@ -2140,19 +2156,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2140 goto nla_put_failure; 2156 goto nla_put_failure;
2141 } 2157 }
2142 2158
2143 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
2144 goto nla_put_failure;
2145
2146 if (p->erspan_ver == 1) {
2147 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2148 goto nla_put_failure;
2149 } else if (p->erspan_ver == 2) {
2150 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
2151 goto nla_put_failure;
2152 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
2153 goto nla_put_failure;
2154 }
2155
2156 return 0; 2159 return 0;
2157 2160
2158nla_put_failure: 2161nla_put_failure:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 964491cf3672..ce15dc4ccbfa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1274,18 +1274,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock);
1274static void rt6_remove_exception(struct rt6_exception_bucket *bucket, 1274static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1275 struct rt6_exception *rt6_ex) 1275 struct rt6_exception *rt6_ex)
1276{ 1276{
1277 struct fib6_info *from;
1277 struct net *net; 1278 struct net *net;
1278 1279
1279 if (!bucket || !rt6_ex) 1280 if (!bucket || !rt6_ex)
1280 return; 1281 return;
1281 1282
1282 net = dev_net(rt6_ex->rt6i->dst.dev); 1283 net = dev_net(rt6_ex->rt6i->dst.dev);
1284 net->ipv6.rt6_stats->fib_rt_cache--;
1285
1286 /* purge completely the exception to allow releasing the held resources:
1287 * some [sk] cache may keep the dst around for unlimited time
1288 */
1289 from = rcu_dereference_protected(rt6_ex->rt6i->from,
1290 lockdep_is_held(&rt6_exception_lock));
1291 rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
1292 fib6_info_release(from);
1293 dst_dev_put(&rt6_ex->rt6i->dst);
1294
1283 hlist_del_rcu(&rt6_ex->hlist); 1295 hlist_del_rcu(&rt6_ex->hlist);
1284 dst_release(&rt6_ex->rt6i->dst); 1296 dst_release(&rt6_ex->rt6i->dst);
1285 kfree_rcu(rt6_ex, rcu); 1297 kfree_rcu(rt6_ex, rcu);
1286 WARN_ON_ONCE(!bucket->depth); 1298 WARN_ON_ONCE(!bucket->depth);
1287 bucket->depth--; 1299 bucket->depth--;
1288 net->ipv6.rt6_stats->fib_rt_cache--;
1289} 1300}
1290 1301
1291/* Remove oldest rt6_ex in bucket and free the memory 1302/* Remove oldest rt6_ex in bucket and free the memory
@@ -1599,15 +1610,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt)
1599static void rt6_update_exception_stamp_rt(struct rt6_info *rt) 1610static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1600{ 1611{
1601 struct rt6_exception_bucket *bucket; 1612 struct rt6_exception_bucket *bucket;
1602 struct fib6_info *from = rt->from;
1603 struct in6_addr *src_key = NULL; 1613 struct in6_addr *src_key = NULL;
1604 struct rt6_exception *rt6_ex; 1614 struct rt6_exception *rt6_ex;
1605 1615 struct fib6_info *from;
1606 if (!from ||
1607 !(rt->rt6i_flags & RTF_CACHE))
1608 return;
1609 1616
1610 rcu_read_lock(); 1617 rcu_read_lock();
1618 from = rcu_dereference(rt->from);
1619 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1620 goto unlock;
1621
1611 bucket = rcu_dereference(from->rt6i_exception_bucket); 1622 bucket = rcu_dereference(from->rt6i_exception_bucket);
1612 1623
1613#ifdef CONFIG_IPV6_SUBTREES 1624#ifdef CONFIG_IPV6_SUBTREES
@@ -1626,6 +1637,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1626 if (rt6_ex) 1637 if (rt6_ex)
1627 rt6_ex->stamp = jiffies; 1638 rt6_ex->stamp = jiffies;
1628 1639
1640unlock:
1629 rcu_read_unlock(); 1641 rcu_read_unlock();
1630} 1642}
1631 1643
@@ -2742,20 +2754,24 @@ static int ip6_route_check_nh_onlink(struct net *net,
2742 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN; 2754 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
2743 const struct in6_addr *gw_addr = &cfg->fc_gateway; 2755 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2744 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT; 2756 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
2757 struct fib6_info *from;
2745 struct rt6_info *grt; 2758 struct rt6_info *grt;
2746 int err; 2759 int err;
2747 2760
2748 err = 0; 2761 err = 0;
2749 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0); 2762 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
2750 if (grt) { 2763 if (grt) {
2764 rcu_read_lock();
2765 from = rcu_dereference(grt->from);
2751 if (!grt->dst.error && 2766 if (!grt->dst.error &&
2752 /* ignore match if it is the default route */ 2767 /* ignore match if it is the default route */
2753 grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) && 2768 from && !ipv6_addr_any(&from->fib6_dst.addr) &&
2754 (grt->rt6i_flags & flags || dev != grt->dst.dev)) { 2769 (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
2755 NL_SET_ERR_MSG(extack, 2770 NL_SET_ERR_MSG(extack,
2756 "Nexthop has invalid gateway or device mismatch"); 2771 "Nexthop has invalid gateway or device mismatch");
2757 err = -EINVAL; 2772 err = -EINVAL;
2758 } 2773 }
2774 rcu_read_unlock();
2759 2775
2760 ip6_rt_put(grt); 2776 ip6_rt_put(grt);
2761 } 2777 }
@@ -4649,7 +4665,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4649 table = rt->fib6_table->tb6_id; 4665 table = rt->fib6_table->tb6_id;
4650 else 4666 else
4651 table = RT6_TABLE_UNSPEC; 4667 table = RT6_TABLE_UNSPEC;
4652 rtm->rtm_table = table; 4668 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
4653 if (nla_put_u32(skb, RTA_TABLE, table)) 4669 if (nla_put_u32(skb, RTA_TABLE, table))
4654 goto nla_put_failure; 4670 goto nla_put_failure;
4655 4671
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 2596ffdeebea..b444483cdb2b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -288,8 +288,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
288 int peeked, peeking, off; 288 int peeked, peeking, off;
289 int err; 289 int err;
290 int is_udplite = IS_UDPLITE(sk); 290 int is_udplite = IS_UDPLITE(sk);
291 struct udp_mib __percpu *mib;
291 bool checksum_valid = false; 292 bool checksum_valid = false;
292 struct udp_mib *mib;
293 int is_udp4; 293 int is_udp4;
294 294
295 if (flags & MSG_ERRQUEUE) 295 if (flags & MSG_ERRQUEUE)
@@ -420,17 +420,19 @@ EXPORT_SYMBOL(udpv6_encap_enable);
420 */ 420 */
421static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, 421static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
422 struct inet6_skb_parm *opt, 422 struct inet6_skb_parm *opt,
423 u8 type, u8 code, int offset, u32 info) 423 u8 type, u8 code, int offset, __be32 info)
424{ 424{
425 int i; 425 int i;
426 426
427 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { 427 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
428 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 428 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
429 u8 type, u8 code, int offset, u32 info); 429 u8 type, u8 code, int offset, __be32 info);
430 const struct ip6_tnl_encap_ops *encap;
430 431
431 if (!ip6tun_encaps[i]) 432 encap = rcu_dereference(ip6tun_encaps[i]);
433 if (!encap)
432 continue; 434 continue;
433 handler = rcu_dereference(ip6tun_encaps[i]->err_handler); 435 handler = encap->err_handler;
434 if (handler && !handler(skb, opt, type, code, offset, info)) 436 if (handler && !handler(skb, opt, type, code, offset, info))
435 return 0; 437 return 0;
436 } 438 }
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index f5b4febeaa25..bc65db782bfb 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
344 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); 344 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
345 unsigned int i; 345 unsigned int i;
346 346
347 xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
348 xfrm_flush_gc(); 347 xfrm_flush_gc();
348 xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
349 349
350 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) 350 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
351 WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); 351 WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 655c787f9d54..5651c29cb5bd 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock)
196 return 0; 196 return 0;
197} 197}
198 198
199static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, 199static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
200 gfp_t allocation, struct sock *sk) 200 struct sock *sk)
201{ 201{
202 int err = -ENOBUFS; 202 int err = -ENOBUFS;
203 203
204 sock_hold(sk); 204 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
205 if (*skb2 == NULL) { 205 return err;
206 if (refcount_read(&skb->users) != 1) { 206
207 *skb2 = skb_clone(skb, allocation); 207 skb = skb_clone(skb, allocation);
208 } else { 208
209 *skb2 = skb; 209 if (skb) {
210 refcount_inc(&skb->users); 210 skb_set_owner_r(skb, sk);
211 } 211 skb_queue_tail(&sk->sk_receive_queue, skb);
212 } 212 sk->sk_data_ready(sk);
213 if (*skb2 != NULL) { 213 err = 0;
214 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
215 skb_set_owner_r(*skb2, sk);
216 skb_queue_tail(&sk->sk_receive_queue, *skb2);
217 sk->sk_data_ready(sk);
218 *skb2 = NULL;
219 err = 0;
220 }
221 } 214 }
222 sock_put(sk);
223 return err; 215 return err;
224} 216}
225 217
@@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
234{ 226{
235 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 227 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
236 struct sock *sk; 228 struct sock *sk;
237 struct sk_buff *skb2 = NULL;
238 int err = -ESRCH; 229 int err = -ESRCH;
239 230
240 /* XXX Do we need something like netlink_overrun? I think 231 /* XXX Do we need something like netlink_overrun? I think
@@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
253 * socket. 244 * socket.
254 */ 245 */
255 if (pfk->promisc) 246 if (pfk->promisc)
256 pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); 247 pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
257 248
258 /* the exact target will be processed later */ 249 /* the exact target will be processed later */
259 if (sk == one_sk) 250 if (sk == one_sk)
@@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
268 continue; 259 continue;
269 } 260 }
270 261
271 err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); 262 err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
272 263
273 /* Error is cleared after successful sending to at least one 264 /* Error is cleared after successful sending to at least one
274 * registered KM */ 265 * registered KM */
@@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
278 rcu_read_unlock(); 269 rcu_read_unlock();
279 270
280 if (one_sk != NULL) 271 if (one_sk != NULL)
281 err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); 272 err = pfkey_broadcast_one(skb, allocation, one_sk);
282 273
283 kfree_skb(skb2);
284 kfree_skb(skb); 274 kfree_skb(skb);
285 return err; 275 return err;
286} 276}
@@ -1783,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
1783 if (proto == 0) 1773 if (proto == 0)
1784 return -EINVAL; 1774 return -EINVAL;
1785 1775
1786 err = xfrm_state_flush(net, proto, true); 1776 err = xfrm_state_flush(net, proto, true, false);
1787 err2 = unicast_flush_resp(sk, hdr); 1777 err2 = unicast_flush_resp(sk, hdr);
1788 if (err || err2) { 1778 if (err || err2) {
1789 if (err == -ESRCH) /* empty table - go quietly */ 1779 if (err == -ESRCH) /* empty table - go quietly */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 87a729926734..977dea436ee8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
615 * We need a bit of data queued to build aggregates properly, so 615 * We need a bit of data queued to build aggregates properly, so
616 * instruct the TCP stack to allow more than a single ms of data 616 * instruct the TCP stack to allow more than a single ms of data
617 * to be queued in the stack. The value is a bit-shift of 1 617 * to be queued in the stack. The value is a bit-shift of 1
618 * second, so 8 is ~4ms of queued data. Only affects local TCP 618 * second, so 7 is ~8ms of queued data. Only affects local TCP
619 * sockets. 619 * sockets.
620 * This is the default, anyhow - drivers may need to override it 620 * This is the default, anyhow - drivers may need to override it
621 * for local reasons (longer buffers, longer completion time, or 621 * for local reasons (longer buffers, longer completion time, or
622 * similar). 622 * similar).
623 */ 623 */
624 local->hw.tx_sk_pacing_shift = 8; 624 local->hw.tx_sk_pacing_shift = 7;
625 625
626 /* set up some defaults */ 626 /* set up some defaults */
627 local->hw.queues = 1; 627 local->hw.queues = 1;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bb4d71efb6fb..c2a6da5d80da 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2644,6 +2644,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2644 struct ieee80211_sub_if_data *sdata = rx->sdata; 2644 struct ieee80211_sub_if_data *sdata = rx->sdata;
2645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2646 u16 ac, q, hdrlen; 2646 u16 ac, q, hdrlen;
2647 int tailroom = 0;
2647 2648
2648 hdr = (struct ieee80211_hdr *) skb->data; 2649 hdr = (struct ieee80211_hdr *) skb->data;
2649 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2650 hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2732,8 +2733,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2732 if (!ifmsh->mshcfg.dot11MeshForwarding) 2733 if (!ifmsh->mshcfg.dot11MeshForwarding)
2733 goto out; 2734 goto out;
2734 2735
2736 if (sdata->crypto_tx_tailroom_needed_cnt)
2737 tailroom = IEEE80211_ENCRYPT_TAILROOM;
2738
2735 fwd_skb = skb_copy_expand(skb, local->tx_headroom + 2739 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2736 sdata->encrypt_headroom, 0, GFP_ATOMIC); 2740 sdata->encrypt_headroom,
2741 tailroom, GFP_ATOMIC);
2737 if (!fwd_skb) 2742 if (!fwd_skb)
2738 goto out; 2743 goto out;
2739 2744
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9fc76b19cd3c..db3473540303 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code,
132 ph->utid = 0; 132 ph->utid = 0;
133 ph->message_id = id; 133 ph->message_id = id;
134 ph->pipe_handle = pn->pipe_handle; 134 ph->pipe_handle = pn->pipe_handle;
135 ph->data[0] = code; 135 ph->error_code = code;
136 return pn_skb_send(sk, skb, NULL); 136 return pn_skb_send(sk, skb, NULL);
137} 137}
138 138
@@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
153 ph->utid = id; /* whatever */ 153 ph->utid = id; /* whatever */
154 ph->message_id = id; 154 ph->message_id = id;
155 ph->pipe_handle = pn->pipe_handle; 155 ph->pipe_handle = pn->pipe_handle;
156 ph->data[0] = code; 156 ph->error_code = code;
157 return pn_skb_send(sk, skb, NULL); 157 return pn_skb_send(sk, skb, NULL);
158} 158}
159 159
@@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
208 struct pnpipehdr *ph; 208 struct pnpipehdr *ph;
209 struct sockaddr_pn dst; 209 struct sockaddr_pn dst;
210 u8 data[4] = { 210 u8 data[4] = {
211 oph->data[0], /* PEP type */ 211 oph->pep_type, /* PEP type */
212 code, /* error code, at an unusual offset */ 212 code, /* error code, at an unusual offset */
213 PAD, PAD, 213 PAD, PAD,
214 }; 214 };
@@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
221 ph->utid = oph->utid; 221 ph->utid = oph->utid;
222 ph->message_id = PNS_PEP_CTRL_RESP; 222 ph->message_id = PNS_PEP_CTRL_RESP;
223 ph->pipe_handle = oph->pipe_handle; 223 ph->pipe_handle = oph->pipe_handle;
224 ph->data[0] = oph->data[1]; /* CTRL id */ 224 ph->data0 = oph->data[0]; /* CTRL id */
225 225
226 pn_skb_get_src_sockaddr(oskb, &dst); 226 pn_skb_get_src_sockaddr(oskb, &dst);
227 return pn_skb_send(sk, skb, &dst); 227 return pn_skb_send(sk, skb, &dst);
@@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
272 return -EINVAL; 272 return -EINVAL;
273 273
274 hdr = pnp_hdr(skb); 274 hdr = pnp_hdr(skb);
275 if (hdr->data[0] != PN_PEP_TYPE_COMMON) { 275 if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
276 net_dbg_ratelimited("Phonet unknown PEP type: %u\n", 276 net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
277 (unsigned int)hdr->data[0]); 277 (unsigned int)hdr->pep_type);
278 return -EOPNOTSUPP; 278 return -EOPNOTSUPP;
279 } 279 }
280 280
281 switch (hdr->data[1]) { 281 switch (hdr->data[0]) {
282 case PN_PEP_IND_FLOW_CONTROL: 282 case PN_PEP_IND_FLOW_CONTROL:
283 switch (pn->tx_fc) { 283 switch (pn->tx_fc) {
284 case PN_LEGACY_FLOW_CONTROL: 284 case PN_LEGACY_FLOW_CONTROL:
285 switch (hdr->data[4]) { 285 switch (hdr->data[3]) {
286 case PEP_IND_BUSY: 286 case PEP_IND_BUSY:
287 atomic_set(&pn->tx_credits, 0); 287 atomic_set(&pn->tx_credits, 0);
288 break; 288 break;
@@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
292 } 292 }
293 break; 293 break;
294 case PN_ONE_CREDIT_FLOW_CONTROL: 294 case PN_ONE_CREDIT_FLOW_CONTROL:
295 if (hdr->data[4] == PEP_IND_READY) 295 if (hdr->data[3] == PEP_IND_READY)
296 atomic_set(&pn->tx_credits, wake = 1); 296 atomic_set(&pn->tx_credits, wake = 1);
297 break; 297 break;
298 } 298 }
@@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
301 case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: 301 case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
302 if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) 302 if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
303 break; 303 break;
304 atomic_add(wake = hdr->data[4], &pn->tx_credits); 304 atomic_add(wake = hdr->data[3], &pn->tx_credits);
305 break; 305 break;
306 306
307 default: 307 default:
308 net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", 308 net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
309 (unsigned int)hdr->data[1]); 309 (unsigned int)hdr->data[0]);
310 return -EOPNOTSUPP; 310 return -EOPNOTSUPP;
311 } 311 }
312 if (wake) 312 if (wake)
@@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
318{ 318{
319 struct pep_sock *pn = pep_sk(sk); 319 struct pep_sock *pn = pep_sk(sk);
320 struct pnpipehdr *hdr = pnp_hdr(skb); 320 struct pnpipehdr *hdr = pnp_hdr(skb);
321 u8 n_sb = hdr->data[0]; 321 u8 n_sb = hdr->data0;
322 322
323 pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; 323 pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
324 __skb_pull(skb, sizeof(*hdr)); 324 __skb_pull(skb, sizeof(*hdr));
@@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
506 return -ECONNREFUSED; 506 return -ECONNREFUSED;
507 507
508 /* Parse sub-blocks */ 508 /* Parse sub-blocks */
509 n_sb = hdr->data[4]; 509 n_sb = hdr->data[3];
510 while (n_sb > 0) { 510 while (n_sb > 0) {
511 u8 type, buf[6], len = sizeof(buf); 511 u8 type, buf[6], len = sizeof(buf);
512 const u8 *data = pep_get_sb(skb, &type, &len, buf); 512 const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk)
739 ph->utid = 0; 739 ph->utid = 0;
740 ph->message_id = PNS_PIPE_REMOVE_REQ; 740 ph->message_id = PNS_PIPE_REMOVE_REQ;
741 ph->pipe_handle = pn->pipe_handle; 741 ph->pipe_handle = pn->pipe_handle;
742 ph->data[0] = PAD; 742 ph->data0 = PAD;
743 return pn_skb_send(sk, skb, NULL); 743 return pn_skb_send(sk, skb, NULL);
744} 744}
745 745
@@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
817 peer_type = hdr->other_pep_type << 8; 817 peer_type = hdr->other_pep_type << 8;
818 818
819 /* Parse sub-blocks (options) */ 819 /* Parse sub-blocks (options) */
820 n_sb = hdr->data[4]; 820 n_sb = hdr->data[3];
821 while (n_sb > 0) { 821 while (n_sb > 0) {
822 u8 type, buf[1], len = sizeof(buf); 822 u8 type, buf[1], len = sizeof(buf);
823 const u8 *data = pep_get_sb(skb, &type, &len, buf); 823 const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
1109 ph->utid = 0; 1109 ph->utid = 0;
1110 if (pn->aligned) { 1110 if (pn->aligned) {
1111 ph->message_id = PNS_PIPE_ALIGNED_DATA; 1111 ph->message_id = PNS_PIPE_ALIGNED_DATA;
1112 ph->data[0] = 0; /* padding */ 1112 ph->data0 = 0; /* padding */
1113 } else 1113 } else
1114 ph->message_id = PNS_PIPE_DATA; 1114 ph->message_id = PNS_PIPE_DATA;
1115 ph->pipe_handle = pn->pipe_handle; 1115 ph->pipe_handle = pn->pipe_handle;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 033696e6f74f..ad158d311ffa 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -207,7 +207,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
207 207
208 /* When a data chunk is sent, reset the heartbeat interval. */ 208 /* When a data chunk is sent, reset the heartbeat interval. */
209 expires = jiffies + sctp_transport_timeout(transport); 209 expires = jiffies + sctp_transport_timeout(transport);
210 if (time_before(transport->hb_timer.expires, expires) && 210 if ((time_before(transport->hb_timer.expires, expires) ||
211 !timer_pending(&transport->hb_timer)) &&
211 !mod_timer(&transport->hb_timer, 212 !mod_timer(&transport->hb_timer,
212 expires + prandom_u32_max(transport->rto))) 213 expires + prandom_u32_max(transport->rto)))
213 sctp_transport_hold(transport); 214 sctp_transport_hold(transport);
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 5721416d0605..adbdf195eb08 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -113,9 +113,9 @@ struct smc_host_cdc_msg { /* Connection Data Control message */
113} __aligned(8); 113} __aligned(8);
114 114
115enum smc_urg_state { 115enum smc_urg_state {
116 SMC_URG_VALID, /* data present */ 116 SMC_URG_VALID = 1, /* data present */
117 SMC_URG_NOTYET, /* data pending */ 117 SMC_URG_NOTYET = 2, /* data pending */
118 SMC_URG_READ /* data was already read */ 118 SMC_URG_READ = 3, /* data was already read */
119}; 119};
120 120
121struct smc_connection { 121struct smc_connection {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1217c90a363b..684f2125fc6b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -388,7 +388,7 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
388 rc_ = tipc_sk_sock_err((sock_), timeo_); \ 388 rc_ = tipc_sk_sock_err((sock_), timeo_); \
389 if (rc_) \ 389 if (rc_) \
390 break; \ 390 break; \
391 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ 391 add_wait_queue(sk_sleep(sk_), &wait_); \
392 release_sock(sk_); \ 392 release_sock(sk_); \
393 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ 393 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
394 sched_annotate_sleep(); \ 394 sched_annotate_sleep(); \
@@ -1677,7 +1677,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk)
1677static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1677static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1678{ 1678{
1679 struct sock *sk = sock->sk; 1679 struct sock *sk = sock->sk;
1680 DEFINE_WAIT(wait); 1680 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1681 long timeo = *timeop; 1681 long timeo = *timeop;
1682 int err = sock_error(sk); 1682 int err = sock_error(sk);
1683 1683
@@ -1685,15 +1685,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1685 return err; 1685 return err;
1686 1686
1687 for (;;) { 1687 for (;;) {
1688 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1689 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 1688 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1690 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1689 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1691 err = -ENOTCONN; 1690 err = -ENOTCONN;
1692 break; 1691 break;
1693 } 1692 }
1693 add_wait_queue(sk_sleep(sk), &wait);
1694 release_sock(sk); 1694 release_sock(sk);
1695 timeo = schedule_timeout(timeo); 1695 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1696 sched_annotate_sleep();
1696 lock_sock(sk); 1697 lock_sock(sk);
1698 remove_wait_queue(sk_sleep(sk), &wait);
1697 } 1699 }
1698 err = 0; 1700 err = 0;
1699 if (!skb_queue_empty(&sk->sk_receive_queue)) 1701 if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -1709,7 +1711,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1709 if (err) 1711 if (err)
1710 break; 1712 break;
1711 } 1713 }
1712 finish_wait(sk_sleep(sk), &wait);
1713 *timeop = timeo; 1714 *timeop = timeo;
1714 return err; 1715 return err;
1715} 1716}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 74d1eed7cbd4..a95d479caeea 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -890,7 +890,7 @@ retry:
890 addr->hash ^= sk->sk_type; 890 addr->hash ^= sk->sk_type;
891 891
892 __unix_remove_socket(sk); 892 __unix_remove_socket(sk);
893 u->addr = addr; 893 smp_store_release(&u->addr, addr);
894 __unix_insert_socket(&unix_socket_table[addr->hash], sk); 894 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
895 spin_unlock(&unix_table_lock); 895 spin_unlock(&unix_table_lock);
896 err = 0; 896 err = 0;
@@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1060 1060
1061 err = 0; 1061 err = 0;
1062 __unix_remove_socket(sk); 1062 __unix_remove_socket(sk);
1063 u->addr = addr; 1063 smp_store_release(&u->addr, addr);
1064 __unix_insert_socket(list, sk); 1064 __unix_insert_socket(list, sk);
1065 1065
1066out_unlock: 1066out_unlock:
@@ -1331,15 +1331,29 @@ restart:
1331 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); 1331 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1332 otheru = unix_sk(other); 1332 otheru = unix_sk(other);
1333 1333
1334 /* copy address information from listening to new sock*/ 1334 /* copy address information from listening to new sock
1335 if (otheru->addr) { 1335 *
1336 refcount_inc(&otheru->addr->refcnt); 1336 * The contents of *(otheru->addr) and otheru->path
1337 newu->addr = otheru->addr; 1337 * are seen fully set up here, since we have found
1338 } 1338 * otheru in hash under unix_table_lock. Insertion
1339 * into the hash chain we'd found it in had been done
1340 * in an earlier critical area protected by unix_table_lock,
1341 * the same one where we'd set *(otheru->addr) contents,
1342 * as well as otheru->path and otheru->addr itself.
1343 *
1344 * Using smp_store_release() here to set newu->addr
1345 * is enough to make those stores, as well as stores
1346 * to newu->path visible to anyone who gets newu->addr
1347 * by smp_load_acquire(). IOW, the same warranties
1348 * as for unix_sock instances bound in unix_bind() or
1349 * in unix_autobind().
1350 */
1339 if (otheru->path.dentry) { 1351 if (otheru->path.dentry) {
1340 path_get(&otheru->path); 1352 path_get(&otheru->path);
1341 newu->path = otheru->path; 1353 newu->path = otheru->path;
1342 } 1354 }
1355 refcount_inc(&otheru->addr->refcnt);
1356 smp_store_release(&newu->addr, otheru->addr);
1343 1357
1344 /* Set credentials */ 1358 /* Set credentials */
1345 copy_peercred(sk, other); 1359 copy_peercred(sk, other);
@@ -1453,7 +1467,7 @@ out:
1453static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) 1467static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1454{ 1468{
1455 struct sock *sk = sock->sk; 1469 struct sock *sk = sock->sk;
1456 struct unix_sock *u; 1470 struct unix_address *addr;
1457 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); 1471 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1458 int err = 0; 1472 int err = 0;
1459 1473
@@ -1468,19 +1482,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1468 sock_hold(sk); 1482 sock_hold(sk);
1469 } 1483 }
1470 1484
1471 u = unix_sk(sk); 1485 addr = smp_load_acquire(&unix_sk(sk)->addr);
1472 unix_state_lock(sk); 1486 if (!addr) {
1473 if (!u->addr) {
1474 sunaddr->sun_family = AF_UNIX; 1487 sunaddr->sun_family = AF_UNIX;
1475 sunaddr->sun_path[0] = 0; 1488 sunaddr->sun_path[0] = 0;
1476 err = sizeof(short); 1489 err = sizeof(short);
1477 } else { 1490 } else {
1478 struct unix_address *addr = u->addr;
1479
1480 err = addr->len; 1491 err = addr->len;
1481 memcpy(sunaddr, addr->name, addr->len); 1492 memcpy(sunaddr, addr->name, addr->len);
1482 } 1493 }
1483 unix_state_unlock(sk);
1484 sock_put(sk); 1494 sock_put(sk);
1485out: 1495out:
1486 return err; 1496 return err;
@@ -2073,11 +2083,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2073 2083
2074static void unix_copy_addr(struct msghdr *msg, struct sock *sk) 2084static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2075{ 2085{
2076 struct unix_sock *u = unix_sk(sk); 2086 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2077 2087
2078 if (u->addr) { 2088 if (addr) {
2079 msg->msg_namelen = u->addr->len; 2089 msg->msg_namelen = addr->len;
2080 memcpy(msg->msg_name, u->addr->name, u->addr->len); 2090 memcpy(msg->msg_name, addr->name, addr->len);
2081 } 2091 }
2082} 2092}
2083 2093
@@ -2581,15 +2591,14 @@ static int unix_open_file(struct sock *sk)
2581 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 2591 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2582 return -EPERM; 2592 return -EPERM;
2583 2593
2584 unix_state_lock(sk); 2594 if (!smp_load_acquire(&unix_sk(sk)->addr))
2595 return -ENOENT;
2596
2585 path = unix_sk(sk)->path; 2597 path = unix_sk(sk)->path;
2586 if (!path.dentry) { 2598 if (!path.dentry)
2587 unix_state_unlock(sk);
2588 return -ENOENT; 2599 return -ENOENT;
2589 }
2590 2600
2591 path_get(&path); 2601 path_get(&path);
2592 unix_state_unlock(sk);
2593 2602
2594 fd = get_unused_fd_flags(O_CLOEXEC); 2603 fd = get_unused_fd_flags(O_CLOEXEC);
2595 if (fd < 0) 2604 if (fd < 0)
@@ -2830,7 +2839,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
2830 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING), 2839 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2831 sock_i_ino(s)); 2840 sock_i_ino(s));
2832 2841
2833 if (u->addr) { 2842 if (u->addr) { // under unix_table_lock here
2834 int i, len; 2843 int i, len;
2835 seq_putc(seq, ' '); 2844 seq_putc(seq, ' ');
2836 2845
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e83462..3183d9b8ab33 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
10 10
11static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) 11static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
12{ 12{
13 struct unix_address *addr = unix_sk(sk)->addr; 13 /* might or might not have unix_table_lock */
14 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
14 15
15 if (!addr) 16 if (!addr)
16 return 0; 17 return 0;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index ec3a828672ef..eff31348e20b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -679,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
679 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; 679 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
680 int len, i, rc = 0; 680 int len, i, rc = 0;
681 681
682 if (!sock_flag(sk, SOCK_ZAPPED) || 682 if (addr_len != sizeof(struct sockaddr_x25) ||
683 addr_len != sizeof(struct sockaddr_x25) ||
684 addr->sx25_family != AF_X25) { 683 addr->sx25_family != AF_X25) {
685 rc = -EINVAL; 684 rc = -EINVAL;
686 goto out; 685 goto out;
@@ -699,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
699 } 698 }
700 699
701 lock_sock(sk); 700 lock_sock(sk);
702 x25_sk(sk)->source_addr = addr->sx25_addr; 701 if (sock_flag(sk, SOCK_ZAPPED)) {
703 x25_insert_socket(sk); 702 x25_sk(sk)->source_addr = addr->sx25_addr;
704 sock_reset_flag(sk, SOCK_ZAPPED); 703 x25_insert_socket(sk);
704 sock_reset_flag(sk, SOCK_ZAPPED);
705 } else {
706 rc = -EINVAL;
707 }
705 release_sock(sk); 708 release_sock(sk);
706 SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); 709 SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
707out: 710out:
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 45f3b528dc09..85e4fe4f18cc 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -366,7 +366,6 @@ static int xsk_release(struct socket *sock)
366 366
367 xskq_destroy(xs->rx); 367 xskq_destroy(xs->rx);
368 xskq_destroy(xs->tx); 368 xskq_destroy(xs->tx);
369 xdp_put_umem(xs->umem);
370 369
371 sock_orphan(sk); 370 sock_orphan(sk);
372 sock->sk = NULL; 371 sock->sk = NULL;
@@ -718,6 +717,18 @@ static const struct proto_ops xsk_proto_ops = {
718 .sendpage = sock_no_sendpage, 717 .sendpage = sock_no_sendpage,
719}; 718};
720 719
720static void xsk_destruct(struct sock *sk)
721{
722 struct xdp_sock *xs = xdp_sk(sk);
723
724 if (!sock_flag(sk, SOCK_DEAD))
725 return;
726
727 xdp_put_umem(xs->umem);
728
729 sk_refcnt_debug_dec(sk);
730}
731
721static int xsk_create(struct net *net, struct socket *sock, int protocol, 732static int xsk_create(struct net *net, struct socket *sock, int protocol,
722 int kern) 733 int kern)
723{ 734{
@@ -744,6 +755,9 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
744 755
745 sk->sk_family = PF_XDP; 756 sk->sk_family = PF_XDP;
746 757
758 sk->sk_destruct = xsk_destruct;
759 sk_refcnt_debug_inc(sk);
760
747 sock_set_flag(sk, SOCK_RCU_FREE); 761 sock_set_flag(sk, SOCK_RCU_FREE);
748 762
749 xs = xdp_sk(sk); 763 xs = xdp_sk(sk);
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 6be8c7df15bb..dbb3c1945b5c 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
76 int ifindex; 76 int ifindex;
77 struct xfrm_if *xi; 77 struct xfrm_if *xi;
78 78
79 if (!skb->dev) 79 if (!secpath_exists(skb) || !skb->dev)
80 return NULL; 80 return NULL;
81 81
82 xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id); 82 xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
83 ifindex = skb->dev->ifindex; 83 ifindex = skb->dev->ifindex;
84 84
85 for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { 85 for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ba0a4048c846..8d1a898d0ba5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3314,8 +3314,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3314 3314
3315 if (ifcb) { 3315 if (ifcb) {
3316 xi = ifcb->decode_session(skb); 3316 xi = ifcb->decode_session(skb);
3317 if (xi) 3317 if (xi) {
3318 if_id = xi->p.if_id; 3318 if_id = xi->p.if_id;
3319 net = xi->net;
3320 }
3319 } 3321 }
3320 rcu_read_unlock(); 3322 rcu_read_unlock();
3321 3323
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 23c92891758a..1bb971f46fc6 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x)
432} 432}
433EXPORT_SYMBOL(xfrm_state_free); 433EXPORT_SYMBOL(xfrm_state_free);
434 434
435static void xfrm_state_gc_destroy(struct xfrm_state *x) 435static void ___xfrm_state_destroy(struct xfrm_state *x)
436{ 436{
437 tasklet_hrtimer_cancel(&x->mtimer); 437 tasklet_hrtimer_cancel(&x->mtimer);
438 del_timer_sync(&x->rtimer); 438 del_timer_sync(&x->rtimer);
@@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
474 synchronize_rcu(); 474 synchronize_rcu();
475 475
476 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) 476 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
477 xfrm_state_gc_destroy(x); 477 ___xfrm_state_destroy(x);
478} 478}
479 479
480static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) 480static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
@@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
598} 598}
599EXPORT_SYMBOL(xfrm_state_alloc); 599EXPORT_SYMBOL(xfrm_state_alloc);
600 600
601void __xfrm_state_destroy(struct xfrm_state *x) 601void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
602{ 602{
603 WARN_ON(x->km.state != XFRM_STATE_DEAD); 603 WARN_ON(x->km.state != XFRM_STATE_DEAD);
604 604
605 spin_lock_bh(&xfrm_state_gc_lock); 605 if (sync) {
606 hlist_add_head(&x->gclist, &xfrm_state_gc_list); 606 synchronize_rcu();
607 spin_unlock_bh(&xfrm_state_gc_lock); 607 ___xfrm_state_destroy(x);
608 schedule_work(&xfrm_state_gc_work); 608 } else {
609 spin_lock_bh(&xfrm_state_gc_lock);
610 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
611 spin_unlock_bh(&xfrm_state_gc_lock);
612 schedule_work(&xfrm_state_gc_work);
613 }
609} 614}
610EXPORT_SYMBOL(__xfrm_state_destroy); 615EXPORT_SYMBOL(__xfrm_state_destroy);
611 616
@@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
708} 713}
709#endif 714#endif
710 715
711int xfrm_state_flush(struct net *net, u8 proto, bool task_valid) 716int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
712{ 717{
713 int i, err = 0, cnt = 0; 718 int i, err = 0, cnt = 0;
714 719
@@ -730,7 +735,10 @@ restart:
730 err = xfrm_state_delete(x); 735 err = xfrm_state_delete(x);
731 xfrm_audit_state_delete(x, err ? 0 : 1, 736 xfrm_audit_state_delete(x, err ? 0 : 1,
732 task_valid); 737 task_valid);
733 xfrm_state_put(x); 738 if (sync)
739 xfrm_state_put_sync(x);
740 else
741 xfrm_state_put(x);
734 if (!err) 742 if (!err)
735 cnt++; 743 cnt++;
736 744
@@ -2215,7 +2223,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
2215 if (atomic_read(&t->tunnel_users) == 2) 2223 if (atomic_read(&t->tunnel_users) == 2)
2216 xfrm_state_delete(t); 2224 xfrm_state_delete(t);
2217 atomic_dec(&t->tunnel_users); 2225 atomic_dec(&t->tunnel_users);
2218 xfrm_state_put(t); 2226 xfrm_state_put_sync(t);
2219 x->tunnel = NULL; 2227 x->tunnel = NULL;
2220 } 2228 }
2221} 2229}
@@ -2375,8 +2383,8 @@ void xfrm_state_fini(struct net *net)
2375 unsigned int sz; 2383 unsigned int sz;
2376 2384
2377 flush_work(&net->xfrm.state_hash_work); 2385 flush_work(&net->xfrm.state_hash_work);
2378 xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
2379 flush_work(&xfrm_state_gc_work); 2386 flush_work(&xfrm_state_gc_work);
2387 xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
2380 2388
2381 WARN_ON(!list_empty(&net->xfrm.state_all)); 2389 WARN_ON(!list_empty(&net->xfrm.state_all));
2382 2390
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c6d26afcf89d..a131f9ff979e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1932,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1932 struct xfrm_usersa_flush *p = nlmsg_data(nlh); 1932 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1933 int err; 1933 int err;
1934 1934
1935 err = xfrm_state_flush(net, p->proto, true); 1935 err = xfrm_state_flush(net, p->proto, true, false);
1936 if (err) { 1936 if (err) {
1937 if (err == -ESRCH) /* empty table */ 1937 if (err == -ESRCH) /* empty table */
1938 return 0; 1938 return 0;
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index f84001019356..33028c098ef3 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -321,6 +321,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
321 if (a->u.net->sk) { 321 if (a->u.net->sk) {
322 struct sock *sk = a->u.net->sk; 322 struct sock *sk = a->u.net->sk;
323 struct unix_sock *u; 323 struct unix_sock *u;
324 struct unix_address *addr;
324 int len = 0; 325 int len = 0;
325 char *p = NULL; 326 char *p = NULL;
326 327
@@ -351,14 +352,15 @@ static void dump_common_audit_data(struct audit_buffer *ab,
351#endif 352#endif
352 case AF_UNIX: 353 case AF_UNIX:
353 u = unix_sk(sk); 354 u = unix_sk(sk);
355 addr = smp_load_acquire(&u->addr);
356 if (!addr)
357 break;
354 if (u->path.dentry) { 358 if (u->path.dentry) {
355 audit_log_d_path(ab, " path=", &u->path); 359 audit_log_d_path(ab, " path=", &u->path);
356 break; 360 break;
357 } 361 }
358 if (!u->addr) 362 len = addr->len-sizeof(short);
359 break; 363 p = &addr->name->sun_path[0];
360 len = u->addr->len-sizeof(short);
361 p = &u->addr->name->sun_path[0];
362 audit_log_format(ab, " path="); 364 audit_log_format(ab, " path=");
363 if (*p) 365 if (*p)
364 audit_log_untrustedstring(ab, p); 366 audit_log_untrustedstring(ab, p);
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index 147e34cfceb7..02d7c871862a 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -474,6 +474,16 @@ static void test_lpm_delete(void)
474 assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 && 474 assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
475 errno == ENOENT); 475 errno == ENOENT);
476 476
477 key->prefixlen = 30; // unused prefix so far
478 inet_pton(AF_INET, "192.255.0.0", key->data);
479 assert(bpf_map_delete_elem(map_fd, key) == -1 &&
480 errno == ENOENT);
481
482 key->prefixlen = 16; // same prefix as the root node
483 inet_pton(AF_INET, "192.255.0.0", key->data);
484 assert(bpf_map_delete_elem(map_fd, key) == -1 &&
485 errno == ENOENT);
486
477 /* assert initial lookup */ 487 /* assert initial lookup */
478 key->prefixlen = 32; 488 key->prefixlen = 32;
479 inet_pton(AF_INET, "192.168.0.1", key->data); 489 inet_pton(AF_INET, "192.168.0.1", key->data);
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 802b4af18729..1080ff55a788 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -388,6 +388,7 @@ fib_carrier_unicast_test()
388 388
389 set -e 389 set -e
390 $IP link set dev dummy0 carrier off 390 $IP link set dev dummy0 carrier off
391 sleep 1
391 set +e 392 set +e
392 393
393 echo " Carrier down" 394 echo " Carrier down"