summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c50
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c28
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c12
-rw-r--r--drivers/net/hyperv/Kconfig1
-rw-r--r--drivers/net/hyperv/hyperv_net.h30
-rw-r--r--drivers/net/hyperv/netvsc_drv.c242
-rw-r--r--drivers/net/phy/mdio-gpio.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c11
-rw-r--r--drivers/net/xen-netfront.c4
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h20
-rw-r--r--include/net/ip_vs.h30
-rw-r--r--include/net/netfilter/nf_conntrack_count.h3
-rw-r--r--include/net/netfilter/nft_dup.h10
-rw-r--r--include/net/sctp/structs.h5
-rw-r--r--include/net/tls.h6
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h2
-rw-r--r--include/uapi/linux/nl80211.h28
-rw-r--r--kernel/bpf/inode.c14
-rw-r--r--net/bridge/netfilter/ebtables.c25
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c2
-rw-r--r--net/core/neighbour.c10
-rw-r--r--net/core/sock.c15
-rw-r--r--net/dsa/tag_trailer.c3
-rw-r--r--net/ipv4/netfilter/ip_tables.c1
-rw-r--r--net/ipv4/tcp_ipv4.c4
-rw-r--r--net/ipv4/tcp_offload.c2
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/ip6_fib.c5
-rw-r--r--net/ipv6/netfilter/ip6_tables.c1
-rw-r--r--net/ipv6/route.c3
-rw-r--r--net/ipv6/tcp_ipv6.c4
-rw-r--r--net/l2tp/l2tp_netlink.c6
-rw-r--r--net/l2tp/l2tp_ppp.c28
-rw-r--r--net/mac80211/main.c12
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h5
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c2
-rw-r--r--net/netfilter/nf_conncount.c13
-rw-r--r--net/netfilter/nf_conntrack_netlink.c3
-rw-r--r--net/netfilter/nf_tables_api.c36
-rw-r--r--net/netfilter/nf_tables_core.c3
-rw-r--r--net/netfilter/nfnetlink.c10
-rw-r--r--net/netfilter/nft_chain_filter.c5
-rw-r--r--net/netfilter/nft_connlimit.c2
-rw-r--r--net/netfilter/nft_dynset.c4
-rw-r--r--net/netfilter/nft_set_rbtree.c2
-rw-r--r--net/netfilter/nft_socket.c1
-rw-r--r--net/netfilter/xt_CT.c10
-rw-r--r--net/netfilter/xt_connmark.c2
-rw-r--r--net/netfilter/xt_set.c10
-rw-r--r--net/rds/loop.c1
-rw-r--r--net/rds/rds.h5
-rw-r--r--net/rds/recv.c5
-rw-r--r--net/sctp/output.c28
-rw-r--r--net/smc/af_smc.c12
-rw-r--r--net/tls/tls_main.c2
-rw-r--r--net/tls/tls_sw.c51
-rw-r--r--net/wireless/core.c1
-rw-r--r--net/wireless/util.c2
-rw-r--r--net/xdp/xdp_umem.c3
-rw-r--r--tools/testing/selftests/bpf/Makefile4
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json2
82 files changed, 681 insertions, 343 deletions
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 448d1fafc827..f4d81765221e 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -325,6 +325,8 @@ struct nicvf {
325 struct tasklet_struct qs_err_task; 325 struct tasklet_struct qs_err_task;
326 struct work_struct reset_task; 326 struct work_struct reset_task;
327 struct nicvf_work rx_mode_work; 327 struct nicvf_work rx_mode_work;
328 /* spinlock to protect workqueue arguments from concurrent access */
329 spinlock_t rx_mode_wq_lock;
328 330
329 /* PTP timestamp */ 331 /* PTP timestamp */
330 struct cavium_ptp *ptp_clock; 332 struct cavium_ptp *ptp_clock;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 7135db45927e..135766c4296b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1923,17 +1923,12 @@ static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1923 } 1923 }
1924} 1924}
1925 1925
1926static void nicvf_set_rx_mode_task(struct work_struct *work_arg) 1926static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
1927 struct nicvf *nic)
1927{ 1928{
1928 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
1929 work.work);
1930 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
1931 union nic_mbx mbx = {}; 1929 union nic_mbx mbx = {};
1932 int idx; 1930 int idx;
1933 1931
1934 if (!vf_work)
1935 return;
1936
1937 /* From the inside of VM code flow we have only 128 bits memory 1932 /* From the inside of VM code flow we have only 128 bits memory
1938 * available to send message to host's PF, so send all mc addrs 1933 * available to send message to host's PF, so send all mc addrs
1939 * one by one, starting from flush command in case if kernel 1934 * one by one, starting from flush command in case if kernel
@@ -1944,7 +1939,7 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
1944 mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; 1939 mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
1945 nicvf_send_msg_to_pf(nic, &mbx); 1940 nicvf_send_msg_to_pf(nic, &mbx);
1946 1941
1947 if (vf_work->mode & BGX_XCAST_MCAST_FILTER) { 1942 if (mode & BGX_XCAST_MCAST_FILTER) {
1948 /* once enabling filtering, we need to signal to PF to add 1943 /* once enabling filtering, we need to signal to PF to add
1949 * its' own LMAC to the filter to accept packets for it. 1944 * its' own LMAC to the filter to accept packets for it.
1950 */ 1945 */
@@ -1954,23 +1949,46 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
1954 } 1949 }
1955 1950
1956 /* check if we have any specific MACs to be added to PF DMAC filter */ 1951 /* check if we have any specific MACs to be added to PF DMAC filter */
1957 if (vf_work->mc) { 1952 if (mc_addrs) {
1958 /* now go through kernel list of MACs and add them one by one */ 1953 /* now go through kernel list of MACs and add them one by one */
1959 for (idx = 0; idx < vf_work->mc->count; idx++) { 1954 for (idx = 0; idx < mc_addrs->count; idx++) {
1960 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; 1955 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1961 mbx.xcast.data.mac = vf_work->mc->mc[idx]; 1956 mbx.xcast.data.mac = mc_addrs->mc[idx];
1962 nicvf_send_msg_to_pf(nic, &mbx); 1957 nicvf_send_msg_to_pf(nic, &mbx);
1963 } 1958 }
1964 kfree(vf_work->mc); 1959 kfree(mc_addrs);
1965 } 1960 }
1966 1961
1967 /* and finally set rx mode for PF accordingly */ 1962 /* and finally set rx mode for PF accordingly */
1968 mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; 1963 mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
1969 mbx.xcast.data.mode = vf_work->mode; 1964 mbx.xcast.data.mode = mode;
1970 1965
1971 nicvf_send_msg_to_pf(nic, &mbx); 1966 nicvf_send_msg_to_pf(nic, &mbx);
1972} 1967}
1973 1968
1969static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
1970{
1971 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
1972 work.work);
1973 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
1974 u8 mode;
1975 struct xcast_addr_list *mc;
1976
1977 if (!vf_work)
1978 return;
1979
1980 /* Save message data locally to prevent them from
1981 * being overwritten by next ndo_set_rx_mode call().
1982 */
1983 spin_lock(&nic->rx_mode_wq_lock);
1984 mode = vf_work->mode;
1985 mc = vf_work->mc;
1986 vf_work->mc = NULL;
1987 spin_unlock(&nic->rx_mode_wq_lock);
1988
1989 __nicvf_set_rx_mode_task(mode, mc, nic);
1990}
1991
1974static void nicvf_set_rx_mode(struct net_device *netdev) 1992static void nicvf_set_rx_mode(struct net_device *netdev)
1975{ 1993{
1976 struct nicvf *nic = netdev_priv(netdev); 1994 struct nicvf *nic = netdev_priv(netdev);
@@ -2004,9 +2022,12 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
2004 } 2022 }
2005 } 2023 }
2006 } 2024 }
2025 spin_lock(&nic->rx_mode_wq_lock);
2026 kfree(nic->rx_mode_work.mc);
2007 nic->rx_mode_work.mc = mc_list; 2027 nic->rx_mode_work.mc = mc_list;
2008 nic->rx_mode_work.mode = mode; 2028 nic->rx_mode_work.mode = mode;
2009 queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ); 2029 queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
2030 spin_unlock(&nic->rx_mode_wq_lock);
2010} 2031}
2011 2032
2012static const struct net_device_ops nicvf_netdev_ops = { 2033static const struct net_device_ops nicvf_netdev_ops = {
@@ -2163,6 +2184,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2163 INIT_WORK(&nic->reset_task, nicvf_reset_task); 2184 INIT_WORK(&nic->reset_task, nicvf_reset_task);
2164 2185
2165 INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); 2186 INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
2187 spin_lock_init(&nic->rx_mode_wq_lock);
2166 2188
2167 err = register_netdev(netdev); 2189 err = register_netdev(netdev);
2168 if (err) { 2190 if (err) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 2edfdbdaae48..7b795edd9d3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3362,10 +3362,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3362 3362
3363 err = sysfs_create_group(&adapter->port[0]->dev.kobj, 3363 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3364 &cxgb3_attr_group); 3364 &cxgb3_attr_group);
3365 if (err) {
3366 dev_err(&pdev->dev, "cannot create sysfs group\n");
3367 goto out_close_led;
3368 }
3365 3369
3366 print_port_info(adapter, ai); 3370 print_port_info(adapter, ai);
3367 return 0; 3371 return 0;
3368 3372
3373out_close_led:
3374 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
3375
3369out_free_dev: 3376out_free_dev:
3370 iounmap(adapter->regs); 3377 iounmap(adapter->regs);
3371 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i) 3378 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index fc534e91c6b2..144d5fe6b944 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -760,9 +760,9 @@ struct ixgbe_adapter {
760#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ 760#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
761 u32 *rss_key; 761 u32 *rss_key;
762 762
763#ifdef CONFIG_XFRM 763#ifdef CONFIG_XFRM_OFFLOAD
764 struct ixgbe_ipsec *ipsec; 764 struct ixgbe_ipsec *ipsec;
765#endif /* CONFIG_XFRM */ 765#endif /* CONFIG_XFRM_OFFLOAD */
766}; 766};
767 767
768static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) 768static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 344a1f213a5f..c116f459945d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -158,7 +158,16 @@ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
158 reg |= IXGBE_SECRXCTRL_RX_DIS; 158 reg |= IXGBE_SECRXCTRL_RX_DIS;
159 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); 159 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
160 160
161 IXGBE_WRITE_FLUSH(hw); 161 /* If both Tx and Rx are ready there are no packets
162 * that we need to flush so the loopback configuration
163 * below is not necessary.
164 */
165 t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
166 IXGBE_SECTXSTAT_SECTX_RDY;
167 r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
168 IXGBE_SECRXSTAT_SECRX_RDY;
169 if (t_rdy && r_rdy)
170 return;
162 171
163 /* If the tx fifo doesn't have link, but still has data, 172 /* If the tx fifo doesn't have link, but still has data,
164 * we can't clear the tx sec block. Set the MAC loopback 173 * we can't clear the tx sec block. Set the MAC loopback
@@ -185,7 +194,7 @@ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
185 IXGBE_SECTXSTAT_SECTX_RDY; 194 IXGBE_SECTXSTAT_SECTX_RDY;
186 r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & 195 r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
187 IXGBE_SECRXSTAT_SECRX_RDY; 196 IXGBE_SECRXSTAT_SECRX_RDY;
188 } while (!t_rdy && !r_rdy && limit--); 197 } while (!(t_rdy && r_rdy) && limit--);
189 198
190 /* undo loopback if we played with it earlier */ 199 /* undo loopback if we played with it earlier */
191 if (!link) { 200 if (!link) {
@@ -966,10 +975,22 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
966 **/ 975 **/
967void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) 976void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
968{ 977{
978 struct ixgbe_hw *hw = &adapter->hw;
969 struct ixgbe_ipsec *ipsec; 979 struct ixgbe_ipsec *ipsec;
980 u32 t_dis, r_dis;
970 size_t size; 981 size_t size;
971 982
972 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 983 if (hw->mac.type == ixgbe_mac_82598EB)
984 return;
985
986 /* If there is no support for either Tx or Rx offload
987 * we should not be advertising support for IPsec.
988 */
989 t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
990 IXGBE_SECTXSTAT_SECTX_OFF_DIS;
991 r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
992 IXGBE_SECRXSTAT_SECRX_OFF_DIS;
993 if (t_dis || r_dis)
973 return; 994 return;
974 995
975 ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); 996 ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
@@ -1001,13 +1022,6 @@ void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
1001 1022
1002 adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops; 1023 adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
1003 1024
1004#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
1005 NETIF_F_HW_ESP_TX_CSUM | \
1006 NETIF_F_GSO_ESP)
1007
1008 adapter->netdev->features |= IXGBE_ESP_FEATURES;
1009 adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;
1010
1011 return; 1025 return;
1012 1026
1013err2: 1027err2:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 893a9206e718..d361f570ca37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -593,6 +593,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
593 } 593 }
594 594
595#endif 595#endif
596 /* To support macvlan offload we have to use num_tc to
597 * restrict the queues that can be used by the device.
598 * By doing this we can avoid reporting a false number of
599 * queues.
600 */
601 if (vmdq_i > 1)
602 netdev_set_num_tc(adapter->netdev, 1);
603
596 /* populate TC0 for use by pool 0 */ 604 /* populate TC0 for use by pool 0 */
597 netdev_set_tc_queue(adapter->netdev, 0, 605 netdev_set_tc_queue(adapter->netdev, 0,
598 adapter->num_rx_queues_per_pool, 0); 606 adapter->num_rx_queues_per_pool, 0);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0b1ba3ae159c..3e87dbbc9024 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6117,6 +6117,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6117#ifdef CONFIG_IXGBE_DCB 6117#ifdef CONFIG_IXGBE_DCB
6118 ixgbe_init_dcb(adapter); 6118 ixgbe_init_dcb(adapter);
6119#endif 6119#endif
6120 ixgbe_init_ipsec_offload(adapter);
6120 6121
6121 /* default flow control settings */ 6122 /* default flow control settings */
6122 hw->fc.requested_mode = ixgbe_fc_full; 6123 hw->fc.requested_mode = ixgbe_fc_full;
@@ -8822,14 +8823,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8822 } else { 8823 } else {
8823 netdev_reset_tc(dev); 8824 netdev_reset_tc(dev);
8824 8825
8825 /* To support macvlan offload we have to use num_tc to
8826 * restrict the queues that can be used by the device.
8827 * By doing this we can avoid reporting a false number of
8828 * queues.
8829 */
8830 if (!tc && adapter->num_rx_pools > 1)
8831 netdev_set_num_tc(dev, 1);
8832
8833 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 8826 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8834 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 8827 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
8835 8828
@@ -9904,7 +9897,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
9904 * the TSO, so it's the exception. 9897 * the TSO, so it's the exception.
9905 */ 9898 */
9906 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { 9899 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
9907#ifdef CONFIG_XFRM 9900#ifdef CONFIG_XFRM_OFFLOAD
9908 if (!skb->sp) 9901 if (!skb->sp)
9909#endif 9902#endif
9910 features &= ~NETIF_F_TSO; 9903 features &= ~NETIF_F_TSO;
@@ -10437,6 +10430,14 @@ skip_sriov:
10437 if (hw->mac.type >= ixgbe_mac_82599EB) 10430 if (hw->mac.type >= ixgbe_mac_82599EB)
10438 netdev->features |= NETIF_F_SCTP_CRC; 10431 netdev->features |= NETIF_F_SCTP_CRC;
10439 10432
10433#ifdef CONFIG_XFRM_OFFLOAD
10434#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
10435 NETIF_F_HW_ESP_TX_CSUM | \
10436 NETIF_F_GSO_ESP)
10437
10438 if (adapter->ipsec)
10439 netdev->features |= IXGBE_ESP_FEATURES;
10440#endif
10440 /* copy netdev features into list of user selectable features */ 10441 /* copy netdev features into list of user selectable features */
10441 netdev->hw_features |= netdev->features | 10442 netdev->hw_features |= netdev->features |
10442 NETIF_F_HW_VLAN_CTAG_FILTER | 10443 NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -10499,8 +10500,6 @@ skip_sriov:
10499 NETIF_F_FCOE_MTU; 10500 NETIF_F_FCOE_MTU;
10500 } 10501 }
10501#endif /* IXGBE_FCOE */ 10502#endif /* IXGBE_FCOE */
10502 ixgbe_init_ipsec_offload(adapter);
10503
10504 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) 10503 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
10505 netdev->hw_features |= NETIF_F_LRO; 10504 netdev->hw_features |= NETIF_F_LRO;
10506 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 10505 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index e8ed37749ab1..44cfb2021145 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -599,13 +599,15 @@ struct ixgbe_nvm_version {
599#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 599#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
600 600
601#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 601#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
602#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 602#define IXGBE_SECTXSTAT_SECTX_OFF_DIS 0x00000002
603#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000004
603 604
604#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 605#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
605#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 606#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
606 607
607#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 608#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
608#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 609#define IXGBE_SECRXSTAT_SECRX_OFF_DIS 0x00000002
610#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000004
609 611
610/* LinkSec (MacSec) Registers */ 612/* LinkSec (MacSec) Registers */
611#define IXGBE_LSECTXCAP 0x08A00 613#define IXGBE_LSECTXCAP 0x08A00
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 77b2adb29341..6aaaf3d9ba31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4756,12 +4756,6 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4756 kfree(mlxsw_sp_rt6); 4756 kfree(mlxsw_sp_rt6);
4757} 4757}
4758 4758
4759static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
4760{
4761 /* RTF_CACHE routes are ignored */
4762 return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4763}
4764
4765static struct fib6_info * 4759static struct fib6_info *
4766mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) 4760mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4767{ 4761{
@@ -4771,11 +4765,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4771 4765
4772static struct mlxsw_sp_fib6_entry * 4766static struct mlxsw_sp_fib6_entry *
4773mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, 4767mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
4774 const struct fib6_info *nrt, bool replace) 4768 const struct fib6_info *nrt, bool append)
4775{ 4769{
4776 struct mlxsw_sp_fib6_entry *fib6_entry; 4770 struct mlxsw_sp_fib6_entry *fib6_entry;
4777 4771
4778 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace) 4772 if (!append)
4779 return NULL; 4773 return NULL;
4780 4774
4781 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { 4775 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
@@ -4790,8 +4784,7 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
4790 break; 4784 break;
4791 if (rt->fib6_metric < nrt->fib6_metric) 4785 if (rt->fib6_metric < nrt->fib6_metric)
4792 continue; 4786 continue;
4793 if (rt->fib6_metric == nrt->fib6_metric && 4787 if (rt->fib6_metric == nrt->fib6_metric)
4794 mlxsw_sp_fib6_rt_can_mp(rt))
4795 return fib6_entry; 4788 return fib6_entry;
4796 if (rt->fib6_metric > nrt->fib6_metric) 4789 if (rt->fib6_metric > nrt->fib6_metric)
4797 break; 4790 break;
@@ -5170,7 +5163,7 @@ static struct mlxsw_sp_fib6_entry *
5170mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, 5163mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
5171 const struct fib6_info *nrt, bool replace) 5164 const struct fib6_info *nrt, bool replace)
5172{ 5165{
5173 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL; 5166 struct mlxsw_sp_fib6_entry *fib6_entry;
5174 5167
5175 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { 5168 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5176 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); 5169 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
@@ -5179,18 +5172,13 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
5179 continue; 5172 continue;
5180 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) 5173 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
5181 break; 5174 break;
5182 if (replace && rt->fib6_metric == nrt->fib6_metric) { 5175 if (replace && rt->fib6_metric == nrt->fib6_metric)
5183 if (mlxsw_sp_fib6_rt_can_mp(rt) == 5176 return fib6_entry;
5184 mlxsw_sp_fib6_rt_can_mp(nrt))
5185 return fib6_entry;
5186 if (mlxsw_sp_fib6_rt_can_mp(nrt))
5187 fallback = fallback ?: fib6_entry;
5188 }
5189 if (rt->fib6_metric > nrt->fib6_metric) 5177 if (rt->fib6_metric > nrt->fib6_metric)
5190 return fallback ?: fib6_entry; 5178 return fib6_entry;
5191 } 5179 }
5192 5180
5193 return fallback; 5181 return NULL;
5194} 5182}
5195 5183
5196static int 5184static int
@@ -5316,7 +5304,8 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5316} 5304}
5317 5305
5318static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, 5306static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5319 struct fib6_info *rt, bool replace) 5307 struct fib6_info *rt, bool replace,
5308 bool append)
5320{ 5309{
5321 struct mlxsw_sp_fib6_entry *fib6_entry; 5310 struct mlxsw_sp_fib6_entry *fib6_entry;
5322 struct mlxsw_sp_fib_node *fib_node; 5311 struct mlxsw_sp_fib_node *fib_node;
@@ -5342,7 +5331,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5342 /* Before creating a new entry, try to append route to an existing 5331 /* Before creating a new entry, try to append route to an existing
5343 * multipath entry. 5332 * multipath entry.
5344 */ 5333 */
5345 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace); 5334 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
5346 if (fib6_entry) { 5335 if (fib6_entry) {
5347 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); 5336 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5348 if (err) 5337 if (err)
@@ -5350,6 +5339,14 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5350 return 0; 5339 return 0;
5351 } 5340 }
5352 5341
5342 /* We received an append event, yet did not find any route to
5343 * append to.
5344 */
5345 if (WARN_ON(append)) {
5346 err = -EINVAL;
5347 goto err_fib6_entry_append;
5348 }
5349
5353 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); 5350 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5354 if (IS_ERR(fib6_entry)) { 5351 if (IS_ERR(fib6_entry)) {
5355 err = PTR_ERR(fib6_entry); 5352 err = PTR_ERR(fib6_entry);
@@ -5367,6 +5364,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5367err_fib6_node_entry_link: 5364err_fib6_node_entry_link:
5368 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); 5365 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5369err_fib6_entry_create: 5366err_fib6_entry_create:
5367err_fib6_entry_append:
5370err_fib6_entry_nexthop_add: 5368err_fib6_entry_nexthop_add:
5371 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5369 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5372 return err; 5370 return err;
@@ -5717,7 +5715,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5717 struct mlxsw_sp_fib_event_work *fib_work = 5715 struct mlxsw_sp_fib_event_work *fib_work =
5718 container_of(work, struct mlxsw_sp_fib_event_work, work); 5716 container_of(work, struct mlxsw_sp_fib_event_work, work);
5719 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; 5717 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5720 bool replace; 5718 bool replace, append;
5721 int err; 5719 int err;
5722 5720
5723 rtnl_lock(); 5721 rtnl_lock();
@@ -5728,8 +5726,10 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5728 case FIB_EVENT_ENTRY_APPEND: /* fall through */ 5726 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5729 case FIB_EVENT_ENTRY_ADD: 5727 case FIB_EVENT_ENTRY_ADD:
5730 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; 5728 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5729 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5731 err = mlxsw_sp_router_fib6_add(mlxsw_sp, 5730 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
5732 fib_work->fen6_info.rt, replace); 5731 fib_work->fen6_info.rt, replace,
5732 append);
5733 if (err) 5733 if (err)
5734 mlxsw_sp_router_fib_abort(mlxsw_sp); 5734 mlxsw_sp_router_fib_abort(mlxsw_sp);
5735 mlxsw_sp_rt6_release(fib_work->fen6_info.rt); 5735 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index e97652c40d13..eea5666a86b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1018,8 +1018,10 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
1018 int err; 1018 int err;
1019 1019
1020 /* No need to continue if only VLAN flags were changed */ 1020 /* No need to continue if only VLAN flags were changed */
1021 if (mlxsw_sp_port_vlan->bridge_port) 1021 if (mlxsw_sp_port_vlan->bridge_port) {
1022 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1022 return 0; 1023 return 0;
1024 }
1023 1025
1024 err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port); 1026 err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
1025 if (err) 1027 if (err)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 19cfa162ac65..1decf3a1cad3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -455,6 +455,7 @@ static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
455 455
456 eth_hw_addr_random(nn->dp.netdev); 456 eth_hw_addr_random(nn->dp.netdev);
457 netif_keep_dst(nn->dp.netdev); 457 netif_keep_dst(nn->dp.netdev);
458 nn->vnic_no_name = true;
458 459
459 return 0; 460 return 0;
460 461
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index ec524d97869d..78afe75129ab 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -381,6 +381,8 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
381 err = PTR_ERR_OR_ZERO(rt); 381 err = PTR_ERR_OR_ZERO(rt);
382 if (err) 382 if (err)
383 return NOTIFY_DONE; 383 return NOTIFY_DONE;
384
385 ip_rt_put(rt);
384#else 386#else
385 return NOTIFY_DONE; 387 return NOTIFY_DONE;
386#endif 388#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 57cb035dcc6d..2a71a9ffd095 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -590,6 +590,8 @@ struct nfp_net_dp {
590 * @vnic_list: Entry on device vNIC list 590 * @vnic_list: Entry on device vNIC list
591 * @pdev: Backpointer to PCI device 591 * @pdev: Backpointer to PCI device
592 * @app: APP handle if available 592 * @app: APP handle if available
593 * @vnic_no_name: For non-port PF vNIC make ndo_get_phys_port_name return
594 * -EOPNOTSUPP to keep backwards compatibility (set by app)
593 * @port: Pointer to nfp_port structure if vNIC is a port 595 * @port: Pointer to nfp_port structure if vNIC is a port
594 * @app_priv: APP private data for this vNIC 596 * @app_priv: APP private data for this vNIC
595 */ 597 */
@@ -663,6 +665,8 @@ struct nfp_net {
663 struct pci_dev *pdev; 665 struct pci_dev *pdev;
664 struct nfp_app *app; 666 struct nfp_app *app;
665 667
668 bool vnic_no_name;
669
666 struct nfp_port *port; 670 struct nfp_port *port;
667 671
668 void *app_priv; 672 void *app_priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 75110c8d6a90..d4c27f849f9b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3121,7 +3121,7 @@ static void nfp_net_stat64(struct net_device *netdev,
3121 struct nfp_net *nn = netdev_priv(netdev); 3121 struct nfp_net *nn = netdev_priv(netdev);
3122 int r; 3122 int r;
3123 3123
3124 for (r = 0; r < nn->dp.num_r_vecs; r++) { 3124 for (r = 0; r < nn->max_r_vecs; r++) {
3125 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; 3125 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
3126 u64 data[3]; 3126 u64 data[3];
3127 unsigned int start; 3127 unsigned int start;
@@ -3286,7 +3286,7 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
3286 if (nn->port) 3286 if (nn->port)
3287 return nfp_port_get_phys_port_name(netdev, name, len); 3287 return nfp_port_get_phys_port_name(netdev, name, len);
3288 3288
3289 if (nn->dp.is_vf) 3289 if (nn->dp.is_vf || nn->vnic_no_name)
3290 return -EOPNOTSUPP; 3290 return -EOPNOTSUPP;
3291 3291
3292 n = snprintf(name, len, "n%d", nn->id); 3292 n = snprintf(name, len, "n%d", nn->id);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index 2dd89dba9311..d32af598da90 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -98,21 +98,18 @@ struct nfp_resource {
98 98
99static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) 99static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
100{ 100{
101 char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {};
102 struct nfp_resource_entry entry; 101 struct nfp_resource_entry entry;
103 u32 cpp_id, key; 102 u32 cpp_id, key;
104 int ret, i; 103 int ret, i;
105 104
106 cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */ 105 cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */
107 106
108 strncpy(name_pad, res->name, sizeof(name_pad));
109
110 /* Search for a matching entry */ 107 /* Search for a matching entry */
111 if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) { 108 if (!strcmp(res->name, NFP_RESOURCE_TBL_NAME)) {
112 nfp_err(cpp, "Grabbing device lock not supported\n"); 109 nfp_err(cpp, "Grabbing device lock not supported\n");
113 return -EOPNOTSUPP; 110 return -EOPNOTSUPP;
114 } 111 }
115 key = crc32_posix(name_pad, sizeof(name_pad)); 112 key = crc32_posix(res->name, NFP_RESOURCE_ENTRY_NAME_SZ);
116 113
117 for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { 114 for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
118 u64 addr = NFP_RESOURCE_TBL_BASE + 115 u64 addr = NFP_RESOURCE_TBL_BASE +
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index e78e5db39458..c694e3428dfc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -384,6 +384,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
384 } 384 }
385 385
386 sgmii_pdev = of_find_device_by_node(np); 386 sgmii_pdev = of_find_device_by_node(np);
387 of_node_put(np);
387 if (!sgmii_pdev) { 388 if (!sgmii_pdev) {
388 dev_err(&pdev->dev, "invalid internal-phy property\n"); 389 dev_err(&pdev->dev, "invalid internal-phy property\n");
389 return -ENODEV; 390 return -ENODEV;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 4ff231df7322..c5979569fd60 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -334,9 +334,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
334 334
335 dwmac->data = (const struct meson8b_dwmac_data *) 335 dwmac->data = (const struct meson8b_dwmac_data *)
336 of_device_get_match_data(&pdev->dev); 336 of_device_get_match_data(&pdev->dev);
337 if (!dwmac->data) 337 if (!dwmac->data) {
338 return -EINVAL; 338 ret = -EINVAL;
339 339 goto err_remove_config_dt;
340 }
340 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 341 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
341 dwmac->regs = devm_ioremap_resource(&pdev->dev, res); 342 dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
342 if (IS_ERR(dwmac->regs)) { 343 if (IS_ERR(dwmac->regs)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 14770fc8865e..1f50e83cafb2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -252,13 +252,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
252 return ret; 252 return ret;
253 } 253 }
254 254
255 /* Run quirks, if needed */ 255 /* Save quirks, if needed for posterior use */
256 if (entry->quirks) { 256 priv->hwif_quirks = entry->quirks;
257 ret = entry->quirks(priv);
258 if (ret)
259 return ret;
260 }
261
262 return 0; 257 return 0;
263 } 258 }
264 259
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 025efbf6145c..76649adf8fb0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -129,6 +129,7 @@ struct stmmac_priv {
129 struct net_device *dev; 129 struct net_device *dev;
130 struct device *device; 130 struct device *device;
131 struct mac_device_info *hw; 131 struct mac_device_info *hw;
132 int (*hwif_quirks)(struct stmmac_priv *priv);
132 struct mutex lock; 133 struct mutex lock;
133 134
134 /* RX Queue */ 135 /* RX Queue */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 11fb7c777d89..e79b0d7b388a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3182,17 +3182,22 @@ dma_map_err:
3182 3182
3183static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 3183static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3184{ 3184{
3185 struct ethhdr *ehdr; 3185 struct vlan_ethhdr *veth;
3186 __be16 vlan_proto;
3186 u16 vlanid; 3187 u16 vlanid;
3187 3188
3188 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) == 3189 veth = (struct vlan_ethhdr *)skb->data;
3189 NETIF_F_HW_VLAN_CTAG_RX && 3190 vlan_proto = veth->h_vlan_proto;
3190 !__vlan_get_tag(skb, &vlanid)) { 3191
3192 if ((vlan_proto == htons(ETH_P_8021Q) &&
3193 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3194 (vlan_proto == htons(ETH_P_8021AD) &&
3195 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3191 /* pop the vlan tag */ 3196 /* pop the vlan tag */
3192 ehdr = (struct ethhdr *)skb->data; 3197 vlanid = ntohs(veth->h_vlan_TCI);
3193 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2); 3198 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3194 skb_pull(skb, VLAN_HLEN); 3199 skb_pull(skb, VLAN_HLEN);
3195 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); 3200 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3196 } 3201 }
3197} 3202}
3198 3203
@@ -4130,6 +4135,13 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
4130 if (priv->dma_cap.tsoen) 4135 if (priv->dma_cap.tsoen)
4131 dev_info(priv->device, "TSO supported\n"); 4136 dev_info(priv->device, "TSO supported\n");
4132 4137
4138 /* Run HW quirks, if any */
4139 if (priv->hwif_quirks) {
4140 ret = priv->hwif_quirks(priv);
4141 if (ret)
4142 return ret;
4143 }
4144
4133 return 0; 4145 return 0;
4134} 4146}
4135 4147
@@ -4235,7 +4247,7 @@ int stmmac_dvr_probe(struct device *device,
4235 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 4247 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4236#ifdef STMMAC_VLAN_TAG_USED 4248#ifdef STMMAC_VLAN_TAG_USED
4237 /* Both mac100 and gmac support receive VLAN tag detection */ 4249 /* Both mac100 and gmac support receive VLAN tag detection */
4238 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 4250 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4239#endif 4251#endif
4240 priv->msg_enable = netif_msg_init(debug, default_msg_level); 4252 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4241 4253
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 69e31ceccfae..2a0c06e0f730 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -123,7 +123,6 @@
123 * @phy_node: pointer to the PHY device node 123 * @phy_node: pointer to the PHY device node
124 * @mii_bus: pointer to the MII bus 124 * @mii_bus: pointer to the MII bus
125 * @last_link: last link status 125 * @last_link: last link status
126 * @has_mdio: indicates whether MDIO is included in the HW
127 */ 126 */
128struct net_local { 127struct net_local {
129 128
@@ -144,7 +143,6 @@ struct net_local {
144 struct mii_bus *mii_bus; 143 struct mii_bus *mii_bus;
145 144
146 int last_link; 145 int last_link;
147 bool has_mdio;
148}; 146};
149 147
150 148
@@ -863,14 +861,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
863 bus->write = xemaclite_mdio_write; 861 bus->write = xemaclite_mdio_write;
864 bus->parent = dev; 862 bus->parent = dev;
865 863
866 lp->mii_bus = bus;
867
868 rc = of_mdiobus_register(bus, np); 864 rc = of_mdiobus_register(bus, np);
869 if (rc) { 865 if (rc) {
870 dev_err(dev, "Failed to register mdio bus.\n"); 866 dev_err(dev, "Failed to register mdio bus.\n");
871 goto err_register; 867 goto err_register;
872 } 868 }
873 869
870 lp->mii_bus = bus;
871
874 return 0; 872 return 0;
875 873
876err_register: 874err_register:
@@ -1145,9 +1143,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1145 xemaclite_update_address(lp, ndev->dev_addr); 1143 xemaclite_update_address(lp, ndev->dev_addr);
1146 1144
1147 lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); 1145 lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
1148 rc = xemaclite_mdio_setup(lp, &ofdev->dev); 1146 xemaclite_mdio_setup(lp, &ofdev->dev);
1149 if (rc)
1150 dev_warn(&ofdev->dev, "error registering MDIO bus\n");
1151 1147
1152 dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); 1148 dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
1153 1149
@@ -1191,7 +1187,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
1191 struct net_local *lp = netdev_priv(ndev); 1187 struct net_local *lp = netdev_priv(ndev);
1192 1188
1193 /* Un-register the mii_bus, if configured */ 1189 /* Un-register the mii_bus, if configured */
1194 if (lp->has_mdio) { 1190 if (lp->mii_bus) {
1195 mdiobus_unregister(lp->mii_bus); 1191 mdiobus_unregister(lp->mii_bus);
1196 mdiobus_free(lp->mii_bus); 1192 mdiobus_free(lp->mii_bus);
1197 lp->mii_bus = NULL; 1193 lp->mii_bus = NULL;
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
index 23a2d145813a..0765d5f61714 100644
--- a/drivers/net/hyperv/Kconfig
+++ b/drivers/net/hyperv/Kconfig
@@ -2,6 +2,5 @@ config HYPERV_NET
2 tristate "Microsoft Hyper-V virtual network driver" 2 tristate "Microsoft Hyper-V virtual network driver"
3 depends on HYPERV 3 depends on HYPERV
4 select UCS2_STRING 4 select UCS2_STRING
5 select FAILOVER
6 help 5 help
7 Select this option to enable the Hyper-V virtual network driver. 6 Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 23304aca25f9..1a924b867b07 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -901,6 +901,8 @@ struct net_device_context {
901 struct hv_device *device_ctx; 901 struct hv_device *device_ctx;
902 /* netvsc_device */ 902 /* netvsc_device */
903 struct netvsc_device __rcu *nvdev; 903 struct netvsc_device __rcu *nvdev;
904 /* list of netvsc net_devices */
905 struct list_head list;
904 /* reconfigure work */ 906 /* reconfigure work */
905 struct delayed_work dwork; 907 struct delayed_work dwork;
906 /* last reconfig time */ 908 /* last reconfig time */
@@ -931,8 +933,6 @@ struct net_device_context {
931 u32 vf_alloc; 933 u32 vf_alloc;
932 /* Serial number of the VF to team with */ 934 /* Serial number of the VF to team with */
933 u32 vf_serial; 935 u32 vf_serial;
934
935 struct failover *failover;
936}; 936};
937 937
938/* Per channel data */ 938/* Per channel data */
@@ -1277,17 +1277,17 @@ struct ndis_lsov2_offload {
1277 1277
1278struct ndis_ipsecv2_offload { 1278struct ndis_ipsecv2_offload {
1279 u32 encap; 1279 u32 encap;
1280 u16 ip6; 1280 u8 ip6;
1281 u16 ip4opt; 1281 u8 ip4opt;
1282 u16 ip6ext; 1282 u8 ip6ext;
1283 u16 ah; 1283 u8 ah;
1284 u16 esp; 1284 u8 esp;
1285 u16 ah_esp; 1285 u8 ah_esp;
1286 u16 xport; 1286 u8 xport;
1287 u16 tun; 1287 u8 tun;
1288 u16 xport_tun; 1288 u8 xport_tun;
1289 u16 lso; 1289 u8 lso;
1290 u16 extseq; 1290 u8 extseq;
1291 u32 udp_esp; 1291 u32 udp_esp;
1292 u32 auth; 1292 u32 auth;
1293 u32 crypto; 1293 u32 crypto;
@@ -1295,8 +1295,8 @@ struct ndis_ipsecv2_offload {
1295}; 1295};
1296 1296
1297struct ndis_rsc_offload { 1297struct ndis_rsc_offload {
1298 u16 ip4; 1298 u8 ip4;
1299 u16 ip6; 1299 u8 ip6;
1300}; 1300};
1301 1301
1302struct ndis_encap_offload { 1302struct ndis_encap_offload {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7b18a8c267c2..fe2256bf1d13 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -42,7 +42,6 @@
42#include <net/pkt_sched.h> 42#include <net/pkt_sched.h>
43#include <net/checksum.h> 43#include <net/checksum.h>
44#include <net/ip6_checksum.h> 44#include <net/ip6_checksum.h>
45#include <net/failover.h>
46 45
47#include "hyperv_net.h" 46#include "hyperv_net.h"
48 47
@@ -68,6 +67,8 @@ static int debug = -1;
68module_param(debug, int, 0444); 67module_param(debug, int, 0444);
69MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70 69
70static LIST_HEAD(netvsc_dev_list);
71
71static void netvsc_change_rx_flags(struct net_device *net, int change) 72static void netvsc_change_rx_flags(struct net_device *net, int change)
72{ 73{
73 struct net_device_context *ndev_ctx = netdev_priv(net); 74 struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -1780,6 +1781,36 @@ out_unlock:
1780 rtnl_unlock(); 1781 rtnl_unlock();
1781} 1782}
1782 1783
1784static struct net_device *get_netvsc_bymac(const u8 *mac)
1785{
1786 struct net_device_context *ndev_ctx;
1787
1788 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
1789 struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
1790
1791 if (ether_addr_equal(mac, dev->perm_addr))
1792 return dev;
1793 }
1794
1795 return NULL;
1796}
1797
1798static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
1799{
1800 struct net_device_context *net_device_ctx;
1801 struct net_device *dev;
1802
1803 dev = netdev_master_upper_dev_get(vf_netdev);
1804 if (!dev || dev->netdev_ops != &device_ops)
1805 return NULL; /* not a netvsc device */
1806
1807 net_device_ctx = netdev_priv(dev);
1808 if (!rtnl_dereference(net_device_ctx->nvdev))
1809 return NULL; /* device is removed */
1810
1811 return dev;
1812}
1813
1783/* Called when VF is injecting data into network stack. 1814/* Called when VF is injecting data into network stack.
1784 * Change the associated network device from VF to netvsc. 1815 * Change the associated network device from VF to netvsc.
1785 * note: already called with rcu_read_lock 1816 * note: already called with rcu_read_lock
@@ -1802,6 +1833,46 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
1802 return RX_HANDLER_ANOTHER; 1833 return RX_HANDLER_ANOTHER;
1803} 1834}
1804 1835
1836static int netvsc_vf_join(struct net_device *vf_netdev,
1837 struct net_device *ndev)
1838{
1839 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1840 int ret;
1841
1842 ret = netdev_rx_handler_register(vf_netdev,
1843 netvsc_vf_handle_frame, ndev);
1844 if (ret != 0) {
1845 netdev_err(vf_netdev,
1846 "can not register netvsc VF receive handler (err = %d)\n",
1847 ret);
1848 goto rx_handler_failed;
1849 }
1850
1851 ret = netdev_master_upper_dev_link(vf_netdev, ndev,
1852 NULL, NULL, NULL);
1853 if (ret != 0) {
1854 netdev_err(vf_netdev,
1855 "can not set master device %s (err = %d)\n",
1856 ndev->name, ret);
1857 goto upper_link_failed;
1858 }
1859
1860 /* set slave flag before open to prevent IPv6 addrconf */
1861 vf_netdev->flags |= IFF_SLAVE;
1862
1863 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
1864
1865 call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
1866
1867 netdev_info(vf_netdev, "joined to %s\n", ndev->name);
1868 return 0;
1869
1870upper_link_failed:
1871 netdev_rx_handler_unregister(vf_netdev);
1872rx_handler_failed:
1873 return ret;
1874}
1875
1805static void __netvsc_vf_setup(struct net_device *ndev, 1876static void __netvsc_vf_setup(struct net_device *ndev,
1806 struct net_device *vf_netdev) 1877 struct net_device *vf_netdev)
1807{ 1878{
@@ -1852,95 +1923,104 @@ static void netvsc_vf_setup(struct work_struct *w)
1852 rtnl_unlock(); 1923 rtnl_unlock();
1853} 1924}
1854 1925
1855static int netvsc_pre_register_vf(struct net_device *vf_netdev, 1926static int netvsc_register_vf(struct net_device *vf_netdev)
1856 struct net_device *ndev)
1857{ 1927{
1928 struct net_device *ndev;
1858 struct net_device_context *net_device_ctx; 1929 struct net_device_context *net_device_ctx;
1859 struct netvsc_device *netvsc_dev; 1930 struct netvsc_device *netvsc_dev;
1931 int ret;
1932
1933 if (vf_netdev->addr_len != ETH_ALEN)
1934 return NOTIFY_DONE;
1935
1936 /*
1937 * We will use the MAC address to locate the synthetic interface to
1938 * associate with the VF interface. If we don't find a matching
1939 * synthetic interface, move on.
1940 */
1941 ndev = get_netvsc_bymac(vf_netdev->perm_addr);
1942 if (!ndev)
1943 return NOTIFY_DONE;
1860 1944
1861 net_device_ctx = netdev_priv(ndev); 1945 net_device_ctx = netdev_priv(ndev);
1862 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); 1946 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
1863 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) 1947 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
1864 return -ENODEV; 1948 return NOTIFY_DONE;
1865
1866 return 0;
1867}
1868 1949
1869static int netvsc_register_vf(struct net_device *vf_netdev, 1950 /* if syntihetic interface is a different namespace,
1870 struct net_device *ndev) 1951 * then move the VF to that namespace; join will be
1871{ 1952 * done again in that context.
1872 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1953 */
1873 1954 if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
1874 /* set slave flag before open to prevent IPv6 addrconf */ 1955 ret = dev_change_net_namespace(vf_netdev,
1875 vf_netdev->flags |= IFF_SLAVE; 1956 dev_net(ndev), "eth%d");
1876 1957 if (ret)
1877 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); 1958 netdev_err(vf_netdev,
1959 "could not move to same namespace as %s: %d\n",
1960 ndev->name, ret);
1961 else
1962 netdev_info(vf_netdev,
1963 "VF moved to namespace with: %s\n",
1964 ndev->name);
1965 return NOTIFY_DONE;
1966 }
1878 1967
1879 call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); 1968 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
1880 1969
1881 netdev_info(vf_netdev, "joined to %s\n", ndev->name); 1970 if (netvsc_vf_join(vf_netdev, ndev) != 0)
1971 return NOTIFY_DONE;
1882 1972
1883 dev_hold(vf_netdev); 1973 dev_hold(vf_netdev);
1884 rcu_assign_pointer(ndev_ctx->vf_netdev, vf_netdev); 1974 rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
1885 1975 return NOTIFY_OK;
1886 return 0;
1887} 1976}
1888 1977
1889/* VF up/down change detected, schedule to change data path */ 1978/* VF up/down change detected, schedule to change data path */
1890static int netvsc_vf_changed(struct net_device *vf_netdev, 1979static int netvsc_vf_changed(struct net_device *vf_netdev)
1891 struct net_device *ndev)
1892{ 1980{
1893 struct net_device_context *net_device_ctx; 1981 struct net_device_context *net_device_ctx;
1894 struct netvsc_device *netvsc_dev; 1982 struct netvsc_device *netvsc_dev;
1983 struct net_device *ndev;
1895 bool vf_is_up = netif_running(vf_netdev); 1984 bool vf_is_up = netif_running(vf_netdev);
1896 1985
1986 ndev = get_netvsc_byref(vf_netdev);
1987 if (!ndev)
1988 return NOTIFY_DONE;
1989
1897 net_device_ctx = netdev_priv(ndev); 1990 net_device_ctx = netdev_priv(ndev);
1898 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); 1991 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
1899 if (!netvsc_dev) 1992 if (!netvsc_dev)
1900 return -ENODEV; 1993 return NOTIFY_DONE;
1901 1994
1902 netvsc_switch_datapath(ndev, vf_is_up); 1995 netvsc_switch_datapath(ndev, vf_is_up);
1903 netdev_info(ndev, "Data path switched %s VF: %s\n", 1996 netdev_info(ndev, "Data path switched %s VF: %s\n",
1904 vf_is_up ? "to" : "from", vf_netdev->name); 1997 vf_is_up ? "to" : "from", vf_netdev->name);
1905 1998
1906 return 0; 1999 return NOTIFY_OK;
1907} 2000}
1908 2001
1909static int netvsc_pre_unregister_vf(struct net_device *vf_netdev, 2002static int netvsc_unregister_vf(struct net_device *vf_netdev)
1910 struct net_device *ndev)
1911{ 2003{
2004 struct net_device *ndev;
1912 struct net_device_context *net_device_ctx; 2005 struct net_device_context *net_device_ctx;
1913 2006
1914 net_device_ctx = netdev_priv(ndev); 2007 ndev = get_netvsc_byref(vf_netdev);
1915 cancel_delayed_work_sync(&net_device_ctx->vf_takeover); 2008 if (!ndev)
1916 2009 return NOTIFY_DONE;
1917 return 0;
1918}
1919
1920static int netvsc_unregister_vf(struct net_device *vf_netdev,
1921 struct net_device *ndev)
1922{
1923 struct net_device_context *net_device_ctx;
1924 2010
1925 net_device_ctx = netdev_priv(ndev); 2011 net_device_ctx = netdev_priv(ndev);
2012 cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
1926 2013
1927 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); 2014 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
1928 2015
2016 netdev_rx_handler_unregister(vf_netdev);
2017 netdev_upper_dev_unlink(vf_netdev, ndev);
1929 RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); 2018 RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
1930 dev_put(vf_netdev); 2019 dev_put(vf_netdev);
1931 2020
1932 return 0; 2021 return NOTIFY_OK;
1933} 2022}
1934 2023
1935static struct failover_ops netvsc_failover_ops = {
1936 .slave_pre_register = netvsc_pre_register_vf,
1937 .slave_register = netvsc_register_vf,
1938 .slave_pre_unregister = netvsc_pre_unregister_vf,
1939 .slave_unregister = netvsc_unregister_vf,
1940 .slave_link_change = netvsc_vf_changed,
1941 .slave_handle_frame = netvsc_vf_handle_frame,
1942};
1943
1944static int netvsc_probe(struct hv_device *dev, 2024static int netvsc_probe(struct hv_device *dev,
1945 const struct hv_vmbus_device_id *dev_id) 2025 const struct hv_vmbus_device_id *dev_id)
1946{ 2026{
@@ -2024,23 +2104,19 @@ static int netvsc_probe(struct hv_device *dev,
2024 else 2104 else
2025 net->max_mtu = ETH_DATA_LEN; 2105 net->max_mtu = ETH_DATA_LEN;
2026 2106
2027 ret = register_netdev(net); 2107 rtnl_lock();
2108 ret = register_netdevice(net);
2028 if (ret != 0) { 2109 if (ret != 0) {
2029 pr_err("Unable to register netdev.\n"); 2110 pr_err("Unable to register netdev.\n");
2030 goto register_failed; 2111 goto register_failed;
2031 } 2112 }
2032 2113
2033 net_device_ctx->failover = failover_register(net, &netvsc_failover_ops); 2114 list_add(&net_device_ctx->list, &netvsc_dev_list);
2034 if (IS_ERR(net_device_ctx->failover)) { 2115 rtnl_unlock();
2035 ret = PTR_ERR(net_device_ctx->failover); 2116 return 0;
2036 goto err_failover;
2037 }
2038
2039 return ret;
2040 2117
2041err_failover:
2042 unregister_netdev(net);
2043register_failed: 2118register_failed:
2119 rtnl_unlock();
2044 rndis_filter_device_remove(dev, nvdev); 2120 rndis_filter_device_remove(dev, nvdev);
2045rndis_failed: 2121rndis_failed:
2046 free_percpu(net_device_ctx->vf_stats); 2122 free_percpu(net_device_ctx->vf_stats);
@@ -2080,14 +2156,13 @@ static int netvsc_remove(struct hv_device *dev)
2080 rtnl_lock(); 2156 rtnl_lock();
2081 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 2157 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2082 if (vf_netdev) 2158 if (vf_netdev)
2083 failover_slave_unregister(vf_netdev); 2159 netvsc_unregister_vf(vf_netdev);
2084 2160
2085 if (nvdev) 2161 if (nvdev)
2086 rndis_filter_device_remove(dev, nvdev); 2162 rndis_filter_device_remove(dev, nvdev);
2087 2163
2088 unregister_netdevice(net); 2164 unregister_netdevice(net);
2089 2165 list_del(&ndev_ctx->list);
2090 failover_unregister(ndev_ctx->failover);
2091 2166
2092 rtnl_unlock(); 2167 rtnl_unlock();
2093 rcu_read_unlock(); 2168 rcu_read_unlock();
@@ -2115,8 +2190,54 @@ static struct hv_driver netvsc_drv = {
2115 .remove = netvsc_remove, 2190 .remove = netvsc_remove,
2116}; 2191};
2117 2192
2193/*
2194 * On Hyper-V, every VF interface is matched with a corresponding
2195 * synthetic interface. The synthetic interface is presented first
2196 * to the guest. When the corresponding VF instance is registered,
2197 * we will take care of switching the data path.
2198 */
2199static int netvsc_netdev_event(struct notifier_block *this,
2200 unsigned long event, void *ptr)
2201{
2202 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2203
2204 /* Skip our own events */
2205 if (event_dev->netdev_ops == &device_ops)
2206 return NOTIFY_DONE;
2207
2208 /* Avoid non-Ethernet type devices */
2209 if (event_dev->type != ARPHRD_ETHER)
2210 return NOTIFY_DONE;
2211
2212 /* Avoid Vlan dev with same MAC registering as VF */
2213 if (is_vlan_dev(event_dev))
2214 return NOTIFY_DONE;
2215
2216 /* Avoid Bonding master dev with same MAC registering as VF */
2217 if ((event_dev->priv_flags & IFF_BONDING) &&
2218 (event_dev->flags & IFF_MASTER))
2219 return NOTIFY_DONE;
2220
2221 switch (event) {
2222 case NETDEV_REGISTER:
2223 return netvsc_register_vf(event_dev);
2224 case NETDEV_UNREGISTER:
2225 return netvsc_unregister_vf(event_dev);
2226 case NETDEV_UP:
2227 case NETDEV_DOWN:
2228 return netvsc_vf_changed(event_dev);
2229 default:
2230 return NOTIFY_DONE;
2231 }
2232}
2233
2234static struct notifier_block netvsc_netdev_notifier = {
2235 .notifier_call = netvsc_netdev_event,
2236};
2237
2118static void __exit netvsc_drv_exit(void) 2238static void __exit netvsc_drv_exit(void)
2119{ 2239{
2240 unregister_netdevice_notifier(&netvsc_netdev_notifier);
2120 vmbus_driver_unregister(&netvsc_drv); 2241 vmbus_driver_unregister(&netvsc_drv);
2121} 2242}
2122 2243
@@ -2135,6 +2256,7 @@ static int __init netvsc_drv_init(void)
2135 if (ret) 2256 if (ret)
2136 return ret; 2257 return ret;
2137 2258
2259 register_netdevice_notifier(&netvsc_netdev_notifier);
2138 return 0; 2260 return 0;
2139} 2261}
2140 2262
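Aside (not part of the patch above): the netvsc change replaces the failover-ops callbacks with a plain netdevice notifier, as the comment block in the hunk explains. The following is a minimal, illustrative kernel-module sketch of that notifier pattern; the "demo" names are made up for the example and it assumes a normal kernel module build environment.

/* Sketch only: mirrors the notifier-based pattern netvsc switches to,
 * learning about other netdevs from NETDEV_* events instead of going
 * through the failover core.
 */
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		pr_info("demo: %s registered\n", dev->name);
		break;
	case NETDEV_UNREGISTER:
		pr_info("demo: %s unregistered\n", dev->name);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
		pr_info("demo: %s changed state\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_nb);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note the same ordering the patch uses: the notifier is registered after the vmbus driver in init and unregistered first in exit, so events never race with a half-torn-down driver.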
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 4e4c8daf44c3..33265747bf39 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -26,10 +26,7 @@
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/mdio-bitbang.h> 27#include <linux/mdio-bitbang.h>
28#include <linux/mdio-gpio.h> 28#include <linux/mdio-gpio.h>
29#include <linux/gpio.h>
30#include <linux/gpio/consumer.h> 29#include <linux/gpio/consumer.h>
31
32#include <linux/of_gpio.h>
33#include <linux/of_mdio.h> 30#include <linux/of_mdio.h>
34 31
35struct mdio_gpio_info { 32struct mdio_gpio_info {
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9825bfd42abc..18e819d964f1 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3572,11 +3572,14 @@ static int __init init_mac80211_hwsim(void)
3572 hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0); 3572 hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
3573 if (!hwsim_wq) 3573 if (!hwsim_wq)
3574 return -ENOMEM; 3574 return -ENOMEM;
3575 rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); 3575
3576 err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
3577 if (err)
3578 goto out_free_wq;
3576 3579
3577 err = register_pernet_device(&hwsim_net_ops); 3580 err = register_pernet_device(&hwsim_net_ops);
3578 if (err) 3581 if (err)
3579 return err; 3582 goto out_free_rht;
3580 3583
3581 err = platform_driver_register(&mac80211_hwsim_driver); 3584 err = platform_driver_register(&mac80211_hwsim_driver);
3582 if (err) 3585 if (err)
@@ -3701,6 +3704,10 @@ out_unregister_driver:
3701 platform_driver_unregister(&mac80211_hwsim_driver); 3704 platform_driver_unregister(&mac80211_hwsim_driver);
3702out_unregister_pernet: 3705out_unregister_pernet:
3703 unregister_pernet_device(&hwsim_net_ops); 3706 unregister_pernet_device(&hwsim_net_ops);
3707out_free_rht:
3708 rhashtable_destroy(&hwsim_radios_rht);
3709out_free_wq:
3710 destroy_workqueue(hwsim_wq);
3704 return err; 3711 return err;
3705} 3712}
3706module_init(init_mac80211_hwsim); 3713module_init(init_mac80211_hwsim);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 679da1abd73c..922ce0abf5cf 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -239,7 +239,7 @@ static void rx_refill_timeout(struct timer_list *t)
239static int netfront_tx_slot_available(struct netfront_queue *queue) 239static int netfront_tx_slot_available(struct netfront_queue *queue)
240{ 240{
241 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < 241 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
242 (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); 242 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
243} 243}
244 244
245static void xennet_maybe_wake_tx(struct netfront_queue *queue) 245static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -790,7 +790,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
790 RING_IDX cons = queue->rx.rsp_cons; 790 RING_IDX cons = queue->rx.rsp_cons;
791 struct sk_buff *skb = xennet_get_rx_skb(queue, cons); 791 struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
792 grant_ref_t ref = xennet_get_rx_ref(queue, cons); 792 grant_ref_t ref = xennet_get_rx_ref(queue, cons);
793 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); 793 int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
794 int slots = 1; 794 int slots = 1;
795 int err = 0; 795 int err = 0;
796 unsigned long ret; 796 unsigned long ret;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 04551af2ff23..dd2052f0efb7 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -345,7 +345,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
345 345
346 rcu_read_lock(); 346 rcu_read_lock();
347 nat_hook = rcu_dereference(nf_nat_hook); 347 nat_hook = rcu_dereference(nf_nat_hook);
348 if (nat_hook->decode_session) 348 if (nat_hook && nat_hook->decode_session)
349 nat_hook->decode_session(skb, fl); 349 nat_hook->decode_session(skb, fl);
350 rcu_read_unlock(); 350 rcu_read_unlock();
351#endif 351#endif
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index bfb3531fd88a..8ce271e187b6 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -23,6 +23,9 @@
23/* Set is defined with timeout support: timeout value may be 0 */ 23/* Set is defined with timeout support: timeout value may be 0 */
24#define IPSET_NO_TIMEOUT UINT_MAX 24#define IPSET_NO_TIMEOUT UINT_MAX
25 25
26/* Max timeout value, see msecs_to_jiffies() in jiffies.h */
27#define IPSET_MAX_TIMEOUT (UINT_MAX >> 1)/MSEC_PER_SEC
28
26#define ip_set_adt_opt_timeout(opt, set) \ 29#define ip_set_adt_opt_timeout(opt, set) \
27((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout) 30((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
28 31
@@ -32,11 +35,10 @@ ip_set_timeout_uget(struct nlattr *tb)
32 unsigned int timeout = ip_set_get_h32(tb); 35 unsigned int timeout = ip_set_get_h32(tb);
33 36
34 /* Normalize to fit into jiffies */ 37 /* Normalize to fit into jiffies */
35 if (timeout > UINT_MAX/MSEC_PER_SEC) 38 if (timeout > IPSET_MAX_TIMEOUT)
36 timeout = UINT_MAX/MSEC_PER_SEC; 39 timeout = IPSET_MAX_TIMEOUT;
37 40
38 /* Userspace supplied TIMEOUT parameter: adjust crazy size */ 41 return timeout;
39 return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
40} 42}
41 43
42static inline bool 44static inline bool
@@ -65,8 +67,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
65static inline u32 67static inline u32
66ip_set_timeout_get(const unsigned long *timeout) 68ip_set_timeout_get(const unsigned long *timeout)
67{ 69{
68 return *timeout == IPSET_ELEM_PERMANENT ? 0 : 70 u32 t;
69 jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; 71
72 if (*timeout == IPSET_ELEM_PERMANENT)
73 return 0;
74
75 t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
76 /* Zero value in userspace means no timeout */
77 return t == 0 ? 1 : t;
70} 78}
71 79
72#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
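Aside (not part of the patch above): the ip_set_timeout.h hunk clamps userspace timeouts to IPSET_MAX_TIMEOUT and makes a stored-but-nearly-expired entry report 1 second instead of 0, since 0 would read back as "no timeout". A small userspace C model of that normalization, assuming MSEC_PER_SEC is 1000 as in the kernel:

#include <stdio.h>
#include <limits.h>

#define MSEC_PER_SEC 1000U
#define IPSET_MAX_TIMEOUT ((UINT_MAX >> 1) / MSEC_PER_SEC)

/* clamp a userspace-supplied timeout so it fits into jiffies */
static unsigned int normalize_timeout(unsigned int timeout)
{
	return timeout > IPSET_MAX_TIMEOUT ? IPSET_MAX_TIMEOUT : timeout;
}

/* zero means "no timeout" to userspace, so report at least 1 */
static unsigned int report_timeout(unsigned int remaining_secs)
{
	return remaining_secs == 0 ? 1 : remaining_secs;
}

int main(void)
{
	printf("clamped:  %u\n", normalize_timeout(UINT_MAX)); /* IPSET_MAX_TIMEOUT */
	printf("reported: %u\n", report_timeout(0));           /* 1 */
	return 0;
}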
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 6d6e21dee462..a0bec23c6d5e 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -631,6 +631,7 @@ struct ip_vs_service {
631 631
632 /* alternate persistence engine */ 632 /* alternate persistence engine */
633 struct ip_vs_pe __rcu *pe; 633 struct ip_vs_pe __rcu *pe;
634 int conntrack_afmask;
634 635
635 struct rcu_head rcu_head; 636 struct rcu_head rcu_head;
636}; 637};
@@ -1611,6 +1612,35 @@ static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
1611 return false; 1612 return false;
1612} 1613}
1613 1614
1615static inline int ip_vs_register_conntrack(struct ip_vs_service *svc)
1616{
1617#if IS_ENABLED(CONFIG_NF_CONNTRACK)
1618 int afmask = (svc->af == AF_INET6) ? 2 : 1;
1619 int ret = 0;
1620
1621 if (!(svc->conntrack_afmask & afmask)) {
1622 ret = nf_ct_netns_get(svc->ipvs->net, svc->af);
1623 if (ret >= 0)
1624 svc->conntrack_afmask |= afmask;
1625 }
1626 return ret;
1627#else
1628 return 0;
1629#endif
1630}
1631
1632static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
1633{
1634#if IS_ENABLED(CONFIG_NF_CONNTRACK)
1635 int afmask = (svc->af == AF_INET6) ? 2 : 1;
1636
1637 if (svc->conntrack_afmask & afmask) {
1638 nf_ct_netns_put(svc->ipvs->net, svc->af);
1639 svc->conntrack_afmask &= ~afmask;
1640 }
1641#endif
1642}
1643
1614static inline int 1644static inline int
1615ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) 1645ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
1616{ 1646{
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 1910b6572430..3a188a0923a3 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -20,7 +20,8 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
20 bool *addit); 20 bool *addit);
21 21
22bool nf_conncount_add(struct hlist_head *head, 22bool nf_conncount_add(struct hlist_head *head,
23 const struct nf_conntrack_tuple *tuple); 23 const struct nf_conntrack_tuple *tuple,
24 const struct nf_conntrack_zone *zone);
24 25
25void nf_conncount_cache_free(struct hlist_head *hhead); 26void nf_conncount_cache_free(struct hlist_head *hhead);
26 27
diff --git a/include/net/netfilter/nft_dup.h b/include/net/netfilter/nft_dup.h
deleted file mode 100644
index 4d9d512984b2..000000000000
--- a/include/net/netfilter/nft_dup.h
+++ /dev/null
@@ -1,10 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _NFT_DUP_H_
3#define _NFT_DUP_H_
4
5struct nft_dup_inet {
6 enum nft_registers sreg_addr:8;
7 enum nft_registers sreg_dev:8;
8};
9
10#endif /* _NFT_DUP_H_ */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ebf809eed33a..dbe1b911a24d 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1133,6 +1133,11 @@ struct sctp_input_cb {
1133}; 1133};
1134#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0])) 1134#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))
1135 1135
1136struct sctp_output_cb {
1137 struct sk_buff *last;
1138};
1139#define SCTP_OUTPUT_CB(__skb) ((struct sctp_output_cb *)&((__skb)->cb[0]))
1140
1136static inline const struct sk_buff *sctp_gso_headskb(const struct sk_buff *skb) 1141static inline const struct sk_buff *sctp_gso_headskb(const struct sk_buff *skb)
1137{ 1142{
1138 const struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 1143 const struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
diff --git a/include/net/tls.h b/include/net/tls.h
index 70c273777fe9..7f84ea3e217c 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -109,8 +109,7 @@ struct tls_sw_context_rx {
109 109
110 struct strparser strp; 110 struct strparser strp;
111 void (*saved_data_ready)(struct sock *sk); 111 void (*saved_data_ready)(struct sock *sk);
112 unsigned int (*sk_poll)(struct file *file, struct socket *sock, 112 __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events);
113 struct poll_table_struct *wait);
114 struct sk_buff *recv_pkt; 113 struct sk_buff *recv_pkt;
115 u8 control; 114 u8 control;
116 bool decrypted; 115 bool decrypted;
@@ -225,8 +224,7 @@ void tls_sw_free_resources_tx(struct sock *sk);
225void tls_sw_free_resources_rx(struct sock *sk); 224void tls_sw_free_resources_rx(struct sock *sk);
226int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 225int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
227 int nonblock, int flags, int *addr_len); 226 int nonblock, int flags, int *addr_len);
228unsigned int tls_sw_poll(struct file *file, struct socket *sock, 227__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events);
229 struct poll_table_struct *wait);
230ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, 228ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
231 struct pipe_inode_info *pipe, 229 struct pipe_inode_info *pipe,
232 size_t len, unsigned int flags); 230 size_t len, unsigned int flags);
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index c712eb6879f1..336014bf8868 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -112,7 +112,7 @@ enum ip_conntrack_status {
112 IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | 112 IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
113 IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD), 113 IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
114 114
115 __IPS_MAX_BIT = 14, 115 __IPS_MAX_BIT = 15,
116}; 116};
117 117
118/* Connection tracking event types */ 118/* Connection tracking event types */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index c9bf74b94f37..89438e68dc03 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -266,7 +266,7 @@ enum nft_rule_compat_attributes {
266 * @NFT_SET_INTERVAL: set contains intervals 266 * @NFT_SET_INTERVAL: set contains intervals
267 * @NFT_SET_MAP: set is used as a dictionary 267 * @NFT_SET_MAP: set is used as a dictionary
268 * @NFT_SET_TIMEOUT: set uses timeouts 268 * @NFT_SET_TIMEOUT: set uses timeouts
269 * @NFT_SET_EVAL: set contains expressions for evaluation 269 * @NFT_SET_EVAL: set can be updated from the evaluation path
270 * @NFT_SET_OBJECT: set contains stateful objects 270 * @NFT_SET_OBJECT: set contains stateful objects
271 */ 271 */
272enum nft_set_flags { 272enum nft_set_flags {
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 28b36545de24..27e4e441caac 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -981,18 +981,18 @@
981 * only the %NL80211_ATTR_IE data is used and updated with this command. 981 * only the %NL80211_ATTR_IE data is used and updated with this command.
982 * 982 *
983 * @NL80211_CMD_SET_PMK: For offloaded 4-Way handshake, set the PMK or PMK-R0 983 * @NL80211_CMD_SET_PMK: For offloaded 4-Way handshake, set the PMK or PMK-R0
984 * for the given authenticator address (specified with &NL80211_ATTR_MAC). 984 * for the given authenticator address (specified with %NL80211_ATTR_MAC).
985 * When &NL80211_ATTR_PMKR0_NAME is set, &NL80211_ATTR_PMK specifies the 985 * When %NL80211_ATTR_PMKR0_NAME is set, %NL80211_ATTR_PMK specifies the
986 * PMK-R0, otherwise it specifies the PMK. 986 * PMK-R0, otherwise it specifies the PMK.
987 * @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously 987 * @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously
988 * configured PMK for the authenticator address identified by 988 * configured PMK for the authenticator address identified by
989 * &NL80211_ATTR_MAC. 989 * %NL80211_ATTR_MAC.
990 * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates that the 4 way 990 * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates that the 4 way
991 * handshake was completed successfully by the driver. The BSSID is 991 * handshake was completed successfully by the driver. The BSSID is
992 * specified with &NL80211_ATTR_MAC. Drivers that support 4 way handshake 992 * specified with %NL80211_ATTR_MAC. Drivers that support 4 way handshake
993 * offload should send this event after indicating 802.11 association with 993 * offload should send this event after indicating 802.11 association with
994 * &NL80211_CMD_CONNECT or &NL80211_CMD_ROAM. If the 4 way handshake failed 994 * %NL80211_CMD_CONNECT or %NL80211_CMD_ROAM. If the 4 way handshake failed
995 * &NL80211_CMD_DISCONNECT should be indicated instead. 995 * %NL80211_CMD_DISCONNECT should be indicated instead.
996 * 996 *
997 * @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request 997 * @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request
998 * and RX notification. This command is used both as a request to transmit 998 * and RX notification. This command is used both as a request to transmit
@@ -1029,9 +1029,9 @@
1029 * initiated the connection through the connect request. 1029 * initiated the connection through the connect request.
1030 * 1030 *
1031 * @NL80211_CMD_STA_OPMODE_CHANGED: An event that notify station's 1031 * @NL80211_CMD_STA_OPMODE_CHANGED: An event that notify station's
1032 * ht opmode or vht opmode changes using any of &NL80211_ATTR_SMPS_MODE, 1032 * ht opmode or vht opmode changes using any of %NL80211_ATTR_SMPS_MODE,
1033 * &NL80211_ATTR_CHANNEL_WIDTH,&NL80211_ATTR_NSS attributes with its 1033 * %NL80211_ATTR_CHANNEL_WIDTH,%NL80211_ATTR_NSS attributes with its
1034 * address(specified in &NL80211_ATTR_MAC). 1034 * address(specified in %NL80211_ATTR_MAC).
1035 * 1035 *
1036 * @NL80211_CMD_MAX: highest used command number 1036 * @NL80211_CMD_MAX: highest used command number
1037 * @__NL80211_CMD_AFTER_LAST: internal use 1037 * @__NL80211_CMD_AFTER_LAST: internal use
@@ -2218,7 +2218,7 @@ enum nl80211_commands {
2218 * @NL80211_ATTR_EXTERNAL_AUTH_ACTION: Identify the requested external 2218 * @NL80211_ATTR_EXTERNAL_AUTH_ACTION: Identify the requested external
2219 * authentication operation (u32 attribute with an 2219 * authentication operation (u32 attribute with an
2220 * &enum nl80211_external_auth_action value). This is used with the 2220 * &enum nl80211_external_auth_action value). This is used with the
2221 * &NL80211_CMD_EXTERNAL_AUTH request event. 2221 * %NL80211_CMD_EXTERNAL_AUTH request event.
2222 * @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user 2222 * @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user
2223 * space supports external authentication. This attribute shall be used 2223 * space supports external authentication. This attribute shall be used
2224 * only with %NL80211_CMD_CONNECT request. The driver may offload 2224 * only with %NL80211_CMD_CONNECT request. The driver may offload
@@ -3491,7 +3491,7 @@ enum nl80211_sched_scan_match_attr {
3491 * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated 3491 * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
3492 * base on contiguous rules and wider channels will be allowed to cross 3492 * base on contiguous rules and wider channels will be allowed to cross
3493 * multiple contiguous/overlapping frequency ranges. 3493 * multiple contiguous/overlapping frequency ranges.
3494 * @NL80211_RRF_IR_CONCURRENT: See &NL80211_FREQUENCY_ATTR_IR_CONCURRENT 3494 * @NL80211_RRF_IR_CONCURRENT: See %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
3495 * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation 3495 * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
3496 * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation 3496 * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
3497 * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed 3497 * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
@@ -5643,11 +5643,11 @@ enum nl80211_nan_func_attributes {
5643 * @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF set. 5643 * @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF set.
5644 * This is a flag. 5644 * This is a flag.
5645 * @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if 5645 * @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if
5646 * &NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary. 5646 * %NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary.
5647 * @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if 5647 * @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if
5648 * &NL80211_NAN_SRF_BF is present. This is a u8. 5648 * %NL80211_NAN_SRF_BF is present. This is a u8.
5649 * @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if 5649 * @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if
5650 * and only if &NL80211_NAN_SRF_BF isn't present. This is a nested 5650 * and only if %NL80211_NAN_SRF_BF isn't present. This is a nested
5651 * attribute. Each nested attribute is a MAC address. 5651 * attribute. Each nested attribute is a MAC address.
5652 * @NUM_NL80211_NAN_SRF_ATTR: internal 5652 * @NUM_NL80211_NAN_SRF_ATTR: internal
5653 * @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute 5653 * @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index ed13645bd80c..76efe9a183f5 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -295,6 +295,15 @@ static const struct file_operations bpffs_map_fops = {
295 .release = bpffs_map_release, 295 .release = bpffs_map_release,
296}; 296};
297 297
298static int bpffs_obj_open(struct inode *inode, struct file *file)
299{
300 return -EIO;
301}
302
303static const struct file_operations bpffs_obj_fops = {
304 .open = bpffs_obj_open,
305};
306
298static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw, 307static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
299 const struct inode_operations *iops, 308 const struct inode_operations *iops,
300 const struct file_operations *fops) 309 const struct file_operations *fops)
@@ -314,7 +323,8 @@ static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
314 323
315static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg) 324static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
316{ 325{
317 return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops, NULL); 326 return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
327 &bpffs_obj_fops);
318} 328}
319 329
320static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg) 330static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
@@ -322,7 +332,7 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
322 struct bpf_map *map = arg; 332 struct bpf_map *map = arg;
323 333
324 return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops, 334 return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
325 map->btf ? &bpffs_map_fops : NULL); 335 map->btf ? &bpffs_map_fops : &bpffs_obj_fops);
326} 336}
327 337
328static struct dentry * 338static struct dentry *
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 684b66bfa199..491828713e0b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -411,6 +411,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
411 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); 411 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
412 if (IS_ERR(watcher)) 412 if (IS_ERR(watcher))
413 return PTR_ERR(watcher); 413 return PTR_ERR(watcher);
414
415 if (watcher->family != NFPROTO_BRIDGE) {
416 module_put(watcher->me);
417 return -ENOENT;
418 }
419
414 w->u.watcher = watcher; 420 w->u.watcher = watcher;
415 421
416 par->target = watcher; 422 par->target = watcher;
@@ -709,6 +715,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
709 } 715 }
710 i = 0; 716 i = 0;
711 717
718 memset(&mtpar, 0, sizeof(mtpar));
719 memset(&tgpar, 0, sizeof(tgpar));
712 mtpar.net = tgpar.net = net; 720 mtpar.net = tgpar.net = net;
713 mtpar.table = tgpar.table = name; 721 mtpar.table = tgpar.table = name;
714 mtpar.entryinfo = tgpar.entryinfo = e; 722 mtpar.entryinfo = tgpar.entryinfo = e;
@@ -730,6 +738,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
730 goto cleanup_watchers; 738 goto cleanup_watchers;
731 } 739 }
732 740
741 /* Reject UNSPEC, xtables verdicts/return values are incompatible */
742 if (target->family != NFPROTO_BRIDGE) {
743 module_put(target->me);
744 ret = -ENOENT;
745 goto cleanup_watchers;
746 }
747
733 t->u.target = target; 748 t->u.target = target;
734 if (t->u.target == &ebt_standard_target) { 749 if (t->u.target == &ebt_standard_target) {
735 if (gap < sizeof(struct ebt_standard_target)) { 750 if (gap < sizeof(struct ebt_standard_target)) {
@@ -1606,16 +1621,16 @@ struct compat_ebt_entry_mwt {
1606 compat_uptr_t ptr; 1621 compat_uptr_t ptr;
1607 } u; 1622 } u;
1608 compat_uint_t match_size; 1623 compat_uint_t match_size;
1609 compat_uint_t data[0]; 1624 compat_uint_t data[0] __attribute__ ((aligned (__alignof__(struct compat_ebt_replace))));
1610}; 1625};
1611 1626
1612/* account for possible padding between match_size and ->data */ 1627/* account for possible padding between match_size and ->data */
1613static int ebt_compat_entry_padsize(void) 1628static int ebt_compat_entry_padsize(void)
1614{ 1629{
1615 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) < 1630 BUILD_BUG_ON(sizeof(struct ebt_entry_match) <
1616 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt))); 1631 sizeof(struct compat_ebt_entry_mwt));
1617 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) - 1632 return (int) sizeof(struct ebt_entry_match) -
1618 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)); 1633 sizeof(struct compat_ebt_entry_mwt);
1619} 1634}
1620 1635
1621static int ebt_compat_match_offset(const struct xt_match *match, 1636static int ebt_compat_match_offset(const struct xt_match *match,
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index eaf05de37f75..6de981270566 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -261,7 +261,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
261 if (!reject6_br_csum_ok(oldskb, hook)) 261 if (!reject6_br_csum_ok(oldskb, hook))
262 return; 262 return;
263 263
264 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) + 264 nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
265 LL_MAX_HEADER + len, GFP_ATOMIC); 265 LL_MAX_HEADER + len, GFP_ATOMIC);
266 if (!nskb) 266 if (!nskb)
267 return; 267 return;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a7a9c3d738ba..8e3fda9e725c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -119,13 +119,14 @@ unsigned long neigh_rand_reach_time(unsigned long base)
119EXPORT_SYMBOL(neigh_rand_reach_time); 119EXPORT_SYMBOL(neigh_rand_reach_time);
120 120
121 121
122static bool neigh_del(struct neighbour *n, __u8 state, 122static bool neigh_del(struct neighbour *n, __u8 state, __u8 flags,
123 struct neighbour __rcu **np, struct neigh_table *tbl) 123 struct neighbour __rcu **np, struct neigh_table *tbl)
124{ 124{
125 bool retval = false; 125 bool retval = false;
126 126
127 write_lock(&n->lock); 127 write_lock(&n->lock);
128 if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state)) { 128 if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state) &&
129 !(n->flags & flags)) {
129 struct neighbour *neigh; 130 struct neighbour *neigh;
130 131
131 neigh = rcu_dereference_protected(n->next, 132 neigh = rcu_dereference_protected(n->next,
@@ -157,7 +158,7 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
157 while ((n = rcu_dereference_protected(*np, 158 while ((n = rcu_dereference_protected(*np,
158 lockdep_is_held(&tbl->lock)))) { 159 lockdep_is_held(&tbl->lock)))) {
159 if (n == ndel) 160 if (n == ndel)
160 return neigh_del(n, 0, np, tbl); 161 return neigh_del(n, 0, 0, np, tbl);
161 np = &n->next; 162 np = &n->next;
162 } 163 }
163 return false; 164 return false;
@@ -185,7 +186,8 @@ static int neigh_forced_gc(struct neigh_table *tbl)
185 * - nobody refers to it. 186 * - nobody refers to it.
186 * - it is not permanent 187 * - it is not permanent
187 */ 188 */
188 if (neigh_del(n, NUD_PERMANENT, np, tbl)) { 189 if (neigh_del(n, NUD_PERMANENT, NTF_EXT_LEARNED, np,
190 tbl)) {
189 shrunk = 1; 191 shrunk = 1;
190 continue; 192 continue;
191 } 193 }
diff --git a/net/core/sock.c b/net/core/sock.c
index f333d75ef1a9..bcc41829a16d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -728,22 +728,9 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
728 sock_valbool_flag(sk, SOCK_DBG, valbool); 728 sock_valbool_flag(sk, SOCK_DBG, valbool);
729 break; 729 break;
730 case SO_REUSEADDR: 730 case SO_REUSEADDR:
731 val = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); 731 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
732 if ((sk->sk_family == PF_INET || sk->sk_family == PF_INET6) &&
733 inet_sk(sk)->inet_num &&
734 (sk->sk_reuse != val)) {
735 ret = (sk->sk_state == TCP_ESTABLISHED) ? -EISCONN : -EUCLEAN;
736 break;
737 }
738 sk->sk_reuse = val;
739 break; 732 break;
740 case SO_REUSEPORT: 733 case SO_REUSEPORT:
741 if ((sk->sk_family == PF_INET || sk->sk_family == PF_INET6) &&
742 inet_sk(sk)->inet_num &&
743 (sk->sk_reuseport != valbool)) {
744 ret = (sk->sk_state == TCP_ESTABLISHED) ? -EISCONN : -EUCLEAN;
745 break;
746 }
747 sk->sk_reuseport = valbool; 734 sk->sk_reuseport = valbool;
748 break; 735 break;
749 case SO_TYPE: 736 case SO_TYPE:
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 7d20e1f3de28..56197f0d9608 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -75,7 +75,8 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
75 if (!skb->dev) 75 if (!skb->dev)
76 return NULL; 76 return NULL;
77 77
78 pskb_trim_rcsum(skb, skb->len - 4); 78 if (pskb_trim_rcsum(skb, skb->len - 4))
79 return NULL;
79 80
80 return skb; 81 return skb;
81} 82}
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 38ab97b0a2ec..ca0dad90803a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -531,6 +531,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
531 return -ENOMEM; 531 return -ENOMEM;
532 532
533 j = 0; 533 j = 0;
534 memset(&mtpar, 0, sizeof(mtpar));
534 mtpar.net = net; 535 mtpar.net = net;
535 mtpar.table = name; 536 mtpar.table = name;
536 mtpar.entryinfo = &e->ip; 537 mtpar.entryinfo = &e->ip;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index fed3f1c66167..bea17f1e8302 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1730,6 +1730,10 @@ process:
1730 reqsk_put(req); 1730 reqsk_put(req);
1731 goto discard_it; 1731 goto discard_it;
1732 } 1732 }
1733 if (tcp_checksum_complete(skb)) {
1734 reqsk_put(req);
1735 goto csum_error;
1736 }
1733 if (unlikely(sk->sk_state != TCP_LISTEN)) { 1737 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1734 inet_csk_reqsk_queue_drop_and_put(sk, req); 1738 inet_csk_reqsk_queue_drop_and_put(sk, req);
1735 goto lookup; 1739 goto lookup;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 4d58e2ce0b5b..8cc7c3487330 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -268,8 +268,6 @@ found:
268 goto out_check_final; 268 goto out_check_final;
269 } 269 }
270 270
271 p = *head;
272 th2 = tcp_hdr(p);
273 tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); 271 tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
274 272
275out_check_final: 273out_check_final:
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 89019bf59f46..c134286d6a41 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1324,6 +1324,7 @@ retry:
1324 } 1324 }
1325 } 1325 }
1326 1326
1327 memset(&cfg, 0, sizeof(cfg));
1327 cfg.valid_lft = min_t(__u32, ifp->valid_lft, 1328 cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1328 idev->cnf.temp_valid_lft + age); 1329 idev->cnf.temp_valid_lft + age);
1329 cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor; 1330 cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
@@ -1357,7 +1358,6 @@ retry:
1357 1358
1358 cfg.pfx = &addr; 1359 cfg.pfx = &addr;
1359 cfg.scope = ipv6_addr_scope(cfg.pfx); 1360 cfg.scope = ipv6_addr_scope(cfg.pfx);
1360 cfg.rt_priority = 0;
1361 1361
1362 ift = ipv6_add_addr(idev, &cfg, block, NULL); 1362 ift = ipv6_add_addr(idev, &cfg, block, NULL);
1363 if (IS_ERR(ift)) { 1363 if (IS_ERR(ift)) {
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 7aa4c41a3bd9..39d1d487eca2 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -934,6 +934,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
934{ 934{
935 struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, 935 struct fib6_info *leaf = rcu_dereference_protected(fn->leaf,
936 lockdep_is_held(&rt->fib6_table->tb6_lock)); 936 lockdep_is_held(&rt->fib6_table->tb6_lock));
937 enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
937 struct fib6_info *iter = NULL, *match = NULL; 938 struct fib6_info *iter = NULL, *match = NULL;
938 struct fib6_info __rcu **ins; 939 struct fib6_info __rcu **ins;
939 int replace = (info->nlh && 940 int replace = (info->nlh &&
@@ -1013,6 +1014,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
1013 "Can not append to a REJECT route"); 1014 "Can not append to a REJECT route");
1014 return -EINVAL; 1015 return -EINVAL;
1015 } 1016 }
1017 event = FIB_EVENT_ENTRY_APPEND;
1016 rt->fib6_nsiblings = match->fib6_nsiblings; 1018 rt->fib6_nsiblings = match->fib6_nsiblings;
1017 list_add_tail(&rt->fib6_siblings, &match->fib6_siblings); 1019 list_add_tail(&rt->fib6_siblings, &match->fib6_siblings);
1018 match->fib6_nsiblings++; 1020 match->fib6_nsiblings++;
@@ -1034,15 +1036,12 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
1034 * insert node 1036 * insert node
1035 */ 1037 */
1036 if (!replace) { 1038 if (!replace) {
1037 enum fib_event_type event;
1038
1039 if (!add) 1039 if (!add)
1040 pr_warn("NLM_F_CREATE should be set when creating new route\n"); 1040 pr_warn("NLM_F_CREATE should be set when creating new route\n");
1041 1041
1042add: 1042add:
1043 nlflags |= NLM_F_CREATE; 1043 nlflags |= NLM_F_CREATE;
1044 1044
1045 event = append ? FIB_EVENT_ENTRY_APPEND : FIB_EVENT_ENTRY_ADD;
1046 err = call_fib6_entry_notifiers(info->nl_net, event, rt, 1045 err = call_fib6_entry_notifiers(info->nl_net, event, rt,
1047 extack); 1046 extack);
1048 if (err) 1047 if (err)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0758b5bcfb29..7eab959734bc 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -550,6 +550,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
550 return -ENOMEM; 550 return -ENOMEM;
551 551
552 j = 0; 552 j = 0;
553 memset(&mtpar, 0, sizeof(mtpar));
553 mtpar.net = net; 554 mtpar.net = net;
554 mtpar.table = name; 555 mtpar.table = name;
555 mtpar.entryinfo = &e->ipv6; 556 mtpar.entryinfo = &e->ipv6;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fb956989adaf..86a0e4333d42 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2307,9 +2307,6 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2307 const struct in6_addr *daddr, *saddr; 2307 const struct in6_addr *daddr, *saddr;
2308 struct rt6_info *rt6 = (struct rt6_info *)dst; 2308 struct rt6_info *rt6 = (struct rt6_info *)dst;
2309 2309
2310 if (rt6->rt6i_flags & RTF_LOCAL)
2311 return;
2312
2313 if (dst_metric_locked(dst, RTAX_MTU)) 2310 if (dst_metric_locked(dst, RTAX_MTU))
2314 return; 2311 return;
2315 2312
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b620d9b72e59..7efa9fd7e109 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1479,6 +1479,10 @@ process:
1479 reqsk_put(req); 1479 reqsk_put(req);
1480 goto discard_it; 1480 goto discard_it;
1481 } 1481 }
1482 if (tcp_checksum_complete(skb)) {
1483 reqsk_put(req);
1484 goto csum_error;
1485 }
1482 if (unlikely(sk->sk_state != TCP_LISTEN)) { 1486 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1483 inet_csk_reqsk_queue_drop_and_put(sk, req); 1487 inet_csk_reqsk_queue_drop_and_put(sk, req);
1484 goto lookup; 1488 goto lookup;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 6616c9fd292f..5b9900889e31 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -553,6 +553,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
553 goto out_tunnel; 553 goto out_tunnel;
554 } 554 }
555 555
556 /* L2TPv2 only accepts PPP pseudo-wires */
557 if (tunnel->version == 2 && cfg.pw_type != L2TP_PWTYPE_PPP) {
558 ret = -EPROTONOSUPPORT;
559 goto out_tunnel;
560 }
561
556 if (tunnel->version > 2) { 562 if (tunnel->version > 2) {
557 if (info->attrs[L2TP_ATTR_DATA_SEQ]) 563 if (info->attrs[L2TP_ATTR_DATA_SEQ])
558 cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); 564 cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index b56cb1df4fc0..55188382845c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -612,6 +612,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
612 u32 session_id, peer_session_id; 612 u32 session_id, peer_session_id;
613 bool drop_refcnt = false; 613 bool drop_refcnt = false;
614 bool drop_tunnel = false; 614 bool drop_tunnel = false;
615 bool new_session = false;
616 bool new_tunnel = false;
615 int ver = 2; 617 int ver = 2;
616 int fd; 618 int fd;
617 619
@@ -701,6 +703,15 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
701 .encap = L2TP_ENCAPTYPE_UDP, 703 .encap = L2TP_ENCAPTYPE_UDP,
702 .debug = 0, 704 .debug = 0,
703 }; 705 };
706
707 /* Prevent l2tp_tunnel_register() from trying to set up
708 * a kernel socket.
709 */
710 if (fd < 0) {
711 error = -EBADF;
712 goto end;
713 }
714
704 error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel); 715 error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
705 if (error < 0) 716 if (error < 0)
706 goto end; 717 goto end;
@@ -713,6 +724,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
713 goto end; 724 goto end;
714 } 725 }
715 drop_tunnel = true; 726 drop_tunnel = true;
727 new_tunnel = true;
716 } 728 }
717 } else { 729 } else {
718 /* Error if we can't find the tunnel */ 730 /* Error if we can't find the tunnel */
@@ -734,6 +746,12 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
734 session = l2tp_session_get(sock_net(sk), tunnel, session_id); 746 session = l2tp_session_get(sock_net(sk), tunnel, session_id);
735 if (session) { 747 if (session) {
736 drop_refcnt = true; 748 drop_refcnt = true;
749
750 if (session->pwtype != L2TP_PWTYPE_PPP) {
751 error = -EPROTOTYPE;
752 goto end;
753 }
754
737 ps = l2tp_session_priv(session); 755 ps = l2tp_session_priv(session);
738 756
739 /* Using a pre-existing session is fine as long as it hasn't 757 /* Using a pre-existing session is fine as long as it hasn't
@@ -751,6 +769,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
751 /* Default MTU must allow space for UDP/L2TP/PPP headers */ 769 /* Default MTU must allow space for UDP/L2TP/PPP headers */
752 cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; 770 cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
753 cfg.mru = cfg.mtu; 771 cfg.mru = cfg.mtu;
772 cfg.pw_type = L2TP_PWTYPE_PPP;
754 773
755 session = l2tp_session_create(sizeof(struct pppol2tp_session), 774 session = l2tp_session_create(sizeof(struct pppol2tp_session),
756 tunnel, session_id, 775 tunnel, session_id,
@@ -772,6 +791,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
772 goto end; 791 goto end;
773 } 792 }
774 drop_refcnt = true; 793 drop_refcnt = true;
794 new_session = true;
775 } 795 }
776 796
777 /* Special case: if source & dest session_id == 0x0000, this 797 /* Special case: if source & dest session_id == 0x0000, this
@@ -818,6 +838,12 @@ out_no_ppp:
818 session->name); 838 session->name);
819 839
820end: 840end:
841 if (error) {
842 if (new_session)
843 l2tp_session_delete(session);
844 if (new_tunnel)
845 l2tp_tunnel_delete(tunnel);
846 }
821 if (drop_refcnt) 847 if (drop_refcnt)
822 l2tp_session_dec_refcount(session); 848 l2tp_session_dec_refcount(session);
823 if (drop_tunnel) 849 if (drop_tunnel)
@@ -1175,7 +1201,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
1175 l2tp_session_get(sock_net(sk), tunnel, 1201 l2tp_session_get(sock_net(sk), tunnel,
1176 stats.session_id); 1202 stats.session_id);
1177 1203
1178 if (session) { 1204 if (session && session->pwtype == L2TP_PWTYPE_PPP) {
1179 err = pppol2tp_session_ioctl(session, cmd, 1205 err = pppol2tp_session_ioctl(session, cmd,
1180 arg); 1206 arg);
1181 l2tp_session_dec_refcount(session); 1207 l2tp_session_dec_refcount(session);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index fb1b1f9e7e5e..fb73451ed85e 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1098,6 +1098,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1098 1098
1099 ieee80211_led_init(local); 1099 ieee80211_led_init(local);
1100 1100
1101 result = ieee80211_txq_setup_flows(local);
1102 if (result)
1103 goto fail_flows;
1104
1101 rtnl_lock(); 1105 rtnl_lock();
1102 1106
1103 result = ieee80211_init_rate_ctrl_alg(local, 1107 result = ieee80211_init_rate_ctrl_alg(local,
@@ -1120,10 +1124,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1120 1124
1121 rtnl_unlock(); 1125 rtnl_unlock();
1122 1126
1123 result = ieee80211_txq_setup_flows(local);
1124 if (result)
1125 goto fail_flows;
1126
1127#ifdef CONFIG_INET 1127#ifdef CONFIG_INET
1128 local->ifa_notifier.notifier_call = ieee80211_ifa_changed; 1128 local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
1129 result = register_inetaddr_notifier(&local->ifa_notifier); 1129 result = register_inetaddr_notifier(&local->ifa_notifier);
@@ -1149,8 +1149,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1149#if defined(CONFIG_INET) || defined(CONFIG_IPV6) 1149#if defined(CONFIG_INET) || defined(CONFIG_IPV6)
1150 fail_ifa: 1150 fail_ifa:
1151#endif 1151#endif
1152 ieee80211_txq_teardown_flows(local);
1153 fail_flows:
1154 rtnl_lock(); 1152 rtnl_lock();
1155 rate_control_deinitialize(local); 1153 rate_control_deinitialize(local);
1156 ieee80211_remove_interfaces(local); 1154 ieee80211_remove_interfaces(local);
@@ -1158,6 +1156,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1158 rtnl_unlock(); 1156 rtnl_unlock();
1159 ieee80211_led_exit(local); 1157 ieee80211_led_exit(local);
1160 ieee80211_wep_free(local); 1158 ieee80211_wep_free(local);
1159 ieee80211_txq_teardown_flows(local);
1160 fail_flows:
1161 destroy_workqueue(local->workqueue); 1161 destroy_workqueue(local->workqueue);
1162 fail_workqueue: 1162 fail_workqueue:
1163 wiphy_unregister(local->hw.wiphy); 1163 wiphy_unregister(local->hw.wiphy);
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index bbad940c0137..8a33dac4e805 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -1234,7 +1234,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
1234 pr_debug("Create set %s with family %s\n", 1234 pr_debug("Create set %s with family %s\n",
1235 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6"); 1235 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
1236 1236
1237#ifndef IP_SET_PROTO_UNDEF 1237#ifdef IP_SET_PROTO_UNDEF
1238 if (set->family != NFPROTO_UNSPEC)
1239 return -IPSET_ERR_INVALID_FAMILY;
1240#else
1238 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 1241 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
1239 return -IPSET_ERR_INVALID_FAMILY; 1242 return -IPSET_ERR_INVALID_FAMILY;
1240#endif 1243#endif
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 0c03c0e16a96..dd21782e2f12 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -839,6 +839,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
839 * For now only for NAT! 839 * For now only for NAT!
840 */ 840 */
841 ip_vs_rs_hash(ipvs, dest); 841 ip_vs_rs_hash(ipvs, dest);
842 /* FTP-NAT requires conntrack for mangling */
843 if (svc->port == FTPPORT)
844 ip_vs_register_conntrack(svc);
842 } 845 }
843 atomic_set(&dest->conn_flags, conn_flags); 846 atomic_set(&dest->conn_flags, conn_flags);
844 847
@@ -1462,6 +1465,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
1462 */ 1465 */
1463static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup) 1466static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup)
1464{ 1467{
1468 ip_vs_unregister_conntrack(svc);
1465 /* Hold svc to avoid double release from dest_trash */ 1469 /* Hold svc to avoid double release from dest_trash */
1466 atomic_inc(&svc->refcnt); 1470 atomic_inc(&svc->refcnt);
1467 /* 1471 /*
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index ba0a0fd045c8..473cce2a5231 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -168,7 +168,7 @@ static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
168 bool new_rt_is_local) 168 bool new_rt_is_local)
169{ 169{
170 bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL); 170 bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL);
171 bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL); 171 bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_NON_LOCAL);
172 bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR); 172 bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR);
173 bool source_is_loopback; 173 bool source_is_loopback;
174 bool old_rt_is_local; 174 bool old_rt_is_local;
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 3b5059a8dcdd..d8383609fe28 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -46,6 +46,7 @@
46struct nf_conncount_tuple { 46struct nf_conncount_tuple {
47 struct hlist_node node; 47 struct hlist_node node;
48 struct nf_conntrack_tuple tuple; 48 struct nf_conntrack_tuple tuple;
49 struct nf_conntrack_zone zone;
49}; 50};
50 51
51struct nf_conncount_rb { 52struct nf_conncount_rb {
@@ -80,7 +81,8 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
80} 81}
81 82
82bool nf_conncount_add(struct hlist_head *head, 83bool nf_conncount_add(struct hlist_head *head,
83 const struct nf_conntrack_tuple *tuple) 84 const struct nf_conntrack_tuple *tuple,
85 const struct nf_conntrack_zone *zone)
84{ 86{
85 struct nf_conncount_tuple *conn; 87 struct nf_conncount_tuple *conn;
86 88
@@ -88,6 +90,7 @@ bool nf_conncount_add(struct hlist_head *head,
88 if (conn == NULL) 90 if (conn == NULL)
89 return false; 91 return false;
90 conn->tuple = *tuple; 92 conn->tuple = *tuple;
93 conn->zone = *zone;
91 hlist_add_head(&conn->node, head); 94 hlist_add_head(&conn->node, head);
92 return true; 95 return true;
93} 96}
@@ -108,7 +111,7 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
108 111
109 /* check the saved connections */ 112 /* check the saved connections */
110 hlist_for_each_entry_safe(conn, n, head, node) { 113 hlist_for_each_entry_safe(conn, n, head, node) {
111 found = nf_conntrack_find_get(net, zone, &conn->tuple); 114 found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
112 if (found == NULL) { 115 if (found == NULL) {
113 hlist_del(&conn->node); 116 hlist_del(&conn->node);
114 kmem_cache_free(conncount_conn_cachep, conn); 117 kmem_cache_free(conncount_conn_cachep, conn);
@@ -117,7 +120,8 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
117 120
118 found_ct = nf_ct_tuplehash_to_ctrack(found); 121 found_ct = nf_ct_tuplehash_to_ctrack(found);
119 122
120 if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple)) { 123 if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
124 nf_ct_zone_equal(found_ct, zone, zone->dir)) {
121 /* 125 /*
122 * Just to be sure we have it only once in the list. 126 * Just to be sure we have it only once in the list.
123 * We should not see tuples twice unless someone hooks 127 * We should not see tuples twice unless someone hooks
@@ -196,7 +200,7 @@ count_tree(struct net *net, struct rb_root *root,
196 if (!addit) 200 if (!addit)
197 return count; 201 return count;
198 202
199 if (!nf_conncount_add(&rbconn->hhead, tuple)) 203 if (!nf_conncount_add(&rbconn->hhead, tuple, zone))
200 return 0; /* hotdrop */ 204 return 0; /* hotdrop */
201 205
202 return count + 1; 206 return count + 1;
@@ -238,6 +242,7 @@ count_tree(struct net *net, struct rb_root *root,
238 } 242 }
239 243
240 conn->tuple = *tuple; 244 conn->tuple = *tuple;
245 conn->zone = *zone;
241 memcpy(rbconn->key, key, sizeof(u32) * keylen); 246 memcpy(rbconn->key, key, sizeof(u32) * keylen);
242 247
243 INIT_HLIST_HEAD(&rbconn->hhead); 248 INIT_HLIST_HEAD(&rbconn->hhead);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 39327a42879f..20a2e37c76d1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1446,7 +1446,8 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
1446 } 1446 }
1447 nfnl_lock(NFNL_SUBSYS_CTNETLINK); 1447 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1448 rcu_read_lock(); 1448 rcu_read_lock();
1449 if (nat_hook->parse_nat_setup) 1449 nat_hook = rcu_dereference(nf_nat_hook);
1450 if (nat_hook)
1450 return -EAGAIN; 1451 return -EAGAIN;
1451#endif 1452#endif
1452 return -EOPNOTSUPP; 1453 return -EOPNOTSUPP;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index f0411fbffe77..896d4a36081d 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2890,12 +2890,13 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net,
2890 u32 id = ntohl(nla_get_be32(nla)); 2890 u32 id = ntohl(nla_get_be32(nla));
2891 2891
2892 list_for_each_entry(trans, &net->nft.commit_list, list) { 2892 list_for_each_entry(trans, &net->nft.commit_list, list) {
2893 struct nft_set *set = nft_trans_set(trans); 2893 if (trans->msg_type == NFT_MSG_NEWSET) {
2894 struct nft_set *set = nft_trans_set(trans);
2894 2895
2895 if (trans->msg_type == NFT_MSG_NEWSET && 2896 if (id == nft_trans_set_id(trans) &&
2896 id == nft_trans_set_id(trans) && 2897 nft_active_genmask(set, genmask))
2897 nft_active_genmask(set, genmask)) 2898 return set;
2898 return set; 2899 }
2899 } 2900 }
2900 return ERR_PTR(-ENOENT); 2901 return ERR_PTR(-ENOENT);
2901} 2902}
@@ -5836,18 +5837,23 @@ static int nf_tables_flowtable_event(struct notifier_block *this,
5836 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5837 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5837 struct nft_flowtable *flowtable; 5838 struct nft_flowtable *flowtable;
5838 struct nft_table *table; 5839 struct nft_table *table;
5840 struct net *net;
5839 5841
5840 if (event != NETDEV_UNREGISTER) 5842 if (event != NETDEV_UNREGISTER)
5841 return 0; 5843 return 0;
5842 5844
5845 net = maybe_get_net(dev_net(dev));
5846 if (!net)
5847 return 0;
5848
5843 nfnl_lock(NFNL_SUBSYS_NFTABLES); 5849 nfnl_lock(NFNL_SUBSYS_NFTABLES);
5844 list_for_each_entry(table, &dev_net(dev)->nft.tables, list) { 5850 list_for_each_entry(table, &net->nft.tables, list) {
5845 list_for_each_entry(flowtable, &table->flowtables, list) { 5851 list_for_each_entry(flowtable, &table->flowtables, list) {
5846 nft_flowtable_event(event, dev, flowtable); 5852 nft_flowtable_event(event, dev, flowtable);
5847 } 5853 }
5848 } 5854 }
5849 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 5855 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
5850 5856 put_net(net);
5851 return NOTIFY_DONE; 5857 return NOTIFY_DONE;
5852} 5858}
5853 5859
@@ -6438,7 +6444,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
6438 kfree(trans); 6444 kfree(trans);
6439} 6445}
6440 6446
6441static int nf_tables_abort(struct net *net, struct sk_buff *skb) 6447static int __nf_tables_abort(struct net *net)
6442{ 6448{
6443 struct nft_trans *trans, *next; 6449 struct nft_trans *trans, *next;
6444 struct nft_trans_elem *te; 6450 struct nft_trans_elem *te;
@@ -6554,6 +6560,11 @@ static void nf_tables_cleanup(struct net *net)
6554 nft_validate_state_update(net, NFT_VALIDATE_SKIP); 6560 nft_validate_state_update(net, NFT_VALIDATE_SKIP);
6555} 6561}
6556 6562
6563static int nf_tables_abort(struct net *net, struct sk_buff *skb)
6564{
6565 return __nf_tables_abort(net);
6566}
6567
6557static bool nf_tables_valid_genid(struct net *net, u32 genid) 6568static bool nf_tables_valid_genid(struct net *net, u32 genid)
6558{ 6569{
6559 return net->nft.base_seq == genid; 6570 return net->nft.base_seq == genid;
@@ -7148,9 +7159,12 @@ static int __net_init nf_tables_init_net(struct net *net)
7148 7159
7149static void __net_exit nf_tables_exit_net(struct net *net) 7160static void __net_exit nf_tables_exit_net(struct net *net)
7150{ 7161{
7162 nfnl_lock(NFNL_SUBSYS_NFTABLES);
7163 if (!list_empty(&net->nft.commit_list))
7164 __nf_tables_abort(net);
7151 __nft_release_tables(net); 7165 __nft_release_tables(net);
7166 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
7152 WARN_ON_ONCE(!list_empty(&net->nft.tables)); 7167 WARN_ON_ONCE(!list_empty(&net->nft.tables));
7153 WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
7154} 7168}
7155 7169
7156static struct pernet_operations nf_tables_net_ops = { 7170static struct pernet_operations nf_tables_net_ops = {
@@ -7192,13 +7206,13 @@ err1:
7192 7206
7193static void __exit nf_tables_module_exit(void) 7207static void __exit nf_tables_module_exit(void)
7194{ 7208{
7195 unregister_pernet_subsys(&nf_tables_net_ops);
7196 nfnetlink_subsys_unregister(&nf_tables_subsys); 7209 nfnetlink_subsys_unregister(&nf_tables_subsys);
7197 unregister_netdevice_notifier(&nf_tables_flowtable_notifier); 7210 unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
7211 nft_chain_filter_fini();
7212 unregister_pernet_subsys(&nf_tables_net_ops);
7198 rcu_barrier(); 7213 rcu_barrier();
7199 nf_tables_core_module_exit(); 7214 nf_tables_core_module_exit();
7200 kfree(info); 7215 kfree(info);
7201 nft_chain_filter_fini();
7202} 7216}
7203 7217
7204module_init(nf_tables_module_init); 7218module_init(nf_tables_module_init);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index deff10adef9c..8de912ca53d3 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -183,7 +183,8 @@ next_rule:
183 183
184 switch (regs.verdict.code) { 184 switch (regs.verdict.code) {
185 case NFT_JUMP: 185 case NFT_JUMP:
186 BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE); 186 if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
187 return NF_DROP;
187 jumpstack[stackptr].chain = chain; 188 jumpstack[stackptr].chain = chain;
188 jumpstack[stackptr].rules = rules + 1; 189 jumpstack[stackptr].rules = rules + 1;
189 stackptr++; 190 stackptr++;
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 4d0da7042aff..e1b6be29848d 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -429,7 +429,7 @@ replay:
429 */ 429 */
430 if (err == -EAGAIN) { 430 if (err == -EAGAIN) {
431 status |= NFNL_BATCH_REPLAY; 431 status |= NFNL_BATCH_REPLAY;
432 goto next; 432 goto done;
433 } 433 }
434 } 434 }
435ack: 435ack:
@@ -456,7 +456,7 @@ ack:
456 if (err) 456 if (err)
457 status |= NFNL_BATCH_FAILURE; 457 status |= NFNL_BATCH_FAILURE;
458 } 458 }
459next: 459
460 msglen = NLMSG_ALIGN(nlh->nlmsg_len); 460 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
461 if (msglen > skb->len) 461 if (msglen > skb->len)
462 msglen = skb->len; 462 msglen = skb->len;
@@ -464,7 +464,11 @@ next:
464 } 464 }
465done: 465done:
466 if (status & NFNL_BATCH_REPLAY) { 466 if (status & NFNL_BATCH_REPLAY) {
467 ss->abort(net, oskb); 467 const struct nfnetlink_subsystem *ss2;
468
469 ss2 = nfnl_dereference_protected(subsys_id);
470 if (ss2 == ss)
471 ss->abort(net, oskb);
468 nfnl_err_reset(&err_list); 472 nfnl_err_reset(&err_list);
469 nfnl_unlock(subsys_id); 473 nfnl_unlock(subsys_id);
470 kfree_skb(skb); 474 kfree_skb(skb);
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index 84c902477a91..d21834bed805 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -318,6 +318,10 @@ static int nf_tables_netdev_event(struct notifier_block *this,
318 event != NETDEV_CHANGENAME) 318 event != NETDEV_CHANGENAME)
319 return NOTIFY_DONE; 319 return NOTIFY_DONE;
320 320
321 ctx.net = maybe_get_net(ctx.net);
322 if (!ctx.net)
323 return NOTIFY_DONE;
324
321 nfnl_lock(NFNL_SUBSYS_NFTABLES); 325 nfnl_lock(NFNL_SUBSYS_NFTABLES);
322 list_for_each_entry(table, &ctx.net->nft.tables, list) { 326 list_for_each_entry(table, &ctx.net->nft.tables, list) {
323 if (table->family != NFPROTO_NETDEV) 327 if (table->family != NFPROTO_NETDEV)
@@ -334,6 +338,7 @@ static int nf_tables_netdev_event(struct notifier_block *this,
334 } 338 }
335 } 339 }
336 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 340 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
341 put_net(ctx.net);
337 342
338 return NOTIFY_DONE; 343 return NOTIFY_DONE;
339} 344}
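The netdevice notifier above now pins the netns with maybe_get_net() and returns NOTIFY_DONE early if the namespace is already being torn down, dropping the reference with put_net() after the table walk. The underlying pattern is "take a reference only if the object is still live"; a standalone sketch using C11 atomics (illustrative, not the kernel refcount API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ns {
    atomic_int refcount;   /* 0 means the namespace is going away */
};

/* Take a reference only if one is still held; mirrors maybe_get_net(). */
static bool maybe_get(struct ns *ns)
{
    int old = atomic_load(&ns->refcount);

    while (old != 0) {
        if (atomic_compare_exchange_weak(&ns->refcount, &old, old + 1))
            return true;
    }
    return false;
}

static void put(struct ns *ns)
{
    atomic_fetch_sub(&ns->refcount, 1);
}

int main(void)
{
    struct ns live = { 1 }, dying = { 0 };

    if (maybe_get(&live)) {
        printf("live ns: doing work\n");
        put(&live);
    }
    if (!maybe_get(&dying))
        printf("dying ns: bailing out early\n");
    return 0;
}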
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index 50c068d660e5..a832c59f0a9c 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -52,7 +52,7 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
52 if (!addit) 52 if (!addit)
53 goto out; 53 goto out;
54 54
55 if (!nf_conncount_add(&priv->hhead, tuple_ptr)) { 55 if (!nf_conncount_add(&priv->hhead, tuple_ptr, zone)) {
56 regs->verdict.code = NF_DROP; 56 regs->verdict.code = NF_DROP;
57 spin_unlock_bh(&priv->lock); 57 spin_unlock_bh(&priv->lock);
58 return; 58 return;
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 4d49529cff61..27d7e4598ab6 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -203,9 +203,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
203 goto err1; 203 goto err1;
204 set->ops->gc_init(set); 204 set->ops->gc_init(set);
205 } 205 }
206 206 }
207 } else if (set->flags & NFT_SET_EVAL)
208 return -EINVAL;
209 207
210 nft_set_ext_prepare(&priv->tmpl); 208 nft_set_ext_prepare(&priv->tmpl);
211 nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_KEY, set->klen); 209 nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_KEY, set->klen);
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index d260ce2d6671..7f3a9a211034 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -66,7 +66,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
66 parent = rcu_dereference_raw(parent->rb_left); 66 parent = rcu_dereference_raw(parent->rb_left);
67 if (interval && 67 if (interval &&
68 nft_rbtree_equal(set, this, interval) && 68 nft_rbtree_equal(set, this, interval) &&
69 nft_rbtree_interval_end(this) && 69 nft_rbtree_interval_end(rbe) &&
70 !nft_rbtree_interval_end(interval)) 70 !nft_rbtree_interval_end(interval))
71 continue; 71 continue;
72 interval = rbe; 72 interval = rbe;
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index f28a0b944087..74e1b3bd6954 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -142,3 +142,4 @@ module_exit(nft_socket_module_exit);
142MODULE_LICENSE("GPL"); 142MODULE_LICENSE("GPL");
143MODULE_AUTHOR("Máté Eckl"); 143MODULE_AUTHOR("Máté Eckl");
144MODULE_DESCRIPTION("nf_tables socket match module"); 144MODULE_DESCRIPTION("nf_tables socket match module");
145MODULE_ALIAS_NFT_EXPR("socket");
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 8790190c6feb..03b9a50ec93b 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -245,12 +245,22 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
245 } 245 }
246 246
247 if (info->helper[0]) { 247 if (info->helper[0]) {
248 if (strnlen(info->helper, sizeof(info->helper)) == sizeof(info->helper)) {
249 ret = -ENAMETOOLONG;
250 goto err3;
251 }
252
248 ret = xt_ct_set_helper(ct, info->helper, par); 253 ret = xt_ct_set_helper(ct, info->helper, par);
249 if (ret < 0) 254 if (ret < 0)
250 goto err3; 255 goto err3;
251 } 256 }
252 257
253 if (info->timeout[0]) { 258 if (info->timeout[0]) {
259 if (strnlen(info->timeout, sizeof(info->timeout)) == sizeof(info->timeout)) {
260 ret = -ENAMETOOLONG;
261 goto err4;
262 }
263
254 ret = xt_ct_set_timeout(ct, par, info->timeout); 264 ret = xt_ct_set_timeout(ct, par, info->timeout);
255 if (ret < 0) 265 if (ret < 0)
256 goto err4; 266 goto err4;
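xt_ct_tg_check() above now rejects helper and timeout names that fill the whole fixed-size field without a terminating NUL, returning -ENAMETOOLONG before the string is ever used. This is the usual sanity check for strings copied in from userspace; a standalone sketch (names illustrative):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

#define NAME_LEN 16

/* 0 if name is properly NUL-terminated, -1 otherwise; mirrors the
 * strnlen() == sizeof() check added in xt_ct_tg_check(). */
static int check_user_name(const char name[NAME_LEN])
{
    if (name[0] == '\0')
        return 0;                      /* empty: nothing to look up */
    if (strnlen(name, NAME_LEN) == NAME_LEN)
        return -1;                     /* no NUL inside the buffer  */
    return 0;
}

int main(void)
{
    char ok[NAME_LEN] = "ftp";
    char bad[NAME_LEN];

    memset(bad, 'A', sizeof(bad));     /* unterminated on purpose */

    printf("ok:  %d\n", check_user_name(ok));
    printf("bad: %d\n", check_user_name(bad));
    return 0;
}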
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 94df000abb92..29c38aa7f726 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -211,7 +211,7 @@ static int __init connmark_mt_init(void)
211static void __exit connmark_mt_exit(void) 211static void __exit connmark_mt_exit(void)
212{ 212{
213 xt_unregister_match(&connmark_mt_reg); 213 xt_unregister_match(&connmark_mt_reg);
214 xt_unregister_target(connmark_tg_reg); 214 xt_unregister_targets(connmark_tg_reg, ARRAY_SIZE(connmark_tg_reg));
215} 215}
216 216
217module_init(connmark_mt_init); 217module_init(connmark_mt_init);
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 6f4c5217d835..bf2890b13212 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -372,8 +372,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
372 372
373 /* Normalize to fit into jiffies */ 373 /* Normalize to fit into jiffies */
374 if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && 374 if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
375 add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC) 375 add_opt.ext.timeout > IPSET_MAX_TIMEOUT)
376 add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC; 376 add_opt.ext.timeout = IPSET_MAX_TIMEOUT;
377 if (info->add_set.index != IPSET_INVALID_ID) 377 if (info->add_set.index != IPSET_INVALID_ID)
378 ip_set_add(info->add_set.index, skb, par, &add_opt); 378 ip_set_add(info->add_set.index, skb, par, &add_opt);
379 if (info->del_set.index != IPSET_INVALID_ID) 379 if (info->del_set.index != IPSET_INVALID_ID)
@@ -407,8 +407,8 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
407 407
408 /* Normalize to fit into jiffies */ 408 /* Normalize to fit into jiffies */
409 if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && 409 if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
410 add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC) 410 add_opt.ext.timeout > IPSET_MAX_TIMEOUT)
411 add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC; 411 add_opt.ext.timeout = IPSET_MAX_TIMEOUT;
412 if (info->add_set.index != IPSET_INVALID_ID) 412 if (info->add_set.index != IPSET_INVALID_ID)
413 ip_set_add(info->add_set.index, skb, par, &add_opt); 413 ip_set_add(info->add_set.index, skb, par, &add_opt);
414 if (info->del_set.index != IPSET_INVALID_ID) 414 if (info->del_set.index != IPSET_INVALID_ID)
@@ -470,7 +470,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
470 } 470 }
471 if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) | 471 if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
472 (info->flags & IPSET_FLAG_MAP_SKBQUEUE)) && 472 (info->flags & IPSET_FLAG_MAP_SKBQUEUE)) &&
473 !(par->hook_mask & (1 << NF_INET_FORWARD | 473 (par->hook_mask & ~(1 << NF_INET_FORWARD |
474 1 << NF_INET_LOCAL_OUT | 474 1 << NF_INET_LOCAL_OUT |
475 1 << NF_INET_POST_ROUTING))) { 475 1 << NF_INET_POST_ROUTING))) {
476 pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n"); 476 pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
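Two fixes in the xt_set target above: user-supplied timeouts are clamped to IPSET_MAX_TIMEOUT rather than the ad-hoc UINT_MAX / MSEC_PER_SEC bound, and the skbprio/skbqueue check is inverted so the target is refused when it is attached to any hook outside FORWARD, OUTPUT or POSTROUTING, not merely when none of the allowed hooks is present. A small standalone sketch of both checks (constants invented for the demo, not the ipset values):

#include <stdint.h>
#include <stdio.h>

#define MAX_TIMEOUT 2147483u    /* demo stand-in for IPSET_MAX_TIMEOUT */

enum { HOOK_PREROUTING = 1 << 0, HOOK_FORWARD = 1 << 1,
       HOOK_OUTPUT = 1 << 2, HOOK_POSTROUTING = 1 << 3 };

static uint32_t clamp_timeout(uint32_t t)
{
    return t > MAX_TIMEOUT ? MAX_TIMEOUT : t;
}

/* Reject if *any* hook outside the allowed set is requested. */
static int hooks_ok(unsigned int hook_mask)
{
    const unsigned int allowed = HOOK_FORWARD | HOOK_OUTPUT |
                                 HOOK_POSTROUTING;

    return (hook_mask & ~allowed) == 0;
}

int main(void)
{
    printf("timeout: %u\n", (unsigned int)clamp_timeout(4000000000u));
    printf("FWD only: %d\n", hooks_ok(HOOK_FORWARD));
    printf("FWD+PRE:  %d\n", hooks_ok(HOOK_FORWARD | HOOK_PREROUTING));
    return 0;
}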
diff --git a/net/rds/loop.c b/net/rds/loop.c
index f2bf78de5688..dac6218a460e 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -193,4 +193,5 @@ struct rds_transport rds_loop_transport = {
193 .inc_copy_to_user = rds_message_inc_copy_to_user, 193 .inc_copy_to_user = rds_message_inc_copy_to_user,
194 .inc_free = rds_loop_inc_free, 194 .inc_free = rds_loop_inc_free,
195 .t_name = "loopback", 195 .t_name = "loopback",
196 .t_type = RDS_TRANS_LOOP,
196}; 197};
diff --git a/net/rds/rds.h b/net/rds/rds.h
index b04c333d9d1c..f2272fb8cd45 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -479,6 +479,11 @@ struct rds_notifier {
479 int n_status; 479 int n_status;
480}; 480};
481 481
482/* Available as part of RDS core, so doesn't need to participate
483 * in get_preferred transport etc
484 */
485#define RDS_TRANS_LOOP 3
486
482/** 487/**
483 * struct rds_transport - transport specific behavioural hooks 488 * struct rds_transport - transport specific behavioural hooks
484 * 489 *
diff --git a/net/rds/recv.c b/net/rds/recv.c
index dc67458b52f0..192ac6f78ded 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -103,6 +103,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
103 rds_stats_add(s_recv_bytes_added_to_socket, delta); 103 rds_stats_add(s_recv_bytes_added_to_socket, delta);
104 else 104 else
105 rds_stats_add(s_recv_bytes_removed_from_socket, -delta); 105 rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
106
107 /* loop transport doesn't send/recv congestion updates */
108 if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
109 return;
110
106 now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs); 111 now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
107 112
108 rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d " 113 rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
diff --git a/net/sctp/output.c b/net/sctp/output.c
index e672dee302c7..7f849b01ec8e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -409,6 +409,21 @@ static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
409 refcount_inc(&sk->sk_wmem_alloc); 409 refcount_inc(&sk->sk_wmem_alloc);
410} 410}
411 411
412static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
413{
414 if (SCTP_OUTPUT_CB(head)->last == head)
415 skb_shinfo(head)->frag_list = skb;
416 else
417 SCTP_OUTPUT_CB(head)->last->next = skb;
418 SCTP_OUTPUT_CB(head)->last = skb;
419
420 head->truesize += skb->truesize;
421 head->data_len += skb->len;
422 head->len += skb->len;
423
424 __skb_header_release(skb);
425}
426
412static int sctp_packet_pack(struct sctp_packet *packet, 427static int sctp_packet_pack(struct sctp_packet *packet,
413 struct sk_buff *head, int gso, gfp_t gfp) 428 struct sk_buff *head, int gso, gfp_t gfp)
414{ 429{
@@ -422,7 +437,7 @@ static int sctp_packet_pack(struct sctp_packet *packet,
422 437
423 if (gso) { 438 if (gso) {
424 skb_shinfo(head)->gso_type = sk->sk_gso_type; 439 skb_shinfo(head)->gso_type = sk->sk_gso_type;
425 NAPI_GRO_CB(head)->last = head; 440 SCTP_OUTPUT_CB(head)->last = head;
426 } else { 441 } else {
427 nskb = head; 442 nskb = head;
428 pkt_size = packet->size; 443 pkt_size = packet->size;
@@ -503,15 +518,8 @@ merge:
503 &packet->chunk_list); 518 &packet->chunk_list);
504 } 519 }
505 520
506 if (gso) { 521 if (gso)
507 if (skb_gro_receive(&head, nskb)) { 522 sctp_packet_gso_append(head, nskb);
508 kfree_skb(nskb);
509 return 0;
510 }
511 if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
512 sk->sk_gso_max_segs))
513 return 0;
514 }
515 523
516 pkt_count++; 524 pkt_count++;
517 } while (!list_empty(&packet->chunk_list)); 525 } while (!list_empty(&packet->chunk_list));
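The new sctp_packet_gso_append() above replaces the earlier skb_gro_receive() call: each segment is chained onto the head skb's frag_list directly, with SCTP_OUTPUT_CB(head)->last caching the tail so appends are O(1), and the head's truesize/data_len/len are bumped by hand. Structurally this is a linked list with a cached tail pointer; a simplified standalone sketch (no skb fields beyond a length):

#include <stdio.h>

struct seg {
    int len;
    struct seg *next;      /* stands in for the skb frag_list chain */
};

struct head {
    int total_len;
    struct seg *frag_list;
    struct seg *last;      /* cached tail, like SCTP_OUTPUT_CB(head)->last */
};

/* Append in O(1) via the cached tail instead of walking the list. */
static void gso_append(struct head *h, struct seg *s)
{
    if (!h->last)
        h->frag_list = s;
    else
        h->last->next = s;
    h->last = s;
    h->total_len += s->len;
}

int main(void)
{
    struct head h = { 0, NULL, NULL };
    struct seg a = { 100, NULL }, b = { 200, NULL };

    gso_append(&h, &a);
    gso_append(&h, &b);
    printf("segments: %d + %d = %d bytes\n", a.len, b.len, h.total_len);
    return 0;
}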
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 973b4471b532..da7f02edcd37 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1273,8 +1273,7 @@ static __poll_t smc_accept_poll(struct sock *parent)
1273 return mask; 1273 return mask;
1274} 1274}
1275 1275
1276static __poll_t smc_poll(struct file *file, struct socket *sock, 1276static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
1277 poll_table *wait)
1278{ 1277{
1279 struct sock *sk = sock->sk; 1278 struct sock *sk = sock->sk;
1280 __poll_t mask = 0; 1279 __poll_t mask = 0;
@@ -1290,7 +1289,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
1290 if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { 1289 if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
1291 /* delegate to CLC child sock */ 1290 /* delegate to CLC child sock */
1292 release_sock(sk); 1291 release_sock(sk);
1293 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); 1292 mask = smc->clcsock->ops->poll_mask(smc->clcsock, events);
1294 lock_sock(sk); 1293 lock_sock(sk);
1295 sk->sk_err = smc->clcsock->sk->sk_err; 1294 sk->sk_err = smc->clcsock->sk->sk_err;
1296 if (sk->sk_err) { 1295 if (sk->sk_err) {
@@ -1308,11 +1307,6 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
1308 } 1307 }
1309 } 1308 }
1310 } else { 1309 } else {
1311 if (sk->sk_state != SMC_CLOSED) {
1312 release_sock(sk);
1313 sock_poll_wait(file, sk_sleep(sk), wait);
1314 lock_sock(sk);
1315 }
1316 if (sk->sk_err) 1310 if (sk->sk_err)
1317 mask |= EPOLLERR; 1311 mask |= EPOLLERR;
1318 if ((sk->sk_shutdown == SHUTDOWN_MASK) || 1312 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@ -1625,7 +1619,7 @@ static const struct proto_ops smc_sock_ops = {
1625 .socketpair = sock_no_socketpair, 1619 .socketpair = sock_no_socketpair,
1626 .accept = smc_accept, 1620 .accept = smc_accept,
1627 .getname = smc_getname, 1621 .getname = smc_getname,
1628 .poll = smc_poll, 1622 .poll_mask = smc_poll_mask,
1629 .ioctl = smc_ioctl, 1623 .ioctl = smc_ioctl,
1630 .listen = smc_listen, 1624 .listen = smc_listen,
1631 .shutdown = smc_shutdown, 1625 .shutdown = smc_shutdown,
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 301f22430469..a127d61e8af9 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -712,7 +712,7 @@ static int __init tls_register(void)
712 build_protos(tls_prots[TLSV4], &tcp_prot); 712 build_protos(tls_prots[TLSV4], &tcp_prot);
713 713
714 tls_sw_proto_ops = inet_stream_ops; 714 tls_sw_proto_ops = inet_stream_ops;
715 tls_sw_proto_ops.poll = tls_sw_poll; 715 tls_sw_proto_ops.poll_mask = tls_sw_poll_mask;
716 tls_sw_proto_ops.splice_read = tls_sw_splice_read; 716 tls_sw_proto_ops.splice_read = tls_sw_splice_read;
717 717
718#ifdef CONFIG_TLS_DEVICE 718#ifdef CONFIG_TLS_DEVICE
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 8ca57d01b18f..f127fac88acf 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -191,18 +191,12 @@ static void tls_free_both_sg(struct sock *sk)
191} 191}
192 192
193static int tls_do_encryption(struct tls_context *tls_ctx, 193static int tls_do_encryption(struct tls_context *tls_ctx,
194 struct tls_sw_context_tx *ctx, size_t data_len, 194 struct tls_sw_context_tx *ctx,
195 gfp_t flags) 195 struct aead_request *aead_req,
196 size_t data_len)
196{ 197{
197 unsigned int req_size = sizeof(struct aead_request) +
198 crypto_aead_reqsize(ctx->aead_send);
199 struct aead_request *aead_req;
200 int rc; 198 int rc;
201 199
202 aead_req = kzalloc(req_size, flags);
203 if (!aead_req)
204 return -ENOMEM;
205
206 ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size; 200 ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
207 ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size; 201 ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
208 202
@@ -219,7 +213,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
219 ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size; 213 ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
220 ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size; 214 ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
221 215
222 kfree(aead_req);
223 return rc; 216 return rc;
224} 217}
225 218
@@ -228,8 +221,14 @@ static int tls_push_record(struct sock *sk, int flags,
228{ 221{
229 struct tls_context *tls_ctx = tls_get_ctx(sk); 222 struct tls_context *tls_ctx = tls_get_ctx(sk);
230 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 223 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
224 struct aead_request *req;
231 int rc; 225 int rc;
232 226
227 req = kzalloc(sizeof(struct aead_request) +
228 crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
229 if (!req)
230 return -ENOMEM;
231
233 sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1); 232 sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
234 sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1); 233 sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
235 234
@@ -245,15 +244,14 @@ static int tls_push_record(struct sock *sk, int flags,
245 tls_ctx->pending_open_record_frags = 0; 244 tls_ctx->pending_open_record_frags = 0;
246 set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags); 245 set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
247 246
248 rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size, 247 rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
249 sk->sk_allocation);
250 if (rc < 0) { 248 if (rc < 0) {
251 /* If we are called from write_space and 249 /* If we are called from write_space and
252 * we fail, we need to set this SOCK_NOSPACE 250 * we fail, we need to set this SOCK_NOSPACE
253 * to trigger another write_space in the future. 251 * to trigger another write_space in the future.
254 */ 252 */
255 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 253 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
256 return rc; 254 goto out_req;
257 } 255 }
258 256
259 free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem, 257 free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
@@ -268,6 +266,8 @@ static int tls_push_record(struct sock *sk, int flags,
268 tls_err_abort(sk, EBADMSG); 266 tls_err_abort(sk, EBADMSG);
269 267
270 tls_advance_record_sn(sk, &tls_ctx->tx); 268 tls_advance_record_sn(sk, &tls_ctx->tx);
269out_req:
270 kfree(req);
271 return rc; 271 return rc;
272} 272}
273 273
@@ -754,7 +754,7 @@ int tls_sw_recvmsg(struct sock *sk,
754 struct sk_buff *skb; 754 struct sk_buff *skb;
755 ssize_t copied = 0; 755 ssize_t copied = 0;
756 bool cmsg = false; 756 bool cmsg = false;
757 int err = 0; 757 int target, err = 0;
758 long timeo; 758 long timeo;
759 759
760 flags |= nonblock; 760 flags |= nonblock;
@@ -764,6 +764,7 @@ int tls_sw_recvmsg(struct sock *sk,
764 764
765 lock_sock(sk); 765 lock_sock(sk);
766 766
767 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
767 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 768 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
768 do { 769 do {
769 bool zc = false; 770 bool zc = false;
@@ -856,6 +857,9 @@ fallback_to_reg_recv:
856 goto recv_end; 857 goto recv_end;
857 } 858 }
858 } 859 }
860 /* If we have a new message from strparser, continue now. */
861 if (copied >= target && !ctx->recv_pkt)
862 break;
859 } while (len); 863 } while (len);
860 864
861recv_end: 865recv_end:
@@ -915,23 +919,22 @@ splice_read_end:
915 return copied ? : err; 919 return copied ? : err;
916} 920}
917 921
918unsigned int tls_sw_poll(struct file *file, struct socket *sock, 922__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events)
919 struct poll_table_struct *wait)
920{ 923{
921 unsigned int ret;
922 struct sock *sk = sock->sk; 924 struct sock *sk = sock->sk;
923 struct tls_context *tls_ctx = tls_get_ctx(sk); 925 struct tls_context *tls_ctx = tls_get_ctx(sk);
924 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 926 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
927 __poll_t mask;
925 928
926 /* Grab POLLOUT and POLLHUP from the underlying socket */ 929 /* Grab EPOLLOUT and EPOLLHUP from the underlying socket */
927 ret = ctx->sk_poll(file, sock, wait); 930 mask = ctx->sk_poll_mask(sock, events);
928 931
929 /* Clear POLLIN bits, and set based on recv_pkt */ 932 /* Clear EPOLLIN bits, and set based on recv_pkt */
930 ret &= ~(POLLIN | POLLRDNORM); 933 mask &= ~(EPOLLIN | EPOLLRDNORM);
931 if (ctx->recv_pkt) 934 if (ctx->recv_pkt)
932 ret |= POLLIN | POLLRDNORM; 935 mask |= EPOLLIN | EPOLLRDNORM;
933 936
934 return ret; 937 return mask;
935} 938}
936 939
937static int tls_read_size(struct strparser *strp, struct sk_buff *skb) 940static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@ -1188,7 +1191,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1188 sk->sk_data_ready = tls_data_ready; 1191 sk->sk_data_ready = tls_data_ready;
1189 write_unlock_bh(&sk->sk_callback_lock); 1192 write_unlock_bh(&sk->sk_callback_lock);
1190 1193
1191 sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll; 1194 sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask;
1192 1195
1193 strp_check_rcv(&sw_ctx_rx->strp); 1196 strp_check_rcv(&sw_ctx_rx->strp);
1194 } 1197 }
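Three changes in tls_sw.c above: the aead_request is now allocated once by tls_push_record() and passed into tls_do_encryption(), with a single kfree() on the out_req path instead of an allocate/free pair inside the callee; tls_sw_recvmsg() computes a target via sock_rcvlowat() and breaks out of the loop once it is met and no further record is queued by the strparser; and the poll hook becomes a poll_mask-style function (matching the smc_poll_mask conversion earlier in this series) that derives readiness from ctx->recv_pkt. The allocation change is the familiar "caller owns the scratch buffer" refactor; a standalone sketch (names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Before: encrypt_record() allocated and freed its own scratch request.
 * After:  the caller allocates it once and frees it on every exit path. */
static int encrypt_record(unsigned char *req, size_t req_size,
                          const char *plaintext)
{
    memset(req, 0, req_size);            /* use the caller's buffer */
    printf("encrypting \"%s\" with a %zu-byte request\n",
           plaintext, req_size);
    return 0;
}

static int push_record(const char *plaintext)
{
    size_t req_size = 128;               /* stand-in for the aead req size */
    unsigned char *req = calloc(1, req_size);
    int rc;

    if (!req)
        return -1;

    rc = encrypt_record(req, req_size, plaintext);

    free(req);                           /* single free on the way out */
    return rc;
}

int main(void)
{
    return push_record("hello") ? 1 : 0;
}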
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 5fe35aafdd9c..48e8097339ab 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1012,6 +1012,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
1012 nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); 1012 nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
1013 1013
1014 list_del_rcu(&wdev->list); 1014 list_del_rcu(&wdev->list);
1015 synchronize_rcu();
1015 rdev->devlist_generation++; 1016 rdev->devlist_generation++;
1016 1017
1017 switch (wdev->iftype) { 1018 switch (wdev->iftype) {
diff --git a/net/wireless/util.c b/net/wireless/util.c
index b5bb1c309914..3c654cd7ba56 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1746,6 +1746,8 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
1746 if (!rdev->ops->get_station) 1746 if (!rdev->ops->get_station)
1747 return -EOPNOTSUPP; 1747 return -EOPNOTSUPP;
1748 1748
1749 memset(sinfo, 0, sizeof(*sinfo));
1750
1749 return rdev_get_station(rdev, dev, mac_addr, sinfo); 1751 return rdev_get_station(rdev, dev, mac_addr, sinfo);
1750} 1752}
1751EXPORT_SYMBOL(cfg80211_get_station); 1753EXPORT_SYMBOL(cfg80211_get_station);
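In cfg80211 above, cfg80211_unregister_wdev() now calls synchronize_rcu() after list_del_rcu() so RCU readers still traversing the device list finish before teardown continues, and cfg80211_get_station() zeroes the caller's station_info before handing it to the driver, so fields the driver does not fill read back as zero rather than as stale memory. Zeroing an out-parameter before delegating is a cheap way to avoid reporting uninitialised data; a tiny standalone sketch:

#include <stdio.h>
#include <string.h>

struct station_info {
    unsigned long rx_bytes;
    unsigned long tx_bytes;
    int signal;
};

/* A driver that only knows about the signal field. */
static int driver_get_station(struct station_info *sinfo)
{
    sinfo->signal = -55;
    return 0;
}

static int get_station(struct station_info *sinfo)
{
    /* Clear everything first so unfilled fields read as zero. */
    memset(sinfo, 0, sizeof(*sinfo));
    return driver_get_station(sinfo);
}

int main(void)
{
    struct station_info sinfo;

    get_station(&sinfo);
    printf("rx=%lu tx=%lu signal=%d\n",
           sinfo.rx_bytes, sinfo.tx_bytes, sinfo.signal);
    return 0;
}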
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index b9ef487c4618..f47abb46c587 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -204,7 +204,8 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
204 long npgs; 204 long npgs;
205 int err; 205 int err;
206 206
207 umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL); 207 umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
208 GFP_KERNEL | __GFP_NOWARN);
208 if (!umem->pgs) 209 if (!umem->pgs)
209 return -ENOMEM; 210 return -ENOMEM;
210 211
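xdp_umem_pin_pages() above adds __GFP_NOWARN to the kcalloc() of the page-pointer array: the number of pages is driven by the user's umem registration, so the allocation can legitimately fail for large requests, and the failure is already handled by returning -ENOMEM without the allocator dumping a warning. In userspace terms the pattern is simply "treat a failed user-sized allocation as a normal error"; a brief sketch:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct umem {
    size_t npgs;    /* user-controlled page count */
    void **pgs;
};

static int pin_pages(struct umem *umem)
{
    /* A user-sized allocation may fail; report -ENOMEM quietly
     * instead of treating it as an internal error. */
    umem->pgs = calloc(umem->npgs, sizeof(*umem->pgs));
    if (!umem->pgs)
        return -ENOMEM;
    return 0;
}

int main(void)
{
    struct umem small = { 16, NULL };
    struct umem huge  = { (size_t)-1 / sizeof(void *), NULL };

    printf("small: %d\n", pin_pages(&small));
    printf("huge:  %d\n", pin_pages(&huge));
    free(small.pgs);
    free(huge.pgs);
    return 0;
}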
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 607ed8729c06..7a6214e9ae58 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -16,9 +16,7 @@ LDLIBS += -lcap -lelf -lrt -lpthread
16TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read 16TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
17all: $(TEST_CUSTOM_PROGS) 17all: $(TEST_CUSTOM_PROGS)
18 18
19$(TEST_CUSTOM_PROGS): urandom_read 19$(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
20
21urandom_read: urandom_read.c
22 $(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id 20 $(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id
23 21
24# Order correspond to 'make run_tests' order 22# Order correspond to 'make run_tests' order
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index de97e4ff705c..637ea0219617 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -568,7 +568,7 @@
568 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1", 568 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
569 "matchCount": "1", 569 "matchCount": "1",
570 "teardown": [ 570 "teardown": [
571 "$TC actions flush action skbedit" 571 "$TC actions flush action ife"
572 ] 572 ]
573 }, 573 },
574 { 574 {