author		Linus Torvalds <torvalds@linux-foundation.org>	2016-09-12 10:56:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-09-12 10:56:06 -0400
commit		da499f8f5385c181e29978fdaab15a58de185302 (patch)
tree		eadc99e5a3c378fdc6508c5b1ad1c3933eed29b1
parent		9395452b4aab7bc2475ef8935b4a4fb99d778d70 (diff)
parent		373df3131aa83bd3e0ea7cd15be92d942d75fc72 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Mostly small sets of driver fixes scattered all over the place.

  1) Mediatek driver fixes from Sean Wang. Forward port not written
     correctly during TX map, missed handling of EPROBE_DEFER, and
     mistaken use of put_page() instead of skb_free_frag().

  2) Fix socket double-free in KCM code, from WANG Cong.

  3) QED driver fixes from Sudarsana Reddy Kalluru, including a fix for
     using the dcbx buffers before initializing them.

  4) Mellanox Switch driver fixes from Jiri Pirko, including a fix for
     double fib removals and an error handling fix in
     mlxsw_sp_module_init().

  5) Fix kernel panic when enabling LLDP in i40e driver, from Dave
     Ertman.

  6) Fix padding of TSO packets in thunderx driver, from Sunil Goutham.

  7) TCP's rcv_wup not initialized properly when using fastopen, from
     Neal Cardwell.

  8) Don't use uninitialized flow keys in flow dissector, from Gao Feng.

  9) Use after free in l2tp module unload, from Sabrina Dubroca.

 10) Fix interrupt registry ordering issues in smsc911x driver, from
     Jeremy Linton.

 11) Fix crashes in bonding having to do with enslaving and rx_handler,
     from Mahesh Bandewar.

 12) AF_UNIX deadlock fixes from Linus.

 13) In mlx5 driver, don't read skb->xmit_mode after it might have been
     freed from the TX reclaim path. From Tariq Toukan.

 14) Fix a bug from 2015 in TCP Yeah where the congestion window does
     not increase, from Artem Germanov.

 15) Don't pad frames on receive in NFP driver, from Jakub Kicinski.

 16) Fix chunk fragmenting in SCTP wrt. GSO, from Marcelo Ricardo
     Leitner.

 17) Fix deletion of VRF routes, from Mark Tomlinson.

 18) Fix device refcount leak when DAD fails in ipv6, from Wei Yongjun"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (101 commits)
  net/mlx4_en: Fix panic on xmit while port is down
  net/mlx4_en: Fixes for DCBX
  net/mlx4_en: Fix the return value of mlx4_en_dcbnl_set_state()
  net/mlx4_en: Fix the return value of mlx4_en_dcbnl_set_all()
  net: ethernet: renesas: sh_eth: add POST registers for rz
  drivers: net: phy: mdio-xgene: Add hardware dependency
  dwc_eth_qos: do not register semi-initialized device
  sctp: identify chunks that need to be fragmented at IP level
  mlxsw: spectrum: Set port type before setting its address
  mlxsw: spectrum_router: Fix error path in mlxsw_sp_router_init
  nfp: don't pad frames on receive
  nfp: drop support for old firmware ABIs
  nfp: remove linux/version.h includes
  tcp: cwnd does not increase in TCP YeAH
  net/mlx5e: Fix parsing of vlan packets when updating lro header
  net/mlx5e: Fix global PFC counters replication
  net/mlx5e: Prevent casting overflow
  net/mlx5e: Move an_disable_cap bit to a new position
  net/mlx5e: Fix xmit_more counter race issue
  tcp: fastopen: avoid negative sk_forward_alloc
  ...
-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 19
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 11
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 23
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 11
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 20
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 64
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 15
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 82
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 160
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 28
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 3
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 7
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 213
-rw-r--r--  drivers/net/ethernet/synopsys/dwc_eth_qos.c | 38
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/net/vxlan.c | 38
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 18
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 28
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 8
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n_aggr.c | 3
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 5
-rw-r--r--  include/linux/netdevice.h | 1
-rw-r--r--  include/net/af_unix.h | 2
-rw-r--r--  include/net/cfg80211.h | 9
-rw-r--r--  include/net/ip_fib.h | 3
-rw-r--r--  include/net/netfilter/nft_meta.h | 4
-rw-r--r--  include/net/netfilter/nft_reject.h | 4
-rw-r--r--  net/bridge/br_input.c | 7
-rw-r--r--  net/bridge/br_multicast.c | 2
-rw-r--r--  net/bridge/netfilter/ebtables.c | 2
-rw-r--r--  net/bridge/netfilter/nft_meta_bridge.c | 1
-rw-r--r--  net/core/dev.c | 16
-rw-r--r--  net/core/flow_dissector.c | 6
-rw-r--r--  net/ipv4/devinet.c | 11
-rw-r--r--  net/ipv4/fib_frontend.c | 3
-rw-r--r--  net/ipv4/fib_semantics.c | 8
-rw-r--r--  net/ipv4/netfilter/nft_reject_ipv4.c | 1
-rw-r--r--  net/ipv4/tcp_fastopen.c | 2
-rw-r--r--  net/ipv4/tcp_yeah.c | 2
-rw-r--r--  net/ipv4/xfrm4_policy.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 18
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/netfilter/nft_reject_ipv6.c | 1
-rw-r--r--  net/ipv6/ping.c | 9
-rw-r--r--  net/ipv6/xfrm6_input.c | 1
-rw-r--r--  net/ipv6/xfrm6_policy.c | 2
-rw-r--r--  net/kcm/kcmsock.c | 3
-rw-r--r--  net/l2tp/l2tp_core.c | 3
-rw-r--r--  net/mac80211/tdls.c | 7
-rw-r--r--  net/netfilter/nf_tables_netdev.c | 1
-rw-r--r--  net/netfilter/nfnetlink_acct.c | 6
-rw-r--r--  net/netfilter/nfnetlink_cttimeout.c | 49
-rw-r--r--  net/netfilter/nft_meta.c | 17
-rw-r--r--  net/netfilter/nft_reject.c | 16
-rw-r--r--  net/netfilter/nft_reject_inet.c | 7
-rw-r--r--  net/sctp/output.c | 13
-rw-r--r--  net/tipc/name_distr.c | 8
-rw-r--r--  net/unix/af_unix.c | 111
-rw-r--r--  net/wireless/wext-core.c | 25
-rw-r--r--  net/xfrm/xfrm_input.c | 14
-rw-r--r--  net/xfrm/xfrm_policy.c | 4
-rw-r--r--  net/xfrm/xfrm_user.c | 13
92 files changed, 875 insertions(+), 610 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 6781a3febd59..a5e1270dfbf1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2485,7 +2485,7 @@ F: include/net/bluetooth/
 BONDING DRIVER
 M:	Jay Vosburgh <j.vosburgh@gmail.com>
 M:	Veaceslav Falico <vfalico@gmail.com>
-M:	Andy Gospodarek <gospo@cumulusnetworks.com>
+M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
@@ -3269,7 +3269,7 @@ S: Maintained
 F:	drivers/net/wan/cosa*
 
 CPMAC ETHERNET DRIVER
-M:	Florian Fainelli <florian@openwrt.org>
+M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/ti/cpmac.c
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 217e8da0628c..9599ed6f1213 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1341,9 +1341,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 			   slave_dev->name);
 	}
 
-	/* already enslaved */
-	if (slave_dev->flags & IFF_SLAVE) {
-		netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
+	/* already in-use? */
+	if (netdev_is_rx_handler_busy(slave_dev)) {
+		netdev_err(bond_dev,
+			   "Error: Device is in use and cannot be enslaved\n");
 		return -EBUSY;
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 97e892511666..fa3386bb14f7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -772,6 +772,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
 		  (bp->common.bc_ver & 0xff00) >> 8,
 		  (bp->common.bc_ver & 0xff));
 
+	if (pci_channel_offline(bp->pdev)) {
+		BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
+		return;
+	}
+
 	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
 	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
 		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
@@ -9415,10 +9420,16 @@ unload_error:
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
-	/* Reset the chip */
-	rc = bnx2x_reset_hw(bp, reset_code);
-	if (rc)
-		BNX2X_ERR("HW_RESET failed\n");
+	/* Reset the chip, unless PCI function is offline. If we reach this
+	 * point following a PCI error handling, it means device is really
+	 * in a bad state and we're about to remove it, so reset the chip
+	 * is not a good idea.
+	 */
+	if (!pci_channel_offline(bp->pdev)) {
+		rc = bnx2x_reset_hw(bp, reset_code);
+		if (rc)
+			BNX2X_ERR("HW_RESET failed\n");
+	}
 
 	/* Report UNLOAD_DONE to MCP */
 	bnx2x_send_unload_done(bp, keep_link);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2cf79100c9cb..228c964e709a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -353,8 +353,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		push_len = (length + sizeof(*tx_push) + 7) / 8;
 		if (push_len > 16) {
 			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
-			__iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
-					 push_len - 16);
+			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+					 (push_len - 16) << 1);
 		} else {
 			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
 					 push_len);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 659261218d9f..a2551bcd1027 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14012,6 +14012,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
 	    (!ec->rx_coalesce_usecs) ||
 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+	    (!ec->tx_coalesce_usecs) ||
 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
@@ -14022,16 +14023,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
 		return -EINVAL;
 
-	/* No rx interrupts will be generated if both are zero */
-	if ((ec->rx_coalesce_usecs == 0) &&
-	    (ec->rx_max_coalesced_frames == 0))
-		return -EINVAL;
-
-	/* No tx interrupts will be generated if both are zero */
-	if ((ec->tx_coalesce_usecs == 0) &&
-	    (ec->tx_max_coalesced_frames == 0))
-		return -EINVAL;
-
 	/* Only copy relevant parameters, ignore all others. */
 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 89c0cfa9719f..d954a97b0b0b 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1323,6 +1323,24 @@ dma_error:
 	return 0;
 }
 
+static inline int macb_clear_csum(struct sk_buff *skb)
+{
+	/* no change for packets without checksum offloading */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	/* make sure we can modify the header */
+	if (unlikely(skb_cow_head(skb, 0)))
+		return -1;
+
+	/* initialize checksum field
+	 * This is required - at least for Zynq, which otherwise calculates
+	 * wrong UDP header checksums for UDP packets with UDP data len <=2
+	 */
+	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
+	return 0;
+}
+
 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	u16 queue_index = skb_get_queue_mapping(skb);
@@ -1362,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
+	if (macb_clear_csum(skb)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
 	/* Map socket buffer for DMA transfer */
 	if (!macb_tx_map(bp, queue, skb)) {
 		dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 83025bb4737c..e29815d9e6f4 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -279,6 +279,7 @@ struct nicvf {
 	u8			sqs_id;
 	bool			sqs_mode;
 	bool			hw_tso;
+	bool			t88;
 
 	/* Receive buffer alloc */
 	u32			rb_page_offset;
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 16ed20357c5c..85cc782b9060 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
 	int lmac;
 	u64 lmac_cfg;
 
-	/* Max value that can be set is 60 */
-	if (size > 60)
-		size = 60;
+	/* There is a issue in HW where-in while sending GSO sized
+	 * pkts as part of TSO, if pkt len falls below this size
+	 * NIC will zero PAD packet and also updates IP total length.
+	 * Hence set this value to lessthan min pkt size of MAC+IP+TCP
+	 * headers, BGX will do the padding to transmit 64 byte pkt.
+	 */
+	if (size > 52)
+		size = 52;
 
 	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
 		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a19e73f11d73..3240349615bd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 	struct nicvf *nic = netdev_priv(netdev);
 	struct snd_queue *sq;
 	struct sq_hdr_subdesc *hdr;
+	struct sq_hdr_subdesc *tso_sqe;
 
 	sq = &nic->qs->sq[cqe_tx->sq_idx];
 
@@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 
 	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
 	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
-	/* For TSO offloaded packets only one SQE will have a valid SKB */
 	if (skb) {
+		/* Check for dummy descriptor used for HW TSO offload on 88xx */
+		if (hdr->dont_send) {
+			/* Get actual TSO descriptors and free them */
+			tso_sqe =
+			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+			nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+		}
 		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
 		prefetch(skb);
 		dev_consume_skb_any(skb);
 		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
 	} else {
-		/* In case of HW TSO, HW sends a CQE for each segment of a TSO
-		 * packet instead of a single CQE for the whole TSO packet
-		 * transmitted. Each of this CQE points to the same SQE, so
-		 * avoid freeing same SQE multiple times.
+		/* In case of SW TSO on 88xx, only last segment will have
+		 * a SKB attached, so just free SQEs here.
 		 */
 		if (!nic->hw_tso)
 			nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct net_device *netdev;
 	struct nicvf *nic;
 	int err, qcount;
+	u16 sdevid;
 
 	err = pci_enable_device(pdev);
 	if (err) {
@@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!pass1_silicon(nic->pdev))
 		nic->hw_tso = true;
 
+	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+	if (sdevid == 0xA134)
+		nic->t88 = true;
+
 	/* Check if this VF is in QS only mode */
 	if (nic->sqs_mode)
 		return 0;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 0ff8e60deccb..dda3ea3f3bb6 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
 	return num_edescs + sh->gso_segs;
 }
 
+#define POST_CQE_DESC_COUNT 2
+
 /* Get the number of SQ descriptors needed to xmit this skb */
 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
 {
@@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
 		return subdesc_cnt;
 	}
 
+	/* Dummy descriptors to get TSO pkt completion notification */
+	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
+		subdesc_cnt += POST_CQE_DESC_COUNT;
+
 	if (skb_shinfo(skb)->nr_frags)
 		subdesc_cnt += skb_shinfo(skb)->nr_frags;
 
@@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 	struct sq_hdr_subdesc *hdr;
 
 	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
-	sq->skbuff[qentry] = (u64)skb;
-
 	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
 	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
-	/* Enable notification via CQE after processing SQE */
-	hdr->post_cqe = 1;
-	/* No of subdescriptors following this */
-	hdr->subdesc_cnt = subdesc_cnt;
+
+	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
+		/* post_cqe = 0, to avoid HW posting a CQE for every TSO
+		 * segment transmitted on 88xx.
+		 */
+		hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
+	} else {
+		sq->skbuff[qentry] = (u64)skb;
+		/* Enable notification via CQE after processing SQE */
+		hdr->post_cqe = 1;
+		/* No of subdescriptors following this */
+		hdr->subdesc_cnt = subdesc_cnt;
+	}
 	hdr->tot_len = len;
 
 	/* Offload checksum calculation to HW */
@@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
 	gather->addr = data;
 }
 
+/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
+ * packet so that a CQE is posted as a notifation for transmission of
+ * TSO packet.
+ */
+static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
+					    int tso_sqe, struct sk_buff *skb)
+{
+	struct sq_imm_subdesc *imm;
+	struct sq_hdr_subdesc *hdr;
+
+	sq->skbuff[qentry] = (u64)skb;
+
+	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+	/* Enable notification via CQE after processing SQE */
+	hdr->post_cqe = 1;
+	/* There is no packet to transmit here */
+	hdr->dont_send = 1;
+	hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
+	hdr->tot_len = 1;
+	/* Actual TSO header SQE index, needed for cleanup */
+	hdr->rsvd2 = tso_sqe;
+
+	qentry = nicvf_get_nxt_sqentry(sq, qentry);
+	imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
+	memset(imm, 0, SND_QUEUE_DESC_SIZE);
+	imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
+	imm->len = 1;
+}
+
 /* Segment a TSO packet into 'gso_size' segments and append
  * them to SQ for transfer
  */
@@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
 {
 	int i, size;
-	int subdesc_cnt;
+	int subdesc_cnt, tso_sqe = 0;
 	int sq_num, qentry;
 	struct queue_set *qs;
 	struct snd_queue *sq;
@@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
 	/* Add SQ header subdesc */
 	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
 				 skb, skb->len);
+	tso_sqe = qentry;
 
 	/* Add SQ gather subdescs */
 	qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
 	}
 
 doorbell:
+	if (nic->t88 && skb_shinfo(skb)->gso_size) {
+		qentry = nicvf_get_nxt_sqentry(sq, qentry);
+		nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+	}
+
 	/* make sure all memory stores are done before ringing doorbell */
 	smp_wmb();
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 828ed28c3c14..d0b3a1bb82ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5113,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 		       DCB_CAP_DCBX_VER_IEEE;
 
 		pf->flags |= I40E_FLAG_DCB_CAPABLE;
-		/* Enable DCB tagging only when more than one TC */
+		/* Enable DCB tagging only when more than one TC
+		 * or explicitly disable if only one TC
+		 */
 		if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
 			pf->flags |= I40E_FLAG_DCB_ENABLED;
+		else
+			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
 		dev_dbg(&pf->pdev->dev,
 			"DCBX offload is supported for this PF.\n");
 	}
@@ -5716,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 	u8 type;
 
 	/* Not DCB capable or capability disabled */
-	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
 		return ret;
 
 	/* Ignore if event is not for Nearest Bridge */
@@ -7896,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 #endif
 		       I40E_FLAG_RSS_ENABLED	|
 		       I40E_FLAG_DCB_CAPABLE	|
+		       I40E_FLAG_DCB_ENABLED	|
 		       I40E_FLAG_SRIOV_ENABLED	|
 		       I40E_FLAG_FD_SB_ENABLED	|
 		       I40E_FLAG_FD_ATR_ENABLED |
@@ -10502,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 			       I40E_FLAG_FD_SB_ENABLED	|
 			       I40E_FLAG_FD_ATR_ENABLED	|
 			       I40E_FLAG_DCB_CAPABLE	|
+			       I40E_FLAG_DCB_ENABLED	|
 			       I40E_FLAG_SRIOV_ENABLED	|
 			       I40E_FLAG_VMDQ_ENABLED);
 	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
@@ -10525,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 		/* Not enough queues for all TCs */
 		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
-			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+				       I40E_FLAG_DCB_ENABLED);
 			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
 		}
 		pf->num_lan_qps = max_t(int, pf->rss_size_max,
@@ -10922,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	err = i40e_init_pf_dcb(pf);
 	if (err) {
 		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
-		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED);
 		/* Continue without DCB enabled */
 	}
 #endif /* CONFIG_I40E_DCB */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index f1609542adf1..d9199151a83e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats {
 	MTK_ETHTOOL_STAT(rx_flow_control_packets),
 };
 
+static const char * const mtk_clks_source_name[] = {
+	"ethif", "esw", "gp1", "gp2"
+};
+
 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
 {
 	__raw_writel(val, eth->base + reg);
@@ -291,7 +295,7 @@ err_phy:
 static int mtk_mdio_init(struct mtk_eth *eth)
 {
 	struct device_node *mii_np;
-	int err;
+	int ret;
 
 	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
 	if (!mii_np) {
@@ -300,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth)
 	}
 
 	if (!of_device_is_available(mii_np)) {
-		err = 0;
+		ret = -ENODEV;
 		goto err_put_node;
 	}
 
-	eth->mii_bus = mdiobus_alloc();
+	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
 	if (!eth->mii_bus) {
-		err = -ENOMEM;
+		ret = -ENOMEM;
 		goto err_put_node;
 	}
 
@@ -317,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth)
 	eth->mii_bus->parent = eth->dev;
 
 	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
-	err = of_mdiobus_register(eth->mii_bus, mii_np);
-	if (err)
-		goto err_free_bus;
-
-	return 0;
-
-err_free_bus:
-	mdiobus_free(eth->mii_bus);
+	ret = of_mdiobus_register(eth->mii_bus, mii_np);
 
 err_put_node:
 	of_node_put(mii_np);
-	eth->mii_bus = NULL;
-	return err;
+	return ret;
 }
 
 static void mtk_mdio_cleanup(struct mtk_eth *eth)
@@ -338,8 +334,6 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
 		return;
 
 	mdiobus_unregister(eth->mii_bus);
-	of_node_put(eth->mii_bus->dev.of_node);
-	mdiobus_free(eth->mii_bus);
 }
 
 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
@@ -588,14 +582,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	dma_addr_t mapped_addr;
 	unsigned int nr_frags;
 	int i, n_desc = 1;
-	u32 txd4 = 0;
+	u32 txd4 = 0, fport;
 
 	itxd = ring->next_free;
 	if (itxd == ring->last_free)
 		return -ENOMEM;
 
 	/* set the forward port */
-	txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+	txd4 |= fport;
 
 	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 	memset(tx_buf, 0, sizeof(*tx_buf));
@@ -653,7 +648,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 		WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
 				       TX_DMA_PLEN0(frag_map_size) |
 				       last_frag * TX_DMA_LS0));
-		WRITE_ONCE(txd->txd4, 0);
+		WRITE_ONCE(txd->txd4, fport);
 
 		tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
 		tx_buf = mtk_desc_to_tx_buf(ring, txd);
@@ -865,7 +860,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		/* receive data */
 		skb = build_skb(data, ring->frag_size);
 		if (unlikely(!skb)) {
-			put_page(virt_to_head_page(new_data));
+			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
@@ -1506,10 +1501,7 @@ static void mtk_uninit(struct net_device *dev)
 	struct mtk_eth *eth = mac->hw;
 
 	phy_disconnect(mac->phy_dev);
-	mtk_mdio_cleanup(eth);
 	mtk_irq_disable(eth, ~0);
-	free_irq(eth->irq[1], dev);
-	free_irq(eth->irq[2], dev);
 }
 
 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1813,6 +1805,7 @@ static int mtk_probe(struct platform_device *pdev)
 	if (!eth)
 		return -ENOMEM;
 
+	eth->dev = &pdev->dev;
 	eth->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(eth->base))
 		return PTR_ERR(eth->base);
@@ -1847,21 +1840,21 @@ static int mtk_probe(struct platform_device *pdev)
 			return -ENXIO;
 		}
 	}
+	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+		eth->clks[i] = devm_clk_get(eth->dev,
+					    mtk_clks_source_name[i]);
+		if (IS_ERR(eth->clks[i])) {
+			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+				return -EPROBE_DEFER;
+			return -ENODEV;
+		}
+	}
 
-	eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
-	eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
-	eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
-	eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
-	if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
-	    IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
-		return -ENODEV;
-
-	clk_prepare_enable(eth->clk_ethif);
-	clk_prepare_enable(eth->clk_esw);
-	clk_prepare_enable(eth->clk_gp1);
-	clk_prepare_enable(eth->clk_gp2);
+	clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
+	clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
+	clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
+	clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
 
-	eth->dev = &pdev->dev;
 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
 	INIT_WORK(&eth->pending_work, mtk_pending_work);
 
@@ -1903,15 +1896,24 @@ err_free_dev:
 static int mtk_remove(struct platform_device *pdev)
 {
 	struct mtk_eth *eth = platform_get_drvdata(pdev);
+	int i;
 
-	clk_disable_unprepare(eth->clk_ethif);
-	clk_disable_unprepare(eth->clk_esw);
-	clk_disable_unprepare(eth->clk_gp1);
-	clk_disable_unprepare(eth->clk_gp2);
+	/* stop all devices to make sure that dma is properly shut down */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		mtk_stop(eth->netdev[i]);
+	}
+
+	clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
+	clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
+	clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
+	clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
 
 	netif_napi_del(&eth->tx_napi);
 	netif_napi_del(&eth->rx_napi);
 	mtk_cleanup(eth);
+	mtk_mdio_cleanup(eth);
 	platform_set_drvdata(pdev, NULL);
 
 	return 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index f82e3acb947b..6e1ade7a25c5 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -290,6 +290,17 @@ enum mtk_tx_flags {
 	MTK_TX_FLAGS_PAGE0	= 0x02,
 };
 
+/* This enum allows us to identify how the clock is defined on the array of the
+ * clock in the order
+ */
+enum mtk_clks_map {
+	MTK_CLK_ETHIF,
+	MTK_CLK_ESW,
+	MTK_CLK_GP1,
+	MTK_CLK_GP2,
+	MTK_CLK_MAX
+};
+
 /* struct mtk_tx_buf -	This struct holds the pointers to the memory pointed at
  *			by the TX descriptor s
  * @skb:		The SKB pointer of the packet being sent
@@ -370,10 +381,7 @@ struct mtk_rx_ring {
  * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
  * @phy_scratch_ring:	physical address of scratch_ring
  * @scratch_head:	The scratch memory that scratch_ring points to.
- * @clk_ethif:		The ethif clock
- * @clk_esw:		The switch clock
- * @clk_gp1:		The gmac1 clock
- * @clk_gp2:		The gmac2 clock
+ * @clks:		clock array for all clocks required
  * @mii_bus:		If there is a bus we need to create an instance for it
  * @pending_work:	The workqueue used to reset the dma ring
  */
@@ -400,10 +408,8 @@ struct mtk_eth {
 	struct mtk_tx_dma	*scratch_ring;
 	dma_addr_t		phy_scratch_ring;
 	void			*scratch_head;
-	struct clk		*clk_ethif;
-	struct clk		*clk_esw;
-	struct clk		*clk_gp1;
-	struct clk		*clk_gp2;
+	struct clk		*clks[MTK_CLK_MAX];
+
 	struct mii_bus		*mii_bus;
 	struct work_struct	pending_work;
 };
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 99c6bbdff501..b04760a5034b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -94,7 +94,7 @@ static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
 		*cap = true;
 		break;
 	case DCB_CAP_ATTR_DCBX:
-		*cap = priv->cee_params.dcbx_cap;
+		*cap = priv->dcbx_cap;
 		break;
 	case DCB_CAP_ATTR_PFC_TCS:
 		*cap = 1 << mlx4_max_tc(priv->mdev->dev);
@@ -111,14 +111,14 @@ static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-	return priv->cee_params.dcb_cfg.pfc_state;
+	return priv->cee_config.pfc_state;
 }
 
 static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
 {
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-	priv->cee_params.dcb_cfg.pfc_state = state;
+	priv->cee_config.pfc_state = state;
 }
 
 static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -126,7 +126,7 @@ static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
 {
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-	*setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc;
+	*setting = priv->cee_config.dcb_pfc[priority];
 }
 
 static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
@@ -134,8 +134,8 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 {
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-	priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting;
-	priv->cee_params.dcb_cfg.pfc_state = true;
+	priv->cee_config.dcb_pfc[priority] = setting;
+	priv->cee_config.pfc_state = true;
 }
 
 static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -157,13 +157,11 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
 	struct mlx4_en_dev *mdev = priv->mdev;
-	struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
-	int err = 0;
 
-	if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-		return -EINVAL;
+	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+		return 1;
 
-	if (dcb_cfg->pfc_state) {
+	if (priv->cee_config.pfc_state) {
 		int tc;
 
 		priv->prof->rx_pause = 0;
@@ -171,7 +169,7 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
 		for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
 			u8 tc_mask = 1 << tc;
 
-			switch (dcb_cfg->tc_config[tc].dcb_pfc) {
+			switch (priv->cee_config.dcb_pfc[tc]) {
 			case pfc_disabled:
 				priv->prof->tx_ppp &= ~tc_mask;
 				priv->prof->rx_ppp &= ~tc_mask;
@@ -199,15 +197,17 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
 		en_dbg(DRV, priv, "Set pfc off\n");
 	}
 
-	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+	if (mlx4_SET_PORT_general(mdev->dev, priv->port,
 				    priv->rx_skb_size + ETH_FCS_LEN,
 				    priv->prof->tx_pause,
 				    priv->prof->tx_ppp,
 				    priv->prof->rx_pause,
-				    priv->prof->rx_ppp);
-	if (err)
+				    priv->prof->rx_ppp)) {
 		en_err(priv, "Failed setting pause params\n");
-	return err;
+		return 1;
+	}
+
+	return 0;
 }
 
 static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
@@ -225,7 +225,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	int num_tcs = 0;
 
-	if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return 1;
 
 	if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
@@ -238,7 +238,10 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
 		priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
 	}
 
-	return mlx4_en_setup_tc(dev, num_tcs);
+	if (mlx4_en_setup_tc(dev, num_tcs))
+		return 1;
+
+	return 0;
 }
 
 /* On success returns a non-zero 802.1p user priority bitmap
@@ -252,7 +255,7 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
 		.selector = idtype,
 		.protocol = id,
 	};
-	if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return 0;
 
 	return dcb_getapp(netdev, &app);
@@ -264,7 +267,7 @@ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
 	struct dcb_app app;
 
-	if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return -EINVAL;
 
 	memset(&app, 0, sizeof(struct dcb_app));
@@ -433,7 +436,7 @@ static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
-	return priv->cee_params.dcbx_cap;
+	return priv->dcbx_cap;
 }
 
 static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -442,7 +445,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
 	struct ieee_ets ets = {0};
 	struct ieee_pfc pfc = {0};
 
-	if (mode == priv->cee_params.dcbx_cap)
+	if (mode == priv->dcbx_cap)
 		return 0;
 
 	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -451,7 +454,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
 	    !(mode & DCB_CAP_DCBX_HOST))
 		goto err;
 
-	priv->cee_params.dcbx_cap = mode;
+	priv->dcbx_cap = mode;
 
 	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
 	pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4198e9bf89d0..fedb829276f4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -71,10 +71,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
 #ifdef CONFIG_MLX4_EN_DCB
 	if (!mlx4_is_slave(priv->mdev->dev)) {
 		if (up) {
-			priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+			if (priv->dcbx_cap)
+				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
 		} else {
 			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
-			priv->cee_params.dcb_cfg.pfc_state = false;
+			priv->cee_config.pfc_state = false;
 		}
 	}
 #endif /* CONFIG_MLX4_EN_DCB */
@@ -3048,9 +3049,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	struct mlx4_en_priv *priv;
 	int i;
 	int err;
-#ifdef CONFIG_MLX4_EN_DCB
-	struct tc_configuration *tc;
-#endif
 
 	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
 				 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -3117,16 +3115,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
 	if (!mlx4_is_slave(priv->mdev->dev)) {
-		priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE |
-					    DCB_CAP_DCBX_HOST |
-					    DCB_CAP_DCBX_VER_IEEE;
+		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
+				 DCB_CAP_DCBX_VER_IEEE;
 		priv->flags |= MLX4_EN_DCB_ENABLED;
-		priv->cee_params.dcb_cfg.pfc_state = false;
+		priv->cee_config.pfc_state = false;
 
-		for (i = 0; i < MLX4_EN_NUM_UP; i++) {
-			tc = &priv->cee_params.dcb_cfg.tc_config[i];
-			tc->dcb_pfc = pfc_disabled;
-		}
+		for (i = 0; i < MLX4_EN_NUM_UP; i++)
+			priv->cee_config.dcb_pfc[i] = pfc_disabled;
 
 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
 			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9df87ca0515a..e2509bba3e7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -818,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
 				  &inline_ok, &fragptr);
 	if (unlikely(!real_size))
-		goto tx_drop;
+		goto tx_drop_count;
 
 	/* Align descriptor to TXBB size */
 	desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -826,7 +826,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
 		if (netif_msg_tx_err(priv))
 			en_warn(priv, "Oversized header or SG list\n");
-		goto tx_drop;
+		goto tx_drop_count;
 	}
 
 	bf_ok = ring->bf_enabled;
@@ -1071,9 +1071,10 @@ tx_drop_unmap:
 			       PCI_DMA_TODEVICE);
 	}
 
+tx_drop_count:
+	ring->tx_dropped++;
 tx_drop:
 	dev_kfree_skb_any(skb);
-	ring->tx_dropped++;
 	return NETDEV_TX_OK;
 }
 
@@ -1106,7 +1107,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
 		goto tx_drop;
 
 	if (mlx4_en_is_tx_ring_full(ring))
-		goto tx_drop;
+		goto tx_drop_count;
 
 	/* fetch ring->cons far ahead before needing it to avoid stall */
 	ring_cons = READ_ONCE(ring->cons);
@@ -1176,7 +1177,8 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
 
 	return NETDEV_TX_OK;
 
-tx_drop:
+tx_drop_count:
 	ring->tx_dropped++;
+tx_drop:
 	return NETDEV_TX_BUSY;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2c2913dcae98..9099dbd04951 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -482,20 +482,10 @@ enum dcb_pfc_type {
 	pfc_enabled_rx
 };
 
-struct tc_configuration {
-	enum dcb_pfc_type dcb_pfc;
-};
-
 struct mlx4_en_cee_config {
 	bool	pfc_state;
-	struct tc_configuration tc_config[MLX4_EN_NUM_UP];
+	enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
 };
-
-struct mlx4_en_cee_params {
-	u8 dcbx_cap;
-	struct mlx4_en_cee_config dcb_cfg;
-};
-
 #endif
 
 struct ethtool_flow_id {
@@ -624,7 +614,8 @@ struct mlx4_en_priv {
 	struct ieee_ets ets;
 	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
 	enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
-	struct mlx4_en_cee_params cee_params;
+	struct mlx4_en_cee_config cee_config;
+	u8 dcbx_cap;
 #endif
 #ifdef CONFIG_RFS_ACCEL
 	spinlock_t filters_lock;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 3d2095e5c61c..c5b2064297a1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -52,7 +52,7 @@
 
 #define MLX4_FLAG_V_IGNORE_FCS_MASK	0x2
 #define MLX4_IGNORE_FCS_MASK		0x1
-#define MLNX4_TX_MAX_NUMBER		8
+#define MLX4_TC_MAX_NUMBER		8
 
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
 {
@@ -2022,7 +2022,7 @@ int mlx4_max_tc(struct mlx4_dev *dev)
 	u8 num_tc = dev->caps.max_tc_eth;
 
 	if (!num_tc)
-		num_tc = MLNX4_TX_MAX_NUMBER;
+		num_tc = MLX4_TC_MAX_NUMBER;
 
 	return num_tc;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d0cf8fa22659..7a346bb2ed00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -331,7 +331,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
331 if (mlx5e_query_global_pause_combined(priv)) { 331 if (mlx5e_query_global_pause_combined(priv)) {
332 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { 332 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
333 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], 333 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
334 pport_per_prio_pfc_stats_desc, 0); 334 pport_per_prio_pfc_stats_desc, i);
335 } 335 }
336 } 336 }
337 337
@@ -659,9 +659,10 @@ out:
659static void ptys2ethtool_supported_link(unsigned long *supported_modes, 659static void ptys2ethtool_supported_link(unsigned long *supported_modes,
660 u32 eth_proto_cap) 660 u32 eth_proto_cap)
661{ 661{
662 unsigned long proto_cap = eth_proto_cap;
662 int proto; 663 int proto;
663 664
664 for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER) 665 for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
665 bitmap_or(supported_modes, supported_modes, 666 bitmap_or(supported_modes, supported_modes,
666 ptys2ethtool_table[proto].supported, 667 ptys2ethtool_table[proto].supported,
667 __ETHTOOL_LINK_MODE_MASK_NBITS); 668 __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -670,9 +671,10 @@ static void ptys2ethtool_supported_link(unsigned long *supported_modes,
670static void ptys2ethtool_adver_link(unsigned long *advertising_modes, 671static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
671 u32 eth_proto_cap) 672 u32 eth_proto_cap)
672{ 673{
674 unsigned long proto_cap = eth_proto_cap;
673 int proto; 675 int proto;
674 676
675 for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER) 677 for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
676 bitmap_or(advertising_modes, advertising_modes, 678 bitmap_or(advertising_modes, advertising_modes,
677 ptys2ethtool_table[proto].advertised, 679 ptys2ethtool_table[proto].advertised,
678 __ETHTOOL_LINK_MODE_MASK_NBITS); 680 __ETHTOOL_LINK_MODE_MASK_NBITS);
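
Two separate fixes above: the per-priority PFC counters now index entry i instead of repeatedly reading entry 0, and the link-mode helpers stop casting the address of a u32 to unsigned long *, which on 64-bit machines made for_each_set_bit() scan 8 bytes and pick up whatever sits beyond eth_proto_cap. A minimal userspace sketch of the widening fix; walk_bits() merely stands in for for_each_set_bit():

    #include <stdint.h>
    #include <stdio.h>

    static void walk_bits(unsigned long mask, int nbits)
    {
        for (int bit = 0; bit < nbits; bit++)
            if (mask & (1UL << bit))
                printf("link mode bit %d is set\n", bit);
    }

    int main(void)
    {
        uint32_t eth_proto_cap = (1u << 2) | (1u << 17);

        /* Widen into a real unsigned long before scanning, instead of
         * reinterpreting &eth_proto_cap as an unsigned long pointer and
         * reading past the 4-byte object on 64-bit hosts. */
        unsigned long proto_cap = eth_proto_cap;

        walk_bits(proto_cap, 32);
        return 0;
    }
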
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b6f8ebbdb487..e7c969df3dad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -637,24 +637,32 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
637static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, 637static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
638 u32 cqe_bcnt) 638 u32 cqe_bcnt)
639{ 639{
640 struct ethhdr *eth = (struct ethhdr *)(skb->data); 640 struct ethhdr *eth = (struct ethhdr *)(skb->data);
641 struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN); 641 struct iphdr *ipv4;
642 struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); 642 struct ipv6hdr *ipv6;
643 struct tcphdr *tcp; 643 struct tcphdr *tcp;
644 int network_depth = 0;
645 __be16 proto;
646 u16 tot_len;
644 647
645 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); 648 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
646 int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || 649 int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
647 (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); 650 (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
648 651
649 u16 tot_len = cqe_bcnt - ETH_HLEN; 652 skb->mac_len = ETH_HLEN;
653 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
650 654
651 if (eth->h_proto == htons(ETH_P_IP)) { 655 ipv4 = (struct iphdr *)(skb->data + network_depth);
652 tcp = (struct tcphdr *)(skb->data + ETH_HLEN + 656 ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
657 tot_len = cqe_bcnt - network_depth;
658
659 if (proto == htons(ETH_P_IP)) {
660 tcp = (struct tcphdr *)(skb->data + network_depth +
653 sizeof(struct iphdr)); 661 sizeof(struct iphdr));
654 ipv6 = NULL; 662 ipv6 = NULL;
655 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 663 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
656 } else { 664 } else {
657 tcp = (struct tcphdr *)(skb->data + ETH_HLEN + 665 tcp = (struct tcphdr *)(skb->data + network_depth +
658 sizeof(struct ipv6hdr)); 666 sizeof(struct ipv6hdr));
659 ipv4 = NULL; 667 ipv4 = NULL;
660 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 668 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
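
The LRO header fix above stops assuming the IP header starts at a fixed ETH_HLEN offset; with VLAN-tagged traffic the network header sits further out, so the driver now asks __vlan_get_protocol() for the real depth and protocol. A rough userspace sketch of that offset resolution, with constants mirroring the usual ethertype values and a hand-built frame (not driver code):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_HLEN    14
    #define VLAN_HLEN   4
    #define ETH_P_8021Q 0x8100
    #define ETH_P_8021AD 0x88A8

    static uint16_t vlan_get_protocol(const uint8_t *frame, int *network_depth)
    {
        int off = ETH_HLEN;
        uint16_t proto;

        memcpy(&proto, frame + 12, sizeof(proto));      /* outer ethertype */
        proto = ntohs(proto);
        while (proto == ETH_P_8021Q || proto == ETH_P_8021AD) {
            memcpy(&proto, frame + off + 2, sizeof(proto));  /* next ethertype */
            proto = ntohs(proto);
            off += VLAN_HLEN;                           /* skip one tag */
        }
        *network_depth = off;
        return proto;
    }

    int main(void)
    {
        uint8_t frame[64] = {0};
        int depth;

        /* MACs left zero; single 802.1Q tag, then IPv4 */
        frame[12] = 0x81; frame[13] = 0x00;             /* TPID 0x8100 */
        frame[16] = 0x08; frame[17] = 0x00;             /* inner type IPv4 */

        uint16_t proto = vlan_get_protocol(frame, &depth);
        printf("proto 0x%04x at network depth %d\n", proto, depth);  /* 0x0800 at 18 */
        return 0;
    }
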
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 988eca99ee0f..eb0e72537f10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -356,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
356 sq->stats.stopped++; 356 sq->stats.stopped++;
357 } 357 }
358 358
359 sq->stats.xmit_more += skb->xmit_more;
359 if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { 360 if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
360 int bf_sz = 0; 361 int bf_sz = 0;
361 362
@@ -375,7 +376,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
375 376
376 sq->stats.packets++; 377 sq->stats.packets++;
377 sq->stats.bytes += num_bytes; 378 sq->stats.bytes += num_bytes;
378 sq->stats.xmit_more += skb->xmit_more;
379 return NETDEV_TX_OK; 379 return NETDEV_TX_OK;
380 380
381dma_unmap_wqe_err: 381dma_unmap_wqe_err:
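
The hunk above moves the xmit_more statistics update ahead of the doorbell: once the descriptor is posted, the completion path may already have freed the skb, so reading skb->xmit_more afterwards races with that free. A small userspace sketch of the general rule the fix follows, which is to capture whatever you need from a buffer before handing ownership away; consume() stands in for the doorbell/completion that frees it:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buf { size_t len; bool more; };
    struct stats { unsigned long packets, bytes, xmit_more; };

    static void consume(struct buf *b)      /* after this, b is gone */
    {
        free(b);
    }

    static void xmit(struct stats *st, struct buf *b)
    {
        size_t len = b->len;
        bool more = b->more;

        st->xmit_more += more;              /* read before ownership is transferred */
        consume(b);                         /* b must not be touched past this point */
        st->packets++;
        st->bytes += len;
    }

    int main(void)
    {
        struct stats st = {0};
        struct buf *b = malloc(sizeof(*b));

        b->len = 1500;
        b->more = false;
        xmit(&st, b);
        printf("packets=%lu bytes=%lu xmit_more=%lu\n",
               st.packets, st.bytes, st.xmit_more);
        return 0;
    }
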
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 7291f2c4b0c7..d48873bcbddf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -56,6 +56,7 @@
56#include <generated/utsrelease.h> 56#include <generated/utsrelease.h>
57#include <net/pkt_cls.h> 57#include <net/pkt_cls.h>
58#include <net/tc_act/tc_mirred.h> 58#include <net/tc_act/tc_mirred.h>
59#include <net/netevent.h>
59 60
60#include "spectrum.h" 61#include "spectrum.h"
61#include "core.h" 62#include "core.h"
@@ -2105,6 +2106,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2105 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 2106 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2106 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 2107 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2107 2108
2109 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2110 if (err) {
2111 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2112 mlxsw_sp_port->local_port);
2113 goto err_port_swid_set;
2114 }
2115
2108 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 2116 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2109 if (err) { 2117 if (err) {
2110 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 2118 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -2130,13 +2138,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2130 goto err_port_system_port_mapping_set; 2138 goto err_port_system_port_mapping_set;
2131 } 2139 }
2132 2140
2133 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2134 if (err) {
2135 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2136 mlxsw_sp_port->local_port);
2137 goto err_port_swid_set;
2138 }
2139
2140 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 2141 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2141 if (err) { 2142 if (err) {
2142 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 2143 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
@@ -2218,10 +2219,10 @@ err_port_buffers_init:
2218err_port_admin_status_set: 2219err_port_admin_status_set:
2219err_port_mtu_set: 2220err_port_mtu_set:
2220err_port_speed_by_width_set: 2221err_port_speed_by_width_set:
2221 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2222err_port_swid_set:
2223err_port_system_port_mapping_set: 2222err_port_system_port_mapping_set:
2224err_dev_addr_init: 2223err_dev_addr_init:
2224 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2225err_port_swid_set:
2225 free_percpu(mlxsw_sp_port->pcpu_stats); 2226 free_percpu(mlxsw_sp_port->pcpu_stats);
2226err_alloc_stats: 2227err_alloc_stats:
2227 kfree(mlxsw_sp_port->untagged_vlans); 2228 kfree(mlxsw_sp_port->untagged_vlans);
@@ -4541,18 +4542,26 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
4541 .priority = 10, /* Must be called before FIB notifier block */ 4542 .priority = 10, /* Must be called before FIB notifier block */
4542}; 4543};
4543 4544
4545static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
4546 .notifier_call = mlxsw_sp_router_netevent_event,
4547};
4548
4544static int __init mlxsw_sp_module_init(void) 4549static int __init mlxsw_sp_module_init(void)
4545{ 4550{
4546 int err; 4551 int err;
4547 4552
4548 register_netdevice_notifier(&mlxsw_sp_netdevice_nb); 4553 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4549 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 4554 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4555 register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4556
4550 err = mlxsw_core_driver_register(&mlxsw_sp_driver); 4557 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4551 if (err) 4558 if (err)
4552 goto err_core_driver_register; 4559 goto err_core_driver_register;
4553 return 0; 4560 return 0;
4554 4561
4555err_core_driver_register: 4562err_core_driver_register:
4563 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4564 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4556 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); 4565 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4557 return err; 4566 return err;
4558} 4567}
@@ -4560,6 +4569,7 @@ err_core_driver_register:
4560static void __exit mlxsw_sp_module_exit(void) 4569static void __exit mlxsw_sp_module_exit(void)
4561{ 4570{
4562 mlxsw_core_driver_unregister(&mlxsw_sp_driver); 4571 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4572 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4563 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); 4573 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4564 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); 4574 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4565} 4575}
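
Two ordering fixes above: the SWID (and with it the port type) is now set before the MAC address is derived, and the netevent notifier moves from the router's neigh init into module init/exit, where the error path unregisters everything in the exact reverse of registration order. A small userspace sketch of that unwind convention; step_*() and undo_*() are placeholders, not driver calls:

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; }  /* pretend the last step fails */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    static int module_init_like(void)
    {
        int err;

        err = step_a();
        if (err)
            goto err_a;
        err = step_b();
        if (err)
            goto err_b;
        err = step_c();
        if (err)
            goto err_c;
        return 0;

    err_c:
        undo_b();       /* reverse order: b came after a, so undo b first */
    err_b:
        undo_a();
    err_a:
        return err;
    }

    int main(void)
    {
        printf("init -> %d\n", module_init_like());
        return 0;
    }
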
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ab3feb81bd43..ac48abebe904 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -587,6 +587,8 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
587 struct neighbour *n); 587 struct neighbour *n);
588void mlxsw_sp_router_neigh_destroy(struct net_device *dev, 588void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
589 struct neighbour *n); 589 struct neighbour *n);
590int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
591 unsigned long event, void *ptr);
590 592
591int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); 593int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
592void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); 594void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 917ddd1e422f..3f5c51da6d3e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -107,6 +107,7 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
107} 107}
108 108
109struct mlxsw_sp_fib_key { 109struct mlxsw_sp_fib_key {
110 struct net_device *dev;
110 unsigned char addr[sizeof(struct in6_addr)]; 111 unsigned char addr[sizeof(struct in6_addr)];
111 unsigned char prefix_len; 112 unsigned char prefix_len;
112}; 113};
@@ -123,7 +124,7 @@ struct mlxsw_sp_fib_entry {
123 struct rhash_head ht_node; 124 struct rhash_head ht_node;
124 struct mlxsw_sp_fib_key key; 125 struct mlxsw_sp_fib_key key;
125 enum mlxsw_sp_fib_entry_type type; 126 enum mlxsw_sp_fib_entry_type type;
126 u8 added:1; 127 unsigned int ref_count;
127 u16 rif; /* used for action local */ 128 u16 rif; /* used for action local */
128 struct mlxsw_sp_vr *vr; 129 struct mlxsw_sp_vr *vr;
129 struct list_head nexthop_group_node; 130 struct list_head nexthop_group_node;
@@ -171,13 +172,15 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
171 172
172static struct mlxsw_sp_fib_entry * 173static struct mlxsw_sp_fib_entry *
173mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr, 174mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
174 size_t addr_len, unsigned char prefix_len) 175 size_t addr_len, unsigned char prefix_len,
176 struct net_device *dev)
175{ 177{
176 struct mlxsw_sp_fib_entry *fib_entry; 178 struct mlxsw_sp_fib_entry *fib_entry;
177 179
178 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); 180 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
179 if (!fib_entry) 181 if (!fib_entry)
180 return NULL; 182 return NULL;
183 fib_entry->key.dev = dev;
181 memcpy(fib_entry->key.addr, addr, addr_len); 184 memcpy(fib_entry->key.addr, addr, addr_len);
182 fib_entry->key.prefix_len = prefix_len; 185 fib_entry->key.prefix_len = prefix_len;
183 return fib_entry; 186 return fib_entry;
@@ -190,10 +193,13 @@ static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
190 193
191static struct mlxsw_sp_fib_entry * 194static struct mlxsw_sp_fib_entry *
192mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr, 195mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
193 size_t addr_len, unsigned char prefix_len) 196 size_t addr_len, unsigned char prefix_len,
197 struct net_device *dev)
194{ 198{
195 struct mlxsw_sp_fib_key key = {{ 0 } }; 199 struct mlxsw_sp_fib_key key;
196 200
201 memset(&key, 0, sizeof(key));
202 key.dev = dev;
197 memcpy(key.addr, addr, addr_len); 203 memcpy(key.addr, addr, addr_len);
198 key.prefix_len = prefix_len; 204 key.prefix_len = prefix_len;
199 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params); 205 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
@@ -938,8 +944,8 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
938 mlxsw_sp_port_dev_put(mlxsw_sp_port); 944 mlxsw_sp_port_dev_put(mlxsw_sp_port);
939} 945}
940 946
941static int mlxsw_sp_router_netevent_event(struct notifier_block *unused, 947int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
942 unsigned long event, void *ptr) 948 unsigned long event, void *ptr)
943{ 949{
944 struct mlxsw_sp_neigh_entry *neigh_entry; 950 struct mlxsw_sp_neigh_entry *neigh_entry;
945 struct mlxsw_sp_port *mlxsw_sp_port; 951 struct mlxsw_sp_port *mlxsw_sp_port;
@@ -1009,10 +1015,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
1009 return NOTIFY_DONE; 1015 return NOTIFY_DONE;
1010} 1016}
1011 1017
1012static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
1013 .notifier_call = mlxsw_sp_router_netevent_event,
1014};
1015
1016static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp) 1018static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1017{ 1019{
1018 int err; 1020 int err;
@@ -1027,10 +1029,6 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1027 */ 1029 */
1028 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp); 1030 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
1029 1031
1030 err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
1031 if (err)
1032 goto err_register_netevent_notifier;
1033
1034 /* Create the delayed works for the activity_update */ 1032 /* Create the delayed works for the activity_update */
1035 INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw, 1033 INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
1036 mlxsw_sp_router_neighs_update_work); 1034 mlxsw_sp_router_neighs_update_work);
@@ -1039,17 +1037,12 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1039 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0); 1037 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
1040 mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0); 1038 mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
1041 return 0; 1039 return 0;
1042
1043err_register_netevent_notifier:
1044 rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
1045 return err;
1046} 1040}
1047 1041
1048static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) 1042static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1049{ 1043{
1050 cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw); 1044 cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
1051 cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw); 1045 cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
1052 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
1053 rhashtable_destroy(&mlxsw_sp->router.neigh_ht); 1046 rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
1054} 1047}
1055 1048
@@ -1524,7 +1517,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
1524 return err; 1517 return err;
1525 mlxsw_sp_lpm_init(mlxsw_sp); 1518 mlxsw_sp_lpm_init(mlxsw_sp);
1526 mlxsw_sp_vrs_init(mlxsw_sp); 1519 mlxsw_sp_vrs_init(mlxsw_sp);
1527 return mlxsw_sp_neigh_init(mlxsw_sp); 1520 err = mlxsw_sp_neigh_init(mlxsw_sp);
1521 if (err)
1522 goto err_neigh_init;
1523 return 0;
1524
1525err_neigh_init:
1526 __mlxsw_sp_router_fini(mlxsw_sp);
1527 return err;
1528} 1528}
1529 1529
1530void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) 1530void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1626,11 +1626,8 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
1626static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, 1626static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1627 struct mlxsw_sp_fib_entry *fib_entry) 1627 struct mlxsw_sp_fib_entry *fib_entry)
1628{ 1628{
1629 enum mlxsw_reg_ralue_op op; 1629 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1630 1630 MLXSW_REG_RALUE_OP_WRITE_WRITE);
1631 op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
1632 MLXSW_REG_RALUE_OP_WRITE_UPDATE;
1633 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
1634} 1631}
1635 1632
1636static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp, 1633static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
@@ -1695,34 +1692,93 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
1695 mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); 1692 mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
1696} 1693}
1697 1694
1698static int 1695static struct mlxsw_sp_fib_entry *
1699mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port, 1696mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
1700 const struct switchdev_obj_ipv4_fib *fib4, 1697 const struct switchdev_obj_ipv4_fib *fib4)
1701 struct switchdev_trans *trans)
1702{ 1698{
1703 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1704 struct mlxsw_sp_router_fib4_add_info *info;
1705 struct mlxsw_sp_fib_entry *fib_entry; 1699 struct mlxsw_sp_fib_entry *fib_entry;
1700 struct fib_info *fi = fib4->fi;
1706 struct mlxsw_sp_vr *vr; 1701 struct mlxsw_sp_vr *vr;
1707 int err; 1702 int err;
1708 1703
1709 vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id, 1704 vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
1710 MLXSW_SP_L3_PROTO_IPV4); 1705 MLXSW_SP_L3_PROTO_IPV4);
1711 if (IS_ERR(vr)) 1706 if (IS_ERR(vr))
1712 return PTR_ERR(vr); 1707 return ERR_CAST(vr);
1713 1708
1709 fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
1710 sizeof(fib4->dst),
1711 fib4->dst_len, fi->fib_dev);
1712 if (fib_entry) {
1713 /* Already exists, just take a reference */
1714 fib_entry->ref_count++;
1715 return fib_entry;
1716 }
1714 fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst, 1717 fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
1715 sizeof(fib4->dst), fib4->dst_len); 1718 sizeof(fib4->dst),
1719 fib4->dst_len, fi->fib_dev);
1716 if (!fib_entry) { 1720 if (!fib_entry) {
1717 err = -ENOMEM; 1721 err = -ENOMEM;
1718 goto err_fib_entry_create; 1722 goto err_fib_entry_create;
1719 } 1723 }
1720 fib_entry->vr = vr; 1724 fib_entry->vr = vr;
1725 fib_entry->ref_count = 1;
1721 1726
1722 err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry); 1727 err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
1723 if (err) 1728 if (err)
1724 goto err_fib4_entry_init; 1729 goto err_fib4_entry_init;
1725 1730
1731 return fib_entry;
1732
1733err_fib4_entry_init:
1734 mlxsw_sp_fib_entry_destroy(fib_entry);
1735err_fib_entry_create:
1736 mlxsw_sp_vr_put(mlxsw_sp, vr);
1737
1738 return ERR_PTR(err);
1739}
1740
1741static struct mlxsw_sp_fib_entry *
1742mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
1743 const struct switchdev_obj_ipv4_fib *fib4)
1744{
1745 struct mlxsw_sp_vr *vr;
1746
1747 vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
1748 if (!vr)
1749 return NULL;
1750
1751 return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
1752 sizeof(fib4->dst), fib4->dst_len,
1753 fib4->fi->fib_dev);
1754}
1755
1756void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
1757 struct mlxsw_sp_fib_entry *fib_entry)
1758{
1759 struct mlxsw_sp_vr *vr = fib_entry->vr;
1760
1761 if (--fib_entry->ref_count == 0) {
1762 mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
1763 mlxsw_sp_fib_entry_destroy(fib_entry);
1764 }
1765 mlxsw_sp_vr_put(mlxsw_sp, vr);
1766}
1767
1768static int
1769mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
1770 const struct switchdev_obj_ipv4_fib *fib4,
1771 struct switchdev_trans *trans)
1772{
1773 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1774 struct mlxsw_sp_router_fib4_add_info *info;
1775 struct mlxsw_sp_fib_entry *fib_entry;
1776 int err;
1777
1778 fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
1779 if (IS_ERR(fib_entry))
1780 return PTR_ERR(fib_entry);
1781
1726 info = kmalloc(sizeof(*info), GFP_KERNEL); 1782 info = kmalloc(sizeof(*info), GFP_KERNEL);
1727 if (!info) { 1783 if (!info) {
1728 err = -ENOMEM; 1784 err = -ENOMEM;
@@ -1736,11 +1792,7 @@ mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
1736 return 0; 1792 return 0;
1737 1793
1738err_alloc_info: 1794err_alloc_info:
1739 mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); 1795 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
1740err_fib4_entry_init:
1741 mlxsw_sp_fib_entry_destroy(fib_entry);
1742err_fib_entry_create:
1743 mlxsw_sp_vr_put(mlxsw_sp, vr);
1744 return err; 1796 return err;
1745} 1797}
1746 1798
@@ -1759,11 +1811,14 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
1759 fib_entry = info->fib_entry; 1811 fib_entry = info->fib_entry;
1760 kfree(info); 1812 kfree(info);
1761 1813
1814 if (fib_entry->ref_count != 1)
1815 return 0;
1816
1762 vr = fib_entry->vr; 1817 vr = fib_entry->vr;
1763 err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry); 1818 err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
1764 if (err) 1819 if (err)
1765 goto err_fib_entry_insert; 1820 goto err_fib_entry_insert;
1766 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); 1821 err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
1767 if (err) 1822 if (err)
1768 goto err_fib_entry_add; 1823 goto err_fib_entry_add;
1769 return 0; 1824 return 0;
@@ -1771,9 +1826,7 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
1771err_fib_entry_add: 1826err_fib_entry_add:
1772 mlxsw_sp_fib_entry_remove(vr->fib, fib_entry); 1827 mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
1773err_fib_entry_insert: 1828err_fib_entry_insert:
1774 mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); 1829 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
1775 mlxsw_sp_fib_entry_destroy(fib_entry);
1776 mlxsw_sp_vr_put(mlxsw_sp, vr);
1777 return err; 1830 return err;
1778} 1831}
1779 1832
@@ -1793,23 +1846,18 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
1793{ 1846{
1794 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1847 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1795 struct mlxsw_sp_fib_entry *fib_entry; 1848 struct mlxsw_sp_fib_entry *fib_entry;
1796 struct mlxsw_sp_vr *vr;
1797 1849
1798 vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4); 1850 fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
1799 if (!vr) {
1800 dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
1801 return -ENOENT;
1802 }
1803 fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
1804 sizeof(fib4->dst), fib4->dst_len);
1805 if (!fib_entry) { 1851 if (!fib_entry) {
1806 dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n"); 1852 dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
1807 return -ENOENT; 1853 return -ENOENT;
1808 } 1854 }
1809 mlxsw_sp_fib_entry_del(mlxsw_sp_port->mlxsw_sp, fib_entry); 1855
1810 mlxsw_sp_fib_entry_remove(vr->fib, fib_entry); 1856 if (fib_entry->ref_count == 1) {
1811 mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); 1857 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
1812 mlxsw_sp_fib_entry_destroy(fib_entry); 1858 mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
1813 mlxsw_sp_vr_put(mlxsw_sp, vr); 1859 }
1860
1861 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
1814 return 0; 1862 return 0;
1815} 1863}
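
The router change above makes the egress netdev part of the FIB key and adds a reference count, so the same route offloaded twice shares one hardware entry and only the last put tears it down, which is the root of the earlier double-removal problem. A toy userspace sketch of that get/put scheme; the linear table stands in for the driver's rhashtable and the key string for (dev, prefix, len):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct entry {
        char key[32];
        unsigned int ref_count;
    };

    #define MAX_ENTRIES 16
    static struct entry *table[MAX_ENTRIES];

    static struct entry *entry_get(const char *key)
    {
        for (int i = 0; i < MAX_ENTRIES; i++)
            if (table[i] && !strcmp(table[i]->key, key)) {
                table[i]->ref_count++;          /* already programmed: share it */
                return table[i];
            }
        for (int i = 0; i < MAX_ENTRIES; i++)
            if (!table[i]) {
                struct entry *e = calloc(1, sizeof(*e));
                snprintf(e->key, sizeof(e->key), "%s", key);
                e->ref_count = 1;               /* first user programs the hardware */
                table[i] = e;
                return e;
            }
        return NULL;
    }

    static void entry_put(struct entry *e)
    {
        if (--e->ref_count)
            return;
        for (int i = 0; i < MAX_ENTRIES; i++)   /* last user: remove and free */
            if (table[i] == e)
                table[i] = NULL;
        free(e);
    }

    int main(void)
    {
        struct entry *a = entry_get("eth0/10.0.0.0/24");
        struct entry *b = entry_get("eth0/10.0.0.0/24");

        printf("shared=%d refs=%u\n", a == b, a->ref_count);  /* shared=1 refs=2 */
        entry_put(b);
        entry_put(a);
        return 0;
    }
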
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index d1b59cdfacc1..7b654c517b91 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -167,8 +167,8 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
167} 167}
168 168
169static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, 169static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
170 u16 idx_begin, u16 idx_end, bool set, 170 u16 idx_begin, u16 idx_end, bool uc_set,
171 bool only_uc) 171 bool bm_set)
172{ 172{
173 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 173 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
174 u16 local_port = mlxsw_sp_port->local_port; 174 u16 local_port = mlxsw_sp_port->local_port;
@@ -187,28 +187,22 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
187 return -ENOMEM; 187 return -ENOMEM;
188 188
189 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, 189 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
190 table_type, range, local_port, set); 190 table_type, range, local_port, uc_set);
191 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 191 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
192 if (err) 192 if (err)
193 goto buffer_out; 193 goto buffer_out;
194 194
195 /* Flooding control allows one to decide whether a given port will
196 * flood unicast traffic for which there is no FDB entry.
197 */
198 if (only_uc)
199 goto buffer_out;
200
201 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, 195 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
202 table_type, range, local_port, set); 196 table_type, range, local_port, bm_set);
203 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 197 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
204 if (err) 198 if (err)
205 goto err_flood_bm_set; 199 goto err_flood_bm_set;
206 else 200
207 goto buffer_out; 201 goto buffer_out;
208 202
209err_flood_bm_set: 203err_flood_bm_set:
210 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, 204 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
211 table_type, range, local_port, !set); 205 table_type, range, local_port, !uc_set);
212 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 206 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
213buffer_out: 207buffer_out:
214 kfree(sftr_pl); 208 kfree(sftr_pl);
@@ -257,8 +251,7 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
257 * the start of the vFIDs range. 251 * the start of the vFIDs range.
258 */ 252 */
259 vfid = mlxsw_sp_fid_to_vfid(fid); 253 vfid = mlxsw_sp_fid_to_vfid(fid);
260 return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, 254 return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
261 false);
262} 255}
263 256
264static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, 257static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -460,6 +453,9 @@ static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
460{ 453{
461 struct mlxsw_sp_fid *f; 454 struct mlxsw_sp_fid *f;
462 455
456 if (test_bit(fid, mlxsw_sp_port->active_vlans))
457 return 0;
458
463 f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); 459 f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
464 if (!f) { 460 if (!f) {
465 f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid); 461 f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
@@ -517,7 +513,7 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
517 } 513 }
518 514
519 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, 515 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
520 true, false); 516 mlxsw_sp_port->uc_flood, true);
521 if (err) 517 if (err)
522 goto err_port_flood_set; 518 goto err_port_flood_set;
523 519
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 88678c172b19..252e4924de0f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,7 +41,6 @@
41 * Chris Telfer <chris.telfer@netronome.com> 41 * Chris Telfer <chris.telfer@netronome.com>
42 */ 42 */
43 43
44#include <linux/version.h>
45#include <linux/module.h> 44#include <linux/module.h>
46#include <linux/kernel.h> 45#include <linux/kernel.h>
47#include <linux/init.h> 46#include <linux/init.h>
@@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1441 1440
1442 nfp_net_set_hash(nn->netdev, skb, rxd); 1441 nfp_net_set_hash(nn->netdev, skb, rxd);
1443 1442
1444 /* Pad small frames to minimum */
1445 if (skb_put_padto(skb, 60))
1446 break;
1447
1448 /* Stats update */ 1443 /* Stats update */
1449 u64_stats_update_begin(&r_vec->rx_sync); 1444 u64_stats_update_begin(&r_vec->rx_sync);
1450 r_vec->rx_pkts++; 1445 r_vec->rx_pkts++;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 7d7933d00b8f..4c9897220969 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -40,7 +40,6 @@
40 * Brad Petrus <brad.petrus@netronome.com> 40 * Brad Petrus <brad.petrus@netronome.com>
41 */ 41 */
42 42
43#include <linux/version.h>
44#include <linux/kernel.h> 43#include <linux/kernel.h>
45#include <linux/netdevice.h> 44#include <linux/netdevice.h>
46#include <linux/etherdevice.h> 45#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 37abef016a0a..f7062cb648e1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -38,7 +38,6 @@
38 * Rolf Neugebauer <rolf.neugebauer@netronome.com> 38 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
39 */ 39 */
40 40
41#include <linux/version.h>
42#include <linux/module.h> 41#include <linux/module.h>
43#include <linux/kernel.h> 42#include <linux/kernel.h>
44#include <linux/init.h> 43#include <linux/init.h>
@@ -134,7 +133,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
134 } 133 }
135 134
136 nfp_net_get_fw_version(&fw_ver, ctrl_bar); 135 nfp_net_get_fw_version(&fw_ver, ctrl_bar);
137 if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { 136 if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
138 dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n", 137 dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
139 fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); 138 fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
140 err = -EINVAL; 139 err = -EINVAL;
@@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
142 } 141 }
143 142
144 /* Determine stride */ 143 /* Determine stride */
145 if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) || 144 if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
146 nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) ||
147 nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) {
148 stride = 2; 145 stride = 2;
149 tx_bar_no = NFP_NET_Q0_BAR; 146 tx_bar_no = NFP_NET_Q0_BAR;
150 rx_bar_no = NFP_NET_Q1_BAR; 147 rx_bar_no = NFP_NET_Q1_BAR;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 226cb08cc055..3656d2fd673d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -19,6 +19,7 @@
19#include "qed_dcbx.h" 19#include "qed_dcbx.h"
20#include "qed_hsi.h" 20#include "qed_hsi.h"
21#include "qed_sp.h" 21#include "qed_sp.h"
22#include "qed_sriov.h"
22#ifdef CONFIG_DCB 23#ifdef CONFIG_DCB
23#include <linux/qed/qed_eth_if.h> 24#include <linux/qed/qed_eth_if.h>
24#endif 25#endif
@@ -945,6 +946,9 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
945 struct qed_ptt *p_ptt; 946 struct qed_ptt *p_ptt;
946 int rc; 947 int rc;
947 948
949 if (IS_VF(p_hwfn->cdev))
950 return -EINVAL;
951
948 p_ptt = qed_ptt_acquire(p_hwfn); 952 p_ptt = qed_ptt_acquire(p_hwfn);
949 if (!p_ptt) 953 if (!p_ptt)
950 return -EBUSY; 954 return -EBUSY;
@@ -984,6 +988,7 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
984 if (p_params->pfc.prio[i]) 988 if (p_params->pfc.prio[i])
985 pfc_map |= BIT(i); 989 pfc_map |= BIT(i);
986 990
991 *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
987 *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT); 992 *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
988 993
989 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc); 994 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
@@ -1058,24 +1063,33 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
1058 1063
1059 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { 1064 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
1060 entry = &p_app->app_pri_tbl[i].entry; 1065 entry = &p_app->app_pri_tbl[i].entry;
1066 *entry = 0;
1061 if (ieee) { 1067 if (ieee) {
1062 *entry &= ~DCBX_APP_SF_IEEE_MASK; 1068 *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
1063 switch (p_params->app_entry[i].sf_ieee) { 1069 switch (p_params->app_entry[i].sf_ieee) {
1064 case QED_DCBX_SF_IEEE_ETHTYPE: 1070 case QED_DCBX_SF_IEEE_ETHTYPE:
1065 *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << 1071 *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
1066 DCBX_APP_SF_IEEE_SHIFT); 1072 DCBX_APP_SF_IEEE_SHIFT);
1073 *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
1074 DCBX_APP_SF_SHIFT);
1067 break; 1075 break;
1068 case QED_DCBX_SF_IEEE_TCP_PORT: 1076 case QED_DCBX_SF_IEEE_TCP_PORT:
1069 *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << 1077 *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
1070 DCBX_APP_SF_IEEE_SHIFT); 1078 DCBX_APP_SF_IEEE_SHIFT);
1079 *entry |= ((u32)DCBX_APP_SF_PORT <<
1080 DCBX_APP_SF_SHIFT);
1071 break; 1081 break;
1072 case QED_DCBX_SF_IEEE_UDP_PORT: 1082 case QED_DCBX_SF_IEEE_UDP_PORT:
1073 *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << 1083 *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
1074 DCBX_APP_SF_IEEE_SHIFT); 1084 DCBX_APP_SF_IEEE_SHIFT);
1085 *entry |= ((u32)DCBX_APP_SF_PORT <<
1086 DCBX_APP_SF_SHIFT);
1075 break; 1087 break;
1076 case QED_DCBX_SF_IEEE_TCP_UDP_PORT: 1088 case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
1077 *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << 1089 *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
1078 DCBX_APP_SF_IEEE_SHIFT); 1090 DCBX_APP_SF_IEEE_SHIFT);
1091 *entry |= ((u32)DCBX_APP_SF_PORT <<
1092 DCBX_APP_SF_SHIFT);
1079 break; 1093 break;
1080 } 1094 }
1081 } else { 1095 } else {
@@ -1175,7 +1189,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
1175 return 0; 1189 return 0;
1176 } 1190 }
1177 1191
1178 dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL); 1192 dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
1179 if (!dcbx_info) { 1193 if (!dcbx_info) {
1180 DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n"); 1194 DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
1181 return -ENOMEM; 1195 return -ENOMEM;
@@ -1212,7 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
1212{ 1226{
1213 struct qed_dcbx_get *dcbx_info; 1227 struct qed_dcbx_get *dcbx_info;
1214 1228
1215 dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL); 1229 dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
1216 if (!dcbx_info) { 1230 if (!dcbx_info) {
1217 DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n"); 1231 DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
1218 return NULL; 1232 return NULL;
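
The qed DCBX fixes above share a theme of not trusting leftover state: the PFC priority bitmap is masked out before the new map is OR-ed in, each app-table entry is zeroed before being rebuilt, kzalloc replaces kmalloc for dcbx_info, and VFs bail out of qed_dcbx_query_params early. A minimal sketch of the mask-then-set update; the MASK/SHIFT names below are invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define PFC_PRI_EN_MASK   0x000000ffu
    #define PFC_PRI_EN_SHIFT  0

    static void set_pfc_map(uint32_t *pfc, uint32_t map)
    {
        *pfc &= ~PFC_PRI_EN_MASK;       /* drop the stale bitmap first */
        *pfc |= (map << PFC_PRI_EN_SHIFT) & PFC_PRI_EN_MASK;
    }

    int main(void)
    {
        uint32_t pfc = 0;

        set_pfc_map(&pfc, 0x0f);        /* priorities 0-3 */
        set_pfc_map(&pfc, 0x30);        /* now only 4-5; a bare |= would leave 0x3f */
        printf("pfc = 0x%02x\n", pfc & PFC_PRI_EN_MASK);
        return 0;
    }
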
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a6eb6af8cbe8..9544e4c41359 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -2520,7 +2520,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
2520 edev->ops->register_ops(cdev, &qede_ll_ops, edev); 2520 edev->ops->register_ops(cdev, &qede_ll_ops, edev);
2521 2521
2522#ifdef CONFIG_DCB 2522#ifdef CONFIG_DCB
2523 qede_set_dcbnl_ops(edev->ndev); 2523 if (!IS_VF(edev))
2524 qede_set_dcbnl_ops(edev->ndev);
2524#endif 2525#endif
2525 2526
2526 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); 2527 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 799d58d86e6d..054e795df90f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
201 201
202 [ARSTR] = 0x0000, 202 [ARSTR] = 0x0000,
203 [TSU_CTRST] = 0x0004, 203 [TSU_CTRST] = 0x0004,
204 [TSU_FWSLC] = 0x0038,
204 [TSU_VTAG0] = 0x0058, 205 [TSU_VTAG0] = 0x0058,
205 [TSU_ADSBSY] = 0x0060, 206 [TSU_ADSBSY] = 0x0060,
206 [TSU_TEN] = 0x0064, 207 [TSU_TEN] = 0x0064,
208 [TSU_POST1] = 0x0070,
209 [TSU_POST2] = 0x0074,
210 [TSU_POST3] = 0x0078,
211 [TSU_POST4] = 0x007c,
207 [TSU_ADRH0] = 0x0100, 212 [TSU_ADRH0] = 0x0100,
208 213
209 [TXNLCR0] = 0x0080, 214 [TXNLCR0] = 0x0080,
@@ -2786,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2786{ 2791{
2787 if (sh_eth_is_rz_fast_ether(mdp)) { 2792 if (sh_eth_is_rz_fast_ether(mdp)) {
2788 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ 2793 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2794 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
2795 TSU_FWSLC); /* Enable POST registers */
2789 return; 2796 return;
2790 } 2797 }
2791 2798
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index e17671c9d1b0..ea8465467469 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -470,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
470#endif 470#endif
471 471
472#if ! SMC_CAN_USE_8BIT 472#if ! SMC_CAN_USE_8BIT
473#undef SMC_inb
473#define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) 474#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
475#undef SMC_outb
474#define SMC_outb(x, ioaddr, reg) BUG() 476#define SMC_outb(x, ioaddr, reg) BUG()
475#define SMC_insb(a, r, p, l) BUG() 477#define SMC_insb(a, r, p, l) BUG()
476#define SMC_outsb(a, r, p, l) BUG() 478#define SMC_outsb(a, r, p, l) BUG()
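
The smc91x change above adds #undef before redefining SMC_inb()/SMC_outb() as BUG() stubs when 8-bit access is compiled out, so the stubs cleanly replace the earlier definitions instead of provoking redefinition warnings. A tiny standalone illustration of the same preprocessor pattern (ACCESSOR and FEATURE_DISABLED are made up for the example):

    #include <stdio.h>

    #define ACCESSOR(x) ((x) + 1)       /* normal definition */

    #define FEATURE_DISABLED 1
    #if FEATURE_DISABLED
    #undef ACCESSOR                     /* drop the earlier definition first */
    #define ACCESSOR(x) (fprintf(stderr, "accessor disabled\n"), 0)
    #endif

    int main(void)
    {
        return ACCESSOR(41);            /* returns 0 and warns on stderr */
    }
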
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index ca3134540d2d..4f8910b7db2e 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1099,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev,
1099 goto err_out_free_bus_2; 1099 goto err_out_free_bus_2;
1100 } 1100 }
1101 1101
1102 if (smsc911x_mii_probe(dev) < 0) {
1103 SMSC_WARN(pdata, probe, "Error registering mii bus");
1104 goto err_out_unregister_bus_3;
1105 }
1106
1107 return 0; 1102 return 0;
1108 1103
1109err_out_unregister_bus_3:
1110 mdiobus_unregister(pdata->mii_bus);
1111err_out_free_bus_2: 1104err_out_free_bus_2:
1112 mdiobus_free(pdata->mii_bus); 1105 mdiobus_free(pdata->mii_bus);
1113err_out_1: 1106err_out_1:
@@ -1514,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev)
1514 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); 1507 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
1515} 1508}
1516 1509
1510static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1511{
1512 struct net_device *dev = dev_id;
1513 struct smsc911x_data *pdata = netdev_priv(dev);
1514 u32 intsts = smsc911x_reg_read(pdata, INT_STS);
1515 u32 inten = smsc911x_reg_read(pdata, INT_EN);
1516 int serviced = IRQ_NONE;
1517 u32 temp;
1518
1519 if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
1520 temp = smsc911x_reg_read(pdata, INT_EN);
1521 temp &= (~INT_EN_SW_INT_EN_);
1522 smsc911x_reg_write(pdata, INT_EN, temp);
1523 smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
1524 pdata->software_irq_signal = 1;
1525 smp_wmb();
1526 serviced = IRQ_HANDLED;
1527 }
1528
1529 if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
1530 /* Called when there is a multicast update scheduled and
1531 * it is now safe to complete the update */
1532 SMSC_TRACE(pdata, intr, "RX Stop interrupt");
1533 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
1534 if (pdata->multicast_update_pending)
1535 smsc911x_rx_multicast_update_workaround(pdata);
1536 serviced = IRQ_HANDLED;
1537 }
1538
1539 if (intsts & inten & INT_STS_TDFA_) {
1540 temp = smsc911x_reg_read(pdata, FIFO_INT);
1541 temp |= FIFO_INT_TX_AVAIL_LEVEL_;
1542 smsc911x_reg_write(pdata, FIFO_INT, temp);
1543 smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
1544 netif_wake_queue(dev);
1545 serviced = IRQ_HANDLED;
1546 }
1547
1548 if (unlikely(intsts & inten & INT_STS_RXE_)) {
1549 SMSC_TRACE(pdata, intr, "RX Error interrupt");
1550 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
1551 serviced = IRQ_HANDLED;
1552 }
1553
1554 if (likely(intsts & inten & INT_STS_RSFL_)) {
1555 if (likely(napi_schedule_prep(&pdata->napi))) {
1556 /* Disable Rx interrupts */
1557 temp = smsc911x_reg_read(pdata, INT_EN);
1558 temp &= (~INT_EN_RSFL_EN_);
1559 smsc911x_reg_write(pdata, INT_EN, temp);
1560 /* Schedule a NAPI poll */
1561 __napi_schedule(&pdata->napi);
1562 } else {
1563 SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
1564 }
1565 serviced = IRQ_HANDLED;
1566 }
1567
1568 return serviced;
1569}
1570
1517static int smsc911x_open(struct net_device *dev) 1571static int smsc911x_open(struct net_device *dev)
1518{ 1572{
1519 struct smsc911x_data *pdata = netdev_priv(dev); 1573 struct smsc911x_data *pdata = netdev_priv(dev);
1520 unsigned int timeout; 1574 unsigned int timeout;
1521 unsigned int temp; 1575 unsigned int temp;
1522 unsigned int intcfg; 1576 unsigned int intcfg;
1577 int retval;
1578 int irq_flags;
1523 1579
1524 /* if the phy is not yet registered, retry later*/ 1580 /* find and start the given phy */
1525 if (!dev->phydev) { 1581 if (!dev->phydev) {
1526 SMSC_WARN(pdata, hw, "phy_dev is NULL"); 1582 retval = smsc911x_mii_probe(dev);
1527 return -EAGAIN; 1583 if (retval < 0) {
1584 SMSC_WARN(pdata, probe, "Error starting phy");
1585 goto out;
1586 }
1528 } 1587 }
1529 1588
1530 /* Reset the LAN911x */ 1589 /* Reset the LAN911x */
1531 if (smsc911x_soft_reset(pdata)) { 1590 retval = smsc911x_soft_reset(pdata);
1591 if (retval) {
1532 SMSC_WARN(pdata, hw, "soft reset failed"); 1592 SMSC_WARN(pdata, hw, "soft reset failed");
1533 return -EIO; 1593 goto mii_free_out;
1534 } 1594 }
1535 1595
1536 smsc911x_reg_write(pdata, HW_CFG, 0x00050000); 1596 smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
@@ -1586,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev)
1586 pdata->software_irq_signal = 0; 1646 pdata->software_irq_signal = 0;
1587 smp_wmb(); 1647 smp_wmb();
1588 1648
1649 irq_flags = irq_get_trigger_type(dev->irq);
1650 retval = request_irq(dev->irq, smsc911x_irqhandler,
1651 irq_flags | IRQF_SHARED, dev->name, dev);
1652 if (retval) {
1653 SMSC_WARN(pdata, probe,
1654 "Unable to claim requested irq: %d", dev->irq);
1655 goto mii_free_out;
1656 }
1657
1589 temp = smsc911x_reg_read(pdata, INT_EN); 1658 temp = smsc911x_reg_read(pdata, INT_EN);
1590 temp |= INT_EN_SW_INT_EN_; 1659 temp |= INT_EN_SW_INT_EN_;
1591 smsc911x_reg_write(pdata, INT_EN, temp); 1660 smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1600,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev)
1600 if (!pdata->software_irq_signal) { 1669 if (!pdata->software_irq_signal) {
1601 netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", 1670 netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
1602 dev->irq); 1671 dev->irq);
1603 return -ENODEV; 1672 retval = -ENODEV;
1673 goto irq_stop_out;
1604 } 1674 }
1605 SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", 1675 SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
1606 dev->irq); 1676 dev->irq);
@@ -1646,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev)
1646 1716
1647 netif_start_queue(dev); 1717 netif_start_queue(dev);
1648 return 0; 1718 return 0;
1719
1720irq_stop_out:
1721 free_irq(dev->irq, dev);
1722mii_free_out:
1723 phy_disconnect(dev->phydev);
1724 dev->phydev = NULL;
1725out:
1726 return retval;
1649} 1727}
1650 1728
1651/* Entry point for stopping the interface */ 1729/* Entry point for stopping the interface */
@@ -1667,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev)
1667 dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); 1745 dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
1668 smsc911x_tx_update_txcounters(dev); 1746 smsc911x_tx_update_txcounters(dev);
1669 1747
1748 free_irq(dev->irq, dev);
1749
1670 /* Bring the PHY down */ 1750 /* Bring the PHY down */
1671 if (dev->phydev) 1751 if (dev->phydev) {
1672 phy_stop(dev->phydev); 1752 phy_stop(dev->phydev);
1753 phy_disconnect(dev->phydev);
1754 dev->phydev = NULL;
1755 }
1756 netif_carrier_off(dev);
1673 1757
1674 SMSC_TRACE(pdata, ifdown, "Interface stopped"); 1758 SMSC_TRACE(pdata, ifdown, "Interface stopped");
1675 return 0; 1759 return 0;
@@ -1811,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
1811 spin_unlock_irqrestore(&pdata->mac_lock, flags); 1895 spin_unlock_irqrestore(&pdata->mac_lock, flags);
1812} 1896}
1813 1897
1814static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1815{
1816 struct net_device *dev = dev_id;
1817 struct smsc911x_data *pdata = netdev_priv(dev);
1818 u32 intsts = smsc911x_reg_read(pdata, INT_STS);
1819 u32 inten = smsc911x_reg_read(pdata, INT_EN);
1820 int serviced = IRQ_NONE;
1821 u32 temp;
1822
1823 if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
1824 temp = smsc911x_reg_read(pdata, INT_EN);
1825 temp &= (~INT_EN_SW_INT_EN_);
1826 smsc911x_reg_write(pdata, INT_EN, temp);
1827 smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
1828 pdata->software_irq_signal = 1;
1829 smp_wmb();
1830 serviced = IRQ_HANDLED;
1831 }
1832
1833 if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
1834 /* Called when there is a multicast update scheduled and
1835 * it is now safe to complete the update */
1836 SMSC_TRACE(pdata, intr, "RX Stop interrupt");
1837 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
1838 if (pdata->multicast_update_pending)
1839 smsc911x_rx_multicast_update_workaround(pdata);
1840 serviced = IRQ_HANDLED;
1841 }
1842
1843 if (intsts & inten & INT_STS_TDFA_) {
1844 temp = smsc911x_reg_read(pdata, FIFO_INT);
1845 temp |= FIFO_INT_TX_AVAIL_LEVEL_;
1846 smsc911x_reg_write(pdata, FIFO_INT, temp);
1847 smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
1848 netif_wake_queue(dev);
1849 serviced = IRQ_HANDLED;
1850 }
1851
1852 if (unlikely(intsts & inten & INT_STS_RXE_)) {
1853 SMSC_TRACE(pdata, intr, "RX Error interrupt");
1854 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
1855 serviced = IRQ_HANDLED;
1856 }
1857
1858 if (likely(intsts & inten & INT_STS_RSFL_)) {
1859 if (likely(napi_schedule_prep(&pdata->napi))) {
1860 /* Disable Rx interrupts */
1861 temp = smsc911x_reg_read(pdata, INT_EN);
1862 temp &= (~INT_EN_RSFL_EN_);
1863 smsc911x_reg_write(pdata, INT_EN, temp);
1864 /* Schedule a NAPI poll */
1865 __napi_schedule(&pdata->napi);
1866 } else {
1867 SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
1868 }
1869 serviced = IRQ_HANDLED;
1870 }
1871
1872 return serviced;
1873}
1874
1875#ifdef CONFIG_NET_POLL_CONTROLLER 1898#ifdef CONFIG_NET_POLL_CONTROLLER
1876static void smsc911x_poll_controller(struct net_device *dev) 1899static void smsc911x_poll_controller(struct net_device *dev)
1877{ 1900{
@@ -2291,16 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
2291 pdata = netdev_priv(dev); 2314 pdata = netdev_priv(dev);
2292 BUG_ON(!pdata); 2315 BUG_ON(!pdata);
2293 BUG_ON(!pdata->ioaddr); 2316 BUG_ON(!pdata->ioaddr);
2294 BUG_ON(!dev->phydev); 2317 WARN_ON(dev->phydev);
2295 2318
2296 SMSC_TRACE(pdata, ifdown, "Stopping driver"); 2319 SMSC_TRACE(pdata, ifdown, "Stopping driver");
2297 2320
2298 phy_disconnect(dev->phydev);
2299 mdiobus_unregister(pdata->mii_bus); 2321 mdiobus_unregister(pdata->mii_bus);
2300 mdiobus_free(pdata->mii_bus); 2322 mdiobus_free(pdata->mii_bus);
2301 2323
2302 unregister_netdev(dev); 2324 unregister_netdev(dev);
2303 free_irq(dev->irq, dev);
2304 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2325 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2305 "smsc911x-memory"); 2326 "smsc911x-memory");
2306 if (!res) 2327 if (!res)
@@ -2385,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2385 struct smsc911x_data *pdata; 2406 struct smsc911x_data *pdata;
2386 struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); 2407 struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
2387 struct resource *res; 2408 struct resource *res;
2388 unsigned int intcfg = 0; 2409 int res_size, irq;
2389 int res_size, irq, irq_flags;
2390 int retval; 2410 int retval;
2391 2411
2392 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2412 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2425,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2425 2445
2426 pdata = netdev_priv(dev); 2446 pdata = netdev_priv(dev);
2427 dev->irq = irq; 2447 dev->irq = irq;
2428 irq_flags = irq_get_trigger_type(irq);
2429 pdata->ioaddr = ioremap_nocache(res->start, res_size); 2448 pdata->ioaddr = ioremap_nocache(res->start, res_size);
2430 2449
2431 pdata->dev = dev; 2450 pdata->dev = dev;
@@ -2472,43 +2491,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2472 if (retval < 0) 2491 if (retval < 0)
2473 goto out_disable_resources; 2492 goto out_disable_resources;
2474 2493
2475 /* configure irq polarity and type before connecting isr */ 2494 netif_carrier_off(dev);
2476 if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
2477 intcfg |= INT_CFG_IRQ_POL_;
2478
2479 if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
2480 intcfg |= INT_CFG_IRQ_TYPE_;
2481
2482 smsc911x_reg_write(pdata, INT_CFG, intcfg);
2483
2484 /* Ensure interrupts are globally disabled before connecting ISR */
2485 smsc911x_disable_irq_chip(dev);
2486 2495
2487 retval = request_irq(dev->irq, smsc911x_irqhandler, 2496 retval = smsc911x_mii_init(pdev, dev);
2488 irq_flags | IRQF_SHARED, dev->name, dev);
2489 if (retval) { 2497 if (retval) {
2490 SMSC_WARN(pdata, probe, 2498 SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
2491 "Unable to claim requested irq: %d", dev->irq);
2492 goto out_disable_resources; 2499 goto out_disable_resources;
2493 } 2500 }
2494 2501
2495 netif_carrier_off(dev);
2496
2497 retval = register_netdev(dev); 2502 retval = register_netdev(dev);
2498 if (retval) { 2503 if (retval) {
2499 SMSC_WARN(pdata, probe, "Error %i registering device", retval); 2504 SMSC_WARN(pdata, probe, "Error %i registering device", retval);
2500 goto out_free_irq; 2505 goto out_disable_resources;
2501 } else { 2506 } else {
2502 SMSC_TRACE(pdata, probe, 2507 SMSC_TRACE(pdata, probe,
2503 "Network interface: \"%s\"", dev->name); 2508 "Network interface: \"%s\"", dev->name);
2504 } 2509 }
2505 2510
2506 retval = smsc911x_mii_init(pdev, dev);
2507 if (retval) {
2508 SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
2509 goto out_unregister_netdev_5;
2510 }
2511
2512 spin_lock_irq(&pdata->mac_lock); 2511 spin_lock_irq(&pdata->mac_lock);
2513 2512
2514 /* Check if mac address has been specified when bringing interface up */ 2513 /* Check if mac address has been specified when bringing interface up */
@@ -2544,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2544 2543
2545 return 0; 2544 return 0;
2546 2545
2547out_unregister_netdev_5:
2548 unregister_netdev(dev);
2549out_free_irq:
2550 free_irq(dev->irq, dev);
2551out_disable_resources: 2546out_disable_resources:
2552 pm_runtime_put(&pdev->dev); 2547 pm_runtime_put(&pdev->dev);
2553 pm_runtime_disable(&pdev->dev); 2548 pm_runtime_disable(&pdev->dev);
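
The smsc911x rework above ties the IRQ line and the PHY to the interface lifecycle: both are claimed in ndo_open, after INT_CFG has been programmed, and released in ndo_stop, instead of being grabbed at probe time before the hardware is fully configured. A userspace sketch of that symmetric acquire/release shape; the helpers are placeholders for request_irq()/phy_connect() and their counterparts:

    #include <stdbool.h>
    #include <stdio.h>

    static bool irq_claimed, phy_attached;

    static int acquire_phy(void) { phy_attached = true; return 0; }
    static int acquire_irq(void) { irq_claimed = true; return 0; }
    static void release_irq(void) { irq_claimed = false; }
    static void release_phy(void) { phy_attached = false; }

    static int dev_open(void)
    {
        int err = acquire_phy();

        if (err)
            return err;
        err = acquire_irq();
        if (err) {
            release_phy();              /* unwind the part of open that succeeded */
            return err;
        }
        return 0;
    }

    static void dev_stop(void)
    {
        release_irq();                  /* mirror image of open */
        release_phy();
    }

    int main(void)
    {
        if (!dev_open())
            printf("open: irq=%d phy=%d\n", irq_claimed, phy_attached);
        dev_stop();
        printf("stop: irq=%d phy=%d\n", irq_claimed, phy_attached);
        return 0;
    }
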
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 5a3941bf250f..4490ebaed127 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -1246,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp)
1246 lp->mii_bus->read = &dwceqos_mdio_read; 1246 lp->mii_bus->read = &dwceqos_mdio_read;
1247 lp->mii_bus->write = &dwceqos_mdio_write; 1247 lp->mii_bus->write = &dwceqos_mdio_write;
1248 lp->mii_bus->priv = lp; 1248 lp->mii_bus->priv = lp;
1249 lp->mii_bus->parent = &lp->ndev->dev; 1249 lp->mii_bus->parent = &lp->pdev->dev;
1250 1250
1251 of_address_to_resource(lp->pdev->dev.of_node, 0, &res); 1251 of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
1252 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", 1252 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
@@ -2853,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev)
2853 2853
2854 ndev->features = ndev->hw_features; 2854 ndev->features = ndev->hw_features;
2855 2855
2856 netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
2857
2858 ret = register_netdev(ndev);
2859 if (ret) {
2860 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2861 goto err_out_clk_dis_aper;
2862 }
2863
2864 lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); 2856 lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
2865 if (IS_ERR(lp->phy_ref_clk)) { 2857 if (IS_ERR(lp->phy_ref_clk)) {
2866 dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); 2858 dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
2867 ret = PTR_ERR(lp->phy_ref_clk); 2859 ret = PTR_ERR(lp->phy_ref_clk);
2868 goto err_out_unregister_netdev; 2860 goto err_out_clk_dis_aper;
2869 } 2861 }
2870 2862
2871 ret = clk_prepare_enable(lp->phy_ref_clk); 2863 ret = clk_prepare_enable(lp->phy_ref_clk);
2872 if (ret) { 2864 if (ret) {
2873 dev_err(&pdev->dev, "Unable to enable device clock.\n"); 2865 dev_err(&pdev->dev, "Unable to enable device clock.\n");
2874 goto err_out_unregister_netdev; 2866 goto err_out_clk_dis_aper;
2875 } 2867 }
2876 2868
2877 lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, 2869 lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
@@ -2880,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev)
2880 ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); 2872 ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
2881 if (ret < 0) { 2873 if (ret < 0) {
2882 dev_err(&pdev->dev, "invalid fixed-link"); 2874 dev_err(&pdev->dev, "invalid fixed-link");
2883 goto err_out_unregister_clk_notifier; 2875 goto err_out_clk_dis_phy;
2884 } 2876 }
2885 2877
2886 lp->phy_node = of_node_get(lp->pdev->dev.of_node); 2878 lp->phy_node = of_node_get(lp->pdev->dev.of_node);
@@ -2889,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
2889 ret = of_get_phy_mode(lp->pdev->dev.of_node); 2881 ret = of_get_phy_mode(lp->pdev->dev.of_node);
2890 if (ret < 0) { 2882 if (ret < 0) {
2891 dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); 2883 dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
2892 goto err_out_unregister_clk_notifier; 2884 goto err_out_clk_dis_phy;
2893 } 2885 }
2894 2886
2895 lp->phy_interface = ret; 2887 lp->phy_interface = ret;
@@ -2897,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
2897 ret = dwceqos_mii_init(lp); 2889 ret = dwceqos_mii_init(lp);
2898 if (ret) { 2890 if (ret) {
2899 dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); 2891 dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
2900 goto err_out_unregister_clk_notifier; 2892 goto err_out_clk_dis_phy;
2901 } 2893 }
2902 2894
2903 ret = dwceqos_mii_probe(ndev); 2895 ret = dwceqos_mii_probe(ndev);
2904 if (ret != 0) { 2896 if (ret != 0) {
2905 netdev_err(ndev, "mii_probe fail.\n"); 2897 netdev_err(ndev, "mii_probe fail.\n");
2906 ret = -ENXIO; 2898 ret = -ENXIO;
2907 goto err_out_unregister_clk_notifier; 2899 goto err_out_clk_dis_phy;
2908 } 2900 }
2909 2901
2910 dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); 2902 dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2922,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
2922 if (ret) { 2914 if (ret) {
2923 dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", 2915 dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
2924 ret); 2916 ret);
2925 goto err_out_unregister_clk_notifier; 2917 goto err_out_clk_dis_phy;
2926 } 2918 }
2927 dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", 2919 dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
2928 pdev->id, ndev->base_addr, ndev->irq); 2920 pdev->id, ndev->base_addr, ndev->irq);
@@ -2932,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev)
2932 if (ret) { 2924 if (ret) {
2933 dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", 2925 dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
2934 ndev->irq, ret); 2926 ndev->irq, ret);
2935 goto err_out_unregister_clk_notifier; 2927 goto err_out_clk_dis_phy;
2936 } 2928 }
2937 2929
2938 if (netif_msg_probe(lp)) 2930 if (netif_msg_probe(lp))
2939 netdev_dbg(ndev, "net_local@%p\n", lp); 2931 netdev_dbg(ndev, "net_local@%p\n", lp);
2940 2932
2933 netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
2934
2935 ret = register_netdev(ndev);
2936 if (ret) {
2937 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2938 goto err_out_clk_dis_phy;
2939 }
2940
2941 return 0; 2941 return 0;
2942 2942
2943err_out_unregister_clk_notifier: 2943err_out_clk_dis_phy:
2944 clk_disable_unprepare(lp->phy_ref_clk); 2944 clk_disable_unprepare(lp->phy_ref_clk);
2945err_out_unregister_netdev:
2946 unregister_netdev(ndev);
2947err_out_clk_dis_aper: 2945err_out_clk_dis_aper:
2948 clk_disable_unprepare(lp->apb_pclk); 2946 clk_disable_unprepare(lp->apb_pclk);
2949err_out_free_netdev: 2947err_out_free_netdev:
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 47a64342cc16..b4863e4e522b 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -303,6 +303,7 @@ config MDIO_HISI_FEMAC
303 303
304config MDIO_XGENE 304config MDIO_XGENE
305 tristate "APM X-Gene SoC MDIO bus controller" 305 tristate "APM X-Gene SoC MDIO bus controller"
306 depends on ARCH_XGENE || COMPILE_TEST
306 help 307 help
307 This module provides a driver for the MDIO busses found in the 308 This module provides a driver for the MDIO busses found in the
308 APM X-Gene SoC's. 309 APM X-Gene SoC's.
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c0dda6fc0921..6e65832051d6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2782,14 +2782,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2782 struct net_device *lowerdev = NULL; 2782 struct net_device *lowerdev = NULL;
2783 2783
2784 if (conf->flags & VXLAN_F_GPE) { 2784 if (conf->flags & VXLAN_F_GPE) {
2785 if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
2786 return -EINVAL;
2787 /* For now, allow GPE only together with COLLECT_METADATA. 2785 /* For now, allow GPE only together with COLLECT_METADATA.
2788 * This can be relaxed later; in such case, the other side 2786 * This can be relaxed later; in such case, the other side
2789 * of the PtP link will have to be provided. 2787 * of the PtP link will have to be provided.
2790 */ 2788 */
2791 if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) 2789 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
2790 !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
2791 pr_info("unsupported combination of extensions\n");
2792 return -EINVAL; 2792 return -EINVAL;
2793 }
2793 2794
2794 vxlan_raw_setup(dev); 2795 vxlan_raw_setup(dev);
2795 } else { 2796 } else {
@@ -2842,6 +2843,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2842 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2843 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2843 2844
2844 needed_headroom = lowerdev->hard_header_len; 2845 needed_headroom = lowerdev->hard_header_len;
2846 } else if (vxlan_addr_multicast(&dst->remote_ip)) {
2847 pr_info("multicast destination requires interface to be specified\n");
2848 return -EINVAL;
2845 } 2849 }
2846 2850
2847 if (conf->mtu) { 2851 if (conf->mtu) {
@@ -2874,8 +2878,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2874 tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 && 2878 tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
2875 tmp->cfg.dst_port == vxlan->cfg.dst_port && 2879 tmp->cfg.dst_port == vxlan->cfg.dst_port &&
2876 (tmp->flags & VXLAN_F_RCV_FLAGS) == 2880 (tmp->flags & VXLAN_F_RCV_FLAGS) ==
2877 (vxlan->flags & VXLAN_F_RCV_FLAGS)) 2881 (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
2878 return -EEXIST; 2882 pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
2883 return -EEXIST;
2884 }
2879 } 2885 }
2880 2886
2881 dev->ethtool_ops = &vxlan_ethtool_ops; 2887 dev->ethtool_ops = &vxlan_ethtool_ops;
@@ -2909,7 +2915,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2909 struct nlattr *tb[], struct nlattr *data[]) 2915 struct nlattr *tb[], struct nlattr *data[])
2910{ 2916{
2911 struct vxlan_config conf; 2917 struct vxlan_config conf;
2912 int err;
2913 2918
2914 memset(&conf, 0, sizeof(conf)); 2919 memset(&conf, 0, sizeof(conf));
2915 2920
@@ -3018,26 +3023,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
3018 if (tb[IFLA_MTU]) 3023 if (tb[IFLA_MTU])
3019 conf.mtu = nla_get_u32(tb[IFLA_MTU]); 3024 conf.mtu = nla_get_u32(tb[IFLA_MTU]);
3020 3025
3021 err = vxlan_dev_configure(src_net, dev, &conf); 3026 return vxlan_dev_configure(src_net, dev, &conf);
3022 switch (err) {
3023 case -ENODEV:
3024 pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
3025 break;
3026
3027 case -EPERM:
3028 pr_info("IPv6 is disabled via sysctl\n");
3029 break;
3030
3031 case -EEXIST:
3032 pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
3033 break;
3034
3035 case -EINVAL:
3036 pr_info("unsupported combination of extensions\n");
3037 break;
3038 }
3039
3040 return err;
3041} 3027}
3042 3028
3043static void vxlan_dellink(struct net_device *dev, struct list_head *head) 3029static void vxlan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 78db5d679f19..24c8d65bcf34 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1525,7 +1525,7 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1525static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) 1525static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1526{ 1526{
1527 struct ath10k *ar = htt->ar; 1527 struct ath10k *ar = htt->ar;
1528 static struct ieee80211_rx_status rx_status; 1528 struct ieee80211_rx_status *rx_status = &htt->rx_status;
1529 struct sk_buff_head amsdu; 1529 struct sk_buff_head amsdu;
1530 int ret; 1530 int ret;
1531 1531
@@ -1549,11 +1549,11 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1549 return ret; 1549 return ret;
1550 } 1550 }
1551 1551
1552 ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff); 1552 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1553 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); 1553 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
1554 ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status); 1554 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1555 ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status); 1555 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
1556 ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status); 1556 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1557 1557
1558 return 0; 1558 return 0;
1559} 1559}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9a22c478dd1b..07933c51a850 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3162,7 +3162,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
3162 pci_hard_reset = ath10k_pci_qca988x_chip_reset; 3162 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3163 break; 3163 break;
3164 case QCA9887_1_0_DEVICE_ID: 3164 case QCA9887_1_0_DEVICE_ID:
3165 dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. You have been warned.\n");
3166 hw_rev = ATH10K_HW_QCA9887; 3165 hw_rev = ATH10K_HW_QCA9887;
3167 pci_ps = false; 3166 pci_ps = false;
3168 pci_soft_reset = ath10k_pci_warm_reset; 3167 pci_soft_reset = ath10k_pci_warm_reset;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index d1d0c06d627c..14b13f07cd1f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2482,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2482 return -EINVAL; 2482 return -EINVAL;
2483 } 2483 }
2484 2484
2485 ath9k_gpio_cap_init(ah);
2486
2485 if (AR_SREV_9485(ah) || 2487 if (AR_SREV_9485(ah) ||
2486 AR_SREV_9285(ah) || 2488 AR_SREV_9285(ah) ||
2487 AR_SREV_9330(ah) || 2489 AR_SREV_9330(ah) ||
@@ -2531,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2531 else 2533 else
2532 pCap->hw_caps &= ~ATH9K_HW_CAP_HT; 2534 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
2533 2535
2534 ath9k_gpio_cap_init(ah);
2535
2536 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) 2536 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
2537 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; 2537 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
2538 else 2538 else
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a394622c9022..7cb65c303f8d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -718,9 +718,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
718 if (!ath_complete_reset(sc, false)) 718 if (!ath_complete_reset(sc, false))
719 ah->reset_power_on = false; 719 ah->reset_power_on = false;
720 720
721 if (ah->led_pin >= 0) 721 if (ah->led_pin >= 0) {
722 ath9k_hw_set_gpio(ah, ah->led_pin, 722 ath9k_hw_set_gpio(ah, ah->led_pin,
723 (ah->config.led_active_high) ? 1 : 0); 723 (ah->config.led_active_high) ? 1 : 0);
724 ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL,
725 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
726 }
724 727
725 /* 728 /*
726 * Reset key cache to sane defaults (all entries cleared) instead of 729 * Reset key cache to sane defaults (all entries cleared) instead of
@@ -864,9 +867,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
864 867
865 spin_lock_bh(&sc->sc_pcu_lock); 868 spin_lock_bh(&sc->sc_pcu_lock);
866 869
867 if (ah->led_pin >= 0) 870 if (ah->led_pin >= 0) {
868 ath9k_hw_set_gpio(ah, ah->led_pin, 871 ath9k_hw_set_gpio(ah, ah->led_pin,
869 (ah->config.led_active_high) ? 0 : 1); 872 (ah->config.led_active_high) ? 0 : 1);
873 ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL);
874 }
870 875
871 ath_prepare_reset(sc); 876 ath_prepare_reset(sc);
872 877
@@ -1154,6 +1159,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
1154 bool changed = (iter_data.primary_sta != ctx->primary_sta); 1159 bool changed = (iter_data.primary_sta != ctx->primary_sta);
1155 1160
1156 if (iter_data.primary_sta) { 1161 if (iter_data.primary_sta) {
1162 iter_data.primary_beacon_vif = iter_data.primary_sta;
1157 iter_data.beacons = true; 1163 iter_data.beacons = true;
1158 ath9k_set_assoc_state(sc, iter_data.primary_sta, 1164 ath9k_set_assoc_state(sc, iter_data.primary_sta,
1159 changed); 1165 changed);
@@ -1563,13 +1569,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw,
1563 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1569 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1564 int ret = 0; 1570 int ret = 0;
1565 1571
1566 if (old_state == IEEE80211_STA_AUTH && 1572 if (old_state == IEEE80211_STA_NOTEXIST &&
1567 new_state == IEEE80211_STA_ASSOC) { 1573 new_state == IEEE80211_STA_NONE) {
1568 ret = ath9k_sta_add(hw, vif, sta); 1574 ret = ath9k_sta_add(hw, vif, sta);
1569 ath_dbg(common, CONFIG, 1575 ath_dbg(common, CONFIG,
1570 "Add station: %pM\n", sta->addr); 1576 "Add station: %pM\n", sta->addr);
1571 } else if (old_state == IEEE80211_STA_ASSOC && 1577 } else if (old_state == IEEE80211_STA_NONE &&
1572 new_state == IEEE80211_STA_AUTH) { 1578 new_state == IEEE80211_STA_NOTEXIST) {
1573 ret = ath9k_sta_remove(hw, vif, sta); 1579 ret = ath9k_sta_remove(hw, vif, sta);
1574 ath_dbg(common, CONFIG, 1580 ath_dbg(common, CONFIG,
1575 "Remove station: %pM\n", sta->addr); 1581 "Remove station: %pM\n", sta->addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 2628d5e12c64..b8aec5e5ef93 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4527,7 +4527,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4527 (u8 *)&settings->beacon.head[ie_offset], 4527 (u8 *)&settings->beacon.head[ie_offset],
4528 settings->beacon.head_len - ie_offset, 4528 settings->beacon.head_len - ie_offset,
4529 WLAN_EID_SSID); 4529 WLAN_EID_SSID);
4530 if (!ssid_ie) 4530 if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
4531 return -EINVAL; 4531 return -EINVAL;
4532 4532
4533 memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len); 4533 memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
@@ -5635,7 +5635,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
5635 ifevent->action, ifevent->flags, ifevent->ifidx, 5635 ifevent->action, ifevent->flags, ifevent->ifidx,
5636 ifevent->bsscfgidx); 5636 ifevent->bsscfgidx);
5637 5637
5638 mutex_lock(&event->vif_event_lock); 5638 spin_lock(&event->vif_event_lock);
5639 event->action = ifevent->action; 5639 event->action = ifevent->action;
5640 vif = event->vif; 5640 vif = event->vif;
5641 5641
@@ -5643,7 +5643,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
5643 case BRCMF_E_IF_ADD: 5643 case BRCMF_E_IF_ADD:
5644 /* waiting process may have timed out */ 5644 /* waiting process may have timed out */
5645 if (!cfg->vif_event.vif) { 5645 if (!cfg->vif_event.vif) {
5646 mutex_unlock(&event->vif_event_lock); 5646 spin_unlock(&event->vif_event_lock);
5647 return -EBADF; 5647 return -EBADF;
5648 } 5648 }
5649 5649
@@ -5654,24 +5654,24 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
5654 ifp->ndev->ieee80211_ptr = &vif->wdev; 5654 ifp->ndev->ieee80211_ptr = &vif->wdev;
5655 SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy)); 5655 SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
5656 } 5656 }
5657 mutex_unlock(&event->vif_event_lock); 5657 spin_unlock(&event->vif_event_lock);
5658 wake_up(&event->vif_wq); 5658 wake_up(&event->vif_wq);
5659 return 0; 5659 return 0;
5660 5660
5661 case BRCMF_E_IF_DEL: 5661 case BRCMF_E_IF_DEL:
5662 mutex_unlock(&event->vif_event_lock); 5662 spin_unlock(&event->vif_event_lock);
5663 /* event may not be upon user request */ 5663 /* event may not be upon user request */
5664 if (brcmf_cfg80211_vif_event_armed(cfg)) 5664 if (brcmf_cfg80211_vif_event_armed(cfg))
5665 wake_up(&event->vif_wq); 5665 wake_up(&event->vif_wq);
5666 return 0; 5666 return 0;
5667 5667
5668 case BRCMF_E_IF_CHANGE: 5668 case BRCMF_E_IF_CHANGE:
5669 mutex_unlock(&event->vif_event_lock); 5669 spin_unlock(&event->vif_event_lock);
5670 wake_up(&event->vif_wq); 5670 wake_up(&event->vif_wq);
5671 return 0; 5671 return 0;
5672 5672
5673 default: 5673 default:
5674 mutex_unlock(&event->vif_event_lock); 5674 spin_unlock(&event->vif_event_lock);
5675 break; 5675 break;
5676 } 5676 }
5677 return -EINVAL; 5677 return -EINVAL;
@@ -5792,7 +5792,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
5792static void init_vif_event(struct brcmf_cfg80211_vif_event *event) 5792static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
5793{ 5793{
5794 init_waitqueue_head(&event->vif_wq); 5794 init_waitqueue_head(&event->vif_wq);
5795 mutex_init(&event->vif_event_lock); 5795 spin_lock_init(&event->vif_event_lock);
5796} 5796}
5797 5797
5798static s32 brcmf_dongle_roam(struct brcmf_if *ifp) 5798static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
@@ -6691,9 +6691,9 @@ static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
6691{ 6691{
6692 u8 evt_action; 6692 u8 evt_action;
6693 6693
6694 mutex_lock(&event->vif_event_lock); 6694 spin_lock(&event->vif_event_lock);
6695 evt_action = event->action; 6695 evt_action = event->action;
6696 mutex_unlock(&event->vif_event_lock); 6696 spin_unlock(&event->vif_event_lock);
6697 return evt_action == action; 6697 return evt_action == action;
6698} 6698}
6699 6699
@@ -6702,10 +6702,10 @@ void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
6702{ 6702{
6703 struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; 6703 struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
6704 6704
6705 mutex_lock(&event->vif_event_lock); 6705 spin_lock(&event->vif_event_lock);
6706 event->vif = vif; 6706 event->vif = vif;
6707 event->action = 0; 6707 event->action = 0;
6708 mutex_unlock(&event->vif_event_lock); 6708 spin_unlock(&event->vif_event_lock);
6709} 6709}
6710 6710
6711bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg) 6711bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
@@ -6713,9 +6713,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
6713 struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; 6713 struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
6714 bool armed; 6714 bool armed;
6715 6715
6716 mutex_lock(&event->vif_event_lock); 6716 spin_lock(&event->vif_event_lock);
6717 armed = event->vif != NULL; 6717 armed = event->vif != NULL;
6718 mutex_unlock(&event->vif_event_lock); 6718 spin_unlock(&event->vif_event_lock);
6719 6719
6720 return armed; 6720 return armed;
6721} 6721}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 7d77f869b7f1..8889832c17e0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -227,7 +227,7 @@ struct escan_info {
227 */ 227 */
228struct brcmf_cfg80211_vif_event { 228struct brcmf_cfg80211_vif_event {
229 wait_queue_head_t vif_wq; 229 wait_queue_head_t vif_wq;
230 struct mutex vif_event_lock; 230 spinlock_t vif_event_lock;
231 u8 action; 231 u8 action;
232 struct brcmf_cfg80211_vif *vif; 232 struct brcmf_cfg80211_vif *vif;
233}; 233};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 8d16f0204985..65e8c8766441 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -743,7 +743,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
743 * serious troublesome side effects. The p2p module will clean 743 * serious troublesome side effects. The p2p module will clean
744 * up the ifp if needed. 744 * up the ifp if needed.
745 */ 745 */
746 brcmf_p2p_ifp_removed(ifp); 746 brcmf_p2p_ifp_removed(ifp, rtnl_locked);
747 kfree(ifp); 747 kfree(ifp);
748 } 748 }
749} 749}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 66f942f7448e..de19c7c92bc6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2297,7 +2297,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2297 return err; 2297 return err;
2298} 2298}
2299 2299
2300void brcmf_p2p_ifp_removed(struct brcmf_if *ifp) 2300void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
2301{ 2301{
2302 struct brcmf_cfg80211_info *cfg; 2302 struct brcmf_cfg80211_info *cfg;
2303 struct brcmf_cfg80211_vif *vif; 2303 struct brcmf_cfg80211_vif *vif;
@@ -2306,9 +2306,11 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
2306 vif = ifp->vif; 2306 vif = ifp->vif;
2307 cfg = wdev_to_cfg(&vif->wdev); 2307 cfg = wdev_to_cfg(&vif->wdev);
2308 cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; 2308 cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
2309 rtnl_lock(); 2309 if (!rtnl_locked)
2310 rtnl_lock();
2310 cfg80211_unregister_wdev(&vif->wdev); 2311 cfg80211_unregister_wdev(&vif->wdev);
2311 rtnl_unlock(); 2312 if (!rtnl_locked)
2313 rtnl_unlock();
2312 brcmf_free_vif(vif); 2314 brcmf_free_vif(vif);
2313} 2315}
2314 2316
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index a3bd18c2360b..8ce9447533ef 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -155,7 +155,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
155int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev); 155int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
156int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, 156int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
157 enum brcmf_fil_p2p_if_types if_type); 157 enum brcmf_fil_p2p_if_types if_type);
158void brcmf_p2p_ifp_removed(struct brcmf_if *ifp); 158void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked);
159int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev); 159int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
160void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev); 160void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
161int brcmf_p2p_scan_prep(struct wiphy *wiphy, 161int brcmf_p2p_scan_prep(struct wiphy *wiphy,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 1abcabb9b6cd..46b52bf705fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -960,5 +960,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
960 } 960 }
961 961
962 mvm->fw_dbg_conf = conf_id; 962 mvm->fw_dbg_conf = conf_id;
963 return ret; 963
964 return 0;
964} 965}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
index f7dff7612c9c..e9f1be9da7d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
@@ -105,7 +105,8 @@ iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
105{ 105{
106 u32 trig_vif = le32_to_cpu(trig->vif_type); 106 u32 trig_vif = le32_to_cpu(trig->vif_type);
107 107
108 return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif; 108 return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
109 ieee80211_vif_type_p2p(vif) == trig_vif;
109} 110}
110 111
111static inline bool 112static inline bool
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 6d6064534d59..5dd77e336617 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -624,6 +624,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
624 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 624 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
625 NL80211_FEATURE_LOW_PRIORITY_SCAN | 625 NL80211_FEATURE_LOW_PRIORITY_SCAN |
626 NL80211_FEATURE_P2P_GO_OPPPS | 626 NL80211_FEATURE_P2P_GO_OPPPS |
627 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
627 NL80211_FEATURE_DYNAMIC_SMPS | 628 NL80211_FEATURE_DYNAMIC_SMPS |
628 NL80211_FEATURE_STATIC_SMPS | 629 NL80211_FEATURE_STATIC_SMPS |
629 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; 630 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index b4fc86d5d7ef..6a615bb73042 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -467,6 +467,8 @@ struct iwl_mvm_vif {
467static inline struct iwl_mvm_vif * 467static inline struct iwl_mvm_vif *
468iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) 468iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
469{ 469{
470 if (!vif)
471 return NULL;
470 return (void *)vif->drv_priv; 472 return (void *)vif->drv_priv;
471} 473}
472 474
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index dc49c3de1f25..c47d6366875d 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
205 205
206 do { 206 do {
207 /* Check if AMSDU can accommodate this MSDU */ 207 /* Check if AMSDU can accommodate this MSDU */
208 if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN)) 208 if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) >
209 adapter->tx_buf_size)
209 break; 210 break;
210 211
211 skb_src = skb_dequeue(&pra_list->skb_head); 212 skb_src = skb_dequeue(&pra_list->skb_head);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 21bc4557b67a..d1f9a581aca8 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -6710,9 +6710,10 @@ struct mlx5_ifc_pude_reg_bits {
6710}; 6710};
6711 6711
6712struct mlx5_ifc_ptys_reg_bits { 6712struct mlx5_ifc_ptys_reg_bits {
6713 u8 an_disable_cap[0x1]; 6713 u8 reserved_at_0[0x1];
6714 u8 an_disable_admin[0x1]; 6714 u8 an_disable_admin[0x1];
6715 u8 reserved_at_2[0x6]; 6715 u8 an_disable_cap[0x1];
6716 u8 reserved_at_3[0x5];
6716 u8 local_port[0x8]; 6717 u8 local_port[0x8];
6717 u8 reserved_at_10[0xd]; 6718 u8 reserved_at_10[0xd];
6718 u8 proto_mask[0x3]; 6719 u8 proto_mask[0x3];
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3a788bf0affd..e8d79d4ebcfe 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3267,6 +3267,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
3267 napi->skb = NULL; 3267 napi->skb = NULL;
3268} 3268}
3269 3269
3270bool netdev_is_rx_handler_busy(struct net_device *dev);
3270int netdev_rx_handler_register(struct net_device *dev, 3271int netdev_rx_handler_register(struct net_device *dev,
3271 rx_handler_func_t *rx_handler, 3272 rx_handler_func_t *rx_handler,
3272 void *rx_handler_data); 3273 void *rx_handler_data);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 9b4c418bebd8..fd60eccb59a6 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -52,7 +52,7 @@ struct unix_sock {
52 struct sock sk; 52 struct sock sk;
53 struct unix_address *addr; 53 struct unix_address *addr;
54 struct path path; 54 struct path path;
55 struct mutex readlock; 55 struct mutex iolock, bindlock;
56 struct sock *peer; 56 struct sock *peer;
57 struct list_head link; 57 struct list_head link;
58 atomic_long_t inflight; 58 atomic_long_t inflight;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9c23f4d33e06..beb7610d64e9 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1102,6 +1102,7 @@ struct station_info {
1102 struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1]; 1102 struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
1103}; 1103};
1104 1104
1105#if IS_ENABLED(CONFIG_CFG80211)
1105/** 1106/**
1106 * cfg80211_get_station - retrieve information about a given station 1107 * cfg80211_get_station - retrieve information about a given station
1107 * @dev: the device where the station is supposed to be connected to 1108 * @dev: the device where the station is supposed to be connected to
@@ -1114,6 +1115,14 @@ struct station_info {
1114 */ 1115 */
1115int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, 1116int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
1116 struct station_info *sinfo); 1117 struct station_info *sinfo);
1118#else
1119static inline int cfg80211_get_station(struct net_device *dev,
1120 const u8 *mac_addr,
1121 struct station_info *sinfo)
1122{
1123 return -ENOENT;
1124}
1125#endif
1117 1126
1118/** 1127/**
1119 * enum monitor_flags - monitor flags 1128 * enum monitor_flags - monitor flags
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 4079fc18ffe4..7d4a72e75f33 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -111,6 +111,7 @@ struct fib_info {
111 unsigned char fib_scope; 111 unsigned char fib_scope;
112 unsigned char fib_type; 112 unsigned char fib_type;
113 __be32 fib_prefsrc; 113 __be32 fib_prefsrc;
114 u32 fib_tb_id;
114 u32 fib_priority; 115 u32 fib_priority;
115 u32 *fib_metrics; 116 u32 *fib_metrics;
116#define fib_mtu fib_metrics[RTAX_MTU-1] 117#define fib_mtu fib_metrics[RTAX_MTU-1]
@@ -319,7 +320,7 @@ void fib_flush_external(struct net *net);
319/* Exported by fib_semantics.c */ 320/* Exported by fib_semantics.c */
320int ip_fib_check_default(__be32 gw, struct net_device *dev); 321int ip_fib_check_default(__be32 gw, struct net_device *dev);
321int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); 322int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
322int fib_sync_down_addr(struct net *net, __be32 local); 323int fib_sync_down_addr(struct net_device *dev, __be32 local);
323int fib_sync_up(struct net_device *dev, unsigned int nh_flags); 324int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
324 325
325extern u32 fib_multipath_secret __read_mostly; 326extern u32 fib_multipath_secret __read_mostly;
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index d27588c8dbd9..1139cde0fdc5 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -36,4 +36,8 @@ void nft_meta_set_eval(const struct nft_expr *expr,
36void nft_meta_set_destroy(const struct nft_ctx *ctx, 36void nft_meta_set_destroy(const struct nft_ctx *ctx,
37 const struct nft_expr *expr); 37 const struct nft_expr *expr);
38 38
39int nft_meta_set_validate(const struct nft_ctx *ctx,
40 const struct nft_expr *expr,
41 const struct nft_data **data);
42
39#endif 43#endif
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 60fa1530006b..02e28c529b29 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -8,6 +8,10 @@ struct nft_reject {
8 8
9extern const struct nla_policy nft_reject_policy[]; 9extern const struct nla_policy nft_reject_policy[];
10 10
11int nft_reject_validate(const struct nft_ctx *ctx,
12 const struct nft_expr *expr,
13 const struct nft_data **data);
14
11int nft_reject_init(const struct nft_ctx *ctx, 15int nft_reject_init(const struct nft_ctx *ctx,
12 const struct nft_expr *expr, 16 const struct nft_expr *expr,
13 const struct nlattr * const tb[]); 17 const struct nlattr * const tb[]);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8e486203d133..abe11f085479 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,13 +80,10 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
80 80
81 BR_INPUT_SKB_CB(skb)->proxyarp_replied = false; 81 BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
82 82
83 if (dev->flags & IFF_NOARP) 83 if ((dev->flags & IFF_NOARP) ||
84 !pskb_may_pull(skb, arp_hdr_len(dev)))
84 return; 85 return;
85 86
86 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
87 dev->stats.tx_dropped++;
88 return;
89 }
90 parp = arp_hdr(skb); 87 parp = arp_hdr(skb);
91 88
92 if (parp->ar_pro != htons(ETH_P_IP) || 89 if (parp->ar_pro != htons(ETH_P_IP) ||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a5423a1eec05..c5fea9393946 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1138,7 +1138,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1138 } else { 1138 } else {
1139 err = br_ip6_multicast_add_group(br, port, 1139 err = br_ip6_multicast_add_group(br, port,
1140 &grec->grec_mca, vid); 1140 &grec->grec_mca, vid);
1141 if (!err) 1141 if (err)
1142 break; 1142 break;
1143 } 1143 }
1144 } 1144 }
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index cceac5bb658f..0833c251aef7 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -368,6 +368,8 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
368 368
369 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); 369 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
370 if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) { 370 if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
371 if (!IS_ERR(match))
372 module_put(match->me);
371 request_module("ebt_%s", m->u.name); 373 request_module("ebt_%s", m->u.name);
372 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); 374 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
373 } 375 }
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 4b901d9f2e7c..ad47a921b701 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -86,6 +86,7 @@ static const struct nft_expr_ops nft_meta_bridge_set_ops = {
86 .init = nft_meta_set_init, 86 .init = nft_meta_set_init,
87 .destroy = nft_meta_set_destroy, 87 .destroy = nft_meta_set_destroy,
88 .dump = nft_meta_set_dump, 88 .dump = nft_meta_set_dump,
89 .validate = nft_meta_set_validate,
89}; 90};
90 91
91static const struct nft_expr_ops * 92static const struct nft_expr_ops *
diff --git a/net/core/dev.c b/net/core/dev.c
index dd6ce598de89..ea6312057a71 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3975,6 +3975,22 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3975} 3975}
3976 3976
3977/** 3977/**
3978 * netdev_is_rx_handler_busy - check if receive handler is registered
3979 * @dev: device to check
3980 *
3981 * Check if a receive handler is already registered for a given device.
3982 * Return true if there one.
3983 *
3984 * The caller must hold the rtnl_mutex.
3985 */
3986bool netdev_is_rx_handler_busy(struct net_device *dev)
3987{
3988 ASSERT_RTNL();
3989 return dev && rtnl_dereference(dev->rx_handler);
3990}
3991EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
3992
3993/**
3978 * netdev_rx_handler_register - register receive handler 3994 * netdev_rx_handler_register - register receive handler
3979 * @dev: device to register a handler for 3995 * @dev: device to register a handler for
3980 * @rx_handler: receive handler to register 3996 * @rx_handler: receive handler to register
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 61ad43f61c5e..52742a02814f 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -680,11 +680,13 @@ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
680void __skb_get_hash(struct sk_buff *skb) 680void __skb_get_hash(struct sk_buff *skb)
681{ 681{
682 struct flow_keys keys; 682 struct flow_keys keys;
683 u32 hash;
683 684
684 __flow_hash_secret_init(); 685 __flow_hash_secret_init();
685 686
686 __skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd), 687 hash = ___skb_get_hash(skb, &keys, hashrnd);
687 flow_keys_have_l4(&keys)); 688
689 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
688} 690}
689EXPORT_SYMBOL(__skb_get_hash); 691EXPORT_SYMBOL(__skb_get_hash);
690 692
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 415e117967c7..062a67ca9a21 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2232,7 +2232,7 @@ static struct devinet_sysctl_table {
2232}; 2232};
2233 2233
2234static int __devinet_sysctl_register(struct net *net, char *dev_name, 2234static int __devinet_sysctl_register(struct net *net, char *dev_name,
2235 struct ipv4_devconf *p) 2235 int ifindex, struct ipv4_devconf *p)
2236{ 2236{
2237 int i; 2237 int i;
2238 struct devinet_sysctl_table *t; 2238 struct devinet_sysctl_table *t;
@@ -2255,6 +2255,8 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
2255 goto free; 2255 goto free;
2256 2256
2257 p->sysctl = t; 2257 p->sysctl = t;
2258
2259 inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
2258 return 0; 2260 return 0;
2259 2261
2260free: 2262free:
@@ -2286,7 +2288,7 @@ static int devinet_sysctl_register(struct in_device *idev)
2286 if (err) 2288 if (err)
2287 return err; 2289 return err;
2288 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name, 2290 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2289 &idev->cnf); 2291 idev->dev->ifindex, &idev->cnf);
2290 if (err) 2292 if (err)
2291 neigh_sysctl_unregister(idev->arp_parms); 2293 neigh_sysctl_unregister(idev->arp_parms);
2292 return err; 2294 return err;
@@ -2347,11 +2349,12 @@ static __net_init int devinet_init_net(struct net *net)
2347 } 2349 }
2348 2350
2349#ifdef CONFIG_SYSCTL 2351#ifdef CONFIG_SYSCTL
2350 err = __devinet_sysctl_register(net, "all", all); 2352 err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
2351 if (err < 0) 2353 if (err < 0)
2352 goto err_reg_all; 2354 goto err_reg_all;
2353 2355
2354 err = __devinet_sysctl_register(net, "default", dflt); 2356 err = __devinet_sysctl_register(net, "default",
2357 NETCONFA_IFINDEX_DEFAULT, dflt);
2355 if (err < 0) 2358 if (err < 0)
2356 goto err_reg_dflt; 2359 goto err_reg_dflt;
2357 2360
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index ef2ebeb89d0f..1b25daf8c7f1 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -509,6 +509,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
509 if (!dev) 509 if (!dev)
510 return -ENODEV; 510 return -ENODEV;
511 cfg->fc_oif = dev->ifindex; 511 cfg->fc_oif = dev->ifindex;
512 cfg->fc_table = l3mdev_fib_table(dev);
512 if (colon) { 513 if (colon) {
513 struct in_ifaddr *ifa; 514 struct in_ifaddr *ifa;
514 struct in_device *in_dev = __in_dev_get_rtnl(dev); 515 struct in_device *in_dev = __in_dev_get_rtnl(dev);
@@ -1027,7 +1028,7 @@ no_promotions:
1027 * First of all, we scan fib_info list searching 1028 * First of all, we scan fib_info list searching
1028 * for stray nexthop entries, then ignite fib_flush. 1029 * for stray nexthop entries, then ignite fib_flush.
1029 */ 1030 */
1030 if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local)) 1031 if (fib_sync_down_addr(dev, ifa->ifa_local))
1031 fib_flush(dev_net(dev)); 1032 fib_flush(dev_net(dev));
1032 } 1033 }
1033 } 1034 }
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 539fa264e67d..e9f56225e53f 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1057,6 +1057,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
1057 fi->fib_priority = cfg->fc_priority; 1057 fi->fib_priority = cfg->fc_priority;
1058 fi->fib_prefsrc = cfg->fc_prefsrc; 1058 fi->fib_prefsrc = cfg->fc_prefsrc;
1059 fi->fib_type = cfg->fc_type; 1059 fi->fib_type = cfg->fc_type;
1060 fi->fib_tb_id = cfg->fc_table;
1060 1061
1061 fi->fib_nhs = nhs; 1062 fi->fib_nhs = nhs;
1062 change_nexthops(fi) { 1063 change_nexthops(fi) {
@@ -1337,18 +1338,21 @@ nla_put_failure:
1337 * referring to it. 1338 * referring to it.
1338 * - device went down -> we must shutdown all nexthops going via it. 1339 * - device went down -> we must shutdown all nexthops going via it.
1339 */ 1340 */
1340int fib_sync_down_addr(struct net *net, __be32 local) 1341int fib_sync_down_addr(struct net_device *dev, __be32 local)
1341{ 1342{
1342 int ret = 0; 1343 int ret = 0;
1343 unsigned int hash = fib_laddr_hashfn(local); 1344 unsigned int hash = fib_laddr_hashfn(local);
1344 struct hlist_head *head = &fib_info_laddrhash[hash]; 1345 struct hlist_head *head = &fib_info_laddrhash[hash];
1346 struct net *net = dev_net(dev);
1347 int tb_id = l3mdev_fib_table(dev);
1345 struct fib_info *fi; 1348 struct fib_info *fi;
1346 1349
1347 if (!fib_info_laddrhash || local == 0) 1350 if (!fib_info_laddrhash || local == 0)
1348 return 0; 1351 return 0;
1349 1352
1350 hlist_for_each_entry(fi, head, fib_lhash) { 1353 hlist_for_each_entry(fi, head, fib_lhash) {
1351 if (!net_eq(fi->fib_net, net)) 1354 if (!net_eq(fi->fib_net, net) ||
1355 fi->fib_tb_id != tb_id)
1352 continue; 1356 continue;
1353 if (fi->fib_prefsrc == local) { 1357 if (fi->fib_prefsrc == local) {
1354 fi->fib_flags |= RTNH_F_DEAD; 1358 fi->fib_flags |= RTNH_F_DEAD;
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index c24f41c816b3..2c2553b9026c 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -46,6 +46,7 @@ static const struct nft_expr_ops nft_reject_ipv4_ops = {
46 .eval = nft_reject_ipv4_eval, 46 .eval = nft_reject_ipv4_eval,
47 .init = nft_reject_init, 47 .init = nft_reject_init,
48 .dump = nft_reject_dump, 48 .dump = nft_reject_dump,
49 .validate = nft_reject_validate,
49}; 50};
50 51
51static struct nft_expr_type nft_reject_ipv4_type __read_mostly = { 52static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 54d9f9b0120f..4e777a3243f9 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -150,6 +150,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
150 tp->segs_in = 0; 150 tp->segs_in = 0;
151 tcp_segs_in(tp, skb); 151 tcp_segs_in(tp, skb);
152 __skb_pull(skb, tcp_hdrlen(skb)); 152 __skb_pull(skb, tcp_hdrlen(skb));
153 sk_forced_mem_schedule(sk, skb->truesize);
153 skb_set_owner_r(skb, sk); 154 skb_set_owner_r(skb, sk);
154 155
155 TCP_SKB_CB(skb)->seq++; 156 TCP_SKB_CB(skb)->seq++;
@@ -226,6 +227,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
226 tcp_fastopen_add_skb(child, skb); 227 tcp_fastopen_add_skb(child, skb);
227 228
228 tcp_rsk(req)->rcv_nxt = tp->rcv_nxt; 229 tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
230 tp->rcv_wup = tp->rcv_nxt;
229 /* tcp_conn_request() is sending the SYNACK, 231 /* tcp_conn_request() is sending the SYNACK,
230 * and queues the child into listener accept queue. 232 * and queues the child into listener accept queue.
231 */ 233 */
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 028eb046ea40..9c5fc973267f 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -76,7 +76,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
76 if (!tcp_is_cwnd_limited(sk)) 76 if (!tcp_is_cwnd_limited(sk))
77 return; 77 return;
78 78
79 if (tp->snd_cwnd <= tp->snd_ssthresh) 79 if (tcp_in_slow_start(tp))
80 tcp_slow_start(tp, acked); 80 tcp_slow_start(tp, acked);
81 81
82 else if (!yeah->doing_reno_now) { 82 else if (!yeah->doing_reno_now) {
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index b644a23c3db0..41f5b504a782 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -29,7 +29,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
29 memset(fl4, 0, sizeof(*fl4)); 29 memset(fl4, 0, sizeof(*fl4));
30 fl4->daddr = daddr->a4; 30 fl4->daddr = daddr->a4;
31 fl4->flowi4_tos = tos; 31 fl4->flowi4_tos = tos;
32 fl4->flowi4_oif = oif; 32 fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
33 if (saddr) 33 if (saddr)
34 fl4->saddr = saddr->a4; 34 fl4->saddr = saddr->a4;
35 35
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f418d2eaeddd..2f1f5d439788 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -778,7 +778,14 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
778 } 778 }
779 779
780 if (p == &net->ipv6.devconf_all->forwarding) { 780 if (p == &net->ipv6.devconf_all->forwarding) {
781 int old_dflt = net->ipv6.devconf_dflt->forwarding;
782
781 net->ipv6.devconf_dflt->forwarding = newf; 783 net->ipv6.devconf_dflt->forwarding = newf;
784 if ((!newf) ^ (!old_dflt))
785 inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
786 NETCONFA_IFINDEX_DEFAULT,
787 net->ipv6.devconf_dflt);
788
782 addrconf_forward_change(net, newf); 789 addrconf_forward_change(net, newf);
783 if ((!newf) ^ (!old)) 790 if ((!newf) ^ (!old))
784 inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, 791 inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
@@ -1941,6 +1948,7 @@ errdad:
1941 spin_unlock_bh(&ifp->lock); 1948 spin_unlock_bh(&ifp->lock);
1942 1949
1943 addrconf_mod_dad_work(ifp, 0); 1950 addrconf_mod_dad_work(ifp, 0);
1951 in6_ifa_put(ifp);
1944} 1952}
1945 1953
1946/* Join to solicited addr multicast group. 1954/* Join to solicited addr multicast group.
@@ -3850,6 +3858,7 @@ static void addrconf_dad_work(struct work_struct *w)
3850 addrconf_dad_begin(ifp); 3858 addrconf_dad_begin(ifp);
3851 goto out; 3859 goto out;
3852 } else if (action == DAD_ABORT) { 3860 } else if (action == DAD_ABORT) {
3861 in6_ifa_hold(ifp);
3853 addrconf_dad_stop(ifp, 1); 3862 addrconf_dad_stop(ifp, 1);
3854 if (disable_ipv6) 3863 if (disable_ipv6)
3855 addrconf_ifdown(idev->dev, 0); 3864 addrconf_ifdown(idev->dev, 0);
@@ -6025,7 +6034,7 @@ static const struct ctl_table addrconf_sysctl[] = {
6025static int __addrconf_sysctl_register(struct net *net, char *dev_name, 6034static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6026 struct inet6_dev *idev, struct ipv6_devconf *p) 6035 struct inet6_dev *idev, struct ipv6_devconf *p)
6027{ 6036{
6028 int i; 6037 int i, ifindex;
6029 struct ctl_table *table; 6038 struct ctl_table *table;
6030 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ]; 6039 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6031 6040
@@ -6045,6 +6054,13 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6045 if (!p->sysctl_header) 6054 if (!p->sysctl_header)
6046 goto free; 6055 goto free;
6047 6056
6057 if (!strcmp(dev_name, "all"))
6058 ifindex = NETCONFA_IFINDEX_ALL;
6059 else if (!strcmp(dev_name, "default"))
6060 ifindex = NETCONFA_IFINDEX_DEFAULT;
6061 else
6062 ifindex = idev->dev->ifindex;
6063 inet6_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
6048 return 0; 6064 return 0;
6049 6065
6050free: 6066free:
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 7b0481e3738f..888543debe4e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1174,6 +1174,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1174 encap_limit = t->parms.encap_limit; 1174 encap_limit = t->parms.encap_limit;
1175 1175
1176 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 1176 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1177 fl6.flowi6_proto = IPPROTO_IPIP;
1177 1178
1178 dsfield = ipv4_get_dsfield(iph); 1179 dsfield = ipv4_get_dsfield(iph);
1179 1180
@@ -1233,6 +1234,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1233 encap_limit = t->parms.encap_limit; 1234 encap_limit = t->parms.encap_limit;
1234 1235
1235 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 1236 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1237 fl6.flowi6_proto = IPPROTO_IPV6;
1236 1238
1237 dsfield = ipv6_get_dsfield(ipv6h); 1239 dsfield = ipv6_get_dsfield(ipv6h);
1238 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 1240 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
index 533cd5719c59..92bda9908bb9 100644
--- a/net/ipv6/netfilter/nft_reject_ipv6.c
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -47,6 +47,7 @@ static const struct nft_expr_ops nft_reject_ipv6_ops = {
47 .eval = nft_reject_ipv6_eval, 47 .eval = nft_reject_ipv6_eval,
48 .init = nft_reject_init, 48 .init = nft_reject_init,
49 .dump = nft_reject_dump, 49 .dump = nft_reject_dump,
50 .validate = nft_reject_validate,
50}; 51};
51 52
52static struct nft_expr_type nft_reject_ipv6_type __read_mostly = { 53static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 0900352c924c..0e983b694ee8 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -126,8 +126,10 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
126 rt = (struct rt6_info *) dst; 126 rt = (struct rt6_info *) dst;
127 127
128 np = inet6_sk(sk); 128 np = inet6_sk(sk);
129 if (!np) 129 if (!np) {
130 return -EBADF; 130 err = -EBADF;
131 goto dst_err_out;
132 }
131 133
132 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) 134 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
133 fl6.flowi6_oif = np->mcast_oif; 135 fl6.flowi6_oif = np->mcast_oif;
@@ -163,6 +165,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
163 } 165 }
164 release_sock(sk); 166 release_sock(sk);
165 167
168dst_err_out:
169 dst_release(dst);
170
166 if (err) 171 if (err)
167 return err; 172 return err;
168 173
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 0eaab1fa6be5..00a2d40677d6 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -23,6 +23,7 @@ int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
23 23
24int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) 24int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
25{ 25{
26 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
26 XFRM_SPI_SKB_CB(skb)->family = AF_INET6; 27 XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
27 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); 28 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
28 return xfrm_input(skb, nexthdr, spi, 0); 29 return xfrm_input(skb, nexthdr, spi, 0);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 6cc97003e4a9..70a86adad875 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -36,7 +36,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
36 int err; 36 int err;
37 37
38 memset(&fl6, 0, sizeof(fl6)); 38 memset(&fl6, 0, sizeof(fl6));
39 fl6.flowi6_oif = oif; 39 fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
40 fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; 40 fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
41 memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr)); 41 memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
42 if (saddr) 42 if (saddr)
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index cb39e05b166c..411693288648 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -13,6 +13,7 @@
13#include <linux/socket.h> 13#include <linux/socket.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
16#include <linux/syscalls.h>
16#include <net/kcm.h> 17#include <net/kcm.h>
17#include <net/netns/generic.h> 18#include <net/netns/generic.h>
18#include <net/sock.h> 19#include <net/sock.h>
@@ -2029,7 +2030,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2029 if (copy_to_user((void __user *)arg, &info, 2030 if (copy_to_user((void __user *)arg, &info,
2030 sizeof(info))) { 2031 sizeof(info))) {
2031 err = -EFAULT; 2032 err = -EFAULT;
2032 sock_release(newsock); 2033 sys_close(info.fd);
2033 } 2034 }
2034 } 2035 }
2035 2036
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1e40dacaa137..a2ed3bda4ddc 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1855,6 +1855,9 @@ static __net_exit void l2tp_exit_net(struct net *net)
1855 (void)l2tp_tunnel_delete(tunnel); 1855 (void)l2tp_tunnel_delete(tunnel);
1856 } 1856 }
1857 rcu_read_unlock_bh(); 1857 rcu_read_unlock_bh();
1858
1859 flush_workqueue(l2tp_wq);
1860 rcu_barrier();
1858} 1861}
1859 1862
1860static struct pernet_operations l2tp_net_ops = { 1863static struct pernet_operations l2tp_net_ops = {
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index b5d28f14b9cf..afca7d103684 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -333,10 +333,11 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
333 if (!uc.center_freq1) 333 if (!uc.center_freq1)
334 return; 334 return;
335 335
336 /* proceed to downgrade the chandef until usable or the same */ 336 /* proceed to downgrade the chandef until usable or the same as AP BW */
337 while (uc.width > max_width || 337 while (uc.width > max_width ||
338 !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc, 338 (uc.width > sta->tdls_chandef.width &&
339 sdata->wdev.iftype)) 339 !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
340 sdata->wdev.iftype)))
340 ieee80211_chandef_downgrade(&uc); 341 ieee80211_chandef_downgrade(&uc);
341 342
342 if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) { 343 if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
index 5eefe4a355c6..75d696f11045 100644
--- a/net/netfilter/nf_tables_netdev.c
+++ b/net/netfilter/nf_tables_netdev.c
@@ -30,7 +30,6 @@ nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
30 if (!iph) 30 if (!iph)
31 return; 31 return;
32 32
33 iph = ip_hdr(skb);
34 if (iph->ihl < 5 || iph->version != 4) 33 if (iph->ihl < 5 || iph->version != 4)
35 return; 34 return;
36 35
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 70eb2f6a3b01..d44d89b56127 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -343,12 +343,12 @@ static int nfnl_acct_del(struct net *net, struct sock *nfnl,
343 struct sk_buff *skb, const struct nlmsghdr *nlh, 343 struct sk_buff *skb, const struct nlmsghdr *nlh,
344 const struct nlattr * const tb[]) 344 const struct nlattr * const tb[])
345{ 345{
346 char *acct_name; 346 struct nf_acct *cur, *tmp;
347 struct nf_acct *cur;
348 int ret = -ENOENT; 347 int ret = -ENOENT;
348 char *acct_name;
349 349
350 if (!tb[NFACCT_NAME]) { 350 if (!tb[NFACCT_NAME]) {
351 list_for_each_entry(cur, &net->nfnl_acct_list, head) 351 list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head)
352 nfnl_acct_try_del(cur); 352 nfnl_acct_try_del(cur);
353 353
354 return 0; 354 return 0;
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 68216cdc7083..139e0867e56e 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -98,31 +98,28 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
98 break; 98 break;
99 } 99 }
100 100
101 l4proto = nf_ct_l4proto_find_get(l3num, l4num);
102
103 /* This protocol is not supportted, skip. */
104 if (l4proto->l4proto != l4num) {
105 ret = -EOPNOTSUPP;
106 goto err_proto_put;
107 }
108
109 if (matching) { 101 if (matching) {
110 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 102 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
111 /* You cannot replace one timeout policy by another of 103 /* You cannot replace one timeout policy by another of
112 * different kind, sorry. 104 * different kind, sorry.
113 */ 105 */
114 if (matching->l3num != l3num || 106 if (matching->l3num != l3num ||
115 matching->l4proto->l4proto != l4num) { 107 matching->l4proto->l4proto != l4num)
116 ret = -EINVAL; 108 return -EINVAL;
117 goto err_proto_put; 109
118 } 110 return ctnl_timeout_parse_policy(&matching->data,
119 111 matching->l4proto, net,
120 ret = ctnl_timeout_parse_policy(&matching->data, 112 cda[CTA_TIMEOUT_DATA]);
121 l4proto, net,
122 cda[CTA_TIMEOUT_DATA]);
123 return ret;
124 } 113 }
125 ret = -EBUSY; 114
115 return -EBUSY;
116 }
117
118 l4proto = nf_ct_l4proto_find_get(l3num, l4num);
119
120 /* This protocol is not supportted, skip. */
121 if (l4proto->l4proto != l4num) {
122 ret = -EOPNOTSUPP;
126 goto err_proto_put; 123 goto err_proto_put;
127 } 124 }
128 125
@@ -305,7 +302,16 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
305 const struct hlist_nulls_node *nn; 302 const struct hlist_nulls_node *nn;
306 unsigned int last_hsize; 303 unsigned int last_hsize;
307 spinlock_t *lock; 304 spinlock_t *lock;
308 int i; 305 int i, cpu;
306
307 for_each_possible_cpu(cpu) {
308 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
309
310 spin_lock_bh(&pcpu->lock);
311 hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
312 untimeout(h, timeout);
313 spin_unlock_bh(&pcpu->lock);
314 }
309 315
310 local_bh_disable(); 316 local_bh_disable();
311restart: 317restart:
@@ -350,12 +356,13 @@ static int cttimeout_del_timeout(struct net *net, struct sock *ctnl,
350 const struct nlmsghdr *nlh, 356 const struct nlmsghdr *nlh,
351 const struct nlattr * const cda[]) 357 const struct nlattr * const cda[])
352{ 358{
353 struct ctnl_timeout *cur; 359 struct ctnl_timeout *cur, *tmp;
354 int ret = -ENOENT; 360 int ret = -ENOENT;
355 char *name; 361 char *name;
356 362
357 if (!cda[CTA_TIMEOUT_NAME]) { 363 if (!cda[CTA_TIMEOUT_NAME]) {
358 list_for_each_entry(cur, &net->nfct_timeout_list, head) 364 list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list,
365 head)
359 ctnl_timeout_try_del(net, cur); 366 ctnl_timeout_try_del(net, cur);
360 367
361 return 0; 368 return 0;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 2863f3493038..8a6bc7630912 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -291,10 +291,16 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
 }
 EXPORT_SYMBOL_GPL(nft_meta_get_init);
 
-static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+			  const struct nft_expr *expr,
+			  const struct nft_data **data)
 {
+	struct nft_meta *priv = nft_expr_priv(expr);
 	unsigned int hooks;
 
+	if (priv->key != NFT_META_PKTTYPE)
+		return 0;
+
 	switch (ctx->afi->family) {
 	case NFPROTO_BRIDGE:
 		hooks = 1 << NF_BR_PRE_ROUTING;
@@ -308,6 +314,7 @@ static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
 
 	return nft_chain_validate_hooks(ctx->chain, hooks);
 }
+EXPORT_SYMBOL_GPL(nft_meta_set_validate);
 
 int nft_meta_set_init(const struct nft_ctx *ctx,
 		      const struct nft_expr *expr,
@@ -327,15 +334,16 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
 		len = sizeof(u8);
 		break;
 	case NFT_META_PKTTYPE:
-		err = nft_meta_set_init_pkttype(ctx);
-		if (err)
-			return err;
 		len = sizeof(u8);
 		break;
 	default:
 		return -EOPNOTSUPP;
 	}
 
+	err = nft_meta_set_validate(ctx, expr, NULL);
+	if (err < 0)
+		return err;
+
 	priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
 	err = nft_validate_register_load(priv->sreg, len);
 	if (err < 0)
@@ -407,6 +415,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
 	.init = nft_meta_set_init,
 	.destroy = nft_meta_set_destroy,
 	.dump = nft_meta_set_dump,
+	.validate = nft_meta_set_validate,
 };
 
 static const struct nft_expr_ops *
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 0522fc9bfb0a..c64de3f7379d 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -26,11 +26,27 @@ const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_reject_policy);
 
+int nft_reject_validate(const struct nft_ctx *ctx,
+			const struct nft_expr *expr,
+			const struct nft_data **data)
+{
+	return nft_chain_validate_hooks(ctx->chain,
+					(1 << NF_INET_LOCAL_IN) |
+					(1 << NF_INET_FORWARD) |
+					(1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_reject_validate);
+
 int nft_reject_init(const struct nft_ctx *ctx,
 		    const struct nft_expr *expr,
 		    const struct nlattr * const tb[])
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
+	int err;
+
+	err = nft_reject_validate(ctx, expr, NULL);
+	if (err < 0)
+		return err;
 
 	if (tb[NFTA_REJECT_TYPE] == NULL)
 		return -EINVAL;
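
nft_reject_validate() above restricts the expression to the input, forward and output hooks via a bitmask handed to nft_chain_validate_hooks(), and is exported so the inet variant in the next file can call it as well. The mask test itself is a one-liner, sketched here with hypothetical hook numbers (the kernel's NF_INET_* constants play the same role):

    #include <stdio.h>

    enum hook { HOOK_PREROUTING, HOOK_LOCAL_IN, HOOK_FORWARD,
                HOOK_LOCAL_OUT, HOOK_POSTROUTING };

    /* Accept the expression only if the chain's hook bit is set in the
     * allowed mask; the kernel returns -EOPNOTSUPP instead of -1. */
    static int validate_hooks(enum hook chain_hook, unsigned int allowed)
    {
        return (allowed & (1U << chain_hook)) ? 0 : -1;
    }

    int main(void)
    {
        unsigned int reject_hooks = (1U << HOOK_LOCAL_IN) |
                                    (1U << HOOK_FORWARD) |
                                    (1U << HOOK_LOCAL_OUT);

        printf("local-in:    %d\n", validate_hooks(HOOK_LOCAL_IN, reject_hooks));
        printf("postrouting: %d\n", validate_hooks(HOOK_POSTROUTING, reject_hooks));
        return 0;
    }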
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 759ca5248a3d..e79d9ca2ffee 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -66,7 +66,11 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx,
 				const struct nlattr * const tb[])
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
-	int icmp_code;
+	int icmp_code, err;
+
+	err = nft_reject_validate(ctx, expr, NULL);
+	if (err < 0)
+		return err;
 
 	if (tb[NFTA_REJECT_TYPE] == NULL)
 		return -EINVAL;
@@ -124,6 +128,7 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
 	.eval = nft_reject_inet_eval,
 	.init = nft_reject_inet_init,
 	.dump = nft_reject_inet_dump,
+	.validate = nft_reject_validate,
 };
 
 static struct nft_expr_type nft_reject_inet_type __read_mostly = {
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 1f1682b9a6a8..31b7bc35895d 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -878,7 +878,7 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
 					   struct sctp_chunk *chunk,
 					   u16 chunk_len)
 {
-	size_t psize, pmtu;
+	size_t psize, pmtu, maxsize;
 	sctp_xmit_t retval = SCTP_XMIT_OK;
 
 	psize = packet->size;
@@ -906,6 +906,17 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
 		goto out;
 	}
 
+	/* Similarly, if this chunk was built before a PMTU
+	 * reduction, we have to fragment it at IP level now. So
+	 * if the packet already contains something, we need to
+	 * flush.
+	 */
+	maxsize = pmtu - packet->overhead;
+	if (packet->auth)
+		maxsize -= WORD_ROUND(packet->auth->skb->len);
+	if (chunk_len > maxsize)
+		retval = SCTP_XMIT_PMTU_FULL;
+
 	/* It is also okay to fragment if the chunk we are
 	 * adding is a control chunk, but only if current packet
 	 * is not a GSO one otherwise it causes fragmentation of
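
The new sctp_packet_will_fit() check above computes how much room a single chunk may use once the packet overhead, and a padded AUTH chunk if one is queued, are subtracted from the current PMTU; anything larger has to be fragmented at the IP level, so the packet is flushed first. The arithmetic, with made-up sizes (WORD_ROUND() is 4-byte padding):

    #include <stdio.h>

    /* Pad a length to the next 4-byte boundary, as WORD_ROUND() does. */
    static unsigned int word_round(unsigned int len)
    {
        return (len + 3U) & ~3U;
    }

    /* Room left for one chunk once headers and the padded AUTH chunk
     * are accounted for (hypothetical sizes, not kernel data). */
    static unsigned int chunk_room(unsigned int pmtu, unsigned int overhead,
                                   unsigned int auth_len)
    {
        unsigned int maxsize = pmtu - overhead;

        if (auth_len)
            maxsize -= word_round(auth_len);
        return maxsize;
    }

    int main(void)
    {
        unsigned int maxsize = chunk_room(1500, 48, 22);   /* 1500 - 48 - 24 */

        printf("maxsize = %u\n", maxsize);
        printf("chunk of 1452 %s\n",
               1452 > maxsize ? "must be fragmented at IP level" : "fits");
        return 0;
    }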
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 6b626a64b517..a04fe9be1c60 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -62,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
 
 /**
  * named_prepare_buf - allocate & initialize a publication message
+ *
+ * The buffer returned is of size INT_H_SIZE + payload size
  */
 static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
 					 u32 dest)
@@ -141,9 +143,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
 	struct publication *publ;
 	struct sk_buff *skb = NULL;
 	struct distr_item *item = NULL;
-	uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
-			ITEM_SIZE;
-	uint msg_rem = msg_dsz;
+	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+			ITEM_SIZE) * ITEM_SIZE;
+	u32 msg_rem = msg_dsz;
 
 	list_for_each_entry(publ, pls, local_list) {
 		/* Prepare next buffer: */
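
The named_distribute() change above reserves INT_H_SIZE for the message header before rounding the remaining space down to a whole number of ITEM_SIZE publication items, so a full bulk message (header plus payload, as named_prepare_buf() builds it) no longer exceeds the link MTU. The rounding, with made-up sizes:

    #include <stdio.h>

    /* Payload budget per bulk message: subtract the header, then round
     * down to a whole number of fixed-size items (hypothetical sizes). */
    static unsigned int bulk_payload(unsigned int mtu, unsigned int hdr,
                                     unsigned int item)
    {
        return ((mtu - hdr) / item) * item;
    }

    int main(void)
    {
        unsigned int mtu = 1500, hdr = 40, item = 20;
        unsigned int new_payload = bulk_payload(mtu, hdr, item);
        unsigned int old_payload = (mtu / item) * item;   /* old formula */

        printf("new: %u + %u = %u (fits %u)\n",
               new_payload, hdr, new_payload + hdr, mtu);
        printf("old: %u + %u = %u (overflows %u)\n",
               old_payload, hdr, old_payload + hdr, mtu);
        return 0;
    }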
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f1dffe84f0d5..8309687a56b0 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
 {
 	struct unix_sock *u = unix_sk(sk);
 
-	if (mutex_lock_interruptible(&u->readlock))
+	if (mutex_lock_interruptible(&u->iolock))
 		return -EINTR;
 
 	sk->sk_peek_off = val;
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 
 	return 0;
 }
@@ -779,7 +779,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
 	spin_lock_init(&u->lock);
 	atomic_long_set(&u->inflight, 0);
 	INIT_LIST_HEAD(&u->link);
-	mutex_init(&u->readlock); /* single task reading lock */
+	mutex_init(&u->iolock); /* single task reading lock */
+	mutex_init(&u->bindlock); /* single task binding lock */
 	init_waitqueue_head(&u->peer_wait);
 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 	unix_insert_socket(unix_sockets_unbound(sk), sk);
@@ -848,7 +849,7 @@ static int unix_autobind(struct socket *sock)
 	int err;
 	unsigned int retries = 0;
 
-	err = mutex_lock_interruptible(&u->readlock);
+	err = mutex_lock_interruptible(&u->bindlock);
 	if (err)
 		return err;
 
@@ -895,7 +896,7 @@ retry:
 	spin_unlock(&unix_table_lock);
 	err = 0;
 
-out:	mutex_unlock(&u->readlock);
+out:	mutex_unlock(&u->bindlock);
 	return err;
 }
 
@@ -954,20 +955,32 @@ fail:
 	return NULL;
 }
 
-static int unix_mknod(struct dentry *dentry, const struct path *path, umode_t mode,
-		      struct path *res)
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
 {
-	int err;
+	struct dentry *dentry;
+	struct path path;
+	int err = 0;
+	/*
+	 * Get the parent directory, calculate the hash for last
+	 * component.
+	 */
+	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+	err = PTR_ERR(dentry);
+	if (IS_ERR(dentry))
+		return err;
 
-	err = security_path_mknod(path, dentry, mode, 0);
+	/*
+	 * All right, let's create it.
+	 */
+	err = security_path_mknod(&path, dentry, mode, 0);
 	if (!err) {
-		err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
+		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
 		if (!err) {
-			res->mnt = mntget(path->mnt);
+			res->mnt = mntget(path.mnt);
 			res->dentry = dget(dentry);
 		}
 	}
-
+	done_path_create(&path, dentry);
 	return err;
 }
 
@@ -978,12 +991,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct unix_sock *u = unix_sk(sk);
 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
 	char *sun_path = sunaddr->sun_path;
-	int err, name_err;
+	int err;
 	unsigned int hash;
 	struct unix_address *addr;
 	struct hlist_head *list;
-	struct path path;
-	struct dentry *dentry;
 
 	err = -EINVAL;
 	if (sunaddr->sun_family != AF_UNIX)
@@ -999,34 +1010,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 	addr_len = err;
 
-	name_err = 0;
-	dentry = NULL;
-	if (sun_path[0]) {
-		/* Get the parent directory, calculate the hash for last
-		 * component.
-		 */
-		dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
-
-		if (IS_ERR(dentry)) {
-			/* delay report until after 'already bound' check */
-			name_err = PTR_ERR(dentry);
-			dentry = NULL;
-		}
-	}
-
-	err = mutex_lock_interruptible(&u->readlock);
+	err = mutex_lock_interruptible(&u->bindlock);
 	if (err)
-		goto out_path;
+		goto out;
 
 	err = -EINVAL;
 	if (u->addr)
 		goto out_up;
 
-	if (name_err) {
-		err = name_err == -EEXIST ? -EADDRINUSE : name_err;
-		goto out_up;
-	}
-
 	err = -ENOMEM;
 	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
 	if (!addr)
@@ -1037,11 +1028,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	addr->hash = hash ^ sk->sk_type;
 	atomic_set(&addr->refcnt, 1);
 
-	if (dentry) {
-		struct path u_path;
+	if (sun_path[0]) {
+		struct path path;
 		umode_t mode = S_IFSOCK |
 		       (SOCK_INODE(sock)->i_mode & ~current_umask());
-		err = unix_mknod(dentry, &path, mode, &u_path);
+		err = unix_mknod(sun_path, mode, &path);
 		if (err) {
 			if (err == -EEXIST)
 				err = -EADDRINUSE;
@@ -1049,9 +1040,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 			goto out_up;
 		}
 		addr->hash = UNIX_HASH_SIZE;
-		hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+		hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
 		spin_lock(&unix_table_lock);
-		u->path = u_path;
+		u->path = path;
 		list = &unix_socket_table[hash];
 	} else {
 		spin_lock(&unix_table_lock);
@@ -1073,11 +1064,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 out_unlock:
 	spin_unlock(&unix_table_lock);
 out_up:
-	mutex_unlock(&u->readlock);
-out_path:
-	if (dentry)
-		done_path_create(&path, dentry);
-
+	mutex_unlock(&u->bindlock);
 out:
 	return err;
 }
@@ -1969,17 +1956,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
 	if (false) {
 alloc_skb:
 		unix_state_unlock(other);
-		mutex_unlock(&unix_sk(other)->readlock);
+		mutex_unlock(&unix_sk(other)->iolock);
 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
 					      &err, 0);
 		if (!newskb)
 			goto err;
 	}
 
-	/* we must acquire readlock as we modify already present
+	/* we must acquire iolock as we modify already present
 	 * skbs in the sk_receive_queue and mess with skb->len
 	 */
-	err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
 	if (err) {
 		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
 		goto err;
@@ -2046,7 +2033,7 @@ alloc_skb:
 	}
 
 	unix_state_unlock(other);
-	mutex_unlock(&unix_sk(other)->readlock);
+	mutex_unlock(&unix_sk(other)->iolock);
 
 	other->sk_data_ready(other);
 	scm_destroy(&scm);
@@ -2055,7 +2042,7 @@ alloc_skb:
 err_state_unlock:
 	unix_state_unlock(other);
 err_unlock:
-	mutex_unlock(&unix_sk(other)->readlock);
+	mutex_unlock(&unix_sk(other)->iolock);
 err:
 	kfree_skb(newskb);
 	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
@@ -2123,7 +2110,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
 	do {
-		mutex_lock(&u->readlock);
+		mutex_lock(&u->iolock);
 
 		skip = sk_peek_offset(sk, flags);
 		skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
@@ -2131,14 +2118,14 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 		if (skb)
 			break;
 
-		mutex_unlock(&u->readlock);
+		mutex_unlock(&u->iolock);
 
 		if (err != -EAGAIN)
 			break;
 	} while (timeo &&
 		 !__skb_wait_for_more_packets(sk, &err, &timeo, last));
 
-	if (!skb) { /* implies readlock unlocked */
+	if (!skb) { /* implies iolock unlocked */
 		unix_state_lock(sk);
 		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
 		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
@@ -2203,7 +2190,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 
 out_free:
 	skb_free_datagram(sk, skb);
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 out:
 	return err;
 }
@@ -2298,7 +2285,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
 	/* Lock the socket to prevent queue disordering
 	 * while sleeps in memcpy_tomsg
 	 */
-	mutex_lock(&u->readlock);
+	mutex_lock(&u->iolock);
 
 	if (flags & MSG_PEEK)
 		skip = sk_peek_offset(sk, flags);
@@ -2340,7 +2327,7 @@ again:
 				break;
 			}
 
-			mutex_unlock(&u->readlock);
+			mutex_unlock(&u->iolock);
 
 			timeo = unix_stream_data_wait(sk, timeo, last,
 						      last_len);
@@ -2351,7 +2338,7 @@ again:
 				goto out;
 			}
 
-			mutex_lock(&u->readlock);
+			mutex_lock(&u->iolock);
 			goto redo;
 unlock:
 			unix_state_unlock(sk);
@@ -2454,7 +2441,7 @@ unlock:
 		}
 	} while (size);
 
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 	if (state->msg)
 		scm_recv(sock, state->msg, &scm, flags);
 	else
@@ -2495,9 +2482,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
 	int ret;
 	struct unix_sock *u = unix_sk(sk);
 
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 	ret = splice_to_pipe(pipe, spd);
-	mutex_lock(&u->readlock);
+	mutex_lock(&u->iolock);
 
 	return ret;
 }
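
The af_unix series above splits the old u->readlock into u->iolock for the send/receive paths and u->bindlock for bind()/autobind, and unix_mknod() now does its own kern_path_create()/done_path_create(), so the bind path and the I/O paths no longer share a mutex. A very small pthread sketch of the underlying idea, two independent locks for two independent pieces of state (hypothetical structure, not the kernel code):

    #include <pthread.h>
    #include <stdio.h>

    struct usock {
        pthread_mutex_t bindlock;   /* guards only 'bound'  */
        pthread_mutex_t iolock;     /* guards only 'queued' */
        int bound;
        int queued;
    };

    static void do_bind(struct usock *u)
    {
        pthread_mutex_lock(&u->bindlock);
        u->bound = 1;               /* slow name-setup work goes here */
        pthread_mutex_unlock(&u->bindlock);
    }

    static void do_recv(struct usock *u)
    {
        pthread_mutex_lock(&u->iolock);
        if (u->queued)
            u->queued--;            /* never waits on a binder */
        pthread_mutex_unlock(&u->iolock);
    }

    int main(void)
    {
        struct usock u = { PTHREAD_MUTEX_INITIALIZER,
                           PTHREAD_MUTEX_INITIALIZER, 0, 1 };

        do_bind(&u);
        do_recv(&u);
        printf("bound=%d queued=%d\n", u.bound, u.queued);
        return 0;
    }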
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index dbb2738e356a..6250b1cfcde5 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -958,29 +958,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 		return private(dev, iwr, cmd, info, handler);
 	}
 	/* Old driver API : call driver ioctl handler */
-	if (dev->netdev_ops->ndo_do_ioctl) {
-#ifdef CONFIG_COMPAT
-		if (info->flags & IW_REQUEST_FLAG_COMPAT) {
-			int ret = 0;
-			struct iwreq iwr_lcl;
-			struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
-
-			memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
-			iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
-			iwr_lcl.u.data.length = iwp_compat->length;
-			iwr_lcl.u.data.flags = iwp_compat->flags;
-
-			ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
-
-			iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
-			iwp_compat->length = iwr_lcl.u.data.length;
-			iwp_compat->flags = iwr_lcl.u.data.flags;
-
-			return ret;
-		} else
-#endif
-			return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
-	}
+	if (dev->netdev_ops->ndo_do_ioctl)
+		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
 	return -EOPNOTSUPP;
 }
 
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 1c4ad477ce93..6e3f0254d8a1 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -207,15 +207,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 	family = XFRM_SPI_SKB_CB(skb)->family;
 
 	/* if tunnel is present override skb->mark value with tunnel i_key */
-	if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
-		switch (family) {
-		case AF_INET:
+	switch (family) {
+	case AF_INET:
+		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
 			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
-			break;
-		case AF_INET6:
+		break;
+	case AF_INET6:
+		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
 			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
-			break;
-		}
+		break;
 	}
 
 	/* Allocate new secpath or COW existing one. */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b5e665b3cfb0..45f9cf97ea25 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -626,6 +626,10 @@ static void xfrm_hash_rebuild(struct work_struct *work)
 
 	/* re-insert all policies by order of creation */
 	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+		if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+			/* skip socket policies */
+			continue;
+		}
 		newpos = NULL;
 		chain = policy_hash_bysel(net, &policy->selector,
 					  policy->family,
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d516845e16e3..cb65d916a345 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -896,7 +896,8 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
 	struct sock *sk = cb->skb->sk;
 	struct net *net = sock_net(sk);
 
-	xfrm_state_walk_done(walk, net);
+	if (cb->args[0])
+		xfrm_state_walk_done(walk, net);
 	return 0;
 }
 
@@ -921,8 +922,6 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
 	u8 proto = 0;
 	int err;
 
-	cb->args[0] = 1;
-
 	err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
 			  xfrma_policy);
 	if (err < 0)
@@ -939,6 +938,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
 			proto = nla_get_u8(attrs[XFRMA_PROTO]);
 
 		xfrm_state_walk_init(walk, proto, filter);
+		cb->args[0] = 1;
 	}
 
 	(void) xfrm_state_walk(net, walk, dump_one_state, &info);
@@ -2051,9 +2051,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (up->hard) {
 		xfrm_policy_delete(xp, p->dir);
 		xfrm_audit_policy_delete(xp, 1, true);
-	} else {
-		// reset the timers here?
-		WARN(1, "Don't know what to do with soft policy expire\n");
 	}
 	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
 
@@ -2117,7 +2114,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	err = verify_newpolicy_info(&ua->policy);
 	if (err)
-		goto bad_policy;
+		goto free_state;
 
 	/* build an XP */
 	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
@@ -2149,8 +2146,6 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	return 0;
 
-bad_policy:
-	WARN(1, "BAD policy passed\n");
 free_state:
 	kfree(x);
 nomem: