Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/bonding/bond_main.c                    |  35
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c           |   4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c          |   4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h          |   2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h          |  14
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c     | 149
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c   | 128
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c  |   2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h  |   2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c        |  27
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c        |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c         |   5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      |  19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c       |  15
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c              |   2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c       |  17
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c                   |   4
-rw-r--r--  drivers/net/phy/marvell10g.c                       |   6
-rw-r--r--  drivers/net/phy/mdio_bus.c                         |   1
-rw-r--r--  drivers/net/phy/realtek.c                          |   7
-rw-r--r--  drivers/net/team/team.c                            |   4
-rw-r--r--  drivers/net/usb/r8152.c                            |   2
-rw-r--r--  drivers/net/vrf.c                                  |   3
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c              |   2
24 files changed, 250 insertions(+), 208 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 485462d3087f..537c90c8eb0a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1183,29 +1183,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 		}
 	}
 
-	/* Link-local multicast packets should be passed to the
-	 * stack on the link they arrive as well as pass them to the
-	 * bond-master device. These packets are mostly usable when
-	 * stack receives it with the link on which they arrive
-	 * (e.g. LLDP) they also must be available on master. Some of
-	 * the use cases include (but are not limited to): LLDP agents
-	 * that must be able to operate both on enslaved interfaces as
-	 * well as on bonds themselves; linux bridges that must be able
-	 * to process/pass BPDUs from attached bonds when any kind of
-	 * STP version is enabled on the network.
+	/*
+	 * For packets determined by bond_should_deliver_exact_match() call to
+	 * be suppressed we want to make an exception for link-local packets.
+	 * This is necessary for e.g. LLDP daemons to be able to monitor
+	 * inactive slave links without being forced to bind to them
+	 * explicitly.
+	 *
+	 * At the same time, packets that are passed to the bonding master
+	 * (including link-local ones) can have their originating interface
+	 * determined via PACKET_ORIGDEV socket option.
 	 */
-	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
-		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
-		if (nskb) {
-			nskb->dev = bond->dev;
-			nskb->queue_mapping = 0;
-			netif_rx(nskb);
-		}
-		return RX_HANDLER_PASS;
-	}
-	if (bond_should_deliver_exact_match(skb, slave, bond))
+	if (bond_should_deliver_exact_match(skb, slave, bond)) {
+		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+			return RX_HANDLER_PASS;
 		return RX_HANDLER_EXACT;
+	}
 
 	skb->dev = bond->dev;
 
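
Note: the PACKET_ORIGDEV behaviour the new comment relies on can be observed
from userspace. A minimal sketch (illustrative only; needs CAP_NET_RAW,
binding to the bond and error handling omitted) — with the option set on a
packet socket, recvfrom() reports the originating slave's ifindex in
sockaddr_ll instead of the bond's:

    #include <arpa/inet.h>
    #include <linux/if_ether.h>
    #include <linux/if_packet.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            int one = 1;
            char buf[2048];
            struct sockaddr_ll sll;
            socklen_t slen = sizeof(sll);
            int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

            /* ask for the original (slave) device index, not the bond's */
            setsockopt(fd, SOL_PACKET, PACKET_ORIGDEV, &one, sizeof(one));

            if (recvfrom(fd, buf, sizeof(buf), 0,
                         (struct sockaddr *)&sll, &slen) > 0)
                    printf("packet arrived on ifindex %d\n", sll.sll_ifindex);
            return 0;
    }
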
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bb41becb6609..31ff1e0d1baa 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct net_device *netdev;
 	struct atl2_adapter *adapter;
-	static int cards_found;
+	static int cards_found = 0;
 	unsigned long mmio_start;
 	int mmio_len;
 	int err;
 
-	cards_found = 0;
-
 	err = pci_enable_device(pdev);
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8bc7e495b027..d95730c6e0f2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3903,7 +3903,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 			if (len)
 				break;
 			/* on first few passes, just barely sleep */
-			if (i < DFLT_HWRM_CMD_TIMEOUT)
+			if (i < HWRM_SHORT_TIMEOUT_COUNTER)
 				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
 					     HWRM_SHORT_MAX_TIMEOUT);
 			else
@@ -3926,7 +3926,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 			dma_rmb();
 			if (*valid)
 				break;
-			udelay(1);
+			usleep_range(1, 5);
 		}
 
 		if (j >= HWRM_VALID_BIT_DELAY_USEC) {
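
Note: both bnxt hunks tune the same pattern — poll fast at first, then back
off — now with sleeping waits throughout. A standalone sketch of that pattern
(hypothetical helper, not bnxt code; the driver's real bounds come from the
HWRM_* macros adjusted in bnxt.h below):

    /* needs <linux/delay.h>: poll quickly for the first short_iters passes
     * so fast firmware replies are seen with low latency, then back off so
     * slow replies do not burn CPU */
    static bool poll_short_then_long(bool (*done)(void *data), void *data,
                                     unsigned int short_iters,
                                     unsigned int total_iters)
    {
            unsigned int i;

            for (i = 0; i < total_iters; i++) {
                    if (done(data))
                            return true;
                    if (i < short_iters)
                            usleep_range(25, 40);   /* just barely sleep */
                    else
                            usleep_range(500, 1000);
            }
            return false;
    }
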
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a451796deefe..2fb653e0048d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -582,7 +582,7 @@ struct nqe_cn {
 	(HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT +		\
 	 ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
 
-#define HWRM_VALID_BIT_DELAY_USEC	20
+#define HWRM_VALID_BIT_DELAY_USEC	150
 
 #define BNXT_HWRM_CHNL_CHIMP	0
 #define BNXT_HWRM_CHNL_KONG	1
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index f4d81765221e..62636c1ed141 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -271,7 +271,7 @@ struct xcast_addr_list {
 };
 
 struct nicvf_work {
-	struct delayed_work	work;
+	struct work_struct	work;
 	u8			mode;
 	struct xcast_addr_list	*mc;
 };
@@ -327,7 +327,11 @@ struct nicvf {
 	struct nicvf_work	rx_mode_work;
 	/* spinlock to protect workqueue arguments from concurrent access */
 	spinlock_t		rx_mode_wq_lock;
-
+	/* workqueue for handling kernel ndo_set_rx_mode() calls */
+	struct workqueue_struct	*nicvf_rx_mode_wq;
+	/* mutex to protect VF's mailbox contents from concurrent access */
+	struct mutex		rx_mode_mtx;
+	struct delayed_work	link_change_work;
 	/* PTP timestamp */
 	struct cavium_ptp	*ptp_clock;
 	/* Inbound timestamping is on */
@@ -575,10 +579,8 @@ struct set_ptp {
 
 struct xcast {
 	u8	msg;
-	union {
-		u8	mode;
-		u64	mac;
-	} data;
+	u8	mode;
+	u64	mac:48;
 };
 
 /* 128 bit shared memory between PF and each VF */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6c8dcb65ff03..c90252829ed3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -57,14 +57,8 @@ struct nicpf {
 #define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
 #define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
 	u8			*vf_lmac_map;
-	struct delayed_work	dwork;
-	struct workqueue_struct	*check_link;
-	u8			*link;
-	u8			*duplex;
-	u32			*speed;
 	u16			cpi_base[MAX_NUM_VFS_SUPPORTED];
 	u16			rssi_base[MAX_NUM_VFS_SUPPORTED];
-	bool			mbx_lock[MAX_NUM_VFS_SUPPORTED];
 
 	/* MSI-X */
 	u8			num_vec;
@@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
 	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
 }
 
+/* Get BGX LMAC link status and update corresponding VF
+ * if there is a change, valid only if internal L2 switch
+ * is not present otherwise VF link is always treated as up
+ */
+static void nic_link_status_get(struct nicpf *nic, u8 vf)
+{
+	union nic_mbx mbx = {};
+	struct bgx_link_status link;
+	u8 bgx, lmac;
+
+	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+	/* Get BGX, LMAC indices for the VF */
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+	/* Get interface link status */
+	bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+	/* Send a mbox message to VF with current link status */
+	mbx.link_status.link_up = link.link_up;
+	mbx.link_status.duplex = link.duplex;
+	mbx.link_status.speed = link.speed;
+	mbx.link_status.mac_type = link.mac_type;
+
+	/* reply with link status */
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
 /* Interrupt handler to handle mailbox messages from VFs */
 static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 {
@@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 	int i;
 	int ret = 0;
 
-	nic->mbx_lock[vf] = true;
-
 	mbx_addr = nic_get_mbx_addr(vf);
 	mbx_data = (u64 *)&mbx;
 
@@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 	switch (mbx.msg.msg) {
 	case NIC_MBOX_MSG_READY:
 		nic_mbx_send_ready(nic, vf);
-		if (vf < nic->num_vf_en) {
-			nic->link[vf] = 0;
-			nic->duplex[vf] = 0;
-			nic->speed[vf] = 0;
-		}
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_QS_CFG:
 		reg_addr = NIC_PF_QSET_0_127_CFG |
 			   (mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		break;
 	case NIC_MBOX_MSG_RSS_SIZE:
 		nic_send_rss_size(nic, vf);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_RSS_CFG:
 	case NIC_MBOX_MSG_RSS_CFG_CONT:
 		nic_config_rss(nic, &mbx.rss_cfg);
@@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 	case NIC_MBOX_MSG_CFG_DONE:
 		/* Last message of VF config msg sequence */
 		nic_enable_vf(nic, vf, true);
-		goto unlock;
+		break;
 	case NIC_MBOX_MSG_SHUTDOWN:
 		/* First msg in VF teardown sequence */
 		if (vf >= nic->num_vf_en)
@@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		break;
 	case NIC_MBOX_MSG_ALLOC_SQS:
 		nic_alloc_sqs(nic, &mbx.sqs_alloc);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_NICVF_PTR:
 		nic->nicvf[vf] = mbx.nicvf.nicvf;
 		break;
 	case NIC_MBOX_MSG_PNICVF_PTR:
 		nic_send_pnicvf(nic, vf);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_SNICVF_PTR:
 		nic_send_snicvf(nic, &mbx.nicvf);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_BGX_STATS:
 		nic_get_bgx_stats(nic, &mbx.bgx_stats);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_LOOPBACK:
 		ret = nic_config_loopback(nic, &mbx.lbk);
 		break;
@@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		break;
 	case NIC_MBOX_MSG_PFC:
 		nic_pause_frame(nic, vf, &mbx.pfc);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_PTP_CFG:
 		nic_config_timestamp(nic, vf, &mbx.ptp);
 		break;
@@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
 		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
 		bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
-					mbx.xcast.data.mac,
+					mbx.xcast.mac,
 					vf < NIC_VF_PER_MBX_REG ? vf :
 					vf - NIC_VF_PER_MBX_REG);
 		break;
@@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		}
 		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
 		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-		bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode);
+		bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
 		break;
+	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+		if (vf >= nic->num_vf_en) {
+			ret = -1; /* NACK */
+			break;
+		}
+		nic_link_status_get(nic, vf);
+		return;
 	default:
 		dev_err(&nic->pdev->dev,
 			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 			mbx.msg.msg, vf);
 		nic_mbx_send_nack(nic, vf);
 	}
-unlock:
-	nic->mbx_lock[vf] = false;
 }
 
 static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
@@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
 	return 0;
 }
 
-/* Poll for BGX LMAC link status and update corresponding VF
- * if there is a change, valid only if internal L2 switch
- * is not present otherwise VF link is always treated as up
- */
-static void nic_poll_for_link(struct work_struct *work)
-{
-	union nic_mbx mbx = {};
-	struct nicpf *nic;
-	struct bgx_link_status link;
-	u8 vf, bgx, lmac;
-
-	nic = container_of(work, struct nicpf, dwork.work);
-
-	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
-
-	for (vf = 0; vf < nic->num_vf_en; vf++) {
-		/* Poll only if VF is UP */
-		if (!nic->vf_enabled[vf])
-			continue;
-
-		/* Get BGX, LMAC indices for the VF */
-		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-		/* Get interface link status */
-		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
-
-		/* Inform VF only if link status changed */
-		if (nic->link[vf] == link.link_up)
-			continue;
-
-		if (!nic->mbx_lock[vf]) {
-			nic->link[vf] = link.link_up;
-			nic->duplex[vf] = link.duplex;
-			nic->speed[vf] = link.speed;
-
-			/* Send a mbox message to VF with current link status */
-			mbx.link_status.link_up = link.link_up;
-			mbx.link_status.duplex = link.duplex;
-			mbx.link_status.speed = link.speed;
-			mbx.link_status.mac_type = link.mac_type;
-			nic_send_msg_to_vf(nic, vf, &mbx);
-		}
-	}
-	queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
-}
-
 static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct device *dev = &pdev->dev;
@@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!nic->vf_lmac_map)
 		goto err_release_regions;
 
-	nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
-	if (!nic->link)
-		goto err_release_regions;
-
-	nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
-	if (!nic->duplex)
-		goto err_release_regions;
-
-	nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL);
-	if (!nic->speed)
-		goto err_release_regions;
-
 	/* Initialize hardware */
 	nic_init_hw(nic);
 
@@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_unregister_interrupts;
 
-	/* Register a physical link status poll fn() */
-	nic->check_link = alloc_workqueue("check_link_status",
-					  WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
-	if (!nic->check_link) {
-		err = -ENOMEM;
-		goto err_disable_sriov;
-	}
-
-	INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
-	queue_delayed_work(nic->check_link, &nic->dwork, 0);
-
 	return 0;
 
-err_disable_sriov:
-	if (nic->flags & NIC_SRIOV_ENABLED)
-		pci_disable_sriov(pdev);
 err_unregister_interrupts:
 	nic_unregister_interrupts(nic);
 err_release_regions:
@@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev)
 	if (nic->flags & NIC_SRIOV_ENABLED)
 		pci_disable_sriov(pdev);
 
-	if (nic->check_link) {
-		/* Destroy work Queue */
-		cancel_delayed_work_sync(&nic->dwork);
-		destroy_workqueue(nic->check_link);
-	}
-
 	nic_unregister_interrupts(nic);
 	pci_release_regions(pdev);
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..503cfadff4ac 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
 MODULE_PARM_DESC(cpi_alg,
 		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
-/* workqueue for handling kernel ndo_set_rx_mode() calls */
-static struct workqueue_struct *nicvf_rx_mode_wq;
-
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
 	if (nic->sqs_mode)
@@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 {
 	int timeout = NIC_MBOX_MSG_TIMEOUT;
 	int sleep = 10;
+	int ret = 0;
+
+	mutex_lock(&nic->rx_mode_mtx);
 
 	nic->pf_acked = false;
 	nic->pf_nacked = false;
@@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 			netdev_err(nic->netdev,
 				   "PF NACK to mbox msg 0x%02x from VF%d\n",
 				   (mbx->msg.msg & 0xFF), nic->vf_id);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 		msleep(sleep);
 		if (nic->pf_acked)
@@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 			netdev_err(nic->netdev,
 				   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
 				   (mbx->msg.msg & 0xFF), nic->vf_id);
-			return -EBUSY;
+			ret = -EBUSY;
+			break;
 		}
 	}
-	return 0;
+	mutex_unlock(&nic->rx_mode_mtx);
+	return ret;
 }
 
 /* Checks if VF is able to comminicate with PF
@@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
 	return 1;
 }
 
+static void nicvf_send_cfg_done(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+	if (nicvf_send_msg_to_pf(nic, &mbx)) {
+		netdev_err(nic->netdev,
+			   "PF didn't respond to CFG DONE msg\n");
+	}
+}
+
 static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
 {
 	if (bgx->rx)
@@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
 		break;
 	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
 		nic->pf_acked = true;
-		nic->link_up = mbx.link_status.link_up;
-		nic->duplex = mbx.link_status.duplex;
-		nic->speed = mbx.link_status.speed;
-		nic->mac_type = mbx.link_status.mac_type;
-		if (nic->link_up) {
-			netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
-				    nic->speed,
-				    nic->duplex == DUPLEX_FULL ?
-				    "Full" : "Half");
-			netif_carrier_on(nic->netdev);
-			netif_tx_start_all_queues(nic->netdev);
-		} else {
-			netdev_info(nic->netdev, "Link is Down\n");
-			netif_carrier_off(nic->netdev);
-			netif_tx_stop_all_queues(nic->netdev);
+		if (nic->link_up != mbx.link_status.link_up) {
+			nic->link_up = mbx.link_status.link_up;
+			nic->duplex = mbx.link_status.duplex;
+			nic->speed = mbx.link_status.speed;
+			nic->mac_type = mbx.link_status.mac_type;
+			if (nic->link_up) {
+				netdev_info(nic->netdev,
+					    "Link is Up %d Mbps %s duplex\n",
+					    nic->speed,
+					    nic->duplex == DUPLEX_FULL ?
+					    "Full" : "Half");
+				netif_carrier_on(nic->netdev);
+				netif_tx_start_all_queues(nic->netdev);
+			} else {
+				netdev_info(nic->netdev, "Link is Down\n");
+				netif_carrier_off(nic->netdev);
+				netif_tx_stop_all_queues(nic->netdev);
+			}
 		}
 		break;
 	case NIC_MBOX_MSG_ALLOC_SQS:
@@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
 	struct nicvf_cq_poll *cq_poll = NULL;
 	union nic_mbx mbx = {};
 
+	cancel_delayed_work_sync(&nic->link_change_work);
+
+	/* wait till all queued set_rx_mode tasks completes */
+	drain_workqueue(nic->nicvf_rx_mode_wq);
+
 	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
 	nicvf_send_msg_to_pf(nic, &mbx);
 
@@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
 	return nicvf_send_msg_to_pf(nic, &mbx);
 }
 
+static void nicvf_link_status_check_task(struct work_struct *work_arg)
+{
+	struct nicvf *nic = container_of(work_arg,
+					 struct nicvf,
+					 link_change_work.work);
+	union nic_mbx mbx = {};
+	mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+	nicvf_send_msg_to_pf(nic, &mbx);
+	queue_delayed_work(nic->nicvf_rx_mode_wq,
+			   &nic->link_change_work, 2 * HZ);
+}
+
 int nicvf_open(struct net_device *netdev)
 {
 	int cpu, err, qidx;
 	struct nicvf *nic = netdev_priv(netdev);
 	struct queue_set *qs = nic->qs;
 	struct nicvf_cq_poll *cq_poll = NULL;
-	union nic_mbx mbx = {};
+
+	/* wait till all queued set_rx_mode tasks completes if any */
+	drain_workqueue(nic->nicvf_rx_mode_wq);
 
 	netif_carrier_off(netdev);
 
@@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev)
 		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
 
 	/* Send VF config done msg to PF */
-	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
-	nicvf_write_to_mbx(nic, &mbx);
+	nicvf_send_cfg_done(nic);
+
+	INIT_DELAYED_WORK(&nic->link_change_work,
+			  nicvf_link_status_check_task);
+	queue_delayed_work(nic->nicvf_rx_mode_wq,
+			   &nic->link_change_work, 0);
 
 	return 0;
 cleanup:
@@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
 
 	/* flush DMAC filters and reset RX mode */
 	mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
-	nicvf_send_msg_to_pf(nic, &mbx);
+	if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+		goto free_mc;
 
 	if (mode & BGX_XCAST_MCAST_FILTER) {
 		/* once enabling filtering, we need to signal to PF to add
 		 * its' own LMAC to the filter to accept packets for it.
 		 */
 		mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-		mbx.xcast.data.mac = 0;
-		nicvf_send_msg_to_pf(nic, &mbx);
+		mbx.xcast.mac = 0;
+		if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+			goto free_mc;
 	}
 
 	/* check if we have any specific MACs to be added to PF DMAC filter */
@@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
 		/* now go through kernel list of MACs and add them one by one */
 		for (idx = 0; idx < mc_addrs->count; idx++) {
 			mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-			mbx.xcast.data.mac = mc_addrs->mc[idx];
-			nicvf_send_msg_to_pf(nic, &mbx);
+			mbx.xcast.mac = mc_addrs->mc[idx];
+			if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+				goto free_mc;
 		}
-		kfree(mc_addrs);
 	}
 
 	/* and finally set rx mode for PF accordingly */
 	mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
-	mbx.xcast.data.mode = mode;
+	mbx.xcast.mode = mode;
 
 	nicvf_send_msg_to_pf(nic, &mbx);
+free_mc:
+	kfree(mc_addrs);
 }
 
 static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
 {
 	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
-						  work.work);
+						  work);
 	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
 	u8 mode;
 	struct xcast_addr_list *mc;
@@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
 	kfree(nic->rx_mode_work.mc);
 	nic->rx_mode_work.mc = mc_list;
 	nic->rx_mode_work.mode = mode;
-	queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+	queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
 	spin_unlock(&nic->rx_mode_wq_lock);
 }
 
@@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	INIT_WORK(&nic->reset_task, nicvf_reset_task);
 
-	INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+	nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
+							WQ_MEM_RECLAIM,
+							nic->vf_id);
+	INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
 	spin_lock_init(&nic->rx_mode_wq_lock);
+	mutex_init(&nic->rx_mode_mtx);
 
 	err = register_netdev(netdev);
 	if (err) {
@@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev)
 	nic = netdev_priv(netdev);
 	pnetdev = nic->pnicvf->netdev;
 
-	cancel_delayed_work_sync(&nic->rx_mode_work.work);
-
 	/* Check if this Qset is assigned to different VF.
 	 * If yes, clean primary and all secondary Qsets.
 	 */
 	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
 		unregister_netdev(pnetdev);
+	if (nic->nicvf_rx_mode_wq) {
+		destroy_workqueue(nic->nicvf_rx_mode_wq);
+		nic->nicvf_rx_mode_wq = NULL;
+	}
 	nicvf_unregister_interrupts(nic);
 	pci_set_drvdata(pdev, NULL);
 	if (nic->drv_stats)
@@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = {
 static int __init nicvf_init_module(void)
 {
 	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
-	nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
-						   WQ_MEM_RECLAIM);
 	return pci_register_driver(&nicvf_driver);
 }
 
 static void __exit nicvf_cleanup_module(void)
 {
-	if (nicvf_rx_mode_wq) {
-		destroy_workqueue(nicvf_rx_mode_wq);
-		nicvf_rx_mode_wq = NULL;
-	}
 	pci_unregister_driver(&nicvf_driver);
 }
 
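
Note: link_change_work introduced above is a self-rearming delayed work —
each run sends one NIC_MBOX_MSG_BGX_LINK_CHANGE request and requeues itself,
replacing the PF-side poller removed from nic_main.c. The generic shape of
the pattern (illustrative sketch, not driver code):

    #include <linux/workqueue.h>

    static struct workqueue_struct *wq; /* ordered, shared with other work */
    static struct delayed_work poll_work;

    static void poll_fn(struct work_struct *work)
    {
            /* ... one unit of work, e.g. one mailbox request ... */
            queue_delayed_work(wq, &poll_work, 2 * HZ); /* re-arm */
    }

    /* start: queue_delayed_work(wq, &poll_work, 0);
     * stop:  cancel_delayed_work_sync(&poll_work) — one call ends the cycle,
     * and because the workqueue is ordered, runs are serialized with any
     * set_rx_mode work queued on it. */
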
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index e337da6ba2a4..673c57b8023f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx)
 
 	/* Disable MAC steering (NCSI traffic) */
 	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
-		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
 }
 
 static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cbdd20b9ee6f..5cbc54e9eb19 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -60,7 +60,7 @@
 #define  RX_DMACX_CAM_EN			BIT_ULL(48)
 #define  RX_DMACX_CAM_LMACID(x)			(((u64)x) << 49)
 #define  RX_DMAC_COUNT				32
-#define BGX_CMR_RX_STREERING		0x300
+#define BGX_CMR_RX_STEERING		0x300
 #define  RX_TRAFFIC_STEER_RULE_COUNT		8
 #define BGX_CMR_CHAN_MSK_AND		0x450
 #define BGX_CMR_BIST_STATUS		0x460
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f52e2c46e6a7..e4ff531db14a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	     i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
 	     !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
 	if (!ok) {
+		/* Log this in case the user has forgotten to give the kernel
+		 * any buffers, even later in the application.
+		 */
 		dev_info(&vsi->back->pdev->dev,
-			 "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+			 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
 			 ring->xsk_umem ? "UMEM enabled " : "",
 			 ring->queue_index, pf_q);
 	}
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi)
 
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
 		i40e_clean_tx_ring(vsi->tx_rings[i]);
-		if (i40e_enabled_xdp_vsi(vsi))
+		if (i40e_enabled_xdp_vsi(vsi)) {
+			/* Make sure that in-progress ndo_xdp_xmit
+			 * calls are completed.
+			 */
+			synchronize_rcu();
 			i40e_clean_tx_ring(vsi->xdp_rings[i]);
+		}
 		i40e_clean_rx_ring(vsi->rx_rings[i]);
 	}
 
@@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
 	if (old_prog)
 		bpf_prog_put(old_prog);
 
+	/* Kick start the NAPI context if there is an AF_XDP socket open
+	 * on that queue id. This so that receiving will start.
+	 */
+	if (need_reset && prog)
+		for (i = 0; i < vsi->num_queue_pairs; i++)
+			if (vsi->xdp_rings[i]->xsk_umem)
+				(void)i40e_xsk_async_xmit(vsi->netdev, i);
+
 	return 0;
 }
 
@@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
 {
 	i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
-	if (i40e_enabled_xdp_vsi(vsi))
+	if (i40e_enabled_xdp_vsi(vsi)) {
+		/* Make sure that in-progress ndo_xdp_xmit calls are
+		 * completed.
+		 */
+		synchronize_rcu();
 		i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
+	}
 	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
 }
 
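
Note: the synchronize_rcu() calls added above lean on the fact that
ndo_xdp_xmit is invoked under rcu_read_lock() in the XDP redirect path. The
resulting teardown ordering, as a sketch (illustrative, not the literal
driver flow):

    static void example_xdp_ring_teardown(struct i40e_vsi *vsi, int i)
    {
            /* 1) make new ndo_xdp_xmit calls fail their state check */
            set_bit(__I40E_VSI_DOWN, vsi->state);
            /* 2) wait one RCU grace period: in-flight calls finish */
            synchronize_rcu();
            /* 3) only now is the ring free of concurrent users */
            i40e_clean_tx_ring(vsi->xdp_rings[i]);
    }

The i40e_txrx.c hunk below adds the __I40E_CONFIG_BUSY test that gives the
queue-pair reconfiguration path a state check to pair with step 1.
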
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a7e14e98889f..6c97667d20ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	unsigned int queue_index = smp_processor_id();
 	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
 	struct i40e_ring *xdp_ring;
 	int drops = 0;
 	int i;
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
 		return -ENETDOWN;
 
-	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
+	    test_bit(__I40E_CONFIG_BUSY, pf->state))
 		return -ENXIO;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 870cf654e436..3827f16e6923 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 		err = i40e_queue_pair_enable(vsi, qid);
 		if (err)
 			return err;
+
+		/* Kick start the NAPI context so that receiving will start */
+		err = i40e_xsk_async_xmit(vsi->netdev, qid);
+		if (err)
+			return err;
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index daff8183534b..cb35d8202572 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 		else
 			mrqc = IXGBE_MRQC_VMDQRSS64EN;
 
-		/* Enable L3/L4 for Tx Switched packets */
-		mrqc |= IXGBE_MRQC_L3L4TXSWEN;
+		/* Enable L3/L4 for Tx Switched packets only for X550,
+		 * older devices do not support this feature
+		 */
+		if (hw->mac.type >= ixgbe_mac_X550)
+			mrqc |= IXGBE_MRQC_L3L4TXSWEN;
 	} else {
 		if (tcs > 4)
 			mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct bpf_prog *old_prog;
+	bool need_reset;
 
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 		return -EINVAL;
@@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 		return -ENOMEM;
 
 	old_prog = xchg(&adapter->xdp_prog, prog);
+	need_reset = (!!prog != !!old_prog);
 
 	/* If transitioning XDP modes reconfigure rings */
-	if (!!prog != !!old_prog) {
+	if (need_reset) {
 		int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
 
 		if (err) {
@@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	if (old_prog)
 		bpf_prog_put(old_prog);
 
+	/* Kick start the NAPI context if there is an AF_XDP socket open
+	 * on that queue id. This so that receiving will start.
+	 */
+	if (need_reset && prog)
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			if (adapter->xdp_ring[i]->xsk_umem)
+				(void)ixgbe_xsk_async_xmit(adapter->netdev, i);
+
 	return 0;
 }
 
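
Note: the "kick start" loops added to both i40e and ixgbe address the same
gap — an AF_XDP socket bound while no XDP program was attached has never had
its queue's NAPI context scheduled, so nothing would ever run the receive
poll loop. A hedged sketch of what such an async-xmit kick does (names are
illustrative, not the actual driver implementation):

    struct example_priv {
            struct napi_struct **napi;      /* per-queue NAPI contexts */
            struct xdp_umem **umem;         /* per-queue UMEMs, NULL if unused */
    };

    static int example_xsk_async_xmit(struct net_device *dev, u32 qid)
    {
            struct example_priv *priv = netdev_priv(dev);

            if (qid >= dev->real_num_rx_queues || !priv->umem[qid])
                    return -ENXIO;

            /* run the queue's poll loop once even though no IRQ has fired;
             * napi_if_scheduled_mark_missed() avoids a double schedule */
            if (!napi_if_scheduled_mark_missed(priv->napi[qid]))
                    napi_schedule(priv->napi[qid]);

            return 0;
    }
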
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 65c3e2c979d4..36a8879536a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
 		ixgbe_txrx_ring_disable(adapter, qid);
 
 	err = ixgbe_add_xsk_umem(adapter, umem, qid);
+	if (err)
+		return err;
 
-	if (if_running)
+	if (if_running) {
 		ixgbe_txrx_ring_enable(adapter, qid);
 
-	return err;
+		/* Kick start the NAPI context so that receiving will start */
+		err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
+		if (err)
+			return err;
+	}
+
+	return 0;
 }
 
 static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
@@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
 	dma_addr_t dma;
 
 	while (budget-- > 0) {
-		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
+		    !netif_carrier_ok(xdp_ring->netdev)) {
 			work_done = false;
 			break;
 		}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d4568eb2297..8433fb9c3eee 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2146,7 +2146,7 @@ err_drop_frame:
 		if (unlikely(!skb))
 			goto err_drop_frame_ret_pool;
 
-		dma_sync_single_range_for_cpu(dev->dev.parent,
+		dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
 					      rx_desc->buf_phys_addr,
 					      MVNETA_MH_SIZE + NET_SKB_PAD,
 					      rx_bytes,
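
Note: this follows the general DMA API rule that sync calls must be made on
the same struct device that created the mapping — with hardware buffer
management the buffers are mapped by the buffer manager's platform device,
not the Ethernet controller's parent. In sketch form (illustrative):

    /* whichever device mapped the buffer owns its DMA ops / IOMMU domain */
    static void example_rx_sync(struct device *mapping_dev, dma_addr_t handle,
                                unsigned long off, size_t len)
    {
            dma_sync_single_range_for_cpu(mapping_dev, handle, off, len,
                                          DMA_FROM_DEVICE);
    }
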
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e23ca90289f7..0a868c829b90 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
 static int
 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
-	      enum alu_op alu_op, bool skip)
+	      enum alu_op alu_op)
 {
 	const struct bpf_insn *insn = &meta->insn;
 
-	if (skip) {
-		meta->skip = true;
-		return 0;
-	}
-
 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
 
@@ -2309,7 +2304,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
+	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
 }
 
 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2319,7 +2314,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
 }
 
 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2329,7 +2324,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
 }
 
 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2339,7 +2334,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
}
 
 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2349,7 +2344,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
 }
 
 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
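
Note on why the skip shortcut likely had to go: eBPF requires every 32-bit
ALU result to be zero-extended into the upper half of the destination
register, so even an immediate that cannot change the low 32 bits is not a
no-op. For example:

    /* r0 = 0xdeadbeefcafef00d before this instruction */
    BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 0),
    /* required result: r0 = 0x00000000cafef00d — even the "trivial" XOR
     * must clear the upper 32 bits, which a skipped instruction never did */

With the parameter gone, wrp_alu32_imm() always emits both the ALU op and the
wrp_immed() that zeroes the high word.
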
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 7cdac77d0c68..07e41c42bcf5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -499,6 +499,8 @@ static int ipvlan_nl_changelink(struct net_device *dev,
 
 	if (!data)
 		return 0;
+	if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
+		return -EPERM;
 
 	if (data[IFLA_IPVLAN_MODE]) {
 		u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -601,6 +603,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
 		struct ipvl_dev *tmp = netdev_priv(phy_dev);
 
 		phy_dev = tmp->phy_dev;
+		if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
 	} else if (!netif_is_ipvlan_port(phy_dev)) {
 		/* Exit early if the underlying link is invalid or busy */
 		if (phy_dev->type != ARPHRD_ETHER ||
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 82ab6ed3b74e..6bac602094bd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -26,6 +26,8 @@
 #include <linux/marvell_phy.h>
 #include <linux/phy.h>
 
+#define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK	0x01e0
+
 enum {
 	MV_PCS_BASE_T		= 0x0000,
 	MV_PCS_BASE_R		= 0x1000,
@@ -386,8 +388,10 @@ static int mv3310_config_aneg(struct phy_device *phydev)
 	else
 		reg = 0;
 
+	/* Make sure we clear unsupported 2.5G/5G advertising */
 	ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
-			    MDIO_AN_10GBT_CTRL_ADV10G, reg);
+			    MDIO_AN_10GBT_CTRL_ADV10G |
+			    MDIO_AN_10GBT_CTRL_ADV_NBT_MASK, reg);
 	if (ret < 0)
 		return ret;
 	if (ret > 0)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 66b9cfe692fc..7368616286ae 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -379,7 +379,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
 	err = device_register(&bus->dev);
 	if (err) {
 		pr_err("mii_bus %s failed to register\n", bus->id);
-		put_device(&bus->dev);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index c6010fb1aa0f..cb4a23041a94 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -282,6 +282,13 @@ static struct phy_driver realtek_drvs[] = {
 		.name		= "RTL8366RB Gigabit Ethernet",
 		.features	= PHY_GBIT_FEATURES,
 		.config_init	= &rtl8366rb_config_init,
+		/* These interrupts are handled by the irq controller
+		 * embedded inside the RTL8366RB, they get unmasked when the
+		 * irq is requested and ACKed by reading the status register,
+		 * which is done by the irqchip code.
+		 */
+		.ack_interrupt	= genphy_no_ack_interrupt,
+		.config_intr	= genphy_no_config_intr,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
 	},
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 958f1cf67282..6ce3f666d142 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1256,7 +1256,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 	list_add_tail_rcu(&port->list, &team->port_list);
 	team_port_enable(team, port);
 	__team_compute_features(team);
-	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
+	__team_port_change_port_added(port, !!netif_oper_up(port_dev));
 	__team_options_change_check(team);
 
 	netdev_info(dev, "Port device %s added\n", portname);
@@ -2915,7 +2915,7 @@ static int team_device_event(struct notifier_block *unused,
 
 	switch (event) {
 	case NETDEV_UP:
-		if (netif_carrier_ok(dev))
+		if (netif_oper_up(dev))
 			team_port_change_check(port, true);
 		break;
 	case NETDEV_DOWN:
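
Note: netif_carrier_ok() only reflects physical carrier, while
netif_oper_up() tests the RFC 2863 operational state, which also accounts
for dormant/lower-layer-down conditions. Their definitions in
include/linux/netdevice.h are approximately:

    static inline bool netif_carrier_ok(const struct net_device *dev)
    {
            return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
    }

    static inline bool netif_oper_up(const struct net_device *dev)
    {
            return (dev->operstate == IF_OPER_UP ||
                    dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
    }

So a team port that has carrier but is still dormant (e.g. mid 802.1X
authentication) is no longer reported as up.
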
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ada6baf8847a..86c8c64fbb0f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1179,7 +1179,7 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
 	} else {
 		/* test for RTL8153-BND and RTL8153-BD */
 		ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
-		if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK)) {
+		if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
 			netif_dbg(tp, probe, tp->netdev,
 				  "Invalid variant for MAC pass through\n");
 			return -ENODEV;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 95909e262ba4..7c1430ed0244 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev)
 
 	/* default to no qdisc; user can add if desired */
 	dev->priv_flags |= IFF_NO_QUEUE;
+
+	dev->min_mtu = 0;
+	dev->max_mtu = 0;
 }
 
 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
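
Note: zeroing both fields removes the MTU range restriction for VRF devices
rather than pinning the MTU to 0 — in the core, a max_mtu of 0 disables the
upper-bound check entirely. The relevant dev_set_mtu() validation in
net/core/dev.c is approximately:

    if (new_mtu < 0 || new_mtu < dev->min_mtu)
            return -EINVAL;
    if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
            return -EINVAL;
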
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 320edcac4699..6359053bd0c7 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3554,7 +3554,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
 			goto out_err;
 		}
 
-		genlmsg_reply(skb, info);
+		res = genlmsg_reply(skb, info);
 		break;
 	}
 