-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h         |  14
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c    | 149
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c  | 128
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c |   2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h |   2
5 files changed, 142 insertions(+), 153 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index f4d81765221e..62636c1ed141 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -271,7 +271,7 @@ struct xcast_addr_list {
 };
 
 struct nicvf_work {
-	struct delayed_work work;
+	struct work_struct work;
 	u8 mode;
 	struct xcast_addr_list *mc;
 };
@@ -327,7 +327,11 @@ struct nicvf {
 	struct nicvf_work rx_mode_work;
 	/* spinlock to protect workqueue arguments from concurrent access */
 	spinlock_t rx_mode_wq_lock;
-
+	/* workqueue for handling kernel ndo_set_rx_mode() calls */
+	struct workqueue_struct *nicvf_rx_mode_wq;
+	/* mutex to protect VF's mailbox contents from concurrent access */
+	struct mutex rx_mode_mtx;
+	struct delayed_work link_change_work;
 	/* PTP timestamp */
 	struct cavium_ptp *ptp_clock;
 	/* Inbound timestamping is on */
@@ -575,10 +579,8 @@ struct set_ptp {
 
 struct xcast {
 	u8 msg;
-	union {
-		u8 mode;
-		u64 mac;
-	} data;
+	u8 mode;
+	u64 mac:48;
 };
 
 /* 128 bit shared memory between PF and each VF */
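Note on the struct xcast hunk above: the old union could carry either a mode or a MAC per message, and the unqualified u64 mac forced the structure to 16 bytes. With mac narrowed to a 48-bit bitfield (the exact width of an Ethernet address), msg, mode, and mac pack into a single 64-bit word, so one u64 mailbox write carries everything. A minimal userspace sketch to check the packing — struct names and sample values are illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Illustrative mirrors of the old and new layouts (not kernel code). */
struct xcast_old {
	uint8_t msg;
	union {
		uint8_t  mode;
		uint64_t mac;
	} data;
};

struct xcast_new {
	uint8_t  msg;
	uint8_t  mode;
	uint64_t mac:48;	/* an Ethernet MAC is exactly 48 bits wide */
};

int main(void)
{
	struct xcast_new x = { .msg = 0, .mode = 1, .mac = 0x001122334455ULL };

	/* With GCC on x86-64: old layout is 16 bytes, new one packs into 8,
	 * i.e. a single u64 mailbox word now holds msg, mode and mac.
	 */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct xcast_old), sizeof(struct xcast_new));
	printf("mac = %012llx\n", (unsigned long long)x.mac);
	return 0;
}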
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6c8dcb65ff03..c90252829ed3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -57,14 +57,8 @@ struct nicpf {
 #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
 #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
 	u8	*vf_lmac_map;
-	struct delayed_work dwork;
-	struct workqueue_struct *check_link;
-	u8	*link;
-	u8	*duplex;
-	u32	*speed;
 	u16	cpi_base[MAX_NUM_VFS_SUPPORTED];
 	u16	rssi_base[MAX_NUM_VFS_SUPPORTED];
-	bool	mbx_lock[MAX_NUM_VFS_SUPPORTED];
 
 	/* MSI-X */
 	u8	num_vec;
@@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
 	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
 }
 
+/* Get BGX LMAC link status and update corresponding VF
+ * if there is a change, valid only if internal L2 switch
+ * is not present otherwise VF link is always treated as up
+ */
+static void nic_link_status_get(struct nicpf *nic, u8 vf)
+{
+	union nic_mbx mbx = {};
+	struct bgx_link_status link;
+	u8 bgx, lmac;
+
+	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+	/* Get BGX, LMAC indices for the VF */
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+	/* Get interface link status */
+	bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+	/* Send a mbox message to VF with current link status */
+	mbx.link_status.link_up = link.link_up;
+	mbx.link_status.duplex = link.duplex;
+	mbx.link_status.speed = link.speed;
+	mbx.link_status.mac_type = link.mac_type;
+
+	/* reply with link status */
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
 /* Interrupt handler to handle mailbox messages from VFs */
 static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 {
@@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 	int i;
 	int ret = 0;
 
-	nic->mbx_lock[vf] = true;
-
 	mbx_addr = nic_get_mbx_addr(vf);
 	mbx_data = (u64 *)&mbx;
 
@@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 	switch (mbx.msg.msg) {
 	case NIC_MBOX_MSG_READY:
 		nic_mbx_send_ready(nic, vf);
-		if (vf < nic->num_vf_en) {
-			nic->link[vf] = 0;
-			nic->duplex[vf] = 0;
-			nic->speed[vf] = 0;
-		}
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_QS_CFG:
 		reg_addr = NIC_PF_QSET_0_127_CFG |
 			   (mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		break;
 	case NIC_MBOX_MSG_RSS_SIZE:
 		nic_send_rss_size(nic, vf);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_RSS_CFG:
 	case NIC_MBOX_MSG_RSS_CFG_CONT:
 		nic_config_rss(nic, &mbx.rss_cfg);
@@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 	case NIC_MBOX_MSG_CFG_DONE:
 		/* Last message of VF config msg sequence */
 		nic_enable_vf(nic, vf, true);
-		goto unlock;
+		break;
 	case NIC_MBOX_MSG_SHUTDOWN:
 		/* First msg in VF teardown sequence */
 		if (vf >= nic->num_vf_en)
@@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		break;
 	case NIC_MBOX_MSG_ALLOC_SQS:
 		nic_alloc_sqs(nic, &mbx.sqs_alloc);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_NICVF_PTR:
 		nic->nicvf[vf] = mbx.nicvf.nicvf;
 		break;
 	case NIC_MBOX_MSG_PNICVF_PTR:
 		nic_send_pnicvf(nic, vf);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_SNICVF_PTR:
 		nic_send_snicvf(nic, &mbx.nicvf);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_BGX_STATS:
 		nic_get_bgx_stats(nic, &mbx.bgx_stats);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_LOOPBACK:
 		ret = nic_config_loopback(nic, &mbx.lbk);
 		break;
@@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		break;
 	case NIC_MBOX_MSG_PFC:
 		nic_pause_frame(nic, vf, &mbx.pfc);
-		goto unlock;
+		return;
 	case NIC_MBOX_MSG_PTP_CFG:
 		nic_config_timestamp(nic, vf, &mbx.ptp);
 		break;
@@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
 		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
 		bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
-					mbx.xcast.data.mac,
+					mbx.xcast.mac,
 					vf < NIC_VF_PER_MBX_REG ? vf :
 					vf - NIC_VF_PER_MBX_REG);
 		break;
@@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		}
 		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
 		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-		bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode);
+		bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
 		break;
+	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+		if (vf >= nic->num_vf_en) {
+			ret = -1; /* NACK */
+			break;
+		}
+		nic_link_status_get(nic, vf);
+		return;
 	default:
 		dev_err(&nic->pdev->dev,
 			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 			mbx.msg.msg, vf);
 		nic_mbx_send_nack(nic, vf);
 	}
-unlock:
-	nic->mbx_lock[vf] = false;
 }
 
 static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
@@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
 	return 0;
 }
 
-/* Poll for BGX LMAC link status and update corresponding VF
- * if there is a change, valid only if internal L2 switch
- * is not present otherwise VF link is always treated as up
- */
-static void nic_poll_for_link(struct work_struct *work)
-{
-	union nic_mbx mbx = {};
-	struct nicpf *nic;
-	struct bgx_link_status link;
-	u8 vf, bgx, lmac;
-
-	nic = container_of(work, struct nicpf, dwork.work);
-
-	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
-
-	for (vf = 0; vf < nic->num_vf_en; vf++) {
-		/* Poll only if VF is UP */
-		if (!nic->vf_enabled[vf])
-			continue;
-
-		/* Get BGX, LMAC indices for the VF */
-		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-		/* Get interface link status */
-		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
-
-		/* Inform VF only if link status changed */
-		if (nic->link[vf] == link.link_up)
-			continue;
-
-		if (!nic->mbx_lock[vf]) {
-			nic->link[vf] = link.link_up;
-			nic->duplex[vf] = link.duplex;
-			nic->speed[vf] = link.speed;
-
-			/* Send a mbox message to VF with current link status */
-			mbx.link_status.link_up = link.link_up;
-			mbx.link_status.duplex = link.duplex;
-			mbx.link_status.speed = link.speed;
-			mbx.link_status.mac_type = link.mac_type;
-			nic_send_msg_to_vf(nic, vf, &mbx);
-		}
-	}
-	queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
-}
-
 static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct device *dev = &pdev->dev;
@@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!nic->vf_lmac_map)
 		goto err_release_regions;
 
-	nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
-	if (!nic->link)
-		goto err_release_regions;
-
-	nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
-	if (!nic->duplex)
-		goto err_release_regions;
-
-	nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL);
-	if (!nic->speed)
-		goto err_release_regions;
-
 	/* Initialize hardware */
 	nic_init_hw(nic);
 
@@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_unregister_interrupts;
 
-	/* Register a physical link status poll fn() */
-	nic->check_link = alloc_workqueue("check_link_status",
-					  WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
-	if (!nic->check_link) {
-		err = -ENOMEM;
-		goto err_disable_sriov;
-	}
-
-	INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
-	queue_delayed_work(nic->check_link, &nic->dwork, 0);
-
 	return 0;
 
-err_disable_sriov:
-	if (nic->flags & NIC_SRIOV_ENABLED)
-		pci_disable_sriov(pdev);
 err_unregister_interrupts:
 	nic_unregister_interrupts(nic);
 err_release_regions:
@@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev)
 	if (nic->flags & NIC_SRIOV_ENABLED)
 		pci_disable_sriov(pdev);
 
-	if (nic->check_link) {
-		/* Destroy work Queue */
-		cancel_delayed_work_sync(&nic->dwork);
-		destroy_workqueue(nic->check_link);
-	}
-
 	nic_unregister_interrupts(nic);
 	pci_release_regions(pdev);
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..503cfadff4ac 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
 MODULE_PARM_DESC(cpi_alg,
 		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
-/* workqueue for handling kernel ndo_set_rx_mode() calls */
-static struct workqueue_struct *nicvf_rx_mode_wq;
-
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
 	if (nic->sqs_mode)
@@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 {
 	int timeout = NIC_MBOX_MSG_TIMEOUT;
 	int sleep = 10;
+	int ret = 0;
+
+	mutex_lock(&nic->rx_mode_mtx);
 
 	nic->pf_acked = false;
 	nic->pf_nacked = false;
@@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 			netdev_err(nic->netdev,
 				   "PF NACK to mbox msg 0x%02x from VF%d\n",
 				   (mbx->msg.msg & 0xFF), nic->vf_id);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 		msleep(sleep);
 		if (nic->pf_acked)
@@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 			netdev_err(nic->netdev,
 				   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
 				   (mbx->msg.msg & 0xFF), nic->vf_id);
-			return -EBUSY;
+			ret = -EBUSY;
+			break;
 		}
 	}
-	return 0;
+	mutex_unlock(&nic->rx_mode_mtx);
+	return ret;
 }
 
 /* Checks if VF is able to comminicate with PF
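The mutex introduced above makes each PF request a single atomic transaction: the lock is held from the mailbox write until the ACK/NACK (or timeout), replacing the PF-side mbx_lock[vf] flag that previously just caused link updates to be dropped while a request was in flight. A userspace analogue of the pattern — pthreads stand in for the kernel mutex, and none of this is driver code (compile with cc sketch.c -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mbx_mtx = PTHREAD_MUTEX_INITIALIZER;
static int mailbox;		/* stands in for the 128-bit HW mailbox */
static int acked;		/* stands in for nic->pf_acked */

static int send_msg(int msg)
{
	int ret = 0;

	pthread_mutex_lock(&mbx_mtx);	/* serialize the whole transaction */
	acked = 0;
	mailbox = msg;			/* "write to mbox" */
	usleep(1000);			/* pretend the PF is working... */
	acked = 1;			/* ...and replied with an ACK */
	if (!acked || mailbox != msg)	/* would indicate a clobbered mbox */
		ret = -1;
	pthread_mutex_unlock(&mbx_mtx);
	return ret;
}

static void *worker(void *arg)
{
	int id = *(int *)arg;

	for (int i = 0; i < 100; i++)
		if (send_msg(id) < 0)
			printf("mailbox clobbered by a concurrent sender!\n");
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	int a = 1, b = 2;

	pthread_create(&t1, NULL, worker, &a);
	pthread_create(&t2, NULL, worker, &b);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	puts("done: no interleaved transactions");
	return 0;
}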
@@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
 	return 1;
 }
 
+static void nicvf_send_cfg_done(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+	if (nicvf_send_msg_to_pf(nic, &mbx)) {
+		netdev_err(nic->netdev,
+			   "PF didn't respond to CFG DONE msg\n");
+	}
+}
+
 static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
 {
 	if (bgx->rx)
@@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
 		break;
 	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
 		nic->pf_acked = true;
-		nic->link_up = mbx.link_status.link_up;
-		nic->duplex = mbx.link_status.duplex;
-		nic->speed = mbx.link_status.speed;
-		nic->mac_type = mbx.link_status.mac_type;
-		if (nic->link_up) {
-			netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
-				    nic->speed,
-				    nic->duplex == DUPLEX_FULL ?
-				    "Full" : "Half");
-			netif_carrier_on(nic->netdev);
-			netif_tx_start_all_queues(nic->netdev);
-		} else {
-			netdev_info(nic->netdev, "Link is Down\n");
-			netif_carrier_off(nic->netdev);
-			netif_tx_stop_all_queues(nic->netdev);
+		if (nic->link_up != mbx.link_status.link_up) {
+			nic->link_up = mbx.link_status.link_up;
+			nic->duplex = mbx.link_status.duplex;
+			nic->speed = mbx.link_status.speed;
+			nic->mac_type = mbx.link_status.mac_type;
+			if (nic->link_up) {
+				netdev_info(nic->netdev,
+					    "Link is Up %d Mbps %s duplex\n",
+					    nic->speed,
+					    nic->duplex == DUPLEX_FULL ?
+					    "Full" : "Half");
+				netif_carrier_on(nic->netdev);
+				netif_tx_start_all_queues(nic->netdev);
+			} else {
+				netdev_info(nic->netdev, "Link is Down\n");
+				netif_carrier_off(nic->netdev);
+				netif_tx_stop_all_queues(nic->netdev);
+			}
 		}
 		break;
 	case NIC_MBOX_MSG_ALLOC_SQS:
@@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
 	struct nicvf_cq_poll *cq_poll = NULL;
 	union nic_mbx mbx = {};
 
+	cancel_delayed_work_sync(&nic->link_change_work);
+
+	/* wait till all queued set_rx_mode tasks completes */
+	drain_workqueue(nic->nicvf_rx_mode_wq);
+
 	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
 	nicvf_send_msg_to_pf(nic, &mbx);
 
@@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
 	return nicvf_send_msg_to_pf(nic, &mbx);
 }
 
+static void nicvf_link_status_check_task(struct work_struct *work_arg)
+{
+	struct nicvf *nic = container_of(work_arg,
+					 struct nicvf,
+					 link_change_work.work);
+	union nic_mbx mbx = {};
+	mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+	nicvf_send_msg_to_pf(nic, &mbx);
+	queue_delayed_work(nic->nicvf_rx_mode_wq,
+			   &nic->link_change_work, 2 * HZ);
+}
+
 int nicvf_open(struct net_device *netdev)
 {
 	int cpu, err, qidx;
 	struct nicvf *nic = netdev_priv(netdev);
 	struct queue_set *qs = nic->qs;
 	struct nicvf_cq_poll *cq_poll = NULL;
-	union nic_mbx mbx = {};
+
+	/* wait till all queued set_rx_mode tasks completes if any */
+	drain_workqueue(nic->nicvf_rx_mode_wq);
 
 	netif_carrier_off(netdev);
 
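With the PF poller gone, the 2-second cadence now lives in nicvf_link_status_check_task() above, which re-queues itself on the VF's own nicvf_rx_mode_wq. Because that queue is created with alloc_ordered_workqueue() (see the probe hunk further down), the link poll and rx-mode updates for a given VF run strictly one at a time, which is what gives drain_workqueue() in open/stop a well-defined meaning. A userspace sketch of what "ordered" buys — a single worker draining a FIFO; illustrative only, not driver code (compile with cc ordered_wq.c -lpthread):

#include <pthread.h>
#include <stdio.h>

#define QLEN 8

typedef void (*work_fn)(void);

static work_fn items[QLEN];
static int head, tail, done;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static void queue_work_item(work_fn fn)
{
	pthread_mutex_lock(&lock);
	items[tail++ % QLEN] = fn;	/* assumes < QLEN items outstanding */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *unused)
{
	for (;;) {
		work_fn fn;

		pthread_mutex_lock(&lock);
		while (head == tail && !done)
			pthread_cond_wait(&cond, &lock);
		if (head == tail && done) {
			pthread_mutex_unlock(&lock);
			return NULL;
		}
		fn = items[head++ % QLEN];
		pthread_mutex_unlock(&lock);
		fn();	/* items run strictly one at a time, in order */
	}
}

static void set_rx_mode_task(void) { puts("rx-mode update"); }
static void link_status_task(void) { puts("link-status poll"); }

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	queue_work_item(set_rx_mode_task);
	queue_work_item(link_status_task);
	queue_work_item(set_rx_mode_task);

	pthread_mutex_lock(&lock);
	done = 1;			/* stop once the queue is empty */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);		/* analogous to drain_workqueue() */
	return 0;
}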
@@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev)
 		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
 
 	/* Send VF config done msg to PF */
-	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
-	nicvf_write_to_mbx(nic, &mbx);
+	nicvf_send_cfg_done(nic);
+
+	INIT_DELAYED_WORK(&nic->link_change_work,
+			  nicvf_link_status_check_task);
+	queue_delayed_work(nic->nicvf_rx_mode_wq,
+			   &nic->link_change_work, 0);
 
 	return 0;
 cleanup:
@@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
 
 	/* flush DMAC filters and reset RX mode */
 	mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
-	nicvf_send_msg_to_pf(nic, &mbx);
+	if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+		goto free_mc;
 
 	if (mode & BGX_XCAST_MCAST_FILTER) {
 		/* once enabling filtering, we need to signal to PF to add
 		 * its' own LMAC to the filter to accept packets for it.
 		 */
 		mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-		mbx.xcast.data.mac = 0;
-		nicvf_send_msg_to_pf(nic, &mbx);
+		mbx.xcast.mac = 0;
+		if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+			goto free_mc;
 	}
 
 	/* check if we have any specific MACs to be added to PF DMAC filter */
@@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
 		/* now go through kernel list of MACs and add them one by one */
 		for (idx = 0; idx < mc_addrs->count; idx++) {
 			mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-			mbx.xcast.data.mac = mc_addrs->mc[idx];
-			nicvf_send_msg_to_pf(nic, &mbx);
+			mbx.xcast.mac = mc_addrs->mc[idx];
+			if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+				goto free_mc;
 		}
-		kfree(mc_addrs);
 	}
 
 	/* and finally set rx mode for PF accordingly */
 	mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
-	mbx.xcast.data.mode = mode;
+	mbx.xcast.mode = mode;
 
 	nicvf_send_msg_to_pf(nic, &mbx);
+free_mc:
+	kfree(mc_addrs);
 }
 
 static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
 {
 	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
-						  work.work);
+						  work);
 	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
 	u8 mode;
 	struct xcast_addr_list *mc;
@@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
 	kfree(nic->rx_mode_work.mc);
 	nic->rx_mode_work.mc = mc_list;
 	nic->rx_mode_work.mode = mode;
-	queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+	queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
 	spin_unlock(&nic->rx_mode_wq_lock);
 }
 
@@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	INIT_WORK(&nic->reset_task, nicvf_reset_task);
 
-	INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+	nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
+							WQ_MEM_RECLAIM,
+							nic->vf_id);
+	INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
 	spin_lock_init(&nic->rx_mode_wq_lock);
+	mutex_init(&nic->rx_mode_mtx);
 
 	err = register_netdev(netdev);
 	if (err) {
@@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev)
 	nic = netdev_priv(netdev);
 	pnetdev = nic->pnicvf->netdev;
 
-	cancel_delayed_work_sync(&nic->rx_mode_work.work);
-
 	/* Check if this Qset is assigned to different VF.
 	 * If yes, clean primary and all secondary Qsets.
 	 */
 	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
 		unregister_netdev(pnetdev);
+	if (nic->nicvf_rx_mode_wq) {
+		destroy_workqueue(nic->nicvf_rx_mode_wq);
+		nic->nicvf_rx_mode_wq = NULL;
+	}
 	nicvf_unregister_interrupts(nic);
 	pci_set_drvdata(pdev, NULL);
 	if (nic->drv_stats)
@@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = {
 static int __init nicvf_init_module(void)
 {
 	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
-	nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
-						   WQ_MEM_RECLAIM);
 	return pci_register_driver(&nicvf_driver);
 }
 
 static void __exit nicvf_cleanup_module(void)
 {
-	if (nicvf_rx_mode_wq) {
-		destroy_workqueue(nicvf_rx_mode_wq);
-		nicvf_rx_mode_wq = NULL;
-	}
 	pci_unregister_driver(&nicvf_driver);
 }
 
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index e337da6ba2a4..673c57b8023f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx)
 
 	/* Disable MAC steering (NCSI traffic) */
 	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
-		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
 }
 
 static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cbdd20b9ee6f..5cbc54e9eb19 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -60,7 +60,7 @@
 #define  RX_DMACX_CAM_EN			BIT_ULL(48)
 #define  RX_DMACX_CAM_LMACID(x)			(((u64)x) << 49)
 #define  RX_DMAC_COUNT				32
-#define BGX_CMR_RX_STREERING			0x300
+#define BGX_CMR_RX_STEERING			0x300
 #define  RX_TRAFFIC_STEER_RULE_COUNT		8
 #define BGX_CMR_CHAN_MSK_AND			0x450
 #define BGX_CMR_BIST_STATUS			0x460