-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c          99
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h            5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c       23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c    8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c     11
5 files changed, 112 insertions, 34 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aacec8bc19d5..dc5de275352a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = {
 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
 };
 
+static struct workqueue_struct *bnxt_pf_wq;
+
 static bool bnxt_vf_pciid(enum board_idx idx)
 {
 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
 	return 0;
 }
 
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+	if (BNXT_PF(bp))
+		queue_work(bnxt_pf_wq, &bp->sp_task);
+	else
+		schedule_work(&bp->sp_task);
+}
+
+static void bnxt_cancel_sp_work(struct bnxt *bp)
+{
+	if (BNXT_PF(bp))
+		flush_workqueue(bnxt_pf_wq);
+	else
+		cancel_work_sync(&bp->sp_task);
+}
+
 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
 	if (!rxr->bnapi->in_reset) {
 		rxr->bnapi->in_reset = true;
 		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 	rxr->rx_next_cons = 0xffff;
 }
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
 	default:
 		goto async_event_process_exit;
 	}
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 async_event_process_exit:
 	bnxt_ulp_async_events(bp, cmpl);
 	return 0;
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
 
 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
 		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 		break;
 
 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3448,6 +3466,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
 }
 
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+			      int timeout)
+{
+	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+}
+
 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 {
 	int rc;
@@ -6327,7 +6351,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 	}
 
 	if (link_re_init) {
+		mutex_lock(&bp->link_lock);
 		rc = bnxt_update_phy_setting(bp);
+		mutex_unlock(&bp->link_lock);
 		if (rc)
 			netdev_warn(bp->dev, "failed to update phy settings\n");
 	}
@@ -6647,7 +6673,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 		vnic->rx_mask = mask;
 
 		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 }
 
@@ -6920,7 +6946,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
 
 	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
 	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6952,7 +6978,7 @@ static void bnxt_timer(unsigned long data)
 	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
 	    bp->stats_coal_ticks) {
 		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 bnxt_restart_timer:
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7025,30 +7051,28 @@ static void bnxt_sp_task(struct work_struct *work)
 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
 		bnxt_hwrm_port_qstats(bp);
 
-	/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
-	 * must be the last functions to be called before exiting.
-	 */
 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
-		int rc = 0;
+		int rc;
 
+		mutex_lock(&bp->link_lock);
 		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
 				       &bp->sp_event))
 			bnxt_hwrm_phy_qcaps(bp);
 
-		bnxt_rtnl_lock_sp(bp);
-		if (test_bit(BNXT_STATE_OPEN, &bp->state))
-			rc = bnxt_update_link(bp, true);
-		bnxt_rtnl_unlock_sp(bp);
+		rc = bnxt_update_link(bp, true);
+		mutex_unlock(&bp->link_lock);
 		if (rc)
 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
 				   rc);
 	}
 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
-		bnxt_rtnl_lock_sp(bp);
-		if (test_bit(BNXT_STATE_OPEN, &bp->state))
-			bnxt_get_port_module_status(bp);
-		bnxt_rtnl_unlock_sp(bp);
+		mutex_lock(&bp->link_lock);
+		bnxt_get_port_module_status(bp);
+		mutex_unlock(&bp->link_lock);
 	}
+	/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
+	 * must be the last functions to be called before exiting.
+	 */
 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
 		bnxt_reset(bp, false);
 
@@ -7433,7 +7457,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	spin_unlock_bh(&bp->ntp_fltr_lock);
 
 	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 
 	return new_fltr->sw_id;
 
@@ -7516,7 +7540,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
 		if (bp->vxlan_port_cnt == 1) {
 			bp->vxlan_port = ti->port;
 			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
-			schedule_work(&bp->sp_task);
+			bnxt_queue_sp_work(bp);
 		}
 		break;
 	case UDP_TUNNEL_TYPE_GENEVE:
@@ -7533,7 +7557,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
 		return;
 	}
 
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 }
 
 static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7572,7 +7596,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
 		return;
 	}
 
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 }
 
 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7720,7 +7744,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	pci_disable_pcie_error_reporting(pdev);
 	unregister_netdev(dev);
 	bnxt_shutdown_tc(bp);
-	cancel_work_sync(&bp->sp_task);
+	bnxt_cancel_sp_work(bp);
 	bp->sp_event = 0;
 
 	bnxt_clear_int_mode(bp);
@@ -7748,6 +7772,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
 			   rc);
 		return rc;
 	}
+	mutex_init(&bp->link_lock);
 
 	rc = bnxt_update_link(bp, false);
 	if (rc) {
@@ -7946,7 +7971,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
 	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
 	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
 
-	if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+	if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
 	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
 		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
 	else
@@ -8138,8 +8163,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	else
 		device_set_wakeup_capable(&pdev->dev, false);
 
-	if (BNXT_PF(bp))
+	if (BNXT_PF(bp)) {
+		if (!bnxt_pf_wq) {
+			bnxt_pf_wq =
+				create_singlethread_workqueue("bnxt_pf_wq");
+			if (!bnxt_pf_wq) {
+				dev_err(&pdev->dev, "Unable to create workqueue.\n");
+				goto init_err_pci_clean;
+			}
+		}
 		bnxt_init_tc(bp);
+	}
 
 	rc = register_netdev(dev);
 	if (rc)
@@ -8375,4 +8409,17 @@ static struct pci_driver bnxt_pci_driver = {
 #endif
 };
 
-module_pci_driver(bnxt_pci_driver);
+static int __init bnxt_init(void)
+{
+	return pci_register_driver(&bnxt_pci_driver);
+}
+
+static void __exit bnxt_exit(void)
+{
+	pci_unregister_driver(&bnxt_pci_driver);
+	if (bnxt_pf_wq)
+		destroy_workqueue(bnxt_pf_wq);
+}
+
+module_init(bnxt_init);
+module_exit(bnxt_exit);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7b888d4b2b55..c911e69ff25f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1290,6 +1290,10 @@ struct bnxt {
 	unsigned long *ntp_fltr_bmap;
 	int ntp_fltr_count;
 
+	/* To protect link related settings during link changes and
+	 * ethtool settings changes.
+	 */
+	struct mutex link_lock;
 	struct bnxt_link_info link_info;
 	struct ethtool_eee eee;
 	u32 lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
 void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
 int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
 int hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index aa1f3a2c7a78..fed37cd9ae1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
 	req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
 		u8 *pri2cos = &resp->pri0_cos_queue_id;
 		int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 			}
 		}
 	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
 }
 
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 	int rc, i;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (rc)
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		mutex_unlock(&bp->hwrm_cmd_lock);
 		return rc;
+	}
 
 	data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
 	for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 			}
 		}
 	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
 	return 0;
 }
 
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
 	int rc;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (rc)
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		mutex_unlock(&bp->hwrm_cmd_lock);
 		return rc;
+	}
 
 	pri_mask = le32_to_cpu(resp->flags);
 	pfc->pfc_en = pri_mask;
+	mutex_unlock(&bp->hwrm_cmd_lock);
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8eff05a3e0e4..3cbe771b3352 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
 	u32 ethtool_speed;
 
 	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+	mutex_lock(&bp->link_lock);
 	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
 
 	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
 		base->port = PORT_FIBRE;
 	}
 	base->phy_address = link_info->phy_addr;
+	mutex_unlock(&bp->link_lock);
 
 	return 0;
 }
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
 	if (!BNXT_SINGLE_PF(bp))
 		return -EOPNOTSUPP;
 
+	mutex_lock(&bp->link_lock);
 	if (base->autoneg == AUTONEG_ENABLE) {
 		BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
 					advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
 	rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
 
 set_setting_exit:
+	mutex_unlock(&bp->link_lock);
 	return rc;
 }
 
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
 	req.dir_ordinal = cpu_to_le16(ordinal);
 	req.dir_ext = cpu_to_le16(ext);
 	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
-	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc == 0) {
 		if (index)
 			*index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
 		if (data_length)
 			*data_length = le32_to_cpu(output->dir_data_length);
 	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index d37925a8a65b..5ee18660bc33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 	int rc = 0, vfs_supported;
 	int min_rx_rings, min_tx_rings, min_rss_ctxs;
 	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+	int avail_cp, avail_stat;
 
 	/* Check if we can enable requested num of vf's. At a mininum
 	 * we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 	 */
 	vfs_supported = *num_vfs;
 
+	avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
+	avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+	avail_cp = min_t(int, avail_cp, avail_stat);
+
 	while (vfs_supported) {
 		min_rx_rings = vfs_supported;
 		min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 			    min_rx_rings)
 				rx_ok = 1;
 		}
-		if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+		if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+		    avail_cp < min_rx_rings)
 			rx_ok = 0;
 
-		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+		    avail_cp >= min_tx_rings)
 			tx_ok = 1;
 
 		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)