Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c  115
1 file changed, 66 insertions(+), 49 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5299caf55a7f..f1e80eed2fd6 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
                 case ice_aqc_opc_get_link_status:
                         if (ice_handle_link_event(pf))
                                 dev_err(&pf->pdev->dev,
-                                        "Could not handle link event");
+                                        "Could not handle link event\n");
                         break;
                 default:
                         dev_dbg(&pf->pdev->dev,
@@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 }
 
 /**
+ * ice_ctrlq_pending - check if there is a difference between ntc and ntu
+ * @hw: pointer to hardware info
+ * @cq: control queue information
+ *
+ * returns true if there are pending messages in a queue, false if there aren't
+ */
+static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+        u16 ntu;
+
+        ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+        return cq->rq.next_to_clean != ntu;
+}
+
+/**
  * ice_clean_adminq_subtask - clean the AdminQ rings
  * @pf: board private structure
  */
 static void ice_clean_adminq_subtask(struct ice_pf *pf)
 {
         struct ice_hw *hw = &pf->hw;
-        u32 val;
 
         if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
                 return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
 
         clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
 
-        /* re-enable Admin queue interrupt causes */
-        val = rd32(hw, PFINT_FW_CTL);
-        wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+        /* There might be a situation where new messages arrive to a control
+         * queue between processing the last message and clearing the
+         * EVENT_PENDING bit. So before exiting, check queue head again (using
+         * ice_ctrlq_pending) and process new messages if any.
+         */
+        if (ice_ctrlq_pending(hw, &hw->adminq))
+                __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
 
         ice_flush(hw);
 }
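The re-check in the hunk above closes a small window: a message can land between the last __ice_clean_ctrlq() pass and clear_bit(), and it would then wait in the queue until the next interrupt. Below is a minimal stand-alone sketch of the same "clear the flag, then re-read the head" pattern; the toy_* names are made up for illustration and are not the driver's types.

#include <stdatomic.h>
#include <stdbool.h>

/* Toy stand-ins for the driver's control queue bookkeeping. */
static _Atomic unsigned int toy_head;   /* advanced by the "device" */
static unsigned int toy_next_to_clean;  /* advanced by the consumer */
static atomic_bool toy_event_pending;

static bool toy_ctrlq_pending(void)
{
        return toy_next_to_clean != atomic_load(&toy_head);
}

static void toy_clean_subtask(void)
{
        if (!atomic_load(&toy_event_pending))
                return;

        /* first pass: drain everything visible right now */
        while (toy_ctrlq_pending())
                toy_next_to_clean++;

        atomic_store(&toy_event_pending, false);

        /* a message that arrived between the drain and the flag clear would
         * otherwise sit until the next interrupt; re-reading the head catches it
         */
        while (toy_ctrlq_pending())
                toy_next_to_clean++;
}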
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
                 qcount = numq_tc;
         }
 
-        /* find higher power-of-2 of qcount */
-        pow = ilog2(qcount);
-
-        if (!is_power_of_2(qcount))
-                pow++;
+        /* find the (rounded up) power-of-2 of qcount */
+        pow = order_base_2(qcount);
 
         for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
                 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
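For reference, order_base_2(qcount) is the rounded-up base-2 logarithm, which is exactly what the removed ilog2()/is_power_of_2() pair computed by hand. A small user-space sketch of that equivalence follows; qcount_to_pow() is a hypothetical helper written only to illustrate the rounding, not a kernel API.

#include <assert.h>

/* Hand-rolled "rounded-up log2": the smallest pow with (1 << pow) >= qcount,
 * mirroring what order_base_2() returns in the hunk above.
 */
static unsigned int qcount_to_pow(unsigned int qcount)
{
        unsigned int pow = 0;

        while ((1u << pow) < qcount)
                pow++;
        return pow;
}

int main(void)
{
        assert(qcount_to_pow(5) == 3);  /* 5 queues round up to 2^3 = 8 */
        assert(qcount_to_pow(8) == 3);  /* exact power of two: no extra bump */
        assert(qcount_to_pow(1) == 0);
        return 0;
}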
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
         ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
         /* Traffic from VSI can be sent to LAN */
         ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
-        /* Allow all packets untagged/tagged */
-        ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
-                                       ICE_AQ_VSI_PVLAN_MODE_M) >>
-                                      ICE_AQ_VSI_PVLAN_MODE_S);
-        /* Show VLAN/UP from packets in Rx descriptors */
-        ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
-                                        ICE_AQ_VSI_PVLAN_EMOD_M) >>
-                                       ICE_AQ_VSI_PVLAN_EMOD_S);
+
+        /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
+         * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
+         * packets untagged/tagged.
+         */
+        ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+                                  ICE_AQ_VSI_VLAN_MODE_M) >>
+                                 ICE_AQ_VSI_VLAN_MODE_S);
+
         /* Have 1:1 UP mapping for both ingress/egress tables */
         table |= ICE_UP_TABLE_TRANSLATE(0, 0);
         table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
         wr32(hw, PFINT_OICR_ENA, 0);    /* disable all */
         rd32(hw, PFINT_OICR);           /* read to clear */
 
-        val = (PFINT_OICR_HLP_RDY_M |
-               PFINT_OICR_CPM_RDY_M |
-               PFINT_OICR_ECC_ERR_M |
+        val = (PFINT_OICR_ECC_ERR_M |
                PFINT_OICR_MAL_DETECT_M |
                PFINT_OICR_GRST_M |
                PFINT_OICR_PCI_EXCEPTION_M |
-               PFINT_OICR_GPIO_M |
-               PFINT_OICR_STORM_DETECT_M |
-               PFINT_OICR_HMC_ERR_M);
+               PFINT_OICR_HMC_ERR_M |
+               PFINT_OICR_PE_CRITERR_M);
 
         wr32(hw, PFINT_OICR_ENA, val);
 
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
 skip_req_irq:
         ice_ena_misc_vector(pf);
 
-        val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
-              (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
-              PFINT_OICR_CTL_CAUSE_ENA_M;
+        val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+               PFINT_OICR_CTL_CAUSE_ENA_M);
         wr32(hw, PFINT_OICR_CTL, val);
 
         /* This enables Admin queue Interrupt causes */
-        val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
-              (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
-              PFINT_FW_CTL_CAUSE_ENA_M;
+        val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+               PFINT_FW_CTL_CAUSE_ENA_M);
         wr32(hw, PFINT_FW_CTL, val);
 
         itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
         if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
                 ice_dis_msix(pf);
 
-        devm_kfree(&pf->pdev->dev, pf->irq_tracker);
-        pf->irq_tracker = NULL;
+        if (pf->irq_tracker) {
+                devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+                pf->irq_tracker = NULL;
+        }
 }
 
 /**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
 
         err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
         if (err) {
-                dev_err(&pdev->dev, "I/O map error %d\n", err);
+                dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
                 return err;
         }
 
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
         enum ice_status status;
 
         /* Here we are configuring the VSI to let the driver add VLAN tags by
-         * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
-         * tag insertion happens in the Tx hot path, in ice_tx_map.
+         * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+         * insertion happens in the Tx hot path, in ice_tx_map.
          */
-        ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+        ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
 
         ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
         ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
                 return -EIO;
         }
 
-        vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+        vsi->info.vlan_flags = ctxt.info.vlan_flags;
         return 0;
 }
 
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
          */
         if (ena) {
                 /* Strip VLAN tag from Rx packet and put it in the desc */
-                ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+                ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
         } else {
                 /* Disable stripping. Leave tag in packet */
-                ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+                ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
         }
 
+        /* Allow all packets untagged/tagged */
+        ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
         ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
         ctxt.vsi_num = vsi->vsi_num;
 
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
                 return -EIO;
         }
 
-        vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+        vsi->info.vlan_flags = ctxt.info.vlan_flags;
         return 0;
 }
 
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
         /* clear the context structure first */
         memset(&rlan_ctx, 0, sizeof(rlan_ctx));
 
-        rlan_ctx.base = ring->dma >> 7;
+        rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
 
         rlan_ctx.qlen = ring->count;
 
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 {
         int err;
 
-        ice_set_rx_mode(vsi->netdev);
-
-        err = ice_restore_vlan(vsi);
-        if (err)
-                return err;
+        if (vsi->netdev) {
+                ice_set_rx_mode(vsi->netdev);
+                err = ice_restore_vlan(vsi);
+                if (err)
+                        return err;
+        }
 
         err = ice_vsi_cfg_txqs(vsi);
         if (!err)
@@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
 {
-        int i, err;
+        int i, err = 0;
 
         if (!vsi->num_txq) {
                 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
 {
-        int i, err;
+        int i, err = 0;
 
         if (!vsi->num_rxq) {
                 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
         u8 count = 0;
 
         if (new_mtu == netdev->mtu) {
-                netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+                netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
                 return 0;
         }
 