author     Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>   2018-09-19 20:23:06 -0400
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>                  2018-10-01 15:48:58 -0400
commit     5153a18e57ff3f7ef8bc76d31a968116e7f1963d (patch)
tree       2ef36e1c898f4fbb5522b2c875f22c5483e2265e /drivers/net/ethernet/intel/ice/ice_lib.c
parent     72adf2421d9bb25b08f79f3148892af1fe96ef8b (diff)
ice: Move common functions out of ice_main.c part 3/7
This patch continues the code move out of ice_main.c.
The following top-level functions (and related dependency functions) were
moved to ice_lib.c:
ice_vsi_delete
ice_free_res
ice_get_res
ice_is_reset_recovery_pending
ice_vsi_put_qs
ice_vsi_dis_irq
ice_vsi_free_irq
ice_vsi_free_rx_rings
ice_vsi_free_tx_rings
ice_msix_clean_rings
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c  388
1 file changed, 388 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 06a54d79fba8..474ce5828bd4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -226,6 +226,67 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
 }
 
 /**
+ * ice_vsi_delete - delete a VSI from the switch
+ * @vsi: pointer to VSI being removed
+ */
+void ice_vsi_delete(struct ice_vsi *vsi)
+{
+        struct ice_pf *pf = vsi->back;
+        struct ice_vsi_ctx ctxt;
+        enum ice_status status;
+
+        ctxt.vsi_num = vsi->vsi_num;
+
+        memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
+
+        status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
+        if (status)
+                dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
+                        vsi->vsi_num);
+}
+
+/**
+ * ice_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
+{
+        struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+
+        if (!q_vector->tx.ring && !q_vector->rx.ring)
+                return IRQ_HANDLED;
+
+        napi_schedule(&q_vector->napi);
+
+        return IRQ_HANDLED;
+}
+
+/**
+ * ice_vsi_put_qs - Release queues from VSI to PF
+ * @vsi: the VSI that is going to release queues
+ */
+void ice_vsi_put_qs(struct ice_vsi *vsi)
+{
+        struct ice_pf *pf = vsi->back;
+        int i;
+
+        mutex_lock(&pf->avail_q_mutex);
+
+        for (i = 0; i < vsi->alloc_txq; i++) {
+                clear_bit(vsi->txq_map[i], pf->avail_txqs);
+                vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
+        }
+
+        for (i = 0; i < vsi->alloc_rxq; i++) {
+                clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
+                vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
+        }
+
+        mutex_unlock(&pf->avail_q_mutex);
+}
+
+/**
  * ice_add_mac_to_list - Add a mac address filter entry to the list
  * @vsi: the VSI to be forwarded to
  * @add_list: pointer to the list which contains MAC filter entries
@@ -747,3 +808,330 @@ err_alloc_q_ids:
 
         return err;
 }
+
+/**
+ * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
+ * @vsi: VSI to enable or disable VLAN pruning on
+ * @ena: set to true to enable VLAN pruning and false to disable it
+ *
+ * returns 0 if VSI is updated, negative otherwise
+ */
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+{
+        struct ice_vsi_ctx *ctxt;
+        struct device *dev;
+        int status;
+
+        if (!vsi)
+                return -EINVAL;
+
+        dev = &vsi->back->pdev->dev;
+        ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+        if (!ctxt)
+                return -ENOMEM;
+
+        ctxt->info = vsi->info;
+
+        if (ena) {
+                ctxt->info.sec_flags |=
+                        ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+                        ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+                ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+        } else {
+                ctxt->info.sec_flags &=
+                        ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+                          ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+                ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+        }
+
+        ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
+                                                ICE_AQ_VSI_PROP_SW_VALID);
+        ctxt->vsi_num = vsi->vsi_num;
+        status = ice_aq_update_vsi(&vsi->back->hw, ctxt, NULL);
+        if (status) {
+                netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI %d failed, err = %d, aq_err = %d\n",
+                           ena ? "Ena" : "Dis", vsi->vsi_num, status,
+                           vsi->back->hw.adminq.sq_last_status);
+                goto err_out;
+        }
+
+        vsi->info.sec_flags = ctxt->info.sec_flags;
+        vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+
+        devm_kfree(dev, ctxt);
+        return 0;
+
+err_out:
+        devm_kfree(dev, ctxt);
+        return -EIO;
+}
+
+/**
+ * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
+ * @vsi: the VSI being cleaned up
+ */
+static void ice_vsi_release_msix(struct ice_vsi *vsi)
+{
+        struct ice_pf *pf = vsi->back;
+        u16 vector = vsi->base_vector;
+        struct ice_hw *hw = &pf->hw;
+        u32 txq = 0;
+        u32 rxq = 0;
+        int i, q;
+
+        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+                struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+                wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
+                wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
+                for (q = 0; q < q_vector->num_ring_tx; q++) {
+                        wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+                        txq++;
+                }
+
+                for (q = 0; q < q_vector->num_ring_rx; q++) {
+                        wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
+                        rxq++;
+                }
+        }
+
+        ice_flush(hw);
+}
+
+/**
+ * ice_vsi_free_irq - Free the IRQ association with the OS
+ * @vsi: the VSI being configured
+ */
+void ice_vsi_free_irq(struct ice_vsi *vsi)
+{
+        struct ice_pf *pf = vsi->back;
+        int base = vsi->base_vector;
+
+        if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+                int i;
+
+                if (!vsi->q_vectors || !vsi->irqs_ready)
+                        return;
+
+                vsi->irqs_ready = false;
+                for (i = 0; i < vsi->num_q_vectors; i++) {
+                        u16 vector = i + base;
+                        int irq_num;
+
+                        irq_num = pf->msix_entries[vector].vector;
+
+                        /* free only the irqs that were actually requested */
+                        if (!vsi->q_vectors[i] ||
+                            !(vsi->q_vectors[i]->num_ring_tx ||
+                              vsi->q_vectors[i]->num_ring_rx))
+                                continue;
+
+                        /* clear the affinity notifier in the IRQ descriptor */
+                        irq_set_affinity_notifier(irq_num, NULL);
+
+                        /* clear the affinity_mask in the IRQ descriptor */
+                        irq_set_affinity_hint(irq_num, NULL);
+                        synchronize_irq(irq_num);
+                        devm_free_irq(&pf->pdev->dev, irq_num,
+                                      vsi->q_vectors[i]);
+                }
+                ice_vsi_release_msix(vsi);
+        }
+}
+
+/**
+ * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
+{
+        int i;
+
+        if (!vsi->tx_rings)
+                return;
+
+        ice_for_each_txq(vsi, i)
+                if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+                        ice_free_tx_ring(vsi->tx_rings[i]);
+}
+
+/**
+ * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
+{
+        int i;
+
+        if (!vsi->rx_rings)
+                return;
+
+        ice_for_each_rxq(vsi, i)
+                if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
+                        ice_free_rx_ring(vsi->rx_rings[i]);
+}
+
+/**
+ * ice_free_res - free a block of resources
+ * @res: pointer to the resource
+ * @index: starting index previously returned by ice_get_res
+ * @id: identifier to track owner
+ *
+ * Returns number of resources freed
+ */
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
+{
+        int count = 0;
+        int i;
+
+        if (!res || index >= res->num_entries)
+                return -EINVAL;
+
+        id |= ICE_RES_VALID_BIT;
+        for (i = index; i < res->num_entries && res->list[i] == id; i++) {
+                res->list[i] = 0;
+                count++;
+        }
+
+        return count;
+}
+
+/**
+ * ice_search_res - Search the tracker for a block of resources
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ */
+static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+{
+        int start = res->search_hint;
+        int end = start;
+
+        id |= ICE_RES_VALID_BIT;
+
+        do {
+                /* skip already allocated entries */
+                if (res->list[end++] & ICE_RES_VALID_BIT) {
+                        start = end;
+                        if ((start + needed) > res->num_entries)
+                                break;
+                }
+
+                if (end == (start + needed)) {
+                        int i = start;
+
+                        /* there was enough, so assign it to the requestor */
+                        while (i != end)
+                                res->list[i++] = id;
+
+                        if (end == res->num_entries)
+                                end = 0;
+
+                        res->search_hint = end;
+                        return start;
+                }
+        } while (1);
+
+        return -ENOMEM;
+}
+
+/**
+ * ice_get_res - get a block of resources
+ * @pf: board private structure
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ * The search_hint trick and lack of advanced fit-finding only works
+ * because we're highly likely to have all the same sized requests.
+ * Linear search time and any fragmentation should be minimal.
+ */
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+        int ret;
+
+        if (!res || !pf)
+                return -EINVAL;
+
+        if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+                dev_err(&pf->pdev->dev,
+                        "param err: needed=%d, num_entries = %d id=0x%04x\n",
+                        needed, res->num_entries, id);
+                return -EINVAL;
+        }
+
+        /* search based on search_hint */
+        ret = ice_search_res(res, needed, id);
+
+        if (ret < 0) {
+                /* previous search failed. Reset search hint and try again */
+                res->search_hint = 0;
+                ret = ice_search_res(res, needed, id);
+        }
+
+        return ret;
+}
+
+/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+        struct ice_pf *pf = vsi->back;
+        struct ice_hw *hw = &pf->hw;
+        int base = vsi->base_vector;
+        u32 val;
+        int i;
+
+        /* disable interrupt causation from each queue */
+        if (vsi->tx_rings) {
+                ice_for_each_txq(vsi, i) {
+                        if (vsi->tx_rings[i]) {
+                                u16 reg;
+
+                                reg = vsi->tx_rings[i]->reg_idx;
+                                val = rd32(hw, QINT_TQCTL(reg));
+                                val &= ~QINT_TQCTL_CAUSE_ENA_M;
+                                wr32(hw, QINT_TQCTL(reg), val);
+                        }
+                }
+        }
+
+        if (vsi->rx_rings) {
+                ice_for_each_rxq(vsi, i) {
+                        if (vsi->rx_rings[i]) {
+                                u16 reg;
+
+                                reg = vsi->rx_rings[i]->reg_idx;
+                                val = rd32(hw, QINT_RQCTL(reg));
+                                val &= ~QINT_RQCTL_CAUSE_ENA_M;
+                                wr32(hw, QINT_RQCTL(reg), val);
+                        }
+                }
+        }
+
+        /* disable each interrupt */
+        if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+                for (i = vsi->base_vector;
+                     i < (vsi->num_q_vectors + vsi->base_vector); i++)
+                        wr32(hw, GLINT_DYN_CTL(i), 0);
+
+                ice_flush(hw);
+                for (i = 0; i < vsi->num_q_vectors; i++)
+                        synchronize_irq(pf->msix_entries[i + base].vector);
+        }
+}
+
+/**
+ * ice_is_reset_recovery_pending - schedule a reset
+ * @state: pf state field
+ */
+bool ice_is_reset_recovery_pending(unsigned long *state)
+{
+        return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
+}
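
For context on how the resource tracker moved above is meant to be used, here is a minimal caller sketch; it is not part of this patch. Only the ice_get_res()/ice_free_res() contract is taken from the code above. The pf->irq_tracker field and the use of vsi->idx as the owner id are assumptions made for illustration; the real callers remain in ice_main.c at this point in the series.

/* Hypothetical caller sketch (not from this patch). ice_get_res() does a
 * first-fit walk starting at res->search_hint and returns the base index of
 * a contiguous block tagged with the owner id; ice_free_res() later releases
 * the consecutive entries owned by that id starting at the returned base.
 * pf->irq_tracker and the vsi->idx owner id are assumed names.
 */
static int example_reserve_vectors(struct ice_pf *pf, struct ice_vsi *vsi)
{
        int base;

        base = ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors, vsi->idx);
        if (base < 0)
                return base;    /* -EINVAL on bad args, -ENOMEM if no block fits */

        vsi->base_vector = base;
        return 0;
}

static void example_release_vectors(struct ice_pf *pf, struct ice_vsi *vsi)
{
        /* pass back the same owner id and the base index from ice_get_res() */
        ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
        vsi->base_vector = 0;
}

As the kernel-doc on ice_get_res() notes, requests are almost always the same size, so the hint-based first-fit keeps the linear search short and fragmentation minimal.

Similarly, ice_msix_clean_rings() above is the MSI-X hard-IRQ handler and only schedules NAPI, deferring the actual Tx/Rx clean-up to softirq context. The sketch below shows how such a handler is typically wired up with devm_request_irq(), mirroring the devm_free_irq() call in ice_vsi_free_irq(); the q_vector->name field is assumed here and this is not code from the patch.

/* Hypothetical registration sketch; q_vector->name is an assumed field. */
static int example_request_vector(struct ice_pf *pf, struct ice_vsi *vsi, int i)
{
        struct ice_q_vector *q_vector = vsi->q_vectors[i];
        int irq_num = pf->msix_entries[vsi->base_vector + i].vector;

        /* the dev_id passed here (q_vector) matches what ice_vsi_free_irq()
         * hands to devm_free_irq() when tearing the mapping back down
         */
        return devm_request_irq(&pf->pdev->dev, irq_num, ice_msix_clean_rings,
                                0, q_vector->name, q_vector);
}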