author	Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>	2018-08-09 09:29:45 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2018-08-28 12:58:13 -0400
commit	b36c598c999c628130f6743dc2362585360de65c (patch)
tree	562b2258e24c735abc56961a9effe3a7cc5254f5 /drivers/net/ethernet/intel/ice/ice_sched.c
parent	22ef683b48182f4d6125a2fb2725eb8a141514ff (diff)
ice: Updates to Tx scheduler code
1) The maximum number of device nodes is a global value shared by the whole
device. The add-elements AQ command fails when there is no space for new
nodes, so a driver-side check against the maximum isn't required; remove
ice_sched_get_num_nodes_per_layer and ice_sched_val_max_nodes.

2) In ice_sched_add_elems, set the default CIR/EIR bandwidth weight on new
nodes.

3) Fix the default scheduler topology buffer size, as the firmware expects
a 4KB buffer at all times and will error out if one of any other size is
provided.

4) In the latest spec, max children per node per layer is replaced by max
sibling group size. It now provides the max children of the node one layer
below, not of the current layer's node.

5) Fix some newline/whitespace issues for consistency.

Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
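To make point 4 concrete: the driver now caches one child limit per layer,
indexed from the root down. A minimal standalone sketch of that mapping,
with a hypothetical fill_max_children() helper and fixed array sizes
standing in for the real struct ice_hw fields and firmware response:

#include <stdint.h>

#define NUM_LAYERS 9	/* hypothetical layer count for this sketch only */

/* Firmware reports a max sibling group size per layer; the value stored at
 * index i becomes the child limit consulted for a node at layer i, from the
 * root (index 0) down to the queue group layer. Leaf nodes have no children.
 */
static void fill_max_children(uint16_t max_children[NUM_LAYERS],
			      const uint16_t max_sibl_grp_sz[NUM_LAYERS])
{
	uint8_t i;

	for (i = 0; i < NUM_LAYERS; i++)
		max_children[i] = max_sibl_grp_sz[i]; /* le16_to_cpu() in the driver */
}

A node at layer i can then be tested for room with
node->num_children < max_children[i], which is the same shape of lookup the
diff below switches to (hw->max_children[...]).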
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_sched.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 161
1 file changed, 50 insertions(+), 111 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index eeae199469b6..9b7b50554952 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -17,7 +17,6 @@ ice_sched_add_root_node(struct ice_port_info *pi,
 {
 	struct ice_sched_node *root;
 	struct ice_hw *hw;
-	u16 max_children;
 
 	if (!pi)
 		return ICE_ERR_PARAM;
@@ -28,8 +27,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
 	if (!root)
 		return ICE_ERR_NO_MEMORY;
 
-	max_children = le16_to_cpu(hw->layer_info[0].max_children);
-	root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
+	/* coverity[suspicious_sizeof] */
+	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
 				      sizeof(*root), GFP_KERNEL);
 	if (!root->children) {
 		devm_kfree(ice_hw_to_dev(hw), root);
@@ -100,7 +99,6 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
 	struct ice_sched_node *parent;
 	struct ice_sched_node *node;
 	struct ice_hw *hw;
-	u16 max_children;
 
 	if (!pi)
 		return ICE_ERR_PARAM;
@@ -120,9 +118,10 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
 	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
 	if (!node)
 		return ICE_ERR_NO_MEMORY;
-	max_children = le16_to_cpu(hw->layer_info[layer].max_children);
-	if (max_children) {
-		node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
+	if (hw->max_children[layer]) {
+		/* coverity[suspicious_sizeof] */
+		node->children = devm_kcalloc(ice_hw_to_dev(hw),
+					      hw->max_children[layer],
 					      sizeof(*node), GFP_KERNEL);
 		if (!node->children) {
 			devm_kfree(ice_hw_to_dev(hw), node);
@@ -192,14 +191,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
 	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
 	if (!buf)
 		return ICE_ERR_NO_MEMORY;
+
 	buf->hdr.parent_teid = parent->info.node_teid;
 	buf->hdr.num_elems = cpu_to_le16(num_nodes);
 	for (i = 0; i < num_nodes; i++)
 		buf->teid[i] = cpu_to_le32(node_teids[i]);
+
 	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
 					   &num_groups_removed, NULL);
 	if (status || num_groups_removed != 1)
 		ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
+
 	devm_kfree(ice_hw_to_dev(hw), buf);
 	return status;
 }
@@ -592,13 +594,16 @@ static void ice_sched_clear_port(struct ice_port_info *pi)
  */
 void ice_sched_cleanup_all(struct ice_hw *hw)
 {
-	if (!hw || !hw->port_info)
+	if (!hw)
 		return;
 
-	if (hw->layer_info)
+	if (hw->layer_info) {
 		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
+		hw->layer_info = NULL;
+	}
 
-	ice_sched_clear_port(hw->port_info);
+	if (hw->port_info)
+		ice_sched_clear_port(hw->port_info);
 
 	hw->num_tx_sched_layers = 0;
 	hw->num_tx_sched_phys_layers = 0;
@@ -671,9 +676,13 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 				   ICE_AQC_ELEM_VALID_EIR;
 		buf->generic[i].data.generic = 0;
 		buf->generic[i].data.cir_bw.bw_profile_idx =
-			ICE_SCHED_DFLT_RL_PROF_ID;
+			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+		buf->generic[i].data.cir_bw.bw_alloc =
+			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
 		buf->generic[i].data.eir_bw.bw_profile_idx =
-			ICE_SCHED_DFLT_RL_PROF_ID;
+			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+		buf->generic[i].data.eir_bw.bw_alloc =
+			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
 	}
 
 	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
@@ -697,7 +706,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 
 		teid = le32_to_cpu(buf->generic[i].node_teid);
 		new_node = ice_sched_find_node_by_teid(parent, teid);
-
 		if (!new_node) {
 			ice_debug(hw, ICE_DBG_SCHED,
 				  "Node is missing for teid =%d\n", teid);
@@ -710,7 +718,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 		/* add it to previous node sibling pointer */
 		/* Note: siblings are not linked across branches */
 		prev = ice_sched_get_first_node(hw, tc_node, layer);
-
 		if (prev && prev != new_node) {
 			while (prev->sibling)
 				prev = prev->sibling;
@@ -760,8 +767,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
 		return ICE_ERR_PARAM;
 
 	/* max children per node per layer */
-	max_child_nodes =
-	    le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
+	max_child_nodes = hw->max_children[parent->tx_sched_layer];
 
 	/* current number of children + required nodes exceed max children ? */
 	if ((parent->num_children + num_nodes) > max_child_nodes) {
@@ -851,78 +857,6 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
 }
 
 /**
- * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer
- * @pi: pointer to the port info struct
- * @layer: layer number
- *
- * This function calculates the number of nodes present in the scheduler tree
- * including all the branches for a given layer
- */
-static u16
-ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer)
-{
-	struct ice_hw *hw;
-	u16 num_nodes = 0;
-	u8 i;
-
-	if (!pi)
-		return num_nodes;
-
-	hw = pi->hw;
-
-	/* Calculate the number of nodes for all TCs */
-	for (i = 0; i < pi->root->num_children; i++) {
-		struct ice_sched_node *tc_node, *node;
-
-		tc_node = pi->root->children[i];
-
-		/* Get the first node */
-		node = ice_sched_get_first_node(hw, tc_node, layer);
-		if (!node)
-			continue;
-
-		/* count the siblings */
-		while (node) {
-			num_nodes++;
-			node = node->sibling;
-		}
-	}
-
-	return num_nodes;
-}
-
-/**
- * ice_sched_val_max_nodes - check max number of nodes reached or not
- * @pi: port information structure
- * @new_num_nodes_per_layer: pointer to the new number of nodes array
- *
- * This function checks whether the scheduler tree layers have enough space to
- * add new nodes
- */
-static enum ice_status
-ice_sched_validate_for_max_nodes(struct ice_port_info *pi,
-				 u16 *new_num_nodes_per_layer)
-{
-	struct ice_hw *hw = pi->hw;
-	u8 i, qg_layer;
-	u16 num_nodes;
-
-	qg_layer = ice_sched_get_qgrp_layer(hw);
-
-	/* walk through all the layers from SW entry point to qgroup layer */
-	for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) {
-		num_nodes = ice_sched_get_num_nodes_per_layer(pi, i);
-		if (num_nodes + new_num_nodes_per_layer[i] >
-		    le16_to_cpu(hw->layer_info[i].max_pf_nodes)) {
-			ice_debug(hw, ICE_DBG_SCHED,
-				  "max nodes reached for layer = %d\n", i);
-			return ICE_ERR_CFG;
-		}
-	}
-	return 0;
-}
-
-/**
  * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
  * @pi: port information structure
  *
@@ -1003,14 +937,12 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
 	hw = pi->hw;
 
 	/* Query the Default Topology from FW */
-	buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES,
-			   sizeof(*buf), GFP_KERNEL);
+	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
 	if (!buf)
 		return ICE_ERR_NO_MEMORY;
 
 	/* Query default scheduling tree topology */
-	status = ice_aq_get_dflt_topo(hw, pi->lport, buf,
-				      sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES,
+	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
 				      &num_branches, NULL);
 	if (status)
 		goto err_init_port;
@@ -1097,6 +1029,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
 {
 	struct ice_aqc_query_txsched_res_resp *buf;
 	enum ice_status status = 0;
+	__le16 max_sibl;
+	u8 i;
 
 	if (hw->layer_info)
 		return status;
@@ -1115,7 +1049,20 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
 	hw->flattened_layers = buf->sched_props.flattening_bitmap;
 	hw->max_cgds = buf->sched_props.max_pf_cgds;
 
-	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
+	/* max sibling group size of current layer refers to the max children
+	 * of the below layer node.
+	 * layer 1 node max children will be layer 2 max sibling group size
+	 * layer 2 node max children will be layer 3 max sibling group size
+	 * and so on. This array will be populated from root (index 0) to
+	 * qgroup layer 7. Leaf node has no children.
+	 */
+	for (i = 0; i < hw->num_tx_sched_layers; i++) {
+		max_sibl = buf->layer_props[i].max_sibl_grp_sz;
+		hw->max_children[i] = le16_to_cpu(max_sibl);
+	}
+
+	hw->layer_info = (struct ice_aqc_layer_props *)
+		devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
 			     (hw->num_tx_sched_layers *
 			      sizeof(*hw->layer_info)),
 			     GFP_KERNEL);
@@ -1202,7 +1149,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
 	u8 qgrp_layer;
 
 	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
-	max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children);
+	max_children = pi->hw->max_children[qgrp_layer];
 
 	list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
 	if (!list_elem)
@@ -1278,10 +1225,8 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
 
 	/* calculate num nodes from q group to VSI layer */
 	for (i = qgl; i > vsil; i--) {
-		u16 max_children = le16_to_cpu(hw->layer_info[i].max_children);
-
 		/* round to the next integer if there is a remainder */
-		num = DIV_ROUND_UP(num, max_children);
+		num = DIV_ROUND_UP(num, hw->max_children[i]);
 
 		/* need at least one node */
 		num_nodes[i] = num ? num : 1;
@@ -1311,16 +1256,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
 	u16 num_added = 0;
 	u8 i, qgl, vsil;
 
-	status = ice_sched_validate_for_max_nodes(pi, num_nodes);
-	if (status)
-		return status;
-
 	qgl = ice_sched_get_qgrp_layer(hw);
 	vsil = ice_sched_get_vsi_layer(hw);
 	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
 	for (i = vsil + 1; i <= qgl; i++) {
 		if (!parent)
 			return ICE_ERR_CFG;
+
 		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
 						      num_nodes[i],
 						      &first_node_teid,
@@ -1398,8 +1340,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
 			      struct ice_sched_node *tc_node, u16 *num_nodes)
 {
 	struct ice_sched_node *node;
-	u16 max_child;
-	u8 i, vsil;
+	u8 vsil;
+	int i;
 
 	vsil = ice_sched_get_vsi_layer(hw);
 	for (i = vsil; i >= hw->sw_entry_point_layer; i--)
@@ -1412,12 +1354,10 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
 		/* If intermediate nodes are reached max children
 		 * then add a new one.
 		 */
-		node = ice_sched_get_first_node(hw, tc_node, i);
-		max_child = le16_to_cpu(hw->layer_info[i].max_children);
-
+		node = ice_sched_get_first_node(hw, tc_node, (u8)i);
 		/* scan all the siblings */
 		while (node) {
-			if (node->num_children < max_child)
+			if (node->num_children < hw->max_children[i])
 				break;
 			node = node->sibling;
 		}
@@ -1451,10 +1391,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
 	if (!pi)
 		return ICE_ERR_PARAM;
 
-	status = ice_sched_validate_for_max_nodes(pi, num_nodes);
-	if (status)
-		return status;
-
 	vsil = ice_sched_get_vsi_layer(pi->hw);
 	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
 		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
@@ -1479,6 +1415,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
 		if (i == vsil)
 			parent->vsi_id = vsi_id;
 	}
+
 	return 0;
 }
 
@@ -1633,9 +1570,11 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
 		status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
 		if (status)
 			return status;
+
 		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
 		if (!vsi_node)
 			return ICE_ERR_CFG;
+
 		vsi->vsi_node[tc] = vsi_node;
 		vsi_node->in_use = true;
 	}
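For reference, the per-layer child limits feed the rounding logic shown in
the ice_sched_calc_vsi_child_nodes hunk above. Below is a self-contained
sketch with made-up layer indices and limits; DIV_ROUND_UP is spelled out
here, while the driver uses the kernel's macro:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical topology: queue group layer 8, VSI layer 6,
	 * 128 queues, and a limit of 8 children per node */
	uint16_t max_children[9] = { [6] = 8, [7] = 8, [8] = 8 };
	uint16_t num_nodes[9] = { 0 };
	uint16_t num = 128;
	int i, qgl = 8, vsil = 6;

	for (i = qgl; i > vsil; i--) {
		/* round up so a partial group still gets a parent node */
		num = DIV_ROUND_UP(num, max_children[i]);
		num_nodes[i] = num ? num : 1;	/* need at least one node */
	}

	for (i = qgl; i > vsil; i--)
		printf("layer %d: %u nodes\n", i, (unsigned)num_nodes[i]);
	return 0;
}

With these numbers the walk needs 16 queue-group nodes at layer 8 and 2
parents at layer 7, mirroring how each pass divides the previous layer's
count by that layer's child limit.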