aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorAnirudh Venkataramanan <anirudh.venkataramanan@intel.com>2019-02-28 18:24:24 -0500
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2019-04-18 11:38:47 -0400
commit7b9ffc76bf5998aad8feaa26d9d3fcb65ec7a21b (patch)
tree9935f6c7b53786d7f9ce06d804aa4a42ce541ec4 /drivers/net
parent0ebd3ff13ccad2940516ba522ca8d21cea4f56f6 (diff)
ice: Add code for DCB initialization part 3/4
This patch adds a new function ice_pf_dcb_cfg (and related helpers) which applies the DCB configuration obtained from the firmware. As part of this, VSIs/netdevs are updated with traffic class information. This patch requires a bit of a refactor of existing code. 1. For a MIB change event, the associated VSI is closed and brought up again. The gap between closing and opening the VSI can cause a race condition. Fix this by grabbing the rtnl_lock prior to closing the VSI and then only releasing it after re-opening the VSI during a MIB change event. 2. ice_sched_query_elem is used in ice_sched.c and, with this patch, in ice_dcb.c as well. However, ice_dcb.c is not built when CONFIG_DCB is unset. This results in namespace warnings (ice_sched.o: Externally defined symbols with no external references) when CONFIG_DCB is unset. To avoid this, move ice_sched_query_elem from ice_sched.c to ice_common.c. Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com> Tested-by: Andrew Bowers <andrewx.bowers@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c463
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c204
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c135
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c118
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h2
14 files changed, 997 insertions, 82 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d76333c808a3..6ca1094cb24a 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -378,6 +378,9 @@ struct ice_pf {
378 struct ice_hw_port_stats stats_prev; 378 struct ice_hw_port_stats stats_prev;
379 struct ice_hw hw; 379 struct ice_hw hw;
380 u8 stat_prev_loaded; /* has previous stats been loaded */ 380 u8 stat_prev_loaded; /* has previous stats been loaded */
381#ifdef CONFIG_DCB
382 u16 dcbx_cap;
383#endif /* CONFIG_DCB */
381 u32 tx_timeout_count; 384 u32 tx_timeout_count;
382 unsigned long tx_timeout_last_recovery; 385 unsigned long tx_timeout_last_recovery;
383 u32 tx_timeout_recovery_level; 386 u32 tx_timeout_recovery_level;
@@ -414,12 +417,6 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
414 wr32(hw, GLINT_DYN_CTL(vector), val); 417 wr32(hw, GLINT_DYN_CTL(vector), val);
415} 418}
416 419
417static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
418{
419 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
420 vsi->tc_cfg.numtc = 1;
421}
422
423void ice_set_ethtool_ops(struct net_device *netdev); 420void ice_set_ethtool_ops(struct net_device *netdev);
424int ice_up(struct ice_vsi *vsi); 421int ice_up(struct ice_vsi *vsi);
425int ice_down(struct ice_vsi *vsi); 422int ice_down(struct ice_vsi *vsi);
@@ -428,5 +425,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
428void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); 425void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
429void ice_print_link_msg(struct ice_vsi *vsi, bool isup); 426void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
430void ice_napi_del(struct ice_vsi *vsi); 427void ice_napi_del(struct ice_vsi *vsi);
428#ifdef CONFIG_DCB
429int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
430void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
431#endif /* CONFIG_DCB */
431 432
432#endif /* _ICE_H_ */ 433#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index bbceaca11541..cda93826a065 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -747,6 +747,32 @@ struct ice_aqc_delete_elem {
747 __le32 teid[1]; 747 __le32 teid[1];
748}; 748};
749 749
750/* Query Port ETS (indirect 0x040E)
751 *
752 * This indirect command is used to query port TC node configuration.
753 */
754struct ice_aqc_query_port_ets {
755 __le32 port_teid;
756 __le32 reserved;
757 __le32 addr_high;
758 __le32 addr_low;
759};
760
761struct ice_aqc_port_ets_elem {
762 u8 tc_valid_bits;
763 u8 reserved[3];
764 /* 3 bits for UP per TC 0-7, 4th byte reserved */
765 __le32 up2tc;
766 u8 tc_bw_share[8];
767 __le32 port_eir_prof_id;
768 __le32 port_cir_prof_id;
769 /* 3 bits per Node priority to TC 0-7, 4th byte reserved */
770 __le32 tc_node_prio;
771#define ICE_TC_NODE_PRIO_S 0x4
772 u8 reserved1[4];
773 __le32 tc_node_teid[8]; /* Used for response, reserved in command */
774};
775
750/* Query Scheduler Resource Allocation (indirect 0x0412) 776/* Query Scheduler Resource Allocation (indirect 0x0412)
751 * This indirect command retrieves the scheduler resources allocated by 777 * This indirect command retrieves the scheduler resources allocated by
752 * EMP Firmware to the given PF. 778 * EMP Firmware to the given PF.
@@ -1212,6 +1238,23 @@ struct ice_aqc_get_cee_dcb_cfg_resp {
1212 u8 reserved[12]; 1238 u8 reserved[12];
1213}; 1239};
1214 1240
1241/* Set Local LLDP MIB (indirect 0x0A08)
1242 * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
1243 */
1244struct ice_aqc_lldp_set_local_mib {
1245 u8 type;
1246#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0)
1247#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0
1248#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1)
1249#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0
1250#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M
1251 u8 reserved0;
1252 __le16 length;
1253 u8 reserved1[4];
1254 __le32 addr_high;
1255 __le32 addr_low;
1256};
1257
1215/* Stop/Start LLDP Agent (direct 0x0A09) 1258/* Stop/Start LLDP Agent (direct 0x0A09)
1216 * Used for stopping/starting specific LLDP agent. e.g. DCBx. 1259 * Used for stopping/starting specific LLDP agent. e.g. DCBx.
1217 * The same structure is used for the response, with the command field 1260 * The same structure is used for the response, with the command field
@@ -1481,11 +1524,13 @@ struct ice_aq_desc {
1481 struct ice_aqc_get_topo get_topo; 1524 struct ice_aqc_get_topo get_topo;
1482 struct ice_aqc_sched_elem_cmd sched_elem_cmd; 1525 struct ice_aqc_sched_elem_cmd sched_elem_cmd;
1483 struct ice_aqc_query_txsched_res query_sched_res; 1526 struct ice_aqc_query_txsched_res query_sched_res;
1527 struct ice_aqc_query_port_ets port_ets;
1484 struct ice_aqc_nvm nvm; 1528 struct ice_aqc_nvm nvm;
1485 struct ice_aqc_pf_vf_msg virt; 1529 struct ice_aqc_pf_vf_msg virt;
1486 struct ice_aqc_lldp_get_mib lldp_get_mib; 1530 struct ice_aqc_lldp_get_mib lldp_get_mib;
1487 struct ice_aqc_lldp_set_mib_change lldp_set_event; 1531 struct ice_aqc_lldp_set_mib_change lldp_set_event;
1488 struct ice_aqc_lldp_start lldp_start; 1532 struct ice_aqc_lldp_start lldp_start;
1533 struct ice_aqc_lldp_set_local_mib lldp_set_mib;
1489 struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl; 1534 struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
1490 struct ice_aqc_get_set_rss_lut get_set_rss_lut; 1535 struct ice_aqc_get_set_rss_lut get_set_rss_lut;
1491 struct ice_aqc_get_set_rss_key get_set_rss_key; 1536 struct ice_aqc_get_set_rss_key get_set_rss_key;
@@ -1573,6 +1618,7 @@ enum ice_adminq_opc {
1573 ice_aqc_opc_get_sched_elems = 0x0404, 1618 ice_aqc_opc_get_sched_elems = 0x0404,
1574 ice_aqc_opc_suspend_sched_elems = 0x0409, 1619 ice_aqc_opc_suspend_sched_elems = 0x0409,
1575 ice_aqc_opc_resume_sched_elems = 0x040A, 1620 ice_aqc_opc_resume_sched_elems = 0x040A,
1621 ice_aqc_opc_query_port_ets = 0x040E,
1576 ice_aqc_opc_delete_sched_elems = 0x040F, 1622 ice_aqc_opc_delete_sched_elems = 0x040F,
1577 ice_aqc_opc_query_sched_res = 0x0412, 1623 ice_aqc_opc_query_sched_res = 0x0412,
1578 1624
@@ -1595,6 +1641,7 @@ enum ice_adminq_opc {
1595 ice_aqc_opc_lldp_set_mib_change = 0x0A01, 1641 ice_aqc_opc_lldp_set_mib_change = 0x0A01,
1596 ice_aqc_opc_lldp_start = 0x0A06, 1642 ice_aqc_opc_lldp_start = 0x0A06,
1597 ice_aqc_opc_get_cee_dcb_cfg = 0x0A07, 1643 ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
1644 ice_aqc_opc_lldp_set_local_mib = 0x0A08,
1598 ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09, 1645 ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
1599 1646
1600 /* RSS commands */ 1647 /* RSS commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 3730daf1bc1a..2937c6be1aee 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -3106,3 +3106,28 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3106 /* to manage the potential roll-over */ 3106 /* to manage the potential roll-over */
3107 *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; 3107 *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
3108} 3108}
3109
3110/**
3111 * ice_sched_query_elem - query element information from HW
3112 * @hw: pointer to the HW struct
3113 * @node_teid: node TEID to be queried
3114 * @buf: buffer to element information
3115 *
3116 * This function queries HW element information
3117 */
3118enum ice_status
3119ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
3120 struct ice_aqc_get_elem *buf)
3121{
3122 u16 buf_size, num_elem_ret = 0;
3123 enum ice_status status;
3124
3125 buf_size = sizeof(*buf);
3126 memset(buf, 0, buf_size);
3127 buf->generic[0].node_teid = cpu_to_le32(node_teid);
3128 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
3129 NULL);
3130 if (status || num_elem_ret != 1)
3131 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
3132 return status;
3133}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index fbdfdee353bc..faefc45e4a1e 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -118,4 +118,7 @@ ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
118void 118void
119ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 119ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
120 u64 *prev_stat, u64 *cur_stat); 120 u64 *prev_stat, u64 *cur_stat);
121enum ice_status
122ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
123 struct ice_aqc_get_elem *buf);
121#endif /* _ICE_COMMON_H_ */ 124#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 6f0c6f323c60..fbc656589144 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -99,6 +99,39 @@ enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd)
99} 99}
100 100
101/** 101/**
102 * ice_aq_set_lldp_mib - Set the LLDP MIB
103 * @hw: pointer to the HW struct
104 * @mib_type: Local, Remote or both Local and Remote MIBs
105 * @buf: pointer to the caller-supplied buffer to store the MIB block
106 * @buf_size: size of the buffer (in bytes)
107 * @cd: pointer to command details structure or NULL
108 *
109 * Set the LLDP MIB. (0x0A08)
110 */
111static enum ice_status
112ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
113 struct ice_sq_cd *cd)
114{
115 struct ice_aqc_lldp_set_local_mib *cmd;
116 struct ice_aq_desc desc;
117
118 cmd = &desc.params.lldp_set_mib;
119
120 if (buf_size == 0 || !buf)
121 return ICE_ERR_PARAM;
122
123 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
124
125 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
126 desc.datalen = cpu_to_le16(buf_size);
127
128 cmd->type = mib_type;
129 cmd->length = cpu_to_le16(buf_size);
130
131 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
132}
133
134/**
102 * ice_get_dcbx_status 135 * ice_get_dcbx_status
103 * @hw: pointer to the HW struct 136 * @hw: pointer to the HW struct
104 * 137 *
@@ -902,3 +935,433 @@ enum ice_status ice_init_dcb(struct ice_hw *hw)
902 935
903 return ret; 936 return ret;
904} 937}
938
939/**
940 * ice_add_ieee_ets_common_tlv
941 * @buf: Data buffer to be populated with ice_dcb_ets_cfg data
942 * @ets_cfg: Container for ice_dcb_ets_cfg data
943 *
944 * Populate the TLV buffer with ice_dcb_ets_cfg data
945 */
946static void
947ice_add_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
948{
949 u8 priority0, priority1;
950 u8 offset = 0;
951 int i;
952
953 /* Priority Assignment Table (4 octets)
954 * Octets:| 1 | 2 | 3 | 4 |
955 * -----------------------------------------
956 * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
957 * -----------------------------------------
958 * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
959 * -----------------------------------------
960 */
961 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
962 priority0 = ets_cfg->prio_table[i * 2] & 0xF;
963 priority1 = ets_cfg->prio_table[i * 2 + 1] & 0xF;
964 buf[offset] = (priority0 << ICE_IEEE_ETS_PRIO_1_S) | priority1;
965 offset++;
966 }
967
968 /* TC Bandwidth Table (8 octets)
969 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
970 * ---------------------------------
971 * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
972 * ---------------------------------
973 *
974 * TSA Assignment Table (8 octets)
975 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
976 * ---------------------------------
977 * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
978 * ---------------------------------
979 */
980 ice_for_each_traffic_class(i) {
981 buf[offset] = ets_cfg->tcbwtable[i];
982 buf[ICE_MAX_TRAFFIC_CLASS + offset] = ets_cfg->tsatable[i];
983 offset++;
984 }
985}
986
987/**
988 * ice_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
989 * @tlv: Fill the ETS config data in IEEE format
990 * @dcbcfg: Local store which holds the DCB Config
991 *
992 * Prepare IEEE 802.1Qaz ETS CFG TLV
993 */
994static void
995ice_add_ieee_ets_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
996{
997 struct ice_dcb_ets_cfg *etscfg;
998 u8 *buf = tlv->tlvinfo;
999 u8 maxtcwilling = 0;
1000 u32 ouisubtype;
1001 u16 typelen;
1002
1003 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
1004 ICE_IEEE_ETS_TLV_LEN);
1005 tlv->typelen = htons(typelen);
1006
1007 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1008 ICE_IEEE_SUBTYPE_ETS_CFG);
1009 tlv->ouisubtype = htonl(ouisubtype);
1010
1011 /* First Octet post subtype
1012 * --------------------------
1013 * |will-|CBS | Re- | Max |
1014 * |ing | |served| TCs |
1015 * --------------------------
1016 * |1bit | 1bit|3 bits|3bits|
1017 */
1018 etscfg = &dcbcfg->etscfg;
1019 if (etscfg->willing)
1020 maxtcwilling = BIT(ICE_IEEE_ETS_WILLING_S);
1021 maxtcwilling |= etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;
1022 buf[0] = maxtcwilling;
1023
1024 /* Begin adding at Priority Assignment Table (offset 1 in buf) */
1025 ice_add_ieee_ets_common_tlv(&buf[1], etscfg);
1026}
1027
1028/**
1029 * ice_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
1030 * @tlv: Fill ETS Recommended TLV in IEEE format
1031 * @dcbcfg: Local store which holds the DCB Config
1032 *
1033 * Prepare IEEE 802.1Qaz ETS REC TLV
1034 */
1035static void
1036ice_add_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
1037 struct ice_dcbx_cfg *dcbcfg)
1038{
1039 struct ice_dcb_ets_cfg *etsrec;
1040 u8 *buf = tlv->tlvinfo;
1041 u32 ouisubtype;
1042 u16 typelen;
1043
1044 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
1045 ICE_IEEE_ETS_TLV_LEN);
1046 tlv->typelen = htons(typelen);
1047
1048 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1049 ICE_IEEE_SUBTYPE_ETS_REC);
1050 tlv->ouisubtype = htonl(ouisubtype);
1051
1052 etsrec = &dcbcfg->etsrec;
1053
1054 /* First Octet is reserved */
1055 /* Begin adding at Priority Assignment Table (offset 1 in buf) */
1056 ice_add_ieee_ets_common_tlv(&buf[1], etsrec);
1057}
1058
1059/**
1060 * ice_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
1061 * @tlv: Fill PFC TLV in IEEE format
1062 * @dcbcfg: Local store which holds the PFC CFG data
1063 *
1064 * Prepare IEEE 802.1Qaz PFC CFG TLV
1065 */
1066static void
1067ice_add_ieee_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
1068{
1069 u8 *buf = tlv->tlvinfo;
1070 u32 ouisubtype;
1071 u16 typelen;
1072
1073 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
1074 ICE_IEEE_PFC_TLV_LEN);
1075 tlv->typelen = htons(typelen);
1076
1077 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1078 ICE_IEEE_SUBTYPE_PFC_CFG);
1079 tlv->ouisubtype = htonl(ouisubtype);
1080
1081 /* ----------------------------------------
1082 * |will-|MBC | Re- | PFC | PFC Enable |
1083 * |ing | |served| cap | |
1084 * -----------------------------------------
1085 * |1bit | 1bit|2 bits|4bits| 1 octet |
1086 */
1087 if (dcbcfg->pfc.willing)
1088 buf[0] = BIT(ICE_IEEE_PFC_WILLING_S);
1089
1090 if (dcbcfg->pfc.mbc)
1091 buf[0] |= BIT(ICE_IEEE_PFC_MBC_S);
1092
1093 buf[0] |= dcbcfg->pfc.pfccap & 0xF;
1094 buf[1] = dcbcfg->pfc.pfcena;
1095}
1096
1097/**
1098 * ice_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
1099 * @tlv: Fill APP TLV in IEEE format
1100 * @dcbcfg: Local store which holds the APP CFG data
1101 *
1102 * Prepare IEEE 802.1Qaz APP CFG TLV
1103 */
1104static void
1105ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
1106 struct ice_dcbx_cfg *dcbcfg)
1107{
1108 u16 typelen, len, offset = 0;
1109 u8 priority, selector, i = 0;
1110 u8 *buf = tlv->tlvinfo;
1111 u32 ouisubtype;
1112
1113 /* No APP TLVs then just return */
1114 if (dcbcfg->numapps == 0)
1115 return;
1116 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1117 ICE_IEEE_SUBTYPE_APP_PRI);
1118 tlv->ouisubtype = htonl(ouisubtype);
1119
1120 /* Move offset to App Priority Table */
1121 offset++;
1122 /* Application Priority Table (3 octets)
1123 * Octets:| 1 | 2 | 3 |
1124 * -----------------------------------------
1125 * |Priority|Rsrvd| Sel | Protocol ID |
1126 * -----------------------------------------
1127 * Bits:|23 21|20 19|18 16|15 0|
1128 * -----------------------------------------
1129 */
1130 while (i < dcbcfg->numapps) {
1131 priority = dcbcfg->app[i].priority & 0x7;
1132 selector = dcbcfg->app[i].selector & 0x7;
1133 buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector;
1134 buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF;
1135 buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF;
1136 /* Move to next app */
1137 offset += 3;
1138 i++;
1139 if (i >= ICE_DCBX_MAX_APPS)
1140 break;
1141 }
1142 /* len includes size of ouisubtype + 1 reserved + 3*numapps */
1143 len = sizeof(tlv->ouisubtype) + 1 + (i * 3);
1144 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF));
1145 tlv->typelen = htons(typelen);
1146}
1147
1148/**
1149 * ice_add_dcb_tlv - Add all IEEE TLVs
1150 * @tlv: Fill TLV data in IEEE format
1151 * @dcbcfg: Local store which holds the DCB Config
1152 * @tlvid: Type of IEEE TLV
1153 *
1154 * Add tlv information
1155 */
1156static void
1157ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
1158 u16 tlvid)
1159{
1160 switch (tlvid) {
1161 case ICE_IEEE_TLV_ID_ETS_CFG:
1162 ice_add_ieee_ets_tlv(tlv, dcbcfg);
1163 break;
1164 case ICE_IEEE_TLV_ID_ETS_REC:
1165 ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
1166 break;
1167 case ICE_IEEE_TLV_ID_PFC_CFG:
1168 ice_add_ieee_pfc_tlv(tlv, dcbcfg);
1169 break;
1170 case ICE_IEEE_TLV_ID_APP_PRI:
1171 ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
1172 break;
1173 default:
1174 break;
1175 }
1176}
1177
1178/**
1179 * ice_dcb_cfg_to_lldp - Convert DCB configuration to MIB format
1180 * @lldpmib: pointer to the HW struct
1181 * @miblen: length of LLDP MIB
1182 * @dcbcfg: Local store which holds the DCB Config
1183 *
1184 * Convert the DCB configuration to MIB format
1185 */
1186static void
1187ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
1188{
1189 u16 len, offset = 0, tlvid = ICE_TLV_ID_START;
1190 struct ice_lldp_org_tlv *tlv;
1191 u16 typelen;
1192
1193 tlv = (struct ice_lldp_org_tlv *)lldpmib;
1194 while (1) {
1195 ice_add_dcb_tlv(tlv, dcbcfg, tlvid++);
1196 typelen = ntohs(tlv->typelen);
1197 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
1198 if (len)
1199 offset += len + 2;
1200 /* END TLV or beyond LLDPDU size */
1201 if (tlvid >= ICE_TLV_ID_END_OF_LLDPPDU ||
1202 offset > ICE_LLDPDU_SIZE)
1203 break;
1204 /* Move to next TLV */
1205 if (len)
1206 tlv = (struct ice_lldp_org_tlv *)
1207 ((char *)tlv + sizeof(tlv->typelen) + len);
1208 }
1209 *miblen = offset;
1210}
1211
1212/**
1213 * ice_set_dcb_cfg - Set the local LLDP MIB to FW
1214 * @pi: port information structure
1215 *
1216 * Set DCB configuration to the Firmware
1217 */
1218enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
1219{
1220 u8 mib_type, *lldpmib = NULL;
1221 struct ice_dcbx_cfg *dcbcfg;
1222 enum ice_status ret;
1223 struct ice_hw *hw;
1224 u16 miblen;
1225
1226 if (!pi)
1227 return ICE_ERR_PARAM;
1228
1229 hw = pi->hw;
1230
1231 /* update the HW local config */
1232 dcbcfg = &pi->local_dcbx_cfg;
1233 /* Allocate the LLDPDU */
1234 lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
1235 if (!lldpmib)
1236 return ICE_ERR_NO_MEMORY;
1237
1238 mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
1239 if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
1240 mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING;
1241
1242 ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg);
1243 ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen,
1244 NULL);
1245
1246 devm_kfree(ice_hw_to_dev(hw), lldpmib);
1247
1248 return ret;
1249}
1250
1251/**
1252 * ice_aq_query_port_ets - query port ets configuration
1253 * @pi: port information structure
1254 * @buf: pointer to buffer
1255 * @buf_size: buffer size in bytes
1256 * @cd: pointer to command details structure or NULL
1257 *
1258 * query current port ets configuration
1259 */
1260static enum ice_status
1261ice_aq_query_port_ets(struct ice_port_info *pi,
1262 struct ice_aqc_port_ets_elem *buf, u16 buf_size,
1263 struct ice_sq_cd *cd)
1264{
1265 struct ice_aqc_query_port_ets *cmd;
1266 struct ice_aq_desc desc;
1267 enum ice_status status;
1268
1269 if (!pi)
1270 return ICE_ERR_PARAM;
1271 cmd = &desc.params.port_ets;
1272 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
1273 cmd->port_teid = pi->root->info.node_teid;
1274
1275 status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd);
1276 return status;
1277}
1278
1279/**
1280 * ice_update_port_tc_tree_cfg - update TC tree configuration
1281 * @pi: port information structure
1282 * @buf: pointer to buffer
1283 *
1284 * update the SW DB with the new TC changes
1285 */
1286static enum ice_status
1287ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
1288 struct ice_aqc_port_ets_elem *buf)
1289{
1290 struct ice_sched_node *node, *tc_node;
1291 struct ice_aqc_get_elem elem;
1292 enum ice_status status = 0;
1293 u32 teid1, teid2;
1294 u8 i, j;
1295
1296 if (!pi)
1297 return ICE_ERR_PARAM;
1298 /* suspend the missing TC nodes */
1299 for (i = 0; i < pi->root->num_children; i++) {
1300 teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);
1301 ice_for_each_traffic_class(j) {
1302 teid2 = le32_to_cpu(buf->tc_node_teid[j]);
1303 if (teid1 == teid2)
1304 break;
1305 }
1306 if (j < ICE_MAX_TRAFFIC_CLASS)
1307 continue;
1308 /* TC is missing */
1309 pi->root->children[i]->in_use = false;
1310 }
1311 /* add the new TC nodes */
1312 ice_for_each_traffic_class(j) {
1313 teid2 = le32_to_cpu(buf->tc_node_teid[j]);
1314 if (teid2 == ICE_INVAL_TEID)
1315 continue;
1316 /* Is it already present in the tree ? */
1317 for (i = 0; i < pi->root->num_children; i++) {
1318 tc_node = pi->root->children[i];
1319 if (!tc_node)
1320 continue;
1321 teid1 = le32_to_cpu(tc_node->info.node_teid);
1322 if (teid1 == teid2) {
1323 tc_node->tc_num = j;
1324 tc_node->in_use = true;
1325 break;
1326 }
1327 }
1328 if (i < pi->root->num_children)
1329 continue;
1330 /* new TC */
1331 status = ice_sched_query_elem(pi->hw, teid2, &elem);
1332 if (!status)
1333 status = ice_sched_add_node(pi, 1, &elem.generic[0]);
1334 if (status)
1335 break;
1336 /* update the TC number */
1337 node = ice_sched_find_node_by_teid(pi->root, teid2);
1338 if (node)
1339 node->tc_num = j;
1340 }
1341 return status;
1342}
1343
1344/**
1345 * ice_query_port_ets - query port ets configuration
1346 * @pi: port information structure
1347 * @buf: pointer to buffer
1348 * @buf_size: buffer size in bytes
1349 * @cd: pointer to command details structure or NULL
1350 *
1351 * query current port ets configuration and update the
1352 * SW DB with the TC changes
1353 */
1354enum ice_status
1355ice_query_port_ets(struct ice_port_info *pi,
1356 struct ice_aqc_port_ets_elem *buf, u16 buf_size,
1357 struct ice_sq_cd *cd)
1358{
1359 enum ice_status status;
1360
1361 mutex_lock(&pi->sched_lock);
1362 status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
1363 if (!status)
1364 status = ice_update_port_tc_tree_cfg(pi, buf);
1365 mutex_unlock(&pi->sched_lock);
1366 return status;
1367}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index c2c2692990e8..dd0050162973 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -70,6 +70,18 @@
70#define ICE_IEEE_APP_PRIO_S 5 70#define ICE_IEEE_APP_PRIO_S 5
71#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S) 71#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
72 72
73/* TLV definitions for preparing MIB */
74#define ICE_IEEE_TLV_ID_ETS_CFG 3
75#define ICE_IEEE_TLV_ID_ETS_REC 4
76#define ICE_IEEE_TLV_ID_PFC_CFG 5
77#define ICE_IEEE_TLV_ID_APP_PRI 6
78#define ICE_TLV_ID_END_OF_LLDPPDU 7
79#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
80
81#define ICE_IEEE_ETS_TLV_LEN 25
82#define ICE_IEEE_PFC_TLV_LEN 6
83#define ICE_IEEE_APP_TLV_LEN 11
84
73/* IEEE 802.1AB LLDP Organization specific TLV */ 85/* IEEE 802.1AB LLDP Organization specific TLV */
74struct ice_lldp_org_tlv { 86struct ice_lldp_org_tlv {
75 __be16 typelen; 87 __be16 typelen;
@@ -108,7 +120,12 @@ struct ice_cee_app_prio {
108} __packed; 120} __packed;
109 121
110u8 ice_get_dcbx_status(struct ice_hw *hw); 122u8 ice_get_dcbx_status(struct ice_hw *hw);
123enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
111enum ice_status ice_init_dcb(struct ice_hw *hw); 124enum ice_status ice_init_dcb(struct ice_hw *hw);
125enum ice_status
126ice_query_port_ets(struct ice_port_info *pi,
127 struct ice_aqc_port_ets_elem *buf, u16 buf_size,
128 struct ice_sq_cd *cmd_details);
112#ifdef CONFIG_DCB 129#ifdef CONFIG_DCB
113enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd); 130enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd);
114enum ice_status 131enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index f2dd41408652..210487a0671d 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -4,6 +4,189 @@
4#include "ice_dcb_lib.h" 4#include "ice_dcb_lib.h"
5 5
6/** 6/**
7 * ice_dcb_get_ena_tc - return bitmap of enabled TCs
8 * @dcbcfg: DCB config to evaluate for enabled TCs
9 */
10u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
11{
12 u8 i, num_tc, ena_tc = 1;
13
14 num_tc = ice_dcb_get_num_tc(dcbcfg);
15
16 for (i = 0; i < num_tc; i++)
17 ena_tc |= BIT(i);
18
19 return ena_tc;
20}
21
22/**
23 * ice_dcb_get_num_tc - Get the number of TCs from DCBX config
24 * @dcbcfg: config to retrieve number of TCs from
25 */
26u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
27{
28 bool tc_unused = false;
29 u8 num_tc = 0;
30 u8 ret = 0;
31 int i;
32
33 /* Scan the ETS Config Priority Table to find traffic classes
34 * enabled and create a bitmask of enabled TCs
35 */
36 for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
37 num_tc |= BIT(dcbcfg->etscfg.prio_table[i]);
38
39 /* Scan bitmask for contiguous TCs starting with TC0 */
40 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
41 if (num_tc & BIT(i)) {
42 if (!tc_unused) {
43 ret++;
44 } else {
45 pr_err("Non-contiguous TCs - Disabling DCB\n");
46 return 1;
47 }
48 } else {
49 tc_unused = true;
50 }
51 }
52
53 /* There is always at least 1 TC */
54 if (!ret)
55 ret = 1;
56
57 return ret;
58}
59
60/**
61 * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
62 * @pf: pointer to the PF struct
63 *
64 * Assumed caller has already disabled all VSIs before
65 * calling this function. Reconfiguring DCB based on
66 * local_dcbx_cfg.
67 */
68static void ice_pf_dcb_recfg(struct ice_pf *pf)
69{
70 struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
71 u8 tc_map = 0;
72 int v, ret;
73
74 /* Update each VSI */
75 ice_for_each_vsi(pf, v) {
76 if (!pf->vsi[v])
77 continue;
78
79 if (pf->vsi[v]->type == ICE_VSI_PF)
80 tc_map = ice_dcb_get_ena_tc(dcbcfg);
81 else
82 tc_map = ICE_DFLT_TRAFFIC_CLASS;
83
84 ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
85 if (ret)
86 dev_err(&pf->pdev->dev,
87 "Failed to config TC for VSI index: %d\n",
88 pf->vsi[v]->idx);
89 else
90 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
91 }
92}
93
94/**
95 * ice_pf_dcb_cfg - Apply new DCB configuration
96 * @pf: pointer to the PF struct
97 * @new_cfg: DCBX config to apply
98 */
99static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
100{
101 struct ice_dcbx_cfg *old_cfg, *curr_cfg;
102 struct ice_aqc_port_ets_elem buf = { 0 };
103 int ret = 0;
104
105 curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
106
107 /* Enable DCB tagging only when more than one TC */
108 if (ice_dcb_get_num_tc(new_cfg) > 1) {
109 dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
110 set_bit(ICE_FLAG_DCB_ENA, pf->flags);
111 } else {
112 dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
113 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
114 }
115
116 if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
117 dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
118 return ret;
119 }
120
121 /* Store old config in case FW config fails */
122 old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
123 memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
124
125 /* avoid race conditions by holding the lock while disabling and
126 * re-enabling the VSI
127 */
128 rtnl_lock();
129 ice_pf_dis_all_vsi(pf, true);
130
131 memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
132 memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
133
134 /* Only send new config to HW if we are in SW LLDP mode. Otherwise,
135 * the new config came from the HW in the first place.
136 */
137 if (pf->hw.port_info->is_sw_lldp) {
138 ret = ice_set_dcb_cfg(pf->hw.port_info);
139 if (ret) {
140 dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
141 /* Restore previous settings to local config */
142 memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
143 goto out;
144 }
145 }
146
147 ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
148 if (ret) {
149 dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
150 goto out;
151 }
152
153 ice_pf_dcb_recfg(pf);
154
155out:
156 ice_pf_ena_all_vsi(pf, true);
157 rtnl_unlock();
158 devm_kfree(&pf->pdev->dev, old_cfg);
159 return ret;
160}
161
162/**
163 * ice_dcb_init_cfg - set the initial DCB config in SW
164 * @pf: pf to apply config to
165 */
166static int ice_dcb_init_cfg(struct ice_pf *pf)
167{
168 struct ice_dcbx_cfg *newcfg;
169 struct ice_port_info *pi;
170 int ret = 0;
171
172 pi = pf->hw.port_info;
173 newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
174 if (!newcfg)
175 return -ENOMEM;
176
177 memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
178 memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
179
180 dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
181 if (ice_pf_dcb_cfg(pf, newcfg))
182 ret = -EINVAL;
183
184 devm_kfree(&pf->pdev->dev, newcfg);
185
186 return ret;
187}
188
189/**
7 * ice_init_pf_dcb - initialize DCB for a PF 190 * ice_init_pf_dcb - initialize DCB for a PF
8 * @pf: pf to initiialize DCB for 191 * @pf: pf to initiialize DCB for
9 */ 192 */
@@ -12,6 +195,7 @@ int ice_init_pf_dcb(struct ice_pf *pf)
12 struct device *dev = &pf->pdev->dev; 195 struct device *dev = &pf->pdev->dev;
13 struct ice_port_info *port_info; 196 struct ice_port_info *port_info;
14 struct ice_hw *hw = &pf->hw; 197 struct ice_hw *hw = &pf->hw;
198 int err;
15 199
16 port_info = hw->port_info; 200 port_info = hw->port_info;
17 201
@@ -38,5 +222,23 @@ int ice_init_pf_dcb(struct ice_pf *pf)
38 ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL); 222 ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
39 } 223 }
40 224
41 return ice_init_dcb(hw); 225 err = ice_init_dcb(hw);
226 if (err)
227 goto dcb_init_err;
228
229 /* DCBX in FW and LLDP enabled in FW */
230 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
231
232 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
233
234 err = ice_dcb_init_cfg(pf);
235 if (err)
236 goto dcb_init_err;
237
238 dev_info(&pf->pdev->dev, "DCBX offload supported\n");
239 return err;
240
241dcb_init_err:
242 dev_err(dev, "DCB init failed\n");
243 return err;
42} 244}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index d67c769a9fb5..9c2fa11f6383 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -8,12 +8,25 @@
8#include "ice_lib.h" 8#include "ice_lib.h"
9 9
10#ifdef CONFIG_DCB 10#ifdef CONFIG_DCB
11u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
12u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
11int ice_init_pf_dcb(struct ice_pf *pf); 13int ice_init_pf_dcb(struct ice_pf *pf);
12#else 14#else
15static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
16{
17 return ICE_DFLT_TRAFFIC_CLASS;
18}
19
20static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
21{
22 return 1;
23}
24
13static inline int ice_init_pf_dcb(struct ice_pf *pf) 25static inline int ice_init_pf_dcb(struct ice_pf *pf)
14{ 26{
15 dev_dbg(&pf->pdev->dev, "DCB not supported\n"); 27 dev_dbg(&pf->pdev->dev, "DCB not supported\n");
16 return -EOPNOTSUPP; 28 return -EOPNOTSUPP;
17} 29}
30
18#endif /* CONFIG_DCB */ 31#endif /* CONFIG_DCB */
19#endif /* _ICE_DCB_LIB_H_ */ 32#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d24da511b775..f3574daa147c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3,6 +3,7 @@
3 3
4#include "ice.h" 4#include "ice.h"
5#include "ice_lib.h" 5#include "ice_lib.h"
6#include "ice_dcb_lib.h"
6 7
7/** 8/**
8 * ice_setup_rx_ctx - Configure a receive ring context 9 * ice_setup_rx_ctx - Configure a receive ring context
@@ -1301,7 +1302,11 @@ err_out:
1301 * through the MSI-X enabling code. On a constrained vector budget, we map Tx 1302 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
1302 * and Rx rings to the vector as "efficiently" as possible. 1303 * and Rx rings to the vector as "efficiently" as possible.
1303 */ 1304 */
1305#ifdef CONFIG_DCB
1306void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
1307#else
1304static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) 1308static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
1309#endif /* CONFIG_DCB */
1305{ 1310{
1306 int q_vectors = vsi->num_q_vectors; 1311 int q_vectors = vsi->num_q_vectors;
1307 int tx_rings_rem, rx_rings_rem; 1312 int tx_rings_rem, rx_rings_rem;
@@ -2172,6 +2177,14 @@ err_out:
2172 return -EIO; 2177 return -EIO;
2173} 2178}
2174 2179
2180static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2181{
2182 struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
2183
2184 vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
2185 vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
2186}
2187
2175/** 2188/**
2176 * ice_vsi_setup - Set up a VSI by a given type 2189 * ice_vsi_setup - Set up a VSI by a given type
2177 * @pf: board private structure 2190 * @pf: board private structure
@@ -2815,3 +2828,125 @@ bool ice_is_reset_in_progress(unsigned long *state)
2815 test_bit(__ICE_CORER_REQ, state) || 2828 test_bit(__ICE_CORER_REQ, state) ||
2816 test_bit(__ICE_GLOBR_REQ, state); 2829 test_bit(__ICE_GLOBR_REQ, state);
2817} 2830}
2831
2832#ifdef CONFIG_DCB
2833/**
2834 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
2835 * @vsi: VSI being configured
2836 * @ctx: the context buffer returned from AQ VSI update command
2837 */
2838static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
2839{
2840 vsi->info.mapping_flags = ctx->info.mapping_flags;
2841 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
2842 sizeof(vsi->info.q_mapping));
2843 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
2844 sizeof(vsi->info.tc_mapping));
2845}
2846
2847/**
2848 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
2849 * @vsi: the VSI being configured
2850 * @ena_tc: TC map to be enabled
2851 */
2852static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
2853{
2854 struct net_device *netdev = vsi->netdev;
2855 struct ice_pf *pf = vsi->back;
2856 struct ice_dcbx_cfg *dcbcfg;
2857 u8 netdev_tc;
2858 int i;
2859
2860 if (!netdev)
2861 return;
2862
2863 if (!ena_tc) {
2864 netdev_reset_tc(netdev);
2865 return;
2866 }
2867
2868 if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
2869 return;
2870
2871 dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
2872
2873 ice_for_each_traffic_class(i)
2874 if (vsi->tc_cfg.ena_tc & BIT(i))
2875 netdev_set_tc_queue(netdev,
2876 vsi->tc_cfg.tc_info[i].netdev_tc,
2877 vsi->tc_cfg.tc_info[i].qcount_tx,
2878 vsi->tc_cfg.tc_info[i].qoffset);
2879
2880 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
2881 u8 ets_tc = dcbcfg->etscfg.prio_table[i];
2882
2883 /* Get the mapped netdev TC# for the UP */
2884 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
2885 netdev_set_prio_tc_map(netdev, i, netdev_tc);
2886 }
2887}
2888
2889/**
2890 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
2891 * @vsi: VSI to be configured
2892 * @ena_tc: TC bitmap
2893 *
2894 * VSI queues expected to be quiesced before calling this function
2895 */
2896int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
2897{
2898 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2899 struct ice_vsi_ctx *ctx;
2900 struct ice_pf *pf = vsi->back;
2901 enum ice_status status;
2902 int i, ret = 0;
2903 u8 num_tc = 0;
2904
2905 ice_for_each_traffic_class(i) {
2906 /* build bitmap of enabled TCs */
2907 if (ena_tc & BIT(i))
2908 num_tc++;
2909 /* populate max_txqs per TC */
2910 max_txqs[i] = pf->num_lan_tx;
2911 }
2912
2913 vsi->tc_cfg.ena_tc = ena_tc;
2914 vsi->tc_cfg.numtc = num_tc;
2915
2916 ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
2917 if (!ctx)
2918 return -ENOMEM;
2919
2920 ctx->vf_num = 0;
2921 ctx->info = vsi->info;
2922
2923 ice_vsi_setup_q_map(vsi, ctx);
2924
2925	/* must indicate which sections of VSI context are being modified */
2926 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
2927 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
2928 if (status) {
2929 dev_info(&pf->pdev->dev, "Failed VSI Update\n");
2930 ret = -EIO;
2931 goto out;
2932 }
2933
2934 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2935 max_txqs);
2936
2937 if (status) {
2938 dev_err(&pf->pdev->dev,
2939 "VSI %d failed TC config, error %d\n",
2940 vsi->vsi_num, status);
2941 ret = -EIO;
2942 goto out;
2943 }
2944 ice_vsi_update_q_map(vsi, ctx);
2945 vsi->info.valid_sections = 0;
2946
2947 ice_vsi_cfg_netdev_tc(vsi, ena_tc);
2948out:
2949 devm_kfree(&pf->pdev->dev, ctx);
2950 return ret;
2951}
2952#endif /* CONFIG_DCB */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 519ef59e9e43..714ace077796 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -41,6 +41,10 @@ void ice_vsi_delete(struct ice_vsi *vsi);
41 41
42int ice_vsi_clear(struct ice_vsi *vsi); 42int ice_vsi_clear(struct ice_vsi *vsi);
43 43
44#ifdef CONFIG_DCB
45int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
46#endif /* CONFIG_DCB */
47
44struct ice_vsi * 48struct ice_vsi *
45ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 49ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
46 enum ice_vsi_type type, u16 vf_id); 50 enum ice_vsi_type type, u16 vf_id);
@@ -62,6 +66,10 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
62 66
63void ice_vsi_put_qs(struct ice_vsi *vsi); 67void ice_vsi_put_qs(struct ice_vsi *vsi);
64 68
69#ifdef CONFIG_DCB
70void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
71#endif /* CONFIG_DCB */
72
65void ice_vsi_dis_irq(struct ice_vsi *vsi); 73void ice_vsi_dis_irq(struct ice_vsi *vsi);
66 74
67void ice_vsi_free_irq(struct ice_vsi *vsi); 75void ice_vsi_free_irq(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 22fe0605aa9f..ff84a6c318a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -31,7 +31,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
31static struct workqueue_struct *ice_wq; 31static struct workqueue_struct *ice_wq;
32static const struct net_device_ops ice_netdev_ops; 32static const struct net_device_ops ice_netdev_ops;
33 33
34static void ice_pf_dis_all_vsi(struct ice_pf *pf);
35static void ice_rebuild(struct ice_pf *pf); 34static void ice_rebuild(struct ice_pf *pf);
36 35
37static void ice_vsi_release_all(struct ice_pf *pf); 36static void ice_vsi_release_all(struct ice_pf *pf);
@@ -398,6 +397,51 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
398} 397}
399 398
400/** 399/**
400 * ice_dis_vsi - pause a VSI
401 * @vsi: the VSI being paused
402 * @locked: is the rtnl_lock already held
403 */
404static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
405{
406 if (test_bit(__ICE_DOWN, vsi->state))
407 return;
408
409 set_bit(__ICE_NEEDS_RESTART, vsi->state);
410
411 if (vsi->type == ICE_VSI_PF && vsi->netdev) {
412 if (netif_running(vsi->netdev)) {
413 if (!locked) {
414 rtnl_lock();
415 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
416 rtnl_unlock();
417 } else {
418 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
419 }
420 } else {
421 ice_vsi_close(vsi);
422 }
423 }
424}
425
426/**
427 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
428 * @pf: the PF
429 * @locked: is the rtnl_lock already held
430 */
431#ifdef CONFIG_DCB
432void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
433#else
434static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
435#endif /* CONFIG_DCB */
436{
437 int v;
438
439 ice_for_each_vsi(pf, v)
440 if (pf->vsi[v])
441 ice_dis_vsi(pf->vsi[v], locked);
442}
443
444/**
401 * ice_prepare_for_reset - prep for the core to reset 445 * ice_prepare_for_reset - prep for the core to reset
402 * @pf: board private structure 446 * @pf: board private structure
403 * 447 *
@@ -417,7 +461,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
417 ice_vc_notify_reset(pf); 461 ice_vc_notify_reset(pf);
418 462
419 /* disable the VSIs and their queues that are not already DOWN */ 463 /* disable the VSIs and their queues that are not already DOWN */
420 ice_pf_dis_all_vsi(pf); 464 ice_pf_dis_all_vsi(pf, false);
421 465
422 if (hw->port_info) 466 if (hw->port_info)
423 ice_sched_clear_port(hw->port_info); 467 ice_sched_clear_port(hw->port_info);
@@ -3581,47 +3625,31 @@ static void ice_vsi_release_all(struct ice_pf *pf)
3581} 3625}
3582 3626
3583/** 3627/**
3584 * ice_dis_vsi - pause a VSI 3628 * ice_ena_vsi - resume a VSI
3585 * @vsi: the VSI being paused                              3629 * @vsi: the VSI being resumed
3586 * @locked: is the rtnl_lock already held 3630 * @locked: is the rtnl_lock already held
3587 */ 3631 */
3588static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 3632static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
3589{ 3633{
3590 if (test_bit(__ICE_DOWN, vsi->state)) 3634 int err = 0;
3591 return;
3592 3635
3593 set_bit(__ICE_NEEDS_RESTART, vsi->state); 3636 if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
3637 return err;
3638
3639 clear_bit(__ICE_NEEDS_RESTART, vsi->state);
3640
3641 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
3642 struct net_device *netd = vsi->netdev;
3594 3643
3595 if (vsi->type == ICE_VSI_PF && vsi->netdev) {
3596 if (netif_running(vsi->netdev)) { 3644 if (netif_running(vsi->netdev)) {
3597 if (!locked) { 3645 if (locked) {
3646 err = netd->netdev_ops->ndo_open(netd);
3647 } else {
3598 rtnl_lock(); 3648 rtnl_lock();
3599 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3649 err = netd->netdev_ops->ndo_open(netd);
3600 rtnl_unlock(); 3650 rtnl_unlock();
3601 } else {
3602 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3603 } 3651 }
3604 } else { 3652 } else {
3605 ice_vsi_close(vsi);
3606 }
3607 }
3608}
3609
3610/**
3611 * ice_ena_vsi - resume a VSI
3612 * @vsi: the VSI being resume
3613 */
3614static int ice_ena_vsi(struct ice_vsi *vsi)
3615{
3616 int err = 0;
3617
3618 if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
3619 vsi->netdev) {
3620 if (netif_running(vsi->netdev)) {
3621 rtnl_lock();
3622 err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3623 rtnl_unlock();
3624 } else {
3625 err = ice_vsi_open(vsi); 3653 err = ice_vsi_open(vsi);
3626 } 3654 }
3627 } 3655 }
@@ -3630,29 +3658,21 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
3630} 3658}
3631 3659
3632/** 3660/**
3633 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
3634 * @pf: the PF
3635 */
3636static void ice_pf_dis_all_vsi(struct ice_pf *pf)
3637{
3638 int v;
3639
3640 ice_for_each_vsi(pf, v)
3641 if (pf->vsi[v])
3642 ice_dis_vsi(pf->vsi[v], false);
3643}
3644
3645/**
3646 * ice_pf_ena_all_vsi - Resume all VSIs on a PF 3661 * ice_pf_ena_all_vsi - Resume all VSIs on a PF
3647 * @pf: the PF 3662 * @pf: the PF
3663 * @locked: is the rtnl_lock already held
3648 */ 3664 */
3649static int ice_pf_ena_all_vsi(struct ice_pf *pf) 3665#ifdef CONFIG_DCB
3666int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
3667#else
3668static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
3669#endif /* CONFIG_DCB */
3650{ 3670{
3651 int v; 3671 int v;
3652 3672
3653 ice_for_each_vsi(pf, v) 3673 ice_for_each_vsi(pf, v)
3654 if (pf->vsi[v]) 3674 if (pf->vsi[v])
3655 if (ice_ena_vsi(pf->vsi[v])) 3675 if (ice_ena_vsi(pf->vsi[v], locked))
3656 return -EIO; 3676 return -EIO;
3657 3677
3658 return 0; 3678 return 0;
@@ -3800,7 +3820,7 @@ static void ice_rebuild(struct ice_pf *pf)
3800 } 3820 }
3801 3821
3802 /* restart the VSIs that were rebuilt and running before the reset */ 3822 /* restart the VSIs that were rebuilt and running before the reset */
3803 err = ice_pf_ena_all_vsi(pf); 3823 err = ice_pf_ena_all_vsi(pf, false);
3804 if (err) { 3824 if (err) {
3805 dev_err(&pf->pdev->dev, "error enabling VSIs\n"); 3825 dev_err(&pf->pdev->dev, "error enabling VSIs\n");
3806 /* no need to disable VSIs in tear down path in ice_rebuild() 3826 /* no need to disable VSIs in tear down path in ice_rebuild()
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 3d1c941a938e..124feaf0e730 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
127 * 127 *
128 * Query scheduling elements (0x0404) 128 * Query scheduling elements (0x0404)
129 */ 129 */
130static enum ice_status 130enum ice_status
131ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, 131ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
132 struct ice_aqc_get_elem *buf, u16 buf_size, 132 struct ice_aqc_get_elem *buf, u16 buf_size,
133 u16 *elems_ret, struct ice_sq_cd *cd) 133 u16 *elems_ret, struct ice_sq_cd *cd)
@@ -138,31 +138,6 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
138} 138}
139 139
140/** 140/**
141 * ice_sched_query_elem - query element information from HW
142 * @hw: pointer to the HW struct
143 * @node_teid: node TEID to be queried
144 * @buf: buffer to element information
145 *
146 * This function queries HW element information
147 */
148static enum ice_status
149ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
150 struct ice_aqc_get_elem *buf)
151{
152 u16 buf_size, num_elem_ret = 0;
153 enum ice_status status;
154
155 buf_size = sizeof(*buf);
156 memset(buf, 0, buf_size);
157 buf->generic[0].node_teid = cpu_to_le32(node_teid);
158 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
159 NULL);
160 if (status || num_elem_ret != 1)
161 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
162 return status;
163}
164
165/**
166 * ice_sched_add_node - Insert the Tx scheduler node in SW DB 141 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
167 * @pi: port information structure 142 * @pi: port information structure
168 * @layer: Scheduler layer of the node 143 * @layer: Scheduler layer of the node
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index bee8221ad146..3902a8ad3025 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -24,6 +24,10 @@ struct ice_sched_agg_info {
24}; 24};
25 25
26/* FW AQ command calls */ 26/* FW AQ command calls */
27enum ice_status
28ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
29 struct ice_aqc_get_elem *buf, u16 buf_size,
30 u16 *elems_ret, struct ice_sq_cd *cd);
27enum ice_status ice_sched_init_port(struct ice_port_info *pi); 31enum ice_status ice_sched_init_port(struct ice_port_info *pi);
28enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); 32enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
29void ice_sched_clear_port(struct ice_port_info *pi); 33void ice_sched_clear_port(struct ice_port_info *pi);
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index d276e9a952db..c4cdfb2e0c4b 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -215,6 +215,8 @@ struct ice_nvm_info {
215#define ice_for_each_traffic_class(_i) \ 215#define ice_for_each_traffic_class(_i) \
216 for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++) 216 for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
217 217
218#define ICE_INVAL_TEID 0xFFFFFFFF
219
218struct ice_sched_node { 220struct ice_sched_node {
219 struct ice_sched_node *parent; 221 struct ice_sched_node *parent;
220 struct ice_sched_node *sibling; /* next sibling in the same layer */ 222 struct ice_sched_node *sibling; /* next sibling in the same layer */