author    Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>  2018-03-20 10:58:13 -0400
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>  2018-03-26 14:18:36 -0400
commit    cdedef59deb020e78721d820a5692100128c8c73 (patch)
tree      2bdeeb20d1fadea63eb26501b64bc6eee455f6e8 /drivers/net
parent    9daf8208dd4dee4e13079bd0520a5fb8d20e8b06 (diff)
ice: Configure VSIs for Tx/Rx
This patch configures the VSIs to be able to send and receive packets by
doing the following:

1) Initialize flexible parser to extract and include certain fields in the
   Rx descriptor.

2) Add Tx queues by programming the Tx queue context (implemented in
   ice_vsi_cfg_txqs). Note that adding the queues also enables (starts)
   the queues.

3) Add Rx queues by programming Rx queue context (implemented in
   ice_vsi_cfg_rxqs). Note that this only adds queues but doesn't start
   them. The rings will be started by calling ice_vsi_start_rx_rings on
   interface up.

4) Configure interrupts for VSI queues.

5) Implement ice_open and ice_stop.

Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
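Read together, the five steps above describe the interface bring-up order. The sketch below is only an illustrative reading of that order, pieced together from the function names in this message and from ice_vsi_cfg_msix() added later in the patch; the return types and the exact call sequence inside ice_open() are assumptions, not the literal code of this commit.

/* Illustrative only: approximate ifup ordering implied by the commit
 * message; error unwinding and locking are omitted.
 */
static int ice_up_sketch(struct ice_vsi *vsi)
{
	int err;

	/* step 2: program Tx queue contexts; adding the queues also starts them */
	err = ice_vsi_cfg_txqs(vsi);
	if (err)
		return err;

	/* step 3: program Rx queue contexts; queues are added but not yet started */
	err = ice_vsi_cfg_rxqs(vsi);
	if (err)
		return err;

	/* step 4: associate queue interrupts with MSI-X vectors */
	ice_vsi_cfg_msix(vsi);

	/* step 5: Rx rings are actually started on interface up */
	return ice_vsi_start_rx_rings(vsi);
}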
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile         |    3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h            |   36
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h |   86
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c     |  602
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h     |   13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h |   59
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h  |  246
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c       | 1140
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c      |  105
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h      |    5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h     |    2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c       |  361
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h       |   75
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h       |    2
14 files changed, 2729 insertions, 6 deletions
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index bd2cbe14e76e..de82fc875775 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -12,4 +12,5 @@ ice-y := ice_main.o \
12 ice_common.o \ 12 ice_common.o \
13 ice_nvm.o \ 13 ice_nvm.o \
14 ice_switch.o \ 14 ice_switch.o \
15 ice_sched.o 15 ice_sched.o \
16 ice_txrx.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 21d0c237ee3f..46cf82212ed8 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -11,8 +11,10 @@
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/compiler.h> 12#include <linux/compiler.h>
13#include <linux/etherdevice.h> 13#include <linux/etherdevice.h>
14#include <linux/skbuff.h>
14#include <linux/cpumask.h> 15#include <linux/cpumask.h>
15#include <linux/if_vlan.h> 16#include <linux/if_vlan.h>
17#include <linux/dma-mapping.h>
16#include <linux/pci.h> 18#include <linux/pci.h>
17#include <linux/workqueue.h> 19#include <linux/workqueue.h>
18#include <linux/aer.h> 20#include <linux/aer.h>
@@ -43,6 +45,8 @@
43#define ICE_VSI_MAP_SCATTER 1 45#define ICE_VSI_MAP_SCATTER 1
44#define ICE_MAX_SCATTER_TXQS 16 46#define ICE_MAX_SCATTER_TXQS 16
45#define ICE_MAX_SCATTER_RXQS 16 47#define ICE_MAX_SCATTER_RXQS 16
48#define ICE_Q_WAIT_RETRY_LIMIT 10
49#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
46#define ICE_RES_VALID_BIT 0x8000 50#define ICE_RES_VALID_BIT 0x8000
47#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) 51#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
48#define ICE_INVAL_Q_INDEX 0xffff 52#define ICE_INVAL_Q_INDEX 0xffff
@@ -56,6 +60,14 @@
56 (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \ 60 (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
57 ICE_AQ_VSI_UP_TABLE_UP##i##_M) 61 ICE_AQ_VSI_UP_TABLE_UP##i##_M)
58 62
63#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
64
65#define ice_for_each_txq(vsi, i) \
66 for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
67
68#define ice_for_each_rxq(vsi, i) \
69 for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
70
59struct ice_tc_info { 71struct ice_tc_info {
60 u16 qoffset; 72 u16 qoffset;
61 u16 qcount; 73 u16 qcount;
@@ -96,6 +108,9 @@ struct ice_vsi {
96 struct ice_ring **rx_rings; /* rx ring array */ 108 struct ice_ring **rx_rings; /* rx ring array */
97 struct ice_ring **tx_rings; /* tx ring array */ 109 struct ice_ring **tx_rings; /* tx ring array */
98 struct ice_q_vector **q_vectors; /* q_vector array */ 110 struct ice_q_vector **q_vectors; /* q_vector array */
111
112 irqreturn_t (*irq_handler)(int irq, void *data);
113
99 DECLARE_BITMAP(state, __ICE_STATE_NBITS); 114 DECLARE_BITMAP(state, __ICE_STATE_NBITS);
100 int num_q_vectors; 115 int num_q_vectors;
101 int base_vector; 116 int base_vector;
@@ -106,8 +121,14 @@ struct ice_vsi {
106 /* Interrupt thresholds */ 121 /* Interrupt thresholds */
107 u16 work_lmt; 122 u16 work_lmt;
108 123
124 u16 max_frame;
125 u16 rx_buf_len;
126
109 struct ice_aqc_vsi_props info; /* VSI properties */ 127 struct ice_aqc_vsi_props info; /* VSI properties */
110 128
129 bool irqs_ready;
130 bool current_isup; /* Sync 'link up' logging */
131
111 /* queue information */ 132 /* queue information */
112 u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ 133 u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
113 u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ 134 u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -128,9 +149,11 @@ struct ice_q_vector {
128 struct napi_struct napi; 149 struct napi_struct napi;
129 struct ice_ring_container rx; 150 struct ice_ring_container rx;
130 struct ice_ring_container tx; 151 struct ice_ring_container tx;
152 struct irq_affinity_notify affinity_notify;
131 u16 v_idx; /* index in the vsi->q_vector array. */ 153 u16 v_idx; /* index in the vsi->q_vector array. */
132 u8 num_ring_tx; /* total number of tx rings in vector */ 154 u8 num_ring_tx; /* total number of tx rings in vector */
133 u8 num_ring_rx; /* total number of rx rings in vector */ 155 u8 num_ring_rx; /* total number of rx rings in vector */
156 char name[ICE_INT_NAME_STR_LEN];
134} ____cacheline_internodealigned_in_smp; 157} ____cacheline_internodealigned_in_smp;
135 158
136enum ice_pf_flags { 159enum ice_pf_flags {
@@ -178,10 +201,14 @@ struct ice_netdev_priv {
178/** 201/**
179 * ice_irq_dynamic_ena - Enable default interrupt generation settings 202 * ice_irq_dynamic_ena - Enable default interrupt generation settings
180 * @hw: pointer to hw struct 203 * @hw: pointer to hw struct
204 * @vsi: pointer to vsi struct, can be NULL
205 * @q_vector: pointer to q_vector, can be NULL
181 */ 206 */
182static inline void ice_irq_dynamic_ena(struct ice_hw *hw) 207static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
208 struct ice_q_vector *q_vector)
183{ 209{
184 u32 vector = ((struct ice_pf *)hw->back)->oicr_idx; 210 u32 vector = (vsi && q_vector) ? vsi->base_vector + q_vector->v_idx :
211 ((struct ice_pf *)hw->back)->oicr_idx;
185 int itr = ICE_ITR_NONE; 212 int itr = ICE_ITR_NONE;
186 u32 val; 213 u32 val;
187 214
@@ -190,7 +217,10 @@ static inline void ice_irq_dynamic_ena(struct ice_hw *hw)
190 */ 217 */
191 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 218 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
192 (itr << GLINT_DYN_CTL_ITR_INDX_S); 219 (itr << GLINT_DYN_CTL_ITR_INDX_S);
193 220 if (vsi)
221 if (test_bit(__ICE_DOWN, vsi->state))
222 return;
194 wr32(hw, GLINT_DYN_CTL(vector), val); 223 wr32(hw, GLINT_DYN_CTL(vector), val);
195} 224}
225
196#endif /* _ICE_H_ */ 226#endif /* _ICE_H_ */
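The widened ice_irq_dynamic_ena() signature shown above supports two calling patterns, both visible in its body: the miscellaneous/OICR path passes NULL for both vsi and q_vector and falls back to pf->oicr_idx, while queue interrupts pass both so the write targets vsi->base_vector + q_vector->v_idx (and is skipped when the VSI is marked __ICE_DOWN). For example:

/* misc interrupt cause (OICR): no VSI/vector context, uses pf->oicr_idx */
ice_irq_dynamic_ena(hw, NULL, NULL);

/* per-queue interrupt: re-enable the vector owned by this q_vector */
ice_irq_dynamic_ena(hw, vsi, q_vector);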
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 9237841439da..c7abcc1dbbcc 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -968,6 +968,87 @@ struct ice_aqc_nvm {
968 __le32 addr_low; 968 __le32 addr_low;
969}; 969};
970 970
971/* Add TX LAN Queues (indirect 0x0C30) */
972struct ice_aqc_add_txqs {
973 u8 num_qgrps;
974 u8 reserved[3];
975 __le32 reserved1;
976 __le32 addr_high;
977 __le32 addr_low;
978};
979
980/* This is the descriptor of each queue entry for the Add TX LAN Queues
981 * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.
982 */
983struct ice_aqc_add_txqs_perq {
984 __le16 txq_id;
985 u8 rsvd[2];
986 __le32 q_teid;
987 u8 txq_ctx[22];
988 u8 rsvd2[2];
989 struct ice_aqc_txsched_elem info;
990};
991
992/* The format of the command buffer for Add TX LAN Queues (0x0C30)
993 * is an array of the following structs. Please note that the length of
994 * each struct ice_aqc_add_tx_qgrp is variable due
995 * to the variable number of queues in each group!
996 */
997struct ice_aqc_add_tx_qgrp {
998 __le32 parent_teid;
999 u8 num_txqs;
1000 u8 rsvd[3];
1001 struct ice_aqc_add_txqs_perq txqs[1];
1002};
1003
1004/* Disable TX LAN Queues (indirect 0x0C31) */
1005struct ice_aqc_dis_txqs {
1006 u8 cmd_type;
1007#define ICE_AQC_Q_DIS_CMD_S 0
1008#define ICE_AQC_Q_DIS_CMD_M (0x3 << ICE_AQC_Q_DIS_CMD_S)
1009#define ICE_AQC_Q_DIS_CMD_NO_FUNC_RESET (0 << ICE_AQC_Q_DIS_CMD_S)
1010#define ICE_AQC_Q_DIS_CMD_VM_RESET BIT(ICE_AQC_Q_DIS_CMD_S)
1011#define ICE_AQC_Q_DIS_CMD_VF_RESET (2 << ICE_AQC_Q_DIS_CMD_S)
1012#define ICE_AQC_Q_DIS_CMD_PF_RESET (3 << ICE_AQC_Q_DIS_CMD_S)
1013#define ICE_AQC_Q_DIS_CMD_SUBSEQ_CALL BIT(2)
1014#define ICE_AQC_Q_DIS_CMD_FLUSH_PIPE BIT(3)
1015 u8 num_entries;
1016 __le16 vmvf_and_timeout;
1017#define ICE_AQC_Q_DIS_VMVF_NUM_S 0
1018#define ICE_AQC_Q_DIS_VMVF_NUM_M (0x3FF << ICE_AQC_Q_DIS_VMVF_NUM_S)
1019#define ICE_AQC_Q_DIS_TIMEOUT_S 10
1020#define ICE_AQC_Q_DIS_TIMEOUT_M (0x3F << ICE_AQC_Q_DIS_TIMEOUT_S)
1021 __le32 blocked_cgds;
1022 __le32 addr_high;
1023 __le32 addr_low;
1024};
1025
1026/* The buffer for Disable TX LAN Queues (indirect 0x0C31)
1027 * contains the following structures, arrayed one after the
1028 * other.
1029 * Note: Since the q_id is 16 bits wide, if the
1030 * number of queues is even, then 2 bytes of alignment MUST be
1031 * added before the start of the next group, to allow correct
1032 * alignment of the parent_teid field.
1033 */
1034struct ice_aqc_dis_txq_item {
1035 __le32 parent_teid;
1036 u8 num_qs;
1037 u8 rsvd;
1038 /* The length of the q_id array varies according to num_qs */
1039 __le16 q_id[1];
1040 /* This only applies from F8 onward */
1041#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15
1042#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \
1043 (0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
1044#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \
1045 (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
1046};
1047
1048struct ice_aqc_dis_txq {
1049 struct ice_aqc_dis_txq_item qgrps[1];
1050};
1051
971/** 1052/**
972 * struct ice_aq_desc - Admin Queue (AQ) descriptor 1053 * struct ice_aq_desc - Admin Queue (AQ) descriptor
973 * @flags: ICE_AQ_FLAG_* flags 1054 * @flags: ICE_AQ_FLAG_* flags
@@ -1008,6 +1089,8 @@ struct ice_aq_desc {
1008 struct ice_aqc_query_txsched_res query_sched_res; 1089 struct ice_aqc_query_txsched_res query_sched_res;
1009 struct ice_aqc_add_move_delete_elem add_move_delete_elem; 1090 struct ice_aqc_add_move_delete_elem add_move_delete_elem;
1010 struct ice_aqc_nvm nvm; 1091 struct ice_aqc_nvm nvm;
1092 struct ice_aqc_add_txqs add_txqs;
1093 struct ice_aqc_dis_txqs dis_txqs;
1011 struct ice_aqc_add_get_update_free_vsi vsi_cmd; 1094 struct ice_aqc_add_get_update_free_vsi vsi_cmd;
1012 struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; 1095 struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
1013 struct ice_aqc_get_link_status get_link_status; 1096 struct ice_aqc_get_link_status get_link_status;
@@ -1088,6 +1171,9 @@ enum ice_adminq_opc {
1088 /* NVM commands */ 1171 /* NVM commands */
1089 ice_aqc_opc_nvm_read = 0x0701, 1172 ice_aqc_opc_nvm_read = 0x0701,
1090 1173
1174 /* TX queue handling commands/events */
1175 ice_aqc_opc_add_txqs = 0x0C30,
1176 ice_aqc_opc_dis_txqs = 0x0C31,
1091}; 1177};
1092 1178
1093#endif /* _ICE_ADMINQ_CMD_H_ */ 1179#endif /* _ICE_ADMINQ_CMD_H_ */
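Because both command buffers are variable length, callers must size them exactly; ice_aq_add_lan_txq() and ice_aq_dis_lan_txq() in ice_common.c reject any mismatch with ICE_ERR_PARAM. As a hedged illustration (not driver code), the per-group arithmetic for the disable buffer, including the alignment note above, works out to:

/* Sketch: expected buffer size for one Disable Tx LAN Queues group.
 * Mirrors the per-group arithmetic in ice_aq_dis_lan_txq().
 */
static u16 ice_dis_txq_grp_size_sketch(const struct ice_aqc_dis_txq_item *grp)
{
	/* one __le16 per queue ID... */
	u16 sz = grp->num_qs * sizeof(grp->q_id[0]);

	/* ...plus the group header (sizeof already counts q_id[1], back it out) */
	sz += sizeof(*grp) - sizeof(grp->q_id);

	/* even queue counts need 2 pad bytes so the next parent_teid stays aligned */
	if ((grp->num_qs % 2) == 0)
		sz += 2;

	return sz;
}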
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 67301fe75482..3d5686636656 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -7,6 +7,25 @@
7 7
8#define ICE_PF_RESET_WAIT_COUNT 200 8#define ICE_PF_RESET_WAIT_COUNT 200
9 9
10#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
11 wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
12 ((ICE_RX_OPC_MDID << \
13 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
14 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
15 (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
16 GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
17
18#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
19 wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
20 (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
21 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
22 (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
23 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
24 (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
25 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
26 (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
27 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
28
10/** 29/**
11 * ice_set_mac_type - Sets MAC type 30 * ice_set_mac_type - Sets MAC type
12 * @hw: pointer to the HW structure 31 * @hw: pointer to the HW structure
@@ -259,6 +278,33 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
259} 278}
260 279
261/** 280/**
281 * ice_init_flex_parser - initialize rx flex parser
282 * @hw: pointer to the hardware structure
283 *
284 * Function to initialize flex descriptors
285 */
286static void ice_init_flex_parser(struct ice_hw *hw)
287{
288 u8 idx = 0;
289
290 ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
291 ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
292 ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
293 ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
294 ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
295 ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
296 ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
297 ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
298 ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
299 ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
300 idx++);
301 ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
302 ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
303 ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
304 ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
305}
306
307/**
262 * ice_init_fltr_mgmt_struct - initializes filter management list and locks 308 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
263 * @hw: pointer to the hw struct 309 * @hw: pointer to the hw struct
264 */ 310 */
@@ -431,6 +477,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
431 if (status) 477 if (status)
432 goto err_unroll_fltr_mgmt_struct; 478 goto err_unroll_fltr_mgmt_struct;
433 479
480 ice_init_flex_parser(hw);
481
434 return 0; 482 return 0;
435 483
436err_unroll_fltr_mgmt_struct: 484err_unroll_fltr_mgmt_struct:
@@ -598,6 +646,114 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
598} 646}
599 647
600/** 648/**
649 * ice_copy_rxq_ctx_to_hw
650 * @hw: pointer to the hardware structure
651 * @ice_rxq_ctx: pointer to the rxq context
652 * @rxq_index: the index of the rx queue
653 *
654 * Copies rxq context from dense structure to hw register space
655 */
656static enum ice_status
657ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
658{
659 u8 i;
660
661 if (!ice_rxq_ctx)
662 return ICE_ERR_BAD_PTR;
663
664 if (rxq_index > QRX_CTRL_MAX_INDEX)
665 return ICE_ERR_PARAM;
666
667 /* Copy each dword separately to hw */
668 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
669 wr32(hw, QRX_CONTEXT(i, rxq_index),
670 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
671
672 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
673 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
674 }
675
676 return 0;
677}
678
679/* LAN Rx Queue Context */
680static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
681 /* Field Width LSB */
682 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
683 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
684 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
685 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
686 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
687 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
688 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
689 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
690 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
691 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
692 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
693 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
694 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
695 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
696 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
697 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
698 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
699 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
700 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
701 { 0 }
702};
703
704/**
705 * ice_write_rxq_ctx
706 * @hw: pointer to the hardware structure
707 * @rlan_ctx: pointer to the rxq context
708 * @rxq_index: the index of the rx queue
709 *
710 * Converts rxq context from sparse to dense structure and then writes
711 * it to hw register space
712 */
713enum ice_status
714ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
715 u32 rxq_index)
716{
717 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
718
719 ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
720 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
721}
722
723/* LAN Tx Queue Context */
724const struct ice_ctx_ele ice_tlan_ctx_info[] = {
725 /* Field Width LSB */
726 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
727 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
728 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
729 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
730 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
731 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
732 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
733 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
734 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
735 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
736 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
737 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
738 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
739 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
740 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
741 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
742 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
743 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
744 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
745 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
746 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
747 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
748 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
749 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
750 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
751 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
752 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171),
753 { 0 }
754};
755
756/**
601 * ice_debug_cq 757 * ice_debug_cq
602 * @hw: pointer to the hardware structure 758 * @hw: pointer to the hardware structure
603 * @mask: debug mask 759 * @mask: debug mask
@@ -1104,3 +1260,449 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
1104 if (ice_check_sq_alive(hw, &hw->adminq)) 1260 if (ice_check_sq_alive(hw, &hw->adminq))
1105 ice_aq_clear_pxe_mode(hw); 1261 ice_aq_clear_pxe_mode(hw);
1106} 1262}
1263
1264/**
1265 * ice_aq_add_lan_txq
1266 * @hw: pointer to the hardware structure
1267 * @num_qgrps: Number of added queue groups
1268 * @qg_list: list of queue groups to be added
1269 * @buf_size: size of buffer for indirect command
1270 * @cd: pointer to command details structure or NULL
1271 *
1272 * Add Tx LAN queue (0x0C30)
1273 *
1274 * NOTE:
1275 * Prior to calling add Tx LAN queue:
1276 * Initialize the following as part of the Tx queue context:
1277 * Completion queue ID if the queue uses Completion queue, Quanta profile,
1278 * Cache profile and Packet shaper profile.
1279 *
1280 * After add Tx LAN queue AQ command is completed:
1281 * Interrupts should be associated with specific queues,
1282 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
1283 * flow.
1284 */
1285static enum ice_status
1286ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
1287 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
1288 struct ice_sq_cd *cd)
1289{
1290 u16 i, sum_header_size, sum_q_size = 0;
1291 struct ice_aqc_add_tx_qgrp *list;
1292 struct ice_aqc_add_txqs *cmd;
1293 struct ice_aq_desc desc;
1294
1295 cmd = &desc.params.add_txqs;
1296
1297 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
1298
1299 if (!qg_list)
1300 return ICE_ERR_PARAM;
1301
1302 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
1303 return ICE_ERR_PARAM;
1304
1305 sum_header_size = num_qgrps *
1306 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
1307
1308 list = qg_list;
1309 for (i = 0; i < num_qgrps; i++) {
1310 struct ice_aqc_add_txqs_perq *q = list->txqs;
1311
1312 sum_q_size += list->num_txqs * sizeof(*q);
1313 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
1314 }
1315
1316 if (buf_size != (sum_header_size + sum_q_size))
1317 return ICE_ERR_PARAM;
1318
1319 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1320
1321 cmd->num_qgrps = num_qgrps;
1322
1323 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
1324}
1325
1326/**
1327 * ice_aq_dis_lan_txq
1328 * @hw: pointer to the hardware structure
1329 * @num_qgrps: number of groups in the list
1330 * @qg_list: the list of groups to disable
1331 * @buf_size: the total size of the qg_list buffer in bytes
1332 * @cd: pointer to command details structure or NULL
1333 *
1334 * Disable LAN Tx queue (0x0C31)
1335 */
1336static enum ice_status
1337ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
1338 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
1339 struct ice_sq_cd *cd)
1340{
1341 struct ice_aqc_dis_txqs *cmd;
1342 struct ice_aq_desc desc;
1343 u16 i, sz = 0;
1344
1345 cmd = &desc.params.dis_txqs;
1346 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
1347
1348 if (!qg_list)
1349 return ICE_ERR_PARAM;
1350
1351 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
1352 return ICE_ERR_PARAM;
1353 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1354 cmd->num_entries = num_qgrps;
1355
1356 for (i = 0; i < num_qgrps; ++i) {
1357 /* Calculate the size taken up by the queue IDs in this group */
1358 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
1359
1360 /* Add the size of the group header */
1361 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
1362
1363 /* If the num of queues is even, add 2 bytes of padding */
1364 if ((qg_list[i].num_qs % 2) == 0)
1365 sz += 2;
1366 }
1367
1368 if (buf_size != sz)
1369 return ICE_ERR_PARAM;
1370
1371 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
1372}
1373
1374/* End of FW Admin Queue command wrappers */
1375
1376/**
1377 * ice_write_byte - write a byte to a packed context structure
1378 * @src_ctx: the context structure to read from
1379 * @dest_ctx: the context to be written to
1380 * @ce_info: a description of the struct to be filled
1381 */
1382static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
1383 const struct ice_ctx_ele *ce_info)
1384{
1385 u8 src_byte, dest_byte, mask;
1386 u8 *from, *dest;
1387 u16 shift_width;
1388
1389 /* copy from the next struct field */
1390 from = src_ctx + ce_info->offset;
1391
1392 /* prepare the bits and mask */
1393 shift_width = ce_info->lsb % 8;
1394 mask = (u8)(BIT(ce_info->width) - 1);
1395
1396 src_byte = *from;
1397 src_byte &= mask;
1398
1399 /* shift to correct alignment */
1400 mask <<= shift_width;
1401 src_byte <<= shift_width;
1402
1403 /* get the current bits from the target bit string */
1404 dest = dest_ctx + (ce_info->lsb / 8);
1405
1406 memcpy(&dest_byte, dest, sizeof(dest_byte));
1407
1408 dest_byte &= ~mask; /* get the bits not changing */
1409 dest_byte |= src_byte; /* add in the new bits */
1410
1411 /* put it all back */
1412 memcpy(dest, &dest_byte, sizeof(dest_byte));
1413}
1414
1415/**
1416 * ice_write_word - write a word to a packed context structure
1417 * @src_ctx: the context structure to read from
1418 * @dest_ctx: the context to be written to
1419 * @ce_info: a description of the struct to be filled
1420 */
1421static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
1422 const struct ice_ctx_ele *ce_info)
1423{
1424 u16 src_word, mask;
1425 __le16 dest_word;
1426 u8 *from, *dest;
1427 u16 shift_width;
1428
1429 /* copy from the next struct field */
1430 from = src_ctx + ce_info->offset;
1431
1432 /* prepare the bits and mask */
1433 shift_width = ce_info->lsb % 8;
1434 mask = BIT(ce_info->width) - 1;
1435
1436 /* don't swizzle the bits until after the mask because the mask bits
1437 * will be in a different bit position on big endian machines
1438 */
1439 src_word = *(u16 *)from;
1440 src_word &= mask;
1441
1442 /* shift to correct alignment */
1443 mask <<= shift_width;
1444 src_word <<= shift_width;
1445
1446 /* get the current bits from the target bit string */
1447 dest = dest_ctx + (ce_info->lsb / 8);
1448
1449 memcpy(&dest_word, dest, sizeof(dest_word));
1450
1451 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
1452 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
1453
1454 /* put it all back */
1455 memcpy(dest, &dest_word, sizeof(dest_word));
1456}
1457
1458/**
1459 * ice_write_dword - write a dword to a packed context structure
1460 * @src_ctx: the context structure to read from
1461 * @dest_ctx: the context to be written to
1462 * @ce_info: a description of the struct to be filled
1463 */
1464static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
1465 const struct ice_ctx_ele *ce_info)
1466{
1467 u32 src_dword, mask;
1468 __le32 dest_dword;
1469 u8 *from, *dest;
1470 u16 shift_width;
1471
1472 /* copy from the next struct field */
1473 from = src_ctx + ce_info->offset;
1474
1475 /* prepare the bits and mask */
1476 shift_width = ce_info->lsb % 8;
1477
1478 /* if the field width is exactly 32 on an x86 machine, then the shift
1479 * operation will not work because the SHL instructions count is masked
1480 * to 5 bits so the shift will do nothing
1481 */
1482 if (ce_info->width < 32)
1483 mask = BIT(ce_info->width) - 1;
1484 else
1485 mask = (u32)~0;
1486
1487 /* don't swizzle the bits until after the mask because the mask bits
1488 * will be in a different bit position on big endian machines
1489 */
1490 src_dword = *(u32 *)from;
1491 src_dword &= mask;
1492
1493 /* shift to correct alignment */
1494 mask <<= shift_width;
1495 src_dword <<= shift_width;
1496
1497 /* get the current bits from the target bit string */
1498 dest = dest_ctx + (ce_info->lsb / 8);
1499
1500 memcpy(&dest_dword, dest, sizeof(dest_dword));
1501
1502 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
1503 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
1504
1505 /* put it all back */
1506 memcpy(dest, &dest_dword, sizeof(dest_dword));
1507}
1508
1509/**
1510 * ice_write_qword - write a qword to a packed context structure
1511 * @src_ctx: the context structure to read from
1512 * @dest_ctx: the context to be written to
1513 * @ce_info: a description of the struct to be filled
1514 */
1515static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
1516 const struct ice_ctx_ele *ce_info)
1517{
1518 u64 src_qword, mask;
1519 __le64 dest_qword;
1520 u8 *from, *dest;
1521 u16 shift_width;
1522
1523 /* copy from the next struct field */
1524 from = src_ctx + ce_info->offset;
1525
1526 /* prepare the bits and mask */
1527 shift_width = ce_info->lsb % 8;
1528
1529 /* if the field width is exactly 64 on an x86 machine, then the shift
1530 * operation will not work because the SHL instructions count is masked
1531 * to 6 bits so the shift will do nothing
1532 */
1533 if (ce_info->width < 64)
1534 mask = BIT_ULL(ce_info->width) - 1;
1535 else
1536 mask = (u64)~0;
1537
1538 /* don't swizzle the bits until after the mask because the mask bits
1539 * will be in a different bit position on big endian machines
1540 */
1541 src_qword = *(u64 *)from;
1542 src_qword &= mask;
1543
1544 /* shift to correct alignment */
1545 mask <<= shift_width;
1546 src_qword <<= shift_width;
1547
1548 /* get the current bits from the target bit string */
1549 dest = dest_ctx + (ce_info->lsb / 8);
1550
1551 memcpy(&dest_qword, dest, sizeof(dest_qword));
1552
1553 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
1554 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
1555
1556 /* put it all back */
1557 memcpy(dest, &dest_qword, sizeof(dest_qword));
1558}
1559
1560/**
1561 * ice_set_ctx - set context bits in packed structure
1562 * @src_ctx: pointer to a generic non-packed context structure
1563 * @dest_ctx: pointer to memory for the packed structure
1564 * @ce_info: a description of the structure to be transformed
1565 */
1566enum ice_status
1567ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
1568{
1569 int f;
1570
1571 for (f = 0; ce_info[f].width; f++) {
1572 /* We have to deal with each element of the FW response
1573 * using the correct size so that we are correct regardless
1574 * of the endianness of the machine.
1575 */
1576 switch (ce_info[f].size_of) {
1577 case sizeof(u8):
1578 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
1579 break;
1580 case sizeof(u16):
1581 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
1582 break;
1583 case sizeof(u32):
1584 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
1585 break;
1586 case sizeof(u64):
1587 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
1588 break;
1589 default:
1590 return ICE_ERR_INVAL_SIZE;
1591 }
1592 }
1593
1594 return 0;
1595}
1596
1597/**
1598 * ice_ena_vsi_txq
1599 * @pi: port information structure
1600 * @vsi_id: VSI id
1601 * @tc: tc number
1602 * @num_qgrps: Number of added queue groups
1603 * @buf: list of queue groups to be added
1604 * @buf_size: size of buffer for indirect command
1605 * @cd: pointer to command details structure or NULL
1606 *
1607 * This function adds one lan q
1608 */
1609enum ice_status
1610ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
1611 struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
1612 struct ice_sq_cd *cd)
1613{
1614 struct ice_aqc_txsched_elem_data node = { 0 };
1615 struct ice_sched_node *parent;
1616 enum ice_status status;
1617 struct ice_hw *hw;
1618
1619 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
1620 return ICE_ERR_CFG;
1621
1622 if (num_qgrps > 1 || buf->num_txqs > 1)
1623 return ICE_ERR_MAX_LIMIT;
1624
1625 hw = pi->hw;
1626
1627 mutex_lock(&pi->sched_lock);
1628
1629 /* find a parent node */
1630 parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
1631 ICE_SCHED_NODE_OWNER_LAN);
1632 if (!parent) {
1633 status = ICE_ERR_PARAM;
1634 goto ena_txq_exit;
1635 }
1636 buf->parent_teid = parent->info.node_teid;
1637 node.parent_teid = parent->info.node_teid;
1638 /* Mark that the values in the "generic" section as valid. The default
1639 * value in the "generic" section is zero. This means that :
1640 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
1641 * - 0 priority among siblings, indicated by Bit 1-3.
1642 * - WFQ, indicated by Bit 4.
1643 * - 0 Adjustment value is used in PSM credit update flow, indicated by
1644 * Bit 5-6.
1645 * - Bit 7 is reserved.
1646 * Without setting the generic section as valid in valid_sections, the
1647 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
1648 */
1649 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
1650
1651 /* add the lan q */
1652 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
1653 if (status)
1654 goto ena_txq_exit;
1655
1656 node.node_teid = buf->txqs[0].q_teid;
1657 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
1658
 1659 /* add a leaf node into scheduler tree q layer */
1660 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
1661
1662ena_txq_exit:
1663 mutex_unlock(&pi->sched_lock);
1664 return status;
1665}
1666
1667/**
1668 * ice_dis_vsi_txq
1669 * @pi: port information structure
1670 * @num_queues: number of queues
1671 * @q_ids: pointer to the q_id array
1672 * @q_teids: pointer to queue node teids
1673 * @cd: pointer to command details structure or NULL
1674 *
1675 * This function removes queues and their corresponding nodes in SW DB
1676 */
1677enum ice_status
1678ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
1679 u32 *q_teids, struct ice_sq_cd *cd)
1680{
1681 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1682 struct ice_aqc_dis_txq_item qg_list;
1683 u16 i;
1684
1685 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
1686 return ICE_ERR_CFG;
1687
1688 mutex_lock(&pi->sched_lock);
1689
1690 for (i = 0; i < num_queues; i++) {
1691 struct ice_sched_node *node;
1692
1693 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
1694 if (!node)
1695 continue;
1696 qg_list.parent_teid = node->info.parent_teid;
1697 qg_list.num_qs = 1;
1698 qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
1699 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
1700 sizeof(qg_list), cd);
1701
1702 if (status)
1703 break;
1704 ice_free_sched_node(pi, node);
1705 }
1706 mutex_unlock(&pi->sched_lock);
1707 return status;
1708}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index dd4473e84ebb..8ed1135bb189 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -30,9 +30,15 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
30 struct ice_sq_cd *cd); 30 struct ice_sq_cd *cd);
31void ice_clear_pxe_mode(struct ice_hw *hw); 31void ice_clear_pxe_mode(struct ice_hw *hw);
32enum ice_status ice_get_caps(struct ice_hw *hw); 32enum ice_status ice_get_caps(struct ice_hw *hw);
33enum ice_status
34ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
35 u32 rxq_index);
33bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); 36bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
34enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); 37enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
35void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); 38void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
39extern const struct ice_ctx_ele ice_tlan_ctx_info[];
40enum ice_status
41ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
36enum ice_status 42enum ice_status
37ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, 43ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
38 void *buf, u16 buf_size, struct ice_sq_cd *cd); 44 void *buf, u16 buf_size, struct ice_sq_cd *cd);
@@ -41,4 +47,11 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
41enum ice_status 47enum ice_status
42ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, 48ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
43 struct ice_link_status *link, struct ice_sq_cd *cd); 49 struct ice_link_status *link, struct ice_sq_cd *cd);
50enum ice_status
51ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
52 u32 *q_teids, struct ice_sq_cd *cmd_details);
53enum ice_status
54ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
55 struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
56 struct ice_sq_cd *cd);
44#endif /* _ICE_COMMON_H_ */ 57#endif /* _ICE_COMMON_H_ */
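The two new scheduler-facing prototypes are meant to be used as a pair: the q_teid that firmware returns in buf->txqs[0].q_teid from ice_ena_vsi_txq() is what the driver later hands back through the q_teids array of ice_dis_vsi_txq() so the matching scheduler node can be found and freed. A hedged sketch of that flow; the surrounding bookkeeping is invented for the example and does not mirror real driver structures:

/* Sketch only: enable one Tx queue, then tear it down with the TEID and
 * queue id firmware reported in the add response.
 */
static enum ice_status ice_txq_lifecycle_sketch(struct ice_port_info *pi,
						u16 vsi_id, u8 tc,
						struct ice_aqc_add_tx_qgrp *qg_buf,
						u16 buf_size)
{
	enum ice_status status;
	u32 teid;
	u16 qid;

	status = ice_ena_vsi_txq(pi, vsi_id, tc, 1, qg_buf, buf_size, NULL);
	if (status)
		return status;

	/* firmware reports the scheduler TEID and queue id it assigned */
	teid = le32_to_cpu(qg_buf->txqs[0].q_teid);
	qid = le16_to_cpu(qg_buf->txqs[0].txq_id);

	/* ... queue runs; on teardown both go back so the node can be freed ... */
	return ice_dis_vsi_txq(pi, 1, &qid, &teid, NULL);
}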
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 446a8bbef488..fc9b0b179e99 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,6 +6,7 @@
6#ifndef _ICE_HW_AUTOGEN_H_ 6#ifndef _ICE_HW_AUTOGEN_H_
7#define _ICE_HW_AUTOGEN_H_ 7#define _ICE_HW_AUTOGEN_H_
8 8
9#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
9#define PF_FW_ARQBAH 0x00080180 10#define PF_FW_ARQBAH 0x00080180
10#define PF_FW_ARQBAL 0x00080080 11#define PF_FW_ARQBAL 0x00080080
11#define PF_FW_ARQH 0x00080380 12#define PF_FW_ARQH 0x00080380
@@ -40,6 +41,44 @@
40#define PF_FW_ATQLEN_ATQENABLE_S 31 41#define PF_FW_ATQLEN_ATQENABLE_S 31
41#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S) 42#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
42#define PF_FW_ATQT 0x00080400 43#define PF_FW_ATQT 0x00080400
44
45#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
46#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
47#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
48#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
49#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S)
50#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
51#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S)
52#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
53#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S)
54#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
55#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
56#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S)
57#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30
58#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S)
59#define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045c900 + ((_i) * 4))
60#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0
61#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S)
62#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30
63#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S)
64#define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045ca00 + ((_i) * 4))
65#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0
66#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S)
67#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30
68#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S)
69#define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045cb00 + ((_i) * 4))
70#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0
71#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S)
72#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30
73#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S)
74
75#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4))
76#define QRXFLXP_CNTXT_RXDID_IDX_S 0
77#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S)
78#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
79#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S)
80#define QRXFLXP_CNTXT_TS_S 11
81#define QRXFLXP_CNTXT_TS_M BIT(QRXFLXP_CNTXT_TS_S)
43#define GLGEN_RSTAT 0x000B8188 82#define GLGEN_RSTAT 0x000B8188
44#define GLGEN_RSTAT_DEVSTATE_S 0 83#define GLGEN_RSTAT_DEVSTATE_S 0
45#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S) 84#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S)
@@ -62,6 +101,8 @@
62#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S) 101#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S)
63#define GLINT_DYN_CTL_CLEARPBA_S 1 102#define GLINT_DYN_CTL_CLEARPBA_S 1
64#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S) 103#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S)
104#define GLINT_DYN_CTL_SWINT_TRIG_S 2
105#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(GLINT_DYN_CTL_SWINT_TRIG_S)
65#define GLINT_DYN_CTL_ITR_INDX_S 3 106#define GLINT_DYN_CTL_ITR_INDX_S 3
66#define GLINT_DYN_CTL_SW_ITR_INDX_S 25 107#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
67#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S) 108#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S)
@@ -106,7 +147,25 @@
106#define PFINT_OICR_CTL_CAUSE_ENA_S 30 147#define PFINT_OICR_CTL_CAUSE_ENA_S 30
107#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S) 148#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S)
108#define PFINT_OICR_ENA 0x0016C900 149#define PFINT_OICR_ENA 0x0016C900
150#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
151#define QINT_RQCTL_MSIX_INDX_S 0
152#define QINT_RQCTL_ITR_INDX_S 11
153#define QINT_RQCTL_CAUSE_ENA_S 30
154#define QINT_RQCTL_CAUSE_ENA_M BIT(QINT_RQCTL_CAUSE_ENA_S)
155#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
156#define QINT_TQCTL_MSIX_INDX_S 0
157#define QINT_TQCTL_ITR_INDX_S 11
158#define QINT_TQCTL_CAUSE_ENA_S 30
159#define QINT_TQCTL_CAUSE_ENA_M BIT(QINT_TQCTL_CAUSE_ENA_S)
109#define GLLAN_RCTL_0 0x002941F8 160#define GLLAN_RCTL_0 0x002941F8
161#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
162#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
163#define QRX_CTRL_MAX_INDEX 2047
164#define QRX_CTRL_QENA_REQ_S 0
165#define QRX_CTRL_QENA_REQ_M BIT(QRX_CTRL_QENA_REQ_S)
166#define QRX_CTRL_QENA_STAT_S 2
167#define QRX_CTRL_QENA_STAT_M BIT(QRX_CTRL_QENA_STAT_S)
168#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4))
110#define GLNVM_FLA 0x000B6108 169#define GLNVM_FLA 0x000B6108
111#define GLNVM_FLA_LOCKED_S 6 170#define GLNVM_FLA_LOCKED_S 6
112#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S) 171#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S)
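The QINT_RQCTL/QINT_TQCTL fields above are what tie a queue to an MSI-X vector: the driver builds a value from the vector index, an ITR index and the cause-enable bit and writes it once per queue. This is the pattern ice_vsi_cfg_msix() in ice_main.c follows; the snippet below is a generic illustration rather than a copy of that function, and msix_idx/rxq_reg_idx/txq_reg_idx are example variables:

/* Illustrative queue-to-vector mapping for one Rx and one Tx queue */
u32 val;

val = QINT_RQCTL_CAUSE_ENA_M |
      (ICE_RX_ITR << QINT_RQCTL_ITR_INDX_S) |
      (msix_idx << QINT_RQCTL_MSIX_INDX_S);
wr32(hw, QINT_RQCTL(rxq_reg_idx), val);

val = QINT_TQCTL_CAUSE_ENA_M |
      (ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) |
      (msix_idx << QINT_TQCTL_MSIX_INDX_S);
wr32(hw, QINT_TQCTL(txq_reg_idx), val);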
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
new file mode 100644
index 000000000000..3c1aeb74d950
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -0,0 +1,246 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2018, Intel Corporation. */
3
4#ifndef _ICE_LAN_TX_RX_H_
5#define _ICE_LAN_TX_RX_H_
6
7union ice_32byte_rx_desc {
8 struct {
9 __le64 pkt_addr; /* Packet buffer address */
10 __le64 hdr_addr; /* Header buffer address */
11 /* bit 0 of hdr_addr is DD bit */
12 __le64 rsvd1;
13 __le64 rsvd2;
14 } read;
15 struct {
16 struct {
17 struct {
18 __le16 mirroring_status;
19 __le16 l2tag1;
20 } lo_dword;
21 union {
22 __le32 rss; /* RSS Hash */
23 __le32 fd_id; /* Flow Director filter id */
24 } hi_dword;
25 } qword0;
26 struct {
27 /* status/error/PTYPE/length */
28 __le64 status_error_len;
29 } qword1;
30 struct {
31 __le16 ext_status; /* extended status */
32 __le16 rsvd;
33 __le16 l2tag2_1;
34 __le16 l2tag2_2;
35 } qword2;
36 struct {
37 __le32 reserved;
38 __le32 fd_id;
39 } qword3;
40 } wb; /* writeback */
41};
42
43/* RX Flex Descriptor
44 * This descriptor is used instead of the legacy version descriptor when
45 * ice_rlan_ctx.adv_desc is set
46 */
47union ice_32b_rx_flex_desc {
48 struct {
49 __le64 pkt_addr; /* Packet buffer address */
50 __le64 hdr_addr; /* Header buffer address */
51 /* bit 0 of hdr_addr is DD bit */
52 __le64 rsvd1;
53 __le64 rsvd2;
54 } read;
55 struct {
56 /* Qword 0 */
57 u8 rxdid; /* descriptor builder profile id */
58 u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
59 __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
60 __le16 pkt_len; /* [15:14] are reserved */
61 __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
62 /* sph=[11:11] */
63 /* ff1/ext=[15:12] */
64
65 /* Qword 1 */
66 __le16 status_error0;
67 __le16 l2tag1;
68 __le16 flex_meta0;
69 __le16 flex_meta1;
70
71 /* Qword 2 */
72 __le16 status_error1;
73 u8 flex_flags2;
74 u8 time_stamp_low;
75 __le16 l2tag2_1st;
76 __le16 l2tag2_2nd;
77
78 /* Qword 3 */
79 __le16 flex_meta2;
80 __le16 flex_meta3;
81 union {
82 struct {
83 __le16 flex_meta4;
84 __le16 flex_meta5;
85 } flex;
86 __le32 ts_high;
87 } flex_ts;
88 } wb; /* writeback */
89};
90
91/* Receive Flex Descriptor profile IDs: There are a total
92 * of 64 profiles where profile IDs 0/1 are for legacy; and
93 * profiles 2-63 are flex profiles that can be programmed
94 * with a specific metadata (profile 7 reserved for HW)
95 */
96enum ice_rxdid {
97 ICE_RXDID_START = 0,
98 ICE_RXDID_LEGACY_0 = ICE_RXDID_START,
99 ICE_RXDID_LEGACY_1,
100 ICE_RXDID_FLX_START,
101 ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START,
102 ICE_RXDID_FLX_LAST = 63,
103 ICE_RXDID_LAST = ICE_RXDID_FLX_LAST
104};
105
106/* Receive Flex Descriptor Rx opcode values */
107#define ICE_RX_OPC_MDID 0x01
108
109/* Receive Descriptor MDID values */
110#define ICE_RX_MDID_FLOW_ID_LOWER 5
111#define ICE_RX_MDID_FLOW_ID_HIGH 6
112#define ICE_RX_MDID_HASH_LOW 56
113#define ICE_RX_MDID_HASH_HIGH 57
114
115/* Rx Flag64 packet flag bits */
116enum ice_rx_flg64_bits {
117 ICE_RXFLG_PKT_DSI = 0,
118 ICE_RXFLG_EVLAN_x8100 = 15,
119 ICE_RXFLG_EVLAN_x9100,
120 ICE_RXFLG_VLAN_x8100,
121 ICE_RXFLG_TNL_MAC = 22,
122 ICE_RXFLG_TNL_VLAN,
123 ICE_RXFLG_PKT_FRG,
124 ICE_RXFLG_FIN = 32,
125 ICE_RXFLG_SYN,
126 ICE_RXFLG_RST,
127 ICE_RXFLG_TNL0 = 38,
128 ICE_RXFLG_TNL1,
129 ICE_RXFLG_TNL2,
130 ICE_RXFLG_UDP_GRE,
131 ICE_RXFLG_RSVD = 63
132};
133
134#define ICE_RXQ_CTX_SIZE_DWORDS 8
135#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
136
137/* RLAN Rx queue context data
138 *
139 * The sizes of the variables may be larger than needed due to crossing byte
140 * boundaries. If we do not have the width of the variable set to the correct
141 * size then we could end up shifting bits off the top of the variable when the
142 * variable is at the top of a byte and crosses over into the next byte.
143 */
144struct ice_rlan_ctx {
145 u16 head;
146 u16 cpuid; /* bigger than needed, see above for reason */
147 u64 base;
148 u16 qlen;
149#define ICE_RLAN_CTX_DBUF_S 7
150 u16 dbuf; /* bigger than needed, see above for reason */
151#define ICE_RLAN_CTX_HBUF_S 6
152 u16 hbuf; /* bigger than needed, see above for reason */
153 u8 dtype;
154 u8 dsize;
155 u8 crcstrip;
156 u8 l2tsel;
157 u8 hsplit_0;
158 u8 hsplit_1;
159 u8 showiv;
160 u32 rxmax; /* bigger than needed, see above for reason */
161 u8 tphrdesc_ena;
162 u8 tphwdesc_ena;
163 u8 tphdata_ena;
164 u8 tphhead_ena;
165 u16 lrxqthresh; /* bigger than needed, see above for reason */
166};
167
168struct ice_ctx_ele {
169 u16 offset;
170 u16 size_of;
171 u16 width;
172 u16 lsb;
173};
174
175#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
176 .offset = offsetof(struct _struct, _ele), \
177 .size_of = FIELD_SIZEOF(struct _struct, _ele), \
178 .width = _width, \
179 .lsb = _lsb, \
180}
181
182/* for hsplit_0 field of Rx RLAN context */
183enum ice_rlan_ctx_rx_hsplit_0 {
184 ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
185 ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 = 1,
186 ICE_RLAN_RX_HSPLIT_0_SPLIT_IP = 2,
187 ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
188 ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP = 8,
189};
190
191/* for hsplit_1 field of Rx RLAN context */
192enum ice_rlan_ctx_rx_hsplit_1 {
193 ICE_RLAN_RX_HSPLIT_1_NO_SPLIT = 0,
194 ICE_RLAN_RX_HSPLIT_1_SPLIT_L2 = 1,
195 ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
196};
197
198/* TX Descriptor */
199struct ice_tx_desc {
200 __le64 buf_addr; /* Address of descriptor's data buf */
201 __le64 cmd_type_offset_bsz;
202};
203
204#define ICE_LAN_TXQ_MAX_QGRPS 127
205#define ICE_LAN_TXQ_MAX_QDIS 1023
206
207/* Tx queue context data
208 *
209 * The sizes of the variables may be larger than needed due to crossing byte
210 * boundaries. If we do not have the width of the variable set to the correct
211 * size then we could end up shifting bits off the top of the variable when the
212 * variable is at the top of a byte and crosses over into the next byte.
213 */
214struct ice_tlan_ctx {
215#define ICE_TLAN_CTX_BASE_S 7
216 u64 base; /* base is defined in 128-byte units */
217 u8 port_num;
218 u16 cgd_num; /* bigger than needed, see above for reason */
219 u8 pf_num;
220 u16 vmvf_num;
221 u8 vmvf_type;
222#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
223#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
224 u16 src_vsi;
225 u8 tsyn_ena;
226 u8 alt_vlan;
227 u16 cpuid; /* bigger than needed, see above for reason */
228 u8 wb_mode;
229 u8 tphrd_desc;
230 u8 tphrd;
231 u8 tphwr_desc;
232 u16 cmpq_id;
233 u16 qnum_in_func;
234 u8 itr_notification_mode;
235 u8 adjust_prof_id;
236 u32 qlen; /* bigger than needed, see above for reason */
237 u8 quanta_prof_idx;
238 u8 tso_ena;
239 u16 tso_qnum;
240 u8 legacy_int;
241 u8 drop_ena;
242 u8 cache_prof_idx;
243 u8 pkt_shaper_prof_idx;
244 u8 int_q_state; /* width not needed - internal do not write */
245};
246#endif /* _ICE_LAN_TX_RX_H_ */
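To make the ICE_CTX_STORE bookkeeping concrete: each entry records where a field lives in the sparse C struct and where its bits land in the packed hardware image, which is what lets ice_set_ctx() in ice_common.c pick ice_write_byte/word/dword/qword by source size. For example, the qlen entry of ice_rlan_ctx_info[] expands to roughly:

/* ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89) is equivalent to: */
static const struct ice_ctx_ele example_qlen_ele = {
	.offset  = offsetof(struct ice_rlan_ctx, qlen), /* source field in the sparse struct */
	.size_of = sizeof(u16),                         /* u16 source -> ice_write_word() */
	.width   = 13,                                  /* only 13 bits are meaningful */
	.lsb     = 89,                                  /* packed position: bit 1 of byte 11 */
};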
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 3b4a2691ddac..58e9eb40f64a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -27,6 +27,7 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
27#endif /* !CONFIG_DYNAMIC_DEBUG */ 27#endif /* !CONFIG_DYNAMIC_DEBUG */
28 28
29static struct workqueue_struct *ice_wq; 29static struct workqueue_struct *ice_wq;
30static const struct net_device_ops ice_netdev_ops;
30 31
31static int ice_vsi_release(struct ice_vsi *vsi); 32static int ice_vsi_release(struct ice_vsi *vsi);
32 33
@@ -214,6 +215,75 @@ static void ice_free_fltr_list(struct device *dev, struct list_head *h)
214} 215}
215 216
216/** 217/**
218 * ice_print_link_msg - print link up or down message
219 * @vsi: the VSI whose link status is being queried
220 * @isup: boolean for if the link is now up or down
221 */
222static void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
223{
224 const char *speed;
225 const char *fc;
226
227 if (vsi->current_isup == isup)
228 return;
229
230 vsi->current_isup = isup;
231
232 if (!isup) {
233 netdev_info(vsi->netdev, "NIC Link is Down\n");
234 return;
235 }
236
237 switch (vsi->port_info->phy.link_info.link_speed) {
238 case ICE_AQ_LINK_SPEED_40GB:
239 speed = "40 G";
240 break;
241 case ICE_AQ_LINK_SPEED_25GB:
242 speed = "25 G";
243 break;
244 case ICE_AQ_LINK_SPEED_20GB:
245 speed = "20 G";
246 break;
247 case ICE_AQ_LINK_SPEED_10GB:
248 speed = "10 G";
249 break;
250 case ICE_AQ_LINK_SPEED_5GB:
251 speed = "5 G";
252 break;
253 case ICE_AQ_LINK_SPEED_2500MB:
254 speed = "2.5 G";
255 break;
256 case ICE_AQ_LINK_SPEED_1000MB:
257 speed = "1 G";
258 break;
259 case ICE_AQ_LINK_SPEED_100MB:
260 speed = "100 M";
261 break;
262 default:
263 speed = "Unknown";
264 break;
265 }
266
267 switch (vsi->port_info->fc.current_mode) {
268 case ICE_FC_FULL:
269 fc = "RX/TX";
270 break;
271 case ICE_FC_TX_PAUSE:
272 fc = "TX";
273 break;
274 case ICE_FC_RX_PAUSE:
275 fc = "RX";
276 break;
277 default:
278 fc = "Unknown";
279 break;
280 }
281
282 netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
283 speed, fc);
284}
285
286/**
217 * __ice_clean_ctrlq - helper function to clean controlq rings 287 * __ice_clean_ctrlq - helper function to clean controlq rings
218 * @pf: ptr to struct ice_pf 288 * @pf: ptr to struct ice_pf
219 * @q_type: specific Control queue type 289 * @q_type: specific Control queue type
@@ -409,6 +479,104 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
409} 479}
410 480
411/** 481/**
482 * ice_irq_affinity_notify - Callback for affinity changes
483 * @notify: context as to what irq was changed
484 * @mask: the new affinity mask
485 *
486 * This is a callback function used by the irq_set_affinity_notifier function
487 * so that we may register to receive changes to the irq affinity masks.
488 */
489static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
490 const cpumask_t *mask)
491{
492 struct ice_q_vector *q_vector =
493 container_of(notify, struct ice_q_vector, affinity_notify);
494
495 cpumask_copy(&q_vector->affinity_mask, mask);
496}
497
498/**
499 * ice_irq_affinity_release - Callback for affinity notifier release
500 * @ref: internal core kernel usage
501 *
502 * This is a callback function used by the irq_set_affinity_notifier function
503 * to inform the current notification subscriber that they will no longer
504 * receive notifications.
505 */
506static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
507
508/**
509 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
510 * @vsi: the VSI being un-configured
511 */
512static void ice_vsi_dis_irq(struct ice_vsi *vsi)
513{
514 struct ice_pf *pf = vsi->back;
515 struct ice_hw *hw = &pf->hw;
516 int base = vsi->base_vector;
517 u32 val;
518 int i;
519
520 /* disable interrupt causation from each queue */
521 if (vsi->tx_rings) {
522 ice_for_each_txq(vsi, i) {
523 if (vsi->tx_rings[i]) {
524 u16 reg;
525
526 reg = vsi->tx_rings[i]->reg_idx;
527 val = rd32(hw, QINT_TQCTL(reg));
528 val &= ~QINT_TQCTL_CAUSE_ENA_M;
529 wr32(hw, QINT_TQCTL(reg), val);
530 }
531 }
532 }
533
534 if (vsi->rx_rings) {
535 ice_for_each_rxq(vsi, i) {
536 if (vsi->rx_rings[i]) {
537 u16 reg;
538
539 reg = vsi->rx_rings[i]->reg_idx;
540 val = rd32(hw, QINT_RQCTL(reg));
541 val &= ~QINT_RQCTL_CAUSE_ENA_M;
542 wr32(hw, QINT_RQCTL(reg), val);
543 }
544 }
545 }
546
547 /* disable each interrupt */
548 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
549 for (i = vsi->base_vector;
550 i < (vsi->num_q_vectors + vsi->base_vector); i++)
551 wr32(hw, GLINT_DYN_CTL(i), 0);
552
553 ice_flush(hw);
554 for (i = 0; i < vsi->num_q_vectors; i++)
555 synchronize_irq(pf->msix_entries[i + base].vector);
556 }
557}
558
559/**
560 * ice_vsi_ena_irq - Enable IRQ for the given VSI
561 * @vsi: the VSI being configured
562 */
563static int ice_vsi_ena_irq(struct ice_vsi *vsi)
564{
565 struct ice_pf *pf = vsi->back;
566 struct ice_hw *hw = &pf->hw;
567
568 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
569 int i;
570
571 for (i = 0; i < vsi->num_q_vectors; i++)
572 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
573 }
574
575 ice_flush(hw);
576 return 0;
577}
578
579/**
412 * ice_vsi_delete - delete a VSI from the switch 580 * ice_vsi_delete - delete a VSI from the switch
413 * @vsi: pointer to VSI being removed 581 * @vsi: pointer to VSI being removed
414 */ 582 */
@@ -429,6 +597,73 @@ static void ice_vsi_delete(struct ice_vsi *vsi)
429} 597}
430 598
431/** 599/**
600 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
601 * @vsi: the VSI being configured
602 * @basename: name for the vector
603 */
604static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
605{
606 int q_vectors = vsi->num_q_vectors;
607 struct ice_pf *pf = vsi->back;
608 int base = vsi->base_vector;
609 int rx_int_idx = 0;
610 int tx_int_idx = 0;
611 int vector, err;
612 int irq_num;
613
614 for (vector = 0; vector < q_vectors; vector++) {
615 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
616
617 irq_num = pf->msix_entries[base + vector].vector;
618
619 if (q_vector->tx.ring && q_vector->rx.ring) {
620 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
621 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
622 tx_int_idx++;
623 } else if (q_vector->rx.ring) {
624 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
625 "%s-%s-%d", basename, "rx", rx_int_idx++);
626 } else if (q_vector->tx.ring) {
627 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
628 "%s-%s-%d", basename, "tx", tx_int_idx++);
629 } else {
630 /* skip this unused q_vector */
631 continue;
632 }
633 err = devm_request_irq(&pf->pdev->dev,
634 pf->msix_entries[base + vector].vector,
635 vsi->irq_handler, 0, q_vector->name,
636 q_vector);
637 if (err) {
638 netdev_err(vsi->netdev,
639 "MSIX request_irq failed, error: %d\n", err);
640 goto free_q_irqs;
641 }
642
643 /* register for affinity change notifications */
644 q_vector->affinity_notify.notify = ice_irq_affinity_notify;
645 q_vector->affinity_notify.release = ice_irq_affinity_release;
646 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
647
648 /* assign the mask for this irq */
649 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
650 }
651
652 vsi->irqs_ready = true;
653 return 0;
654
655free_q_irqs:
656 while (vector) {
657 vector--;
658 irq_num = pf->msix_entries[base + vector].vector,
659 irq_set_affinity_notifier(irq_num, NULL);
660 irq_set_affinity_hint(irq_num, NULL);
661 devm_free_irq(&pf->pdev->dev, irq_num, &vsi->q_vectors[vector]);
662 }
663 return err;
664}
665
666/**
432 * ice_vsi_setup_q_map - Setup a VSI queue map 667 * ice_vsi_setup_q_map - Setup a VSI queue map
433 * @vsi: the VSI being configured 668 * @vsi: the VSI being configured
434 * @ctxt: VSI context structure 669 * @ctxt: VSI context structure
@@ -591,6 +826,38 @@ static int ice_vsi_add(struct ice_vsi *vsi)
591} 826}
592 827
593/** 828/**
829 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
830 * @vsi: the VSI being cleaned up
831 */
832static void ice_vsi_release_msix(struct ice_vsi *vsi)
833{
834 struct ice_pf *pf = vsi->back;
835 u16 vector = vsi->base_vector;
836 struct ice_hw *hw = &pf->hw;
837 u32 txq = 0;
838 u32 rxq = 0;
839 int i, q;
840
841 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
842 struct ice_q_vector *q_vector = vsi->q_vectors[i];
843
844 wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
845 wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
846 for (q = 0; q < q_vector->num_ring_tx; q++) {
847 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
848 txq++;
849 }
850
851 for (q = 0; q < q_vector->num_ring_rx; q++) {
852 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
853 rxq++;
854 }
855 }
856
857 ice_flush(hw);
858}
859
860/**
594 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI 861 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
595 * @vsi: the VSI having rings deallocated 862 * @vsi: the VSI having rings deallocated
596 */ 863 */
@@ -673,6 +940,118 @@ err_out:
673} 940}
674 941
675/** 942/**
943 * ice_vsi_free_irq - Free the irq association with the OS
944 * @vsi: the VSI being configured
945 */
946static void ice_vsi_free_irq(struct ice_vsi *vsi)
947{
948 struct ice_pf *pf = vsi->back;
949 int base = vsi->base_vector;
950
951 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
952 int i;
953
954 if (!vsi->q_vectors || !vsi->irqs_ready)
955 return;
956
957 vsi->irqs_ready = false;
958 for (i = 0; i < vsi->num_q_vectors; i++) {
959 u16 vector = i + base;
960 int irq_num;
961
962 irq_num = pf->msix_entries[vector].vector;
963
964 /* free only the irqs that were actually requested */
965 if (!vsi->q_vectors[i] ||
966 !(vsi->q_vectors[i]->num_ring_tx ||
967 vsi->q_vectors[i]->num_ring_rx))
968 continue;
969
970 /* clear the affinity notifier in the IRQ descriptor */
971 irq_set_affinity_notifier(irq_num, NULL);
972
973 /* clear the affinity_mask in the IRQ descriptor */
974 irq_set_affinity_hint(irq_num, NULL);
975 synchronize_irq(irq_num);
976 devm_free_irq(&pf->pdev->dev, irq_num,
977 vsi->q_vectors[i]);
978 }
979 ice_vsi_release_msix(vsi);
980 }
981}
982
983/**
984 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
985 * @vsi: the VSI being configured
986 */
987static void ice_vsi_cfg_msix(struct ice_vsi *vsi)
988{
989 struct ice_pf *pf = vsi->back;
990 u16 vector = vsi->base_vector;
991 struct ice_hw *hw = &pf->hw;
992 u32 txq = 0, rxq = 0;
993 int i, q, itr;
994 u8 itr_gran;
995
996 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
997 struct ice_q_vector *q_vector = vsi->q_vectors[i];
998
999 itr_gran = hw->itr_gran_200;
1000
1001 if (q_vector->num_ring_rx) {
1002 q_vector->rx.itr =
1003 ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
1004 itr_gran);
1005 q_vector->rx.latency_range = ICE_LOW_LATENCY;
1006 }
1007
1008 if (q_vector->num_ring_tx) {
1009 q_vector->tx.itr =
1010 ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
1011 itr_gran);
1012 q_vector->tx.latency_range = ICE_LOW_LATENCY;
1013 }
1014 wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
1015 wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
1016
1017 /* Both Transmit Queue Interrupt Cause Control register
1018 * and Receive Queue Interrupt Cause control register
1019		 * expect the MSIX_INDX field to be the vector index
1020		 * within the function space and not the absolute
1021		 * vector index across the PF or across the device.
1022		 * For SR-IOV VF VSIs the queue vector index always starts
1023		 * at 1 since the first vector index (0) is used for OICR
1024		 * in VF space. Since VMDq and other PF VSIs are within
1025		 * the PF function space, use the vector index that's
1026 * tracked for this PF.
1027 */
1028 for (q = 0; q < q_vector->num_ring_tx; q++) {
1029 u32 val;
1030
1031 itr = ICE_TX_ITR;
1032 val = QINT_TQCTL_CAUSE_ENA_M |
1033 (itr << QINT_TQCTL_ITR_INDX_S) |
1034 (vector << QINT_TQCTL_MSIX_INDX_S);
1035 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
1036 txq++;
1037 }
1038
1039 for (q = 0; q < q_vector->num_ring_rx; q++) {
1040 u32 val;
1041
1042 itr = ICE_RX_ITR;
1043 val = QINT_RQCTL_CAUSE_ENA_M |
1044 (itr << QINT_RQCTL_ITR_INDX_S) |
1045 (vector << QINT_RQCTL_MSIX_INDX_S);
1046 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
1047 rxq++;
1048 }
1049 }
1050
1051 ice_flush(hw);
1052}
1053
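The QINT_TQCTL/QINT_RQCTL writes above fold three fields into a single 32-bit register value: the cause-enable bit, the ITR index, and the MSI-X vector index within the function. A minimal standalone sketch of that composition follows; the shift and mask values here are assumptions for illustration only, the authoritative ones live in ice_hw_autogen.h.

    /* Illustrative only: field offsets are assumed, not taken from ice_hw_autogen.h */
    #include <stdio.h>

    #define QINT_TQCTL_MSIX_INDX_S 0          /* assumed: vector index field */
    #define QINT_TQCTL_ITR_INDX_S  11         /* assumed: ITR index field */
    #define QINT_TQCTL_CAUSE_ENA_M (1u << 30) /* assumed: cause-enable bit */

    static unsigned int qint_tqctl(unsigned int vector, unsigned int itr_idx)
    {
            return QINT_TQCTL_CAUSE_ENA_M |
                   (itr_idx << QINT_TQCTL_ITR_INDX_S) |
                   (vector << QINT_TQCTL_MSIX_INDX_S);
    }

    int main(void)
    {
            /* vector 3 within the PF's space, Tx ITR index 1 */
            printf("QINT_TQCTL = 0x%08x\n", qint_tqctl(3, 1));
            return 0;
    }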
1054/**
676 * ice_ena_misc_vector - enable the non-queue interrupts 1055 * ice_ena_misc_vector - enable the non-queue interrupts
677 * @pf: board private structure 1056 * @pf: board private structure
678 */ 1057 */
@@ -752,7 +1131,7 @@ ena_intr:
752 wr32(hw, PFINT_OICR_ENA, ena_mask); 1131 wr32(hw, PFINT_OICR_ENA, ena_mask);
753 if (!test_bit(__ICE_DOWN, pf->state)) { 1132 if (!test_bit(__ICE_DOWN, pf->state)) {
754 ice_service_task_schedule(pf); 1133 ice_service_task_schedule(pf);
755 ice_irq_dynamic_ena(hw); 1134 ice_irq_dynamic_ena(hw, NULL, NULL);
756 } 1135 }
757 1136
758 return ret; 1137 return ret;
@@ -1017,7 +1396,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
1017 ITR_TO_REG(ICE_ITR_8K, itr_gran)); 1396 ITR_TO_REG(ICE_ITR_8K, itr_gran));
1018 1397
1019 ice_flush(hw); 1398 ice_flush(hw);
1020 ice_irq_dynamic_ena(hw); 1399 ice_irq_dynamic_ena(hw, NULL, NULL);
1021 1400
1022 return 0; 1401 return 0;
1023} 1402}
@@ -1262,6 +1641,9 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
1262 1641
1263 netdev->priv_flags |= IFF_UNICAST_FLT; 1642 netdev->priv_flags |= IFF_UNICAST_FLT;
1264 1643
1644 /* assign netdev_ops */
1645 netdev->netdev_ops = &ice_netdev_ops;
1646
1265 /* setup watchdog timeout value to be 5 second */ 1647 /* setup watchdog timeout value to be 5 second */
1266 netdev->watchdog_timeo = 5 * HZ; 1648 netdev->watchdog_timeo = 5 * HZ;
1267 1649
@@ -2080,6 +2462,704 @@ static void __exit ice_module_exit(void)
2080module_exit(ice_module_exit); 2462module_exit(ice_module_exit);
2081 2463
2082/** 2464/**
2465 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
2466 * @ring: The Tx ring to configure
2467 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
2468 * @pf_q: queue index in the PF space
2469 *
2470 * Configure the Tx descriptor ring in TLAN context.
2471 */
2472static void
2473ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
2474{
2475 struct ice_vsi *vsi = ring->vsi;
2476 struct ice_hw *hw = &vsi->back->hw;
2477
2478 tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
2479
2480 tlan_ctx->port_num = vsi->port_info->lport;
2481
2482 /* Transmit Queue Length */
2483 tlan_ctx->qlen = ring->count;
2484
2485 /* PF number */
2486 tlan_ctx->pf_num = hw->pf_id;
2487
2488 /* queue belongs to a specific VSI type
2489 * VF / VM index should be programmed per vmvf_type setting:
2490 * for vmvf_type = VF, it is VF number between 0-256
2491 * for vmvf_type = VM, it is VM number between 0-767
2492 * for PF or EMP this field should be set to zero
2493 */
2494 switch (vsi->type) {
2495 case ICE_VSI_PF:
2496 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
2497 break;
2498 default:
2499 return;
2500 }
2501
2502 /* make sure the context is associated with the right VSI */
2503 tlan_ctx->src_vsi = vsi->vsi_num;
2504
2505 tlan_ctx->tso_ena = ICE_TX_LEGACY;
2506 tlan_ctx->tso_qnum = pf_q;
2507
2508 /* Legacy or Advanced Host Interface:
2509 * 0: Advanced Host Interface
2510 * 1: Legacy Host Interface
2511 */
2512 tlan_ctx->legacy_int = ICE_TX_LEGACY;
2513}
2514
2515/**
2516 * ice_vsi_cfg_txqs - Configure the VSI for Tx
2517 * @vsi: the VSI being configured
2518 *
2519 * Return 0 on success and a negative value on error
2520 * Configure the Tx VSI for operation.
2521 */
2522static int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
2523{
2524 struct ice_aqc_add_tx_qgrp *qg_buf;
2525 struct ice_aqc_add_txqs_perq *txq;
2526 struct ice_pf *pf = vsi->back;
2527 enum ice_status status;
2528 u16 buf_len, i, pf_q;
2529 int err = 0, tc = 0;
2530 u8 num_q_grps;
2531
2532 buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
2533 qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
2534 if (!qg_buf)
2535 return -ENOMEM;
2536
2537 if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
2538 err = -EINVAL;
2539 goto err_cfg_txqs;
2540 }
2541 qg_buf->num_txqs = 1;
2542 num_q_grps = 1;
2543
2544 /* set up and configure the tx queues */
2545 ice_for_each_txq(vsi, i) {
2546 struct ice_tlan_ctx tlan_ctx = { 0 };
2547
2548 pf_q = vsi->txq_map[i];
2549 ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
2550 /* copy context contents into the qg_buf */
2551 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
2552 ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
2553 ice_tlan_ctx_info);
2554
2555		/* init queue specific tail reg. It is referred to as the
2556		 * transmit comm scheduler queue doorbell.
2557 */
2558 vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
2559 status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
2560 num_q_grps, qg_buf, buf_len, NULL);
2561 if (status) {
2562 dev_err(&vsi->back->pdev->dev,
2563 "Failed to set LAN Tx queue context, error: %d\n",
2564 status);
2565 err = -ENODEV;
2566 goto err_cfg_txqs;
2567 }
2568
2569		/* Add Tx Queue TEID into the VSI Tx ring from the response.
2570		 * This will complete configuring and enabling the queue.
2571 */
2572 txq = &qg_buf->txqs[0];
2573 if (pf_q == le16_to_cpu(txq->txq_id))
2574 vsi->tx_rings[i]->txq_teid =
2575 le32_to_cpu(txq->q_teid);
2576 }
2577err_cfg_txqs:
2578 devm_kfree(&pf->pdev->dev, qg_buf);
2579 return err;
2580}
2581
2582/**
2583 * ice_setup_rx_ctx - Configure a receive ring context
2584 * @ring: The Rx ring to configure
2585 *
2586 * Configure the Rx descriptor ring in RLAN context.
2587 */
2588static int ice_setup_rx_ctx(struct ice_ring *ring)
2589{
2590 struct ice_vsi *vsi = ring->vsi;
2591 struct ice_hw *hw = &vsi->back->hw;
2592 u32 rxdid = ICE_RXDID_FLEX_NIC;
2593 struct ice_rlan_ctx rlan_ctx;
2594 u32 regval;
2595 u16 pf_q;
2596 int err;
2597
2598	/* which Rx queue this is within the global space of 2K Rx queues */
2599 pf_q = vsi->rxq_map[ring->q_index];
2600
2601 /* clear the context structure first */
2602 memset(&rlan_ctx, 0, sizeof(rlan_ctx));
2603
2604 rlan_ctx.base = ring->dma >> 7;
2605
2606 rlan_ctx.qlen = ring->count;
2607
2608 /* Receive Packet Data Buffer Size.
2609 * The Packet Data Buffer Size is defined in 128 byte units.
2610 */
2611 rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
2612
2613 /* use 32 byte descriptors */
2614 rlan_ctx.dsize = 1;
2615
2616 /* Strip the Ethernet CRC bytes before the packet is posted to host
2617 * memory.
2618 */
2619 rlan_ctx.crcstrip = 1;
2620
2621 /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
2622 rlan_ctx.l2tsel = 1;
2623
2624 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
2625 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
2626 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
2627
2628 /* This controls whether VLAN is stripped from inner headers
2629 * The VLAN in the inner L2 header is stripped to the receive
2630 * descriptor if enabled by this flag.
2631 */
2632 rlan_ctx.showiv = 0;
2633
2634 /* Max packet size for this queue - must not be set to a larger value
2635 * than 5 x DBUF
2636 */
2637 rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
2638 ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
2639
2640 /* Rx queue threshold in units of 64 */
2641 rlan_ctx.lrxqthresh = 1;
2642
2643 /* Enable Flexible Descriptors in the queue context which
2644 * allows this driver to select a specific receive descriptor format
2645 */
2646 regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
2647 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
2648 QRXFLXP_CNTXT_RXDID_IDX_M;
2649
2650 /* increasing context priority to pick up profile id;
2651	 * default is 0x01; setting to 0x03 to ensure the profile
2652	 * is programmed if the prev context is of the same priority
2653 */
2654 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
2655 QRXFLXP_CNTXT_RXDID_PRIO_M;
2656
2657 wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
2658
2659 /* Absolute queue number out of 2K needs to be passed */
2660 err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
2661 if (err) {
2662 dev_err(&vsi->back->pdev->dev,
2663 "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
2664 pf_q, err);
2665 return -EIO;
2666 }
2667
2668 /* init queue specific tail register */
2669 ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
2670 writel(0, ring->tail);
2671 ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
2672
2673 return 0;
2674}
2675
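Several RLAN context fields above are stored in hardware units rather than bytes: the ring base and dbuf in 128-byte units, and rxmax clamped to 5 x DBUF. The standalone sketch below works the arithmetic for a 2048-byte buffer; the input values are assumed examples, not values read from a device.

    /* Worked example of the unit conversions used for the Rx queue context.
     * All inputs are assumed example values.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long ring_dma = 0x12345680ULL; /* assumed base, 128-byte aligned */
            unsigned int rx_buf_len = 2048;              /* ICE_RXBUF_2048 */
            unsigned int max_frame = 1522;               /* 1500 MTU + L2 + FCS + VLAN */

            unsigned long long base = ring_dma >> 7;     /* base is kept in 128-byte units */
            unsigned int dbuf = rx_buf_len >> 7;         /* 2048 / 128 = 16 */
            unsigned int limit = 5 * rx_buf_len;         /* rxmax must not exceed 5 x DBUF */
            unsigned int rxmax = max_frame < limit ? max_frame : limit;

            printf("base=0x%llx dbuf=%u rxmax=%u\n", base, dbuf, rxmax);
            return 0;
    }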
2676/**
2677 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
2678 * @vsi: the VSI being configured
2679 *
2680 * Return 0 on success and a negative value on error
2681 * Configure the Rx VSI for operation.
2682 */
2683static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
2684{
2685 int err = 0;
2686 u16 i;
2687
2688 if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
2689 vsi->max_frame = vsi->netdev->mtu +
2690 ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2691 else
2692 vsi->max_frame = ICE_RXBUF_2048;
2693
2694 vsi->rx_buf_len = ICE_RXBUF_2048;
2695 /* set up individual rings */
2696 for (i = 0; i < vsi->num_rxq && !err; i++)
2697 err = ice_setup_rx_ctx(vsi->rx_rings[i]);
2698
2699 if (err) {
2700 dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
2701 return -EIO;
2702 }
2703 return err;
2704}
2705
2706/**
2707 * ice_vsi_cfg - Setup the VSI
2708 * @vsi: the VSI being configured
2709 *
2710 * Return 0 on success and negative value on error
2711 */
2712static int ice_vsi_cfg(struct ice_vsi *vsi)
2713{
2714 int err;
2715
2716 err = ice_vsi_cfg_txqs(vsi);
2717 if (!err)
2718 err = ice_vsi_cfg_rxqs(vsi);
2719
2720 return err;
2721}
2722
2723/**
2724 * ice_vsi_stop_tx_rings - Disable Tx rings
2725 * @vsi: the VSI being configured
2726 */
2727static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
2728{
2729 struct ice_pf *pf = vsi->back;
2730 struct ice_hw *hw = &pf->hw;
2731 enum ice_status status;
2732 u32 *q_teids, val;
2733 u16 *q_ids, i;
2734 int err = 0;
2735
2736 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
2737 return -EINVAL;
2738
2739 q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
2740 GFP_KERNEL);
2741 if (!q_teids)
2742 return -ENOMEM;
2743
2744 q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
2745 GFP_KERNEL);
2746 if (!q_ids) {
2747 err = -ENOMEM;
2748 goto err_alloc_q_ids;
2749 }
2750
2751 /* set up the tx queue list to be disabled */
2752 ice_for_each_txq(vsi, i) {
2753 u16 v_idx;
2754
2755 if (!vsi->tx_rings || !vsi->tx_rings[i]) {
2756 err = -EINVAL;
2757 goto err_out;
2758 }
2759
2760 q_ids[i] = vsi->txq_map[i];
2761 q_teids[i] = vsi->tx_rings[i]->txq_teid;
2762
2763 /* clear cause_ena bit for disabled queues */
2764 val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
2765 val &= ~QINT_TQCTL_CAUSE_ENA_M;
2766 wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
2767
2768 /* software is expected to wait for 100 ns */
2769 ndelay(100);
2770
2771		/* trigger a software interrupt for the vector associated with
2772		 * the queue to schedule the napi handler
2773 */
2774 v_idx = vsi->tx_rings[i]->q_vector->v_idx;
2775 wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
2776 GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
2777 }
2778 status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
2779 NULL);
2780 if (status) {
2781 dev_err(&pf->pdev->dev,
2782 "Failed to disable LAN Tx queues, error: %d\n",
2783 status);
2784 err = -ENODEV;
2785 }
2786
2787err_out:
2788 devm_kfree(&pf->pdev->dev, q_ids);
2789
2790err_alloc_q_ids:
2791 devm_kfree(&pf->pdev->dev, q_teids);
2792
2793 return err;
2794}
2795
2796/**
2797 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
2798 * @pf: the PF being configured
2799 * @pf_q: the PF queue
2800 * @ena: enable or disable state of the queue
2801 *
2802 * This routine will wait for the given Rx queue of the PF to reach the
2803 * enabled or disabled state.
2804 * Returns -ETIMEDOUT if the requested state is not reached after multiple
2805 * retries; otherwise returns 0 on success.
2806 */
2807static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
2808{
2809 int i;
2810
2811 for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
2812 u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
2813
2814 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
2815 break;
2816
2817 usleep_range(10, 20);
2818 }
2819 if (i >= ICE_Q_WAIT_RETRY_LIMIT)
2820 return -ETIMEDOUT;
2821
2822 return 0;
2823}
2824
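ice_pf_rxq_wait is a bounded poll: re-read QRX_CTRL until the QENA_STAT bit matches the requested state or the retry budget is exhausted. A user-space sketch of the same pattern, with a stubbed register read (the helper name is made up for illustration):

    /* Bounded-poll sketch; read_qena_stat() stands in for rd32(QRX_CTRL). */
    #include <stdbool.h>
    #include <stdio.h>

    #define Q_WAIT_RETRY_LIMIT 10

    static bool read_qena_stat(void)
    {
            static int reads;
            return ++reads >= 3;    /* pretend the queue comes up on the third read */
    }

    static int wait_for_queue(bool ena)
    {
            int i;

            for (i = 0; i < Q_WAIT_RETRY_LIMIT; i++) {
                    if (read_qena_stat() == ena)
                            return 0;       /* reached the requested state */
                    /* the driver sleeps here via usleep_range(10, 20) */
            }
            return -1;                      /* would be -ETIMEDOUT in the driver */
    }

    int main(void)
    {
            printf("wait_for_queue(true) = %d\n", wait_for_queue(true));
            return 0;
    }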
2825/**
2826 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings
2827 * @vsi: the VSI being configured
2828 * @ena: start or stop the rx rings
2829 */
2830static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
2831{
2832 struct ice_pf *pf = vsi->back;
2833 struct ice_hw *hw = &pf->hw;
2834 int i, j, ret = 0;
2835
2836 for (i = 0; i < vsi->num_rxq; i++) {
2837 int pf_q = vsi->rxq_map[i];
2838 u32 rx_reg;
2839
2840 for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
2841 rx_reg = rd32(hw, QRX_CTRL(pf_q));
2842 if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
2843 ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
2844 break;
2845 usleep_range(1000, 2000);
2846 }
2847
2848 /* Skip if the queue is already in the requested state */
2849 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
2850 continue;
2851
2852 /* turn on/off the queue */
2853 if (ena)
2854 rx_reg |= QRX_CTRL_QENA_REQ_M;
2855 else
2856 rx_reg &= ~QRX_CTRL_QENA_REQ_M;
2857 wr32(hw, QRX_CTRL(pf_q), rx_reg);
2858
2859 /* wait for the change to finish */
2860 ret = ice_pf_rxq_wait(pf, pf_q, ena);
2861 if (ret) {
2862 dev_err(&pf->pdev->dev,
2863 "VSI idx %d Rx ring %d %sable timeout\n",
2864 vsi->idx, pf_q, (ena ? "en" : "dis"));
2865 break;
2866 }
2867 }
2868
2869 return ret;
2870}
2871
2872/**
2873 * ice_vsi_start_rx_rings - start VSI's rx rings
2874 * @vsi: the VSI whose rings are to be started
2875 *
2876 * Returns 0 on success and a negative value on error
2877 */
2878static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
2879{
2880 return ice_vsi_ctrl_rx_rings(vsi, true);
2881}
2882
2883/**
2884 * ice_vsi_stop_rx_rings - stop VSI's rx rings
2885 * @vsi: the VSI
2886 *
2887 * Returns 0 on success and a negative value on error
2888 */
2889static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
2890{
2891 return ice_vsi_ctrl_rx_rings(vsi, false);
2892}
2893
2894/**
2895 * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings
2896 * @vsi: the VSI
2897 * Returns 0 on success and a negative value on error
2898 */
2899static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)
2900{
2901 int err_tx, err_rx;
2902
2903 err_tx = ice_vsi_stop_tx_rings(vsi);
2904 if (err_tx)
2905 dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n");
2906
2907 err_rx = ice_vsi_stop_rx_rings(vsi);
2908 if (err_rx)
2909 dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n");
2910
2911 if (err_tx || err_rx)
2912 return -EIO;
2913
2914 return 0;
2915}
2916
2917/**
2918 * ice_up_complete - Finish the last steps of bringing up a connection
2919 * @vsi: The VSI being configured
2920 *
2921 * Return 0 on success and negative value on error
2922 */
2923static int ice_up_complete(struct ice_vsi *vsi)
2924{
2925 struct ice_pf *pf = vsi->back;
2926 int err;
2927
2928 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
2929 ice_vsi_cfg_msix(vsi);
2930 else
2931 return -ENOTSUPP;
2932
2933 /* Enable only Rx rings, Tx rings were enabled by the FW when the
2934 * Tx queue group list was configured and the context bits were
2935 * programmed using ice_vsi_cfg_txqs
2936 */
2937 err = ice_vsi_start_rx_rings(vsi);
2938 if (err)
2939 return err;
2940
2941 clear_bit(__ICE_DOWN, vsi->state);
2942 ice_vsi_ena_irq(vsi);
2943
2944 if (vsi->port_info &&
2945 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
2946 vsi->netdev) {
2947 ice_print_link_msg(vsi, true);
2948 netif_tx_start_all_queues(vsi->netdev);
2949 netif_carrier_on(vsi->netdev);
2950 }
2951
2952 ice_service_task_schedule(pf);
2953
2954 return err;
2955}
2956
2957/**
2958 * ice_down - Shutdown the connection
2959 * @vsi: The VSI being stopped
2960 */
2961static int ice_down(struct ice_vsi *vsi)
2962{
2963 int i, err;
2964
2965 /* Caller of this function is expected to set the
2966 * vsi->state __ICE_DOWN bit
2967 */
2968 if (vsi->netdev) {
2969 netif_carrier_off(vsi->netdev);
2970 netif_tx_disable(vsi->netdev);
2971 }
2972
2973 ice_vsi_dis_irq(vsi);
2974 err = ice_vsi_stop_tx_rx_rings(vsi);
2975
2976 ice_for_each_txq(vsi, i)
2977 ice_clean_tx_ring(vsi->tx_rings[i]);
2978
2979 ice_for_each_rxq(vsi, i)
2980 ice_clean_rx_ring(vsi->rx_rings[i]);
2981
2982 if (err)
2983 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
2984 vsi->vsi_num, vsi->vsw->sw_id);
2985 return err;
2986}
2987
2988/**
2989 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
2990 * @vsi: VSI having resources allocated
2991 *
2992 * Return 0 on success, negative on failure
2993 */
2994static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
2995{
2996 int i, err;
2997
2998 if (!vsi->num_txq) {
2999 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
3000 vsi->vsi_num);
3001 return -EINVAL;
3002 }
3003
3004 ice_for_each_txq(vsi, i) {
3005 err = ice_setup_tx_ring(vsi->tx_rings[i]);
3006 if (err)
3007 break;
3008 }
3009
3010 return err;
3011}
3012
3013/**
3014 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
3015 * @vsi: VSI having resources allocated
3016 *
3017 * Return 0 on success, negative on failure
3018 */
3019static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
3020{
3021 int i, err;
3022
3023 if (!vsi->num_rxq) {
3024 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
3025 vsi->vsi_num);
3026 return -EINVAL;
3027 }
3028
3029 ice_for_each_rxq(vsi, i) {
3030 err = ice_setup_rx_ring(vsi->rx_rings[i]);
3031 if (err)
3032 break;
3033 }
3034
3035 return err;
3036}
3037
3038/**
3039 * ice_vsi_req_irq - Request IRQ from the OS
3040 * @vsi: The VSI IRQ is being requested for
3041 * @basename: name for the vector
3042 *
3043 * Return 0 on success and a negative value on error
3044 */
3045static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
3046{
3047 struct ice_pf *pf = vsi->back;
3048 int err = -EINVAL;
3049
3050 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3051 err = ice_vsi_req_irq_msix(vsi, basename);
3052
3053 return err;
3054}
3055
3056/**
3057 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
3058 * @vsi: the VSI having resources freed
3059 */
3060static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
3061{
3062 int i;
3063
3064 if (!vsi->tx_rings)
3065 return;
3066
3067 ice_for_each_txq(vsi, i)
3068 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3069 ice_free_tx_ring(vsi->tx_rings[i]);
3070}
3071
3072/**
3073 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
3074 * @vsi: the VSI having resources freed
3075 */
3076static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
3077{
3078 int i;
3079
3080 if (!vsi->rx_rings)
3081 return;
3082
3083 ice_for_each_rxq(vsi, i)
3084 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3085 ice_free_rx_ring(vsi->rx_rings[i]);
3086}
3087
3088/**
3089 * ice_vsi_open - Called when a network interface is made active
3090 * @vsi: the VSI to open
3091 *
3092 * Initialization of the VSI
3093 *
3094 * Returns 0 on success, negative value on error
3095 */
3096static int ice_vsi_open(struct ice_vsi *vsi)
3097{
3098 char int_name[ICE_INT_NAME_STR_LEN];
3099 struct ice_pf *pf = vsi->back;
3100 int err;
3101
3102 /* allocate descriptors */
3103 err = ice_vsi_setup_tx_rings(vsi);
3104 if (err)
3105 goto err_setup_tx;
3106
3107 err = ice_vsi_setup_rx_rings(vsi);
3108 if (err)
3109 goto err_setup_rx;
3110
3111 err = ice_vsi_cfg(vsi);
3112 if (err)
3113 goto err_setup_rx;
3114
3115 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
3116 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
3117 err = ice_vsi_req_irq(vsi, int_name);
3118 if (err)
3119 goto err_setup_rx;
3120
3121 /* Notify the stack of the actual queue counts. */
3122 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
3123 if (err)
3124 goto err_set_qs;
3125
3126 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
3127 if (err)
3128 goto err_set_qs;
3129
3130 err = ice_up_complete(vsi);
3131 if (err)
3132 goto err_up_complete;
3133
3134 return 0;
3135
3136err_up_complete:
3137 ice_down(vsi);
3138err_set_qs:
3139 ice_vsi_free_irq(vsi);
3140err_setup_rx:
3141 ice_vsi_free_rx_rings(vsi);
3142err_setup_tx:
3143 ice_vsi_free_tx_rings(vsi);
3144
3145 return err;
3146}
3147
3148/**
3149 * ice_vsi_close - Shut down a VSI
3150 * @vsi: the VSI being shut down
3151 */
3152static void ice_vsi_close(struct ice_vsi *vsi)
3153{
3154 if (!test_and_set_bit(__ICE_DOWN, vsi->state))
3155 ice_down(vsi);
3156
3157 ice_vsi_free_irq(vsi);
3158 ice_vsi_free_tx_rings(vsi);
3159 ice_vsi_free_rx_rings(vsi);
3160}
3161
3162/**
2083 * ice_vsi_release - Delete a VSI and free its resources 3163 * ice_vsi_release - Delete a VSI and free its resources
2084 * @vsi: the VSI being removed 3164 * @vsi: the VSI being removed
2085 * 3165 *
@@ -2099,6 +3179,9 @@ static int ice_vsi_release(struct ice_vsi *vsi)
2099 vsi->netdev = NULL; 3179 vsi->netdev = NULL;
2100 } 3180 }
2101 3181
3182 ice_vsi_dis_irq(vsi);
3183 ice_vsi_close(vsi);
3184
2102 /* reclaim interrupt vectors back to PF */ 3185 /* reclaim interrupt vectors back to PF */
2103 ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx); 3186 ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
2104 pf->num_avail_msix += vsi->num_q_vectors; 3187 pf->num_avail_msix += vsi->num_q_vectors;
@@ -2116,3 +3199,56 @@ static int ice_vsi_release(struct ice_vsi *vsi)
2116 3199
2117 return 0; 3200 return 0;
2118} 3201}
3202
3203/**
3204 * ice_open - Called when a network interface becomes active
3205 * @netdev: network interface device structure
3206 *
3207 * The open entry point is called when a network interface is made
3208 * active by the system (IFF_UP). At this point all resources needed
3209 * for transmit and receive operations are allocated, the interrupt
3210 * handler is registered with the OS, the netdev watchdog is enabled,
3211 * and the stack is notified that the interface is ready.
3212 *
3213 * Returns 0 on success, negative value on failure
3214 */
3215static int ice_open(struct net_device *netdev)
3216{
3217 struct ice_netdev_priv *np = netdev_priv(netdev);
3218 struct ice_vsi *vsi = np->vsi;
3219 int err;
3220
3221 netif_carrier_off(netdev);
3222
3223 err = ice_vsi_open(vsi);
3224
3225 if (err)
3226 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
3227 vsi->vsi_num, vsi->vsw->sw_id);
3228 return err;
3229}
3230
3231/**
3232 * ice_stop - Disables a network interface
3233 * @netdev: network interface device structure
3234 *
3235 * The stop entry point is called when an interface is de-activated by the OS,
3236 * and the netdevice enters the DOWN state. The hardware is still under the
3237 * driver's control, but the netdev interface is disabled.
3238 *
3239 * Returns success only - not allowed to fail
3240 */
3241static int ice_stop(struct net_device *netdev)
3242{
3243 struct ice_netdev_priv *np = netdev_priv(netdev);
3244 struct ice_vsi *vsi = np->vsi;
3245
3246 ice_vsi_close(vsi);
3247
3248 return 0;
3249}
3250
3251static const struct net_device_ops ice_netdev_ops = {
3252 .ndo_open = ice_open,
3253 .ndo_stop = ice_stop,
3254};
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 22039f9eb591..e50eebbf78e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -463,6 +463,18 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
463} 463}
464 464
465/** 465/**
466 * ice_sched_get_qgrp_layer - get the current queue group layer number
467 * @hw: pointer to the hw struct
468 *
469 * This function returns the current queue group layer number
470 */
471static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
472{
473	/* It's always total layers - 1; layer numbering is 0-relative, so subtract 2 */
474 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
475}
476
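The offset in ice_sched_get_qgrp_layer is easy to work by hand: with an assumed 9-layer Tx scheduler tree, layers are 0-relative, so the leaf (queue) layer is 8 and the queue group layer just above it is 9 - 2 = 7. A tiny sketch, with the layer count assumed for illustration:

    /* Worked example of the queue group layer offset; tree depth is assumed. */
    #include <stdio.h>

    #define ICE_QGRP_LAYER_OFFSET 2

    int main(void)
    {
            unsigned int num_tx_sched_layers = 9;                  /* assumed depth */
            unsigned int leaf_layer = num_tx_sched_layers - 1;     /* 0-relative: 8 */
            unsigned int qgrp_layer = num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;

            printf("leaf layer %u, queue group layer %u\n", leaf_layer, qgrp_layer);
            return 0;
    }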
477/**
466 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree 478 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
467 * @pi: port information structure 479 * @pi: port information structure
468 * 480 *
@@ -666,3 +678,96 @@ sched_query_out:
666 devm_kfree(ice_hw_to_dev(hw), buf); 678 devm_kfree(ice_hw_to_dev(hw), buf);
667 return status; 679 return status;
668} 680}
681
682/**
683 * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id
684 * @pi: port information structure
685 * @vsi_id: vsi id
686 *
687 * This function retrieves the VSI info entry for the given vsi id
688 */
689static struct ice_sched_vsi_info *
690ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
691{
692 struct ice_sched_vsi_info *list_elem;
693
694 if (!pi)
695 return NULL;
696
697 list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)
698 if (list_elem->vsi_id == vsi_id)
699 return list_elem;
700 return NULL;
701}
702
703/**
704 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
705 * @hw: pointer to the hw struct
706 * @base: pointer to the base node
707 * @node: pointer to the node to search
708 *
709 * This function checks whether a given node is part of the base node
710 * subtree or not
711 */
712static bool
713ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
714 struct ice_sched_node *node)
715{
716 u8 i;
717
718 for (i = 0; i < base->num_children; i++) {
719 struct ice_sched_node *child = base->children[i];
720
721 if (node == child)
722 return true;
723 if (child->tx_sched_layer > node->tx_sched_layer)
724 return false;
725		/* this recursion is intentional, and won't
726		 * go more than 8 calls deep
727 */
728 if (ice_sched_find_node_in_subtree(hw, child, node))
729 return true;
730 }
731 return false;
732}
733
734/**
735 * ice_sched_get_free_qparent - Get a free lan or rdma q group node
736 * @pi: port information structure
737 * @vsi_id: vsi id
738 * @tc: branch number
739 * @owner: lan or rdma
740 *
741 * This function retrieves a free lan or rdma q group node
742 */
743struct ice_sched_node *
744ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
745 u8 owner)
746{
747 struct ice_sched_node *vsi_node, *qgrp_node = NULL;
748 struct ice_sched_vsi_info *list_elem;
749 u16 max_children;
750 u8 qgrp_layer;
751
752 qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
753 max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children);
754 list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
755 if (!list_elem)
756 goto lan_q_exit;
757 vsi_node = list_elem->vsi_node[tc];
758	/* bail out if the VSI id is invalid */
759 if (!vsi_node)
760 goto lan_q_exit;
761 /* get the first q group node from VSI sub-tree */
762 qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
763 while (qgrp_node) {
764 /* make sure the qgroup node is part of the VSI subtree */
765 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
766 if (qgrp_node->num_children < max_children &&
767 qgrp_node->owner == owner)
768 break;
769 qgrp_node = qgrp_node->sibling;
770 }
771lan_q_exit:
772 return qgrp_node;
773}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 2926ee9c373e..639859a3d0f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -6,6 +6,8 @@
6 6
7#include "ice_common.h" 7#include "ice_common.h"
8 8
9#define ICE_QGRP_LAYER_OFFSET 2
10
9struct ice_sched_agg_vsi_info { 11struct ice_sched_agg_vsi_info {
10 struct list_head list_entry; 12 struct list_head list_entry;
11 DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS); 13 DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
@@ -31,4 +33,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
31 struct ice_aqc_txsched_elem_data *info); 33 struct ice_aqc_txsched_elem_data *info);
32void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node); 34void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
33struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc); 35struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
36struct ice_sched_node *
37ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
38 u8 owner);
34#endif /* _ICE_SCHED_H_ */ 39#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 365dfb86dcb9..9a95c4ffd7d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -9,6 +9,7 @@ enum ice_status {
9 ICE_ERR_PARAM = -1, 9 ICE_ERR_PARAM = -1,
10 ICE_ERR_NOT_IMPL = -2, 10 ICE_ERR_NOT_IMPL = -2,
11 ICE_ERR_NOT_READY = -3, 11 ICE_ERR_NOT_READY = -3,
12 ICE_ERR_BAD_PTR = -5,
12 ICE_ERR_INVAL_SIZE = -6, 13 ICE_ERR_INVAL_SIZE = -6,
13 ICE_ERR_DEVICE_NOT_SUPPORTED = -8, 14 ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
14 ICE_ERR_RESET_FAILED = -9, 15 ICE_ERR_RESET_FAILED = -9,
@@ -18,6 +19,7 @@ enum ice_status {
18 ICE_ERR_OUT_OF_RANGE = -13, 19 ICE_ERR_OUT_OF_RANGE = -13,
19 ICE_ERR_ALREADY_EXISTS = -14, 20 ICE_ERR_ALREADY_EXISTS = -14,
20 ICE_ERR_DOES_NOT_EXIST = -15, 21 ICE_ERR_DOES_NOT_EXIST = -15,
22 ICE_ERR_MAX_LIMIT = -17,
21 ICE_ERR_BUF_TOO_SHORT = -52, 23 ICE_ERR_BUF_TOO_SHORT = -52,
22 ICE_ERR_NVM_BLANK_MODE = -53, 24 ICE_ERR_NVM_BLANK_MODE = -53,
23 ICE_ERR_AQ_ERROR = -100, 25 ICE_ERR_AQ_ERROR = -100,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
new file mode 100644
index 000000000000..6190ea30ee01
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -0,0 +1,361 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4/* The driver transmit and receive code */
5
6#include <linux/prefetch.h>
7#include <linux/mm.h>
8#include "ice.h"
9
10/**
11 * ice_unmap_and_free_tx_buf - Release a Tx buffer
12 * @ring: the ring that owns the buffer
13 * @tx_buf: the buffer to free
14 */
15static void
16ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
17{
18 if (tx_buf->skb) {
19 dev_kfree_skb_any(tx_buf->skb);
20 if (dma_unmap_len(tx_buf, len))
21 dma_unmap_single(ring->dev,
22 dma_unmap_addr(tx_buf, dma),
23 dma_unmap_len(tx_buf, len),
24 DMA_TO_DEVICE);
25 } else if (dma_unmap_len(tx_buf, len)) {
26 dma_unmap_page(ring->dev,
27 dma_unmap_addr(tx_buf, dma),
28 dma_unmap_len(tx_buf, len),
29 DMA_TO_DEVICE);
30 }
31
32 tx_buf->next_to_watch = NULL;
33 tx_buf->skb = NULL;
34 dma_unmap_len_set(tx_buf, len, 0);
35 /* tx_buf must be completely set up in the transmit path */
36}
37
38static struct netdev_queue *txring_txq(const struct ice_ring *ring)
39{
40 return netdev_get_tx_queue(ring->netdev, ring->q_index);
41}
42
43/**
44 * ice_clean_tx_ring - Free any empty Tx buffers
45 * @tx_ring: ring to be cleaned
46 */
47void ice_clean_tx_ring(struct ice_ring *tx_ring)
48{
49 unsigned long size;
50 u16 i;
51
52 /* ring already cleared, nothing to do */
53 if (!tx_ring->tx_buf)
54 return;
55
56	/* Free all the Tx ring sk_buffs */
57 for (i = 0; i < tx_ring->count; i++)
58 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
59
60 size = sizeof(struct ice_tx_buf) * tx_ring->count;
61 memset(tx_ring->tx_buf, 0, size);
62
63 /* Zero out the descriptor ring */
64 memset(tx_ring->desc, 0, tx_ring->size);
65
66 tx_ring->next_to_use = 0;
67 tx_ring->next_to_clean = 0;
68
69 if (!tx_ring->netdev)
70 return;
71
72 /* cleanup Tx queue statistics */
73 netdev_tx_reset_queue(txring_txq(tx_ring));
74}
75
76/**
77 * ice_free_tx_ring - Free Tx resources per queue
78 * @tx_ring: Tx descriptor ring for a specific queue
79 *
80 * Free all transmit software resources
81 */
82void ice_free_tx_ring(struct ice_ring *tx_ring)
83{
84 ice_clean_tx_ring(tx_ring);
85 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
86 tx_ring->tx_buf = NULL;
87
88 if (tx_ring->desc) {
89 dmam_free_coherent(tx_ring->dev, tx_ring->size,
90 tx_ring->desc, tx_ring->dma);
91 tx_ring->desc = NULL;
92 }
93}
94
95/**
96 * ice_setup_tx_ring - Allocate the Tx descriptors
97 * @tx_ring: the tx ring to set up
98 *
99 * Return 0 on success, negative on error
100 */
101int ice_setup_tx_ring(struct ice_ring *tx_ring)
102{
103 struct device *dev = tx_ring->dev;
104 int bi_size;
105
106 if (!dev)
107 return -ENOMEM;
108
109 /* warn if we are about to overwrite the pointer */
110 WARN_ON(tx_ring->tx_buf);
111 bi_size = sizeof(struct ice_tx_buf) * tx_ring->count;
112 tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
113 if (!tx_ring->tx_buf)
114 return -ENOMEM;
115
116 /* round up to nearest 4K */
117 tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc);
118 tx_ring->size = ALIGN(tx_ring->size, 4096);
119 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
120 GFP_KERNEL);
121 if (!tx_ring->desc) {
122 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
123 tx_ring->size);
124 goto err;
125 }
126
127 tx_ring->next_to_use = 0;
128 tx_ring->next_to_clean = 0;
129 return 0;
130
131err:
132 devm_kfree(dev, tx_ring->tx_buf);
133 tx_ring->tx_buf = NULL;
134 return -ENOMEM;
135}
136
137/**
138 * ice_clean_rx_ring - Free Rx buffers
139 * @rx_ring: ring to be cleaned
140 */
141void ice_clean_rx_ring(struct ice_ring *rx_ring)
142{
143 struct device *dev = rx_ring->dev;
144 unsigned long size;
145 u16 i;
146
147 /* ring already cleared, nothing to do */
148 if (!rx_ring->rx_buf)
149 return;
150
151 /* Free all the Rx ring sk_buffs */
152 for (i = 0; i < rx_ring->count; i++) {
153 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
154
155 if (rx_buf->skb) {
156 dev_kfree_skb(rx_buf->skb);
157 rx_buf->skb = NULL;
158 }
159 if (!rx_buf->page)
160 continue;
161
162 dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
163 __free_pages(rx_buf->page, 0);
164
165 rx_buf->page = NULL;
166 rx_buf->page_offset = 0;
167 }
168
169 size = sizeof(struct ice_rx_buf) * rx_ring->count;
170 memset(rx_ring->rx_buf, 0, size);
171
172 /* Zero out the descriptor ring */
173 memset(rx_ring->desc, 0, rx_ring->size);
174
175 rx_ring->next_to_alloc = 0;
176 rx_ring->next_to_clean = 0;
177 rx_ring->next_to_use = 0;
178}
179
180/**
181 * ice_free_rx_ring - Free Rx resources
182 * @rx_ring: ring to clean the resources from
183 *
184 * Free all receive software resources
185 */
186void ice_free_rx_ring(struct ice_ring *rx_ring)
187{
188 ice_clean_rx_ring(rx_ring);
189 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
190 rx_ring->rx_buf = NULL;
191
192 if (rx_ring->desc) {
193 dmam_free_coherent(rx_ring->dev, rx_ring->size,
194 rx_ring->desc, rx_ring->dma);
195 rx_ring->desc = NULL;
196 }
197}
198
199/**
200 * ice_setup_rx_ring - Allocate the Rx descriptors
201 * @rx_ring: the rx ring to set up
202 *
203 * Return 0 on success, negative on error
204 */
205int ice_setup_rx_ring(struct ice_ring *rx_ring)
206{
207 struct device *dev = rx_ring->dev;
208 int bi_size;
209
210 if (!dev)
211 return -ENOMEM;
212
213 /* warn if we are about to overwrite the pointer */
214 WARN_ON(rx_ring->rx_buf);
215 bi_size = sizeof(struct ice_rx_buf) * rx_ring->count;
216 rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
217 if (!rx_ring->rx_buf)
218 return -ENOMEM;
219
220 /* round up to nearest 4K */
221 rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
222 rx_ring->size = ALIGN(rx_ring->size, 4096);
223 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
224 GFP_KERNEL);
225 if (!rx_ring->desc) {
226 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
227 rx_ring->size);
228 goto err;
229 }
230
231 rx_ring->next_to_use = 0;
232 rx_ring->next_to_clean = 0;
233 return 0;
234
235err:
236 devm_kfree(dev, rx_ring->rx_buf);
237 rx_ring->rx_buf = NULL;
238 return -ENOMEM;
239}
240
241/**
242 * ice_release_rx_desc - Store the new tail and head values
243 * @rx_ring: ring to bump
244 * @val: new head index
245 */
246static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
247{
248 rx_ring->next_to_use = val;
249
250 /* update next to alloc since we have filled the ring */
251 rx_ring->next_to_alloc = val;
252
253 /* Force memory writes to complete before letting h/w
254 * know there are new descriptors to fetch. (Only
255 * applicable for weak-ordered memory model archs,
256 * such as IA-64).
257 */
258 wmb();
259 writel(val, rx_ring->tail);
260}
261
262/**
263 * ice_alloc_mapped_page - recycle or make a new page
264 * @rx_ring: ring to use
265 * @bi: rx_buf struct to modify
266 *
267 * Returns true if the page was successfully allocated or
268 * reused.
269 */
270static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
271 struct ice_rx_buf *bi)
272{
273 struct page *page = bi->page;
274 dma_addr_t dma;
275
276 /* since we are recycling buffers we should seldom need to alloc */
277 if (likely(page))
278 return true;
279
280 /* alloc new page for storage */
281 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
282 if (unlikely(!page))
283 return false;
284
285 /* map page for use */
286 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
287
288 /* if mapping failed free memory back to system since
289 * there isn't much point in holding memory we can't use
290 */
291 if (dma_mapping_error(rx_ring->dev, dma)) {
292 __free_pages(page, 0);
293 return false;
294 }
295
296 bi->dma = dma;
297 bi->page = page;
298 bi->page_offset = 0;
299
300 return true;
301}
302
303/**
304 * ice_alloc_rx_bufs - Replace used receive buffers
305 * @rx_ring: ring to place buffers on
306 * @cleaned_count: number of buffers to replace
307 *
308 * Returns false if all allocations were successful, true if any fail
309 */
310bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
311{
312 union ice_32b_rx_flex_desc *rx_desc;
313 u16 ntu = rx_ring->next_to_use;
314 struct ice_rx_buf *bi;
315
316 /* do nothing if no valid netdev defined */
317 if (!rx_ring->netdev || !cleaned_count)
318 return false;
319
320 /* get the RX descriptor and buffer based on next_to_use */
321 rx_desc = ICE_RX_DESC(rx_ring, ntu);
322 bi = &rx_ring->rx_buf[ntu];
323
324 do {
325 if (!ice_alloc_mapped_page(rx_ring, bi))
326 goto no_bufs;
327
328 /* Refresh the desc even if buffer_addrs didn't change
329 * because each write-back erases this info.
330 */
331 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
332
333 rx_desc++;
334 bi++;
335 ntu++;
336 if (unlikely(ntu == rx_ring->count)) {
337 rx_desc = ICE_RX_DESC(rx_ring, 0);
338 bi = rx_ring->rx_buf;
339 ntu = 0;
340 }
341
342 /* clear the status bits for the next_to_use descriptor */
343 rx_desc->wb.status_error0 = 0;
344
345 cleaned_count--;
346 } while (cleaned_count);
347
348 if (rx_ring->next_to_use != ntu)
349 ice_release_rx_desc(rx_ring, ntu);
350
351 return false;
352
353no_bufs:
354 if (rx_ring->next_to_use != ntu)
355 ice_release_rx_desc(rx_ring, ntu);
356
357 /* make sure to come back via polling to try again after
358 * allocation failure
359 */
360 return true;
361}
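The refill loop in ice_alloc_rx_bufs walks the descriptor ring with a single wrapping index: advance next_to_use, reset it to 0 at ring->count, and only publish the final value to the tail register once the batch is done. A self-contained sketch of that wraparound walk, with made-up ring sizes:

    /* Ring-index wraparound sketch; the ring size and indices are assumed. */
    #include <stdio.h>

    #define RING_COUNT 8

    int main(void)
    {
            unsigned int ntu = 6;   /* assumed current next_to_use */
            int to_fill = 5;        /* descriptors to refill this pass */

            while (to_fill--) {
                    printf("fill descriptor %u\n", ntu);
                    if (++ntu == RING_COUNT)
                            ntu = 0;        /* wrap back to the start of the ring */
            }
            /* only now would the driver write ntu to the queue tail register */
            printf("new next_to_use/tail = %u\n", ntu);
            return 0;
    }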
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 2dd2232127a7..b7015cfad2d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -5,6 +5,30 @@
5#define _ICE_TXRX_H_ 5#define _ICE_TXRX_H_
6 6
7#define ICE_DFLT_IRQ_WORK 256 7#define ICE_DFLT_IRQ_WORK 256
8#define ICE_RXBUF_2048 2048
9#define ICE_MAX_CHAINED_RX_BUFS 5
10#define ICE_MAX_TXQ_PER_TXQG 128
11
12#define ICE_DESC_UNUSED(R) \
13 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
14 (R)->next_to_clean - (R)->next_to_use - 1)
15
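ICE_DESC_UNUSED counts how many descriptors can still be handed to hardware while always leaving one slot empty, so next_to_use never catches up to next_to_clean. A quick worked example with assumed index values:

    /* Worked example of the ICE_DESC_UNUSED arithmetic with assumed ring state. */
    #include <stdio.h>

    struct ring { int count, next_to_use, next_to_clean; };

    #define DESC_UNUSED(R) \
            ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
             (R)->next_to_clean - (R)->next_to_use - 1)

    int main(void)
    {
            /* use ahead of clean: 512 + 4 - 10 - 1 = 505 free slots */
            struct ring a = { .count = 512, .next_to_use = 10, .next_to_clean = 4 };
            /* use has wrapped behind clean: 10 - 4 - 1 = 5 free slots */
            struct ring b = { .count = 512, .next_to_use = 4, .next_to_clean = 10 };

            printf("%d %d\n", DESC_UNUSED(&a), DESC_UNUSED(&b));
            return 0;
    }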
16struct ice_tx_buf {
17 struct ice_tx_desc *next_to_watch;
18 struct sk_buff *skb;
19 unsigned int bytecount;
20 unsigned short gso_segs;
21 u32 tx_flags;
22 DEFINE_DMA_UNMAP_ADDR(dma);
23 DEFINE_DMA_UNMAP_LEN(len);
24};
25
26struct ice_rx_buf {
27 struct sk_buff *skb;
28 dma_addr_t dma;
29 struct page *page;
30 unsigned int page_offset;
31};
8 32
9/* this enum matches hardware bits and is meant to be used by DYN_CTLN 33/* this enum matches hardware bits and is meant to be used by DYN_CTLN
10 * registers and QINT registers or more generally anywhere in the manual 34 * registers and QINT registers or more generally anywhere in the manual
@@ -18,33 +42,77 @@ enum ice_dyn_idx_t {
18 ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ 42 ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
19}; 43};
20 44
45/* Header split modes defined by DTYPE field of Rx RLAN context */
46enum ice_rx_dtype {
47 ICE_RX_DTYPE_NO_SPLIT = 0,
48 ICE_RX_DTYPE_HEADER_SPLIT = 1,
49 ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
50};
51
21/* indices into GLINT_ITR registers */ 52/* indices into GLINT_ITR registers */
22#define ICE_RX_ITR ICE_IDX_ITR0 53#define ICE_RX_ITR ICE_IDX_ITR0
54#define ICE_TX_ITR ICE_IDX_ITR1
23#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ 55#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
24#define ICE_ITR_8K 0x003E 56#define ICE_ITR_8K 0x003E
25 57
26/* apply ITR HW granularity translation to program the HW registers */ 58/* apply ITR HW granularity translation to program the HW registers */
27#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran)) 59#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
28 60
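ITR_TO_REG strips the ICE_ITR_DYNAMIC flag bit and scales the stored user value down by the hardware granularity shift before it is programmed into GLINT_ITR. A small worked example; the granularity shift value here is assumed for illustration:

    /* Worked example of the ITR_TO_REG translation; itr_gran is an assumed shift. */
    #include <stdio.h>

    #define ICE_ITR_DYNAMIC 0x8000
    #define ICE_ITR_8K      0x003E

    #define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))

    int main(void)
    {
            unsigned int itr_gran = 1;                        /* assumed shift */
            unsigned int user = ICE_ITR_DYNAMIC | ICE_ITR_8K; /* dynamic flag + 8K */

            /* 0x803e & ~0x8000 = 0x3e, >> 1 = 0x1f */
            printf("reg value = 0x%04x\n", ITR_TO_REG(user, itr_gran));
            return 0;
    }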
61/* Legacy or Advanced Mode Queue */
62#define ICE_TX_ADVANCED 0
63#define ICE_TX_LEGACY 1
64
29/* descriptor ring, associated with a VSI */ 65/* descriptor ring, associated with a VSI */
30struct ice_ring { 66struct ice_ring {
31 struct ice_ring *next; /* pointer to next ring in q_vector */ 67 struct ice_ring *next; /* pointer to next ring in q_vector */
68 void *desc; /* Descriptor ring memory */
32 struct device *dev; /* Used for DMA mapping */ 69 struct device *dev; /* Used for DMA mapping */
33 struct net_device *netdev; /* netdev ring maps to */ 70 struct net_device *netdev; /* netdev ring maps to */
34 struct ice_vsi *vsi; /* Backreference to associated VSI */ 71 struct ice_vsi *vsi; /* Backreference to associated VSI */
35 struct ice_q_vector *q_vector; /* Backreference to associated vector */ 72 struct ice_q_vector *q_vector; /* Backreference to associated vector */
73 u8 __iomem *tail;
74 union {
75 struct ice_tx_buf *tx_buf;
76 struct ice_rx_buf *rx_buf;
77 };
36 u16 q_index; /* Queue number of ring */ 78 u16 q_index; /* Queue number of ring */
79 u32 txq_teid; /* Added Tx queue TEID */
80
81 /* high bit set means dynamic, use accessor routines to read/write.
82 * hardware supports 2us/1us resolution for the ITR registers.
83 * these values always store the USER setting, and must be converted
84 * before programming to a register.
85 */
86 u16 rx_itr_setting;
87 u16 tx_itr_setting;
88
37 u16 count; /* Number of descriptors */ 89 u16 count; /* Number of descriptors */
38 u16 reg_idx; /* HW register index of the ring */ 90 u16 reg_idx; /* HW register index of the ring */
91
92 /* used in interrupt processing */
93 u16 next_to_use;
94 u16 next_to_clean;
95
39 bool ring_active; /* is ring online or not */ 96 bool ring_active; /* is ring online or not */
97 unsigned int size; /* length of descriptor ring in bytes */
98 dma_addr_t dma; /* physical address of ring */
40 struct rcu_head rcu; /* to avoid race on free */ 99 struct rcu_head rcu; /* to avoid race on free */
100 u16 next_to_alloc;
41} ____cacheline_internodealigned_in_smp; 101} ____cacheline_internodealigned_in_smp;
42 102
103enum ice_latency_range {
104 ICE_LOWEST_LATENCY = 0,
105 ICE_LOW_LATENCY = 1,
106 ICE_BULK_LATENCY = 2,
107 ICE_ULTRA_LATENCY = 3,
108};
109
43struct ice_ring_container { 110struct ice_ring_container {
44 /* array of pointers to rings */ 111 /* array of pointers to rings */
45 struct ice_ring *ring; 112 struct ice_ring *ring;
46 unsigned int total_bytes; /* total bytes processed this int */ 113 unsigned int total_bytes; /* total bytes processed this int */
47 unsigned int total_pkts; /* total packets processed this int */ 114 unsigned int total_pkts; /* total packets processed this int */
115 enum ice_latency_range latency_range;
48 u16 itr; 116 u16 itr;
49}; 117};
50 118
@@ -52,4 +120,11 @@ struct ice_ring_container {
52#define ice_for_each_ring(pos, head) \ 120#define ice_for_each_ring(pos, head) \
53 for (pos = (head).ring; pos; pos = pos->next) 121 for (pos = (head).ring; pos; pos = pos->next)
54 122
123bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
124void ice_clean_tx_ring(struct ice_ring *tx_ring);
125void ice_clean_rx_ring(struct ice_ring *rx_ring);
126int ice_setup_tx_ring(struct ice_ring *tx_ring);
127int ice_setup_rx_ring(struct ice_ring *rx_ring);
128void ice_free_tx_ring(struct ice_ring *tx_ring);
129void ice_free_rx_ring(struct ice_ring *rx_ring);
55#endif /* _ICE_TXRX_H_ */ 130#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 8926715b76ee..991ac56ca7b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -8,9 +8,11 @@
8#include "ice_hw_autogen.h" 8#include "ice_hw_autogen.h"
9#include "ice_osdep.h" 9#include "ice_osdep.h"
10#include "ice_controlq.h" 10#include "ice_controlq.h"
11#include "ice_lan_tx_rx.h"
11 12
12/* debug masks - set these bits in hw->debug_mask to control output */ 13/* debug masks - set these bits in hw->debug_mask to control output */
13#define ICE_DBG_INIT BIT_ULL(1) 14#define ICE_DBG_INIT BIT_ULL(1)
15#define ICE_DBG_QCTX BIT_ULL(6)
14#define ICE_DBG_NVM BIT_ULL(7) 16#define ICE_DBG_NVM BIT_ULL(7)
15#define ICE_DBG_LAN BIT_ULL(8) 17#define ICE_DBG_LAN BIT_ULL(8)
16#define ICE_DBG_SW BIT_ULL(13) 18#define ICE_DBG_SW BIT_ULL(13)