-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile           4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h              2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h 209
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c     231
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h       2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c      340
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h       28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c     144
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h      14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h       109
10 files changed, 1082 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index b9a32ddecb17..bd2cbe14e76e 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -10,4 +10,6 @@ obj-$(CONFIG_ICE) += ice.o
 ice-y := ice_main.o	\
	 ice_controlq.o	\
	 ice_common.o	\
-	 ice_nvm.o
+	 ice_nvm.o	\
+	 ice_switch.o	\
+	 ice_sched.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4c4f161768ed..716c555532f4 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -16,7 +16,9 @@
 #include <linux/bitmap.h>
 #include "ice_devids.h"
 #include "ice_type.h"
+#include "ice_switch.h"
 #include "ice_common.h"
+#include "ice_sched.h"
 
 #define ICE_BAR0		0
 #define ICE_AQ_LEN		64
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 70fe00c90329..d1b70f0ed0e2 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -8,6 +8,8 @@
  * descriptor format. It is shared between Firmware and Software.
  */
 
+#define ICE_AQC_TOPO_MAX_LEVEL_NUM	0x9
+
 struct ice_aqc_generic {
 	__le32 param0;
 	__le32 param1;
@@ -68,6 +70,40 @@ struct ice_aqc_req_res {
 	u8 reserved[2];
 };
 
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ice_aqc_list_caps {
+	u8 cmd_flags;
+	u8 pf_index;
+	u8 reserved[2];
+	__le32 count;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ice_aqc_list_caps_elem {
+	__le16 cap;
+#define ICE_AQC_CAPS_VSI		0x0017
+#define ICE_AQC_CAPS_RSS		0x0040
+#define ICE_AQC_CAPS_RXQS		0x0041
+#define ICE_AQC_CAPS_TXQS		0x0042
+#define ICE_AQC_CAPS_MSIX		0x0043
+#define ICE_AQC_CAPS_MAX_MTU		0x0047
+
+	u8 major_ver;
+	u8 minor_ver;
+	/* Number of resources described by this capability */
+	__le32 number;
+	/* Only meaningful for some types of resources */
+	__le32 logical_id;
+	/* Only meaningful for some types of resources */
+	__le32 phys_id;
+	__le64 rsvd1;
+	__le64 rsvd2;
+};
+
 /* Clear PXE Command and response (direct 0x0110) */
 struct ice_aqc_clear_pxe {
 	u8 rx_cnt;
@@ -75,6 +111,161 @@ struct ice_aqc_clear_pxe {
 	u8 reserved[15];
 };
 
+/* Get switch configuration (0x0200) */
+struct ice_aqc_get_sw_cfg {
+	/* Reserved for command and copy of request flags for response */
+	__le16 flags;
+	/* First desc in case of command and next_elem in case of response
+	 * In case of response, if it is not zero, means all the configuration
+	 * was not returned and new command shall be sent with this value in
+	 * the 'first desc' field
+	 */
+	__le16 element;
+	/* Reserved for command, only used for response */
+	__le16 num_elems;
+	__le16 rsvd;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+/* Each entry in the response buffer is of the following type: */
+struct ice_aqc_get_sw_cfg_resp_elem {
+	/* VSI/Port Number */
+	__le16 vsi_port_num;
+#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S	0
+#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M	\
+			(0x3FF << ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S)
+#define ICE_AQC_GET_SW_CONF_RESP_TYPE_S	14
+#define ICE_AQC_GET_SW_CONF_RESP_TYPE_M	(0x3 << ICE_AQC_GET_SW_CONF_RESP_TYPE_S)
+#define ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT	0
+#define ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT	1
+#define ICE_AQC_GET_SW_CONF_RESP_VSI		2
+
+	/* SWID VSI/Port belongs to */
+	__le16 swid;
+
+	/* Bit 14..0 : PF/VF number VSI belongs to
+	 * Bit 15 : VF indication bit
+	 */
+	__le16 pf_vf_num;
+#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S	0
+#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M	\
+				(0x7FFF << ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S)
+#define ICE_AQC_GET_SW_CONF_RESP_IS_VF		BIT(15)
+};
+
+/* The response buffer is as follows. Note that the length of the
+ * elements array varies with the length of the command response.
+ */
+struct ice_aqc_get_sw_cfg_resp {
+	struct ice_aqc_get_sw_cfg_resp_elem elements[1];
+};
+
+/* Add TSE (indirect 0x0401)
+ * Delete TSE (indirect 0x040F)
+ * Move TSE (indirect 0x0408)
+ */
+struct ice_aqc_add_move_delete_elem {
+	__le16 num_grps_req;
+	__le16 num_grps_updated;
+	__le32 reserved;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+struct ice_aqc_elem_info_bw {
+	__le16 bw_profile_idx;
+	__le16 bw_alloc;
+};
+
+struct ice_aqc_txsched_elem {
+	u8 elem_type; /* Special field, reserved for some aq calls */
+#define ICE_AQC_ELEM_TYPE_UNDEFINED		0x0
+#define ICE_AQC_ELEM_TYPE_ROOT_PORT		0x1
+#define ICE_AQC_ELEM_TYPE_TC			0x2
+#define ICE_AQC_ELEM_TYPE_SE_GENERIC		0x3
+#define ICE_AQC_ELEM_TYPE_ENTRY_POINT		0x4
+#define ICE_AQC_ELEM_TYPE_LEAF			0x5
+#define ICE_AQC_ELEM_TYPE_SE_PADDED		0x6
+	u8 valid_sections;
+#define ICE_AQC_ELEM_VALID_GENERIC		BIT(0)
+#define ICE_AQC_ELEM_VALID_CIR			BIT(1)
+#define ICE_AQC_ELEM_VALID_EIR			BIT(2)
+#define ICE_AQC_ELEM_VALID_SHARED		BIT(3)
+	u8 generic;
+#define ICE_AQC_ELEM_GENERIC_MODE_M		0x1
+#define ICE_AQC_ELEM_GENERIC_PRIO_S		0x1
+#define ICE_AQC_ELEM_GENERIC_PRIO_M	(0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S)
+#define ICE_AQC_ELEM_GENERIC_SP_S		0x4
+#define ICE_AQC_ELEM_GENERIC_SP_M	(0x1 << ICE_AQC_ELEM_GENERIC_SP_S)
+#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S	0x5
+#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M	\
+	(0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S)
+	u8 flags; /* Special field, reserved for some aq calls */
+#define ICE_AQC_ELEM_FLAG_SUSPEND_M		0x1
+	struct ice_aqc_elem_info_bw cir_bw;
+	struct ice_aqc_elem_info_bw eir_bw;
+	__le16 srl_id;
+	__le16 reserved2;
+};
+
+struct ice_aqc_txsched_elem_data {
+	__le32 parent_teid;
+	__le32 node_teid;
+	struct ice_aqc_txsched_elem data;
+};
+
+struct ice_aqc_txsched_topo_grp_info_hdr {
+	__le32 parent_teid;
+	__le16 num_elems;
+	__le16 reserved2;
+};
+
+struct ice_aqc_delete_elem {
+	struct ice_aqc_txsched_topo_grp_info_hdr hdr;
+	__le32 teid[1];
+};
+
+/* Query Scheduler Resource Allocation (indirect 0x0412)
+ * This indirect command retrieves the scheduler resources allocated by
+ * EMP Firmware to the given PF.
+ */
+struct ice_aqc_query_txsched_res {
+	u8 reserved[8];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+struct ice_aqc_generic_sched_props {
+	__le16 phys_levels;
+	__le16 logical_levels;
+	u8 flattening_bitmap;
+	u8 max_device_cgds;
+	u8 max_pf_cgds;
+	u8 rsvd0;
+	__le16 rdma_qsets;
+	u8 rsvd1[22];
+};
+
+struct ice_aqc_layer_props {
+	u8 logical_layer;
+	u8 chunk_size;
+	__le16 max_device_nodes;
+	__le16 max_pf_nodes;
+	u8 rsvd0[2];
+	__le16 max_shared_rate_lmtr;
+	__le16 max_children;
+	__le16 max_cir_rl_profiles;
+	__le16 max_eir_rl_profiles;
+	__le16 max_srl_profiles;
+	u8 rsvd1[14];
+};
+
+struct ice_aqc_query_txsched_res_resp {
+	struct ice_aqc_generic_sched_props sched_props;
+	struct ice_aqc_layer_props layer_props[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+};
+
 /* NVM Read command (indirect 0x0701)
  * NVM Erase commands (direct 0x0702)
  * NVM Update commands (indirect 0x0703)
@@ -128,6 +319,10 @@ struct ice_aq_desc {
 		struct ice_aqc_q_shutdown q_shutdown;
 		struct ice_aqc_req_res res_owner;
 		struct ice_aqc_clear_pxe clear_pxe;
+		struct ice_aqc_list_caps get_cap;
+		struct ice_aqc_get_sw_cfg get_sw_conf;
+		struct ice_aqc_query_txsched_res query_sched_res;
+		struct ice_aqc_add_move_delete_elem add_move_delete_elem;
 		struct ice_aqc_nvm nvm;
 	} params;
 };
@@ -136,16 +331,19 @@ struct ice_aq_desc {
 #define ICE_AQ_LG_BUF	512
 
 #define ICE_AQ_FLAG_LB_S	9
+#define ICE_AQ_FLAG_RD_S	10
 #define ICE_AQ_FLAG_BUF_S	12
 #define ICE_AQ_FLAG_SI_S	13
 
 #define ICE_AQ_FLAG_LB		BIT(ICE_AQ_FLAG_LB_S)  /* 0x200  */
+#define ICE_AQ_FLAG_RD		BIT(ICE_AQ_FLAG_RD_S)  /* 0x400  */
 #define ICE_AQ_FLAG_BUF		BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */
 #define ICE_AQ_FLAG_SI		BIT(ICE_AQ_FLAG_SI_S)  /* 0x2000 */
 
 /* error codes */
 enum ice_aq_err {
 	ICE_AQ_RC_OK		= 0,  /* success */
+	ICE_AQ_RC_ENOMEM	= 9,  /* Out of memory */
 	ICE_AQ_RC_EBUSY		= 12, /* Device or resource busy */
 	ICE_AQ_RC_EEXIST	= 13, /* object already exists */
 };
@@ -160,11 +358,22 @@ enum ice_adminq_opc {
 	ice_aqc_opc_req_res			= 0x0008,
 	ice_aqc_opc_release_res			= 0x0009,
 
+	/* device/function capabilities */
+	ice_aqc_opc_list_func_caps		= 0x000A,
+	ice_aqc_opc_list_dev_caps		= 0x000B,
+
 	/* PXE */
 	ice_aqc_opc_clear_pxe_mode		= 0x0110,
 
+	/* internal switch commands */
+	ice_aqc_opc_get_sw_cfg			= 0x0200,
+
 	ice_aqc_opc_clear_pf_cfg		= 0x02A4,
 
+	/* transmit scheduler commands */
+	ice_aqc_opc_delete_sched_elems		= 0x040F,
+	ice_aqc_opc_query_sched_res		= 0x0412,
+
 	/* NVM commands */
 	ice_aqc_opc_nvm_read			= 0x0701,
 
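The get_sw_cfg response element packs two values into each __le16 field: the low bits carry the VSI/port or function number, while the high bits carry the element type or the VF indication. Below is a standalone userspace sketch (not driver code) that decodes a response element using the same shift/mask values as the defines above; plain uint16_t stands in for __le16, and in the driver the fields would first go through le16_to_cpu().

#include <stdint.h>
#include <stdio.h>

#define RESP_VSI_PORT_NUM_M	0x3FF	/* bits 9:0   - VSI/port number */
#define RESP_TYPE_S		14	/* bits 15:14 - element type    */
#define RESP_FUNC_NUM_M		0x7FFF	/* bits 14:0  - PF/VF number    */
#define RESP_IS_VF		0x8000	/* bit 15     - VF indication   */

int main(void)
{
	uint16_t vsi_port_num = 0x0003;	/* physical port 3, type 0 */
	uint16_t pf_vf_num = 0x8001;	/* VF bit set, function 1  */

	printf("port/VSI number: %u\n", (unsigned)(vsi_port_num & RESP_VSI_PORT_NUM_M));
	printf("element type:    %u\n", (unsigned)(vsi_port_num >> RESP_TYPE_S));
	printf("function number: %u\n", (unsigned)(pf_vf_num & RESP_FUNC_NUM_M));
	printf("is VF:           %s\n", (pf_vf_num & RESP_IS_VF) ? "yes" : "no");
	return 0;
}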
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index d3d420c3ba7b..f9567dc1aefd 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2,6 +2,7 @@
 /* Copyright (c) 2018, Intel Corporation. */
 
 #include "ice_common.h"
+#include "ice_sched.h"
 #include "ice_adminq_cmd.h"
 
 #define ICE_PF_RESET_WAIT_COUNT	200
@@ -70,8 +71,37 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	if (status)
 		goto err_unroll_cqinit;
 
+	status = ice_get_caps(hw);
+	if (status)
+		goto err_unroll_cqinit;
+
+	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
+				     sizeof(*hw->port_info), GFP_KERNEL);
+	if (!hw->port_info) {
+		status = ICE_ERR_NO_MEMORY;
+		goto err_unroll_cqinit;
+	}
+
+	/* set the back pointer to hw */
+	hw->port_info->hw = hw;
+
+	/* Initialize port_info struct with switch configuration data */
+	status = ice_get_initial_sw_cfg(hw);
+	if (status)
+		goto err_unroll_alloc;
+
+	/* Query the allocated resources for tx scheduler */
+	status = ice_sched_query_res_alloc(hw);
+	if (status) {
+		ice_debug(hw, ICE_DBG_SCHED,
+			  "Failed to get scheduler allocated resources\n");
+		goto err_unroll_alloc;
+	}
+
 	return 0;
 
+err_unroll_alloc:
+	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
 err_unroll_cqinit:
 	ice_shutdown_all_ctrlq(hw);
 	return status;
@@ -83,7 +113,12 @@ err_unroll_cqinit:
  */
 void ice_deinit_hw(struct ice_hw *hw)
 {
+	ice_sched_cleanup_all(hw);
 	ice_shutdown_all_ctrlq(hw);
+	if (hw->port_info) {
+		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
+		hw->port_info = NULL;
+	}
 }
 
 /**
@@ -506,6 +541,202 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
 }
 
 /**
+ * ice_parse_caps - parse function/device capabilities
+ * @hw: pointer to the hw struct
+ * @buf: pointer to a buffer containing function/device capability records
+ * @cap_count: number of capability records in the list
+ * @opc: type of capabilities list to parse
+ *
+ * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
+ */
+static void
+ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
+	       enum ice_adminq_opc opc)
+{
+	struct ice_aqc_list_caps_elem *cap_resp;
+	struct ice_hw_func_caps *func_p = NULL;
+	struct ice_hw_dev_caps *dev_p = NULL;
+	struct ice_hw_common_caps *caps;
+	u32 i;
+
+	if (!buf)
+		return;
+
+	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+
+	if (opc == ice_aqc_opc_list_dev_caps) {
+		dev_p = &hw->dev_caps;
+		caps = &dev_p->common_cap;
+	} else if (opc == ice_aqc_opc_list_func_caps) {
+		func_p = &hw->func_caps;
+		caps = &func_p->common_cap;
+	} else {
+		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
+		return;
+	}
+
+	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
+		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
+		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
+		u32 number = le32_to_cpu(cap_resp->number);
+		u16 cap = le16_to_cpu(cap_resp->cap);
+
+		switch (cap) {
+		case ICE_AQC_CAPS_VSI:
+			if (dev_p) {
+				dev_p->num_vsi_allocd_to_host = number;
+				ice_debug(hw, ICE_DBG_INIT,
+					  "HW caps: Dev.VSI cnt = %d\n",
+					  dev_p->num_vsi_allocd_to_host);
+			} else if (func_p) {
+				func_p->guaranteed_num_vsi = number;
+				ice_debug(hw, ICE_DBG_INIT,
+					  "HW caps: Func.VSI cnt = %d\n",
+					  func_p->guaranteed_num_vsi);
+			}
+			break;
+		case ICE_AQC_CAPS_RSS:
+			caps->rss_table_size = number;
+			caps->rss_table_entry_width = logical_id;
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: RSS table size = %d\n",
+				  caps->rss_table_size);
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: RSS table width = %d\n",
+				  caps->rss_table_entry_width);
+			break;
+		case ICE_AQC_CAPS_RXQS:
+			caps->num_rxq = number;
+			caps->rxq_first_id = phys_id;
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: Rx first queue ID = %d\n",
+				  caps->rxq_first_id);
+			break;
+		case ICE_AQC_CAPS_TXQS:
+			caps->num_txq = number;
+			caps->txq_first_id = phys_id;
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: Tx first queue ID = %d\n",
+				  caps->txq_first_id);
+			break;
+		case ICE_AQC_CAPS_MSIX:
+			caps->num_msix_vectors = number;
+			caps->msix_vector_first_id = phys_id;
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: MSIX vector count = %d\n",
+				  caps->num_msix_vectors);
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: MSIX first vector index = %d\n",
+				  caps->msix_vector_first_id);
+			break;
+		case ICE_AQC_CAPS_MAX_MTU:
+			caps->max_mtu = number;
+			if (dev_p)
+				ice_debug(hw, ICE_DBG_INIT,
+					  "HW caps: Dev.MaxMTU = %d\n",
+					  caps->max_mtu);
+			else if (func_p)
+				ice_debug(hw, ICE_DBG_INIT,
+					  "HW caps: func.MaxMTU = %d\n",
+					  caps->max_mtu);
+			break;
+		default:
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
+				  cap);
+			break;
+		}
+	}
+}
+
+/**
+ * ice_aq_discover_caps - query function/device capabilities
+ * @hw: pointer to the hw struct
+ * @buf: a virtual buffer to hold the capabilities
+ * @buf_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
+ * @opc: capabilities type to discover - pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get the function(0x000a)/device(0x000b) capabilities description from
+ * the firmware.
+ */
+static enum ice_status
+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
+		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+	struct ice_aqc_list_caps *cmd;
+	struct ice_aq_desc desc;
+	enum ice_status status;
+
+	cmd = &desc.params.get_cap;
+
+	if (opc != ice_aqc_opc_list_func_caps &&
+	    opc != ice_aqc_opc_list_dev_caps)
+		return ICE_ERR_PARAM;
+
+	ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+	if (!status)
+		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
+	*data_size = le16_to_cpu(desc.datalen);
+
+	return status;
+}
+
+/**
+ * ice_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_caps(struct ice_hw *hw)
+{
+	enum ice_status status;
+	u16 data_size = 0;
+	u16 cbuf_len;
+	u8 retries;
+
+	/* The driver doesn't know how many capabilities the device will return
+	 * so the buffer size required isn't known ahead of time. The driver
+	 * starts with cbuf_len and if this turns out to be insufficient, the
+	 * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
+	 * The driver then allocates the buffer of this size and retries the
+	 * operation. So it follows that the retry count is 2.
+	 */
+#define ICE_GET_CAP_BUF_COUNT	40
+#define ICE_GET_CAP_RETRY_COUNT	2
+
+	cbuf_len = ICE_GET_CAP_BUF_COUNT *
+		   sizeof(struct ice_aqc_list_caps_elem);
+
+	retries = ICE_GET_CAP_RETRY_COUNT;
+
+	do {
+		void *cbuf;
+
+		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
+		if (!cbuf)
+			return ICE_ERR_NO_MEMORY;
+
+		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
+					      ice_aqc_opc_list_func_caps, NULL);
+		devm_kfree(ice_hw_to_dev(hw), cbuf);
+
+		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
+			break;
+
+		/* If ENOMEM is returned, try again with bigger buffer */
+		cbuf_len = data_size;
+	} while (--retries);
+
+	return status;
+}
+
+/**
  * ice_aq_clear_pxe_mode
  * @hw: pointer to the hw struct
  *
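The comment block in ice_get_caps() above describes a two-pass buffer sizing scheme: allocate a guessed buffer, and if the firmware answers ENOMEM along with the size it actually needs, reallocate at that size and retry once. Here is a standalone userspace sketch of the same allocate/retry pattern, under the assumption that query_fn() is a hypothetical stand-in for the firmware call (it is not part of the ice driver).

#include <stdlib.h>
#include <string.h>

#define EX_OK		0
#define EX_ENOMEM	9	/* "buffer too small", like ICE_AQ_RC_ENOMEM */

static int query_fn(void *buf, size_t buf_len, size_t *needed)
{
	const size_t real_size = 4096;	/* pretend the reply is this big */

	*needed = real_size;
	if (buf_len < real_size)
		return EX_ENOMEM;
	memset(buf, 0, real_size);	/* pretend to fill the reply */
	return EX_OK;
}

static int get_caps(void)
{
	size_t buf_len = 40 * 64;	/* initial guess, like ICE_GET_CAP_BUF_COUNT */
	int retries = 2;		/* one guess plus one sized retry */
	size_t needed = 0;
	int status;

	do {
		void *buf = calloc(1, buf_len);

		if (!buf)
			return -1;
		status = query_fn(buf, buf_len, &needed);
		free(buf);
		if (status != EX_ENOMEM)
			break;
		buf_len = needed;	/* retry with the size the responder asked for */
	} while (--retries);

	return status;
}

int main(void)
{
	return get_caps();
}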
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index b1a7c5afe86b..87d873493bdd 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -6,6 +6,7 @@
 
 #include "ice.h"
 #include "ice_type.h"
+#include "ice_switch.h"
 
 void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
 		  u16 buf_len);
@@ -25,6 +26,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		struct ice_aq_desc *desc, void *buf, u16 buf_size,
 		struct ice_sq_cd *cd);
 void ice_clear_pxe_mode(struct ice_hw *hw);
+enum ice_status ice_get_caps(struct ice_hw *hw);
 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
new file mode 100644
index 000000000000..ce4edf61ec8e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_sched.h"
+
+/**
+ * ice_aq_delete_sched_elems - delete scheduler elements
+ * @hw: pointer to the hw struct
+ * @grps_req: number of groups to delete
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @grps_del: returns total number of elements deleted
+ * @cd: pointer to command details structure or NULL
+ *
+ * Delete scheduling elements (0x040F)
+ */
+static enum ice_status
+ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
+			  struct ice_aqc_delete_elem *buf, u16 buf_size,
+			  u16 *grps_del, struct ice_sq_cd *cd)
+{
+	struct ice_aqc_add_move_delete_elem *cmd;
+	struct ice_aq_desc desc;
+	enum ice_status status;
+
+	cmd = &desc.params.add_move_delete_elem;
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);
+	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+	cmd->num_grps_req = cpu_to_le16(grps_req);
+
+	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+	if (!status && grps_del)
+		*grps_del = le16_to_cpu(cmd->num_grps_updated);
+
+	return status;
+}
+
+/**
+ * ice_sched_remove_elems - remove nodes from hw
+ * @hw: pointer to the hw struct
+ * @parent: pointer to the parent node
+ * @num_nodes: number of nodes
+ * @node_teids: array of node teids to be deleted
+ *
+ * This function removes nodes from hw
+ */
+static enum ice_status
+ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
+		       u16 num_nodes, u32 *node_teids)
+{
+	struct ice_aqc_delete_elem *buf;
+	u16 i, num_groups_removed = 0;
+	enum ice_status status;
+	u16 buf_size;
+
+	buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
+	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
+	if (!buf)
+		return ICE_ERR_NO_MEMORY;
+	buf->hdr.parent_teid = parent->info.node_teid;
+	buf->hdr.num_elems = cpu_to_le16(num_nodes);
+	for (i = 0; i < num_nodes; i++)
+		buf->teid[i] = cpu_to_le32(node_teids[i]);
+	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
+					   &num_groups_removed, NULL);
+	if (status || num_groups_removed != 1)
+		ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
+	devm_kfree(ice_hw_to_dev(hw), buf);
+	return status;
+}
+
+/**
+ * ice_sched_get_first_node - get the first node of the given layer
+ * @hw: pointer to the hw struct
+ * @parent: pointer to the base node of the subtree
+ * @layer: layer number
+ *
+ * This function retrieves the first node of the given layer from the subtree
+ */
+static struct ice_sched_node *
+ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent,
+			 u8 layer)
+{
+	u8 i;
+
+	if (layer < hw->sw_entry_point_layer)
+		return NULL;
+	for (i = 0; i < parent->num_children; i++) {
+		struct ice_sched_node *node = parent->children[i];
+
+		if (node) {
+			if (node->tx_sched_layer == layer)
+				return node;
+			/* this recursion is intentional, and won't
+			 * go more than 9 calls deep
+			 */
+			return ice_sched_get_first_node(hw, node, layer);
+		}
+	}
+	return NULL;
+}
+
+/**
+ * ice_sched_get_tc_node - get pointer to TC node
+ * @pi: port information structure
+ * @tc: TC number
+ *
+ * This function returns the TC node pointer
+ */
+struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
+{
+	u8 i;
+
+	if (!pi)
+		return NULL;
+	for (i = 0; i < pi->root->num_children; i++)
+		if (pi->root->children[i]->tc_num == tc)
+			return pi->root->children[i];
+	return NULL;
+}
+
+/**
+ * ice_free_sched_node - Free a Tx scheduler node from SW DB
+ * @pi: port information structure
+ * @node: pointer to the ice_sched_node struct
+ *
+ * This function frees up a node from SW DB as well as from HW
+ *
+ * This function needs to be called with the port_info->sched_lock held
+ */
+void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
+{
+	struct ice_sched_node *parent;
+	struct ice_hw *hw = pi->hw;
+	u8 i, j;
+
+	/* Free the children before freeing up the parent node
+	 * The parent array is updated below and that shifts the nodes
+	 * in the array. So always pick the first child if num children > 0
+	 */
+	while (node->num_children)
+		ice_free_sched_node(pi, node->children[0]);
+
+	/* Leaf, TC and root nodes can't be deleted by SW */
+	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
+	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
+	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
+	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
+		u32 teid = le32_to_cpu(node->info.node_teid);
+		enum ice_status status;
+
+		status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
+		if (status)
+			ice_debug(hw, ICE_DBG_SCHED,
+				  "remove element failed %d\n", status);
+	}
+	parent = node->parent;
+	/* root has no parent */
+	if (parent) {
+		struct ice_sched_node *p, *tc_node;
+
+		/* update the parent */
+		for (i = 0; i < parent->num_children; i++)
+			if (parent->children[i] == node) {
+				for (j = i + 1; j < parent->num_children; j++)
+					parent->children[j - 1] =
+						parent->children[j];
+				parent->num_children--;
+				break;
+			}
+
+		/* search for previous sibling that points to this node and
+		 * remove the reference
+		 */
+		tc_node = ice_sched_get_tc_node(pi, node->tc_num);
+		if (!tc_node) {
+			ice_debug(hw, ICE_DBG_SCHED,
+				  "Invalid TC number %d\n", node->tc_num);
+			goto err_exit;
+		}
+		p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);
+		while (p) {
+			if (p->sibling == node) {
+				p->sibling = node->sibling;
+				break;
+			}
+			p = p->sibling;
+		}
+	}
+err_exit:
+	/* leaf nodes have no children */
+	if (node->children)
+		devm_kfree(ice_hw_to_dev(hw), node->children);
+	devm_kfree(ice_hw_to_dev(hw), node);
+}
+
+/**
+ * ice_aq_query_sched_res - query scheduler resource
+ * @hw: pointer to the hw struct
+ * @buf_size: buffer size in bytes
+ * @buf: pointer to buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query scheduler resource allocation (0x0412)
+ */
+static enum ice_status
+ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
+		       struct ice_aqc_query_txsched_res_resp *buf,
+		       struct ice_sq_cd *cd)
+{
+	struct ice_aq_desc desc;
+
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
+	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_sched_clear_tx_topo - clears the scheduler tree nodes
+ * @pi: port information structure
+ *
+ * This function removes all the nodes from HW as well as from SW DB.
+ */
+static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
+{
+	struct ice_sched_agg_info *agg_info;
+	struct ice_sched_vsi_info *vsi_elem;
+	struct ice_sched_agg_info *atmp;
+	struct ice_sched_vsi_info *tmp;
+	struct ice_hw *hw;
+
+	if (!pi)
+		return;
+
+	hw = pi->hw;
+
+	list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+		struct ice_sched_agg_vsi_info *agg_vsi_info;
+		struct ice_sched_agg_vsi_info *vtmp;
+
+		list_for_each_entry_safe(agg_vsi_info, vtmp,
+					 &agg_info->agg_vsi_list, list_entry) {
+			list_del(&agg_vsi_info->list_entry);
+			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
+		}
+	}
+
+	/* remove the vsi list */
+	list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,
+				 list_entry) {
+		list_del(&vsi_elem->list_entry);
+		devm_kfree(ice_hw_to_dev(hw), vsi_elem);
+	}
+
+	if (pi->root) {
+		ice_free_sched_node(pi, pi->root);
+		pi->root = NULL;
+	}
+}
+
+/**
+ * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
+ * @pi: port information structure
+ *
+ * Cleanup scheduling elements from SW DB
+ */
+static void ice_sched_clear_port(struct ice_port_info *pi)
+{
+	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+		return;
+
+	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
+	mutex_lock(&pi->sched_lock);
+	ice_sched_clear_tx_topo(pi);
+	mutex_unlock(&pi->sched_lock);
+	mutex_destroy(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
+ * @hw: pointer to the hw struct
+ *
+ * Cleanup scheduling elements from SW DB for all the ports
+ */
+void ice_sched_cleanup_all(struct ice_hw *hw)
+{
+	if (!hw || !hw->port_info)
+		return;
+
+	if (hw->layer_info)
+		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
+
+	ice_sched_clear_port(hw->port_info);
+
+	hw->num_tx_sched_layers = 0;
+	hw->num_tx_sched_phys_layers = 0;
+	hw->flattened_layers = 0;
+	hw->max_cgds = 0;
+}
+
+/**
+ * ice_sched_query_res_alloc - query the FW for num of logical sched layers
+ * @hw: pointer to the HW struct
+ *
+ * query FW for allocated scheduler resources and store in HW struct
+ */
+enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
+{
+	struct ice_aqc_query_txsched_res_resp *buf;
+	enum ice_status status = 0;
+
+	if (hw->layer_info)
+		return status;
+
+	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ICE_ERR_NO_MEMORY;
+
+	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
+	if (status)
+		goto sched_query_out;
+
+	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
+	hw->num_tx_sched_phys_layers =
+		le16_to_cpu(buf->sched_props.phys_levels);
+	hw->flattened_layers = buf->sched_props.flattening_bitmap;
+	hw->max_cgds = buf->sched_props.max_pf_cgds;
+
+	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
+				      (hw->num_tx_sched_layers *
+				       sizeof(*hw->layer_info)),
+				      GFP_KERNEL);
+	if (!hw->layer_info) {
+		status = ICE_ERR_NO_MEMORY;
+		goto sched_query_out;
+	}
+
+sched_query_out:
+	devm_kfree(ice_hw_to_dev(hw), buf);
+	return status;
+}
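ice_sched_remove_elems() above sizes its command buffer as sizeof(*buf) + sizeof(u32) * (num_nodes - 1), relying on the teid[1] member of struct ice_aqc_delete_elem acting as a variable-length trailing array. A standalone userspace sketch of that "array of 1" idiom follows; the struct and field names here are generic stand-ins, not the driver's types.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct grp_hdr {
	uint32_t parent_teid;
	uint16_t num_elems;
	uint16_t reserved;
};

struct delete_elem {
	struct grp_hdr hdr;
	uint32_t teid[1];	/* really num_elems entries long */
};

int main(void)
{
	uint32_t teids[] = { 0x10, 0x11, 0x12 };
	uint16_t num = 3;
	/* one teid is already inside the struct, so add space for num - 1 more */
	size_t buf_size = sizeof(struct delete_elem) +
			  sizeof(uint32_t) * (num - 1);
	struct delete_elem *buf = calloc(1, buf_size);
	uint16_t i;

	if (!buf)
		return 1;
	buf->hdr.parent_teid = 0x1000;
	buf->hdr.num_elems = num;
	for (i = 0; i < num; i++)
		buf->teid[i] = teids[i];
	printf("buffer is %zu bytes for %u TEIDs\n", buf_size, (unsigned)num);
	free(buf);
	return 0;
}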
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
new file mode 100644
index 000000000000..e329f6ec6a0e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_SCHED_H_
+#define _ICE_SCHED_H_
+
+#include "ice_common.h"
+
+struct ice_sched_agg_vsi_info {
+	struct list_head list_entry;
+	DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+	u16 vsi_id;
+};
+
+struct ice_sched_agg_info {
+	struct list_head agg_vsi_list;
+	struct list_head list_entry;
+	DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+	u32 agg_id;
+	enum ice_agg_type agg_type;
+};
+
+/* FW AQ command calls */
+enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_cleanup_all(struct ice_hw *hw);
+void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
+struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
+#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
new file mode 100644
index 000000000000..8fc0579b0bbb
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_switch.h"
+
+/**
+ * ice_aq_get_sw_cfg - get switch configuration
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of the buffer available for response
+ * @req_desc: pointer to requested descriptor
+ * @num_elems: pointer to number of elements
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get switch configuration (0x0200) to be placed in 'buf'.
+ * This admin command returns information such as initial VSI/port number
+ * and switch ID it belongs to.
+ *
+ * NOTE: *req_desc is both an input/output parameter.
+ * The caller of this function first calls this function with *req_desc set
+ * to 0. If the response from f/w has *req_desc set to 0, all the switch
+ * configuration information has been returned; if non-zero (meaning not all
+ * the information was returned), the caller should call this function again
+ * with *req_desc set to the previous value returned by f/w to get the
+ * next block of switch configuration information.
+ *
+ * *num_elems is an output-only parameter. It reflects the number of elements
+ * in the response buffer. The caller should use *num_elems while parsing the
+ * response buffer.
+ */
+static enum ice_status
+ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
+		  u16 buf_size, u16 *req_desc, u16 *num_elems,
+		  struct ice_sq_cd *cd)
+{
+	struct ice_aqc_get_sw_cfg *cmd;
+	enum ice_status status;
+	struct ice_aq_desc desc;
+
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
+	cmd = &desc.params.get_sw_conf;
+	cmd->element = cpu_to_le16(*req_desc);
+
+	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+	if (!status) {
+		*req_desc = le16_to_cpu(cmd->element);
+		*num_elems = le16_to_cpu(cmd->num_elems);
+	}
+
+	return status;
+}
+
+/* ice_init_port_info - Initialize port_info with switch configuration data
+ * @pi: pointer to port_info
+ * @vsi_port_num: VSI number or port number
+ * @type: Type of switch element (port or VSI)
+ * @swid: switch ID of the switch the element is attached to
+ * @pf_vf_num: PF or VF number
+ * @is_vf: true if the element is a VF, false otherwise
+ */
+static void
+ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
+		   u16 swid, u16 pf_vf_num, bool is_vf)
+{
+	switch (type) {
+	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
+		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
+		pi->sw_id = swid;
+		pi->pf_vf_num = pf_vf_num;
+		pi->is_vf = is_vf;
+		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
+		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
+		break;
+	default:
+		ice_debug(pi->hw, ICE_DBG_SW,
+			  "incorrect VSI/port type received\n");
+		break;
+	}
+}
+
+/* ice_get_initial_sw_cfg - Get initial port and default VSI data
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
+{
+	struct ice_aqc_get_sw_cfg_resp *rbuf;
+	enum ice_status status;
+	u16 req_desc = 0;
+	u16 num_elems;
+	u16 i;
+
+	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
+			    GFP_KERNEL);
+
+	if (!rbuf)
+		return ICE_ERR_NO_MEMORY;
+
+	/* Multiple calls to ice_aq_get_sw_cfg may be required
+	 * to get all the switch configuration information. The need
+	 * for additional calls is indicated by ice_aq_get_sw_cfg
+	 * writing a non-zero value in req_desc
+	 */
+	do {
+		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
+					   &req_desc, &num_elems, NULL);
+
+		if (status)
+			break;
+
+		for (i = 0; i < num_elems; i++) {
+			struct ice_aqc_get_sw_cfg_resp_elem *ele;
+			u16 pf_vf_num, swid, vsi_port_num;
+			bool is_vf = false;
+			u8 type;
+
+			ele = rbuf[i].elements;
+			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
+				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
+
+			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
+				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
+
+			swid = le16_to_cpu(ele->swid);
+
+			if (le16_to_cpu(ele->pf_vf_num) &
+			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
+				is_vf = true;
+
+			type = le16_to_cpu(ele->vsi_port_num) >>
+				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
+
+			if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
+				/* FW VSI is not needed. Just continue. */
+				continue;
+			}
+
+			ice_init_port_info(hw->port_info, vsi_port_num,
+					   type, swid, pf_vf_num, is_vf);
+		}
+	} while (req_desc && !status);
+
+	devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
+	return status;
+}
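ice_get_initial_sw_cfg() above follows a simple paging protocol: the caller starts with req_desc set to 0, the firmware writes back a continuation cursor, and a non-zero cursor means the same command must be issued again starting there. A standalone userspace sketch of that loop follows; fetch_page() is a hypothetical stand-in for ice_aq_get_sw_cfg() and the page sizes are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define TOTAL_ELEMS	10
#define ELEMS_PER_PAGE	4

static int fetch_page(uint16_t *cursor, uint16_t *num_elems)
{
	uint16_t start = *cursor;
	uint16_t left = TOTAL_ELEMS - start;

	*num_elems = left < ELEMS_PER_PAGE ? left : ELEMS_PER_PAGE;
	/* a non-zero cursor written back means "call again starting here" */
	*cursor = (uint16_t)((start + *num_elems < TOTAL_ELEMS) ?
			     start + *num_elems : 0);
	return 0;
}

int main(void)
{
	uint16_t cursor = 0;	/* first call always starts at 0 */
	uint16_t num_elems;
	int status;

	do {
		status = fetch_page(&cursor, &num_elems);
		if (status)
			break;
		printf("got %u elements, next cursor %u\n",
		       (unsigned)num_elems, (unsigned)cursor);
	} while (cursor && !status);

	return status;
}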
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
new file mode 100644
index 000000000000..b98cb978a129
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_SWITCH_H_
+#define _ICE_SWITCH_H_
+
+#include "ice_common.h"
+
+#define ICE_SW_CFG_MAX_BUF_LEN 2048
+#define ICE_DFLT_VSI_INVAL 0xff
+
+enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
+
+#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 31be38369a48..0b71634668bb 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -12,6 +12,8 @@
 /* debug masks - set these bits in hw->debug_mask to control output */
 #define ICE_DBG_INIT		BIT_ULL(1)
 #define ICE_DBG_NVM		BIT_ULL(7)
+#define ICE_DBG_SW		BIT_ULL(13)
+#define ICE_DBG_SCHED		BIT_ULL(14)
 #define ICE_DBG_RES		BIT_ULL(17)
 #define ICE_DBG_AQ_MSG		BIT_ULL(24)
 #define ICE_DBG_AQ_CMD		BIT_ULL(27)
@@ -34,6 +36,38 @@ enum ice_mac_type {
 	ICE_MAC_GENERIC,
 };
 
+/* Common HW capabilities for SW use */
+struct ice_hw_common_caps {
+	/* TX/RX queues */
+	u16 num_rxq;		/* Number/Total RX queues */
+	u16 rxq_first_id;	/* First queue ID for RX queues */
+	u16 num_txq;		/* Number/Total TX queues */
+	u16 txq_first_id;	/* First queue ID for TX queues */
+
+	/* MSI-X vectors */
+	u16 num_msix_vectors;
+	u16 msix_vector_first_id;
+
+	/* Max MTU for function or device */
+	u16 max_mtu;
+
+	/* RSS related capabilities */
+	u16 rss_table_size;		/* 512 for PFs and 64 for VFs */
+	u8 rss_table_entry_width;	/* RSS Entry width in bits */
+};
+
+/* Function specific capabilities */
+struct ice_hw_func_caps {
+	struct ice_hw_common_caps common_cap;
+	u32 guaranteed_num_vsi;
+};
+
+/* Device wide capabilities */
+struct ice_hw_dev_caps {
+	struct ice_hw_common_caps common_cap;
+	u32 num_vsi_allocd_to_host;	/* Excluding EMP VSI */
+};
+
 /* Various RESET request, These are not tied with HW reset types */
 enum ice_reset_req {
 	ICE_RESET_PFR	= 0,
@@ -56,10 +90,76 @@ struct ice_nvm_info {
 	bool blank_nvm_mode;	  /* is NVM empty (no FW present) */
 };
 
+/* Max number of port to queue branches w.r.t topology */
+#define ICE_MAX_TRAFFIC_CLASS 8
+
+struct ice_sched_node {
+	struct ice_sched_node *parent;
+	struct ice_sched_node *sibling; /* next sibling in the same layer */
+	struct ice_sched_node **children;
+	struct ice_aqc_txsched_elem_data info;
+	u32 agg_id;			/* aggregator group id */
+	u16 vsi_id;
+	bool in_use;			/* suspended or in use */
+	u8 tx_sched_layer;		/* Logical Layer (1-9) */
+	u8 num_children;
+	u8 tc_num;
+	u8 owner;
+#define ICE_SCHED_NODE_OWNER_LAN	0
+};
+
+/* The aggregator type determines if identifier is for a VSI group,
+ * aggregator group, aggregator of queues, or queue group.
+ */
+enum ice_agg_type {
+	ICE_AGG_TYPE_UNKNOWN = 0,
+	ICE_AGG_TYPE_VSI,
+	ICE_AGG_TYPE_AGG, /* aggregator */
+	ICE_AGG_TYPE_Q,
+	ICE_AGG_TYPE_QG
+};
+
+/* vsi type list entry to locate corresponding vsi/ag nodes */
+struct ice_sched_vsi_info {
+	struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
+	struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
+	struct list_head list_entry;
+	u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
+	u16 vsi_id;
+};
+
+/* driver defines the policy */
+struct ice_sched_tx_policy {
+	u16 max_num_vsis;
+	u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
+	bool rdma_ena;
+};
+
+struct ice_port_info {
+	struct ice_sched_node *root;	/* Root Node per Port */
+	struct ice_hw *hw;		/* back pointer to hw instance */
+	u16 sw_id;			/* Initial switch ID belongs to port */
+	u16 pf_vf_num;
+	u8 port_state;
+#define ICE_SCHED_PORT_STATE_INIT	0x0
+#define ICE_SCHED_PORT_STATE_READY	0x1
+	u16 dflt_tx_vsi_num;
+	u16 dflt_rx_vsi_num;
+	struct mutex sched_lock;	/* protect access to TXSched tree */
+	struct ice_sched_tx_policy sched_policy;
+	struct list_head vsi_info_list;
+	struct list_head agg_list;	/* lists all aggregator */
+	u8 lport;
+#define ICE_LPORT_MASK		0xff
+	bool is_vf;
+};
+
 /* Port hardware description */
 struct ice_hw {
 	u8 __iomem *hw_addr;
 	void *back;
+	struct ice_aqc_layer_props *layer_info;
+	struct ice_port_info *port_info;
 	u64 debug_mask;		/* bitmap for debug mask */
 	enum ice_mac_type mac_type;
 
@@ -72,8 +172,17 @@ struct ice_hw {
 
 	u8 pf_id;		/* device profile info */
 
+	/* TX Scheduler values */
+	u16 num_tx_sched_layers;
+	u16 num_tx_sched_phys_layers;
+	u8 flattened_layers;
+	u8 max_cgds;
+	u8 sw_entry_point_layer;
+
 	struct ice_bus_info bus;
 	struct ice_nvm_info nvm;
+	struct ice_hw_dev_caps dev_caps;	/* device capabilities */
+	struct ice_hw_func_caps func_caps;	/* function capabilities */
 
 	/* Control Queue info */
 	struct ice_ctl_q_info adminq;
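The capability structures added above share their common fields through an embedded struct: both the function-level and device-level wrappers contain an ice_hw_common_caps, so the parse path can fill either one through a single common pointer and only branch for the wrapper-specific counters. The standalone userspace sketch below mirrors that composition with plain stand-in types; the field values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

struct common_caps {
	uint16_t num_rxq;
	uint16_t num_txq;
	uint16_t num_msix_vectors;
	uint16_t max_mtu;
};

struct func_caps {
	struct common_caps common_cap;
	uint32_t guaranteed_num_vsi;
};

struct dev_caps {
	struct common_caps common_cap;
	uint32_t num_vsi_allocd_to_host;
};

int main(void)
{
	struct func_caps fc = { { 64, 64, 65, 9728 }, 8 };
	struct dev_caps dc = { { 1024, 1024, 1025, 9728 }, 768 };
	/* the parse path picks one common_cap pointer and fills it */
	struct common_caps *caps = &fc.common_cap;

	printf("func: rxq=%u txq=%u vsi=%u\n",
	       (unsigned)caps->num_rxq, (unsigned)caps->num_txq,
	       (unsigned)fc.guaranteed_num_vsi);
	caps = &dc.common_cap;
	printf("dev:  rxq=%u txq=%u vsi=%u\n",
	       (unsigned)caps->num_rxq, (unsigned)caps->num_txq,
	       (unsigned)dc.num_vsi_allocd_to_host);
	return 0;
}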