Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile            |    1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h               |   34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h    |   20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c        |   78
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h        |    4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c      |   46
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h      |    2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h    |   69
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h     |   13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c           |  282
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h           |    3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c          |  150
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c         |  127
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h         |   34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h        |    3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c        |   66
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h        |    1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h          |   13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h          |   20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c   | 2668
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h   |  173
21 files changed, 3699 insertions(+), 108 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 45125bd074d9..e5d6f684437e 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -16,3 +16,4 @@ ice-y := ice_main.o \
 	 ice_lib.o \
 	 ice_txrx.o \
 	 ice_ethtool.o
+ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 0b269c470343..4c4b5717a627 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -28,6 +28,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/if_bridge.h>
+#include <linux/avf/virtchnl.h>
 #include <net/ipv6.h>
 #include "ice_devids.h"
 #include "ice_type.h"
@@ -35,6 +36,8 @@
 #include "ice_switch.h"
 #include "ice_common.h"
 #include "ice_sched.h"
+#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
 
 extern const char ice_drv_ver[];
 #define ICE_BAR0		0
@@ -46,6 +49,7 @@ extern const char ice_drv_ver[];
 #define ICE_INT_NAME_STR_LEN	(IFNAMSIZ + 16)
 #define ICE_ETHTOOL_FWVER_LEN	32
 #define ICE_AQ_LEN		64
+#define ICE_MBXQ_LEN		64
 #define ICE_MIN_MSIX		2
 #define ICE_NO_VSI		0xffff
 #define ICE_MAX_VSI_ALLOC	130
@@ -63,6 +67,14 @@ extern const char ice_drv_ver[];
 #define ICE_RES_MISC_VEC_ID	(ICE_RES_VALID_BIT - 1)
 #define ICE_INVAL_Q_INDEX	0xffff
 #define ICE_INVAL_VFID		256
+#define ICE_MAX_VF_COUNT	256
+#define ICE_MAX_QS_PER_VF	256
+#define ICE_MIN_QS_PER_VF	1
+#define ICE_DFLT_QS_PER_VF	4
+#define ICE_MAX_BASE_QS_PER_VF	16
+#define ICE_MAX_INTR_PER_VF	65
+#define ICE_MIN_INTR_PER_VF	(ICE_MIN_QS_PER_VF + 1)
+#define ICE_DFLT_INTR_PER_VF	(ICE_DFLT_QS_PER_VF + 1)
 
 #define ICE_VSIQF_HKEY_ARRAY_SIZE	((VSIQF_HKEY_MAX_INDEX + 1) * 4)
 
@@ -133,9 +145,21 @@ enum ice_state {
 	__ICE_EMPR_RECV,		/* set by OICR handler */
 	__ICE_SUSPENDED,		/* set on module remove path */
 	__ICE_RESET_FAILED,		/* set by reset/rebuild */
+	/* When checking for the PF to be in a nominal operating state, the
+	 * bits that are grouped at the beginning of the list need to be
+	 * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
+	 * be checked. If you need to add a bit into consideration for nominal
+	 * operating state, it must be added before
+	 * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
+	 * without appropriate consideration.
+	 */
+	__ICE_STATE_NOMINAL_CHECK_BITS,
 	__ICE_ADMINQ_EVENT_PENDING,
+	__ICE_MAILBOXQ_EVENT_PENDING,
 	__ICE_MDD_EVENT_PENDING,
+	__ICE_VFLR_EVENT_PENDING,
 	__ICE_FLTR_OVERFLOW_PROMISC,
+	__ICE_VF_DIS,
 	__ICE_CFG_BUSY,
 	__ICE_SERVICE_SCHED,
 	__ICE_SERVICE_DIS,
@@ -181,6 +205,8 @@ struct ice_vsi {
 	/* Interrupt thresholds */
 	u16 work_lmt;
 
+	s16 vf_id;			/* VF ID for SR-IOV VSIs */
+
 	/* RSS config */
 	u16 rss_table_size;	/* HW RSS table size */
 	u16 rss_size;		/* Allocated RSS queues */
@@ -240,6 +266,8 @@ enum ice_pf_flags {
 	ICE_FLAG_MSIX_ENA,
 	ICE_FLAG_FLTR_SYNC,
 	ICE_FLAG_RSS_ENA,
+	ICE_FLAG_SRIOV_ENA,
+	ICE_FLAG_SRIOV_CAPABLE,
 	ICE_PF_FLAGS_NBITS		/* must be last */
 };
 
@@ -255,6 +283,12 @@ struct ice_pf {
 
 	struct ice_vsi **vsi;		/* VSIs created by the driver */
 	struct ice_sw *first_sw;	/* first switch created by firmware */
+	/* Virtchnl/SR-IOV config info */
+	struct ice_vf *vf;
+	int num_alloc_vfs;		/* actual number of VFs allocated */
+	u16 num_vfs_supported;		/* num VFs supported for this PF */
+	u16 num_vf_qps;			/* num queue pairs per VF */
+	u16 num_vf_msix;		/* num vectors per VF */
 	DECLARE_BITMAP(state, __ICE_STATE_NBITS);
 	DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
 	DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
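
A note on the nominal-state marker added above: because every fault bit sits
before __ICE_STATE_NOMINAL_CHECK_BITS, a helper can test all of them with one
bitmap operation. A minimal sketch (the helper name and its placement are
assumptions; only the marker convention comes from this hunk):

    static bool ice_pf_state_is_nominal(struct ice_pf *pf)
    {
        DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

        /* every state bit declared before the marker is a fault bit */
        bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);

        return !bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS);
    }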
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index c100b4bda195..6653555f55dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -87,6 +87,8 @@ struct ice_aqc_list_caps {
87/* Device/Function buffer entry, repeated per reported capability */ 87/* Device/Function buffer entry, repeated per reported capability */
88struct ice_aqc_list_caps_elem { 88struct ice_aqc_list_caps_elem {
89 __le16 cap; 89 __le16 cap;
90#define ICE_AQC_CAPS_SRIOV 0x0012
91#define ICE_AQC_CAPS_VF 0x0013
90#define ICE_AQC_CAPS_VSI 0x0017 92#define ICE_AQC_CAPS_VSI 0x0017
91#define ICE_AQC_CAPS_RSS 0x0040 93#define ICE_AQC_CAPS_RSS 0x0040
92#define ICE_AQC_CAPS_RXQS 0x0041 94#define ICE_AQC_CAPS_RXQS 0x0041
@@ -1075,6 +1077,19 @@ struct ice_aqc_nvm {
1075 __le32 addr_low; 1077 __le32 addr_low;
1076}; 1078};
1077 1079
1080/**
1081 * Send to PF command (indirect 0x0801) id is only used by PF
1082 *
1083 * Send to VF command (indirect 0x0802) id is only used by PF
1084 *
1085 */
1086struct ice_aqc_pf_vf_msg {
1087 __le32 id;
1088 u32 reserved;
1089 __le32 addr_high;
1090 __le32 addr_low;
1091};
1092
1078/* Get/Set RSS key (indirect 0x0B04/0x0B02) */ 1093/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
1079struct ice_aqc_get_set_rss_key { 1094struct ice_aqc_get_set_rss_key {
1080#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) 1095#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1332,6 +1347,7 @@ struct ice_aq_desc {
1332 struct ice_aqc_query_txsched_res query_sched_res; 1347 struct ice_aqc_query_txsched_res query_sched_res;
1333 struct ice_aqc_add_move_delete_elem add_move_delete_elem; 1348 struct ice_aqc_add_move_delete_elem add_move_delete_elem;
1334 struct ice_aqc_nvm nvm; 1349 struct ice_aqc_nvm nvm;
1350 struct ice_aqc_pf_vf_msg virt;
1335 struct ice_aqc_get_set_rss_lut get_set_rss_lut; 1351 struct ice_aqc_get_set_rss_lut get_set_rss_lut;
1336 struct ice_aqc_get_set_rss_key get_set_rss_key; 1352 struct ice_aqc_get_set_rss_key get_set_rss_key;
1337 struct ice_aqc_add_txqs add_txqs; 1353 struct ice_aqc_add_txqs add_txqs;
@@ -1429,6 +1445,10 @@ enum ice_adminq_opc {
1429 /* NVM commands */ 1445 /* NVM commands */
1430 ice_aqc_opc_nvm_read = 0x0701, 1446 ice_aqc_opc_nvm_read = 0x0701,
1431 1447
1448 /* PF/VF mailbox commands */
1449 ice_mbx_opc_send_msg_to_pf = 0x0801,
1450 ice_mbx_opc_send_msg_to_vf = 0x0802,
1451
1432 /* RSS commands */ 1452 /* RSS commands */
1433 ice_aqc_opc_set_rss_key = 0x0B02, 1453 ice_aqc_opc_set_rss_key = 0x0B02,
1434 ice_aqc_opc_set_rss_lut = 0x0B03, 1454 ice_aqc_opc_set_rss_lut = 0x0B03,
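
The new ice_aqc_pf_vf_msg descriptor and the 0x0802 opcode work together when
the PF answers a VF over the mailbox. A sketch of such a send, modeled on what
ice_sriov.c (listed in the diffstat but not shown here) would need to do; the
helper name and the ice_sq_send_cmd() call on hw->mailboxq are assumptions:

    enum ice_status
    ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode,
                          u32 v_retval, u8 *msg, u16 msglen,
                          struct ice_sq_cd *cd)
    {
        struct ice_aqc_pf_vf_msg *cmd;
        struct ice_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);

        cmd = &desc.params.virt;
        cmd->id = cpu_to_le32(vfid);    /* id is only used by the PF */

        /* virtchnl opcode and return value ride in the cookie fields */
        desc.cookie_high = cpu_to_le32(v_opcode);
        desc.cookie_low = cpu_to_le32(v_retval);

        if (msglen)
            desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

        return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
    }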
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 68fbbb92d504..c52f450f2c0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1406,6 +1406,28 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 		u16 cap = le16_to_cpu(cap_resp->cap);
 
 		switch (cap) {
+		case ICE_AQC_CAPS_SRIOV:
+			caps->sr_iov_1_1 = (number == 1);
+			ice_debug(hw, ICE_DBG_INIT,
+				  "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
+			break;
+		case ICE_AQC_CAPS_VF:
+			if (dev_p) {
+				dev_p->num_vfs_exposed = number;
+				ice_debug(hw, ICE_DBG_INIT,
+					  "HW caps: VFs exposed = %d\n",
+					  dev_p->num_vfs_exposed);
+			} else if (func_p) {
+				func_p->num_allocd_vfs = number;
+				func_p->vf_base_id = logical_id;
+				ice_debug(hw, ICE_DBG_INIT,
+					  "HW caps: VFs allocated = %d\n",
+					  func_p->num_allocd_vfs);
+				ice_debug(hw, ICE_DBG_INIT,
+					  "HW caps: VF base_id = %d\n",
+					  func_p->vf_base_id);
+			}
+			break;
 		case ICE_AQC_CAPS_VSI:
 			if (dev_p) {
 				dev_p->num_vsi_allocd_to_host = number;
@@ -2265,6 +2287,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
  * @num_qgrps: number of groups in the list
  * @qg_list: the list of groups to disable
  * @buf_size: the total size of the qg_list buffer in bytes
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
  * @cd: pointer to command details structure or NULL
  *
  * Disable LAN Tx queue (0x0C31)
@@ -2272,6 +2296,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
 static enum ice_status
 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
 		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
+		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
 		   struct ice_sq_cd *cd)
 {
 	struct ice_aqc_dis_txqs *cmd;
@@ -2281,14 +2306,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
 	cmd = &desc.params.dis_txqs;
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
 
-	if (!qg_list)
+	/* qg_list can be NULL only in VM/VF reset flow */
+	if (!qg_list && !rst_src)
 		return ICE_ERR_PARAM;
 
 	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
 		return ICE_ERR_PARAM;
-	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
 	cmd->num_entries = num_qgrps;
 
+	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
+					    ICE_AQC_Q_DIS_TIMEOUT_M);
+
+	switch (rst_src) {
+	case ICE_VM_RESET:
+		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
+		cmd->vmvf_and_timeout |=
+			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+		break;
+	case ICE_VF_RESET:
+		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
+		/* In this case, FW expects vmvf_num to be absolute VF id */
+		cmd->vmvf_and_timeout |=
+			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
+				    ICE_AQC_Q_DIS_VMVF_NUM_M);
+		break;
+	case ICE_NO_RESET:
+	default:
+		break;
+	}
+
+	/* If no queue group info, we are in a reset flow. Issue the AQ */
+	if (!qg_list)
+		goto do_aq;
+
+	/* set RD bit to indicate that command buffer is provided by the driver
+	 * and it needs to be read by the firmware
+	 */
+	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
 	for (i = 0; i < num_qgrps; ++i) {
 		/* Calculate the size taken up by the queue IDs in this group */
 		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
@@ -2304,6 +2360,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
 	if (buf_size != sz)
 		return ICE_ERR_PARAM;
 
+do_aq:
 	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
 }
 
@@ -2610,13 +2667,16 @@ ena_txq_exit:
  * @num_queues: number of queues
  * @q_ids: pointer to the q_id array
  * @q_teids: pointer to queue node teids
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
-		u32 *q_teids, struct ice_sq_cd *cd)
+		u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+		struct ice_sq_cd *cd)
 {
 	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
 	struct ice_aqc_dis_txq_item qg_list;
@@ -2625,6 +2685,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
 		return ICE_ERR_CFG;
 
+	/* if queue is disabled already yet the disable queue command has to be
+	 * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
+	 * any queue information
+	 */
+	if (!num_queues && rst_src)
+		return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
+					  NULL);
+
 	mutex_lock(&pi->sched_lock);
 
 	for (i = 0; i < num_queues; i++) {
@@ -2637,7 +2706,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
 		qg_list.num_qs = 1;
 		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
 		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
-					    sizeof(qg_list), cd);
+					    sizeof(qg_list), rst_src, vmvf_num,
+					    cd);
 
 		if (status)
 			break;
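
The NULL-qg_list path added above exists so a VF reset can be completed even
when the queues are already disabled. A sketch of that caller (the wrapper
name is an assumption; the argument pattern follows from the hunks above):

    static enum ice_status
    ice_complete_vf_q_reset(struct ice_vsi *vsi, u16 rel_vf_id)
    {
        /* num_queues == 0 plus a non-zero rst_src makes ice_dis_vsi_txq
         * issue the disable-queues AQ command with no queue list
         */
        return ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET,
                               rel_vf_id, NULL);
    }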
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 7b2a5bb2e550..1900681289a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -7,6 +7,7 @@
 #include "ice.h"
 #include "ice_type.h"
 #include "ice_switch.h"
+#include <linux/avf/virtchnl.h>
 
 void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
 		  u16 buf_len);
@@ -89,7 +90,8 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
 		      struct ice_sq_cd *cd);
 enum ice_status
 ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
-		u32 *q_teids, struct ice_sq_cd *cmd_details);
+		u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+		struct ice_sq_cd *cmd_details);
 enum ice_status
 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
 		u16 *max_lanqs);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index b25ce4f587f5..84c967294eaf 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -33,6 +33,36 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
 }
 
 /**
+ * ice_mailbox_init_regs - Initialize Mailbox registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_mailbox_init_regs(struct ice_hw *hw)
+{
+	struct ice_ctl_q_info *cq = &hw->mailboxq;
+
+	/* set head and tail registers in our local struct */
+	cq->sq.head = PF_MBX_ATQH;
+	cq->sq.tail = PF_MBX_ATQT;
+	cq->sq.len = PF_MBX_ATQLEN;
+	cq->sq.bah = PF_MBX_ATQBAH;
+	cq->sq.bal = PF_MBX_ATQBAL;
+	cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
+	cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
+	cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
+
+	cq->rq.head = PF_MBX_ARQH;
+	cq->rq.tail = PF_MBX_ARQT;
+	cq->rq.len = PF_MBX_ARQLEN;
+	cq->rq.bah = PF_MBX_ARQBAH;
+	cq->rq.bal = PF_MBX_ARQBAL;
+	cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
+	cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
+	cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+}
+
+/**
  * ice_check_sq_alive
  * @hw: pointer to the hw struct
  * @cq: pointer to the specific Control queue
@@ -639,6 +669,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 		ice_adminq_init_regs(hw);
 		cq = &hw->adminq;
 		break;
+	case ICE_CTL_Q_MAILBOX:
+		ice_mailbox_init_regs(hw);
+		cq = &hw->mailboxq;
+		break;
 	default:
 		return ICE_ERR_PARAM;
 	}
@@ -696,7 +730,12 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
 	if (ret_code)
 		return ret_code;
 
-	return ice_init_check_adminq(hw);
+	ret_code = ice_init_check_adminq(hw);
+	if (ret_code)
+		return ret_code;
+
+	/* Init Mailbox queue */
+	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
 
 /**
@@ -714,6 +753,9 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 		if (ice_check_sq_alive(hw, cq))
 			ice_aq_q_shutdown(hw, true);
 		break;
+	case ICE_CTL_Q_MAILBOX:
+		cq = &hw->mailboxq;
+		break;
 	default:
 		return;
 	}
@@ -736,6 +778,8 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
 {
 	/* Shutdown FW admin queue */
 	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+	/* Shutdown PF-VF Mailbox */
+	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index ea02b89243e2..437f832fd7c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -8,6 +8,7 @@
 
 /* Maximum buffer lengths for all control queue types */
 #define ICE_AQ_MAX_BUF_LEN 4096
+#define ICE_MBXQ_MAX_BUF_LEN 4096
 
 #define ICE_CTL_Q_DESC(R, i) \
 	(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -28,6 +29,7 @@
 enum ice_ctl_q {
 	ICE_CTL_Q_UNKNOWN = 0,
 	ICE_CTL_Q_ADMIN,
+	ICE_CTL_Q_MAILBOX,
 };
 
 /* Control Queue default settings */
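
ice_init_all_ctrlq() expects the mailbox queue to be sized before it runs,
just as the admin queue is. A sketch of that setup using the new ICE_MBXQ_LEN
and ICE_MBXQ_MAX_BUF_LEN defines (placing it in the driver's control-queue
length setup is an assumption):

    static void ice_set_ctrlq_len(struct ice_hw *hw)
    {
        hw->adminq.num_rq_entries = ICE_AQ_LEN;
        hw->adminq.num_sq_entries = ICE_AQ_LEN;
        hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
        hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
        hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
        hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
        hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
        hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
    }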
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 9a78d83eaa3e..a6679a9bfd3a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -29,6 +29,22 @@
 #define PF_FW_ATQLEN_ATQCRIT_M		BIT(30)
 #define PF_FW_ATQLEN_ATQENABLE_M	BIT(31)
 #define PF_FW_ATQT			0x00080400
+#define PF_MBX_ARQBAH			0x0022E400
+#define PF_MBX_ARQBAL			0x0022E380
+#define PF_MBX_ARQH			0x0022E500
+#define PF_MBX_ARQH_ARQH_M		ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN			0x0022E480
+#define PF_MBX_ARQLEN_ARQLEN_M		ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQENABLE_M	BIT(31)
+#define PF_MBX_ARQT			0x0022E580
+#define PF_MBX_ATQBAH			0x0022E180
+#define PF_MBX_ATQBAL			0x0022E100
+#define PF_MBX_ATQH			0x0022E280
+#define PF_MBX_ATQH_ATQH_M		ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN			0x0022E200
+#define PF_MBX_ATQLEN_ATQLEN_M		ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQENABLE_M	BIT(31)
+#define PF_MBX_ATQT			0x0022E300
 #define GLFLXP_RXDID_FLAGS(_i, _j)	(0x0045D000 + ((_i) * 4 + (_j) * 256))
 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S	0
 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M	ICE_M(0x3F, 0)
@@ -74,10 +90,16 @@
 #define GLGEN_RTRIG_CORER_M		BIT(0)
 #define GLGEN_RTRIG_GLOBR_M		BIT(1)
 #define GLGEN_STAT			0x000B612C
+#define GLGEN_VFLRSTAT(_i)		(0x00093A04 + ((_i) * 4))
 #define PFGEN_CTRL			0x00091000
 #define PFGEN_CTRL_PFSWR_M		BIT(0)
 #define PFGEN_STATE			0x00088000
 #define PRTGEN_STATUS			0x000B8100
+#define VFGEN_RSTAT(_VF)		(0x00074000 + ((_VF) * 4))
+#define VPGEN_VFRSTAT(_VF)		(0x00090800 + ((_VF) * 4))
+#define VPGEN_VFRSTAT_VFRD_M		BIT(0)
+#define VPGEN_VFRTRIG(_VF)		(0x00090000 + ((_VF) * 4))
+#define VPGEN_VFRTRIG_VFSWR_M		BIT(0)
 #define PFHMC_ERRORDATA			0x00520500
 #define PFHMC_ERRORINFO			0x00520400
 #define GLINT_DYN_CTL(_INT)		(0x00160000 + ((_INT) * 4))
@@ -90,11 +112,23 @@
 #define GLINT_ITR(_i, _INT)		(0x00154000 + ((_i) * 8192 + (_INT) * 4))
 #define GLINT_RATE(_INT)		(0x0015A000 + ((_INT) * 4))
 #define GLINT_RATE_INTRL_ENA_M		BIT(6)
+#define GLINT_VECT2FUNC(_INT)		(0x00162000 + ((_INT) * 4))
+#define GLINT_VECT2FUNC_VF_NUM_S	0
+#define GLINT_VECT2FUNC_VF_NUM_M	ICE_M(0xFF, 0)
+#define GLINT_VECT2FUNC_PF_NUM_S	12
+#define GLINT_VECT2FUNC_PF_NUM_M	ICE_M(0x7, 12)
+#define GLINT_VECT2FUNC_IS_PF_S		16
+#define GLINT_VECT2FUNC_IS_PF_M		BIT(16)
 #define PFINT_FW_CTL			0x0016C800
 #define PFINT_FW_CTL_MSIX_INDX_M	ICE_M(0x7FF, 0)
 #define PFINT_FW_CTL_ITR_INDX_S		11
 #define PFINT_FW_CTL_ITR_INDX_M		ICE_M(0x3, 11)
 #define PFINT_FW_CTL_CAUSE_ENA_M	BIT(30)
+#define PFINT_MBX_CTL			0x0016B280
+#define PFINT_MBX_CTL_MSIX_INDX_M	ICE_M(0x7FF, 0)
+#define PFINT_MBX_CTL_ITR_INDX_S	11
+#define PFINT_MBX_CTL_ITR_INDX_M	ICE_M(0x3, 11)
+#define PFINT_MBX_CTL_CAUSE_ENA_M	BIT(30)
 #define PFINT_OICR			0x0016CA00
 #define PFINT_OICR_ECC_ERR_M		BIT(16)
 #define PFINT_OICR_MAL_DETECT_M		BIT(19)
@@ -102,6 +136,7 @@
 #define PFINT_OICR_PCI_EXCEPTION_M	BIT(21)
 #define PFINT_OICR_HMC_ERR_M		BIT(26)
 #define PFINT_OICR_PE_CRITERR_M		BIT(28)
+#define PFINT_OICR_VFLR_M		BIT(29)
 #define PFINT_OICR_CTL			0x0016CA80
 #define PFINT_OICR_CTL_MSIX_INDX_M	ICE_M(0x7FF, 0)
 #define PFINT_OICR_CTL_ITR_INDX_S	11
@@ -116,6 +151,12 @@
 #define QINT_TQCTL_MSIX_INDX_S		0
 #define QINT_TQCTL_ITR_INDX_S		11
 #define QINT_TQCTL_CAUSE_ENA_M		BIT(30)
+#define VPINT_ALLOC(_VF)		(0x001D1000 + ((_VF) * 4))
+#define VPINT_ALLOC_FIRST_S		0
+#define VPINT_ALLOC_FIRST_M		ICE_M(0x7FF, 0)
+#define VPINT_ALLOC_LAST_S		12
+#define VPINT_ALLOC_LAST_M		ICE_M(0x7FF, 12)
+#define VPINT_ALLOC_VALID_M		BIT(31)
 #define QRX_CONTEXT(_i, _QRX)		(0x00280000 + ((_i) * 8192 + (_QRX) * 4))
 #define QRX_CTRL(_QRX)			(0x00120000 + ((_QRX) * 4))
 #define QRX_CTRL_MAX_INDEX		2047
@@ -128,6 +169,20 @@
 #define QRX_TAIL_MAX_INDEX		2047
 #define QRX_TAIL_TAIL_S			0
 #define QRX_TAIL_TAIL_M			ICE_M(0x1FFF, 0)
+#define VPLAN_RX_QBASE(_VF)		(0x00072000 + ((_VF) * 4))
+#define VPLAN_RX_QBASE_VFFIRSTQ_S	0
+#define VPLAN_RX_QBASE_VFFIRSTQ_M	ICE_M(0x7FF, 0)
+#define VPLAN_RX_QBASE_VFNUMQ_S		16
+#define VPLAN_RX_QBASE_VFNUMQ_M		ICE_M(0xFF, 16)
+#define VPLAN_RXQ_MAPENA(_VF)		(0x00073000 + ((_VF) * 4))
+#define VPLAN_RXQ_MAPENA_RX_ENA_M	BIT(0)
+#define VPLAN_TX_QBASE(_VF)		(0x001D1800 + ((_VF) * 4))
+#define VPLAN_TX_QBASE_VFFIRSTQ_S	0
+#define VPLAN_TX_QBASE_VFFIRSTQ_M	ICE_M(0x3FFF, 0)
+#define VPLAN_TX_QBASE_VFNUMQ_S		16
+#define VPLAN_TX_QBASE_VFNUMQ_M		ICE_M(0xFF, 16)
+#define VPLAN_TXQ_MAPENA(_VF)		(0x00073800 + ((_VF) * 4))
+#define VPLAN_TXQ_MAPENA_TX_ENA_M	BIT(0)
 #define GL_MDET_RX			0x00294C00
 #define GL_MDET_RX_QNUM_S		0
 #define GL_MDET_RX_QNUM_M		ICE_M(0x7FFF, 0)
@@ -164,6 +219,14 @@
 #define PF_MDET_TX_PQM_VALID_M		BIT(0)
 #define PF_MDET_TX_TCLAN		0x000FC000
 #define PF_MDET_TX_TCLAN_VALID_M	BIT(0)
+#define VP_MDET_RX(_VF)			(0x00294400 + ((_VF) * 4))
+#define VP_MDET_RX_VALID_M		BIT(0)
+#define VP_MDET_TX_PQM(_VF)		(0x002D2000 + ((_VF) * 4))
+#define VP_MDET_TX_PQM_VALID_M		BIT(0)
+#define VP_MDET_TX_TCLAN(_VF)		(0x000FB800 + ((_VF) * 4))
+#define VP_MDET_TX_TCLAN_VALID_M	BIT(0)
+#define VP_MDET_TX_TDPU(_VF)		(0x00040000 + ((_VF) * 4))
+#define VP_MDET_TX_TDPU_VALID_M		BIT(0)
 #define GLNVM_FLA			0x000B6108
 #define GLNVM_FLA_LOCKED_M		BIT(6)
 #define GLNVM_GENS			0x000B6100
@@ -175,6 +238,9 @@
 #define PF_FUNC_RID			0x0009E880
 #define PF_FUNC_RID_FUNC_NUM_S		0
 #define PF_FUNC_RID_FUNC_NUM_M		ICE_M(0x7, 0)
+#define PF_PCI_CIAA			0x0009E580
+#define PF_PCI_CIAA_VF_NUM_S		12
+#define PF_PCI_CIAD			0x0009E500
 #define GL_PWR_MODE_CTL			0x000B820C
 #define GL_PWR_MODE_CTL_CAR_MAX_BW_S	30
 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M	ICE_M(0x3, 30)
@@ -255,5 +321,8 @@
 #define GLV_UPTCH(_i)			(0x0030A004 + ((_i) * 8))
 #define GLV_UPTCL(_i)			(0x0030A000 + ((_i) * 8))
 #define VSIQF_HKEY_MAX_INDEX		12
+#define VSIQF_HLUT_MAX_INDEX		15
+#define VFINT_DYN_CTLN(_i)		(0x00003800 + ((_i) * 4))
+#define VFINT_DYN_CTLN_CLEARPBA_M	BIT(1)
 
 #endif /* _ICE_HW_AUTOGEN_H_ */
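
The VPGEN_VFRTRIG/VPGEN_VFRSTAT pair added above drives a per-VF software
reset: write the trigger bit, then poll until firmware reports completion. A
sketch of that sequence (the helper name and the poll budget are assumptions):

    static bool ice_reset_vf_hw(struct ice_hw *hw, u16 vf_abs_id)
    {
        u32 reg;
        int i;

        /* request the software reset for this VF */
        reg = rd32(hw, VPGEN_VFRTRIG(vf_abs_id));
        reg |= VPGEN_VFRTRIG_VFSWR_M;
        wr32(hw, VPGEN_VFRTRIG(vf_abs_id), reg);

        /* wait for firmware to signal that the reset is done */
        for (i = 0; i < 100; i++) {
            reg = rd32(hw, VPGEN_VFRSTAT(vf_abs_id));
            if (reg & VPGEN_VFRSTAT_VFRD_M)
                return true;
            usleep_range(10, 20);
        }
        return false;
    }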
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 94504023d86e..7d2a66739e3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -418,6 +418,7 @@ struct ice_tlan_ctx {
 	u8 pf_num;
 	u16 vmvf_num;
 	u8 vmvf_type;
+#define ICE_TLAN_CTX_VMVF_TYPE_VF	0
 #define ICE_TLAN_CTX_VMVF_TYPE_VMQ	1
 #define ICE_TLAN_CTX_VMVF_TYPE_PF	2
 	u16 src_vsi;
@@ -473,4 +474,16 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
 {
 	return ice_ptype_lkup[ptype];
 }
+
+#define ICE_LINK_SPEED_UNKNOWN		0
+#define ICE_LINK_SPEED_10MBPS		10
+#define ICE_LINK_SPEED_100MBPS		100
+#define ICE_LINK_SPEED_1000MBPS		1000
+#define ICE_LINK_SPEED_2500MBPS		2500
+#define ICE_LINK_SPEED_5000MBPS		5000
+#define ICE_LINK_SPEED_10000MBPS	10000
+#define ICE_LINK_SPEED_20000MBPS	20000
+#define ICE_LINK_SPEED_25000MBPS	25000
+#define ICE_LINK_SPEED_40000MBPS	40000
+
 #endif /* _ICE_LAN_TX_RX_H_ */
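
The ICE_LINK_SPEED_* values above are plain Mb/s numbers, convenient for
reporting link state over the mailbox. A sketch of mapping admin-queue link
speed bits onto them (the helper name and the exact ICE_AQ_LINK_SPEED_* cases
covered are assumptions):

    static u32 ice_link_speed_mbps(u16 aq_link_speed)
    {
        switch (aq_link_speed) {
        case ICE_AQ_LINK_SPEED_10GB:
            return ICE_LINK_SPEED_10000MBPS;
        case ICE_AQ_LINK_SPEED_25GB:
            return ICE_LINK_SPEED_25000MBPS;
        case ICE_AQ_LINK_SPEED_40GB:
            return ICE_LINK_SPEED_40000MBPS;
        default:
            return ICE_LINK_SPEED_UNKNOWN;
        }
    }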
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index acf3478a3f3b..49f1940772ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -68,18 +68,20 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* Enable Flexible Descriptors in the queue context which
 	 * allows this driver to select a specific receive descriptor format
 	 */
-	regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
-	regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
-		QRXFLXP_CNTXT_RXDID_IDX_M;
-
-	/* increasing context priority to pick up profile id;
-	 * default is 0x01; setting to 0x03 to ensure profile
-	 * is programming if prev context is of same priority
-	 */
-	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
-		QRXFLXP_CNTXT_RXDID_PRIO_M;
-
-	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+	if (vsi->type != ICE_VSI_VF) {
+		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+			QRXFLXP_CNTXT_RXDID_IDX_M;
+
+		/* increasing context priority to pick up profile id;
+		 * default is 0x01; setting to 0x03 to ensure profile
+		 * is programming if prev context is of same priority
+		 */
+		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+			QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+	}
 
 	/* Absolute queue number out of 2K needs to be passed */
 	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -90,6 +92,9 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 		return -EIO;
 	}
 
+	if (vsi->type == ICE_VSI_VF)
+		return 0;
+
 	/* init queue specific tail register */
 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
 	writel(0, ring->tail);
@@ -132,6 +137,11 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
 	case ICE_VSI_PF:
 		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
 		break;
+	case ICE_VSI_VF:
+		/* Firmware expects vmvf_num to be absolute VF id */
+		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+		break;
 	default:
 		return;
 	}
@@ -285,6 +295,16 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 		vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
 		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
 		break;
+	case ICE_VSI_VF:
+		vsi->alloc_txq = pf->num_vf_qps;
+		vsi->alloc_rxq = pf->num_vf_qps;
+		/* pf->num_vf_msix includes (VF miscellaneous vector +
+		 * data queue interrupts). Since vsi->num_q_vectors is the
+		 * number of queue vectors, subtract 1 from the original
+		 * vector count
+		 */
+		vsi->num_q_vectors = pf->num_vf_msix - 1;
+		break;
 	default:
 		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
 			 vsi->type);
@@ -331,6 +351,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
 	struct ice_vsi_ctx ctxt;
 	enum ice_status status;
 
+	if (vsi->type == ICE_VSI_VF)
+		ctxt.vf_num = vsi->vf_id;
 	ctxt.vsi_num = vsi->vsi_num;
 
 	memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
@@ -466,6 +488,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
 		/* Setup default MSIX irq handler for VSI */
 		vsi->irq_handler = ice_msix_clean_rings;
 		break;
+	case ICE_VSI_VF:
+		if (ice_vsi_alloc_arrays(vsi, true))
+			goto err_rings;
+		break;
 	default:
 		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
 		goto unlock_pf;
@@ -685,6 +711,15 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
 			      BIT(cap->rss_table_entry_width));
 		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
 		break;
+	case ICE_VSI_VF:
+		/* VF VSI will get a small RSS table.
+		 * For VSI_LUT, the LUT size should be set to 64 bytes
+		 */
+		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+		vsi->rss_size = min_t(int, num_online_cpus(),
+				      BIT(cap->rss_table_entry_width));
+		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+		break;
 	default:
 		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
 			 vsi->type);
@@ -773,17 +808,17 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	 * Setup number and offset of Rx queues for all TCs for the VSI
 	 */
 
+	qcount = numq_tc;
 	/* qcount will change if RSS is enabled */
 	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
-		if (vsi->type == ICE_VSI_PF)
-			max_rss = ICE_MAX_LG_RSS_QS;
-		else
-			max_rss = ICE_MAX_SMALL_RSS_QS;
-
-		qcount = min_t(int, numq_tc, max_rss);
-		qcount = min_t(int, qcount, vsi->rss_size);
-	} else {
-		qcount = numq_tc;
+		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
+			if (vsi->type == ICE_VSI_PF)
+				max_rss = ICE_MAX_LG_RSS_QS;
+			else
+				max_rss = ICE_MAX_SMALL_RSS_QS;
+			qcount = min_t(int, numq_tc, max_rss);
+			qcount = min_t(int, qcount, vsi->rss_size);
+		}
 	}
 
 	/* find the (rounded up) power-of-2 of qcount */
@@ -813,6 +848,14 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	vsi->num_txq = qcount_tx;
 	vsi->num_rxq = offset;
 
+	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
+		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+		/* since there is a chance that num_rxq could have been changed
+		 * in the above for loop, make num_txq equal to num_rxq.
+		 */
+		vsi->num_txq = vsi->num_rxq;
+	}
+
 	/* Rx queue mapping */
 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
 	/* q_mapping buffer holds the info for the first queue allocated for
@@ -838,6 +881,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
 		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
 		break;
+	case ICE_VSI_VF:
+		/* VF VSI will get a small RSS table, which is a VSI LUT type */
+		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+		break;
 	default:
 		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
 			 vsi->type);
@@ -868,6 +916,11 @@ static int ice_vsi_init(struct ice_vsi *vsi)
 	case ICE_VSI_PF:
 		ctxt.flags = ICE_AQ_VSI_TYPE_PF;
 		break;
+	case ICE_VSI_VF:
+		ctxt.flags = ICE_AQ_VSI_TYPE_VF;
+		/* VF number here is the absolute VF number (0-255) */
+		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+		break;
 	default:
 		return -ENODEV;
 	}
@@ -961,6 +1014,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
 
 	q_vector->vsi = vsi;
 	q_vector->v_idx = v_idx;
+	if (vsi->type == ICE_VSI_VF)
+		goto out;
 	/* only set affinity_mask if the CPU is online */
 	if (cpu_online(v_idx))
 		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
@@ -973,6 +1028,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
 	netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
 		       NAPI_POLL_WEIGHT);
 
+out:
 	/* tie q_vector and VSI together */
 	vsi->q_vectors[v_idx] = q_vector;
 
@@ -1067,6 +1123,13 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
 		vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
 						  num_q_vectors, vsi->idx);
 		break;
+	case ICE_VSI_VF:
+		/* take VF misc vector and data vectors into account */
+		num_q_vectors = pf->num_vf_msix;
+		/* For VF VSI, reserve slots only from HW interrupts */
+		vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+						  num_q_vectors, vsi->idx);
+		break;
 	default:
 		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
 			 vsi->type);
@@ -1077,9 +1140,11 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
 		dev_err(&pf->pdev->dev,
 			"Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
 			num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
-		ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
-			     vsi->idx);
-		pf->num_avail_sw_msix += num_q_vectors;
+		if (vsi->type != ICE_VSI_VF) {
+			ice_free_res(vsi->back->sw_irq_tracker,
+				     vsi->sw_base_vector, vsi->idx);
+			pf->num_avail_sw_msix += num_q_vectors;
+		}
 		return -ENOENT;
 	}
 
@@ -1139,7 +1204,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		ring->vsi = vsi;
 		ring->dev = &pf->pdev->dev;
 		ring->count = vsi->num_desc;
-		ring->itr_setting = ICE_DFLT_TX_ITR;
 		vsi->tx_rings[i] = ring;
 	}
 
@@ -1159,7 +1223,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		ring->netdev = vsi->netdev;
 		ring->dev = &pf->pdev->dev;
 		ring->count = vsi->num_desc;
-		ring->itr_setting = ICE_DFLT_RX_ITR;
 		vsi->rx_rings[i] = ring;
 	}
 
@@ -1196,6 +1259,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 		tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
 		q_vector->num_ring_tx = tx_rings_per_v;
 		q_vector->tx.ring = NULL;
+		q_vector->tx.itr_idx = ICE_TX_ITR;
 		q_base = vsi->num_txq - tx_rings_rem;
 
 		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
@@ -1211,6 +1275,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 		rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
 		q_vector->num_ring_rx = rx_rings_per_v;
 		q_vector->rx.ring = NULL;
+		q_vector->rx.itr_idx = ICE_RX_ITR;
 		q_base = vsi->num_rxq - rx_rings_rem;
 
 		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
@@ -1512,6 +1577,9 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 	int err = 0;
 	u16 i;
 
+	if (vsi->type == ICE_VSI_VF)
+		goto setup_rings;
+
 	if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
 		vsi->max_frame = vsi->netdev->mtu +
 			ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
@@ -1519,6 +1587,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 		vsi->max_frame = ICE_RXBUF_2048;
 
 	vsi->rx_buf_len = ICE_RXBUF_2048;
+setup_rings:
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_rxq && !err; i++)
 		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
@@ -1615,6 +1684,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
 }
 
 /**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ * @vector: HW vector index to apply the interrupt throttling to
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+static void
+ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
+{
+	u8 itr_gran = hw->itr_gran;
+
+	if (q_vector->num_ring_rx) {
+		struct ice_ring_container *rc = &q_vector->rx;
+
+		rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
+		rc->latency_range = ICE_LOW_LATENCY;
+		wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+	}
+
+	if (q_vector->num_ring_tx) {
+		struct ice_ring_container *rc = &q_vector->tx;
+
+		rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
+		rc->latency_range = ICE_LOW_LATENCY;
+		wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+	}
+}
+
+/**
  * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
  * @vsi: the VSI being configured
  */
@@ -1624,31 +1724,13 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
 	u16 vector = vsi->hw_base_vector;
 	struct ice_hw *hw = &pf->hw;
 	u32 txq = 0, rxq = 0;
-	int i, q, itr;
-	u8 itr_gran;
+	int i, q;
 
 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
 
-		itr_gran = hw->itr_gran;
-
-		q_vector->intrl = ICE_DFLT_INTRL;
-
-		if (q_vector->num_ring_rx) {
-			q_vector->rx.itr =
-				ITR_TO_REG(vsi->rx_rings[rxq]->itr_setting,
-					   itr_gran);
-			q_vector->rx.latency_range = ICE_LOW_LATENCY;
-		}
+		ice_cfg_itr(hw, q_vector, vector);
 
-		if (q_vector->num_ring_tx) {
-			q_vector->tx.itr =
-				ITR_TO_REG(vsi->tx_rings[txq]->itr_setting,
-					   itr_gran);
-			q_vector->tx.latency_range = ICE_LOW_LATENCY;
-		}
-		wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
-		wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
 		wr32(hw, GLINT_RATE(vector),
 		     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
 
@@ -1664,23 +1746,33 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
 		 * tracked for this PF.
 		 */
 		for (q = 0; q < q_vector->num_ring_tx; q++) {
+			int itr_idx = q_vector->tx.itr_idx;
 			u32 val;
 
-			itr = ICE_ITR_NONE;
-			val = QINT_TQCTL_CAUSE_ENA_M |
-			      (itr << QINT_TQCTL_ITR_INDX_S) |
-			      (vector << QINT_TQCTL_MSIX_INDX_S);
+			if (vsi->type == ICE_VSI_VF)
+				val = QINT_TQCTL_CAUSE_ENA_M |
+				      (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+				      ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+			else
+				val = QINT_TQCTL_CAUSE_ENA_M |
+				      (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+				      (vector << QINT_TQCTL_MSIX_INDX_S);
 			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
 			txq++;
 		}
 
 		for (q = 0; q < q_vector->num_ring_rx; q++) {
+			int itr_idx = q_vector->rx.itr_idx;
 			u32 val;
 
-			itr = ICE_ITR_NONE;
-			val = QINT_RQCTL_CAUSE_ENA_M |
-			      (itr << QINT_RQCTL_ITR_INDX_S) |
-			      (vector << QINT_RQCTL_MSIX_INDX_S);
+			if (vsi->type == ICE_VSI_VF)
+				val = QINT_RQCTL_CAUSE_ENA_M |
+				      (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+				      ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+			else
+				val = QINT_RQCTL_CAUSE_ENA_M |
+				      (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+				      (vector << QINT_RQCTL_MSIX_INDX_S);
 			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
 			rxq++;
 		}
@@ -1784,8 +1876,11 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
 /**
  * ice_vsi_stop_tx_rings - Disable Tx rings
  * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative id of VF/VM
 */
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+			  u16 rel_vmvf_num)
 {
 	struct ice_pf *pf = vsi->back;
 	struct ice_hw *hw = &pf->hw;
@@ -1837,7 +1932,7 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
 		     GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
 	}
 	status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
-				 NULL);
+				 rst_src, rel_vmvf_num, NULL);
 	/* if the disable queue command was exercised during an active reset
 	 * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
 	 * the reset operation disables queues at the hardware level anyway.
@@ -1934,7 +2029,7 @@ err_out:
 */
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
-	      enum ice_vsi_type type, u16 __always_unused vf_id)
+	      enum ice_vsi_type type, u16 vf_id)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct device *dev = &pf->pdev->dev;
@@ -1949,6 +2044,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 
 	vsi->port_info = pi;
 	vsi->vsw = pf->first_sw;
+	if (vsi->type == ICE_VSI_VF)
+		vsi->vf_id = vf_id;
 
 	if (ice_vsi_get_qs(vsi)) {
 		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
@@ -1987,6 +2084,34 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
1987 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 2084 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
1988 ice_vsi_cfg_rss_lut_key(vsi); 2085 ice_vsi_cfg_rss_lut_key(vsi);
1989 break; 2086 break;
2087 case ICE_VSI_VF:
2088 /* VF driver will take care of creating netdev for this type and
2089 * map queues to vectors through Virtchnl, PF driver only
2090 * creates a VSI and corresponding structures for bookkeeping
2091 * purpose
2092 */
2093 ret = ice_vsi_alloc_q_vectors(vsi);
2094 if (ret)
2095 goto unroll_vsi_init;
2096
2097 ret = ice_vsi_alloc_rings(vsi);
2098 if (ret)
2099 goto unroll_alloc_q_vector;
2100
2101 /* Setup Vector base only during VF init phase or when VF asks
2102 * for more vectors than assigned number. In all other cases,
2103 * assign hw_base_vector to the value given earlier.
2104 */
2105 if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) {
2106 ret = ice_vsi_setup_vector_base(vsi);
2107 if (ret)
2108 goto unroll_vector_base;
2109 } else {
2110 vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
2111 }
2112 pf->q_left_tx -= vsi->alloc_txq;
2113 pf->q_left_rx -= vsi->alloc_rxq;
2114 break;
1990 default: 2115 default:
1991 /* if VSI type is not recognized, clean up the resources and 2116 /* if VSI type is not recognized, clean up the resources and
1992 * exit 2117 * exit
@@ -2045,8 +2170,8 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
2045 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 2170 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2046 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2171 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2047 2172
2048 wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0); 2173 wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0);
2049 wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0); 2174 wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0);
2050 for (q = 0; q < q_vector->num_ring_tx; q++) { 2175 for (q = 0; q < q_vector->num_ring_tx; q++) {
2051 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 2176 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2052 txq++; 2177 txq++;
@@ -2077,6 +2202,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
2077 return; 2202 return;
2078 2203
2079 ice_vsi_release_msix(vsi); 2204 ice_vsi_release_msix(vsi);
2205 if (vsi->type == ICE_VSI_VF)
2206 return;
2080 2207
2081 vsi->irqs_ready = false; 2208 vsi->irqs_ready = false;
2082 for (i = 0; i < vsi->num_q_vectors; i++) { 2209 for (i = 0; i < vsi->num_q_vectors; i++) {
@@ -2317,10 +2444,12 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
2317int ice_vsi_release(struct ice_vsi *vsi) 2444int ice_vsi_release(struct ice_vsi *vsi)
2318{ 2445{
2319 struct ice_pf *pf; 2446 struct ice_pf *pf;
2447 struct ice_vf *vf;
2320 2448
2321 if (!vsi->back) 2449 if (!vsi->back)
2322 return -ENODEV; 2450 return -ENODEV;
2323 pf = vsi->back; 2451 pf = vsi->back;
2452 vf = &pf->vf[vsi->vf_id];
2324 /* do not unregister and free netdevs while driver is in the reset 2453 /* do not unregister and free netdevs while driver is in the reset
2325 * recovery pending state. Since reset/rebuild happens through PF 2454 * recovery pending state. Since reset/rebuild happens through PF
2326 * service task workqueue, its not a good idea to unregister netdev 2455 * service task workqueue, its not a good idea to unregister netdev
@@ -2342,10 +2471,23 @@ int ice_vsi_release(struct ice_vsi *vsi)
2342 ice_vsi_close(vsi); 2471 ice_vsi_close(vsi);
2343 2472
2344 /* reclaim interrupt vectors back to PF */ 2473 /* reclaim interrupt vectors back to PF */
2345 ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); 2474 if (vsi->type != ICE_VSI_VF) {
2346 pf->num_avail_sw_msix += vsi->num_q_vectors; 2475 /* reclaim SW interrupts back to the common pool */
2347 ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); 2476 ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
2348 pf->num_avail_hw_msix += vsi->num_q_vectors; 2477 vsi->idx);
2478 pf->num_avail_sw_msix += vsi->num_q_vectors;
2479 /* reclaim HW interrupts back to the common pool */
2480 ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
2481 vsi->idx);
2482 pf->num_avail_hw_msix += vsi->num_q_vectors;
2483 } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
2484	/* Reclaim VF resources only when freeing all VFs or when
2485	 * vector reassignment is requested
2486 */
2487 ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
2488 vsi->idx);
2489 pf->num_avail_hw_msix += pf->num_vf_msix;
2490 }
2349 2491
2350 ice_remove_vsi_fltr(&pf->hw, vsi->idx); 2492 ice_remove_vsi_fltr(&pf->hw, vsi->idx);
2351 ice_vsi_delete(vsi); 2493 ice_vsi_delete(vsi);
@@ -2414,6 +2556,22 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
2414 2556
2415 ice_vsi_map_rings_to_vectors(vsi); 2557 ice_vsi_map_rings_to_vectors(vsi);
2416 break; 2558 break;
2559 case ICE_VSI_VF:
2560 ret = ice_vsi_alloc_q_vectors(vsi);
2561 if (ret)
2562 goto err_rings;
2563
2564 ret = ice_vsi_setup_vector_base(vsi);
2565 if (ret)
2566 goto err_vectors;
2567
2568 ret = ice_vsi_alloc_rings(vsi);
2569 if (ret)
2570 goto err_vectors;
2571
2572 vsi->back->q_left_tx -= vsi->alloc_txq;
2573 vsi->back->q_left_rx -= vsi->alloc_rxq;
2574 break;
2417 default: 2575 default:
2418 break; 2576 break;
2419 } 2577 }
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 2617afe01c82..677db40338f5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
31 31
32int ice_vsi_stop_rx_rings(struct ice_vsi *vsi); 32int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
33 33
34int ice_vsi_stop_tx_rings(struct ice_vsi *vsi); 34int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
35 u16 rel_vmvf_num);
35 36
36int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena); 37int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
37 38
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 46ccf265c218..8f61b375e768 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -8,7 +8,7 @@
8#include "ice.h" 8#include "ice.h"
9#include "ice_lib.h" 9#include "ice_lib.h"
10 10
11#define DRV_VERSION "0.7.1-k" 11#define DRV_VERSION "0.7.2-k"
12#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" 12#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
13const char ice_drv_ver[] = DRV_VERSION; 13const char ice_drv_ver[] = DRV_VERSION;
14static const char ice_driver_string[] = DRV_SUMMARY; 14static const char ice_driver_string[] = DRV_SUMMARY;
@@ -342,6 +342,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
342{ 342{
343 struct ice_hw *hw = &pf->hw; 343 struct ice_hw *hw = &pf->hw;
344 344
345 /* Notify VFs of impending reset */
346 if (ice_check_sq_alive(hw, &hw->mailboxq))
347 ice_vc_notify_reset(pf);
348
345 /* disable the VSIs and their queues that are not already DOWN */ 349 /* disable the VSIs and their queues that are not already DOWN */
346 ice_pf_dis_all_vsi(pf); 350 ice_pf_dis_all_vsi(pf);
347 351
@@ -661,6 +665,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
661 } 665 }
662 } 666 }
663 667
668 ice_vc_notify_link_state(pf);
669
664 return 0; 670 return 0;
665} 671}
666 672
@@ -711,6 +717,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
711 cq = &hw->adminq; 717 cq = &hw->adminq;
712 qtype = "Admin"; 718 qtype = "Admin";
713 break; 719 break;
720 case ICE_CTL_Q_MAILBOX:
721 cq = &hw->mailboxq;
722 qtype = "Mailbox";
723 break;
714 default: 724 default:
715 dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n", 725 dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
716 q_type); 726 q_type);
@@ -792,6 +802,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
792 dev_err(&pf->pdev->dev, 802 dev_err(&pf->pdev->dev,
793 "Could not handle link event\n"); 803 "Could not handle link event\n");
794 break; 804 break;
805 case ice_mbx_opc_send_msg_to_pf:
806 ice_vc_process_vf_msg(pf, &event);
807 break;
795 case ice_aqc_opc_fw_logging: 808 case ice_aqc_opc_fw_logging:
796 ice_output_fw_log(hw, &event.desc, event.msg_buf); 809 ice_output_fw_log(hw, &event.desc, event.msg_buf);
797 break; 810 break;
@@ -851,6 +864,28 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
851} 864}
852 865
853/** 866/**
867 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
868 * @pf: board private structure
869 */
870static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
871{
872 struct ice_hw *hw = &pf->hw;
873
874 if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
875 return;
876
877 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
878 return;
879
880 clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
881
882 if (ice_ctrlq_pending(hw, &hw->mailboxq))
883 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
884
885 ice_flush(hw);
886}
887
888/**
854 * ice_service_task_schedule - schedule the service task to wake up 889 * ice_service_task_schedule - schedule the service task to wake up
855 * @pf: board private structure 890 * @pf: board private structure
856 * 891 *
@@ -916,6 +951,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
916 struct ice_hw *hw = &pf->hw; 951 struct ice_hw *hw = &pf->hw;
917 bool mdd_detected = false; 952 bool mdd_detected = false;
918 u32 reg; 953 u32 reg;
954 int i;
919 955
920 if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state)) 956 if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
921 return; 957 return;
@@ -1005,6 +1041,51 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
1005 } 1041 }
1006 } 1042 }
1007 1043
1044 /* see if one of the VFs needs to be reset */
1045 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
1046 struct ice_vf *vf = &pf->vf[i];
1047
1048 reg = rd32(hw, VP_MDET_TX_PQM(i));
1049 if (reg & VP_MDET_TX_PQM_VALID_M) {
1050 wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1051 vf->num_mdd_events++;
1052 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
1053 i);
1054 }
1055
1056 reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1057 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1058 wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1059 vf->num_mdd_events++;
1060 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
1061 i);
1062 }
1063
1064 reg = rd32(hw, VP_MDET_TX_TDPU(i));
1065 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1066 wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1067 vf->num_mdd_events++;
1068 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
1069 i);
1070 }
1071
1072 reg = rd32(hw, VP_MDET_RX(i));
1073 if (reg & VP_MDET_RX_VALID_M) {
1074 wr32(hw, VP_MDET_RX(i), 0xFFFF);
1075 vf->num_mdd_events++;
1076 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
1077 i);
1078 }
1079
1080 if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
1081 dev_info(&pf->pdev->dev,
1082 "Too many MDD events on VF %d, disabled\n", i);
1083 dev_info(&pf->pdev->dev,
1084 "Use PF Control I/F to re-enable the VF\n");
1085 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1086 }
1087 }
1088
1008 /* re-enable MDD interrupt cause */ 1089 /* re-enable MDD interrupt cause */
1009 clear_bit(__ICE_MDD_EVENT_PENDING, pf->state); 1090 clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
1010 reg = rd32(hw, PFINT_OICR_ENA); 1091 reg = rd32(hw, PFINT_OICR_ENA);
@@ -1038,8 +1119,10 @@ static void ice_service_task(struct work_struct *work)
1038 ice_check_for_hang_subtask(pf); 1119 ice_check_for_hang_subtask(pf);
1039 ice_sync_fltr_subtask(pf); 1120 ice_sync_fltr_subtask(pf);
1040 ice_handle_mdd_event(pf); 1121 ice_handle_mdd_event(pf);
1122 ice_process_vflr_event(pf);
1041 ice_watchdog_subtask(pf); 1123 ice_watchdog_subtask(pf);
1042 ice_clean_adminq_subtask(pf); 1124 ice_clean_adminq_subtask(pf);
1125 ice_clean_mailboxq_subtask(pf);
1043 1126
1044 /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ 1127 /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
1045 ice_service_task_complete(pf); 1128 ice_service_task_complete(pf);
@@ -1050,6 +1133,8 @@ static void ice_service_task(struct work_struct *work)
1050 */ 1133 */
1051 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 1134 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
1052 test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || 1135 test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
1136 test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1137 test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
1053 test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) 1138 test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
1054 mod_timer(&pf->serv_tmr, jiffies); 1139 mod_timer(&pf->serv_tmr, jiffies);
1055} 1140}
@@ -1064,6 +1149,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
1064 hw->adminq.num_sq_entries = ICE_AQ_LEN; 1149 hw->adminq.num_sq_entries = ICE_AQ_LEN;
1065 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 1150 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
1066 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 1151 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
1152 hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
1153 hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
1154 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
1155 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
1067} 1156}
1068 1157
1069/** 1158/**
@@ -1197,6 +1286,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
1197 PFINT_OICR_MAL_DETECT_M | 1286 PFINT_OICR_MAL_DETECT_M |
1198 PFINT_OICR_GRST_M | 1287 PFINT_OICR_GRST_M |
1199 PFINT_OICR_PCI_EXCEPTION_M | 1288 PFINT_OICR_PCI_EXCEPTION_M |
1289 PFINT_OICR_VFLR_M |
1200 PFINT_OICR_HMC_ERR_M | 1290 PFINT_OICR_HMC_ERR_M |
1201 PFINT_OICR_PE_CRITERR_M); 1291 PFINT_OICR_PE_CRITERR_M);
1202 1292
@@ -1220,6 +1310,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
1220 u32 oicr, ena_mask; 1310 u32 oicr, ena_mask;
1221 1311
1222 set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); 1312 set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
1313 set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1223 1314
1224 oicr = rd32(hw, PFINT_OICR); 1315 oicr = rd32(hw, PFINT_OICR);
1225 ena_mask = rd32(hw, PFINT_OICR_ENA); 1316 ena_mask = rd32(hw, PFINT_OICR_ENA);
@@ -1228,6 +1319,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
1228 ena_mask &= ~PFINT_OICR_MAL_DETECT_M; 1319 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
1229 set_bit(__ICE_MDD_EVENT_PENDING, pf->state); 1320 set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
1230 } 1321 }
1322 if (oicr & PFINT_OICR_VFLR_M) {
1323 ena_mask &= ~PFINT_OICR_VFLR_M;
1324 set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
1325 }
1231 1326
1232 if (oicr & PFINT_OICR_GRST_M) { 1327 if (oicr & PFINT_OICR_GRST_M) {
1233 u32 reset; 1328 u32 reset;
@@ -1406,6 +1501,11 @@ skip_req_irq:
1406 PFINT_FW_CTL_CAUSE_ENA_M); 1501 PFINT_FW_CTL_CAUSE_ENA_M);
1407 wr32(hw, PFINT_FW_CTL, val); 1502 wr32(hw, PFINT_FW_CTL, val);
1408 1503
1504 /* This enables Mailbox queue Interrupt causes */
1505 val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
1506 PFINT_MBX_CTL_CAUSE_ENA_M);
1507 wr32(hw, PFINT_MBX_CTL, val);
1508
1409 itr_gran = hw->itr_gran; 1509 itr_gran = hw->itr_gran;
1410 1510
1411 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), 1511 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
@@ -1775,6 +1875,15 @@ static void ice_init_pf(struct ice_pf *pf)
1775{ 1875{
1776 bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); 1876 bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
1777 set_bit(ICE_FLAG_MSIX_ENA, pf->flags); 1877 set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
1878#ifdef CONFIG_PCI_IOV
1879 if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
1880 struct ice_hw *hw = &pf->hw;
1881
1882 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
1883 pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
1884 ICE_MAX_VF_COUNT);
1885 }
1886#endif /* CONFIG_PCI_IOV */
1778 1887
1779 mutex_init(&pf->sw_mutex); 1888 mutex_init(&pf->sw_mutex);
1780 mutex_init(&pf->avail_q_mutex); 1889 mutex_init(&pf->avail_q_mutex);
@@ -2138,6 +2247,8 @@ static void ice_remove(struct pci_dev *pdev)
2138 set_bit(__ICE_DOWN, pf->state); 2247 set_bit(__ICE_DOWN, pf->state);
2139 ice_service_task_stop(pf); 2248 ice_service_task_stop(pf);
2140 2249
2250 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
2251 ice_free_vfs(pf);
2141 ice_vsi_release_all(pf); 2252 ice_vsi_release_all(pf);
2142 ice_free_irq_msix_misc(pf); 2253 ice_free_irq_msix_misc(pf);
2143 ice_for_each_vsi(pf, i) { 2254 ice_for_each_vsi(pf, i) {
@@ -2173,6 +2284,7 @@ static struct pci_driver ice_driver = {
2173 .id_table = ice_pci_tbl, 2284 .id_table = ice_pci_tbl,
2174 .probe = ice_probe, 2285 .probe = ice_probe,
2175 .remove = ice_remove, 2286 .remove = ice_remove,
2287 .sriov_configure = ice_sriov_configure,
2176}; 2288};
2177 2289
2178/** 2290/**
@@ -2908,7 +3020,7 @@ int ice_down(struct ice_vsi *vsi)
2908 } 3020 }
2909 3021
2910 ice_vsi_dis_irq(vsi); 3022 ice_vsi_dis_irq(vsi);
2911 tx_err = ice_vsi_stop_tx_rings(vsi); 3023 tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
2912 if (tx_err) 3024 if (tx_err)
2913 netdev_err(vsi->netdev, 3025 netdev_err(vsi->netdev,
2914 "Failed stop Tx rings, VSI %d error %d\n", 3026 "Failed stop Tx rings, VSI %d error %d\n",
@@ -3102,13 +3214,14 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
3102 3214
3103 set_bit(__ICE_NEEDS_RESTART, vsi->state); 3215 set_bit(__ICE_NEEDS_RESTART, vsi->state);
3104 3216
3105 if (vsi->netdev && netif_running(vsi->netdev) && 3217 if (vsi->type == ICE_VSI_PF && vsi->netdev) {
3106 vsi->type == ICE_VSI_PF) { 3218 if (netif_running(vsi->netdev)) {
3107 rtnl_lock(); 3219 rtnl_lock();
3108 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3220 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3109 rtnl_unlock(); 3221 rtnl_unlock();
3110 } else { 3222 } else {
3111 ice_vsi_close(vsi); 3223 ice_vsi_close(vsi);
3224 }
3112 } 3225 }
3113} 3226}
3114 3227
@@ -3120,12 +3233,16 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
3120{ 3233{
3121 int err = 0; 3234 int err = 0;
3122 3235
3123 if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) 3236 if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
3124 if (vsi->netdev && netif_running(vsi->netdev)) { 3237 vsi->netdev) {
3238 if (netif_running(vsi->netdev)) {
3125 rtnl_lock(); 3239 rtnl_lock();
3126 err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3240 err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3127 rtnl_unlock(); 3241 rtnl_unlock();
3242 } else {
3243 err = ice_vsi_open(vsi);
3128 } 3244 }
3245 }
3129 3246
3130 return err; 3247 return err;
3131} 3248}
@@ -3174,6 +3291,10 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
3174 if (!pf->vsi[i]) 3291 if (!pf->vsi[i])
3175 continue; 3292 continue;
3176 3293
3294 /* VF VSI rebuild isn't supported yet */
3295 if (pf->vsi[i]->type == ICE_VSI_VF)
3296 continue;
3297
3177 err = ice_vsi_rebuild(pf->vsi[i]); 3298 err = ice_vsi_rebuild(pf->vsi[i]);
3178 if (err) { 3299 if (err) {
3179 dev_err(&pf->pdev->dev, 3300 dev_err(&pf->pdev->dev,
@@ -3310,6 +3431,7 @@ static void ice_rebuild(struct ice_pf *pf)
3310 goto err_vsi_rebuild; 3431 goto err_vsi_rebuild;
3311 } 3432 }
3312 3433
3434 ice_reset_all_vfs(pf, true);
3313 /* if we get here, reset flow is successful */ 3435 /* if we get here, reset flow is successful */
3314 clear_bit(__ICE_RESET_FAILED, pf->state); 3436 clear_bit(__ICE_RESET_FAILED, pf->state);
3315 return; 3437 return;
@@ -3818,6 +3940,12 @@ static const struct net_device_ops ice_netdev_ops = {
3818 .ndo_validate_addr = eth_validate_addr, 3940 .ndo_validate_addr = eth_validate_addr,
3819 .ndo_change_mtu = ice_change_mtu, 3941 .ndo_change_mtu = ice_change_mtu,
3820 .ndo_get_stats64 = ice_get_stats64, 3942 .ndo_get_stats64 = ice_get_stats64,
3943 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
3944 .ndo_set_vf_mac = ice_set_vf_mac,
3945 .ndo_get_vf_config = ice_get_vf_cfg,
3946 .ndo_set_vf_trust = ice_set_vf_trust,
3947 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
3948 .ndo_set_vf_link_state = ice_set_vf_link_state,
3821 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 3949 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
3822 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 3950 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
3823 .ndo_set_features = ice_set_features, 3951 .ndo_set_features = ice_set_features,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
new file mode 100644
index 000000000000..027eba4e13f8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -0,0 +1,127 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_common.h"
5#include "ice_adminq_cmd.h"
6#include "ice_sriov.h"
7
8/**
9 * ice_aq_send_msg_to_vf
10 * @hw: pointer to the hardware structure
11 * @vfid: VF ID to send msg
12 * @v_opcode: opcodes for VF-PF communication
13 * @v_retval: return error code
14 * @msg: pointer to the msg buffer
15 * @msglen: msg length
16 * @cd: pointer to command details
17 *
18	 * Send a message to the VF driver (0x0802) over the mailbox
19	 * queue; the message is sent asynchronously via the
20	 * ice_sq_send_cmd() function
21 */
22enum ice_status
23ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
24 u8 *msg, u16 msglen, struct ice_sq_cd *cd)
25{
26 struct ice_aqc_pf_vf_msg *cmd;
27 struct ice_aq_desc desc;
28
29 ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
30
31 cmd = &desc.params.virt;
32 cmd->id = cpu_to_le32(vfid);
33
34 desc.cookie_high = cpu_to_le32(v_opcode);
35 desc.cookie_low = cpu_to_le32(v_retval);
36
37 if (msglen)
38 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
39
40 return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
41}
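
A minimal caller sketch for the helper above (kernel context assumed; the VF ID, opcode, and zero-length payload are illustrative):

    /* hypothetical example: post a zero-length VIRTCHNL event to VF 0;
     * NULL command details are fine when nothing needs tracking
     */
    enum ice_status status;

    status = ice_aq_send_msg_to_vf(hw, 0, VIRTCHNL_OP_EVENT, 0, NULL, 0, NULL);
    if (status)
        dev_err(ice_hw_to_dev(hw), "failed to send message to VF 0\n");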
42
43/**
44 * ice_conv_link_speed_to_virtchnl
45 * @adv_link_support: determines the format of the returned link speed
46 * @link_speed: variable containing the link_speed to be converted
47 *
48 * Convert link speed supported by HW to link speed supported by virtchnl.
49 * If adv_link_support is true, then return link speed in Mbps. Else return
50 * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
51 * needs to cast back to an enum virtchnl_link_speed in the case where
52 * adv_link_support is false, but when adv_link_support is true the caller can
53 * expect the speed in Mbps.
54 */
55u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
56{
57 u32 speed;
58
59 if (adv_link_support)
60 switch (link_speed) {
61 case ICE_AQ_LINK_SPEED_10MB:
62 speed = ICE_LINK_SPEED_10MBPS;
63 break;
64 case ICE_AQ_LINK_SPEED_100MB:
65 speed = ICE_LINK_SPEED_100MBPS;
66 break;
67 case ICE_AQ_LINK_SPEED_1000MB:
68 speed = ICE_LINK_SPEED_1000MBPS;
69 break;
70 case ICE_AQ_LINK_SPEED_2500MB:
71 speed = ICE_LINK_SPEED_2500MBPS;
72 break;
73 case ICE_AQ_LINK_SPEED_5GB:
74 speed = ICE_LINK_SPEED_5000MBPS;
75 break;
76 case ICE_AQ_LINK_SPEED_10GB:
77 speed = ICE_LINK_SPEED_10000MBPS;
78 break;
79 case ICE_AQ_LINK_SPEED_20GB:
80 speed = ICE_LINK_SPEED_20000MBPS;
81 break;
82 case ICE_AQ_LINK_SPEED_25GB:
83 speed = ICE_LINK_SPEED_25000MBPS;
84 break;
85 case ICE_AQ_LINK_SPEED_40GB:
86 speed = ICE_LINK_SPEED_40000MBPS;
87 break;
88 default:
89 speed = ICE_LINK_SPEED_UNKNOWN;
90 break;
91 }
92 else
93 /* Virtchnl speeds are not defined for every speed supported in
94 * the hardware. To maintain compatibility with older AVF
95	 * drivers, the new speed values are resolved to the closest
96	 * known virtchnl speeds when reporting the speed
97 */
98 switch (link_speed) {
99 case ICE_AQ_LINK_SPEED_10MB:
100 case ICE_AQ_LINK_SPEED_100MB:
101 speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
102 break;
103 case ICE_AQ_LINK_SPEED_1000MB:
104 case ICE_AQ_LINK_SPEED_2500MB:
105 case ICE_AQ_LINK_SPEED_5GB:
106 speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
107 break;
108 case ICE_AQ_LINK_SPEED_10GB:
109 speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
110 break;
111 case ICE_AQ_LINK_SPEED_20GB:
112 speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
113 break;
114 case ICE_AQ_LINK_SPEED_25GB:
115 speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
116 break;
117 case ICE_AQ_LINK_SPEED_40GB:
119 speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
120 break;
121 default:
122 speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
123 break;
124 }
125
126 return speed;
127}
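
For example, per the two mappings above (a sketch; caller context assumed):

    /* adv_link_support = true: raw speed in Mbps */
    u32 mbps = ice_conv_link_speed_to_virtchnl(true, ICE_AQ_LINK_SPEED_25GB);
    /* mbps == ICE_LINK_SPEED_25000MBPS */

    /* adv_link_support = false: legacy enum, cast back by the caller */
    enum virtchnl_link_speed legacy = (enum virtchnl_link_speed)
        ice_conv_link_speed_to_virtchnl(false, ICE_AQ_LINK_SPEED_5GB);
    /* legacy == VIRTCHNL_LINK_SPEED_1GB, the closest known legacy speed */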
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
new file mode 100644
index 000000000000..3d78a0795138
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -0,0 +1,34 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2018, Intel Corporation. */
3
4#ifndef _ICE_SRIOV_H_
5#define _ICE_SRIOV_H_
6
7#include "ice_common.h"
8
9#ifdef CONFIG_PCI_IOV
10enum ice_status
11ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
12 u8 *msg, u16 msglen, struct ice_sq_cd *cd);
13
14u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
15#else /* CONFIG_PCI_IOV */
16static inline enum ice_status
17ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
18 u16 __always_unused vfid, u32 __always_unused v_opcode,
19 u32 __always_unused v_retval, u8 __always_unused *msg,
20 u16 __always_unused msglen,
21 struct ice_sq_cd __always_unused *cd)
22{
23 return 0;
24}
25
26static inline u32
27ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
28 u16 __always_unused link_speed)
29{
30 return 0;
31}
32
33#endif /* CONFIG_PCI_IOV */
34#endif /* _ICE_SRIOV_H_ */
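
One effect of the stub pattern above is that callers need no CONFIG_PCI_IOV guards of their own; a hypothetical caller sketch:

    /* compiles either way: with CONFIG_PCI_IOV unset the inline stub
     * returns 0 immediately and the call folds away
     */
    static void notify_vf_event(struct ice_hw *hw, u16 vfid)
    {
        ice_aq_send_msg_to_vf(hw, vfid, VIRTCHNL_OP_EVENT, 0, NULL, 0, NULL);
    }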
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index d2dae913d81e..f49f299ddf2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -6,6 +6,9 @@
6 6
7/* Error Codes */ 7/* Error Codes */
8enum ice_status { 8enum ice_status {
9 ICE_SUCCESS = 0,
10
11 /* Generic codes : Range -1..-49 */
9 ICE_ERR_PARAM = -1, 12 ICE_ERR_PARAM = -1,
10 ICE_ERR_NOT_IMPL = -2, 13 ICE_ERR_NOT_IMPL = -2,
11 ICE_ERR_NOT_READY = -3, 14 ICE_ERR_NOT_READY = -3,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index e949224b5282..33403f39f1b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -187,6 +187,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
187 if (!vsi_ctx->alloc_from_pool) 187 if (!vsi_ctx->alloc_from_pool)
188 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | 188 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
189 ICE_AQ_VSI_IS_VALID); 189 ICE_AQ_VSI_IS_VALID);
190 cmd->vf_id = vsi_ctx->vf_num;
190 191
191 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 192 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
192 193
@@ -655,6 +656,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
655 u8 *eth_hdr; 656 u8 *eth_hdr;
656 u32 act = 0; 657 u32 act = 0;
657 __be16 *off; 658 __be16 *off;
659 u8 q_rgn;
658 660
659 if (opc == ice_aqc_opc_remove_sw_rules) { 661 if (opc == ice_aqc_opc_remove_sw_rules) {
660 s_rule->pdata.lkup_tx_rx.act = 0; 662 s_rule->pdata.lkup_tx_rx.act = 0;
@@ -693,14 +695,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
693 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & 695 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
694 ICE_SINGLE_ACT_Q_INDEX_M; 696 ICE_SINGLE_ACT_Q_INDEX_M;
695 break; 697 break;
698 case ICE_DROP_PACKET:
699 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
700 ICE_SINGLE_ACT_VALID_BIT;
701 break;
696 case ICE_FWD_TO_QGRP: 702 case ICE_FWD_TO_QGRP:
703 q_rgn = f_info->qgrp_size > 0 ?
704 (u8)ilog2(f_info->qgrp_size) : 0;
697 act |= ICE_SINGLE_ACT_TO_Q; 705 act |= ICE_SINGLE_ACT_TO_Q;
698 act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) & 706 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
707 ICE_SINGLE_ACT_Q_INDEX_M;
708 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
699 ICE_SINGLE_ACT_Q_REGION_M; 709 ICE_SINGLE_ACT_Q_REGION_M;
700 break; 710 break;
701 case ICE_DROP_PACKET:
702 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
703 break;
704 default: 711 default:
705 return; 712 return;
706 } 713 }
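
The new Q_REGION handling above encodes the queue-group size as its log2. A standalone stand-in for the kernel's ilog2() showing the mapping (values are hypothetical):

    #include <stdio.h>

    /* userspace stand-in for the kernel's ilog2() */
    static unsigned int ilog2_u32(unsigned int v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        /* a queue group of 8 queues is encoded as Q_REGION value 3 */
        printf("qgrp_size 8 -> q_rgn %u\n", ilog2_u32(8));
        return 0;
    }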
@@ -1415,8 +1422,8 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
1415 fm_list->vsi_count--; 1422 fm_list->vsi_count--;
1416 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); 1423 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
1417 1424
1418 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) || 1425 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
1419 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) { 1426 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
1420 struct ice_vsi_list_map_info *vsi_list_info = 1427 struct ice_vsi_list_map_info *vsi_list_info =
1421 fm_list->vsi_list_info; 1428 fm_list->vsi_list_info;
1422 u16 rem_vsi_handle; 1429 u16 rem_vsi_handle;
@@ -1425,6 +1432,8 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
1425 ICE_MAX_VSI); 1432 ICE_MAX_VSI);
1426 if (!ice_is_vsi_valid(hw, rem_vsi_handle)) 1433 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
1427 return ICE_ERR_OUT_OF_RANGE; 1434 return ICE_ERR_OUT_OF_RANGE;
1435
1436 /* Make sure VSI list is empty before removing it below */
1428 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, 1437 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
1429 vsi_list_id, true, 1438 vsi_list_id, true,
1430 ice_aqc_opc_update_sw_rules, 1439 ice_aqc_opc_update_sw_rules,
@@ -1432,16 +1441,34 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
1432 if (status) 1441 if (status)
1433 return status; 1442 return status;
1434 1443
1444 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
1445 tmp_fltr_info.fwd_id.hw_vsi_id =
1446 ice_get_hw_vsi_num(hw, rem_vsi_handle);
1447 tmp_fltr_info.vsi_handle = rem_vsi_handle;
1448 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
1449 if (status) {
1450 ice_debug(hw, ICE_DBG_SW,
1451 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
1452 tmp_fltr_info.fwd_id.hw_vsi_id, status);
1453 return status;
1454 }
1455
1456 fm_list->fltr_info = tmp_fltr_info;
1457 }
1458
1459 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
1460 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
1461 struct ice_vsi_list_map_info *vsi_list_info =
1462 fm_list->vsi_list_info;
1463
1435 /* Remove the VSI list since it is no longer used */ 1464 /* Remove the VSI list since it is no longer used */
1436 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); 1465 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
1437 if (status) 1466 if (status) {
1467 ice_debug(hw, ICE_DBG_SW,
1468 "Failed to remove VSI list %d, error %d\n",
1469 vsi_list_id, status);
1438 return status; 1470 return status;
1439 1471 }
1440 /* Change the list entry action from VSI_LIST to VSI */
1441 fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1442 fm_list->fltr_info.fwd_id.hw_vsi_id =
1443 ice_get_hw_vsi_num(hw, rem_vsi_handle);
1444 fm_list->fltr_info.vsi_handle = rem_vsi_handle;
1445 1472
1446 list_del(&vsi_list_info->list_entry); 1473 list_del(&vsi_list_info->list_entry);
1447 devm_kfree(ice_hw_to_dev(hw), vsi_list_info); 1474 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
@@ -1983,12 +2010,12 @@ out:
1983enum ice_status 2010enum ice_status
1984ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) 2011ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
1985{ 2012{
1986 struct ice_fltr_list_entry *list_itr; 2013 struct ice_fltr_list_entry *list_itr, *tmp;
1987 2014
1988 if (!m_list) 2015 if (!m_list)
1989 return ICE_ERR_PARAM; 2016 return ICE_ERR_PARAM;
1990 2017
1991 list_for_each_entry(list_itr, m_list, list_entry) { 2018 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
1992 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; 2019 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
1993 2020
1994 if (l_type != ICE_SW_LKUP_MAC) 2021 if (l_type != ICE_SW_LKUP_MAC)
@@ -2010,12 +2037,12 @@ ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
2010enum ice_status 2037enum ice_status
2011ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) 2038ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
2012{ 2039{
2013 struct ice_fltr_list_entry *v_list_itr; 2040 struct ice_fltr_list_entry *v_list_itr, *tmp;
2014 2041
2015 if (!v_list || !hw) 2042 if (!v_list || !hw)
2016 return ICE_ERR_PARAM; 2043 return ICE_ERR_PARAM;
2017 2044
2018 list_for_each_entry(v_list_itr, v_list, list_entry) { 2045 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
2019 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; 2046 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
2020 2047
2021 if (l_type != ICE_SW_LKUP_VLAN) 2048 if (l_type != ICE_SW_LKUP_VLAN)
@@ -2115,7 +2142,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
2115 struct ice_fltr_info *fi; 2142 struct ice_fltr_info *fi;
2116 2143
2117 fi = &fm_entry->fltr_info; 2144 fi = &fm_entry->fltr_info;
2118 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) 2145 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
2119 continue; 2146 continue;
2120 2147
2121 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, 2148 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
@@ -2232,7 +2259,8 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
2232 goto end; 2259 goto end;
2233 continue; 2260 continue;
2234 } 2261 }
2235 if (!test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) 2262 if (!itr->vsi_list_info ||
2263 !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
2236 continue; 2264 continue;
2237 /* Clearing it so that the logic can add it back */ 2265 /* Clearing it so that the logic can add it back */
2238 clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); 2266 clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 7706e9b6003c..b88d96a1ef69 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -19,6 +19,7 @@ struct ice_vsi_ctx {
19 struct ice_aqc_vsi_props info; 19 struct ice_aqc_vsi_props info;
20 struct ice_sched_vsi_info sched; 20 struct ice_sched_vsi_info sched;
21 u8 alloc_from_pool; 21 u8 alloc_from_pool;
22 u8 vf_num;
22}; 23};
23 24
24enum ice_sw_fwd_act_type { 25enum ice_sw_fwd_act_type {
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a9b92974e041..1d0f58bd389b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -105,8 +105,9 @@ enum ice_rx_dtype {
105#define ICE_TX_ITR ICE_IDX_ITR1 105#define ICE_TX_ITR ICE_IDX_ITR1
106#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ 106#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
107#define ICE_ITR_8K 125 107#define ICE_ITR_8K 125
108#define ICE_DFLT_TX_ITR ICE_ITR_8K 108#define ICE_ITR_20K 50
109#define ICE_DFLT_RX_ITR ICE_ITR_8K 109#define ICE_DFLT_TX_ITR ICE_ITR_20K
110#define ICE_DFLT_RX_ITR ICE_ITR_20K
110/* apply ITR granularity translation to program the register. itr_gran is either 111/* apply ITR granularity translation to program the register. itr_gran is either
111 * 2 or 4 usecs so we need to divide by 2 first then shift by that value 112 * 2 or 4 usecs so we need to divide by 2 first then shift by that value
112 */ 113 */
@@ -135,13 +136,6 @@ struct ice_ring {
135 u16 q_index; /* Queue number of ring */ 136 u16 q_index; /* Queue number of ring */
136 u32 txq_teid; /* Added Tx queue TEID */ 137 u32 txq_teid; /* Added Tx queue TEID */
137 138
138 /* high bit set means dynamic, use accessor routines to read/write.
139 * hardware supports 4us/2us resolution for the ITR registers.
140 * these values always store the USER setting, and must be converted
141 * before programming to a register.
142 */
143 u16 itr_setting;
144
145 u16 count; /* Number of descriptors */ 139 u16 count; /* Number of descriptors */
146 u16 reg_idx; /* HW register index of the ring */ 140 u16 reg_idx; /* HW register index of the ring */
147 141
@@ -178,6 +172,7 @@ struct ice_ring_container {
178 unsigned int total_bytes; /* total bytes processed this int */ 172 unsigned int total_bytes; /* total bytes processed this int */
179 unsigned int total_pkts; /* total packets processed this int */ 173 unsigned int total_pkts; /* total packets processed this int */
180 enum ice_latency_range latency_range; 174 enum ice_latency_range latency_range;
175 int itr_idx; /* index in the interrupt vector */
181 u16 itr; 176 u16 itr;
182}; 177};
183 178
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f5c8de0ed0eb..12f9432abf11 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -84,6 +84,7 @@ enum ice_media_type {
84 84
85enum ice_vsi_type { 85enum ice_vsi_type {
86 ICE_VSI_PF = 0, 86 ICE_VSI_PF = 0,
87 ICE_VSI_VF,
87}; 88};
88 89
89struct ice_link_status { 90struct ice_link_status {
@@ -103,6 +104,15 @@ struct ice_link_status {
103 u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; 104 u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
104}; 105};
105 106
107/* Different reset sources for which a disable queue AQ call has to be made in
108 * order to clean the TX scheduler as a part of the reset
109 */
110enum ice_disq_rst_src {
111 ICE_NO_RESET = 0,
112 ICE_VM_RESET,
113 ICE_VF_RESET,
114};
115
106/* PHY info such as phy_type, etc... */ 116/* PHY info such as phy_type, etc... */
107struct ice_phy_info { 117struct ice_phy_info {
108 struct ice_link_status link_info; 118 struct ice_link_status link_info;
@@ -127,6 +137,9 @@ struct ice_hw_common_caps {
127 /* Max MTU for function or device */ 137 /* Max MTU for function or device */
128 u16 max_mtu; 138 u16 max_mtu;
129 139
140 /* Virtualization support */
141 u8 sr_iov_1_1; /* SR-IOV enabled */
142
130 /* RSS related capabilities */ 143 /* RSS related capabilities */
131 u16 rss_table_size; /* 512 for PFs and 64 for VFs */ 144 u16 rss_table_size; /* 512 for PFs and 64 for VFs */
132 u8 rss_table_entry_width; /* RSS Entry width in bits */ 145 u8 rss_table_entry_width; /* RSS Entry width in bits */
@@ -135,12 +148,15 @@ struct ice_hw_common_caps {
135/* Function specific capabilities */ 148/* Function specific capabilities */
136struct ice_hw_func_caps { 149struct ice_hw_func_caps {
137 struct ice_hw_common_caps common_cap; 150 struct ice_hw_common_caps common_cap;
151 u32 num_allocd_vfs; /* Number of allocated VFs */
152 u32 vf_base_id; /* Logical ID of the first VF */
138 u32 guaranteed_num_vsi; 153 u32 guaranteed_num_vsi;
139}; 154};
140 155
141/* Device wide capabilities */ 156/* Device wide capabilities */
142struct ice_hw_dev_caps { 157struct ice_hw_dev_caps {
143 struct ice_hw_common_caps common_cap; 158 struct ice_hw_common_caps common_cap;
159 u32 num_vfs_exposed; /* Total number of VFs exposed */
144 u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ 160 u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
145}; 161};
146 162
@@ -321,6 +337,7 @@ struct ice_hw {
321 337
322 /* Control Queue info */ 338 /* Control Queue info */
323 struct ice_ctl_q_info adminq; 339 struct ice_ctl_q_info adminq;
340 struct ice_ctl_q_info mailboxq;
324 341
325 u8 api_branch; /* API branch version */ 342 u8 api_branch; /* API branch version */
326 u8 api_maj_ver; /* API major version */ 343 u8 api_maj_ver; /* API major version */
@@ -426,4 +443,7 @@ struct ice_hw_port_stats {
426#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 443#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
427#define ICE_SR_WORDS_IN_1KB 512 444#define ICE_SR_WORDS_IN_1KB 512
428 445
446/* Hash redirection LUT for VSI - maximum array size */
447#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
448
429#endif /* _ICE_TYPE_H_ */ 449#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
new file mode 100644
index 000000000000..c25e486706f3
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -0,0 +1,2668 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice.h"
5#include "ice_lib.h"
6
7/**
8 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
9 * @pf: pointer to the PF structure
10 * @v_opcode: operation code
11 * @v_retval: return value
12 * @msg: pointer to the msg buffer
13 * @msglen: msg length
14 */
15static void
16ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
17 enum ice_status v_retval, u8 *msg, u16 msglen)
18{
19 struct ice_hw *hw = &pf->hw;
20 struct ice_vf *vf = pf->vf;
21 int i;
22
23 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
24 /* Not all vfs are enabled so skip the ones that are not */
25 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
26 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
27 continue;
28
29 /* Ignore return value on purpose - a given VF may fail, but
30 * we need to keep going and send to all of them
31 */
32 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
33 msglen, NULL);
34 }
35}
36
37/**
38 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
39 * @vf: pointer to the VF structure
40 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
41 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
42 * @link_up: whether or not to set the link up/down
43 */
44static void
45ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
46 int ice_link_speed, bool link_up)
47{
48 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
49 pfe->event_data.link_event_adv.link_status = link_up;
50 /* Speed in Mbps */
51 pfe->event_data.link_event_adv.link_speed =
52 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
53 } else {
54 pfe->event_data.link_event.link_status = link_up;
55 /* Legacy method for virtchnl link speeds */
56 pfe->event_data.link_event.link_speed =
57 (enum virtchnl_link_speed)
58 ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
59 }
60}
61
62/**
63 * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
64 * @vf: pointer to the VF structure
65 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
66 * @link_up: whether or not to set the link up/down
67 */
68static void
69ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
70 bool link_up)
71{
72 u16 link_speed;
73
74 if (link_up)
75 link_speed = ICE_AQ_LINK_SPEED_40GB;
76 else
77 link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
78
79 ice_set_pfe_link(vf, pfe, link_speed, link_up);
80}
81
82/**
83 * ice_vc_notify_vf_link_state - Inform a VF of link status
84 * @vf: pointer to the VF structure
85 *
86	 * Send a link status message to a single VF
87 */
88static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
89{
90 struct virtchnl_pf_event pfe = { 0 };
91 struct ice_link_status *ls;
92 struct ice_pf *pf = vf->pf;
93 struct ice_hw *hw;
94
95 hw = &pf->hw;
96 ls = &hw->port_info->phy.link_info;
97
98 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
99 pfe.severity = PF_EVENT_SEVERITY_INFO;
100
101 if (vf->link_forced)
102 ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
103 else
104 ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
105 ICE_AQ_LINK_UP);
106
107 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
108 sizeof(pfe), NULL);
109}
110
111/**
112 * ice_get_vf_vector - get VF interrupt vector register offset
113	 * @vf_msix: number of MSIx vectors per VF on a PF
114 * @vf_id: VF identifier
115 * @i: index of MSIx vector
116 */
117static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
118{
119 return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
120 VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
121}
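
The offset arithmetic above is easier to see with numbers. A standalone sketch (the real VFINT_DYN_CTLN() macro maps an index to a register address; it is modeled here as a plain index, and the example values are hypothetical):

    #include <stdio.h>

    static int vf_vector_index(int vf_msix, int vf_id, int i)
    {
        /* vector 0 is the VF's own slot; the rest come from a shared range */
        return (i == 0) ? vf_id : ((vf_msix - 1) * vf_id) + (i - 1);
    }

    int main(void)
    {
        int i;

        /* 5 vectors per VF: VF 2's vectors land at indices 2, 8, 9, 10, 11 */
        for (i = 0; i < 5; i++)
            printf("VF 2 vector %d -> index %d\n",
                   i, vf_vector_index(5, 2, i));
        return 0;
    }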
122
123/**
124 * ice_free_vf_res - Free a VF's resources
125 * @vf: pointer to the VF info
126 */
127static void ice_free_vf_res(struct ice_vf *vf)
128{
129 struct ice_pf *pf = vf->pf;
130 int i, pf_vf_msix;
131
132	/* First, disable the VF's configuration API to prevent the OS from
133 * accessing the VF's VSI after it's freed or invalidated.
134 */
135 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
136
137	/* free VSI & disconnect it from the parent uplink */
138 if (vf->lan_vsi_idx) {
139 ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
140 vf->lan_vsi_idx = 0;
141 vf->lan_vsi_num = 0;
142 vf->num_mac = 0;
143 }
144
145 pf_vf_msix = pf->num_vf_msix;
146 /* Disable interrupts so that VF starts in a known state */
147 for (i = 0; i < pf_vf_msix; i++) {
148 u32 reg_idx;
149
150 reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
151 wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
152 ice_flush(&pf->hw);
153 }
154 /* reset some of the state variables keeping track of the resources */
155 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
156 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
157}
158
159/***********************enable_vf routines*****************************/
160
161/**
162 * ice_dis_vf_mappings
163 * @vf: pointer to the VF structure
164 */
165static void ice_dis_vf_mappings(struct ice_vf *vf)
166{
167 struct ice_pf *pf = vf->pf;
168 struct ice_vsi *vsi;
169 int first, last, v;
170 struct ice_hw *hw;
171
172 hw = &pf->hw;
173 vsi = pf->vsi[vf->lan_vsi_idx];
174
175 wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
176
177 first = vf->first_vector_idx;
178 last = first + pf->num_vf_msix - 1;
179 for (v = first; v <= last; v++) {
180 u32 reg;
181
182 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
183 GLINT_VECT2FUNC_IS_PF_M) |
184 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
185 GLINT_VECT2FUNC_PF_NUM_M));
186 wr32(hw, GLINT_VECT2FUNC(v), reg);
187 }
188
189 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
190 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
191 else
192 dev_err(&pf->pdev->dev,
193 "Scattered mode for VF Tx queues is not yet implemented\n");
194
195 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
196 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
197 else
198 dev_err(&pf->pdev->dev,
199 "Scattered mode for VF Rx queues is not yet implemented\n");
200}
201
202/**
203 * ice_free_vfs - Free all VFs
204 * @pf: pointer to the PF structure
205 */
206void ice_free_vfs(struct ice_pf *pf)
207{
208 struct ice_hw *hw = &pf->hw;
209 int tmp, i;
210
211 if (!pf->vf)
212 return;
213
214 while (test_and_set_bit(__ICE_VF_DIS, pf->state))
215 usleep_range(1000, 2000);
216
217 /* Avoid wait time by stopping all VFs at the same time */
218 for (i = 0; i < pf->num_alloc_vfs; i++) {
219 if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
220 continue;
221
222 /* stop rings without wait time */
223 ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
224 ICE_NO_RESET, i);
225 ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
226
227 clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
228 }
229
230 /* Disable IOV before freeing resources. This lets any VF drivers
231 * running in the host get themselves cleaned up before we yank
232 * the carpet out from underneath their feet.
233 */
234 if (!pci_vfs_assigned(pf->pdev))
235 pci_disable_sriov(pf->pdev);
236 else
237 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
238
239 tmp = pf->num_alloc_vfs;
240 pf->num_vf_qps = 0;
241 pf->num_alloc_vfs = 0;
242 for (i = 0; i < tmp; i++) {
243 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
244 /* disable VF qp mappings */
245 ice_dis_vf_mappings(&pf->vf[i]);
246
247 /* Set this state so that assigned VF vectors can be
248 * reclaimed by PF for reuse in ice_vsi_release(). No
249 * need to clear this bit since pf->vf array is being
250 * freed anyways after this for loop
251 */
252 set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
253 ice_free_vf_res(&pf->vf[i]);
254 }
255 }
256
257 devm_kfree(&pf->pdev->dev, pf->vf);
258 pf->vf = NULL;
259
260 /* This check is for when the driver is unloaded while VFs are
261 * assigned. Setting the number of VFs to 0 through sysfs is caught
262 * before this function ever gets called.
263 */
264 if (!pci_vfs_assigned(pf->pdev)) {
265 int vf_id;
266
267 /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
268 * work correctly when SR-IOV gets re-enabled.
269 */
270 for (vf_id = 0; vf_id < tmp; vf_id++) {
271 u32 reg_idx, bit_idx;
272
273 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
274 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
275 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
276 }
277 }
278 clear_bit(__ICE_VF_DIS, pf->state);
279 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
280}
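
The VFLR acknowledge writes above index a bitmap spread across 32-bit registers; a standalone illustration of the reg_idx/bit_idx math (the vf_base_id value is hypothetical):

    #include <stdio.h>

    int main(void)
    {
        /* GLGEN_VFLRSTAT is an array of 32-bit registers, one bit per
         * absolute VF ID; e.g. base ID 64, VF 5 -> absolute ID 69
         */
        unsigned int vf_base_id = 64, vf_id = 5;
        unsigned int abs_id = vf_base_id + vf_id;

        /* -> register 2, bit 5: write BIT(5) to GLGEN_VFLRSTAT(2) */
        printf("register %u, bit %u\n", abs_id / 32, abs_id % 32);
        return 0;
    }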
281
282/**
283 * ice_trigger_vf_reset - Reset a VF on HW
284 * @vf: pointer to the VF structure
285 * @is_vflr: true if VFLR was issued, false if not
286 *
287 * Trigger hardware to start a reset for a particular VF. Expects the caller
288 * to wait the proper amount of time to allow hardware to reset the VF before
289 * it cleans up and restores VF functionality.
290 */
291static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
292{
293 struct ice_pf *pf = vf->pf;
294 u32 reg, reg_idx, bit_idx;
295 struct ice_hw *hw;
296 int vf_abs_id, i;
297
298 hw = &pf->hw;
299 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
300
301 /* Inform VF that it is no longer active, as a warning */
302 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
303
304 /* Disable VF's configuration API during reset. The flag is re-enabled
305 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
306 * It's normally disabled in ice_free_vf_res(), but it's safer
307	 * to do it earlier to give any VF config functions that may
308	 * still be running at this point time to finish.
309 */
310 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
311
312 /* In the case of a VFLR, the HW has already reset the VF and we
313 * just need to clean up, so don't hit the VFRTRIG register.
314 */
315 if (!is_vflr) {
316 /* reset VF using VPGEN_VFRTRIG reg */
317 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
318 reg |= VPGEN_VFRTRIG_VFSWR_M;
319 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
320 }
321 /* clear the VFLR bit in GLGEN_VFLRSTAT */
322 reg_idx = (vf_abs_id) / 32;
323 bit_idx = (vf_abs_id) % 32;
324 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
325 ice_flush(hw);
326
327 wr32(hw, PF_PCI_CIAA,
328 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
329 for (i = 0; i < 100; i++) {
330 reg = rd32(hw, PF_PCI_CIAD);
331 if ((reg & VF_TRANS_PENDING_M) != 0)
332 dev_err(&pf->pdev->dev,
333 "VF %d PCI transactions stuck\n", vf->vf_id);
334 udelay(1);
335 }
336}
337
338/**
339 * ice_vsi_set_pvid - Set port VLAN id for the VSI
340 * @vsi: the VSI being changed
341 * @vid: the VLAN id to set as a PVID
342 */
343static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
344{
345 struct device *dev = &vsi->back->pdev->dev;
346 struct ice_hw *hw = &vsi->back->hw;
347 struct ice_vsi_ctx ctxt = { 0 };
348 enum ice_status status;
349
350 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
351 ICE_AQ_VSI_PVLAN_INSERT_PVID |
352 ICE_AQ_VSI_VLAN_EMOD_STR;
353 ctxt.info.pvid = cpu_to_le16(vid);
354 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
355
356 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
357 if (status) {
358 dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
359 status, hw->adminq.sq_last_status);
360 return -EIO;
361 }
362
363 vsi->info.pvid = ctxt.info.pvid;
364 vsi->info.vlan_flags = ctxt.info.vlan_flags;
365 return 0;
366}
367
368/**
369 * ice_vsi_kill_pvid - Remove port VLAN id from the VSI
370 * @vsi: the VSI being changed
371 */
372static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
373{
374 struct ice_pf *pf = vsi->back;
375
376 if (ice_vsi_manage_vlan_stripping(vsi, false)) {
377 dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
378 vsi->vsi_num);
379 return -ENODEV;
380 }
381
382 vsi->info.pvid = 0;
383 return 0;
384}
385
386/**
387 * ice_vf_vsi_setup - Set up a VF VSI
388 * @pf: board private structure
389 * @pi: pointer to the port_info instance
390 * @vf_id: defines VF id to which this VSI connects.
391 *
392	 * Returns a pointer to the newly allocated VSI struct on success,
393	 * otherwise returns NULL on failure.
394 */
395static struct ice_vsi *
396ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
397{
398 return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
399}
400
401/**
402 * ice_alloc_vsi_res - Setup VF VSI and its resources
403 * @vf: pointer to the VF structure
404 *
405 * Returns 0 on success, negative value on failure
406 */
407static int ice_alloc_vsi_res(struct ice_vf *vf)
408{
409 struct ice_pf *pf = vf->pf;
410 LIST_HEAD(tmp_add_list);
411 u8 broadcast[ETH_ALEN];
412 struct ice_vsi *vsi;
413 int status = 0;
414
415 vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
416
417 if (!vsi) {
418 dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
419 return -ENOMEM;
420 }
421
422 vf->lan_vsi_idx = vsi->idx;
423 vf->lan_vsi_num = vsi->vsi_num;
424
425 /* first vector index is the VFs OICR index */
426 vf->first_vector_idx = vsi->hw_base_vector;
427	/* Since hw_base_vector holds the vector where data queue interrupts
428	 * start, increment by 1 since the VF's allocated vectors include
429	 * the OICR intr as well.
430 */
431 vsi->hw_base_vector += 1;
432
433	/* Check if a port VLAN existed before and restore it accordingly */
434 if (vf->port_vlan_id)
435 ice_vsi_set_pvid(vsi, vf->port_vlan_id);
436
437 eth_broadcast_addr(broadcast);
438
439 status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
440 if (status)
441 goto ice_alloc_vsi_res_exit;
442
443 if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
444 status = ice_add_mac_to_list(vsi, &tmp_add_list,
445 vf->dflt_lan_addr.addr);
446 if (status)
447 goto ice_alloc_vsi_res_exit;
448 }
449
450 status = ice_add_mac(&pf->hw, &tmp_add_list);
451 if (status)
452 dev_err(&pf->pdev->dev, "could not add mac filters\n");
453
454 /* Clear this bit after VF initialization since we shouldn't reclaim
455 * and reassign interrupts for synchronous or asynchronous VFR events.
456	 * We don't want to reconfigure interrupts since the AVF driver doesn't
457 * expect vector assignment to be changed unless there is a request for
458 * more vectors.
459 */
460 clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
461ice_alloc_vsi_res_exit:
462 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
463 return status;
464}
465
466/**
467 * ice_alloc_vf_res - Allocate VF resources
468 * @vf: pointer to the VF structure
469 */
470static int ice_alloc_vf_res(struct ice_vf *vf)
471{
472 int status;
473
474 /* setup VF VSI and necessary resources */
475 status = ice_alloc_vsi_res(vf);
476 if (status)
477 goto ice_alloc_vf_res_exit;
478
479 if (vf->trusted)
480 set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
481 else
482 clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
483
484 /* VF is now completely initialized */
485 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
486
487 return status;
488
489ice_alloc_vf_res_exit:
490 ice_free_vf_res(vf);
491 return status;
492}
493
494/**
495 * ice_ena_vf_mappings
496 * @vf: pointer to the VF structure
497 *
498 * Enable VF vectors and queues allocation by writing the details into
499 * respective registers.
500 */
501static void ice_ena_vf_mappings(struct ice_vf *vf)
502{
503 struct ice_pf *pf = vf->pf;
504 struct ice_vsi *vsi;
505 int first, last, v;
506 struct ice_hw *hw;
507 int abs_vf_id;
508 u32 reg;
509
510 hw = &pf->hw;
511 vsi = pf->vsi[vf->lan_vsi_idx];
512 first = vf->first_vector_idx;
513 last = (first + pf->num_vf_msix) - 1;
514 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
515
516 /* VF Vector allocation */
517 reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
518 ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
519 VPINT_ALLOC_VALID_M);
520 wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
521
522 /* map the interrupts to its functions */
523 for (v = first; v <= last; v++) {
524 reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
525 GLINT_VECT2FUNC_VF_NUM_M) |
526 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
527 GLINT_VECT2FUNC_PF_NUM_M));
528 wr32(hw, GLINT_VECT2FUNC(v), reg);
529 }
530
531 /* VF Tx queues allocation */
532 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
533 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id),
534 VPLAN_TXQ_MAPENA_TX_ENA_M);
535 /* set the VF PF Tx queue range
536 * VFNUMQ value should be set to (number of queues - 1). A value
537 * of 0 means 1 queue and a value of 255 means 256 queues
538 */
539 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
540 VPLAN_TX_QBASE_VFFIRSTQ_M) |
541 (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
542 VPLAN_TX_QBASE_VFNUMQ_M));
543 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
544 } else {
545 dev_err(&pf->pdev->dev,
546 "Scattered mode for VF Tx queues is not yet implemented\n");
547 }
548
549 /* VF Rx queues allocation */
550 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
551 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id),
552 VPLAN_RXQ_MAPENA_RX_ENA_M);
553 /* set the VF PF Rx queue range
554 * VFNUMQ value should be set to (number of queues - 1). A value
555 * of 0 means 1 queue and a value of 255 means 256 queues
556 */
557 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
558 VPLAN_RX_QBASE_VFFIRSTQ_M) |
559	(((vsi->alloc_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
560 VPLAN_RX_QBASE_VFNUMQ_M));
561 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
562 } else {
563 dev_err(&pf->pdev->dev,
564 "Scattered mode for VF Rx queues is not yet implemented\n");
565 }
566}
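
The VFNUMQ encoding above is off by one from the queue count. A standalone sketch of the QBASE packing (the field shift is illustrative; the real VPLAN_*_QBASE_VFNUMQ_S value is hardware-defined):

    #include <stdio.h>

    int main(void)
    {
        /* VFNUMQ holds (number of queues - 1): 0 means 1 queue and 255
         * means 256 queues; hypothetical 4-queue VF at absolute queue 32
         */
        unsigned int first_q = 32, num_q = 4, vfnumq_shift = 16;
        unsigned int reg = first_q | ((num_q - 1) << vfnumq_shift);

        printf("QBASE reg = 0x%08x\n", reg);    /* 0x00030020 */
        return 0;
    }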
567
568/**
569 * ice_determine_res
570 * @pf: pointer to the PF structure
571 * @avail_res: available resources in the PF structure
572 * @max_res: maximum resources that can be given per VF
573 * @min_res: minimum resources that can be given per VF
574 *
575	 * Returns a non-zero value if resources (queues/vectors) are available,
576	 * or zero if the PF cannot accommodate all num_alloc_vfs.
577 */
578static int
579ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
580{
581 bool checked_min_res = false;
582 int res;
583
584	/* Start by checking if the PF can assign the max number of resources
585	 * to all num_alloc_vfs.
586	 * If yes, return that number per VF.
587	 * If no, divide by 2 (rounding up) and check again.
588	 * Repeat the loop until even the minimum resources are not
589	 * available; in that case return 0
590 */
591 res = max_res;
592 while ((res >= min_res) && !checked_min_res) {
593 int num_all_res;
594
595 num_all_res = pf->num_alloc_vfs * res;
596 if (num_all_res <= avail_res)
597 return res;
598
599 if (res == min_res)
600 checked_min_res = true;
601
602 res = DIV_ROUND_UP(res, 2);
603 }
604 return 0;
605}
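
A worked run of the halving search above, as a standalone program (the resource numbers are made up):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* same search as ice_determine_res(), with num_alloc_vfs passed in */
    static int determine_res(int num_vfs, int avail, int max_res, int min_res)
    {
        int checked_min = 0, res = max_res;

        while (res >= min_res && !checked_min) {
            if (num_vfs * res <= avail)
                return res;
            if (res == min_res)
                checked_min = 1;
            res = DIV_ROUND_UP(res, 2);
        }
        return 0;
    }

    int main(void)
    {
        /* 10 VFs, 45 vectors free: tries 16, 8, then 4 per VF
         * (10 * 4 = 40 <= 45), so it settles on 4
         */
        printf("%d\n", determine_res(10, 45, 16, 1));
        return 0;
    }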
606
607/**
608 * ice_check_avail_res - check if vectors and queues are available
609 * @pf: pointer to the PF structure
610 *
611	 * This function is where we calculate the actual number of resources for
612	 * VF VSIs; we don't reserve ahead of time during probe. Returns success
613	 * if vector and queue resources are available, otherwise an error code
614 */
615static int ice_check_avail_res(struct ice_pf *pf)
616{
617 u16 num_msix, num_txq, num_rxq;
618
619 if (!pf->num_alloc_vfs)
620 return -EINVAL;
621
622 /* Grab from HW interrupts common pool
623	 * Note: By the time a user decides a VF needs more vectors, it's
624	 * already too late, since this must be decided before the VF
625	 * interface is created. So the best we can do is take a guess as
626	 * to what the user might want.
627	 *
628	 * We have two policies for vector allocation:
629	 * 1. If num_alloc_vfs is 1 to 16, we treat this as a small number
630	 * of NFV VFs used for NFV appliances. Since this is a special case,
631	 * we try to assign the maximum number of vectors per VF (65) where
632	 * possible, based on the ice_determine_res() algorithm.
633	 * 2. If num_alloc_vfs is 17 to 256, it's a large number of regular
634	 * VFs not used for any special purpose. Hence try to grab the
635	 * default number of interrupt vectors (5, as supported by the AVF driver).
636 */
637 if (pf->num_alloc_vfs <= 16) {
638 num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
639 ICE_MAX_INTR_PER_VF,
640 ICE_MIN_INTR_PER_VF);
641 } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
642 num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
643 ICE_DFLT_INTR_PER_VF,
644 ICE_MIN_INTR_PER_VF);
645 } else {
646 dev_err(&pf->pdev->dev,
647 "Number of VFs %d exceeds max VF count %d\n",
648 pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
649 return -EIO;
650 }
651
652 if (!num_msix)
653 return -EIO;
654
655	/* Grab from the common pool
656	 * Start by requesting the default number of queues (4, as supported
657	 * by the AVF driver). Note that the main difference between queues
658	 * and vectors is that the latter can only be reserved at init time,
659	 * while queues can be requested by the VF at runtime through
660	 * Virtchnl; that is why we start by reserving only a few queues.
661 */
662 num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
663 ICE_MIN_QS_PER_VF);
664
665 num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
666 ICE_MIN_QS_PER_VF);
667
668 if (!num_txq || !num_rxq)
669 return -EIO;
670
 671	/* The AVF driver works only with queue pairs, which means it expects
 672	 * an equal number of Rx and Tx queues, so take the minimum of the
 673	 * available Tx or Rx queues
674 */
675 pf->num_vf_qps = min_t(int, num_txq, num_rxq);
676 pf->num_vf_msix = num_msix;
677
678 return 0;
679}
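As a worked example of the two policies, reusing determine_res() from the sketch above (65 and 5 are the values named in the comment; the minimum of 3 is an assumption, since ICE_MIN_INTR_PER_VF's definition is not shown in this hunk):

    /* hypothetical values for the policy split in ice_check_avail_res() */
    #define MAX_INTR_PER_VF		65	/* NFV ceiling from the comment */
    #define DFLT_INTR_PER_VF	 5	/* AVF default from the comment */
    #define MIN_INTR_PER_VF		 3	/* assumed minimum */

    static int vf_vectors(int num_vfs, int avail)
    {
    	if (num_vfs <= 16)	/* few VFs: aim high (NFV case) */
    		return determine_res(num_vfs, avail, MAX_INTR_PER_VF,
    				     MIN_INTR_PER_VF);
    	/* many VFs: start from the AVF default */
    	return determine_res(num_vfs, avail, DFLT_INTR_PER_VF,
    			     MIN_INTR_PER_VF);
    }

    /* vf_vectors(8, 100)  == 9: halved down from 65 until 8 * 9 fits
     * vf_vectors(64, 100) == 0: even 3 each needs 192 -> the -EIO path
     */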
680
681/**
682 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
683 * @vf: pointer to the VF structure
684 *
 685 * Clean up a VF after the hardware reset is finished. Expects the caller
 686 * to have verified that the reset finished properly and that the minimum
 687 * wait time has passed. Reallocates VF resources to return the VF to an
 688 * active state.
689 */
690static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
691{
692 struct ice_pf *pf = vf->pf;
693 struct ice_hw *hw;
694 u32 reg;
695
696 hw = &pf->hw;
697
 698	/* PF software completes the flow by notifying the VF that the reset
 699	 * flow is completed. This is done by clearing the reset bit in the
 700	 * VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT register
 701	 * to VFR completed (done at the end of this function).
702 * By doing this we allow HW to access VF memory at any point. If we
703 * did it any sooner, HW could access memory while it was being freed
704 * in ice_free_vf_res(), causing an IOMMU fault.
705 *
706 * On the other hand, this needs to be done ASAP, because the VF driver
707 * is waiting for this to happen and may report a timeout. It's
708 * harmless, but it gets logged into Guest OS kernel log, so best avoid
709 * it.
710 */
711 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
712 reg &= ~VPGEN_VFRTRIG_VFSWR_M;
713 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
714
715 /* reallocate VF resources to finish resetting the VSI state */
716 if (!ice_alloc_vf_res(vf)) {
717 ice_ena_vf_mappings(vf);
718 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
719 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
720 vf->num_vlan = 0;
721 }
722
723 /* Tell the VF driver the reset is done. This needs to be done only
724 * after VF has been fully initialized, because the VF driver may
725 * request resources immediately after setting this flag.
726 */
727 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
728}
729
730/**
731 * ice_reset_all_vfs - reset all allocated VFs in one go
732 * @pf: pointer to the PF structure
733 * @is_vflr: true if VFLR was issued, false if not
734 *
735 * First, tell the hardware to reset each VF, then do all the waiting in one
736 * chunk, and finally finish restoring each VF after the wait. This is useful
737 * during PF routines which need to reset all VFs, as otherwise it must perform
738 * these resets in a serialized fashion.
739 *
740 * Returns true if any VFs were reset, and false otherwise.
741 */
742bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
743{
744 struct ice_hw *hw = &pf->hw;
745 int v, i;
746
747 /* If we don't have any VFs, then there is nothing to reset */
748 if (!pf->num_alloc_vfs)
749 return false;
750
751 /* If VFs have been disabled, there is no need to reset */
752 if (test_and_set_bit(__ICE_VF_DIS, pf->state))
753 return false;
754
755 /* Begin reset on all VFs at once */
756 for (v = 0; v < pf->num_alloc_vfs; v++)
757 ice_trigger_vf_reset(&pf->vf[v], is_vflr);
758
 759	/* Issue the Disable LAN Tx queue AQ call with the VFR bit set and
 760	 * 0 queues to inform firmware about the VF reset.
761 */
762 for (v = 0; v < pf->num_alloc_vfs; v++)
763 ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
764 ICE_VF_RESET, v, NULL);
765
766 /* HW requires some time to make sure it can flush the FIFO for a VF
767 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
768 * sequence to make sure that it has completed. We'll keep track of
769 * the VFs using a simple iterator that increments once that VF has
770 * finished resetting.
771 */
772 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
773 usleep_range(10000, 20000);
774
775 /* Check each VF in sequence */
776 while (v < pf->num_alloc_vfs) {
777 struct ice_vf *vf = &pf->vf[v];
778 u32 reg;
779
780 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
781 if (!(reg & VPGEN_VFRSTAT_VFRD_M))
782 break;
783
784 /* If the current VF has finished resetting, move on
785 * to the next VF in sequence.
786 */
787 v++;
788 }
789 }
790
791 /* Display a warning if at least one VF didn't manage to reset in
792 * time, but continue on with the operation.
793 */
794 if (v < pf->num_alloc_vfs)
795 dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
796 usleep_range(10000, 20000);
797
798 /* free VF resources to begin resetting the VSI state */
799 for (v = 0; v < pf->num_alloc_vfs; v++)
800 ice_free_vf_res(&pf->vf[v]);
801
802 if (ice_check_avail_res(pf)) {
803 dev_err(&pf->pdev->dev,
 804			"Cannot allocate VF resources, try with fewer VFs\n");
805 return false;
806 }
807
808 /* Finish the reset on each VF */
809 for (v = 0; v < pf->num_alloc_vfs; v++)
810 ice_cleanup_and_realloc_vf(&pf->vf[v]);
811
812 ice_flush(hw);
813 clear_bit(__ICE_VF_DIS, pf->state);
814
815 return true;
816}
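The wait loop above shares one retry budget across all VFs: i counts sleep intervals (at most ten in total) while v only advances past VFs whose done bit is already set, so the total wait stays bounded no matter how many VFs exist. A stripped-down sketch of the pattern, where vf_reset_done() is a hypothetical stand-in for the VPGEN_VFRSTAT read:

    /* shared-budget polling sketch; vf_reset_done() is hypothetical */
    static bool wait_all_vfs(int num_vfs)
    {
    	int i, v = 0;

    	for (i = 0; i < 10 && v < num_vfs; i++) {
    		usleep_range(10000, 20000);	/* let HW flush the FIFO */
    		while (v < num_vfs && vf_reset_done(v))
    			v++;			/* done; check next VF */
    	}
    	return v == num_vfs;	/* false -> "VF reset check timeout" */
    }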
817
818/**
819 * ice_reset_vf - Reset a particular VF
820 * @vf: pointer to the VF structure
821 * @is_vflr: true if VFLR was issued, false if not
822 *
823 * Returns true if the VF is reset, false otherwise.
824 */
825static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
826{
827 struct ice_pf *pf = vf->pf;
828 struct ice_hw *hw = &pf->hw;
829 bool rsd = false;
830 u32 reg;
831 int i;
832
833 /* If the VFs have been disabled, this means something else is
834 * resetting the VF, so we shouldn't continue.
835 */
836 if (test_and_set_bit(__ICE_VF_DIS, pf->state))
837 return false;
838
839 ice_trigger_vf_reset(vf, is_vflr);
840
841 if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
842 ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET,
843 vf->vf_id);
844 ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]);
845 clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
846 } else {
 847		/* Issue the Disable LAN Tx queue AQ call even when queues are
 848		 * not enabled. This is needed for successful completion of VFR.
849 */
850 ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0,
851 NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL);
852 }
853
854 /* poll VPGEN_VFRSTAT reg to make sure
855 * that reset is complete
856 */
857 for (i = 0; i < 10; i++) {
858 /* VF reset requires driver to first reset the VF and then
859 * poll the status register to make sure that the reset
860 * completed successfully.
861 */
862 usleep_range(10000, 20000);
863 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
864 if (reg & VPGEN_VFRSTAT_VFRD_M) {
865 rsd = true;
866 break;
867 }
868 }
869
 870	/* Display a warning if the VF didn't manage to reset in time, but
 871	 * continue on with the operation anyway.
872 */
873 if (!rsd)
874 dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
875 vf->vf_id);
876
877 usleep_range(10000, 20000);
878
879 /* free VF resources to begin resetting the VSI state */
880 ice_free_vf_res(vf);
881
882 ice_cleanup_and_realloc_vf(vf);
883
884 ice_flush(hw);
885 clear_bit(__ICE_VF_DIS, pf->state);
886
887 return true;
888}
889
890/**
891 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
892 * @pf: pointer to the PF structure
893 */
894void ice_vc_notify_link_state(struct ice_pf *pf)
895{
896 int i;
897
898 for (i = 0; i < pf->num_alloc_vfs; i++)
899 ice_vc_notify_vf_link_state(&pf->vf[i]);
900}
901
902/**
903 * ice_vc_notify_reset - Send pending reset message to all VFs
904 * @pf: pointer to the PF structure
905 *
906 * indicate a pending reset to all VFs on a given PF
907 */
908void ice_vc_notify_reset(struct ice_pf *pf)
909{
910 struct virtchnl_pf_event pfe;
911
912 if (!pf->num_alloc_vfs)
913 return;
914
915 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
916 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
917 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
918 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
919}
920
921/**
922 * ice_vc_notify_vf_reset - Notify VF of a reset event
923 * @vf: pointer to the VF structure
924 */
925static void ice_vc_notify_vf_reset(struct ice_vf *vf)
926{
927 struct virtchnl_pf_event pfe;
928
929 /* validate the request */
930 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
931 return;
932
933 /* verify if the VF is in either init or active before proceeding */
934 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
935 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
936 return;
937
938 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
939 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
940 ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
941 (u8 *)&pfe, sizeof(pfe), NULL);
942}
943
944/**
945 * ice_alloc_vfs - Allocate and set up VFs resources
946 * @pf: pointer to the PF structure
947 * @num_alloc_vfs: number of VFs to allocate
948 */
949static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
950{
951 struct ice_hw *hw = &pf->hw;
952 struct ice_vf *vfs;
953 int i, ret;
954
955 /* Disable global interrupt 0 so we don't try to handle the VFLR. */
956 wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
957 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
958
959 ice_flush(hw);
960
961 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
962 if (ret) {
963 pf->num_alloc_vfs = 0;
964 goto err_unroll_intr;
965 }
966 /* allocate memory */
967 vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
968 GFP_KERNEL);
969 if (!vfs) {
970 ret = -ENOMEM;
971 goto err_unroll_sriov;
972 }
973 pf->vf = vfs;
974
975 /* apply default profile */
976 for (i = 0; i < num_alloc_vfs; i++) {
977 vfs[i].pf = pf;
978 vfs[i].vf_sw_id = pf->first_sw;
979 vfs[i].vf_id = i;
980
981 /* assign default capabilities */
982 set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
983 vfs[i].spoofchk = true;
984
985 /* Set this state so that PF driver does VF vector assignment */
986 set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
987 }
988 pf->num_alloc_vfs = num_alloc_vfs;
989
990 /* VF resources get allocated during reset */
 991	if (!ice_reset_all_vfs(pf, false)) {
 992		ret = -EIO; goto err_unroll_sriov; }
993
994 goto err_unroll_intr;
995
996err_unroll_sriov:
997 pci_disable_sriov(pf->pdev);
998err_unroll_intr:
999 /* rearm interrupts here */
1000 ice_irq_dynamic_ena(hw, NULL, NULL);
1001 return ret;
1002}
1003
1004/**
1005 * ice_pf_state_is_nominal - checks the PF for nominal state
1006 * @pf: pointer to the PF to check
1007 *
1008 * Check the PF's state for a collection of bits that would indicate
1009 * the PF is in a state that would inhibit normal operation of the
1010 * driver.
1011 *
1012 * Returns true if PF is in a nominal state.
1013 * Returns false otherwise
1014 */
1015static bool ice_pf_state_is_nominal(struct ice_pf *pf)
1016{
1017 DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
1018
1019 if (!pf)
1020 return false;
1021
1022 bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
1023 if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1024 return false;
1025
1026 return true;
1027}
1028
1029/**
1030 * ice_pci_sriov_ena - Enable or change number of VFs
1031 * @pf: pointer to the PF structure
1032 * @num_vfs: number of VFs to allocate
1033 */
1034static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1035{
1036 int pre_existing_vfs = pci_num_vf(pf->pdev);
1037 struct device *dev = &pf->pdev->dev;
1038 int err;
1039
1040 if (!ice_pf_state_is_nominal(pf)) {
1041 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1042 return -EBUSY;
1043 }
1044
1045 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1046 dev_err(dev, "This device is not capable of SR-IOV\n");
1047 return -ENODEV;
1048 }
1049
1050 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1051 ice_free_vfs(pf);
1052 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1053 return num_vfs;
1054
1055 if (num_vfs > pf->num_vfs_supported) {
1056 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1057 num_vfs, pf->num_vfs_supported);
1058 return -ENOTSUPP;
1059 }
1060
1061 dev_info(dev, "Allocating %d VFs\n", num_vfs);
1062 err = ice_alloc_vfs(pf, num_vfs);
1063 if (err) {
1064 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1065 return err;
1066 }
1067
1068 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1069 return num_vfs;
1070}
1071
1072/**
1073 * ice_sriov_configure - Enable or change number of VFs via sysfs
1074 * @pdev: pointer to a pci_dev structure
1075 * @num_vfs: number of VFs to allocate
1076 *
1077 * This function is called when the user updates the number of VFs in sysfs.
1078 */
1079int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1080{
1081 struct ice_pf *pf = pci_get_drvdata(pdev);
1082
1083 if (num_vfs)
1084 return ice_pci_sriov_ena(pf, num_vfs);
1085
1086 if (!pci_vfs_assigned(pdev)) {
1087 ice_free_vfs(pf);
1088 } else {
1089 dev_err(&pf->pdev->dev,
1090 "can't free VFs because some are assigned to VMs.\n");
1091 return -EBUSY;
1092 }
1093
1094 return 0;
1095}
1096
1097/**
1098 * ice_process_vflr_event - Free VF resources via IRQ calls
1099 * @pf: pointer to the PF structure
1100 *
1101 * called from the VFLR IRQ handler to
1102 * free up VF resources and state variables
1103 */
1104void ice_process_vflr_event(struct ice_pf *pf)
1105{
1106 struct ice_hw *hw = &pf->hw;
1107 int vf_id;
1108 u32 reg;
1109
1110 if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1111 !pf->num_alloc_vfs)
1112 return;
1113
1114 /* Re-enable the VFLR interrupt cause here, before looking for which
1115 * VF got reset. Otherwise, if another VF gets a reset while the
1116 * first one is being processed, that interrupt will be lost, and
1117 * that VF will be stuck in reset forever.
1118 */
1119 reg = rd32(hw, PFINT_OICR_ENA);
1120 reg |= PFINT_OICR_VFLR_M;
1121 wr32(hw, PFINT_OICR_ENA, reg);
1122 ice_flush(hw);
1123
1124 clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
1125 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
1126 struct ice_vf *vf = &pf->vf[vf_id];
1127 u32 reg_idx, bit_idx;
1128
1129 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1130 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1131 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
1132 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1133 if (reg & BIT(bit_idx))
1134 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1135 ice_reset_vf(vf, true);
1136 }
1137}
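GLGEN_VFLRSTAT is a bank of 32-bit registers spanning every VF on the device, so a VF's status bit is located by plain division on its absolute VF number. A worked example with a hypothetical vf_base_id of 64:

    /* locating VF 10's VFLR bit, assuming vf_base_id == 64 */
    u32 abs_vf_id = 64 + 10;	/* hw->func_caps.vf_base_id + vf_id = 74 */
    u32 reg_idx   = abs_vf_id / 32;	/* 2: third GLGEN_VFLRSTAT register */
    u32 bit_idx   = abs_vf_id % 32;	/* 10: bit within that register */

    /* VFLR pending iff rd32(hw, GLGEN_VFLRSTAT(reg_idx)) & BIT(bit_idx) */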
1138
1139/**
1140 * ice_vc_dis_vf - Disable a given VF via SW reset
1141 * @vf: pointer to the VF info
1142 *
1143 * Disable the VF through a SW reset
1144 */
1145static void ice_vc_dis_vf(struct ice_vf *vf)
1146{
1147 ice_vc_notify_vf_reset(vf);
1148 ice_reset_vf(vf, false);
1149}
1150
1151/**
1152 * ice_vc_send_msg_to_vf - Send message to VF
1153 * @vf: pointer to the VF info
1154 * @v_opcode: virtual channel opcode
1155 * @v_retval: virtual channel return value
1156 * @msg: pointer to the msg buffer
1157 * @msglen: msg length
1158 *
1159 * send msg to VF
1160 */
1161static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1162 enum ice_status v_retval, u8 *msg, u16 msglen)
1163{
1164 enum ice_status aq_ret;
1165 struct ice_pf *pf;
1166
1167 /* validate the request */
1168 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1169 return -EINVAL;
1170
1171 pf = vf->pf;
1172
1173 /* single place to detect unsuccessful return values */
1174 if (v_retval) {
1175 vf->num_inval_msgs++;
1176 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1177 vf->vf_id, v_opcode, v_retval);
1178 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1179 dev_err(&pf->pdev->dev,
1180 "Number of invalid messages exceeded for VF %d\n",
1181 vf->vf_id);
1182 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1183 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1184 return -EIO;
1185 }
1186 } else {
1187 vf->num_valid_msgs++;
1188		/* reset the invalid counter if a valid message is received */
1189 vf->num_inval_msgs = 0;
1190 }
1191
1192 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1193 msg, msglen, NULL);
1194 if (aq_ret) {
1195 dev_info(&pf->pdev->dev,
1196 "Unable to send the message to VF %d aq_err %d\n",
1197 vf->vf_id, pf->hw.mailboxq.sq_last_status);
1198 return -EIO;
1199 }
1200
1201 return 0;
1202}
1203
1204/**
1205 * ice_vc_get_ver_msg
1206 * @vf: pointer to the VF info
1207 * @msg: pointer to the msg buffer
1208 *
1209 * called from the VF to request the API version used by the PF
1210 */
1211static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1212{
1213 struct virtchnl_version_info info = {
1214 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1215 };
1216
1217 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1218 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1219 if (VF_IS_V10(&vf->vf_ver))
1220 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1221
1222 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
1223 (u8 *)&info,
1224 sizeof(struct virtchnl_version_info));
1225}
1226
1227/**
1228 * ice_vc_get_vf_res_msg
1229 * @vf: pointer to the VF info
1230 * @msg: pointer to the msg buffer
1231 *
1232 * called from the VF to request its resources
1233 */
1234static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1235{
1236 struct virtchnl_vf_resource *vfres = NULL;
1237 enum ice_status aq_ret = 0;
1238 struct ice_pf *pf = vf->pf;
1239 struct ice_vsi *vsi;
1240 int len = 0;
1241 int ret;
1242
1243 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1244 aq_ret = ICE_ERR_PARAM;
1245 goto err;
1246 }
1247
1248 len = sizeof(struct virtchnl_vf_resource);
1249
1250 vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
1251 if (!vfres) {
1252 aq_ret = ICE_ERR_NO_MEMORY;
1253 len = 0;
1254 goto err;
1255 }
1256 if (VF_IS_V11(&vf->vf_ver))
1257 vf->driver_caps = *(u32 *)msg;
1258 else
1259 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1260 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1261 VIRTCHNL_VF_OFFLOAD_VLAN;
1262
1263 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1264 vsi = pf->vsi[vf->lan_vsi_idx];
1265 if (!vsi->info.pvid)
1266 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1267
1268 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1269 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1270 } else {
1271 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1272 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1273 else
1274 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1275 }
1276
1277 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1278 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1279
1280 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1281 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1282
1283 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1284 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1285
1286 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1287 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1288
1289 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1290 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1291
1292 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1293 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1294
1295 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1296 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1297
1298 vfres->num_vsis = 1;
1299	/* Tx and Rx queues are equal for the VF */
1300 vfres->num_queue_pairs = vsi->num_txq;
1301 vfres->max_vectors = pf->num_vf_msix;
1302 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1303 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1304
1305 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1306 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1307 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1308 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1309 vf->dflt_lan_addr.addr);
1310
1311 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1312
1313err:
1314 /* send the response back to the VF */
1315 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
1316 (u8 *)vfres, len);
1317
1318 devm_kfree(&pf->pdev->dev, vfres);
1319 return ret;
1320}
1321
1322/**
1323 * ice_vc_reset_vf_msg
1324 * @vf: pointer to the VF info
1325 *
1326 * called from the VF to reset itself,
1327 * unlike other virtchnl messages, PF driver
1328 * doesn't send the response back to the VF
1329 */
1330static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1331{
1332 if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1333 ice_reset_vf(vf, false);
1334}
1335
1336/**
1337 * ice_find_vsi_from_id
1338 * @pf: the pf structure to search for the VSI
1339 * @id: id of the VSI it is searching for
1340 *
1341 * searches for the VSI with the given id
1342 */
1343static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
1344{
1345 int i;
1346
1347 for (i = 0; i < pf->num_alloc_vsi; i++)
1348 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
1349 return pf->vsi[i];
1350
1351 return NULL;
1352}
1353
1354/**
1355 * ice_vc_isvalid_vsi_id
1356 * @vf: pointer to the VF info
1357 * @vsi_id: VF relative VSI id
1358 *
1359 * check for the valid VSI id
1360 */
1361static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1362{
1363 struct ice_pf *pf = vf->pf;
1364 struct ice_vsi *vsi;
1365
1366 vsi = ice_find_vsi_from_id(pf, vsi_id);
1367
1368 return (vsi && (vsi->vf_id == vf->vf_id));
1369}
1370
1371/**
1372 * ice_vc_isvalid_q_id
1373 * @vf: pointer to the VF info
1374 * @vsi_id: VSI id
1375 * @qid: VSI relative queue id
1376 *
1377 * check for the valid queue id
1378 */
1379static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
1380{
1381 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
1382	/* allocated Tx and Rx queues should always be equal for a VF VSI */
1383 return (vsi && (qid < vsi->alloc_txq));
1384}
1385
1386/**
1387 * ice_vc_config_rss_key
1388 * @vf: pointer to the VF info
1389 * @msg: pointer to the msg buffer
1390 *
1391 * Configure the VF's RSS key
1392 */
1393static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
1394{
1395 struct virtchnl_rss_key *vrk =
1396 (struct virtchnl_rss_key *)msg;
1397 struct ice_vsi *vsi = NULL;
1398 enum ice_status aq_ret;
1399 int ret;
1400
1401 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1402 aq_ret = ICE_ERR_PARAM;
1403 goto error_param;
1404 }
1405
1406 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
1407 aq_ret = ICE_ERR_PARAM;
1408 goto error_param;
1409 }
1410
1411 vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
1412 if (!vsi) {
1413 aq_ret = ICE_ERR_PARAM;
1414 goto error_param;
1415 }
1416
1417 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
1418 aq_ret = ICE_ERR_PARAM;
1419 goto error_param;
1420 }
1421
1422 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1423 aq_ret = ICE_ERR_PARAM;
1424 goto error_param;
1425 }
1426
1427 ret = ice_set_rss(vsi, vrk->key, NULL, 0);
1428 aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
1429error_param:
1430 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
1431 NULL, 0);
1432}
1433
1434/**
1435 * ice_vc_config_rss_lut
1436 * @vf: pointer to the VF info
1437 * @msg: pointer to the msg buffer
1438 *
1439 * Configure the VF's RSS LUT
1440 */
1441static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
1442{
1443 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
1444 struct ice_vsi *vsi = NULL;
1445 enum ice_status aq_ret;
1446 int ret;
1447
1448 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1449 aq_ret = ICE_ERR_PARAM;
1450 goto error_param;
1451 }
1452
1453 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
1454 aq_ret = ICE_ERR_PARAM;
1455 goto error_param;
1456 }
1457
1458 vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
1459 if (!vsi) {
1460 aq_ret = ICE_ERR_PARAM;
1461 goto error_param;
1462 }
1463
1464 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
1465 aq_ret = ICE_ERR_PARAM;
1466 goto error_param;
1467 }
1468
1469 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1470 aq_ret = ICE_ERR_PARAM;
1471 goto error_param;
1472 }
1473
1474 ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
1475 aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
1476error_param:
1477 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
1478 NULL, 0);
1479}
1480
1481/**
1482 * ice_vc_get_stats_msg
1483 * @vf: pointer to the VF info
1484 * @msg: pointer to the msg buffer
1485 *
1486 * called from the VF to get VSI stats
1487 */
1488static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1489{
1490 struct virtchnl_queue_select *vqs =
1491 (struct virtchnl_queue_select *)msg;
1492 enum ice_status aq_ret = 0;
1493 struct ice_eth_stats stats;
1494 struct ice_vsi *vsi;
1495
1496 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1497 aq_ret = ICE_ERR_PARAM;
1498 goto error_param;
1499 }
1500
1501 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1502 aq_ret = ICE_ERR_PARAM;
1503 goto error_param;
1504 }
1505
1506 vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
1507 if (!vsi) {
1508 aq_ret = ICE_ERR_PARAM;
1509 goto error_param;
1510 }
1511
1512 memset(&stats, 0, sizeof(struct ice_eth_stats));
1513 ice_update_eth_stats(vsi);
1514
1515 stats = vsi->eth_stats;
1516
1517error_param:
1518 /* send the response to the VF */
1519 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
1520 (u8 *)&stats, sizeof(stats));
1521}
1522
1523/**
1524 * ice_vc_ena_qs_msg
1525 * @vf: pointer to the VF info
1526 * @msg: pointer to the msg buffer
1527 *
1528 * called from the VF to enable all or specific queue(s)
1529 */
1530static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
1531{
1532 struct virtchnl_queue_select *vqs =
1533 (struct virtchnl_queue_select *)msg;
1534 enum ice_status aq_ret = 0;
1535 struct ice_vsi *vsi;
1536
1537 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1538 aq_ret = ICE_ERR_PARAM;
1539 goto error_param;
1540 }
1541
1542 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1543 aq_ret = ICE_ERR_PARAM;
1544 goto error_param;
1545 }
1546
1547 if (!vqs->rx_queues && !vqs->tx_queues) {
1548 aq_ret = ICE_ERR_PARAM;
1549 goto error_param;
1550 }
1551
1552 vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
1553 if (!vsi) {
1554 aq_ret = ICE_ERR_PARAM;
1555 goto error_param;
1556 }
1557
1558	/* Enable only Rx rings; Tx rings were enabled by the FW when the
1559 * Tx queue group list was configured and the context bits were
1560 * programmed using ice_vsi_cfg_txqs
1561 */
1562 if (ice_vsi_start_rx_rings(vsi))
1563 aq_ret = ICE_ERR_PARAM;
1564
1565 /* Set flag to indicate that queues are enabled */
1566 if (!aq_ret)
1567 set_bit(ICE_VF_STATE_ENA, vf->vf_states);
1568
1569error_param:
1570 /* send the response to the VF */
1571 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
1572 NULL, 0);
1573}
1574
1575/**
1576 * ice_vc_dis_qs_msg
1577 * @vf: pointer to the VF info
1578 * @msg: pointer to the msg buffer
1579 *
1580 * called from the VF to disable all or specific
1581 * queue(s)
1582 */
1583static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
1584{
1585 struct virtchnl_queue_select *vqs =
1586 (struct virtchnl_queue_select *)msg;
1587 enum ice_status aq_ret = 0;
1588 struct ice_vsi *vsi;
1589
1590 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
1591 !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
1592 aq_ret = ICE_ERR_PARAM;
1593 goto error_param;
1594 }
1595
1596 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1597 aq_ret = ICE_ERR_PARAM;
1598 goto error_param;
1599 }
1600
1601 if (!vqs->rx_queues && !vqs->tx_queues) {
1602 aq_ret = ICE_ERR_PARAM;
1603 goto error_param;
1604 }
1605
1606 vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
1607 if (!vsi) {
1608 aq_ret = ICE_ERR_PARAM;
1609 goto error_param;
1610 }
1611
1612 if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
1613 dev_err(&vsi->back->pdev->dev,
1614 "Failed to stop tx rings on VSI %d\n",
1615 vsi->vsi_num);
1616 aq_ret = ICE_ERR_PARAM;
1617 }
1618
1619 if (ice_vsi_stop_rx_rings(vsi)) {
1620 dev_err(&vsi->back->pdev->dev,
1621 "Failed to stop rx rings on VSI %d\n",
1622 vsi->vsi_num);
1623 aq_ret = ICE_ERR_PARAM;
1624 }
1625
1626 /* Clear enabled queues flag */
1627 if (!aq_ret)
1628 clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
1629
1630error_param:
1631 /* send the response to the VF */
1632 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
1633 NULL, 0);
1634}
1635
1636/**
1637 * ice_vc_cfg_irq_map_msg
1638 * @vf: pointer to the VF info
1639 * @msg: pointer to the msg buffer
1640 *
1641 * called from the VF to configure the IRQ to queue map
1642 */
1643static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
1644{
1645 struct virtchnl_irq_map_info *irqmap_info =
1646 (struct virtchnl_irq_map_info *)msg;
1647 u16 vsi_id, vsi_q_id, vector_id;
1648 struct virtchnl_vector_map *map;
1649 struct ice_vsi *vsi = NULL;
1650 struct ice_pf *pf = vf->pf;
1651 enum ice_status aq_ret = 0;
1652 unsigned long qmap;
1653 int i;
1654
1655 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1656 aq_ret = ICE_ERR_PARAM;
1657 goto error_param;
1658 }
1659
1660 for (i = 0; i < irqmap_info->num_vectors; i++) {
1661 map = &irqmap_info->vecmap[i];
1662
1663 vector_id = map->vector_id;
1664 vsi_id = map->vsi_id;
1665 /* validate msg params */
1666 if (!(vector_id < pf->hw.func_caps.common_cap
1667 .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
1668 aq_ret = ICE_ERR_PARAM;
1669 goto error_param;
1670 }
1671
1672 vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
1673 if (!vsi) {
1674 aq_ret = ICE_ERR_PARAM;
1675 goto error_param;
1676 }
1677
1678		/* look out for an invalid queue index */
1679 qmap = map->rxq_map;
1680 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
1681 struct ice_q_vector *q_vector;
1682
1683 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
1684 aq_ret = ICE_ERR_PARAM;
1685 goto error_param;
1686 }
1687 q_vector = vsi->q_vectors[i];
1688 q_vector->num_ring_rx++;
1689 q_vector->rx.itr_idx = map->rxitr_idx;
1690 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
1691 }
1692
1693 qmap = map->txq_map;
1694 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
1695 struct ice_q_vector *q_vector;
1696
1697 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
1698 aq_ret = ICE_ERR_PARAM;
1699 goto error_param;
1700 }
1701 q_vector = vsi->q_vectors[i];
1702 q_vector->num_ring_tx++;
1703 q_vector->tx.itr_idx = map->txitr_idx;
1704 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
1705 }
1706 }
1707
1708 if (vsi)
1709 ice_vsi_cfg_msix(vsi);
1710error_param:
1711 /* send the response to the VF */
1712 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
1713 NULL, 0);
1714}
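Each virtchnl_vector_map carries one Tx and one Rx queue bitmap, and the handler above walks their set bits with for_each_set_bit(). A minimal sketch of that walk: an rxq_map of 0x0a (binary 1010) attaches queues 1 and 3 to the vector:

    /* sketch of the bitmap walk done above for one vector's rxq_map */
    unsigned long qmap = 0x0a;	/* queues 1 and 3 are mapped */
    u16 q_id;

    for_each_set_bit(q_id, &qmap, ICE_MAX_BASE_QS_PER_VF)
    	pr_info("queue %u -> this vector\n", q_id);
    /* visits q_id == 1, then q_id == 3 */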
1715
1716/**
1717 * ice_vc_cfg_qs_msg
1718 * @vf: pointer to the VF info
1719 * @msg: pointer to the msg buffer
1720 *
1721 * called from the VF to configure the Rx/Tx queues
1722 */
1723static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
1724{
1725 struct virtchnl_vsi_queue_config_info *qci =
1726 (struct virtchnl_vsi_queue_config_info *)msg;
1727 struct virtchnl_queue_pair_info *qpi;
1728 enum ice_status aq_ret = 0;
1729 struct ice_vsi *vsi;
1730 int i;
1731
1732 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1733 aq_ret = ICE_ERR_PARAM;
1734 goto error_param;
1735 }
1736
1737 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
1738 aq_ret = ICE_ERR_PARAM;
1739 goto error_param;
1740 }
1741
1742 vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
1743 if (!vsi) {
1744 aq_ret = ICE_ERR_PARAM;
1745 goto error_param;
1746 }
1747
1748 for (i = 0; i < qci->num_queue_pairs; i++) {
1749 qpi = &qci->qpair[i];
1750 if (qpi->txq.vsi_id != qci->vsi_id ||
1751 qpi->rxq.vsi_id != qci->vsi_id ||
1752 qpi->rxq.queue_id != qpi->txq.queue_id ||
1753 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
1754 aq_ret = ICE_ERR_PARAM;
1755 goto error_param;
1756 }
1757 /* copy Tx queue info from VF into VSI */
1758 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
1759 vsi->tx_rings[i]->count = qpi->txq.ring_len;
1760 /* copy Rx queue info from VF into vsi */
1761 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
1762 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
1763 if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
1764 aq_ret = ICE_ERR_PARAM;
1765 goto error_param;
1766 }
1767 vsi->rx_buf_len = qpi->rxq.databuffer_size;
1768 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
1769 qpi->rxq.max_pkt_size < 64) {
1770 aq_ret = ICE_ERR_PARAM;
1771 goto error_param;
1772 }
1773 vsi->max_frame = qpi->rxq.max_pkt_size;
1774 }
1775
1776	/* The VF can request fewer queues than were allocated or than the
1777	 * default allocation, so update the VSI with the new number
1778 */
1779 vsi->num_txq = qci->num_queue_pairs;
1780 vsi->num_rxq = qci->num_queue_pairs;
1781
1782 if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
1783 aq_ret = 0;
1784 else
1785 aq_ret = ICE_ERR_PARAM;
1786
1787error_param:
1788 /* send the response to the VF */
1789 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
1790 NULL, 0);
1791}
1792
1793/**
1794 * ice_is_vf_trusted
1795 * @vf: pointer to the VF info
1796 */
1797static bool ice_is_vf_trusted(struct ice_vf *vf)
1798{
1799 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1800}
1801
1802/**
1803 * ice_can_vf_change_mac
1804 * @vf: pointer to the VF info
1805 *
1806 * Return true if the VF is allowed to change its MAC filters, false otherwise
1807 */
1808static bool ice_can_vf_change_mac(struct ice_vf *vf)
1809{
1810 /* If the VF MAC address has been set administratively (via the
1811 * ndo_set_vf_mac command), then deny permission to the VF to
1812 * add/delete unicast MAC addresses, unless the VF is trusted
1813 */
1814 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
1815 return false;
1816
1817 return true;
1818}
1819
1820/**
1821 * ice_vc_handle_mac_addr_msg
1822 * @vf: pointer to the VF info
1823 * @msg: pointer to the msg buffer
1824 * @set: true if mac filters are being set, false otherwise
1825 *
1826 * add or remove guest MAC address filters
1827 */
1828static int
1829ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
1830{
1831 struct virtchnl_ether_addr_list *al =
1832 (struct virtchnl_ether_addr_list *)msg;
1833 struct ice_pf *pf = vf->pf;
1834 enum virtchnl_ops vc_op;
1835 enum ice_status ret;
1836 LIST_HEAD(mac_list);
1837 struct ice_vsi *vsi;
1838 int mac_count = 0;
1839 int i;
1840
1841 if (set)
1842 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
1843 else
1844 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
1845
1846 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1847 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
1848 ret = ICE_ERR_PARAM;
1849 goto handle_mac_exit;
1850 }
1851
1852 if (set && !ice_is_vf_trusted(vf) &&
1853 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
1854 dev_err(&pf->pdev->dev,
1855			"Can't add more MAC addresses because the VF is not trusted; switch the VF to trusted mode in order to add more\n");
1856 ret = ICE_ERR_PARAM;
1857 goto handle_mac_exit;
1858 }
1859
1860 vsi = pf->vsi[vf->lan_vsi_idx];
1861
1862 for (i = 0; i < al->num_elements; i++) {
1863 u8 *maddr = al->list[i].addr;
1864
1865 if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
1866 is_broadcast_ether_addr(maddr)) {
1867 if (set) {
1868 /* VF is trying to add filters that the PF
1869 * already added. Just continue.
1870 */
1871 dev_info(&pf->pdev->dev,
1872 "mac %pM already set for VF %d\n",
1873 maddr, vf->vf_id);
1874 continue;
1875 } else {
1876 /* VF can't remove dflt_lan_addr/bcast mac */
1877 dev_err(&pf->pdev->dev,
1878 "can't remove mac %pM for VF %d\n",
1879 maddr, vf->vf_id);
1880 ret = ICE_ERR_PARAM;
1881 goto handle_mac_exit;
1882 }
1883 }
1884
1885 /* check for the invalid cases and bail if necessary */
1886 if (is_zero_ether_addr(maddr)) {
1887 dev_err(&pf->pdev->dev,
1888 "invalid mac %pM provided for VF %d\n",
1889 maddr, vf->vf_id);
1890 ret = ICE_ERR_PARAM;
1891 goto handle_mac_exit;
1892 }
1893
1894 if (is_unicast_ether_addr(maddr) &&
1895 !ice_can_vf_change_mac(vf)) {
1896 dev_err(&pf->pdev->dev,
1897 "can't change unicast mac for untrusted VF %d\n",
1898 vf->vf_id);
1899 ret = ICE_ERR_PARAM;
1900 goto handle_mac_exit;
1901 }
1902
1903 /* get here if maddr is multicast or if VF can change mac */
1904 if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
1905 ret = ICE_ERR_NO_MEMORY;
1906 goto handle_mac_exit;
1907 }
1908 mac_count++;
1909 }
1910
1911 /* program the updated filter list */
1912 if (set)
1913 ret = ice_add_mac(&pf->hw, &mac_list);
1914 else
1915 ret = ice_remove_mac(&pf->hw, &mac_list);
1916
1917 if (ret) {
1918 dev_err(&pf->pdev->dev,
1919 "can't update mac filters for VF %d, error %d\n",
1920 vf->vf_id, ret);
1921 } else {
1922 if (set)
1923 vf->num_mac += mac_count;
1924 else
1925 vf->num_mac -= mac_count;
1926 }
1927
1928handle_mac_exit:
1929 ice_free_fltr_list(&pf->pdev->dev, &mac_list);
1930 /* send the response to the VF */
1931 return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
1932}
1933
1934/**
1935 * ice_vc_add_mac_addr_msg
1936 * @vf: pointer to the VF info
1937 * @msg: pointer to the msg buffer
1938 *
1939 * add guest MAC address filter
1940 */
1941static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
1942{
1943 return ice_vc_handle_mac_addr_msg(vf, msg, true);
1944}
1945
1946/**
1947 * ice_vc_del_mac_addr_msg
1948 * @vf: pointer to the VF info
1949 * @msg: pointer to the msg buffer
1950 *
1951 * remove guest MAC address filter
1952 */
1953static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
1954{
1955 return ice_vc_handle_mac_addr_msg(vf, msg, false);
1956}
1957
1958/**
1959 * ice_vc_request_qs_msg
1960 * @vf: pointer to the VF info
1961 * @msg: pointer to the msg buffer
1962 *
1963 * VFs get a default number of queues but can use this message to request
1964 * a different number. If the request is successful, the PF will reset the
1965 * VF and return 0. If unsuccessful, the PF will respond with the number of
1966 * available queue pairs in the virtchnl message response to the VF.
1967 */
1968static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
1969{
1970 struct virtchnl_vf_res_request *vfres =
1971 (struct virtchnl_vf_res_request *)msg;
1972 int req_queues = vfres->num_queue_pairs;
1973 enum ice_status aq_ret = 0;
1974 struct ice_pf *pf = vf->pf;
1975 int tx_rx_queue_left;
1976 int cur_queues;
1977
1978 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1979 aq_ret = ICE_ERR_PARAM;
1980 goto error_param;
1981 }
1982
1983 cur_queues = pf->num_vf_qps;
1984 tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
1985 if (req_queues <= 0) {
1986 dev_err(&pf->pdev->dev,
1987 "VF %d tried to request %d queues. Ignoring.\n",
1988 vf->vf_id, req_queues);
1989 } else if (req_queues > ICE_MAX_QS_PER_VF) {
1990 dev_err(&pf->pdev->dev,
1991 "VF %d tried to request more than %d queues.\n",
1992 vf->vf_id, ICE_MAX_QS_PER_VF);
1993 vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
1994 } else if (req_queues - cur_queues > tx_rx_queue_left) {
1995 dev_warn(&pf->pdev->dev,
1996 "VF %d requested %d more queues, but only %d left.\n",
1997 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
1998 vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
1999 } else {
2000		/* request is successful, so reset the VF */
2001 vf->num_req_qs = req_queues;
2002 ice_vc_dis_vf(vf);
2003 dev_info(&pf->pdev->dev,
2004 "VF %d granted request of %d queues.\n",
2005 vf->vf_id, req_queues);
2006 return 0;
2007 }
2008
2009error_param:
2010 /* send the response to the VF */
2011 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
2012 aq_ret, (u8 *)vfres, sizeof(*vfres));
2013}
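The decision ladder above reduces to: ignore non-positive requests, cap at the per-VF ceiling, cap at what the shared queue pool can still cover, otherwise grant and reset. A compact sketch of that ladder as a pure function, with hypothetical numbers (4 current queues, 6 queue pairs left in the pool, ceiling 256):

    /* the reply chosen by ice_vc_request_qs_msg(), as a pure function */
    static int queues_offered(int req, int cur, int left, int max)
    {
    	if (req <= 0)
    		return req;		/* ignored; request echoed back */
    	if (req > max)
    		return max;		/* offer the per-VF ceiling */
    	if (req - cur > left)
    		return left + cur;	/* offer what the pool covers */
    	return req;			/* granted; VF reset follows */
    }

    /* queues_offered(16, 4, 6, 256) == 10 (capped by the pool)
     * queues_offered(8, 4, 6, 256)  == 8  (granted, VF is reset)
     */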
2014
2015/**
2016 * ice_set_vf_port_vlan
2017 * @netdev: network interface device structure
2018 * @vf_id: VF identifier
2019 * @vlan_id: VLAN id being set
2020 * @qos: priority setting
2021 * @vlan_proto: VLAN protocol
2022 *
2023 * program VF Port VLAN id and/or qos
2024 */
2025int
2026ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
2027 __be16 vlan_proto)
2028{
2029 u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
2030 struct ice_netdev_priv *np = netdev_priv(netdev);
2031 struct ice_pf *pf = np->vsi->back;
2032 struct ice_vsi *vsi;
2033 struct ice_vf *vf;
2034 int ret = 0;
2035
2036 /* validate the request */
2037 if (vf_id >= pf->num_alloc_vfs) {
2038 dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
2039 return -EINVAL;
2040 }
2041
2042 if (vlan_id > ICE_MAX_VLANID || qos > 7) {
2043 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2044 return -EINVAL;
2045 }
2046
2047 if (vlan_proto != htons(ETH_P_8021Q)) {
2048 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
2049 return -EPROTONOSUPPORT;
2050 }
2051
2052 vf = &pf->vf[vf_id];
2053 vsi = pf->vsi[vf->lan_vsi_idx];
2054 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2055 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
2056 return -EBUSY;
2057 }
2058
2059 if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
2060 /* duplicate request, so just return success */
2061 dev_info(&pf->pdev->dev,
2062 "Duplicate pvid %d request\n", vlanprio);
2063 return ret;
2064 }
2065
2066 /* If pvid, then remove all filters on the old VLAN */
2067 if (vsi->info.pvid)
2068 ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2069 VLAN_VID_MASK));
2070
2071 if (vlan_id || qos) {
2072 ret = ice_vsi_set_pvid(vsi, vlanprio);
2073 if (ret)
2074 goto error_set_pvid;
2075 } else {
2076 ice_vsi_kill_pvid(vsi);
2077 }
2078
2079 if (vlan_id) {
2080 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2081 vlan_id, qos, vf_id);
2082
2083 /* add new VLAN filter for each MAC */
2084 ret = ice_vsi_add_vlan(vsi, vlan_id);
2085 if (ret)
2086 goto error_set_pvid;
2087 }
2088
2089 /* The Port VLAN needs to be saved across resets the same as the
2090 * default LAN MAC address.
2091 */
2092 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2093
2094error_set_pvid:
2095 return ret;
2096}
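The pvid written to the VSI packs the VLAN id and the QoS priority into one 16-bit word, using the shifts and masks defined in ice_virtchnl_pf.h (shown at the end of this patch). A short sketch of the round trip for VLAN 100 at priority 3:

    /* encode, as done at the top of ice_set_vf_port_vlan() */
    u16 vlanprio = 100 | (3 << ICE_VLAN_PRIORITY_S);	/* = 0x3064 */

    /* decode, as ice_get_vf_cfg() does */
    u16 vid = vlanprio & ICE_VLAN_M;			/* 100 (bits 0-11) */
    u8 qos  = (vlanprio & ICE_PRIORITY_M) >> ICE_VLAN_PRIORITY_S;	/* 3 */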
2097
2098/**
2099 * ice_vc_process_vlan_msg
2100 * @vf: pointer to the VF info
2101 * @msg: pointer to the msg buffer
2102 * @add_v: Add VLAN if true, otherwise delete VLAN
2103 *
2104 * Process virtchnl op to add or remove programmed guest VLAN id
2105 */
2106static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2107{
2108 struct virtchnl_vlan_filter_list *vfl =
2109 (struct virtchnl_vlan_filter_list *)msg;
2110 enum ice_status aq_ret = 0;
2111 struct ice_pf *pf = vf->pf;
2112 struct ice_vsi *vsi;
2113 int i;
2114
2115 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2116 aq_ret = ICE_ERR_PARAM;
2117 goto error_param;
2118 }
2119
2120 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2121 aq_ret = ICE_ERR_PARAM;
2122 goto error_param;
2123 }
2124
2125 if (add_v && !ice_is_vf_trusted(vf) &&
2126 vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2127 dev_info(&pf->pdev->dev,
2128			 "VF is not trusted; switch the VF to trusted mode in order to add more VLANs\n");
2129 aq_ret = ICE_ERR_PARAM;
2130 goto error_param;
2131 }
2132
2133 for (i = 0; i < vfl->num_elements; i++) {
2134 if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
2135 aq_ret = ICE_ERR_PARAM;
2136 dev_err(&pf->pdev->dev,
2137 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2138 goto error_param;
2139 }
2140 }
2141
2142 vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
2143 if (!vsi) {
2144 aq_ret = ICE_ERR_PARAM;
2145 goto error_param;
2146 }
2147
2148 if (vsi->info.pvid) {
2149 aq_ret = ICE_ERR_PARAM;
2150 goto error_param;
2151 }
2152
2153 if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
2154 dev_err(&pf->pdev->dev,
2155 "%sable VLAN stripping failed for VSI %i\n",
2156 add_v ? "en" : "dis", vsi->vsi_num);
2157 aq_ret = ICE_ERR_PARAM;
2158 goto error_param;
2159 }
2160
2161 if (add_v) {
2162 for (i = 0; i < vfl->num_elements; i++) {
2163 u16 vid = vfl->vlan_id[i];
2164
2165 if (!ice_vsi_add_vlan(vsi, vid)) {
2166 vf->num_vlan++;
2167 set_bit(vid, vsi->active_vlans);
2168
2169 /* Enable VLAN pruning when VLAN 0 is added */
2170 if (unlikely(!vid))
2171 if (ice_cfg_vlan_pruning(vsi, true))
2172 aq_ret = ICE_ERR_PARAM;
2173 } else {
2174 aq_ret = ICE_ERR_PARAM;
2175 }
2176 }
2177 } else {
2178 for (i = 0; i < vfl->num_elements; i++) {
2179 u16 vid = vfl->vlan_id[i];
2180
2181 /* Make sure ice_vsi_kill_vlan is successful before
2182 * updating VLAN information
2183 */
2184 if (!ice_vsi_kill_vlan(vsi, vid)) {
2185 vf->num_vlan--;
2186 clear_bit(vid, vsi->active_vlans);
2187
2188 /* Disable VLAN pruning when removing VLAN 0 */
2189 if (unlikely(!vid))
2190 ice_cfg_vlan_pruning(vsi, false);
2191 }
2192 }
2193 }
2194
2195error_param:
2196 /* send the response to the VF */
2197 if (add_v)
2198 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
2199 NULL, 0);
2200 else
2201 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
2202 NULL, 0);
2203}
2204
2205/**
2206 * ice_vc_add_vlan_msg
2207 * @vf: pointer to the VF info
2208 * @msg: pointer to the msg buffer
2209 *
2210 * Add and program guest VLAN id
2211 */
2212static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2213{
2214 return ice_vc_process_vlan_msg(vf, msg, true);
2215}
2216
2217/**
2218 * ice_vc_remove_vlan_msg
2219 * @vf: pointer to the VF info
2220 * @msg: pointer to the msg buffer
2221 *
2222 * remove programmed guest VLAN id
2223 */
2224static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2225{
2226 return ice_vc_process_vlan_msg(vf, msg, false);
2227}
2228
2229/**
2230 * ice_vc_ena_vlan_stripping
2231 * @vf: pointer to the VF info
2232 *
2233 * Enable VLAN header stripping for a given VF
2234 */
2235static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2236{
2237 enum ice_status aq_ret = 0;
2238 struct ice_pf *pf = vf->pf;
2239 struct ice_vsi *vsi;
2240
2241 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2242 aq_ret = ICE_ERR_PARAM;
2243 goto error_param;
2244 }
2245
2246 vsi = pf->vsi[vf->lan_vsi_idx];
2247 if (ice_vsi_manage_vlan_stripping(vsi, true))
2248 aq_ret = ICE_ERR_AQ_ERROR;
2249
2250error_param:
2251 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2252 aq_ret, NULL, 0);
2253}
2254
2255/**
2256 * ice_vc_dis_vlan_stripping
2257 * @vf: pointer to the VF info
2258 *
2259 * Disable VLAN header stripping for a given VF
2260 */
2261static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
2262{
2263 enum ice_status aq_ret = 0;
2264 struct ice_pf *pf = vf->pf;
2265 struct ice_vsi *vsi;
2266
2267 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2268 aq_ret = ICE_ERR_PARAM;
2269 goto error_param;
2270 }
2271
2272 vsi = pf->vsi[vf->lan_vsi_idx];
2273 if (ice_vsi_manage_vlan_stripping(vsi, false))
2274 aq_ret = ICE_ERR_AQ_ERROR;
2275
2276error_param:
2277 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2278 aq_ret, NULL, 0);
2279}
2280
2281/**
2282 * ice_vc_process_vf_msg - Process request from VF
2283 * @pf: pointer to the PF structure
2284 * @event: pointer to the AQ event
2285 *
2286 * called from the common asq/arq handler to
2287 * process request from VF
2288 */
2289void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
2290{
2291 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
2292 s16 vf_id = le16_to_cpu(event->desc.retval);
2293 u16 msglen = event->msg_len;
2294 u8 *msg = event->msg_buf;
2295 struct ice_vf *vf = NULL;
2296 int err = 0;
2297
2298 if (vf_id >= pf->num_alloc_vfs) {
2299 err = -EINVAL;
2300 goto error_handler;
2301 }
2302
2303 vf = &pf->vf[vf_id];
2304
2305 /* Check if VF is disabled. */
2306 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
2307 err = -EPERM;
2308 goto error_handler;
2309 }
2310
2311 /* Perform basic checks on the msg */
2312 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
2313 if (err) {
2314 if (err == VIRTCHNL_ERR_PARAM)
2315 err = -EPERM;
2316 else
2317 err = -EINVAL;
2318 goto error_handler;
2319 }
2320
2321 /* Perform additional checks specific to RSS and Virtchnl */
2322 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
2323 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
2324
2325 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
2326 err = -EINVAL;
2327 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
2328 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2329
2330 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
2331 err = -EINVAL;
2332 }
2333
2334error_handler:
2335 if (err) {
2336 ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0);
2337 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
2338 vf_id, v_opcode, msglen, err);
2339 return;
2340 }
2341
2342 switch (v_opcode) {
2343 case VIRTCHNL_OP_VERSION:
2344 err = ice_vc_get_ver_msg(vf, msg);
2345 break;
2346 case VIRTCHNL_OP_GET_VF_RESOURCES:
2347 err = ice_vc_get_vf_res_msg(vf, msg);
2348 break;
2349 case VIRTCHNL_OP_RESET_VF:
2350 ice_vc_reset_vf_msg(vf);
2351 break;
2352 case VIRTCHNL_OP_ADD_ETH_ADDR:
2353 err = ice_vc_add_mac_addr_msg(vf, msg);
2354 break;
2355 case VIRTCHNL_OP_DEL_ETH_ADDR:
2356 err = ice_vc_del_mac_addr_msg(vf, msg);
2357 break;
2358 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2359 err = ice_vc_cfg_qs_msg(vf, msg);
2360 break;
2361 case VIRTCHNL_OP_ENABLE_QUEUES:
2362 err = ice_vc_ena_qs_msg(vf, msg);
2363 ice_vc_notify_vf_link_state(vf);
2364 break;
2365 case VIRTCHNL_OP_DISABLE_QUEUES:
2366 err = ice_vc_dis_qs_msg(vf, msg);
2367 break;
2368 case VIRTCHNL_OP_REQUEST_QUEUES:
2369 err = ice_vc_request_qs_msg(vf, msg);
2370 break;
2371 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2372 err = ice_vc_cfg_irq_map_msg(vf, msg);
2373 break;
2374 case VIRTCHNL_OP_CONFIG_RSS_KEY:
2375 err = ice_vc_config_rss_key(vf, msg);
2376 break;
2377 case VIRTCHNL_OP_CONFIG_RSS_LUT:
2378 err = ice_vc_config_rss_lut(vf, msg);
2379 break;
2380 case VIRTCHNL_OP_GET_STATS:
2381 err = ice_vc_get_stats_msg(vf, msg);
2382 break;
2383 case VIRTCHNL_OP_ADD_VLAN:
2384 err = ice_vc_add_vlan_msg(vf, msg);
2385 break;
2386 case VIRTCHNL_OP_DEL_VLAN:
2387 err = ice_vc_remove_vlan_msg(vf, msg);
2388 break;
2389 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2390 err = ice_vc_ena_vlan_stripping(vf);
2391 break;
2392 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2393 err = ice_vc_dis_vlan_stripping(vf);
2394 break;
2395 case VIRTCHNL_OP_UNKNOWN:
2396 default:
2397 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
2398 v_opcode, vf_id);
2399 err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
2400 NULL, 0);
2401 break;
2402 }
2403 if (err) {
2404 /* Helper function cares less about error return values here
2405 * as it is busy with pending work.
2406 */
2407 dev_info(&pf->pdev->dev,
2408			"PF failed to honor VF %d, opcode %d, error %d\n",
2409 vf_id, v_opcode, err);
2410 }
2411}
2412
2413/**
2414 * ice_get_vf_cfg
2415 * @netdev: network interface device structure
2416 * @vf_id: VF identifier
2417 * @ivi: VF configuration structure
2418 *
2419 * return VF configuration
2420 */
2421int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
2422 struct ifla_vf_info *ivi)
2423{
2424 struct ice_netdev_priv *np = netdev_priv(netdev);
2425 struct ice_vsi *vsi = np->vsi;
2426 struct ice_pf *pf = vsi->back;
2427 struct ice_vf *vf;
2428
2429 /* validate the request */
2430 if (vf_id >= pf->num_alloc_vfs) {
2431 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
2432 return -EINVAL;
2433 }
2434
2435 vf = &pf->vf[vf_id];
2436 vsi = pf->vsi[vf->lan_vsi_idx];
2437
2438 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2439 netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
2440 return -EBUSY;
2441 }
2442
2443 ivi->vf = vf_id;
2444 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
2445
2446 /* VF configuration for VLAN and applicable QoS */
2447 ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
2448 ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
2449 ICE_VLAN_PRIORITY_S;
2450
2451 ivi->trusted = vf->trusted;
2452 ivi->spoofchk = vf->spoofchk;
2453 if (!vf->link_forced)
2454 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
2455 else if (vf->link_up)
2456 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2457 else
2458 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2459 ivi->max_tx_rate = vf->tx_rate;
2460 ivi->min_tx_rate = 0;
2461 return 0;
2462}
2463
2464/**
2465 * ice_set_vf_spoofchk
2466 * @netdev: network interface device structure
2467 * @vf_id: VF identifier
2468 * @ena: flag to enable or disable feature
2469 *
2470 * Enable or disable VF spoof checking
2471 */
2472int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2473{
2474 struct ice_netdev_priv *np = netdev_priv(netdev);
2475 struct ice_vsi_ctx ctx = { 0 };
2476 struct ice_vsi *vsi = np->vsi;
2477 struct ice_pf *pf = vsi->back;
2478 struct ice_vf *vf;
2479 int status;
2480
2481 /* validate the request */
2482 if (vf_id >= pf->num_alloc_vfs) {
2483 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
2484 return -EINVAL;
2485 }
2486
2487 vf = &pf->vf[vf_id];
2488 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2489 netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
2490 return -EBUSY;
2491 }
2492
2493 if (ena == vf->spoofchk) {
2494 dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
2495 ena ? "ON" : "OFF");
2496 return 0;
2497 }
2498
2499 ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2500
2501 if (ena) {
2502 ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
2503 ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
2504 }
2505
2506 status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL);
2507 if (status) {
2508 dev_dbg(&pf->pdev->dev,
2509			"Error %d, failed to update VSI parameters\n", status);
2510 return -EIO;
2511 }
2512
2513 vf->spoofchk = ena;
2514 vsi->info.sec_flags = ctx.info.sec_flags;
2515 vsi->info.sw_flags2 = ctx.info.sw_flags2;
2516
2517 return status;
2518}
2519
2520/**
2521 * ice_set_vf_mac
2522 * @netdev: network interface device structure
2523 * @vf_id: VF identifier
2524 * @mac: mac address
2525 *
2526 * program VF mac address
2527 */
2528int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2529{
2530 struct ice_netdev_priv *np = netdev_priv(netdev);
2531 struct ice_vsi *vsi = np->vsi;
2532 struct ice_pf *pf = vsi->back;
2533 struct ice_vf *vf;
2534 int ret = 0;
2535
2536 /* validate the request */
2537 if (vf_id >= pf->num_alloc_vfs) {
2538 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
2539 return -EINVAL;
2540 }
2541
2542 vf = &pf->vf[vf_id];
2543 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2544 netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
2545 return -EBUSY;
2546 }
2547
2548 if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
2549 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
2550 return -EINVAL;
2551 }
2552
2553 /* copy mac into dflt_lan_addr and trigger a VF reset. The reset
2554 * flow will use the updated dflt_lan_addr and add a MAC filter
2555 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
2556 * set the MAC address for this VF.
2557 */
2558 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
2559 vf->pf_set_mac = true;
2560 netdev_info(netdev,
2561		    "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
2562 vf_id, mac);
2563
2564 ice_vc_dis_vf(vf);
2565 return ret;
2566}
2567
2568/**
2569 * ice_set_vf_trust
2570 * @netdev: network interface device structure
2571 * @vf_id: VF identifier
2572 * @trusted: Boolean value to enable/disable trusted VF
2573 *
2574 * Enable or disable a given VF as trusted
2575 */
2576int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
2577{
2578 struct ice_netdev_priv *np = netdev_priv(netdev);
2579 struct ice_vsi *vsi = np->vsi;
2580 struct ice_pf *pf = vsi->back;
2581 struct ice_vf *vf;
2582
2583 /* validate the request */
2584 if (vf_id >= pf->num_alloc_vfs) {
2585 dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
2586 return -EINVAL;
2587 }
2588
2589 vf = &pf->vf[vf_id];
2590 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2591 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
2592 return -EBUSY;
2593 }
2594
2595 /* Check if already trusted */
2596 if (trusted == vf->trusted)
2597 return 0;
2598
2599 vf->trusted = trusted;
2600 ice_vc_dis_vf(vf);
2601 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
2602 vf_id, trusted ? "" : "un");
2603
2604 return 0;
2605}
2606
2607/**
2608 * ice_set_vf_link_state
2609 * @netdev: network interface device structure
2610 * @vf_id: VF identifier
2611 * @link_state: required link state
2612 *
2613 * Set VF's link state, irrespective of physical link state status
2614 */
2615int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
2616{
2617 struct ice_netdev_priv *np = netdev_priv(netdev);
2618 struct ice_pf *pf = np->vsi->back;
2619 struct virtchnl_pf_event pfe = { 0 };
2620 struct ice_link_status *ls;
2621 struct ice_vf *vf;
2622 struct ice_hw *hw;
2623
2624 if (vf_id >= pf->num_alloc_vfs) {
2625 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2626 return -EINVAL;
2627 }
2628
2629 vf = &pf->vf[vf_id];
2630 hw = &pf->hw;
2631 ls = &pf->hw.port_info->phy.link_info;
2632
2633 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2634 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
2635 return -EBUSY;
2636 }
2637
2638 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
2639 pfe.severity = PF_EVENT_SEVERITY_INFO;
2640
2641 switch (link_state) {
2642 case IFLA_VF_LINK_STATE_AUTO:
2643 vf->link_forced = false;
2644 vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
2645 break;
2646 case IFLA_VF_LINK_STATE_ENABLE:
2647 vf->link_forced = true;
2648 vf->link_up = true;
2649 break;
2650 case IFLA_VF_LINK_STATE_DISABLE:
2651 vf->link_forced = true;
2652 vf->link_up = false;
2653 break;
2654 default:
2655 return -EINVAL;
2656 }
2657
2658 if (vf->link_forced)
2659 ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
2660 else
2661 ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
2662
2663 /* Notify the VF of its new link state */
2664 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
2665 sizeof(pfe), NULL);
2666
2667 return 0;
2668}
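
The switch above implements a tri-state: AUTO tracks the PF's physical link, while ENABLE/DISABLE force the VF link up or down regardless of it, and the result is then pushed to the VF as a VIRTCHNL_OP_EVENT. A standalone sketch of that bookkeeping; the 0/1/2 values mirror IFLA_VF_LINK_STATE_AUTO/ENABLE/DISABLE from <uapi/linux/if_link.h>.

	#include <stdbool.h>
	#include <stdio.h>

	enum { LINK_STATE_AUTO, LINK_STATE_ENABLE, LINK_STATE_DISABLE };

	struct vf_link {
		bool forced;
		bool up;
	};

	static int set_vf_link_state(struct vf_link *vf, int state, bool phys_up)
	{
		switch (state) {
		case LINK_STATE_AUTO:
			vf->forced = false;
			vf->up = phys_up;	/* follow the physical port */
			break;
		case LINK_STATE_ENABLE:
			vf->forced = true;
			vf->up = true;
			break;
		case LINK_STATE_DISABLE:
			vf->forced = true;
			vf->up = false;
			break;
		default:
			return -1;		/* -EINVAL in the driver */
		}
		return 0;
	}

	int main(void)
	{
		struct vf_link vf = { 0 };

		set_vf_link_state(&vf, LINK_STATE_DISABLE, true);
		printf("forced=%d up=%d\n", vf.forced, vf.up);
		return 0;
	}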
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
new file mode 100644
index 000000000000..10131e0180f9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -0,0 +1,173 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2018, Intel Corporation. */
3
4#ifndef _ICE_VIRTCHNL_PF_H_
5#define _ICE_VIRTCHNL_PF_H_
6#include "ice.h"
7
8#define ICE_MAX_VLANID 4095
9#define ICE_VLAN_PRIORITY_S 12
10#define ICE_VLAN_M 0xFFF
11#define ICE_PRIORITY_M 0x7000
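
Per these masks, bits 11:0 of the 16-bit port VLAN word carry the VLAN ID and bits 14:12 the QoS priority. A small sketch composing that word from the two fields, mirroring how ice_set_vf_port_vlan() (outside this hunk) appears to combine them:

	#include <stdint.h>
	#include <stdio.h>

	#define ICE_VLAN_PRIORITY_S	12
	#define ICE_VLAN_M		0xFFF
	#define ICE_PRIORITY_M		0x7000

	static uint16_t port_vlan_word(uint16_t vid, uint8_t qos)
	{
		return (vid & ICE_VLAN_M) |
		       (((uint16_t)qos << ICE_VLAN_PRIORITY_S) & ICE_PRIORITY_M);
	}

	int main(void)
	{
		/* VLAN 100, priority 5 -> 0x5064 */
		printf("0x%04x\n", (unsigned)port_vlan_word(100, 5));
		return 0;
	}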
12
13/* Restrict the number of MAC addresses and VLANs a non-trusted VF can program */
14#define ICE_MAX_VLAN_PER_VF 8
15#define ICE_MAX_MACADDR_PER_VF 12
16
17/* Malicious Driver Detection */
18#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3
19#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
20
21/* Static VF transaction/status register def */
22#define VF_DEVICE_STATUS 0xAA
23#define VF_TRANS_PENDING_M 0x20
24
25/* Specific VF states */
26enum ice_vf_states {
27 ICE_VF_STATE_INIT = 0,
28 ICE_VF_STATE_ACTIVE,
29 ICE_VF_STATE_ENA,
30 ICE_VF_STATE_DIS,
31 ICE_VF_STATE_MC_PROMISC,
32 ICE_VF_STATE_UC_PROMISC,
33 /* state to indicate that the PF still needs to do vector assignment
34 * for this VF. Set during first-time VF initialization, or later when
35 * the VF requests more vectors through a virtchnl op.
36 */
37 ICE_VF_STATE_CFG_INTR,
38 ICE_VF_STATES_NBITS
39};
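
vf_states is consumed through DECLARE_BITMAP()/set_bit()/test_bit(); the -EBUSY bail-outs in the .c file above all test ICE_VF_STATE_INIT. A non-atomic userspace stand-in for that bitmap pattern (the kernel's bit helpers are atomic, which this sketch does not reproduce):

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum vf_state { STATE_INIT, STATE_ACTIVE, STATE_DIS, STATES_NBITS };

	#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
	#define BITMAP_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	static void bm_set(unsigned long *bm, unsigned int bit)
	{
		bm[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
	}

	static bool bm_test(const unsigned long *bm, unsigned int bit)
	{
		return bm[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG));
	}

	int main(void)
	{
		unsigned long vf_states[BITMAP_LONGS(STATES_NBITS)] = { 0 };

		bm_set(vf_states, STATE_INIT);
		/* the ndo handlers above return -EBUSY when this test fails */
		printf("INIT set: %d\n", bm_test(vf_states, STATE_INIT));
		return 0;
	}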
40
41/* VF capabilities */
42enum ice_virtchnl_cap {
43 ICE_VIRTCHNL_VF_CAP_L2 = 0,
44 ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
45};
46
47/* VF information structure */
48struct ice_vf {
49 struct ice_pf *pf;
50
51 s16 vf_id; /* VF id in the PF space */
52 u32 driver_caps; /* reported by VF driver */
53 int first_vector_idx; /* first vector index of this VF */
54 struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */
55 struct virtchnl_version_info vf_ver;
56 struct virtchnl_ether_addr dflt_lan_addr;
57 u16 port_vlan_id;
58 u8 pf_set_mac; /* VF MAC address set by VMM admin */
59 u8 trusted;
60 u16 lan_vsi_idx; /* index into PF struct */
61 u16 lan_vsi_num; /* ID as used by firmware */
62 u64 num_mdd_events; /* number of MDD events detected */
63 u64 num_inval_msgs; /* number of consecutive invalid msgs */
64 u64 num_valid_msgs; /* number of valid msgs detected */
65 unsigned long vf_caps; /* VF's adv. capabilities */
66 DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
67 unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
68 u8 link_forced;
69 u8 link_up; /* only valid if VF link is forced */
70 u8 spoofchk;
71 u16 num_mac;
72 u16 num_vlan;
73 u8 num_req_qs; /* num of queue pairs requested by VF */
74};
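
The num_mdd_events and num_inval_msgs counters pair with the ICE_DFLT_NUM_MDD_EVENTS_ALLOWED and ICE_DFLT_NUM_INVAL_MSGS_ALLOWED thresholds defined earlier in this header; the actual comparison happens in the .c file, outside this hunk. A hedged sketch of the consecutive-invalid-message accounting that the "consecutive invalid msgs" comment implies (vf_note_msg is a made-up name):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NUM_INVAL_MSGS_ALLOWED	10	/* ICE_DFLT_NUM_INVAL_MSGS_ALLOWED */

	struct vf_msg_stats {
		uint64_t num_inval_msgs;
		uint64_t num_valid_msgs;
	};

	/* Returns true once the run of invalid messages exceeds the
	 * allowance; a valid message resets the run.
	 */
	static bool vf_note_msg(struct vf_msg_stats *s, bool valid)
	{
		if (valid) {
			s->num_valid_msgs++;
			s->num_inval_msgs = 0;
			return false;
		}
		return ++s->num_inval_msgs > NUM_INVAL_MSGS_ALLOWED;
	}

	int main(void)
	{
		struct vf_msg_stats s = { 0 };

		for (int i = 0; i < 11; i++)
			if (vf_note_msg(&s, false))
				printf("allowance exceeded at msg %d\n", i + 1);
		return 0;
	}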
75
76#ifdef CONFIG_PCI_IOV
77void ice_process_vflr_event(struct ice_pf *pf);
78int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
79int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
80int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
81 struct ifla_vf_info *ivi);
82
83void ice_free_vfs(struct ice_pf *pf);
84void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
85void ice_vc_notify_link_state(struct ice_pf *pf);
86void ice_vc_notify_reset(struct ice_pf *pf);
87bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
88
89int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
90 u16 vlan_id, u8 qos, __be16 vlan_proto);
91
92int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
93 int max_tx_rate);
94
95int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
96
97int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
98
99int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
100#else /* CONFIG_PCI_IOV */
101#define ice_process_vflr_event(pf) do {} while (0)
102#define ice_free_vfs(pf) do {} while (0)
103#define ice_vc_process_vf_msg(pf, event) do {} while (0)
104#define ice_vc_notify_link_state(pf) do {} while (0)
105#define ice_vc_notify_reset(pf) do {} while (0)
106
107static inline bool
108ice_reset_all_vfs(struct ice_pf __always_unused *pf,
109 bool __always_unused is_vflr)
110{
111 return true;
112}
113
114static inline int
115ice_sriov_configure(struct pci_dev __always_unused *pdev,
116 int __always_unused num_vfs)
117{
118 return -EOPNOTSUPP;
119}
120
121static inline int
122ice_set_vf_mac(struct net_device __always_unused *netdev,
123 int __always_unused vf_id, u8 __always_unused *mac)
124{
125 return -EOPNOTSUPP;
126}
127
128static inline int
129ice_get_vf_cfg(struct net_device __always_unused *netdev,
130 int __always_unused vf_id,
131 struct ifla_vf_info __always_unused *ivi)
132{
133 return -EOPNOTSUPP;
134}
135
136static inline int
137ice_set_vf_trust(struct net_device __always_unused *netdev,
138 int __always_unused vf_id, bool __always_unused trusted)
139{
140 return -EOPNOTSUPP;
141}
142
143static inline int
144ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
145 int __always_unused vf_id, u16 __always_unused vid,
146 u8 __always_unused qos, __be16 __always_unused v_proto)
147{
148 return -EOPNOTSUPP;
149}
150
151static inline int
152ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
153 int __always_unused vf_id, bool __always_unused ena)
154{
155 return -EOPNOTSUPP;
156}
157
158static inline int
159ice_set_vf_link_state(struct net_device __always_unused *netdev,
160 int __always_unused vf_id, int __always_unused link_state)
161{
162 return -EOPNOTSUPP;
163}
164
165static inline int
166ice_set_vf_bw(struct net_device __always_unused *netdev,
167 int __always_unused vf_id, int __always_unused min_tx_rate,
168 int __always_unused max_tx_rate)
169{
170 return -EOPNOTSUPP;
171}
172#endif /* CONFIG_PCI_IOV */
173#endif /* _ICE_VIRTCHNL_PF_H_ */