about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorAnirudh Venkataramanan <anirudh.venkataramanan@intel.com>2018-09-19 20:42:55 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2018-10-03 10:42:29 -0400
commitddf30f7ff840d4467ef45ec0b443575f9e95bec6 (patch)
treec9059b346847f2818907c2857ea2db3860e51419 /drivers
parent75d2b253026b8b1cb625f6ccdb9d54cdecae7935 (diff)
ice: Add handler to configure SR-IOV
This patch implements parts of ice_sriov_configure and VF reset flow.

To create virtual functions (VFs), the user sets a value in num_vfs
through sysfs. This results in the kernel calling the handler for
.sriov_configure which is ice_sriov_configure.

VF setup first starts with a VF reset, followed by allocation of the VF
VSI using ice_vf_vsi_setup. Once the VF setup is complete a state bit
ICE_VF_STATE_INIT is set in the vf->states bitmap to indicate that the
VF is ready to go.

Also for VF reset to go into effect, it's necessary to issue a disable
queue command (ice_aqc_opc_dis_txqs). So this patch updates multiple
functions in the disable queue flow to take additional parameters that
distinguish if queues are being disabled due to VF reset.

Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c56
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c847
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h74
11 files changed, 1061 insertions, 9 deletions
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 45125bd074d9..1999cd09239e 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -16,3 +16,4 @@ ice-y := ice_main.o \
16 ice_lib.o \ 16 ice_lib.o \
17 ice_txrx.o \ 17 ice_txrx.o \
18 ice_ethtool.o 18 ice_ethtool.o
19ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 639d45d1da49..f788cd63237a 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -28,6 +28,7 @@
28#include <linux/ip.h> 28#include <linux/ip.h>
29#include <linux/ipv6.h> 29#include <linux/ipv6.h>
30#include <linux/if_bridge.h> 30#include <linux/if_bridge.h>
31#include <linux/avf/virtchnl.h>
31#include <net/ipv6.h> 32#include <net/ipv6.h>
32#include "ice_devids.h" 33#include "ice_devids.h"
33#include "ice_type.h" 34#include "ice_type.h"
@@ -35,6 +36,7 @@
35#include "ice_switch.h" 36#include "ice_switch.h"
36#include "ice_common.h" 37#include "ice_common.h"
37#include "ice_sched.h" 38#include "ice_sched.h"
39#include "ice_virtchnl_pf.h"
38 40
39extern const char ice_drv_ver[]; 41extern const char ice_drv_ver[];
40#define ICE_BAR0 0 42#define ICE_BAR0 0
@@ -65,6 +67,12 @@ extern const char ice_drv_ver[];
65#define ICE_INVAL_Q_INDEX 0xffff 67#define ICE_INVAL_Q_INDEX 0xffff
66#define ICE_INVAL_VFID 256 68#define ICE_INVAL_VFID 256
67#define ICE_MAX_VF_COUNT 256 69#define ICE_MAX_VF_COUNT 256
70#define ICE_MAX_QS_PER_VF 256
71#define ICE_MIN_QS_PER_VF 1
72#define ICE_DFLT_QS_PER_VF 4
73#define ICE_MAX_INTR_PER_VF 65
74#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
75#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
68 76
69#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) 77#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
70 78
@@ -135,10 +143,20 @@ enum ice_state {
135 __ICE_EMPR_RECV, /* set by OICR handler */ 143 __ICE_EMPR_RECV, /* set by OICR handler */
136 __ICE_SUSPENDED, /* set on module remove path */ 144 __ICE_SUSPENDED, /* set on module remove path */
137 __ICE_RESET_FAILED, /* set by reset/rebuild */ 145 __ICE_RESET_FAILED, /* set by reset/rebuild */
146 /* When checking for the PF to be in a nominal operating state, the
147 * bits that are grouped at the beginning of the list need to be
148 * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
149 * be checked. If you need to add a bit into consideration for nominal
150 * operating state, it must be added before
151 * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
152 * without appropriate consideration.
153 */
154 __ICE_STATE_NOMINAL_CHECK_BITS,
138 __ICE_ADMINQ_EVENT_PENDING, 155 __ICE_ADMINQ_EVENT_PENDING,
139 __ICE_MAILBOXQ_EVENT_PENDING, 156 __ICE_MAILBOXQ_EVENT_PENDING,
140 __ICE_MDD_EVENT_PENDING, 157 __ICE_MDD_EVENT_PENDING,
141 __ICE_FLTR_OVERFLOW_PROMISC, 158 __ICE_FLTR_OVERFLOW_PROMISC,
159 __ICE_VF_DIS,
142 __ICE_CFG_BUSY, 160 __ICE_CFG_BUSY,
143 __ICE_SERVICE_SCHED, 161 __ICE_SERVICE_SCHED,
144 __ICE_SERVICE_DIS, 162 __ICE_SERVICE_DIS,
@@ -243,6 +261,7 @@ enum ice_pf_flags {
243 ICE_FLAG_MSIX_ENA, 261 ICE_FLAG_MSIX_ENA,
244 ICE_FLAG_FLTR_SYNC, 262 ICE_FLAG_FLTR_SYNC,
245 ICE_FLAG_RSS_ENA, 263 ICE_FLAG_RSS_ENA,
264 ICE_FLAG_SRIOV_ENA,
246 ICE_FLAG_SRIOV_CAPABLE, 265 ICE_FLAG_SRIOV_CAPABLE,
247 ICE_PF_FLAGS_NBITS /* must be last */ 266 ICE_PF_FLAGS_NBITS /* must be last */
248}; 267};
@@ -259,7 +278,12 @@ struct ice_pf {
259 278
260 struct ice_vsi **vsi; /* VSIs created by the driver */ 279 struct ice_vsi **vsi; /* VSIs created by the driver */
261 struct ice_sw *first_sw; /* first switch created by firmware */ 280 struct ice_sw *first_sw; /* first switch created by firmware */
281 /* Virtchnl/SR-IOV config info */
282 struct ice_vf *vf;
283 int num_alloc_vfs; /* actual number of VFs allocated */
262 u16 num_vfs_supported; /* num VFs supported for this PF */ 284 u16 num_vfs_supported; /* num VFs supported for this PF */
285 u16 num_vf_qps; /* num queue pairs per VF */
286 u16 num_vf_msix; /* num vectors per VF */
263 DECLARE_BITMAP(state, __ICE_STATE_NBITS); 287 DECLARE_BITMAP(state, __ICE_STATE_NBITS);
264 DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS); 288 DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
265 DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS); 289 DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 0fe054e4bfb8..c52f450f2c0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2287,6 +2287,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2287 * @num_qgrps: number of groups in the list 2287 * @num_qgrps: number of groups in the list
2288 * @qg_list: the list of groups to disable 2288 * @qg_list: the list of groups to disable
2289 * @buf_size: the total size of the qg_list buffer in bytes 2289 * @buf_size: the total size of the qg_list buffer in bytes
2290 * @rst_src: if called due to reset, specifies the RST source
2291 * @vmvf_num: the relative VM or VF number that is undergoing the reset
2290 * @cd: pointer to command details structure or NULL 2292 * @cd: pointer to command details structure or NULL
2291 * 2293 *
2292 * Disable LAN Tx queue (0x0C31) 2294 * Disable LAN Tx queue (0x0C31)
@@ -2294,6 +2296,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2294static enum ice_status 2296static enum ice_status
2295ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 2297ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2296 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 2298 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
2299 enum ice_disq_rst_src rst_src, u16 vmvf_num,
2297 struct ice_sq_cd *cd) 2300 struct ice_sq_cd *cd)
2298{ 2301{
2299 struct ice_aqc_dis_txqs *cmd; 2302 struct ice_aqc_dis_txqs *cmd;
@@ -2303,14 +2306,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2303 cmd = &desc.params.dis_txqs; 2306 cmd = &desc.params.dis_txqs;
2304 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 2307 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
2305 2308
2306 if (!qg_list) 2309 /* qg_list can be NULL only in VM/VF reset flow */
2310 if (!qg_list && !rst_src)
2307 return ICE_ERR_PARAM; 2311 return ICE_ERR_PARAM;
2308 2312
2309 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 2313 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
2310 return ICE_ERR_PARAM; 2314 return ICE_ERR_PARAM;
2311 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2315
2312 cmd->num_entries = num_qgrps; 2316 cmd->num_entries = num_qgrps;
2313 2317
2318 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
2319 ICE_AQC_Q_DIS_TIMEOUT_M);
2320
2321 switch (rst_src) {
2322 case ICE_VM_RESET:
2323 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
2324 cmd->vmvf_and_timeout |=
2325 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
2326 break;
2327 case ICE_VF_RESET:
2328 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
2329 /* In this case, FW expects vmvf_num to be absolute VF id */
2330 cmd->vmvf_and_timeout |=
2331 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
2332 ICE_AQC_Q_DIS_VMVF_NUM_M);
2333 break;
2334 case ICE_NO_RESET:
2335 default:
2336 break;
2337 }
2338
2339 /* If no queue group info, we are in a reset flow. Issue the AQ */
2340 if (!qg_list)
2341 goto do_aq;
2342
2343 /* set RD bit to indicate that command buffer is provided by the driver
2344 * and it needs to be read by the firmware
2345 */
2346 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2347
2314 for (i = 0; i < num_qgrps; ++i) { 2348 for (i = 0; i < num_qgrps; ++i) {
2315 /* Calculate the size taken up by the queue IDs in this group */ 2349 /* Calculate the size taken up by the queue IDs in this group */
2316 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); 2350 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
@@ -2326,6 +2360,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
2326 if (buf_size != sz) 2360 if (buf_size != sz)
2327 return ICE_ERR_PARAM; 2361 return ICE_ERR_PARAM;
2328 2362
2363do_aq:
2329 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 2364 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
2330} 2365}
2331 2366
@@ -2632,13 +2667,16 @@ ena_txq_exit:
2632 * @num_queues: number of queues 2667 * @num_queues: number of queues
2633 * @q_ids: pointer to the q_id array 2668 * @q_ids: pointer to the q_id array
2634 * @q_teids: pointer to queue node teids 2669 * @q_teids: pointer to queue node teids
2670 * @rst_src: if called due to reset, specifies the RST source
2671 * @vmvf_num: the relative VM or VF number that is undergoing the reset
2635 * @cd: pointer to command details structure or NULL 2672 * @cd: pointer to command details structure or NULL
2636 * 2673 *
2637 * This function removes queues and their corresponding nodes in SW DB 2674 * This function removes queues and their corresponding nodes in SW DB
2638 */ 2675 */
2639enum ice_status 2676enum ice_status
2640ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, 2677ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
2641 u32 *q_teids, struct ice_sq_cd *cd) 2678 u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
2679 struct ice_sq_cd *cd)
2642{ 2680{
2643 enum ice_status status = ICE_ERR_DOES_NOT_EXIST; 2681 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2644 struct ice_aqc_dis_txq_item qg_list; 2682 struct ice_aqc_dis_txq_item qg_list;
@@ -2647,6 +2685,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
2647 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 2685 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2648 return ICE_ERR_CFG; 2686 return ICE_ERR_CFG;
2649 2687
2688 /* if queue is disabled already yet the disable queue command has to be
2689 * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
2690 * any queue information
2691 */
2692
2693 if (!num_queues && rst_src)
2694 return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
2695 NULL);
2696
2650 mutex_lock(&pi->sched_lock); 2697 mutex_lock(&pi->sched_lock);
2651 2698
2652 for (i = 0; i < num_queues; i++) { 2699 for (i = 0; i < num_queues; i++) {
@@ -2659,7 +2706,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
2659 qg_list.num_qs = 1; 2706 qg_list.num_qs = 1;
2660 qg_list.q_id[0] = cpu_to_le16(q_ids[i]); 2707 qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
2661 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, 2708 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
2662 sizeof(qg_list), cd); 2709 sizeof(qg_list), rst_src, vmvf_num,
2710 cd);
2663 2711
2664 if (status) 2712 if (status)
2665 break; 2713 break;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 7b2a5bb2e550..1900681289a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -7,6 +7,7 @@
7#include "ice.h" 7#include "ice.h"
8#include "ice_type.h" 8#include "ice_type.h"
9#include "ice_switch.h" 9#include "ice_switch.h"
10#include <linux/avf/virtchnl.h>
10 11
11void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, 12void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
12 u16 buf_len); 13 u16 buf_len);
@@ -89,7 +90,8 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
89 struct ice_sq_cd *cd); 90 struct ice_sq_cd *cd);
90enum ice_status 91enum ice_status
91ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, 92ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
92 u32 *q_teids, struct ice_sq_cd *cmd_details); 93 u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
94 struct ice_sq_cd *cmd_details);
93enum ice_status 95enum ice_status
94ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 96ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
95 u16 *max_lanqs); 97 u16 *max_lanqs);
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index c2d867b756ef..b676b3151d04 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -90,10 +90,16 @@
90#define GLGEN_RTRIG_CORER_M BIT(0) 90#define GLGEN_RTRIG_CORER_M BIT(0)
91#define GLGEN_RTRIG_GLOBR_M BIT(1) 91#define GLGEN_RTRIG_GLOBR_M BIT(1)
92#define GLGEN_STAT 0x000B612C 92#define GLGEN_STAT 0x000B612C
93#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4))
93#define PFGEN_CTRL 0x00091000 94#define PFGEN_CTRL 0x00091000
94#define PFGEN_CTRL_PFSWR_M BIT(0) 95#define PFGEN_CTRL_PFSWR_M BIT(0)
95#define PFGEN_STATE 0x00088000 96#define PFGEN_STATE 0x00088000
96#define PRTGEN_STATUS 0x000B8100 97#define PRTGEN_STATUS 0x000B8100
98#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4))
99#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4))
100#define VPGEN_VFRSTAT_VFRD_M BIT(0)
101#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4))
102#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
97#define PFHMC_ERRORDATA 0x00520500 103#define PFHMC_ERRORDATA 0x00520500
98#define PFHMC_ERRORINFO 0x00520400 104#define PFHMC_ERRORINFO 0x00520400
99#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) 105#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
@@ -106,6 +112,13 @@
106#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) 112#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
107#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) 113#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4))
108#define GLINT_RATE_INTRL_ENA_M BIT(6) 114#define GLINT_RATE_INTRL_ENA_M BIT(6)
115#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4))
116#define GLINT_VECT2FUNC_VF_NUM_S 0
117#define GLINT_VECT2FUNC_VF_NUM_M ICE_M(0xFF, 0)
118#define GLINT_VECT2FUNC_PF_NUM_S 12
119#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12)
120#define GLINT_VECT2FUNC_IS_PF_S 16
121#define GLINT_VECT2FUNC_IS_PF_M BIT(16)
109#define PFINT_FW_CTL 0x0016C800 122#define PFINT_FW_CTL 0x0016C800
110#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) 123#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
111#define PFINT_FW_CTL_ITR_INDX_S 11 124#define PFINT_FW_CTL_ITR_INDX_S 11
@@ -137,6 +150,12 @@
137#define QINT_TQCTL_MSIX_INDX_S 0 150#define QINT_TQCTL_MSIX_INDX_S 0
138#define QINT_TQCTL_ITR_INDX_S 11 151#define QINT_TQCTL_ITR_INDX_S 11
139#define QINT_TQCTL_CAUSE_ENA_M BIT(30) 152#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
153#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4))
154#define VPINT_ALLOC_FIRST_S 0
155#define VPINT_ALLOC_FIRST_M ICE_M(0x7FF, 0)
156#define VPINT_ALLOC_LAST_S 12
157#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12)
158#define VPINT_ALLOC_VALID_M BIT(31)
140#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) 159#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
141#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) 160#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
142#define QRX_CTRL_MAX_INDEX 2047 161#define QRX_CTRL_MAX_INDEX 2047
@@ -149,6 +168,20 @@
149#define QRX_TAIL_MAX_INDEX 2047 168#define QRX_TAIL_MAX_INDEX 2047
150#define QRX_TAIL_TAIL_S 0 169#define QRX_TAIL_TAIL_S 0
151#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0) 170#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0)
171#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4))
172#define VPLAN_RX_QBASE_VFFIRSTQ_S 0
173#define VPLAN_RX_QBASE_VFFIRSTQ_M ICE_M(0x7FF, 0)
174#define VPLAN_RX_QBASE_VFNUMQ_S 16
175#define VPLAN_RX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
176#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4))
177#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0)
178#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4))
179#define VPLAN_TX_QBASE_VFFIRSTQ_S 0
180#define VPLAN_TX_QBASE_VFFIRSTQ_M ICE_M(0x3FFF, 0)
181#define VPLAN_TX_QBASE_VFNUMQ_S 16
182#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
183#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
184#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
152#define GL_MDET_RX 0x00294C00 185#define GL_MDET_RX 0x00294C00
153#define GL_MDET_RX_QNUM_S 0 186#define GL_MDET_RX_QNUM_S 0
154#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0) 187#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
@@ -196,6 +229,9 @@
196#define PF_FUNC_RID 0x0009E880 229#define PF_FUNC_RID 0x0009E880
197#define PF_FUNC_RID_FUNC_NUM_S 0 230#define PF_FUNC_RID_FUNC_NUM_S 0
198#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0) 231#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
232#define PF_PCI_CIAA 0x0009E580
233#define PF_PCI_CIAA_VF_NUM_S 12
234#define PF_PCI_CIAD 0x0009E500
199#define GL_PWR_MODE_CTL 0x000B820C 235#define GL_PWR_MODE_CTL 0x000B820C
200#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 236#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
201#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) 237#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
@@ -276,5 +312,7 @@
276#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) 312#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
277#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) 313#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
278#define VSIQF_HKEY_MAX_INDEX 12 314#define VSIQF_HKEY_MAX_INDEX 12
315#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
316#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
279 317
280#endif /* _ICE_HW_AUTOGEN_H_ */ 318#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index acf3478a3f3b..4b26705a9ab5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1784,8 +1784,11 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
1784/** 1784/**
1785 * ice_vsi_stop_tx_rings - Disable Tx rings 1785 * ice_vsi_stop_tx_rings - Disable Tx rings
1786 * @vsi: the VSI being configured 1786 * @vsi: the VSI being configured
1787 * @rst_src: reset source
1788 * @rel_vmvf_num: Relative id of VF/VM
1787 */ 1789 */
1788int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) 1790int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1791 u16 rel_vmvf_num)
1789{ 1792{
1790 struct ice_pf *pf = vsi->back; 1793 struct ice_pf *pf = vsi->back;
1791 struct ice_hw *hw = &pf->hw; 1794 struct ice_hw *hw = &pf->hw;
@@ -1837,7 +1840,7 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
1837 GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); 1840 GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
1838 } 1841 }
1839 status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, 1842 status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
1840 NULL); 1843 rst_src, rel_vmvf_num, NULL);
1841 /* if the disable queue command was exercised during an active reset 1844 /* if the disable queue command was exercised during an active reset
1842 * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as 1845 * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
1843 * the reset operation disables queues at the hardware level anyway. 1846 * the reset operation disables queues at the hardware level anyway.
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 2617afe01c82..677db40338f5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
31 31
32int ice_vsi_stop_rx_rings(struct ice_vsi *vsi); 32int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
33 33
34int ice_vsi_stop_tx_rings(struct ice_vsi *vsi); 34int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
35 u16 rel_vmvf_num);
35 36
36int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena); 37int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
37 38
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 3fd3bb783707..5b8c950d219a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2185,6 +2185,8 @@ static void ice_remove(struct pci_dev *pdev)
2185 set_bit(__ICE_DOWN, pf->state); 2185 set_bit(__ICE_DOWN, pf->state);
2186 ice_service_task_stop(pf); 2186 ice_service_task_stop(pf);
2187 2187
2188 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
2189 ice_free_vfs(pf);
2188 ice_vsi_release_all(pf); 2190 ice_vsi_release_all(pf);
2189 ice_free_irq_msix_misc(pf); 2191 ice_free_irq_msix_misc(pf);
2190 ice_for_each_vsi(pf, i) { 2192 ice_for_each_vsi(pf, i) {
@@ -2220,6 +2222,7 @@ static struct pci_driver ice_driver = {
2220 .id_table = ice_pci_tbl, 2222 .id_table = ice_pci_tbl,
2221 .probe = ice_probe, 2223 .probe = ice_probe,
2222 .remove = ice_remove, 2224 .remove = ice_remove,
2225 .sriov_configure = ice_sriov_configure,
2223}; 2226};
2224 2227
2225/** 2228/**
@@ -2955,7 +2958,7 @@ int ice_down(struct ice_vsi *vsi)
2955 } 2958 }
2956 2959
2957 ice_vsi_dis_irq(vsi); 2960 ice_vsi_dis_irq(vsi);
2958 tx_err = ice_vsi_stop_tx_rings(vsi); 2961 tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
2959 if (tx_err) 2962 if (tx_err)
2960 netdev_err(vsi->netdev, 2963 netdev_err(vsi->netdev,
2961 "Failed stop Tx rings, VSI %d error %d\n", 2964 "Failed stop Tx rings, VSI %d error %d\n",
@@ -3357,6 +3360,7 @@ static void ice_rebuild(struct ice_pf *pf)
3357 goto err_vsi_rebuild; 3360 goto err_vsi_rebuild;
3358 } 3361 }
3359 3362
3363 ice_reset_all_vfs(pf, true);
3360 /* if we get here, reset flow is successful */ 3364 /* if we get here, reset flow is successful */
3361 clear_bit(__ICE_RESET_FAILED, pf->state); 3365 clear_bit(__ICE_RESET_FAILED, pf->state);
3362 return; 3366 return;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 6d053fb5f941..15b3c999006a 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -104,6 +104,15 @@ struct ice_link_status {
104 u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; 104 u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
105}; 105};
106 106
107/* Different reset sources for which a disable queue AQ call has to be made in
108 * order to clean the TX scheduler as a part of the reset
109 */
110enum ice_disq_rst_src {
111 ICE_NO_RESET = 0,
112 ICE_VM_RESET,
113 ICE_VF_RESET,
114};
115
107/* PHY info such as phy_type, etc... */ 116/* PHY info such as phy_type, etc... */
108struct ice_phy_info { 117struct ice_phy_info {
109 struct ice_link_status link_info; 118 struct ice_link_status link_info;
@@ -130,6 +139,7 @@ struct ice_hw_common_caps {
130 139
131 /* Virtualization support */ 140 /* Virtualization support */
132 u8 sr_iov_1_1; /* SR-IOV enabled */ 141 u8 sr_iov_1_1; /* SR-IOV enabled */
142
133 /* RSS related capabilities */ 143 /* RSS related capabilities */
134 u16 rss_table_size; /* 512 for PFs and 64 for VFs */ 144 u16 rss_table_size; /* 512 for PFs and 64 for VFs */
135 u8 rss_table_entry_width; /* RSS Entry width in bits */ 145 u8 rss_table_entry_width; /* RSS Entry width in bits */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
new file mode 100644
index 000000000000..7f041fd785d6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -0,0 +1,847 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice.h"
5#include "ice_lib.h"
6
7/**
8 * ice_get_vf_vector - get VF interrupt vector register offset
9 * @vf_msix: number of MSIx vector per VF on a PF
10 * @vf_id: VF identifier
11 * @i: index of MSIx vector
12 */
13static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
14{
15 return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
16 VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
17}
18
19/**
20 * ice_free_vf_res - Free a VF's resources
21 * @vf: pointer to the VF info
22 */
23static void ice_free_vf_res(struct ice_vf *vf)
24{
25 struct ice_pf *pf = vf->pf;
26 int i, pf_vf_msix;
27
28 /* First, disable VF's configuration API to prevent OS from
29 * accessing the VF's VSI after it's freed or invalidated.
30 */
31 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
32
33 /* free vsi & disconnect it from the parent uplink */
34 if (vf->lan_vsi_idx) {
35 ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
36 vf->lan_vsi_idx = 0;
37 vf->lan_vsi_num = 0;
38 vf->num_mac = 0;
39 }
40
41 pf_vf_msix = pf->num_vf_msix;
42 /* Disable interrupts so that VF starts in a known state */
43 for (i = 0; i < pf_vf_msix; i++) {
44 u32 reg_idx;
45
46 reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
47 wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
48 ice_flush(&pf->hw);
49 }
50 /* reset some of the state variables keeping track of the resources */
51 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
52 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
53}
54
55/***********************enable_vf routines*****************************/
56
57/**
58 * ice_dis_vf_mappings
59 * @vf: pointer to the VF structure
60 */
61static void ice_dis_vf_mappings(struct ice_vf *vf)
62{
63 struct ice_pf *pf = vf->pf;
64 struct ice_vsi *vsi;
65 int first, last, v;
66 struct ice_hw *hw;
67
68 hw = &pf->hw;
69 vsi = pf->vsi[vf->lan_vsi_idx];
70
71 wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
72
73 first = vf->first_vector_idx;
74 last = first + pf->num_vf_msix - 1;
75 for (v = first; v <= last; v++) {
76 u32 reg;
77
78 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
79 GLINT_VECT2FUNC_IS_PF_M) |
80 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
81 GLINT_VECT2FUNC_PF_NUM_M));
82 wr32(hw, GLINT_VECT2FUNC(v), reg);
83 }
84
85 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
86 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
87 else
88 dev_err(&pf->pdev->dev,
89 "Scattered mode for VF Tx queues is not yet implemented\n");
90
91 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
92 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
93 else
94 dev_err(&pf->pdev->dev,
95 "Scattered mode for VF Rx queues is not yet implemented\n");
96}
97
98/**
99 * ice_free_vfs - Free all VFs
100 * @pf: pointer to the PF structure
101 */
102void ice_free_vfs(struct ice_pf *pf)
103{
104 struct ice_hw *hw = &pf->hw;
105 int tmp, i;
106
107 if (!pf->vf)
108 return;
109
110 while (test_and_set_bit(__ICE_VF_DIS, pf->state))
111 usleep_range(1000, 2000);
112
113 /* Avoid wait time by stopping all VFs at the same time */
114 for (i = 0; i < pf->num_alloc_vfs; i++) {
115 if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
116 continue;
117
118 /* stop rings without wait time */
119 ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
120 ICE_NO_RESET, i);
121 ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
122
123 clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
124 }
125
126 /* Disable IOV before freeing resources. This lets any VF drivers
127 * running in the host get themselves cleaned up before we yank
128 * the carpet out from underneath their feet.
129 */
130 if (!pci_vfs_assigned(pf->pdev))
131 pci_disable_sriov(pf->pdev);
132 else
133 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
134
135 tmp = pf->num_alloc_vfs;
136 pf->num_vf_qps = 0;
137 pf->num_alloc_vfs = 0;
138 for (i = 0; i < tmp; i++) {
139 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
140 /* disable VF qp mappings */
141 ice_dis_vf_mappings(&pf->vf[i]);
142
143 /* Set this state so that assigned VF vectors can be
144 * reclaimed by PF for reuse in ice_vsi_release(). No
145 * need to clear this bit since pf->vf array is being
146 * freed anyways after this for loop
147 */
148 set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
149 ice_free_vf_res(&pf->vf[i]);
150 }
151 }
152
153 devm_kfree(&pf->pdev->dev, pf->vf);
154 pf->vf = NULL;
155
156 /* This check is for when the driver is unloaded while VFs are
157 * assigned. Setting the number of VFs to 0 through sysfs is caught
158 * before this function ever gets called.
159 */
160 if (!pci_vfs_assigned(pf->pdev)) {
161 int vf_id;
162
163 /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
164 * work correctly when SR-IOV gets re-enabled.
165 */
166 for (vf_id = 0; vf_id < tmp; vf_id++) {
167 u32 reg_idx, bit_idx;
168
169 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
170 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
171 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
172 }
173 }
174 clear_bit(__ICE_VF_DIS, pf->state);
175 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
176}
177
178/**
179 * ice_trigger_vf_reset - Reset a VF on HW
180 * @vf: pointer to the VF structure
181 * @is_vflr: true if VFLR was issued, false if not
182 *
183 * Trigger hardware to start a reset for a particular VF. Expects the caller
184 * to wait the proper amount of time to allow hardware to reset the VF before
185 * it cleans up and restores VF functionality.
186 */
187static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
188{
189 struct ice_pf *pf = vf->pf;
190 u32 reg, reg_idx, bit_idx;
191 struct ice_hw *hw;
192 int vf_abs_id, i;
193
194 hw = &pf->hw;
195 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
196
197 /* Inform VF that it is no longer active, as a warning */
198 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
199
200 /* Disable VF's configuration API during reset. The flag is re-enabled
201 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
202 * It's normally disabled in ice_free_vf_res(), but it's safer
203 * to do it earlier to give some time to finish to any VF config
204 * functions that may still be running at this point.
205 */
206 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
207
208 /* In the case of a VFLR, the HW has already reset the VF and we
209 * just need to clean up, so don't hit the VFRTRIG register.
210 */
211 if (!is_vflr) {
212 /* reset VF using VPGEN_VFRTRIG reg */
213 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
214 reg |= VPGEN_VFRTRIG_VFSWR_M;
215 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
216 }
217 /* clear the VFLR bit in GLGEN_VFLRSTAT */
218 reg_idx = (vf_abs_id) / 32;
219 bit_idx = (vf_abs_id) % 32;
220 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
221 ice_flush(hw);
222
223 wr32(hw, PF_PCI_CIAA,
224 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
225 for (i = 0; i < 100; i++) {
226 reg = rd32(hw, PF_PCI_CIAD);
227 if ((reg & VF_TRANS_PENDING_M) != 0)
228 dev_err(&pf->pdev->dev,
229 "VF %d PCI transactions stuck\n", vf->vf_id);
230 udelay(1);
231 }
232}
233
234/**
235 * ice_vsi_set_pvid - Set port VLAN id for the VSI
236 * @vsi: the VSI being changed
237 * @vid: the VLAN id to set as a PVID
238 */
239static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
240{
241 struct device *dev = &vsi->back->pdev->dev;
242 struct ice_hw *hw = &vsi->back->hw;
243 struct ice_vsi_ctx ctxt = { 0 };
244 enum ice_status status;
245
246 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
247 ICE_AQ_VSI_PVLAN_INSERT_PVID |
248 ICE_AQ_VSI_VLAN_EMOD_STR;
249 ctxt.info.pvid = cpu_to_le16(vid);
250 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
251
252 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
253 if (status) {
254 dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
255 status, hw->adminq.sq_last_status);
256 return -EIO;
257 }
258
259 vsi->info.pvid = ctxt.info.pvid;
260 vsi->info.vlan_flags = ctxt.info.vlan_flags;
261 return 0;
262}
263
264/**
265 * ice_vf_vsi_setup - Set up a VF VSI
266 * @pf: board private structure
267 * @pi: pointer to the port_info instance
268 * @vf_id: defines VF id to which this VSI connects.
269 *
270 * Returns pointer to the successfully allocated VSI struct on success,
271 * otherwise returns NULL on failure.
272 */
273static struct ice_vsi *
274ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
275{
276 return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
277}
278
279/**
280 * ice_alloc_vsi_res - Setup VF VSI and its resources
281 * @vf: pointer to the VF structure
282 *
283 * Returns 0 on success, negative value on failure
284 */
285static int ice_alloc_vsi_res(struct ice_vf *vf)
286{
287 struct ice_pf *pf = vf->pf;
288 LIST_HEAD(tmp_add_list);
289 u8 broadcast[ETH_ALEN];
290 struct ice_vsi *vsi;
291 int status = 0;
292
293 vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
294
295 if (!vsi) {
296 dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
297 return -ENOMEM;
298 }
299
300 vf->lan_vsi_idx = vsi->idx;
301 vf->lan_vsi_num = vsi->vsi_num;
302
303 /* first vector index is the VFs OICR index */
304 vf->first_vector_idx = vsi->hw_base_vector;
305 /* Since hw_base_vector holds the vector where data queue interrupts
306 * starts, increment by 1 since VFs allocated vectors include OICR intr
307 * as well.
308 */
309 vsi->hw_base_vector += 1;
310
311 /* Check if port VLAN exist before, and restore it accordingly */
312 if (vf->port_vlan_id)
313 ice_vsi_set_pvid(vsi, vf->port_vlan_id);
314
315 eth_broadcast_addr(broadcast);
316
317 status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
318 if (status)
319 goto ice_alloc_vsi_res_exit;
320
321 if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
322 status = ice_add_mac_to_list(vsi, &tmp_add_list,
323 vf->dflt_lan_addr.addr);
324 if (status)
325 goto ice_alloc_vsi_res_exit;
326 }
327
328 status = ice_add_mac(&pf->hw, &tmp_add_list);
329 if (status)
330 dev_err(&pf->pdev->dev, "could not add mac filters\n");
331
332 /* Clear this bit after VF initialization since we shouldn't reclaim
333 * and reassign interrupts for synchronous or asynchronous VFR events.
334 * We don't want to reconfigure interrupts since AVF driver doesn't
335 * expect vector assignment to be changed unless there is a request for
336 * more vectors.
337 */
338 clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
339ice_alloc_vsi_res_exit:
340 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
341 return status;
342}
343
344/**
345 * ice_alloc_vf_res - Allocate VF resources
346 * @vf: pointer to the VF structure
347 */
348static int ice_alloc_vf_res(struct ice_vf *vf)
349{
350 int status;
351
352 /* setup VF VSI and necessary resources */
353 status = ice_alloc_vsi_res(vf);
354 if (status)
355 goto ice_alloc_vf_res_exit;
356
357 if (vf->trusted)
358 set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
359 else
360 clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
361
362 /* VF is now completely initialized */
363 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
364
365 return status;
366
367ice_alloc_vf_res_exit:
368 ice_free_vf_res(vf);
369 return status;
370}
371
372/**
373 * ice_ena_vf_mappings
374 * @vf: pointer to the VF structure
375 *
376 * Enable VF vectors and queues allocation by writing the details into
377 * respective registers.
378 */
379static void ice_ena_vf_mappings(struct ice_vf *vf)
380{
381 struct ice_pf *pf = vf->pf;
382 struct ice_vsi *vsi;
383 int first, last, v;
384 struct ice_hw *hw;
385 int abs_vf_id;
386 u32 reg;
387
388 hw = &pf->hw;
389 vsi = pf->vsi[vf->lan_vsi_idx];
390 first = vf->first_vector_idx;
391 last = (first + pf->num_vf_msix) - 1;
392 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
393
394 /* VF Vector allocation */
395 reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
396 ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
397 VPINT_ALLOC_VALID_M);
398 wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
399
400 /* map the interrupts to its functions */
401 for (v = first; v <= last; v++) {
402 reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
403 GLINT_VECT2FUNC_VF_NUM_M) |
404 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
405 GLINT_VECT2FUNC_PF_NUM_M));
406 wr32(hw, GLINT_VECT2FUNC(v), reg);
407 }
408
409 /* VF Tx queues allocation */
410 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
411 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id),
412 VPLAN_TXQ_MAPENA_TX_ENA_M);
413 /* set the VF PF Tx queue range
414 * VFNUMQ value should be set to (number of queues - 1). A value
415 * of 0 means 1 queue and a value of 255 means 256 queues
416 */
417 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
418 VPLAN_TX_QBASE_VFFIRSTQ_M) |
419 (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
420 VPLAN_TX_QBASE_VFNUMQ_M));
421 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
422 } else {
423 dev_err(&pf->pdev->dev,
424 "Scattered mode for VF Tx queues is not yet implemented\n");
425 }
426
427 /* VF Rx queues allocation */
428 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
429 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id),
430 VPLAN_RXQ_MAPENA_RX_ENA_M);
431 /* set the VF PF Rx queue range
432 * VFNUMQ value should be set to (number of queues - 1). A value
433 * of 0 means 1 queue and a value of 255 means 256 queues
434 */
435 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
436 VPLAN_RX_QBASE_VFFIRSTQ_M) |
437 (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
438 VPLAN_RX_QBASE_VFNUMQ_M));
439 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
440 } else {
441 dev_err(&pf->pdev->dev,
442 "Scattered mode for VF Rx queues is not yet implemented\n");
443 }
444}
445
446/**
447 * ice_determine_res
448 * @pf: pointer to the PF structure
449 * @avail_res: available resources in the PF structure
450 * @max_res: maximum resources that can be given per VF
451 * @min_res: minimum resources that can be given per VF
452 *
453 * Returns non-zero value if resources (queues/vectors) are available or
454 * returns zero if PF cannot accommodate for all num_alloc_vfs.
455 */
456static int
457ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
458{
459 bool checked_min_res = false;
460 int res;
461
462 /* start by checking if PF can assign max number of resources for
463 * all num_alloc_vfs.
464 * if yes, return number per VF
465 * If no, divide by 2 and roundup, check again
466 * repeat the loop till we reach a point where even minimum resources
467 * are not available, in that case return 0
468 */
469 res = max_res;
470 while ((res >= min_res) && !checked_min_res) {
471 int num_all_res;
472
473 num_all_res = pf->num_alloc_vfs * res;
474 if (num_all_res <= avail_res)
475 return res;
476
477 if (res == min_res)
478 checked_min_res = true;
479
480 res = DIV_ROUND_UP(res, 2);
481 }
482 return 0;
483}
484
485/**
486 * ice_check_avail_res - check if vectors and queues are available
487 * @pf: pointer to the PF structure
488 *
489 * This function is where we calculate actual number of resources for VF VSIs,
490 * we don't reserve ahead of time during probe. Returns success if vectors and
491 * queues resources are available, otherwise returns error code
492 */
493static int ice_check_avail_res(struct ice_pf *pf)
494{
495 u16 num_msix, num_txq, num_rxq;
496
497 if (!pf->num_alloc_vfs)
498 return -EINVAL;
499
500 /* Grab from HW interrupts common pool
501 * Note: By the time the user decides it needs more vectors in a VF
502 * its already too late since one must decide this prior to creating the
503 * VF interface. So the best we can do is take a guess as to what the
504 * user might want.
505 *
506 * We have two policies for vector allocation:
507 * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small
508 * number of NFV VFs used for NFV appliances, since this is a special
509 * case, we try to assign maximum vectors per VF (65) as much as
510 * possible, based on determine_resources algorithm.
511 * 2. if num_alloc_vfs is from 17 to 256, then its large number of
512 * regular VFs which are not used for any special purpose. Hence try to
513 * grab default interrupt vectors (5 as supported by AVF driver).
514 */
515 if (pf->num_alloc_vfs <= 16) {
516 num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
517 ICE_MAX_INTR_PER_VF,
518 ICE_MIN_INTR_PER_VF);
519 } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
520 num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
521 ICE_DFLT_INTR_PER_VF,
522 ICE_MIN_INTR_PER_VF);
523 } else {
524 dev_err(&pf->pdev->dev,
525 "Number of VFs %d exceeds max VF count %d\n",
526 pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
527 return -EIO;
528 }
529
530 if (!num_msix)
531 return -EIO;
532
533 /* Grab from the common pool
534 * start by requesting Default queues (4 as supported by AVF driver),
535 * Note that, the main difference between queues and vectors is, latter
536 * can only be reserved at init time but queues can be requested by VF
537 * at runtime through Virtchnl, that is the reason we start by reserving
538 * few queues.
539 */
540 num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
541 ICE_MIN_QS_PER_VF);
542
543 num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
544 ICE_MIN_QS_PER_VF);
545
546 if (!num_txq || !num_rxq)
547 return -EIO;
548
549 /* since AVF driver works with only queue pairs which means, it expects
550 * to have equal number of Rx and Tx queues, so take the minimum of
551 * available Tx or Rx queues
552 */
553 pf->num_vf_qps = min_t(int, num_txq, num_rxq);
554 pf->num_vf_msix = num_msix;
555
556 return 0;
557}
558
/**
 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed. Reallocate VF resources back to make
 * VF state active
 */
static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;

	/* PF software completes the flow by notifying VF that reset flow is
	 * completed. This is done by enabling hardware by clearing the reset
	 * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
	 * register to VFR completed (done at the end of this function)
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in ice_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	/* clear the software-reset trigger set in ice_trigger_vf_reset() */
	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state;
	 * on failure the VF is intentionally left inactive (ACTIVE was
	 * cleared by ice_trigger_vf_reset() and is only re-set here)
	 */
	if (!ice_alloc_vf_res(vf)) {
		ice_ena_vf_mappings(vf);
		set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
		clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
		/* the freshly created VSI starts with no VLANs */
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
607
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct ice_hw *hw = &pf->hw;
	int v, i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr);

	/* Call Disable LAN Tx queue AQ call with VFR bit set and 0
	 * queues to inform Firmware about VF reset.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
				ICE_VF_RESET, v, NULL);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	/* up to 10 polls of 10-20ms each, shared across all VFs */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			struct ice_vf *vf = &pf->vf[v];
			u32 reg;

			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			/* not done yet — sleep again before re-checking */
			if (!(reg & VPGEN_VFRSTAT_VFRD_M))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
	usleep_range(10000, 20000);

	/* free VF resources to begin resetting the VSI state */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		ice_free_vf_res(&pf->vf[v]);

	/* recompute per-VF vector/queue budget before reallocating */
	if (ice_check_avail_res(pf)) {
		dev_err(&pf->pdev->dev,
			"Cannot allocate VF resources, try with fewer number of VFs\n");
		return false;
	}

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		ice_cleanup_and_realloc_vf(&pf->vf[v]);

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}
695
696/**
697 * ice_alloc_vfs - Allocate and set up VFs resources
698 * @pf: pointer to the PF structure
699 * @num_alloc_vfs: number of VFs to allocate
700 */
701static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
702{
703 struct ice_hw *hw = &pf->hw;
704 struct ice_vf *vfs;
705 int i, ret;
706
707 /* Disable global interrupt 0 so we don't try to handle the VFLR. */
708 wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
709 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
710
711 ice_flush(hw);
712
713 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
714 if (ret) {
715 pf->num_alloc_vfs = 0;
716 goto err_unroll_intr;
717 }
718 /* allocate memory */
719 vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
720 GFP_KERNEL);
721 if (!vfs) {
722 ret = -ENOMEM;
723 goto err_unroll_sriov;
724 }
725 pf->vf = vfs;
726
727 /* apply default profile */
728 for (i = 0; i < num_alloc_vfs; i++) {
729 vfs[i].pf = pf;
730 vfs[i].vf_sw_id = pf->first_sw;
731 vfs[i].vf_id = i;
732
733 /* assign default capabilities */
734 set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
735 vfs[i].spoofchk = true;
736
737 /* Set this state so that PF driver does VF vector assignment */
738 set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
739 }
740 pf->num_alloc_vfs = num_alloc_vfs;
741
742 /* VF resources get allocated during reset */
743 if (!ice_reset_all_vfs(pf, false))
744 goto err_unroll_sriov;
745
746 goto err_unroll_intr;
747
748err_unroll_sriov:
749 pci_disable_sriov(pf->pdev);
750err_unroll_intr:
751 /* rearm interrupts here */
752 ice_irq_dynamic_ena(hw, NULL, NULL);
753 return ret;
754}
755
756/**
757 * ice_pf_state_is_nominal - checks the pf for nominal state
758 * @pf: pointer to pf to check
759 *
760 * Check the PF's state for a collection of bits that would indicate
761 * the PF is in a state that would inhibit normal operation for
762 * driver functionality.
763 *
764 * Returns true if PF is in a nominal state.
765 * Returns false otherwise
766 */
767static bool ice_pf_state_is_nominal(struct ice_pf *pf)
768{
769 DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
770
771 if (!pf)
772 return false;
773
774 bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
775 if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
776 return false;
777
778 return true;
779}
780
781/**
782 * ice_pci_sriov_ena - Enable or change number of VFs
783 * @pf: pointer to the PF structure
784 * @num_vfs: number of VFs to allocate
785 */
786static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
787{
788 int pre_existing_vfs = pci_num_vf(pf->pdev);
789 struct device *dev = &pf->pdev->dev;
790 int err;
791
792 if (!ice_pf_state_is_nominal(pf)) {
793 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
794 return -EBUSY;
795 }
796
797 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
798 dev_err(dev, "This device is not capable of SR-IOV\n");
799 return -ENODEV;
800 }
801
802 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
803 ice_free_vfs(pf);
804 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
805 return num_vfs;
806
807 if (num_vfs > pf->num_vfs_supported) {
808 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
809 num_vfs, pf->num_vfs_supported);
810 return -ENOTSUPP;
811 }
812
813 dev_info(dev, "Allocating %d VFs\n", num_vfs);
814 err = ice_alloc_vfs(pf, num_vfs);
815 if (err) {
816 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
817 return err;
818 }
819
820 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
821 return num_vfs;
822}
823
824/**
825 * ice_sriov_configure - Enable or change number of VFs via sysfs
826 * @pdev: pointer to a pci_dev structure
827 * @num_vfs: number of VFs to allocate
828 *
829 * This function is called when the user updates the number of VFs in sysfs.
830 */
831int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
832{
833 struct ice_pf *pf = pci_get_drvdata(pdev);
834
835 if (num_vfs)
836 return ice_pci_sriov_ena(pf, num_vfs);
837
838 if (!pci_vfs_assigned(pdev)) {
839 ice_free_vfs(pf);
840 } else {
841 dev_err(&pf->pdev->dev,
842 "can't free VFs because some are assigned to VMs.\n");
843 return -EBUSY;
844 }
845
846 return 0;
847}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
new file mode 100644
index 000000000000..85c263a7494c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -0,0 +1,74 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2018, Intel Corporation. */
3
4#ifndef _ICE_VIRTCHNL_PF_H_
5#define _ICE_VIRTCHNL_PF_H_
6#include "ice.h"
7
8/* Static VF transaction/status register def */
9#define VF_DEVICE_STATUS 0xAA
10#define VF_TRANS_PENDING_M 0x20
11
/* Specific VF states */
enum ice_vf_states {
	/* set once PF-side VF setup completes (ice_alloc_vf_res()); cleared
	 * at the start of a VF reset to block the VF's configuration API
	 * while its VSI is torn down and rebuilt
	 */
	ICE_VF_STATE_INIT = 0,
	/* VF fully operational; cleared first thing in ice_trigger_vf_reset()
	 * and re-set after a successful reallocation
	 */
	ICE_VF_STATE_ACTIVE,
	ICE_VF_STATE_ENA,	/* presumably "queues enabled" — managed by
				 * virtchnl handlers outside this file; confirm
				 */
	ICE_VF_STATE_DIS,	/* VF disabled; cleared after reset realloc */
	ICE_VF_STATE_MC_PROMISC,	/* multicast promiscuous mode */
	ICE_VF_STATE_UC_PROMISC,	/* unicast promiscuous mode */
	/* state to indicate if PF needs to do vector assignment for VF.
	 * This needs to be set during first time VF initialization or later
	 * when VF asks for more Vectors through virtchnl OP.
	 */
	ICE_VF_STATE_CFG_INTR,
	ICE_VF_STATES_NBITS
};
27
/* VF capabilities (bit positions in ice_vf.vf_caps) */
enum ice_virtchnl_cap {
	/* basic L2 capability; granted to every VF by default */
	ICE_VIRTCHNL_VF_CAP_L2 = 0,
	/* privileged operations; mirrors the VF's trusted flag */
	ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
};
33
/* VF information structure */
struct ice_vf {
	struct ice_pf *pf;		/* backpointer to owning PF */

	s16 vf_id;			/* VF id in the PF space */
	int first_vector_idx;		/* first vector index of this VF */
	struct ice_sw *vf_sw_id;	/* switch id the VF VSIs connect to */
	struct virtchnl_ether_addr dflt_lan_addr;	/* default LAN MAC */
	u16 port_vlan_id;		/* port VLAN; restored on VSI rebuild */
	u8 trusted;			/* drives the PRIVILEGE capability */
	u16 lan_vsi_idx;		/* index into PF struct */
	u16 lan_vsi_num;		/* ID as used by firmware */
	unsigned long vf_caps;		/* vf's adv. capabilities */
	DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS);	/* VF runtime states */
	u8 spoofchk;			/* MAC anti-spoof; defaults to true */
	u16 num_mac;			/* count of MAC filters on the VF */
	u16 num_vlan;			/* count of VLANs; zeroed on reset */
};
52
#ifdef CONFIG_PCI_IOV
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
void ice_free_vfs(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
#else /* CONFIG_PCI_IOV */
/* With SR-IOV compiled out, freeing VFs is a no-op */
#define ice_free_vfs(pf) do {} while (0)

/* Stub: report success so callers' error paths are not triggered when
 * SR-IOV support is not built in.
 */
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
		  bool __always_unused is_vflr)
{
	return true;
}

/* Stub: sysfs attempts to enable VFs fail with "operation not supported" */
static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev,
		    int __always_unused num_vfs)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */