aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/intel/i40e
diff options
context:
space:
mode:
authorJesse Brandeburg <jesse.brandeburg@intel.com>2013-09-11 04:40:07 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2013-09-11 05:04:56 -0400
commit5c3c48ac6bf56367c4e89f6453cd2d61e50375bd (patch)
treefdf6946523bfe293e51fb81481c9fd2575e9389a /drivers/net/ethernet/intel/i40e
parent7daa6bf3294e518cf939830c1a8ec2a6a96204ac (diff)
i40e: implement virtual device interface
While not part of this patch series, an i40evf driver is on its way, and uses these files to communicate to the PF driver. This patch contains the header and implementation files for the PF to VF interface. Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com> Signed-off-by: Shannon Nelson <shannon.nelson@intel.com> Signed-off-by: Mitch Williams <mitch.a.williams@intel.com> CC: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com> CC: e1000-devel@lists.sourceforge.net Tested-by: Kavindya Deegala <kavindya.s.deegala@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/i40e')
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h368
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c2335
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h120
3 files changed, 2823 insertions, 0 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
new file mode 100644
index 000000000000..cc6654f1dac7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -0,0 +1,368 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_VIRTCHNL_H_
29#define _I40E_VIRTCHNL_H_
30
31#include "i40e_type.h"
32
33/* Description:
34 * This header file describes the VF-PF communication protocol used
35 * by the various i40e drivers.
36 *
37 * Admin queue buffer usage:
38 * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
39 * flags, retval, datalen, and data addr are all used normally.
40 * Firmware copies the cookie fields when sending messages between the PF and
41 * VF, but uses all other fields internally. Due to this limitation, we
42 * must send all messages as "indirect", i.e. using an external buffer.
43 *
44 * All the vsi indexes are relative to the VF. Each VF can have maximum of
45 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
46 * have a maximum of sixteen queues for all of its VSIs.
47 *
48 * The PF is required to return a status code in v_retval for all messages
49 * except RESET_VF, which does not require any response. The return value is of
50 * i40e_status_code type, defined in the i40e_type.h.
51 *
52 * In general, VF driver initialization should roughly follow the order of these
53 * opcodes. The VF driver must first validate the API version of the PF driver,
54 * then request a reset, then get resources, then configure queues and
55 * interrupts. After these operations are complete, the VF driver may start
56 * its queues, optionally add MAC and VLAN filters, and process traffic.
57 */
58
59/* Opcodes for VF-PF communication. These are placed in the v_opcode field
60 * of the virtchnl_msg structure.
61 */
enum i40e_virtchnl_ops {
/* VF sends req. to pf for the following
 * ops.
 */
	/* These values are carried on the wire between VF and PF;
	 * never renumber existing entries.
	 */
	I40E_VIRTCHNL_OP_UNKNOWN = 0,
	I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
	I40E_VIRTCHNL_OP_RESET_VF,
	I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
	I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
	I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
	I40E_VIRTCHNL_OP_ENABLE_QUEUES,
	I40E_VIRTCHNL_OP_DISABLE_QUEUES,
	I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
	I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
	I40E_VIRTCHNL_OP_ADD_VLAN,
	I40E_VIRTCHNL_OP_DEL_VLAN,
	I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
	I40E_VIRTCHNL_OP_GET_STATS,
	I40E_VIRTCHNL_OP_FCOE,		/* TBD, see note at end of file */
/* PF sends status change events to vfs using
 * the following op.
 */
	I40E_VIRTCHNL_OP_EVENT,
};
88
89/* Virtual channel message descriptor. This overlays the admin queue
90 * descriptor. All other data is passed in external buffers.
91 */
92
struct i40e_virtchnl_msg {
	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
	enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
	i40e_status v_retval;		 /* ditto for desc->retval */
	u32 vfid;			 /* used by PF when sending to VF */
};
99
100/* Message descriptions and data structures.*/
101
102/* I40E_VIRTCHNL_OP_VERSION
103 * VF posts its version number to the PF. PF responds with its version number
104 * in the same format, along with a return code.
105 * Reply from PF has its major/minor versions also in param0 and param1.
106 * If there is a major version mismatch, then the VF cannot operate.
107 * If there is a minor version mismatch, then the VF can operate but should
108 * add a warning to the system log.
109 *
110 * This enum element MUST always be specified as == 1, regardless of other
111 * changes in the API. The PF must always respond to this message without
112 * error regardless of version mismatch.
113 */
114#define I40E_VIRTCHNL_VERSION_MAJOR 1
115#define I40E_VIRTCHNL_VERSION_MINOR 0
/* Sent in both directions for I40E_VIRTCHNL_OP_VERSION. */
struct i40e_virtchnl_version_info {
	u32 major;	/* sender's I40E_VIRTCHNL_VERSION_MAJOR */
	u32 minor;	/* sender's I40E_VIRTCHNL_VERSION_MINOR */
};
120
121/* I40E_VIRTCHNL_OP_RESET_VF
122 * VF sends this request to PF with no parameters
123 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
124 * until reset completion is indicated. The admin queue must be reinitialized
125 * after this operation.
126 *
127 * When reset is complete, PF must ensure that all queues in all VSIs associated
128 * with the VF are stopped, all queue configurations in the HMC are set to 0,
129 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
130 * are cleared.
131 */
132
133/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
134 * VF sends this request to PF with no parameters
135 * PF responds with an indirect message containing
136 * i40e_virtchnl_vf_resource and one or more
137 * i40e_virtchnl_vsi_resource structures.
138 */
139
struct i40e_virtchnl_vsi_resource {
	u16 vsi_id;		/* VF relative VSI index (see file header) */
	u16 num_queue_pairs;
	enum i40e_vsi_type vsi_type;
	u16 qset_handle;
	u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
};
147/* VF offload flags */
148#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
149#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
150#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
151
struct i40e_virtchnl_vf_resource {
	u16 num_vsis;
	u16 num_queue_pairs;	/* total across all of the VF's VSIs */
	u16 max_vectors;
	u16 max_mtu;

	u32 vf_offload_flags;	/* I40E_VIRTCHNL_VF_OFFLOAD_* bits */
	u32 max_fcoe_contexts;
	u32 max_fcoe_filters;

	/* variable length: num_vsis elements follow in the buffer */
	struct i40e_virtchnl_vsi_resource vsi_res[1];
};
164
165/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
166 * VF sends this message to set up parameters for one TX queue.
167 * External data buffer contains one instance of i40e_virtchnl_txq_info.
168 * PF configures requested queue and returns a status code.
169 */
170
/* Tx queue config info */
struct i40e_virtchnl_txq_info {
	u16 vsi_id;
	u16 queue_id;
	u16 ring_len;		/* number of descriptors, multiple of 8 */
	u16 headwb_enabled;	/* nonzero enables head writeback */
	u64 dma_ring_addr;
	/* presumably only consumed when headwb_enabled is set --
	 * TODO confirm against PF handling
	 */
	u64 dma_headwb_addr;
};
180
181/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
182 * VF sends this message to set up parameters for one RX queue.
183 * External data buffer contains one instance of i40e_virtchnl_rxq_info.
184 * PF configures requested queue and returns a status code.
185 */
186
/* Rx queue config info */
struct i40e_virtchnl_rxq_info {
	u16 vsi_id;
	u16 queue_id;
	u32 ring_len;		/* number of descriptors, multiple of 32 */
	u16 hdr_size;		/* header buffer size; PF reads this only
				 * when splithdr_enabled is set
				 */
	u16 splithdr_enabled;
	u32 databuffer_size;
	u32 max_pkt_size;
	u64 dma_ring_addr;
	enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
};
199
200/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
201 * VF sends this message to set parameters for all active TX and RX queues
202 * associated with the specified VSI.
203 * PF configures queues and returns status.
204 * If the number of queues specified is greater than the number of queues
205 * associated with the VSI, an error is returned and no queues are configured.
206 */
/* One Tx/Rx queue pair for I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES. */
struct i40e_virtchnl_queue_pair_info {
	/* NOTE: vsi_id and queue_id should be identical for both queues. */
	struct i40e_virtchnl_txq_info txq;
	struct i40e_virtchnl_rxq_info rxq;
};
212
struct i40e_virtchnl_vsi_queue_config_info {
	u16 vsi_id;
	u16 num_queue_pairs;
	/* variable length: num_queue_pairs elements follow */
	struct i40e_virtchnl_queue_pair_info qpair[1];
};
218
219/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
220 * VF uses this message to map vectors to queues.
221 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
222 * are to be associated with the specified vector.
223 * The "other" causes are always mapped to vector 0.
224 * PF configures interrupt mapping and returns status.
225 */
struct i40e_virtchnl_vector_map {
	u16 vsi_id;
	u16 vector_id;	/* VF relative vector; 0 is the "other causes" vector */
	u16 rxq_map;	/* bitmap of VSI relative rx queues on this vector */
	u16 txq_map;	/* bitmap of VSI relative tx queues on this vector */
	u16 rxitr_idx;	/* ITR index used for the rx queues */
	u16 txitr_idx;	/* ITR index used for the tx queues */
};
234
struct i40e_virtchnl_irq_map_info {
	u16 num_vectors;
	/* variable length: num_vectors elements follow */
	struct i40e_virtchnl_vector_map vecmap[1];
};
239
240/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
241 * I40E_VIRTCHNL_OP_DISABLE_QUEUES
242 * VF sends these message to enable or disable TX/RX queue pairs.
243 * The queues fields are bitmaps indicating which queues to act upon.
244 * (Currently, we only support 16 queues per VF, but we make the field
245 * u32 to allow for expansion.)
246 * PF performs requested action and returns status.
247 */
struct i40e_virtchnl_queue_select {
	u16 vsi_id;
	u16 pad;		/* alignment only */
	u32 rx_queues;		/* bitmap of VSI relative rx queues */
	u32 tx_queues;		/* bitmap of VSI relative tx queues */
};
254
255/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
256 * VF sends this message in order to add one or more unicast or multicast
257 * address filters for the specified VSI.
258 * PF adds the filters and returns status.
259 */
260
261/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
262 * VF sends this message in order to remove one or more unicast or multicast
263 * filters for the specified VSI.
264 * PF removes the filters and returns status.
265 */
266
struct i40e_virtchnl_ether_addr {
	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
	u8 pad[2];		/* pads the element to 8 bytes */
};
271
struct i40e_virtchnl_ether_addr_list {
	u16 vsi_id;
	u16 num_elements;
	/* variable length: num_elements entries follow */
	struct i40e_virtchnl_ether_addr list[1];
};
277
278/* I40E_VIRTCHNL_OP_ADD_VLAN
279 * VF sends this message to add one or more VLAN tag filters for receives.
280 * PF adds the filters and returns status.
281 * If a port VLAN is configured by the PF, this operation will return an
282 * error to the VF.
283 */
284
285/* I40E_VIRTCHNL_OP_DEL_VLAN
286 * VF sends this message to remove one or more VLAN tag filters for receives.
287 * PF removes the filters and returns status.
288 * If a port VLAN is configured by the PF, this operation will return an
289 * error to the VF.
290 */
291
struct i40e_virtchnl_vlan_filter_list {
	u16 vsi_id;
	u16 num_elements;
	/* variable length: num_elements VLAN ids follow */
	u16 vlan_id[1];
};
297
298/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
299 * VF sends VSI id and flags.
300 * PF returns status code in retval.
301 * Note: we assume that broadcast accept mode is always enabled.
302 */
struct i40e_virtchnl_promisc_info {
	u16 vsi_id;
	u16 flags;	/* I40E_FLAG_VF_*_PROMISC bits (defined below) */
};
307
308#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
309#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
310
311/* I40E_VIRTCHNL_OP_GET_STATS
312 * VF sends this message to request stats for the selected VSI. VF uses
313 * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
314 * field is ignored by the PF.
315 *
316 * PF replies with struct i40e_eth_stats in an external buffer.
317 */
318
319/* I40E_VIRTCHNL_OP_EVENT
320 * PF sends this message to inform the VF driver of events that may affect it.
321 * No direct response is expected from the VF, though it may generate other
322 * messages in response to this one.
323 */
enum i40e_virtchnl_event_codes {
	I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
	I40E_VIRTCHNL_EVENT_LINK_CHANGE,
	I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
	I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};
330#define I40E_PF_EVENT_SEVERITY_INFO 0
331#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
332
struct i40e_virtchnl_pf_event {
	enum i40e_virtchnl_event_codes event;
	union {
		/* valid only when event == I40E_VIRTCHNL_EVENT_LINK_CHANGE */
		struct {
			enum i40e_aq_link_speed link_speed;
			bool link_status;
		} link_event;
	} event_data;

	int severity;	/* I40E_PF_EVENT_SEVERITY_* (defined above) */
};
344
345/* The following are TBD, not necessary for LAN functionality.
346 * I40E_VIRTCHNL_OP_FCOE
347 */
348
349/* VF reset states - these are written into the RSTAT register:
350 * I40E_VFGEN_RSTAT1 on the PF
351 * I40E_VFGEN_RSTAT on the VF
352 * When the PF initiates a reset, it writes 0
353 * When the reset is complete, it writes 1
354 * When the PF detects that the VF has recovered, it writes 2
355 * VF checks this register periodically to determine if a reset has occurred,
356 * then polls it to know when the reset is complete.
357 * If either the PF or VF reads the register while the hardware
358 * is in a reset state, it will return DEADBEEF, which, when masked
359 * will result in 3.
360 */
/* See the RSTAT register description in the comment above. */
enum i40e_vfr_states {
	I40E_VFR_INPROGRESS = 0,	/* written by PF when it starts a reset */
	I40E_VFR_COMPLETED,		/* reset done, VF may reinit */
	I40E_VFR_VFACTIVE,		/* PF observed the VF recovered */
	I40E_VFR_UNKNOWN,		/* masked DEADBEEF reads as 3 */
};
367
368#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
new file mode 100644
index 000000000000..8967e58e2408
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -0,0 +1,2335 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40e.h"
29
30/***********************misc routines*****************************/
31
32/**
33 * i40e_vc_isvalid_vsi_id
34 * @vf: pointer to the vf info
35 * @vsi_id: vf relative vsi id
36 *
37 * check for the valid vsi id
38 **/
39static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
40{
41 struct i40e_pf *pf = vf->pf;
42
43 return pf->vsi[vsi_id]->vf_id == vf->vf_id;
44}
45
46/**
47 * i40e_vc_isvalid_queue_id
48 * @vf: pointer to the vf info
49 * @vsi_id: vsi id
50 * @qid: vsi relative queue id
51 *
52 * check for the valid queue id
53 **/
54static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
55 u8 qid)
56{
57 struct i40e_pf *pf = vf->pf;
58
59 return qid < pf->vsi[vsi_id]->num_queue_pairs;
60}
61
62/**
63 * i40e_vc_isvalid_vector_id
64 * @vf: pointer to the vf info
65 * @vector_id: vf relative vector id
66 *
67 * check for the valid vector id
68 **/
69static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
70{
71 struct i40e_pf *pf = vf->pf;
72
73 return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
74}
75
76/***********************vf resource mgmt routines*****************/
77
78/**
79 * i40e_vc_get_pf_queue_id
80 * @vf: pointer to the vf info
81 * @vsi_idx: index of VSI in PF struct
82 * @vsi_queue_id: vsi relative queue id
83 *
84 * return pf relative queue id
85 **/
86static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
87 u8 vsi_queue_id)
88{
89 struct i40e_pf *pf = vf->pf;
90 struct i40e_vsi *vsi = pf->vsi[vsi_idx];
91 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
92
93 if (le16_to_cpu(vsi->info.mapping_flags) &
94 I40E_AQ_VSI_QUE_MAP_NONCONTIG)
95 pf_queue_id =
96 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
97 else
98 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
99 vsi_queue_id;
100
101 return pf_queue_id;
102}
103
104/**
105 * i40e_ctrl_vsi_tx_queue
106 * @vf: pointer to the vf info
107 * @vsi_idx: index of VSI in PF struct
108 * @vsi_queue_id: vsi relative queue index
109 * @ctrl: control flags
110 *
111 * enable/disable/enable check/disable check
112 **/
113static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
114 u16 vsi_queue_id,
115 enum i40e_queue_ctrl ctrl)
116{
117 struct i40e_pf *pf = vf->pf;
118 struct i40e_hw *hw = &pf->hw;
119 bool writeback = false;
120 u16 pf_queue_id;
121 int ret = 0;
122 u32 reg;
123
124 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
125 reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
126
127 switch (ctrl) {
128 case I40E_QUEUE_CTRL_ENABLE:
129 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
130 writeback = true;
131 break;
132 case I40E_QUEUE_CTRL_ENABLECHECK:
133 ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
134 break;
135 case I40E_QUEUE_CTRL_DISABLE:
136 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
137 writeback = true;
138 break;
139 case I40E_QUEUE_CTRL_DISABLECHECK:
140 ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
141 break;
142 case I40E_QUEUE_CTRL_FASTDISABLE:
143 reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
144 writeback = true;
145 break;
146 case I40E_QUEUE_CTRL_FASTDISABLECHECK:
147 ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
148 if (!ret) {
149 reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
150 writeback = true;
151 }
152 break;
153 default:
154 ret = -EINVAL;
155 break;
156 }
157
158 if (writeback) {
159 wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
160 i40e_flush(hw);
161 }
162
163 return ret;
164}
165
166/**
167 * i40e_ctrl_vsi_rx_queue
168 * @vf: pointer to the vf info
169 * @vsi_idx: index of VSI in PF struct
170 * @vsi_queue_id: vsi relative queue index
171 * @ctrl: control flags
172 *
173 * enable/disable/enable check/disable check
174 **/
175static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
176 u16 vsi_queue_id,
177 enum i40e_queue_ctrl ctrl)
178{
179 struct i40e_pf *pf = vf->pf;
180 struct i40e_hw *hw = &pf->hw;
181 bool writeback = false;
182 u16 pf_queue_id;
183 int ret = 0;
184 u32 reg;
185
186 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
187 reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
188
189 switch (ctrl) {
190 case I40E_QUEUE_CTRL_ENABLE:
191 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
192 writeback = true;
193 break;
194 case I40E_QUEUE_CTRL_ENABLECHECK:
195 ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
196 break;
197 case I40E_QUEUE_CTRL_DISABLE:
198 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
199 writeback = true;
200 break;
201 case I40E_QUEUE_CTRL_DISABLECHECK:
202 ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
203 break;
204 case I40E_QUEUE_CTRL_FASTDISABLE:
205 reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
206 writeback = true;
207 break;
208 case I40E_QUEUE_CTRL_FASTDISABLECHECK:
209 ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
210 if (!ret) {
211 reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
212 writeback = true;
213 }
214 break;
215 default:
216 ret = -EINVAL;
217 break;
218 }
219
220 if (writeback) {
221 wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
222 i40e_flush(hw);
223 }
224
225 return ret;
226}
227
228/**
229 * i40e_config_irq_link_list
230 * @vf: pointer to the vf info
231 * @vsi_idx: index of VSI in PF struct
232 * @vecmap: irq map info
233 *
234 * configure irq link list from the map
235 **/
236static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
237 struct i40e_virtchnl_vector_map *vecmap)
238{
239 unsigned long linklistmap = 0, tempmap;
240 struct i40e_pf *pf = vf->pf;
241 struct i40e_hw *hw = &pf->hw;
242 u16 vsi_queue_id, pf_queue_id;
243 enum i40e_queue_type qtype;
244 u16 next_q, vector_id;
245 u32 reg, reg_idx;
246 u16 itr_idx = 0;
247
248 vector_id = vecmap->vector_id;
249 /* setup the head */
250 if (0 == vector_id)
251 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
252 else
253 reg_idx = I40E_VPINT_LNKLSTN(
254 ((pf->hw.func_caps.num_msix_vectors_vf - 1)
255 * vf->vf_id) + (vector_id - 1));
256
257 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
258 /* Special case - No queues mapped on this vector */
259 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
260 goto irq_list_done;
261 }
262 tempmap = vecmap->rxq_map;
263 vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
264 while (vsi_queue_id < I40E_MAX_VSI_QP) {
265 linklistmap |= (1 <<
266 (I40E_VIRTCHNL_SUPPORTED_QTYPES *
267 vsi_queue_id));
268 vsi_queue_id =
269 find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
270 }
271
272 tempmap = vecmap->txq_map;
273 vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
274 while (vsi_queue_id < I40E_MAX_VSI_QP) {
275 linklistmap |= (1 <<
276 (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
277 + 1));
278 vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
279 vsi_queue_id + 1);
280 }
281
282 next_q = find_first_bit(&linklistmap,
283 (I40E_MAX_VSI_QP *
284 I40E_VIRTCHNL_SUPPORTED_QTYPES));
285 vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
286 qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
287 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
288 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
289
290 wr32(hw, reg_idx, reg);
291
292 while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
293 switch (qtype) {
294 case I40E_QUEUE_TYPE_RX:
295 reg_idx = I40E_QINT_RQCTL(pf_queue_id);
296 itr_idx = vecmap->rxitr_idx;
297 break;
298 case I40E_QUEUE_TYPE_TX:
299 reg_idx = I40E_QINT_TQCTL(pf_queue_id);
300 itr_idx = vecmap->txitr_idx;
301 break;
302 default:
303 break;
304 }
305
306 next_q = find_next_bit(&linklistmap,
307 (I40E_MAX_VSI_QP *
308 I40E_VIRTCHNL_SUPPORTED_QTYPES),
309 next_q + 1);
310 if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
311 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
312 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
313 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
314 vsi_queue_id);
315 } else {
316 pf_queue_id = I40E_QUEUE_END_OF_LIST;
317 qtype = 0;
318 }
319
320 /* format for the RQCTL & TQCTL regs is same */
321 reg = (vector_id) |
322 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
323 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
324 (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
325 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
326 wr32(hw, reg_idx, reg);
327 }
328
329irq_list_done:
330 i40e_flush(hw);
331}
332
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 *
 * Programs the HMC Tx queue context from the VF supplied parameters
 * and associates the queue with the VF's PCI function via QTX_CTL.
 * Returns 0 on success or -ENOENT if the HMC context could not be
 * cleared or written.
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	/* ring base is stored in 128-byte units */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
397
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 *
 * Validates the VF supplied buffer sizes, programs the HMC Rx queue
 * context, and returns 0 on success, -EINVAL on a bad parameter, or
 * -ENOENT if the HMC context could not be cleared or written.
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	/* ring base is stored in 128-byte units */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
				  I40E_RX_SPLIT_IP |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		/* header buffer is limited to 2KB minus 64 bytes */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	/* data buffer is limited to 16KB minus 128 bytes */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
490
491/**
492 * i40e_alloc_vsi_res
493 * @vf: pointer to the vf info
494 * @type: type of VSI to allocate
495 *
496 * alloc vf vsi context & resources
497 **/
498static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
499{
500 struct i40e_mac_filter *f = NULL;
501 struct i40e_pf *pf = vf->pf;
502 struct i40e_hw *hw = &pf->hw;
503 struct i40e_vsi *vsi;
504 int ret = 0;
505
506 vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
507
508 if (!vsi) {
509 dev_err(&pf->pdev->dev,
510 "add vsi failed for vf %d, aq_err %d\n",
511 vf->vf_id, pf->hw.aq.asq_last_status);
512 ret = -ENOENT;
513 goto error_alloc_vsi_res;
514 }
515 if (type == I40E_VSI_SRIOV) {
516 vf->lan_vsi_index = vsi->idx;
517 vf->lan_vsi_id = vsi->id;
518 dev_info(&pf->pdev->dev,
519 "LAN VSI index %d, VSI id %d\n",
520 vsi->idx, vsi->id);
521 f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
522 0, true, false);
523 }
524 if (!f) {
525 dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
526 ret = -ENOMEM;
527 goto error_alloc_vsi_res;
528 }
529
530 /* program mac filter */
531 ret = i40e_sync_vsi_filters(vsi);
532 if (ret) {
533 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
534 goto error_alloc_vsi_res;
535 }
536
537 /* accept bcast pkts. by default */
538 ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
539 if (ret) {
540 dev_err(&pf->pdev->dev,
541 "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
542 vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
543 ret = -EINVAL;
544 }
545
546error_alloc_vsi_res:
547 return ret;
548}
549
550/**
551 * i40e_reset_vf
552 * @vf: pointer to the vf structure
553 * @flr: VFLR was issued or not
554 *
555 * reset the vf
556 **/
557int i40e_reset_vf(struct i40e_vf *vf, bool flr)
558{
559 int ret = -ENOENT;
560 struct i40e_pf *pf = vf->pf;
561 struct i40e_hw *hw = &pf->hw;
562 u32 reg, reg_idx, msix_vf;
563 bool rsd = false;
564 u16 pf_queue_id;
565 int i, j;
566
567 /* warn the VF */
568 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
569
570 clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
571
572 /* PF triggers VFR only when VF requests, in case of
573 * VFLR, HW triggers VFR
574 */
575 if (!flr) {
576 /* reset vf using VPGEN_VFRTRIG reg */
577 reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
578 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
579 i40e_flush(hw);
580 }
581
582 /* poll VPGEN_VFRSTAT reg to make sure
583 * that reset is complete
584 */
585 for (i = 0; i < 4; i++) {
586 /* vf reset requires driver to first reset the
587 * vf & than poll the status register to make sure
588 * that the requested op was completed
589 * successfully
590 */
591 udelay(10);
592 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
593 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
594 rsd = true;
595 break;
596 }
597 }
598
599 if (!rsd)
600 dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
601 vf->vf_id);
602
603 /* fast disable qps */
604 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
605 ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
606 I40E_QUEUE_CTRL_FASTDISABLE);
607 ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
608 I40E_QUEUE_CTRL_FASTDISABLE);
609 }
610
611 /* Queue enable/disable requires driver to
612 * first reset the vf & than poll the status register
613 * to make sure that the requested op was completed
614 * successfully
615 */
616 udelay(10);
617 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
618 ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
619 I40E_QUEUE_CTRL_FASTDISABLECHECK);
620 if (ret)
621 dev_info(&pf->pdev->dev,
622 "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
623 vf->lan_vsi_index, j, vf->vf_id);
624 ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
625 I40E_QUEUE_CTRL_FASTDISABLECHECK);
626 if (ret)
627 dev_info(&pf->pdev->dev,
628 "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
629 vf->lan_vsi_index, j, vf->vf_id);
630 }
631
632 /* clear the irq settings */
633 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
634 for (i = 0; i < msix_vf; i++) {
635 /* format is same for both registers */
636 if (0 == i)
637 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
638 else
639 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
640 (vf->vf_id))
641 + (i - 1));
642 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
643 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
644 wr32(hw, reg_idx, reg);
645 i40e_flush(hw);
646 }
647 /* disable interrupts so the VF starts in a known state */
648 for (i = 0; i < msix_vf; i++) {
649 /* format is same for both registers */
650 if (0 == i)
651 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
652 else
653 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
654 (vf->vf_id))
655 + (i - 1));
656 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
657 i40e_flush(hw);
658 }
659
660 /* set the defaults for the rqctl & tqctl registers */
661 reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
662 I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
663 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
664 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
665 wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
666 wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
667 }
668
669 /* clear the reset bit in the VPGEN_VFRTRIG reg */
670 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
671 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
672 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
673 /* tell the VF the reset is done */
674 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
675 i40e_flush(hw);
676
677 return ret;
678}
679
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * Program the hardware so the VF's local queue numbering maps onto the
 * PF queues backing its LAN VSI, then turn the mapping on.
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues: one qtable slot per queue pair,
	 * in order, holding the PF-absolute queue id
	 */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI: each VSILAN_QTABLE register packs two
	 * queue ids (low/high 16 bits), so 7 registers cover 14 queues
	 */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			/* odd-numbered queue goes in the high half */
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}
729
730/**
731 * i40e_disable_vf_mappings
732 * @vf: pointer to the vf info
733 *
734 * disable vf mappings
735 **/
736static void i40e_disable_vf_mappings(struct i40e_vf *vf)
737{
738 struct i40e_pf *pf = vf->pf;
739 struct i40e_hw *hw = &pf->hw;
740 int i;
741
742 /* disable qp mappings */
743 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
744 for (i = 0; i < I40E_MAX_VSI_QP; i++)
745 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
746 I40E_QUEUE_END_OF_LIST);
747 i40e_flush(hw);
748}
749
750/**
751 * i40e_free_vf_res
752 * @vf: pointer to the vf info
753 *
754 * free vf resources
755 **/
756static void i40e_free_vf_res(struct i40e_vf *vf)
757{
758 struct i40e_pf *pf = vf->pf;
759
760 /* free vsi & disconnect it from the parent uplink */
761 if (vf->lan_vsi_index) {
762 i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
763 vf->lan_vsi_index = 0;
764 vf->lan_vsi_id = 0;
765 }
766 /* reset some of the state varibles keeping
767 * track of the resources
768 */
769 vf->num_queue_pairs = 0;
770 vf->vf_states = 0;
771}
772
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the vf info
 *
 * Allocate the VF's hardware VSI context and associated resources,
 * record the queue-pair count for later request validation, and mark
 * the VF initialized. On failure everything acquired so far is freed.
 *
 * Returns 0 on success, the failing allocation's error code otherwise.
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
	/* a VF starts out with the privileged capability set */
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * vf req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* vf is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}
806
807/**
808 * i40e_vfs_are_assigned
809 * @pf: pointer to the pf structure
810 *
811 * Determine if any VFs are assigned to VMs
812 **/
813static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
814{
815 struct pci_dev *pdev = pf->pdev;
816 struct pci_dev *vfdev;
817
818 /* loop through all the VFs to see if we own any that are assigned */
819 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL);
820 while (vfdev) {
821 /* if we don't own it we don't care */
822 if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
823 /* if it is assigned we cannot release it */
824 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
825 return true;
826 }
827
828 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
829 I40E_VF_DEVICE_ID,
830 vfdev);
831 }
832
833 return false;
834}
835
/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * Free all per-VF resources and, when no VF is still assigned to a VM,
 * disable SR-IOV. Interrupt 0 is masked for the duration so the VFLRs
 * triggered by the teardown are not serviced mid-free.
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* nothing to do if VFs were never allocated */
	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
	i40e_flush(hw);

	/* free up vf resources */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		/* only fully-initialized VFs hold a VSI worth freeing */
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;
	pf->num_alloc_vfs = 0;

	/* SR-IOV may only be switched off while none of our VFs are
	 * passed through to a guest
	 */
	if (!i40e_vfs_are_assigned(pf))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");

	/* Re-enable interrupt 0. */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
	i40e_flush(hw);
}
879
880#ifdef CONFIG_PCI_IOV
881/**
882 * i40e_alloc_vfs
883 * @pf: pointer to the pf structure
884 * @num_alloc_vfs: number of vfs to allocate
885 *
886 * allocate vf resources
887 **/
888static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
889{
890 struct i40e_vf *vfs;
891 int i, ret = 0;
892
893 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
894 if (ret) {
895 dev_err(&pf->pdev->dev,
896 "pci_enable_sriov failed with error %d!\n", ret);
897 pf->num_alloc_vfs = 0;
898 goto err_iov;
899 }
900
901 /* allocate memory */
902 vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
903 if (!vfs) {
904 ret = -ENOMEM;
905 goto err_alloc;
906 }
907
908 /* apply default profile */
909 for (i = 0; i < num_alloc_vfs; i++) {
910 vfs[i].pf = pf;
911 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
912 vfs[i].vf_id = i;
913
914 /* assign default capabilities */
915 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
916
917 ret = i40e_alloc_vf_res(&vfs[i]);
918 i40e_reset_vf(&vfs[i], true);
919 if (ret)
920 break;
921
922 /* enable vf vplan_qtable mappings */
923 i40e_enable_vf_mappings(&vfs[i]);
924 }
925 pf->vf = vfs;
926 pf->num_alloc_vfs = num_alloc_vfs;
927
928err_alloc:
929 if (ret)
930 i40e_free_vfs(pf);
931err_iov:
932 return ret;
933}
934
935#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Existing VFs are torn down first
 * when the requested count differs from the current one; requesting the
 * current count is a no-op. Counts above pf->num_req_vfs are refused.
 *
 * Returns @num_vfs on success, a negative error code on failure, and
 * always 0 when built without CONFIG_PCI_IOV.
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	/* changing the VF count requires freeing and re-allocating;
	 * the same count means there is nothing to do
	 */
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	/* never hand out more VFs than were requested at probe time */
	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
975
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the
 * number of VFs in sysfs; zero tears all VFs down.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (!num_vfs) {
		/* zero VFs requested: release everything */
		i40e_free_vfs(pf);
		return 0;
	}

	return i40e_pci_sriov_enable(pdev, num_vfs);
}
994
995/***********************virtual channel routines******************/
996
997/**
998 * i40e_vc_send_msg_to_vf
999 * @vf: pointer to the vf info
1000 * @v_opcode: virtual channel opcode
1001 * @v_retval: virtual channel return value
1002 * @msg: pointer to the msg buffer
1003 * @msglen: msg length
1004 *
1005 * send msg to vf
1006 **/
1007static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1008 u32 v_retval, u8 *msg, u16 msglen)
1009{
1010 struct i40e_pf *pf = vf->pf;
1011 struct i40e_hw *hw = &pf->hw;
1012 i40e_status aq_ret;
1013
1014 /* single place to detect unsuccessful return values */
1015 if (v_retval) {
1016 vf->num_invalid_msgs++;
1017 dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
1018 v_opcode, v_retval);
1019 if (vf->num_invalid_msgs >
1020 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1021 dev_err(&pf->pdev->dev,
1022 "Number of invalid messages exceeded for VF %d\n",
1023 vf->vf_id);
1024 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1025 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
1026 }
1027 } else {
1028 vf->num_valid_msgs++;
1029 }
1030
1031 aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
1032 msg, msglen, NULL);
1033 if (aq_ret) {
1034 dev_err(&pf->pdev->dev,
1035 "Unable to send the message to VF %d aq_err %d\n",
1036 vf->vf_id, pf->hw.aq.asq_last_status);
1037 return -EIO;
1038 }
1039
1040 return 0;
1041}
1042
/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the vf info
 * @opcode: operation code
 * @retval: return value
 *
 * Convenience wrapper around i40e_vc_send_msg_to_vf() for responses
 * that carry no payload, only the opcode and result.
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
1057
1058/**
1059 * i40e_vc_get_version_msg
1060 * @vf: pointer to the vf info
1061 *
1062 * called from the vf to request the API version used by the PF
1063 **/
1064static int i40e_vc_get_version_msg(struct i40e_vf *vf)
1065{
1066 struct i40e_virtchnl_version_info info = {
1067 I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
1068 };
1069
1070 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
1071 I40E_SUCCESS, (u8 *)&info,
1072 sizeof(struct
1073 i40e_virtchnl_version_info));
1074}
1075
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources; replies with an
 * i40e_virtchnl_vf_resource describing the VF's single LAN VSI and
 * marks the VF active on success
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;	/* only the LAN VSI is reported */
	int ret;

	/* the VF must have finished resource allocation first */
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	/* fixed header plus one vsi_resource entry per reported VSI */
	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;	/* error reply carries no payload */
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	/* VLAN offload is only offered when no port VLAN is forced */
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
1136
1137/**
1138 * i40e_vc_reset_vf_msg
1139 * @vf: pointer to the vf info
1140 * @msg: pointer to the msg buffer
1141 * @msglen: msg length
1142 *
1143 * called from the vf to reset itself,
1144 * unlike other virtchnl messages, pf driver
1145 * doesn't send the response back to the vf
1146 **/
1147static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1148{
1149 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
1150 return -ENOENT;
1151
1152 return i40e_reset_vf(vf, false);
1153}
1154
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the promiscuous mode of
 * vf vsis; unicast and multicast promiscuity are set independently
 * from the request's flag bits
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
	    (struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool allmulti = false;
	bool promisc = false;
	i40e_status aq_ret;

	/* requires an active, privileged VF and a valid VSI id;
	 * NOTE(review): the final clause only admits FCOE-type VSIs,
	 * which looks surprising for a promiscuous-mode request --
	 * confirm the intended VSI-type restriction
	 */
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		promisc = true;
	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
						     promisc, NULL);
	if (aq_ret)
		goto error_param;

	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
						       allmulti, NULL);

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
1201
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the rx/tx
 * queues; each queue-pair entry is validated against the VSI before
 * its rx and tx hardware contexts are written
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		/* both halves of the pair must name the same vsi/queue,
		 * and the queue must belong to the VF
		 */
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
1255
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the irq to
 * queue map; every queue bit in each vector's rx and tx maps is
 * validated before the link list for that vector is programmed
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;	/* local copy so find_*_bit can take &tempmap */
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
		while (vsi_queue_id < I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
						     vsi_queue_id + 1);
		}

		/* same check on the tx side of the map */
		tempmap = map->txq_map;
		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
		while (vsi_queue_id < I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
						     vsi_queue_id + 1);
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
1324
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to enable all or specific queue(s); the rx and tx
 * selections are bitmaps of VSI-relative queue ids. All requested ids
 * are validated, queues are enabled, then the enables are re-checked
 * after a short delay. Check failures are logged but do not change the
 * response code sent back to the VF.
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;	/* local copy so find_*_bit can take &tempmap */
	u16 queue_id;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* at least one queue must be selected */
	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* enable every rx queue named in the bitmap */
	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_ENABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* and every tx queue */
	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_ENABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* Poll the status register to make sure that the
	 * requested op was completed successfully
	 */
	udelay(10);

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_ENABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_ENABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}
1422
/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to disable all or specific
 * queue(s); mirror image of i40e_vc_enable_queues_msg(): validate the
 * rx/tx bitmaps, disable the queues, then re-check after a short delay.
 * Check failures are logged but do not alter the response code.
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;	/* local copy so find_*_bit can take &tempmap */
	u16 queue_id;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* at least one queue must be selected */
	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* disable every rx queue named in the bitmap */
	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_DISABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* and every tx queue */
	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_DISABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* Poll the status register to make sure that the
	 * requested op was completed successfully
	 */
	udelay(10);

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_DISABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_DISABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
1521
1522/**
1523 * i40e_vc_get_stats_msg
1524 * @vf: pointer to the vf info
1525 * @msg: pointer to the msg buffer
1526 * @msglen: msg length
1527 *
1528 * called from the vf to get vsi stats
1529 **/
1530static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1531{
1532 struct i40e_virtchnl_queue_select *vqs =
1533 (struct i40e_virtchnl_queue_select *)msg;
1534 struct i40e_pf *pf = vf->pf;
1535 struct i40e_eth_stats stats;
1536 i40e_status aq_ret = 0;
1537 struct i40e_vsi *vsi;
1538
1539 memset(&stats, 0, sizeof(struct i40e_eth_stats));
1540
1541 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1542 aq_ret = I40E_ERR_PARAM;
1543 goto error_param;
1544 }
1545
1546 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1547 aq_ret = I40E_ERR_PARAM;
1548 goto error_param;
1549 }
1550
1551 vsi = pf->vsi[vqs->vsi_id];
1552 if (!vsi) {
1553 aq_ret = I40E_ERR_PARAM;
1554 goto error_param;
1555 }
1556 i40e_update_eth_stats(vsi);
1557 memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
1558
1559error_param:
1560 /* send the response back to the vf */
1561 return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
1562 (u8 *)&stats, sizeof(stats));
1563}
1564
1565/**
1566 * i40e_vc_add_mac_addr_msg
1567 * @vf: pointer to the vf info
1568 * @msg: pointer to the msg buffer
1569 * @msglen: msg length
1570 *
1571 * add guest mac address filter
1572 **/
1573static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1574{
1575 struct i40e_virtchnl_ether_addr_list *al =
1576 (struct i40e_virtchnl_ether_addr_list *)msg;
1577 struct i40e_pf *pf = vf->pf;
1578 struct i40e_vsi *vsi = NULL;
1579 u16 vsi_id = al->vsi_id;
1580 i40e_status aq_ret = 0;
1581 int i;
1582
1583 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1584 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1585 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1586 aq_ret = I40E_ERR_PARAM;
1587 goto error_param;
1588 }
1589
1590 for (i = 0; i < al->num_elements; i++) {
1591 if (is_broadcast_ether_addr(al->list[i].addr) ||
1592 is_zero_ether_addr(al->list[i].addr)) {
1593 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
1594 al->list[i].addr);
1595 aq_ret = I40E_ERR_PARAM;
1596 goto error_param;
1597 }
1598 }
1599 vsi = pf->vsi[vsi_id];
1600
1601 /* add new addresses to the list */
1602 for (i = 0; i < al->num_elements; i++) {
1603 struct i40e_mac_filter *f;
1604
1605 f = i40e_find_mac(vsi, al->list[i].addr, true, false);
1606 if (f) {
1607 if (i40e_is_vsi_in_vlan(vsi))
1608 f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
1609 true, false);
1610 else
1611 f = i40e_add_filter(vsi, al->list[i].addr, -1,
1612 true, false);
1613 }
1614
1615 if (!f) {
1616 dev_err(&pf->pdev->dev,
1617 "Unable to add VF MAC filter\n");
1618 aq_ret = I40E_ERR_PARAM;
1619 goto error_param;
1620 }
1621 }
1622
1623 /* program the updated filter list */
1624 if (i40e_sync_vsi_filters(vsi))
1625 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1626
1627error_param:
1628 /* send the response to the vf */
1629 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1630 aq_ret);
1631}
1632
1633/**
1634 * i40e_vc_del_mac_addr_msg
1635 * @vf: pointer to the vf info
1636 * @msg: pointer to the msg buffer
1637 * @msglen: msg length
1638 *
1639 * remove guest mac address filter
1640 **/
1641static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1642{
1643 struct i40e_virtchnl_ether_addr_list *al =
1644 (struct i40e_virtchnl_ether_addr_list *)msg;
1645 struct i40e_pf *pf = vf->pf;
1646 struct i40e_vsi *vsi = NULL;
1647 u16 vsi_id = al->vsi_id;
1648 i40e_status aq_ret = 0;
1649 int i;
1650
1651 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1652 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1653 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1654 aq_ret = I40E_ERR_PARAM;
1655 goto error_param;
1656 }
1657 vsi = pf->vsi[vsi_id];
1658
1659 /* delete addresses from the list */
1660 for (i = 0; i < al->num_elements; i++)
1661 i40e_del_filter(vsi, al->list[i].addr,
1662 I40E_VLAN_ANY, true, false);
1663
1664 /* program the updated filter list */
1665 if (i40e_sync_vsi_filters(vsi))
1666 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1667
1668error_param:
1669 /* send the response to the vf */
1670 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1671 aq_ret);
1672}
1673
/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id(s); the request is rejected when a port VLAN
 * (pvid) is set on the VSI. VLAN stripping is enabled before the
 * filters are added.
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* range-check all vlan ids before adding any filter */
	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];
	/* a forced port VLAN takes precedence over guest VLANs */
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		/* individual failures are logged but don't abort the loop */
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
1727
1728/**
1729 * i40e_vc_remove_vlan_msg
1730 * @vf: pointer to the vf info
1731 * @msg: pointer to the msg buffer
1732 * @msglen: msg length
1733 *
1734 * remove programmed guest vlan id
1735 **/
1736static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1737{
1738 struct i40e_virtchnl_vlan_filter_list *vfl =
1739 (struct i40e_virtchnl_vlan_filter_list *)msg;
1740 struct i40e_pf *pf = vf->pf;
1741 struct i40e_vsi *vsi = NULL;
1742 u16 vsi_id = vfl->vsi_id;
1743 i40e_status aq_ret = 0;
1744 int i;
1745
1746 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1747 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1748 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1749 aq_ret = I40E_ERR_PARAM;
1750 goto error_param;
1751 }
1752
1753 for (i = 0; i < vfl->num_elements; i++) {
1754 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1755 aq_ret = I40E_ERR_PARAM;
1756 goto error_param;
1757 }
1758 }
1759
1760 vsi = pf->vsi[vsi_id];
1761 if (vsi->info.pvid) {
1762 aq_ret = I40E_ERR_PARAM;
1763 goto error_param;
1764 }
1765
1766 for (i = 0; i < vfl->num_elements; i++) {
1767 int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
1768 if (ret)
1769 dev_err(&pf->pdev->dev,
1770 "Unable to delete VF vlan filter %d, error %d\n",
1771 vfl->vlan_id[i], ret);
1772 }
1773
1774error_param:
1775 /* send the response to the vf */
1776 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
1777}
1778
1779/**
1780 * i40e_vc_fcoe_msg
1781 * @vf: pointer to the vf info
1782 * @msg: pointer to the msg buffer
1783 * @msglen: msg length
1784 *
1785 * called from the vf for the fcoe msgs
1786 **/
1787static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1788{
1789 i40e_status aq_ret = 0;
1790
1791 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1792 !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
1793 aq_ret = I40E_ERR_PARAM;
1794 goto error_param;
1795 }
1796 aq_ret = I40E_ERR_NOT_IMPLEMENTED;
1797
1798error_param:
1799 /* send the response to the vf */
1800 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
1801}
1802
1803/**
1804 * i40e_vc_validate_vf_msg
1805 * @vf: pointer to the vf info
1806 * @msg: pointer to the msg buffer
1807 * @msglen: msg length
1808 * @msghndl: msg handle
1809 *
1810 * validate msg
1811 **/
1812static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
1813 u32 v_retval, u8 *msg, u16 msglen)
1814{
1815 bool err_msg_format = false;
1816 int valid_len;
1817
1818 /* Check if VF is disabled. */
1819 if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
1820 return I40E_ERR_PARAM;
1821
1822 /* Validate message length. */
1823 switch (v_opcode) {
1824 case I40E_VIRTCHNL_OP_VERSION:
1825 valid_len = sizeof(struct i40e_virtchnl_version_info);
1826 break;
1827 case I40E_VIRTCHNL_OP_RESET_VF:
1828 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1829 valid_len = 0;
1830 break;
1831 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1832 valid_len = sizeof(struct i40e_virtchnl_txq_info);
1833 break;
1834 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1835 valid_len = sizeof(struct i40e_virtchnl_rxq_info);
1836 break;
1837 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1838 valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
1839 if (msglen >= valid_len) {
1840 struct i40e_virtchnl_vsi_queue_config_info *vqc =
1841 (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1842 valid_len += (vqc->num_queue_pairs *
1843 sizeof(struct
1844 i40e_virtchnl_queue_pair_info));
1845 if (vqc->num_queue_pairs == 0)
1846 err_msg_format = true;
1847 }
1848 break;
1849 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1850 valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
1851 if (msglen >= valid_len) {
1852 struct i40e_virtchnl_irq_map_info *vimi =
1853 (struct i40e_virtchnl_irq_map_info *)msg;
1854 valid_len += (vimi->num_vectors *
1855 sizeof(struct i40e_virtchnl_vector_map));
1856 if (vimi->num_vectors == 0)
1857 err_msg_format = true;
1858 }
1859 break;
1860 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1861 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1862 valid_len = sizeof(struct i40e_virtchnl_queue_select);
1863 break;
1864 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1865 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1866 valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
1867 if (msglen >= valid_len) {
1868 struct i40e_virtchnl_ether_addr_list *veal =
1869 (struct i40e_virtchnl_ether_addr_list *)msg;
1870 valid_len += veal->num_elements *
1871 sizeof(struct i40e_virtchnl_ether_addr);
1872 if (veal->num_elements == 0)
1873 err_msg_format = true;
1874 }
1875 break;
1876 case I40E_VIRTCHNL_OP_ADD_VLAN:
1877 case I40E_VIRTCHNL_OP_DEL_VLAN:
1878 valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
1879 if (msglen >= valid_len) {
1880 struct i40e_virtchnl_vlan_filter_list *vfl =
1881 (struct i40e_virtchnl_vlan_filter_list *)msg;
1882 valid_len += vfl->num_elements * sizeof(u16);
1883 if (vfl->num_elements == 0)
1884 err_msg_format = true;
1885 }
1886 break;
1887 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1888 valid_len = sizeof(struct i40e_virtchnl_promisc_info);
1889 break;
1890 case I40E_VIRTCHNL_OP_GET_STATS:
1891 valid_len = sizeof(struct i40e_virtchnl_queue_select);
1892 break;
1893 /* These are always errors coming from the VF. */
1894 case I40E_VIRTCHNL_OP_EVENT:
1895 case I40E_VIRTCHNL_OP_UNKNOWN:
1896 default:
1897 return -EPERM;
1898 break;
1899 }
1900 /* few more checks */
1901 if ((valid_len != msglen) || (err_msg_format)) {
1902 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
1903 return -EINVAL;
1904 } else {
1905 return 0;
1906 }
1907}
1908
/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @v_opcode: virtchnl operation code from the message header
 * @v_retval: return value from the message header (passed to validation)
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from vf
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_vf *vf = &(pf->vf[vf_id]);
	struct i40e_hw *hw = &pf->hw;
	int ret;

	pf->vf_aq_requests++;
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
		return ret;
	}
	/* a well-formed message marks the VF active in the reset status reg */
	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
	/* dispatch to the per-opcode handler; each handler sends its own
	 * response back to the VF
	 */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		ret = i40e_vc_reset_vf_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_FCOE:
		ret = i40e_vc_fcoe_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev,
			"Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
1990
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vlfr irq handler to
 * free up vf resources and state variables
 *
 * For each VF whose bit is set in GLGEN_VFLRSTAT, acknowledge the
 * event, reset the VF, and rebuild its resources in the default state.
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	/* ack the pending flag before scanning so a new VFLR that arrives
	 * while we work re-arms this handler
	 */
	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		/* GLGEN_VFLRSTAT is a bit array indexed by absolute VF id
		 * (vf_base_id + vf_id), 32 bits per register
		 */
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			if (i40e_reset_vf(vf, true))
				dev_err(&pf->pdev->dev,
					"Unable to reset the VF %d\n", vf_id);
			/* free up vf resources to destroy vsi state */
			i40e_free_vf_res(vf);

			/* allocate new vf resources with the default state */
			if (i40e_alloc_vf_res(vf))
				dev_err(&pf->pdev->dev,
					"Unable to allocate VF resources %d\n",
					vf_id);

			i40e_enable_vf_mappings(vf);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}
2042
2043/**
2044 * i40e_vc_vf_broadcast
2045 * @pf: pointer to the pf structure
2046 * @opcode: operation code
2047 * @retval: return value
2048 * @msg: pointer to the msg buffer
2049 * @msglen: msg length
2050 *
2051 * send a message to all VFs on a given PF
2052 **/
2053static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
2054 enum i40e_virtchnl_ops v_opcode,
2055 i40e_status v_retval, u8 *msg,
2056 u16 msglen)
2057{
2058 struct i40e_hw *hw = &pf->hw;
2059 struct i40e_vf *vf = pf->vf;
2060 int i;
2061
2062 for (i = 0; i < pf->num_alloc_vfs; i++) {
2063 /* Ignore return value on purpose - a given VF may fail, but
2064 * we need to keep going and send to all of them
2065 */
2066 i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
2067 msg, msglen, NULL);
2068 vf++;
2069 }
2070}
2071
2072/**
2073 * i40e_vc_notify_link_state
2074 * @pf: pointer to the pf structure
2075 *
2076 * send a link status message to all VFs on a given PF
2077 **/
2078void i40e_vc_notify_link_state(struct i40e_pf *pf)
2079{
2080 struct i40e_virtchnl_pf_event pfe;
2081
2082 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
2083 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
2084 pfe.event_data.link_event.link_status =
2085 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
2086 pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
2087
2088 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
2089 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
2090}
2091
2092/**
2093 * i40e_vc_notify_reset
2094 * @pf: pointer to the pf structure
2095 *
2096 * indicate a pending reset to all VFs on a given PF
2097 **/
2098void i40e_vc_notify_reset(struct i40e_pf *pf)
2099{
2100 struct i40e_virtchnl_pf_event pfe;
2101
2102 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
2103 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
2104 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
2105 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
2106}
2107
2108/**
2109 * i40e_vc_notify_vf_reset
2110 * @vf: pointer to the vf structure
2111 *
2112 * indicate a pending reset to the given VF
2113 **/
2114void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
2115{
2116 struct i40e_virtchnl_pf_event pfe;
2117
2118 pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
2119 pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
2120 i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
2121 I40E_SUCCESS, (u8 *)&pfe,
2122 sizeof(struct i40e_virtchnl_pf_event), NULL);
2123}
2124
2125/**
2126 * i40e_ndo_set_vf_mac
2127 * @netdev: network interface device structure
2128 * @vf_id: vf identifier
2129 * @mac: mac address
2130 *
2131 * program vf mac address
2132 **/
2133int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2134{
2135 struct i40e_netdev_priv *np = netdev_priv(netdev);
2136 struct i40e_vsi *vsi = np->vsi;
2137 struct i40e_pf *pf = vsi->back;
2138 struct i40e_mac_filter *f;
2139 struct i40e_vf *vf;
2140 int ret = 0;
2141
2142 /* validate the request */
2143 if (vf_id >= pf->num_alloc_vfs) {
2144 dev_err(&pf->pdev->dev,
2145 "Invalid VF Identifier %d\n", vf_id);
2146 ret = -EINVAL;
2147 goto error_param;
2148 }
2149
2150 vf = &(pf->vf[vf_id]);
2151 vsi = pf->vsi[vf->lan_vsi_index];
2152 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2153 dev_err(&pf->pdev->dev,
2154 "Uninitialized VF %d\n", vf_id);
2155 ret = -EINVAL;
2156 goto error_param;
2157 }
2158
2159 if (!is_valid_ether_addr(mac)) {
2160 dev_err(&pf->pdev->dev,
2161 "Invalid VF ethernet address\n");
2162 ret = -EINVAL;
2163 goto error_param;
2164 }
2165
2166 /* delete the temporary mac address */
2167 i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
2168
2169 /* add the new mac address */
2170 f = i40e_add_filter(vsi, mac, 0, true, false);
2171 if (!f) {
2172 dev_err(&pf->pdev->dev,
2173 "Unable to add VF ucast filter\n");
2174 ret = -ENOMEM;
2175 goto error_param;
2176 }
2177
2178 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
2179 /* program mac filter */
2180 if (i40e_sync_vsi_filters(vsi)) {
2181 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
2182 ret = -EIO;
2183 goto error_param;
2184 }
2185 memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
2186 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2187 ret = 0;
2188
2189error_param:
2190 return ret;
2191}
2192
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: vlan identifier (0 together with qos==0 clears the port vlan)
 * @qos: priority setting
 *
 * program vf vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	/* qos is the 3-bit 802.1p priority, hence the limit of 7 */
	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}
	/* install the new port vlan, or drop stripping when clearing it */
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vlan_stripping_disable(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}
	}

	/* ret may still hold a failure from i40e_vsi_add_pvid above */
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
	ret = 0;

error_pvid:
	return ret;
}
2271
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate
 *
 * Not implemented yet; always reports -EOPNOTSUPP to the caller.
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
	return -EOPNOTSUPP;
}
2284
2285/**
2286 * i40e_ndo_get_vf_config
2287 * @netdev: network interface device structure
2288 * @vf_id: vf identifier
2289 * @ivi: vf configuration structure
2290 *
2291 * return vf configuration
2292 **/
2293int i40e_ndo_get_vf_config(struct net_device *netdev,
2294 int vf_id, struct ifla_vf_info *ivi)
2295{
2296 struct i40e_netdev_priv *np = netdev_priv(netdev);
2297 struct i40e_mac_filter *f, *ftmp;
2298 struct i40e_vsi *vsi = np->vsi;
2299 struct i40e_pf *pf = vsi->back;
2300 struct i40e_vf *vf;
2301 int ret = 0;
2302
2303 /* validate the request */
2304 if (vf_id >= pf->num_alloc_vfs) {
2305 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2306 ret = -EINVAL;
2307 goto error_param;
2308 }
2309
2310 vf = &(pf->vf[vf_id]);
2311 /* first vsi is always the LAN vsi */
2312 vsi = pf->vsi[vf->lan_vsi_index];
2313 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2314 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2315 ret = -EINVAL;
2316 goto error_param;
2317 }
2318
2319 ivi->vf = vf_id;
2320
2321 /* first entry of the list is the default ethernet address */
2322 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2323 memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
2324 break;
2325 }
2326
2327 ivi->tx_rate = 0;
2328 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
2329 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
2330 I40E_VLAN_PRIORITY_SHIFT;
2331 ret = 0;
2332
2333error_param:
2334 return ret;
2335}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
new file mode 100644
index 000000000000..360382cf3040
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -0,0 +1,120 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#ifndef _I40E_VIRTCHNL_PF_H_
29#define _I40E_VIRTCHNL_PF_H_
30
31#include "i40e.h"
32
33#define I40E_MAX_MACVLAN_FILTERS 256
34#define I40E_MAX_VLAN_FILTERS 256
35#define I40E_MAX_VLANID 4095
36
37#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
38
39#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
40#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
41
42#define I40E_VLAN_PRIORITY_SHIFT 12
43#define I40E_VLAN_MASK 0xFFF
44#define I40E_PRIORITY_MASK 0x7000
45
/* Various queue ctrls
 * NOTE(review): names suggest enable/disable requests plus their
 * completion checks, with a "fast" disable variant — confirm exact
 * semantics against the queue control helpers in i40e_virtchnl_pf.c.
 */
enum i40e_queue_ctrl {
	I40E_QUEUE_CTRL_UNKNOWN = 0,
	I40E_QUEUE_CTRL_ENABLE,
	I40E_QUEUE_CTRL_ENABLECHECK,
	I40E_QUEUE_CTRL_DISABLE,
	I40E_QUEUE_CTRL_DISABLECHECK,
	I40E_QUEUE_CTRL_FASTDISABLE,
	I40E_QUEUE_CTRL_FASTDISABLECHECK,
};
56
/* VF states (bit positions in i40e_vf.vf_states) */
enum i40e_vf_states {
	I40E_VF_STAT_INIT = 0,	/* resources allocated; gates ndo_* config */
	I40E_VF_STAT_ACTIVE,	/* VF may issue virtchnl requests */
	I40E_VF_STAT_FCOEENA,	/* FCoE messages accepted from this VF */
	I40E_VF_STAT_DISABLED,	/* all messages from this VF are rejected */
};
64
/* VF capabilities (bit positions in i40e_vf.vf_caps) */
enum i40e_vf_capabilities {
	I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,	/* may add/remove VLAN filters */
	I40E_VIRTCHNL_VF_CAP_L2,
};
70
/* VF information structure — the PF keeps one of these per allocated VF */
struct i40e_vf {
	struct i40e_pf *pf;	/* owning PF; never NULL once allocated */

	/* vf id in the pf space */
	u16 vf_id;
	/* all vf vsis connect to the same parent */
	enum i40e_switch_element_types parent_type;

	/* vf Port Extender (PE) stag if used */
	u16 stag;

	/* default unicast addresses for the LAN and FCoE VSIs */
	struct i40e_virtchnl_ether_addr default_lan_addr;
	struct i40e_virtchnl_ether_addr default_fcoe_addr;

	/* VSI indices - actual VSI pointers are maintained in the PF structure
	 * When assigned, these will be non-zero, because VSI 0 is always
	 * the main LAN VSI for the PF.
	 */
	u8 lan_vsi_index;	/* index into PF struct */
	u8 lan_vsi_id;		/* ID as used by firmware */

	u8 num_queue_pairs;	/* num of qps assigned to vf vsis */
	u64 num_mdd_events;	/* num of mdd events detected */
	u64 num_invalid_msgs;	/* num of malformed or invalid msgs detected */
	u64 num_valid_msgs;	/* num of valid msgs detected */

	/* bitmaps tested with test_bit(); values from the enums above */
	unsigned long vf_caps;	/* vf's adv. capabilities */
	unsigned long vf_states;	/* vf's runtime states */
};
101
/* SR-IOV lifecycle and virtchnl message handling */
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
/* entry point from the admin queue handler for messages sent by a VF */
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen);
/* resets VFs whose VFLR bit is set in GLGEN_VFLRSTAT */
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
int i40e_reset_vf(struct i40e_vf *vf, bool flr);
/* warns a single VF of an impending reset */
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);

/* vf configuration related iplink handlers */
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos);
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi);
/* broadcast link state / impending reset events to every VF on the PF */
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
119
120#endif /* _I40E_VIRTCHNL_PF_H_ */