authorJesse Brandeburg <jesse.brandeburg@intel.com>2013-09-11 04:39:46 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2013-09-11 04:30:00 -0400
commit41c445ff0f482bb6e6b72dcee9e598e20575f743 (patch)
treee39f25a630083c43badc05a1d6e3ef805e7807f5 /drivers/net
parentc19d65c95c6d472d69829fea7d473228493d5245 (diff)
i40e: main driver core
This is the driver for the Intel(R) Ethernet Controller XL710 Family. This driver is targeted at basic ethernet functionality only, and will be improved upon further as time goes on. This patch contains the driver entry points but does not include transmit and receive (see the next patch in the series) routines.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
CC: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
CC: e1000-devel@lists.sourceforge.net
Tested-by: Kavindya Deegala <kavindya.s.deegala@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c  7375
1 file changed, 7375 insertions, 0 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
new file mode 100644
index 000000000000..601d482694ea
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -0,0 +1,7375 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28/* Local includes */
29#include "i40e.h"
30
31const char i40e_driver_name[] = "i40e";
32static const char i40e_driver_string[] =
33 "Intel(R) Ethernet Connection XL710 Network Driver";
34
35#define DRV_KERN "-k"
36
37#define DRV_VERSION_MAJOR 0
38#define DRV_VERSION_MINOR 3
39#define DRV_VERSION_BUILD 9
40#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
41 __stringify(DRV_VERSION_MINOR) "." \
42 __stringify(DRV_VERSION_BUILD) DRV_KERN
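/* For reference, with the values above the __stringify() chain expands
 * DRV_VERSION to the adjacent string literals "0" "." "3" "." "9" "-k",
 * i.e. the single string "0.3.9-k".
 */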
43const char i40e_driver_version_str[] = DRV_VERSION;
44static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
45
46/* a few forward declarations */
47static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
48static void i40e_handle_reset_warning(struct i40e_pf *pf);
49static int i40e_add_vsi(struct i40e_vsi *vsi);
50static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
51static int i40e_setup_pf_switch(struct i40e_pf *pf);
52static int i40e_setup_misc_vector(struct i40e_pf *pf);
53static void i40e_determine_queue_usage(struct i40e_pf *pf);
54static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
55
56/* i40e_pci_tbl - PCI Device ID Table
57 *
58 * Last entry must be all 0s
59 *
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
61 * Class, Class Mask, private data (not used) }
62 */
63static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
64 {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
65 {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
66 {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
67 {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
68 {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
69 {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
70 {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
71 {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
72 {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
73 {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
74 /* required last entry */
75 {0, }
76};
77MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
78
79#define I40E_MAX_VF_COUNT 128
80static int debug = -1;
81module_param(debug, int, 0);
82MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
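/* Illustrative usage, assuming the module is built as i40e.ko:
 *
 *	modprobe i40e debug=16
 *
 * would request the most verbose level described above.
 */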
83
84MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
85MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
86MODULE_LICENSE("GPL");
87MODULE_VERSION(DRV_VERSION);
88
89/**
90 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
91 * @hw: pointer to the HW structure
92 * @mem: ptr to mem struct to fill out
93 * @size: size of memory requested
94 * @alignment: what to align the allocation to
95 **/
96int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
97 u64 size, u32 alignment)
98{
99 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
100
101 mem->size = ALIGN(size, alignment);
102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
103 &mem->pa, GFP_KERNEL);
104 if (mem->va)
105 return 0;
106
107 return -ENOMEM;
108}
109
110/**
111 * i40e_free_dma_mem_d - OS specific memory free for shared code
112 * @hw: pointer to the HW structure
113 * @mem: ptr to mem struct to free
114 **/
115int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
116{
117 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
118
119 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
120 mem->va = NULL;
121 mem->pa = 0;
122 mem->size = 0;
123
124 return 0;
125}
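/* A minimal sketch of how the shared code might pair the two DMA
 * helpers above; the function name, the 4 KB size, and the 4096-byte
 * alignment here are illustrative assumptions, not part of the API.
 */
static int __maybe_unused i40e_dma_roundtrip_sketch(struct i40e_hw *hw)
{
	struct i40e_dma_mem mem;
	int err;

	err = i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096);
	if (err)
		return err;	/* -ENOMEM from the allocator */

	/* ... hand mem.pa to the hardware, use mem.va from the CPU ... */

	return i40e_free_dma_mem_d(hw, &mem);
}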
126
127/**
128 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
129 * @hw: pointer to the HW structure
130 * @mem: ptr to mem struct to fill out
131 * @size: size of memory requested
132 **/
133int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
134 u32 size)
135{
136 mem->size = size;
137 mem->va = kzalloc(size, GFP_KERNEL);
138
139 if (mem->va)
140 return 0;
141
142 return -ENOMEM;
143}
144
145/**
146 * i40e_free_virt_mem_d - OS specific memory free for shared code
147 * @hw: pointer to the HW structure
148 * @mem: ptr to mem struct to free
149 **/
150int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
151{
152 /* it's ok to kfree a NULL pointer */
153 kfree(mem->va);
154 mem->va = NULL;
155 mem->size = 0;
156
157 return 0;
158}
159
160/**
161 * i40e_get_lump - find a lump of free generic resource
162 * @pf: board private structure
163 * @pile: the pile of resource to search
164 * @needed: the number of items needed
165 * @id: an owner id to stick on the items assigned
166 *
167 * Returns the base item index of the lump, or negative for error
168 *
169 * The search_hint trick and lack of advanced fit-finding only work
 170 * because the lump requests are highly likely to all be the same size.
171 * Linear search time and any fragmentation should be minimal.
172 **/
173static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
174 u16 needed, u16 id)
175{
176 int ret = -ENOMEM;
177 int i = 0;
178 int j = 0;
179
180 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
181 dev_info(&pf->pdev->dev,
182 "param err: pile=%p needed=%d id=0x%04x\n",
183 pile, needed, id);
184 return -EINVAL;
185 }
186
187 /* start the linear search with an imperfect hint */
188 i = pile->search_hint;
189 while (i < pile->num_entries && ret < 0) {
190 /* skip already allocated entries */
191 if (pile->list[i] & I40E_PILE_VALID_BIT) {
192 i++;
193 continue;
194 }
195
196 /* do we have enough in this lump? */
197 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
198 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
199 break;
200 }
201
202 if (j == needed) {
203 /* there was enough, so assign it to the requestor */
204 for (j = 0; j < needed; j++)
205 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
206 ret = i;
207 pile->search_hint = i + j;
208 } else {
209 /* not enough, so skip over it and continue looking */
210 i += j;
211 }
212 }
213
214 return ret;
215}
216
217/**
218 * i40e_put_lump - return a lump of generic resource
219 * @pile: the pile of resource to search
220 * @index: the base item index
221 * @id: the owner id of the items assigned
222 *
223 * Returns the count of items in the lump
224 **/
225static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
226{
227 int valid_id = (id | I40E_PILE_VALID_BIT);
228 int count = 0;
229 int i;
230
231 if (!pile || index >= pile->num_entries)
232 return -EINVAL;
233
234 for (i = index;
235 i < pile->num_entries && pile->list[i] == valid_id;
236 i++) {
237 pile->list[i] = 0;
238 count++;
239 }
240
241 if (count && index < pile->search_hint)
242 pile->search_hint = index;
243
244 return count;
245}
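/* A minimal sketch of the expected alloc/free pairing for the lump
 * tracker above; the function name and the request of 16 entries for
 * owner id 5 are assumptions for illustration only.
 */
static void __maybe_unused i40e_lump_usage_sketch(struct i40e_pf *pf,
						  struct i40e_lump_tracking *pile)
{
	int base = i40e_get_lump(pf, pile, 16, 5);

	if (base < 0)
		return;		/* -EINVAL or -ENOMEM */

	/* ... entries base through base+15 now carry owner id 5 ... */

	i40e_put_lump(pile, base, 5);	/* returns the count freed, here 16 */
}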
246
247/**
248 * i40e_service_event_schedule - Schedule the service task to wake up
249 * @pf: board private structure
250 *
251 * If not already scheduled, this puts the task into the work queue
252 **/
253static void i40e_service_event_schedule(struct i40e_pf *pf)
254{
255 if (!test_bit(__I40E_DOWN, &pf->state) &&
256 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
257 !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
258 schedule_work(&pf->service_task);
259}
260
261/**
262 * i40e_tx_timeout - Respond to a Tx Hang
263 * @netdev: network interface device structure
264 *
265 * If any port has noticed a Tx timeout, it is likely that the whole
266 * device is munged, not just the one netdev port, so go for the full
267 * reset.
268 **/
269static void i40e_tx_timeout(struct net_device *netdev)
270{
271 struct i40e_netdev_priv *np = netdev_priv(netdev);
272 struct i40e_vsi *vsi = np->vsi;
273 struct i40e_pf *pf = vsi->back;
274
275 pf->tx_timeout_count++;
276
277 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
278 pf->tx_timeout_recovery_level = 0;
279 pf->tx_timeout_last_recovery = jiffies;
280 netdev_info(netdev, "tx_timeout recovery level %d\n",
281 pf->tx_timeout_recovery_level);
282
283 switch (pf->tx_timeout_recovery_level) {
284 case 0:
285 /* disable and re-enable queues for the VSI */
286 if (in_interrupt()) {
287 set_bit(__I40E_REINIT_REQUESTED, &pf->state);
288 set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
289 } else {
290 i40e_vsi_reinit_locked(vsi);
291 }
292 break;
293 case 1:
294 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
295 break;
296 case 2:
297 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
298 break;
299 case 3:
300 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
301 break;
302 default:
303 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
304 i40e_down(vsi);
305 break;
306 }
307 i40e_service_event_schedule(pf);
308 pf->tx_timeout_recovery_level++;
309}
310
311/**
312 * i40e_release_rx_desc - Store the new tail and head values
313 * @rx_ring: ring to bump
314 * @val: new head index
315 **/
316static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
317{
318 rx_ring->next_to_use = val;
319
320 /* Force memory writes to complete before letting h/w
321 * know there are new descriptors to fetch. (Only
322 * applicable for weak-ordered memory model archs,
323 * such as IA-64).
324 */
325 wmb();
326 writel(val, rx_ring->tail);
327}
328
329/**
330 * i40e_get_vsi_stats_struct - Get System Network Statistics
331 * @vsi: the VSI we care about
332 *
333 * Returns the address of the device statistics structure.
334 * The statistics are actually updated from the service task.
335 **/
336struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
337{
338 return &vsi->net_stats;
339}
340
341/**
342 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
343 * @netdev: network interface device structure
344 *
345 * Returns the address of the device statistics structure.
346 * The statistics are actually updated from the service task.
347 **/
348static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
349 struct net_device *netdev,
350 struct rtnl_link_stats64 *storage)
351{
352 struct i40e_netdev_priv *np = netdev_priv(netdev);
353 struct i40e_vsi *vsi = np->vsi;
354
355 *storage = *i40e_get_vsi_stats_struct(vsi);
356
357 return storage;
358}
359
360/**
361 * i40e_vsi_reset_stats - Resets all stats of the given vsi
362 * @vsi: the VSI to have its stats reset
363 **/
364void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
365{
366 struct rtnl_link_stats64 *ns;
367 int i;
368
369 if (!vsi)
370 return;
371
372 ns = i40e_get_vsi_stats_struct(vsi);
373 memset(ns, 0, sizeof(*ns));
374 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
375 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
376 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
377 if (vsi->rx_rings)
378 for (i = 0; i < vsi->num_queue_pairs; i++) {
 379 memset(&vsi->rx_rings[i].rx_stats, 0,
380 sizeof(vsi->rx_rings[i].rx_stats));
381 memset(&vsi->tx_rings[i].tx_stats, 0,
382 sizeof(vsi->tx_rings[i].tx_stats));
383 }
384 vsi->stat_offsets_loaded = false;
385}
386
387/**
388 * i40e_pf_reset_stats - Reset all of the stats for the given pf
389 * @pf: the PF to be reset
390 **/
391void i40e_pf_reset_stats(struct i40e_pf *pf)
392{
393 memset(&pf->stats, 0, sizeof(pf->stats));
394 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
395 pf->stat_offsets_loaded = false;
396}
397
398/**
399 * i40e_stat_update48 - read and update a 48 bit stat from the chip
400 * @hw: ptr to the hardware info
401 * @hireg: the high 32 bit reg to read
402 * @loreg: the low 32 bit reg to read
403 * @offset_loaded: has the initial offset been loaded yet
404 * @offset: ptr to current offset value
405 * @stat: ptr to the stat
406 *
407 * Since the device stats are not reset at PFReset, they likely will not
408 * be zeroed when the driver starts. We'll save the first values read
409 * and use them as offsets to be subtracted from the raw values in order
410 * to report stats that count from zero. In the process, we also manage
411 * the potential roll-over.
412 **/
413static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
414 bool offset_loaded, u64 *offset, u64 *stat)
415{
416 u64 new_data;
417
418 if (hw->device_id == I40E_QEMU_DEVICE_ID) {
419 new_data = rd32(hw, loreg);
420 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
421 } else {
422 new_data = rd64(hw, loreg);
423 }
424 if (!offset_loaded)
425 *offset = new_data;
426 if (likely(new_data >= *offset))
427 *stat = new_data - *offset;
428 else
429 *stat = (new_data + ((u64)1 << 48)) - *offset;
430 *stat &= 0xFFFFFFFFFFFFULL;
431}
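/* Worked example of the roll-over handling above: if the saved offset
 * is 0xFFFFFFFFFFF0 and the 48-bit counter wraps to a raw reading of
 * 0x10, then new_data < *offset, so
 *	*stat = (0x10 + ((u64)1 << 48)) - 0xFFFFFFFFFFF0 = 0x20,
 * i.e. 32 events: 16 before the wrap and 16 after it.
 */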
432
433/**
434 * i40e_stat_update32 - read and update a 32 bit stat from the chip
435 * @hw: ptr to the hardware info
436 * @reg: the hw reg to read
437 * @offset_loaded: has the initial offset been loaded yet
438 * @offset: ptr to current offset value
439 * @stat: ptr to the stat
440 **/
441static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
442 bool offset_loaded, u64 *offset, u64 *stat)
443{
444 u32 new_data;
445
446 new_data = rd32(hw, reg);
447 if (!offset_loaded)
448 *offset = new_data;
449 if (likely(new_data >= *offset))
450 *stat = (u32)(new_data - *offset);
451 else
452 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
453}
454
455/**
456 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
457 * @vsi: the VSI to be updated
458 **/
459void i40e_update_eth_stats(struct i40e_vsi *vsi)
460{
461 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
462 struct i40e_pf *pf = vsi->back;
463 struct i40e_hw *hw = &pf->hw;
464 struct i40e_eth_stats *oes;
465 struct i40e_eth_stats *es; /* device's eth stats */
466
467 es = &vsi->eth_stats;
468 oes = &vsi->eth_stats_offsets;
469
470 /* Gather up the stats that the hw collects */
471 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
472 vsi->stat_offsets_loaded,
473 &oes->tx_errors, &es->tx_errors);
474 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
475 vsi->stat_offsets_loaded,
476 &oes->rx_discards, &es->rx_discards);
477
478 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
479 I40E_GLV_GORCL(stat_idx),
480 vsi->stat_offsets_loaded,
481 &oes->rx_bytes, &es->rx_bytes);
482 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
483 I40E_GLV_UPRCL(stat_idx),
484 vsi->stat_offsets_loaded,
485 &oes->rx_unicast, &es->rx_unicast);
486 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
487 I40E_GLV_MPRCL(stat_idx),
488 vsi->stat_offsets_loaded,
489 &oes->rx_multicast, &es->rx_multicast);
490 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
491 I40E_GLV_BPRCL(stat_idx),
492 vsi->stat_offsets_loaded,
493 &oes->rx_broadcast, &es->rx_broadcast);
494
495 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
496 I40E_GLV_GOTCL(stat_idx),
497 vsi->stat_offsets_loaded,
498 &oes->tx_bytes, &es->tx_bytes);
499 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
500 I40E_GLV_UPTCL(stat_idx),
501 vsi->stat_offsets_loaded,
502 &oes->tx_unicast, &es->tx_unicast);
503 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
504 I40E_GLV_MPTCL(stat_idx),
505 vsi->stat_offsets_loaded,
506 &oes->tx_multicast, &es->tx_multicast);
507 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
508 I40E_GLV_BPTCL(stat_idx),
509 vsi->stat_offsets_loaded,
510 &oes->tx_broadcast, &es->tx_broadcast);
511 vsi->stat_offsets_loaded = true;
512}
513
514/**
515 * i40e_update_veb_stats - Update Switch component statistics
516 * @veb: the VEB being updated
517 **/
518static void i40e_update_veb_stats(struct i40e_veb *veb)
519{
520 struct i40e_pf *pf = veb->pf;
521 struct i40e_hw *hw = &pf->hw;
522 struct i40e_eth_stats *oes;
523 struct i40e_eth_stats *es; /* device's eth stats */
524 int idx = 0;
525
526 idx = veb->stats_idx;
527 es = &veb->stats;
528 oes = &veb->stats_offsets;
529
530 /* Gather up the stats that the hw collects */
531 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
532 veb->stat_offsets_loaded,
533 &oes->tx_discards, &es->tx_discards);
534 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
535 veb->stat_offsets_loaded,
536 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
537
538 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
539 veb->stat_offsets_loaded,
540 &oes->rx_bytes, &es->rx_bytes);
541 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
542 veb->stat_offsets_loaded,
543 &oes->rx_unicast, &es->rx_unicast);
544 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
545 veb->stat_offsets_loaded,
546 &oes->rx_multicast, &es->rx_multicast);
547 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
548 veb->stat_offsets_loaded,
549 &oes->rx_broadcast, &es->rx_broadcast);
550
551 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
552 veb->stat_offsets_loaded,
553 &oes->tx_bytes, &es->tx_bytes);
554 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
555 veb->stat_offsets_loaded,
556 &oes->tx_unicast, &es->tx_unicast);
557 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
558 veb->stat_offsets_loaded,
559 &oes->tx_multicast, &es->tx_multicast);
560 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
561 veb->stat_offsets_loaded,
562 &oes->tx_broadcast, &es->tx_broadcast);
563 veb->stat_offsets_loaded = true;
564}
565
566/**
567 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
568 * @pf: the corresponding PF
569 *
570 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
571 **/
572static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
573{
574 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
575 struct i40e_hw_port_stats *nsd = &pf->stats;
576 struct i40e_hw *hw = &pf->hw;
577 u64 xoff = 0;
578 u16 i, v;
579
580 if ((hw->fc.current_mode != I40E_FC_FULL) &&
581 (hw->fc.current_mode != I40E_FC_RX_PAUSE))
582 return;
583
584 xoff = nsd->link_xoff_rx;
585 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
586 pf->stat_offsets_loaded,
587 &osd->link_xoff_rx, &nsd->link_xoff_rx);
588
589 /* No new LFC xoff rx */
590 if (!(nsd->link_xoff_rx - xoff))
591 return;
592
593 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
594 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
595 struct i40e_vsi *vsi = pf->vsi[v];
596
597 if (!vsi)
598 continue;
599
600 for (i = 0; i < vsi->num_queue_pairs; i++) {
601 struct i40e_ring *ring = &vsi->tx_rings[i];
602 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
603 }
604 }
605}
606
607/**
608 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
609 * @pf: the corresponding PF
610 *
611 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
612 **/
613static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
614{
615 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
616 struct i40e_hw_port_stats *nsd = &pf->stats;
617 bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
618 struct i40e_dcbx_config *dcb_cfg;
619 struct i40e_hw *hw = &pf->hw;
620 u16 i, v;
621 u8 tc;
622
623 dcb_cfg = &hw->local_dcbx_config;
624
625 /* See if DCB enabled with PFC TC */
626 if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
627 !(dcb_cfg->pfc.pfcenable)) {
628 i40e_update_link_xoff_rx(pf);
629 return;
630 }
631
632 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
633 u64 prio_xoff = nsd->priority_xoff_rx[i];
634 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
635 pf->stat_offsets_loaded,
636 &osd->priority_xoff_rx[i],
637 &nsd->priority_xoff_rx[i]);
638
639 /* No new PFC xoff rx */
640 if (!(nsd->priority_xoff_rx[i] - prio_xoff))
641 continue;
642 /* Get the TC for given priority */
643 tc = dcb_cfg->etscfg.prioritytable[i];
644 xoff[tc] = true;
645 }
646
647 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
648 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
649 struct i40e_vsi *vsi = pf->vsi[v];
650
651 if (!vsi)
652 continue;
653
654 for (i = 0; i < vsi->num_queue_pairs; i++) {
655 struct i40e_ring *ring = &vsi->tx_rings[i];
656
657 tc = ring->dcb_tc;
658 if (xoff[tc])
659 clear_bit(__I40E_HANG_CHECK_ARMED,
660 &ring->state);
661 }
662 }
663}
664
665/**
666 * i40e_update_stats - Update the board statistics counters.
667 * @vsi: the VSI to be updated
668 *
669 * There are a few instances where we store the same stat in a
670 * couple of different structs. This is partly because we have
671 * the netdev stats that need to be filled out, which is slightly
672 * different from the "eth_stats" defined by the chip and used in
673 * VF communications. We sort it all out here in a central place.
674 **/
675void i40e_update_stats(struct i40e_vsi *vsi)
676{
677 struct i40e_pf *pf = vsi->back;
678 struct i40e_hw *hw = &pf->hw;
679 struct rtnl_link_stats64 *ons;
680 struct rtnl_link_stats64 *ns; /* netdev stats */
681 struct i40e_eth_stats *oes;
682 struct i40e_eth_stats *es; /* device's eth stats */
683 u32 tx_restart, tx_busy;
684 u32 rx_page, rx_buf;
685 u64 rx_p, rx_b;
686 u64 tx_p, tx_b;
687 int i;
688 u16 q;
689
690 if (test_bit(__I40E_DOWN, &vsi->state) ||
691 test_bit(__I40E_CONFIG_BUSY, &pf->state))
692 return;
693
694 ns = i40e_get_vsi_stats_struct(vsi);
695 ons = &vsi->net_stats_offsets;
696 es = &vsi->eth_stats;
697 oes = &vsi->eth_stats_offsets;
698
699 /* Gather up the netdev and vsi stats that the driver collects
700 * on the fly during packet processing
701 */
702 rx_b = rx_p = 0;
703 tx_b = tx_p = 0;
704 tx_restart = tx_busy = 0;
705 rx_page = 0;
706 rx_buf = 0;
707 for (q = 0; q < vsi->num_queue_pairs; q++) {
708 struct i40e_ring *p;
709
710 p = &vsi->rx_rings[q];
711 rx_b += p->rx_stats.bytes;
712 rx_p += p->rx_stats.packets;
713 rx_buf += p->rx_stats.alloc_rx_buff_failed;
714 rx_page += p->rx_stats.alloc_rx_page_failed;
715
716 p = &vsi->tx_rings[q];
717 tx_b += p->tx_stats.bytes;
718 tx_p += p->tx_stats.packets;
719 tx_restart += p->tx_stats.restart_queue;
720 tx_busy += p->tx_stats.tx_busy;
721 }
722 vsi->tx_restart = tx_restart;
723 vsi->tx_busy = tx_busy;
724 vsi->rx_page_failed = rx_page;
725 vsi->rx_buf_failed = rx_buf;
726
727 ns->rx_packets = rx_p;
728 ns->rx_bytes = rx_b;
729 ns->tx_packets = tx_p;
730 ns->tx_bytes = tx_b;
731
732 i40e_update_eth_stats(vsi);
733 /* update netdev stats from eth stats */
734 ons->rx_errors = oes->rx_errors;
735 ns->rx_errors = es->rx_errors;
736 ons->tx_errors = oes->tx_errors;
737 ns->tx_errors = es->tx_errors;
738 ons->multicast = oes->rx_multicast;
739 ns->multicast = es->rx_multicast;
740 ons->tx_dropped = oes->tx_discards;
741 ns->tx_dropped = es->tx_discards;
742
743 /* Get the port data only if this is the main PF VSI */
744 if (vsi == pf->vsi[pf->lan_vsi]) {
745 struct i40e_hw_port_stats *nsd = &pf->stats;
746 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
747
748 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
749 I40E_GLPRT_GORCL(hw->port),
750 pf->stat_offsets_loaded,
751 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
752 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
753 I40E_GLPRT_GOTCL(hw->port),
754 pf->stat_offsets_loaded,
755 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
756 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
757 pf->stat_offsets_loaded,
758 &osd->eth.rx_discards,
759 &nsd->eth.rx_discards);
760 i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
761 pf->stat_offsets_loaded,
762 &osd->eth.tx_discards,
763 &nsd->eth.tx_discards);
764 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
765 I40E_GLPRT_MPRCL(hw->port),
766 pf->stat_offsets_loaded,
767 &osd->eth.rx_multicast,
768 &nsd->eth.rx_multicast);
769
770 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
771 pf->stat_offsets_loaded,
772 &osd->tx_dropped_link_down,
773 &nsd->tx_dropped_link_down);
774
775 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
776 pf->stat_offsets_loaded,
777 &osd->crc_errors, &nsd->crc_errors);
778 ns->rx_crc_errors = nsd->crc_errors;
779
780 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
781 pf->stat_offsets_loaded,
782 &osd->illegal_bytes, &nsd->illegal_bytes);
783 ns->rx_errors = nsd->crc_errors
784 + nsd->illegal_bytes;
785
786 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
787 pf->stat_offsets_loaded,
788 &osd->mac_local_faults,
789 &nsd->mac_local_faults);
790 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
791 pf->stat_offsets_loaded,
792 &osd->mac_remote_faults,
793 &nsd->mac_remote_faults);
794
795 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
796 pf->stat_offsets_loaded,
797 &osd->rx_length_errors,
798 &nsd->rx_length_errors);
799 ns->rx_length_errors = nsd->rx_length_errors;
800
801 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
802 pf->stat_offsets_loaded,
803 &osd->link_xon_rx, &nsd->link_xon_rx);
804 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
805 pf->stat_offsets_loaded,
806 &osd->link_xon_tx, &nsd->link_xon_tx);
807 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
808 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
809 pf->stat_offsets_loaded,
810 &osd->link_xoff_tx, &nsd->link_xoff_tx);
811
812 for (i = 0; i < 8; i++) {
813 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
814 pf->stat_offsets_loaded,
815 &osd->priority_xon_rx[i],
816 &nsd->priority_xon_rx[i]);
817 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
818 pf->stat_offsets_loaded,
819 &osd->priority_xon_tx[i],
820 &nsd->priority_xon_tx[i]);
821 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
822 pf->stat_offsets_loaded,
823 &osd->priority_xoff_tx[i],
824 &nsd->priority_xoff_tx[i]);
825 i40e_stat_update32(hw,
826 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
827 pf->stat_offsets_loaded,
828 &osd->priority_xon_2_xoff[i],
829 &nsd->priority_xon_2_xoff[i]);
830 }
831
832 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
833 I40E_GLPRT_PRC64L(hw->port),
834 pf->stat_offsets_loaded,
835 &osd->rx_size_64, &nsd->rx_size_64);
836 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
837 I40E_GLPRT_PRC127L(hw->port),
838 pf->stat_offsets_loaded,
839 &osd->rx_size_127, &nsd->rx_size_127);
840 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
841 I40E_GLPRT_PRC255L(hw->port),
842 pf->stat_offsets_loaded,
843 &osd->rx_size_255, &nsd->rx_size_255);
844 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
845 I40E_GLPRT_PRC511L(hw->port),
846 pf->stat_offsets_loaded,
847 &osd->rx_size_511, &nsd->rx_size_511);
848 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
849 I40E_GLPRT_PRC1023L(hw->port),
850 pf->stat_offsets_loaded,
851 &osd->rx_size_1023, &nsd->rx_size_1023);
852 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
853 I40E_GLPRT_PRC1522L(hw->port),
854 pf->stat_offsets_loaded,
855 &osd->rx_size_1522, &nsd->rx_size_1522);
856 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
857 I40E_GLPRT_PRC9522L(hw->port),
858 pf->stat_offsets_loaded,
859 &osd->rx_size_big, &nsd->rx_size_big);
860
861 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
862 I40E_GLPRT_PTC64L(hw->port),
863 pf->stat_offsets_loaded,
864 &osd->tx_size_64, &nsd->tx_size_64);
865 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
866 I40E_GLPRT_PTC127L(hw->port),
867 pf->stat_offsets_loaded,
868 &osd->tx_size_127, &nsd->tx_size_127);
869 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
870 I40E_GLPRT_PTC255L(hw->port),
871 pf->stat_offsets_loaded,
872 &osd->tx_size_255, &nsd->tx_size_255);
873 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
874 I40E_GLPRT_PTC511L(hw->port),
875 pf->stat_offsets_loaded,
876 &osd->tx_size_511, &nsd->tx_size_511);
877 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
878 I40E_GLPRT_PTC1023L(hw->port),
879 pf->stat_offsets_loaded,
880 &osd->tx_size_1023, &nsd->tx_size_1023);
881 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
882 I40E_GLPRT_PTC1522L(hw->port),
883 pf->stat_offsets_loaded,
884 &osd->tx_size_1522, &nsd->tx_size_1522);
885 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
886 I40E_GLPRT_PTC9522L(hw->port),
887 pf->stat_offsets_loaded,
888 &osd->tx_size_big, &nsd->tx_size_big);
889
890 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
891 pf->stat_offsets_loaded,
892 &osd->rx_undersize, &nsd->rx_undersize);
893 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
894 pf->stat_offsets_loaded,
895 &osd->rx_fragments, &nsd->rx_fragments);
896 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
897 pf->stat_offsets_loaded,
898 &osd->rx_oversize, &nsd->rx_oversize);
899 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
900 pf->stat_offsets_loaded,
901 &osd->rx_jabber, &nsd->rx_jabber);
902 }
903
904 pf->stat_offsets_loaded = true;
905}
906
907/**
908 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
909 * @vsi: the VSI to be searched
910 * @macaddr: the MAC address
911 * @vlan: the vlan
 912 * @is_vf: make sure it's a vf filter, else doesn't matter
 913 * @is_netdev: make sure it's a netdev filter, else doesn't matter
914 *
915 * Returns ptr to the filter object or NULL
916 **/
917static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
918 u8 *macaddr, s16 vlan,
919 bool is_vf, bool is_netdev)
920{
921 struct i40e_mac_filter *f;
922
923 if (!vsi || !macaddr)
924 return NULL;
925
926 list_for_each_entry(f, &vsi->mac_filter_list, list) {
927 if ((ether_addr_equal(macaddr, f->macaddr)) &&
928 (vlan == f->vlan) &&
929 (!is_vf || f->is_vf) &&
930 (!is_netdev || f->is_netdev))
931 return f;
932 }
933 return NULL;
934}
935
936/**
937 * i40e_find_mac - Find a mac addr in the macvlan filters list
938 * @vsi: the VSI to be searched
939 * @macaddr: the MAC address we are searching for
 940 * @is_vf: make sure it's a vf filter, else doesn't matter
 941 * @is_netdev: make sure it's a netdev filter, else doesn't matter
942 *
943 * Returns the first filter with the provided MAC address or NULL if
944 * MAC address was not found
945 **/
946struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
947 bool is_vf, bool is_netdev)
948{
949 struct i40e_mac_filter *f;
950
951 if (!vsi || !macaddr)
952 return NULL;
953
954 list_for_each_entry(f, &vsi->mac_filter_list, list) {
955 if ((ether_addr_equal(macaddr, f->macaddr)) &&
956 (!is_vf || f->is_vf) &&
957 (!is_netdev || f->is_netdev))
958 return f;
959 }
960 return NULL;
961}
962
963/**
964 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
965 * @vsi: the VSI to be searched
966 *
967 * Returns true if VSI is in vlan mode or false otherwise
968 **/
969bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
970{
971 struct i40e_mac_filter *f;
972
 973 /* Only a vlan of -1 on every filter denotes not being in vlan
 974 * mode, so we have to walk the whole list to be sure
975 */
976 list_for_each_entry(f, &vsi->mac_filter_list, list) {
977 if (f->vlan >= 0)
978 return true;
979 }
980
981 return false;
982}
983
984/**
985 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
986 * @vsi: the VSI to be searched
987 * @macaddr: the mac address to be filtered
988 * @is_vf: true if it is a vf
989 * @is_netdev: true if it is a netdev
990 *
991 * Goes through all the macvlan filters and adds a
992 * macvlan filter for each unique vlan that already exists
993 *
994 * Returns first filter found on success, else NULL
995 **/
996struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
997 bool is_vf, bool is_netdev)
998{
999 struct i40e_mac_filter *f;
1000
1001 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1002 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1003 is_vf, is_netdev)) {
1004 if (!i40e_add_filter(vsi, macaddr, f->vlan,
1005 is_vf, is_netdev))
1006 return NULL;
1007 }
1008 }
1009
1010 return list_first_entry_or_null(&vsi->mac_filter_list,
1011 struct i40e_mac_filter, list);
1012}
1013
1014/**
1015 * i40e_add_filter - Add a mac/vlan filter to the VSI
1016 * @vsi: the VSI to be searched
1017 * @macaddr: the MAC address
1018 * @vlan: the vlan
 1019 * @is_vf: make sure it's a vf filter, else doesn't matter
 1020 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1021 *
1022 * Returns ptr to the filter object or NULL when no memory available.
1023 **/
1024struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1025 u8 *macaddr, s16 vlan,
1026 bool is_vf, bool is_netdev)
1027{
1028 struct i40e_mac_filter *f;
1029
1030 if (!vsi || !macaddr)
1031 return NULL;
1032
1033 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1034 if (!f) {
1035 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1036 if (!f)
1037 goto add_filter_out;
1038
1039 memcpy(f->macaddr, macaddr, ETH_ALEN);
1040 f->vlan = vlan;
1041 f->changed = true;
1042
1043 INIT_LIST_HEAD(&f->list);
1044 list_add(&f->list, &vsi->mac_filter_list);
1045 }
1046
1047 /* increment counter and add a new flag if needed */
1048 if (is_vf) {
1049 if (!f->is_vf) {
1050 f->is_vf = true;
1051 f->counter++;
1052 }
1053 } else if (is_netdev) {
1054 if (!f->is_netdev) {
1055 f->is_netdev = true;
1056 f->counter++;
1057 }
1058 } else {
1059 f->counter++;
1060 }
1061
1062 /* changed tells sync_filters_subtask to
1063 * push the filter down to the firmware
1064 */
1065 if (f->changed) {
1066 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1067 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1068 }
1069
1070add_filter_out:
1071 return f;
1072}
1073
1074/**
1075 * i40e_del_filter - Remove a mac/vlan filter from the VSI
1076 * @vsi: the VSI to be searched
1077 * @macaddr: the MAC address
1078 * @vlan: the vlan
1079 * @is_vf: make sure it's a vf filter, else doesn't matter
1080 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1081 **/
1082void i40e_del_filter(struct i40e_vsi *vsi,
1083 u8 *macaddr, s16 vlan,
1084 bool is_vf, bool is_netdev)
1085{
1086 struct i40e_mac_filter *f;
1087
1088 if (!vsi || !macaddr)
1089 return;
1090
1091 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1092 if (!f || f->counter == 0)
1093 return;
1094
1095 if (is_vf) {
1096 if (f->is_vf) {
1097 f->is_vf = false;
1098 f->counter--;
1099 }
1100 } else if (is_netdev) {
1101 if (f->is_netdev) {
1102 f->is_netdev = false;
1103 f->counter--;
1104 }
1105 } else {
1106 /* make sure we don't remove a filter in use by vf or netdev */
1107 int min_f = 0;
1108 min_f += (f->is_vf ? 1 : 0);
1109 min_f += (f->is_netdev ? 1 : 0);
1110
1111 if (f->counter > min_f)
1112 f->counter--;
1113 }
1114
1115 /* counter == 0 tells sync_filters_subtask to
1116 * remove the filter from the firmware's list
1117 */
1118 if (f->counter == 0) {
1119 f->changed = true;
1120 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1121 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1122 }
1123}
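/* A minimal sketch of the reference counting done by i40e_add_filter()
 * and i40e_del_filter() above; the function name, MAC address, and
 * vlan 0 are assumptions for illustration only.
 */
static void __maybe_unused i40e_filter_refcount_sketch(struct i40e_vsi *vsi)
{
	u8 mac[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};

	i40e_add_filter(vsi, mac, 0, false, true);	/* netdev reference */
	i40e_add_filter(vsi, mac, 0, true, false);	/* vf ref, same filter */

	/* counter is now 2; the filter is only flagged for removal from
	 * the firmware once both owners drop it and counter reaches 0
	 */
	i40e_del_filter(vsi, mac, 0, false, true);
	i40e_del_filter(vsi, mac, 0, true, false);
}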
1124
1125/**
1126 * i40e_set_mac - NDO callback to set mac address
1127 * @netdev: network interface device structure
1128 * @p: pointer to an address structure
1129 *
1130 * Returns 0 on success, negative on failure
1131 **/
1132static int i40e_set_mac(struct net_device *netdev, void *p)
1133{
1134 struct i40e_netdev_priv *np = netdev_priv(netdev);
1135 struct i40e_vsi *vsi = np->vsi;
1136 struct sockaddr *addr = p;
1137 struct i40e_mac_filter *f;
1138
1139 if (!is_valid_ether_addr(addr->sa_data))
1140 return -EADDRNOTAVAIL;
1141
1142 netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
1143
1144 if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
1145 return 0;
1146
1147 if (vsi->type == I40E_VSI_MAIN) {
1148 i40e_status ret;
1149 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1150 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1151 addr->sa_data, NULL);
1152 if (ret) {
1153 netdev_info(netdev,
1154 "Addr change for Main VSI failed: %d\n",
1155 ret);
1156 return -EADDRNOTAVAIL;
1157 }
1158
1159 memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
1160 }
1161
1162 /* In order to be sure to not drop any packets, add the new address
1163 * then delete the old one.
1164 */
1165 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
1166 if (!f)
1167 return -ENOMEM;
1168
1169 i40e_sync_vsi_filters(vsi);
1170 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
1171 i40e_sync_vsi_filters(vsi);
1172
1173 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1174
1175 return 0;
1176}
1177
1178/**
1179 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1180 * @vsi: the VSI being setup
1181 * @ctxt: VSI context structure
1182 * @enabled_tc: Enabled TCs bitmap
1183 * @is_add: True if called before Add VSI
1184 *
1185 * Setup VSI queue mapping for enabled traffic classes.
1186 **/
1187static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1188 struct i40e_vsi_context *ctxt,
1189 u8 enabled_tc,
1190 bool is_add)
1191{
1192 struct i40e_pf *pf = vsi->back;
1193 u16 sections = 0;
1194 u8 netdev_tc = 0;
1195 u16 numtc = 0;
1196 u16 qcount;
1197 u8 offset;
1198 u16 qmap;
1199 int i;
1200
1201 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1202 offset = 0;
1203
1204 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1205 /* Find numtc from enabled TC bitmap */
1206 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1207 if (enabled_tc & (1 << i)) /* TC is enabled */
1208 numtc++;
1209 }
1210 if (!numtc) {
1211 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1212 numtc = 1;
1213 }
1214 } else {
 1215 /* At least TC0 is enabled in the non-DCB case */
1216 numtc = 1;
1217 }
1218
1219 vsi->tc_config.numtc = numtc;
1220 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1221
1222 /* Setup queue offset/count for all TCs for given VSI */
1223 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1224 /* See if the given TC is enabled for the given VSI */
1225 if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
1226 int pow, num_qps;
1227
1228 vsi->tc_config.tc_info[i].qoffset = offset;
1229 switch (vsi->type) {
1230 case I40E_VSI_MAIN:
1231 if (i == 0)
1232 qcount = pf->rss_size;
1233 else
1234 qcount = pf->num_tc_qps;
1235 vsi->tc_config.tc_info[i].qcount = qcount;
1236 break;
1237 case I40E_VSI_FDIR:
1238 case I40E_VSI_SRIOV:
1239 case I40E_VSI_VMDQ2:
1240 default:
1241 qcount = vsi->alloc_queue_pairs;
1242 vsi->tc_config.tc_info[i].qcount = qcount;
1243 WARN_ON(i != 0);
1244 break;
1245 }
1246
1247 /* find the power-of-2 of the number of queue pairs */
1248 num_qps = vsi->tc_config.tc_info[i].qcount;
1249 pow = 0;
1250 while (num_qps &&
1251 ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
1252 pow++;
1253 num_qps >>= 1;
1254 }
1255
1256 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1257 qmap =
1258 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1259 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1260
1261 offset += vsi->tc_config.tc_info[i].qcount;
1262 } else {
1263 /* TC is not enabled so set the offset to
1264 * default queue and allocate one queue
1265 * for the given TC.
1266 */
1267 vsi->tc_config.tc_info[i].qoffset = 0;
1268 vsi->tc_config.tc_info[i].qcount = 1;
1269 vsi->tc_config.tc_info[i].netdev_tc = 0;
1270
1271 qmap = 0;
1272 }
1273 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1274 }
1275
1276 /* Set actual Tx/Rx queue pairs */
1277 vsi->num_queue_pairs = offset;
1278
1279 /* Scheduler section valid can only be set for ADD VSI */
1280 if (is_add) {
1281 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1282
1283 ctxt->info.up_enable_bits = enabled_tc;
1284 }
1285 if (vsi->type == I40E_VSI_SRIOV) {
1286 ctxt->info.mapping_flags |=
1287 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1288 for (i = 0; i < vsi->num_queue_pairs; i++)
1289 ctxt->info.queue_mapping[i] =
1290 cpu_to_le16(vsi->base_queue + i);
1291 } else {
1292 ctxt->info.mapping_flags |=
1293 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1294 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1295 }
1296 ctxt->info.valid_sections |= cpu_to_le16(sections);
1297}
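/* Worked example of the qmap math above: a TC with qcount = 12 ends
 * the power-of-2 loop at pow = 4, since 1 << 4 = 16 is the smallest
 * power of two that is >= 12, so the TC's queue-count field in qmap
 * encodes 2^4 = 16 queues.
 */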
1298
1299/**
1300 * i40e_set_rx_mode - NDO callback to set the netdev filters
1301 * @netdev: network interface device structure
1302 **/
1303static void i40e_set_rx_mode(struct net_device *netdev)
1304{
1305 struct i40e_netdev_priv *np = netdev_priv(netdev);
1306 struct i40e_mac_filter *f, *ftmp;
1307 struct i40e_vsi *vsi = np->vsi;
1308 struct netdev_hw_addr *uca;
1309 struct netdev_hw_addr *mca;
1310 struct netdev_hw_addr *ha;
1311
1312 /* add addr if not already in the filter list */
1313 netdev_for_each_uc_addr(uca, netdev) {
1314 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1315 if (i40e_is_vsi_in_vlan(vsi))
1316 i40e_put_mac_in_vlan(vsi, uca->addr,
1317 false, true);
1318 else
1319 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1320 false, true);
1321 }
1322 }
1323
1324 netdev_for_each_mc_addr(mca, netdev) {
1325 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1326 if (i40e_is_vsi_in_vlan(vsi))
1327 i40e_put_mac_in_vlan(vsi, mca->addr,
1328 false, true);
1329 else
1330 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1331 false, true);
1332 }
1333 }
1334
1335 /* remove filter if not in netdev list */
1336 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1337 bool found = false;
1338
1339 if (!f->is_netdev)
1340 continue;
1341
1342 if (is_multicast_ether_addr(f->macaddr)) {
1343 netdev_for_each_mc_addr(mca, netdev) {
1344 if (ether_addr_equal(mca->addr, f->macaddr)) {
1345 found = true;
1346 break;
1347 }
1348 }
1349 } else {
1350 netdev_for_each_uc_addr(uca, netdev) {
1351 if (ether_addr_equal(uca->addr, f->macaddr)) {
1352 found = true;
1353 break;
1354 }
1355 }
1356
1357 for_each_dev_addr(netdev, ha) {
1358 if (ether_addr_equal(ha->addr, f->macaddr)) {
1359 found = true;
1360 break;
1361 }
1362 }
1363 }
1364 if (!found)
1365 i40e_del_filter(
1366 vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1367 }
1368
1369 /* check for other flag changes */
1370 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1371 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1372 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1373 }
1374}
1375
1376/**
1377 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1378 * @vsi: ptr to the VSI
1379 *
1380 * Push any outstanding VSI filter changes through the AdminQ.
1381 *
1382 * Returns 0 or error value
1383 **/
1384int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1385{
1386 struct i40e_mac_filter *f, *ftmp;
1387 bool promisc_forced_on = false;
1388 bool add_happened = false;
1389 int filter_list_len = 0;
1390 u32 changed_flags = 0;
1391 i40e_status ret = 0;
1392 struct i40e_pf *pf;
1393 int num_add = 0;
1394 int num_del = 0;
1395 u16 cmd_flags;
1396
 1397 /* pointers for the add/del filter arrays, kcalloc'd below */
1398 struct i40e_aqc_add_macvlan_element_data *add_list;
1399 struct i40e_aqc_remove_macvlan_element_data *del_list;
1400
1401 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1402 usleep_range(1000, 2000);
1403 pf = vsi->back;
1404
1405 if (vsi->netdev) {
1406 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1407 vsi->current_netdev_flags = vsi->netdev->flags;
1408 }
1409
1410 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1411 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1412
1413 filter_list_len = pf->hw.aq.asq_buf_size /
1414 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1415 del_list = kcalloc(filter_list_len,
1416 sizeof(struct i40e_aqc_remove_macvlan_element_data),
1417 GFP_KERNEL);
 1418 if (!del_list) {
 1419 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
 return -ENOMEM;
 }
1420
1421 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1422 if (!f->changed)
1423 continue;
1424
1425 if (f->counter != 0)
1426 continue;
1427 f->changed = false;
1428 cmd_flags = 0;
1429
1430 /* add to delete list */
1431 memcpy(del_list[num_del].mac_addr,
1432 f->macaddr, ETH_ALEN);
1433 del_list[num_del].vlan_tag =
1434 cpu_to_le16((u16)(f->vlan ==
1435 I40E_VLAN_ANY ? 0 : f->vlan));
1436
1437 /* vlan0 as wild card to allow packets from all vlans */
1438 if (f->vlan == I40E_VLAN_ANY ||
1439 (vsi->netdev && !(vsi->netdev->features &
1440 NETIF_F_HW_VLAN_CTAG_FILTER)))
1441 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1442 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1443 del_list[num_del].flags = cmd_flags;
1444 num_del++;
1445
1446 /* unlink from filter list */
1447 list_del(&f->list);
1448 kfree(f);
1449
1450 /* flush a full buffer */
1451 if (num_del == filter_list_len) {
1452 ret = i40e_aq_remove_macvlan(&pf->hw,
1453 vsi->seid, del_list, num_del,
1454 NULL);
1455 num_del = 0;
 1456 memset(del_list, 0, filter_list_len * sizeof(*del_list));
1457
1458 if (ret)
1459 dev_info(&pf->pdev->dev,
1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1461 ret,
1462 pf->hw.aq.asq_last_status);
1463 }
1464 }
1465 if (num_del) {
1466 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1467 del_list, num_del, NULL);
1468 num_del = 0;
1469
1470 if (ret)
1471 dev_info(&pf->pdev->dev,
1472 "ignoring delete macvlan error, err %d, aq_err %d\n",
1473 ret, pf->hw.aq.asq_last_status);
1474 }
1475
1476 kfree(del_list);
1477 del_list = NULL;
1478
1479 /* do all the adds now */
1480 filter_list_len = pf->hw.aq.asq_buf_size /
 1481 sizeof(struct i40e_aqc_add_macvlan_element_data);
1482 add_list = kcalloc(filter_list_len,
1483 sizeof(struct i40e_aqc_add_macvlan_element_data),
1484 GFP_KERNEL);
 1485 if (!add_list) {
 1486 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
 return -ENOMEM;
 }
1487
1488 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1489 if (!f->changed)
1490 continue;
1491
1492 if (f->counter == 0)
1493 continue;
1494 f->changed = false;
1495 add_happened = true;
1496 cmd_flags = 0;
1497
1498 /* add to add array */
1499 memcpy(add_list[num_add].mac_addr,
1500 f->macaddr, ETH_ALEN);
1501 add_list[num_add].vlan_tag =
1502 cpu_to_le16(
1503 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1504 add_list[num_add].queue_number = 0;
1505
1506 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1507
1508 /* vlan0 as wild card to allow packets from all vlans */
1509 if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
1510 !(vsi->netdev->features &
1511 NETIF_F_HW_VLAN_CTAG_FILTER)))
1512 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1513 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1514 num_add++;
1515
1516 /* flush a full buffer */
1517 if (num_add == filter_list_len) {
1518 ret = i40e_aq_add_macvlan(&pf->hw,
1519 vsi->seid,
1520 add_list,
1521 num_add,
1522 NULL);
1523 num_add = 0;
1524
1525 if (ret)
1526 break;
 1527 memset(add_list, 0, filter_list_len * sizeof(*add_list));
1528 }
1529 }
1530 if (num_add) {
1531 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1532 add_list, num_add, NULL);
1533 num_add = 0;
1534 }
1535 kfree(add_list);
1536 add_list = NULL;
1537
1538 if (add_happened && (!ret)) {
1539 /* do nothing */;
1540 } else if (add_happened && (ret)) {
1541 dev_info(&pf->pdev->dev,
1542 "add filter failed, err %d, aq_err %d\n",
1543 ret, pf->hw.aq.asq_last_status);
1544 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1545 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1546 &vsi->state)) {
1547 promisc_forced_on = true;
1548 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1549 &vsi->state);
1550 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1551 }
1552 }
1553 }
1554
1555 /* check for changes in promiscuous modes */
1556 if (changed_flags & IFF_ALLMULTI) {
1557 bool cur_multipromisc;
1558 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1559 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1560 vsi->seid,
1561 cur_multipromisc,
1562 NULL);
1563 if (ret)
1564 dev_info(&pf->pdev->dev,
1565 "set multi promisc failed, err %d, aq_err %d\n",
1566 ret, pf->hw.aq.asq_last_status);
1567 }
1568 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1569 bool cur_promisc;
1570 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1571 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1572 &vsi->state));
1573 ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1574 vsi->seid,
1575 cur_promisc,
1576 NULL);
1577 if (ret)
1578 dev_info(&pf->pdev->dev,
1579 "set uni promisc failed, err %d, aq_err %d\n",
1580 ret, pf->hw.aq.asq_last_status);
1581 }
1582
1583 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1584 return 0;
1585}
1586
1587/**
1588 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1589 * @pf: board private structure
1590 **/
1591static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1592{
1593 int v;
1594
1595 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1596 return;
1597 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1598
1599 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
1600 if (pf->vsi[v] &&
1601 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1602 i40e_sync_vsi_filters(pf->vsi[v]);
1603 }
1604}
1605
1606/**
1607 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1608 * @netdev: network interface device structure
1609 * @new_mtu: new value for maximum frame size
1610 *
1611 * Returns 0 on success, negative on failure
1612 **/
1613static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1614{
1615 struct i40e_netdev_priv *np = netdev_priv(netdev);
1616 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
1617 struct i40e_vsi *vsi = np->vsi;
1618
1619 /* MTU < 68 is an error and causes problems on some kernels */
1620 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1621 return -EINVAL;
1622
1623 netdev_info(netdev, "changing MTU from %d to %d\n",
1624 netdev->mtu, new_mtu);
1625 netdev->mtu = new_mtu;
1626 if (netif_running(netdev))
1627 i40e_vsi_reinit_locked(vsi);
1628
1629 return 0;
1630}
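/* Worked example of the frame-size check above: with the standard MTU
 * of 1500, max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518
 * bytes, which must not exceed I40E_MAX_RXBUFFER.
 */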
1631
1632/**
1633 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1634 * @vsi: the vsi being adjusted
1635 **/
1636void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1637{
1638 struct i40e_vsi_context ctxt;
1639 i40e_status ret;
1640
1641 if ((vsi->info.valid_sections &
1642 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1643 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1644 return; /* already enabled */
1645
1646 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1647 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1648 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1649
1650 ctxt.seid = vsi->seid;
1651 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1652 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1653 if (ret) {
1654 dev_info(&vsi->back->pdev->dev,
1655 "%s: update vsi failed, aq_err=%d\n",
1656 __func__, vsi->back->hw.aq.asq_last_status);
1657 }
1658}
1659
1660/**
1661 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1662 * @vsi: the vsi being adjusted
1663 **/
1664void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1665{
1666 struct i40e_vsi_context ctxt;
1667 i40e_status ret;
1668
1669 if ((vsi->info.valid_sections &
1670 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1671 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1672 I40E_AQ_VSI_PVLAN_EMOD_MASK))
1673 return; /* already disabled */
1674
1675 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1676 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1677 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1678
1679 ctxt.seid = vsi->seid;
1680 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1681 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1682 if (ret) {
1683 dev_info(&vsi->back->pdev->dev,
1684 "%s: update vsi failed, aq_err=%d\n",
1685 __func__, vsi->back->hw.aq.asq_last_status);
1686 }
1687}
1688
1689/**
1690 * i40e_vlan_rx_register - Setup or shutdown vlan offload
1691 * @netdev: network interface to be adjusted
1692 * @features: netdev features to test if VLAN offload is enabled or not
1693 **/
1694static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
1695{
1696 struct i40e_netdev_priv *np = netdev_priv(netdev);
1697 struct i40e_vsi *vsi = np->vsi;
1698
1699 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1700 i40e_vlan_stripping_enable(vsi);
1701 else
1702 i40e_vlan_stripping_disable(vsi);
1703}
1704
1705/**
1706 * i40e_vsi_add_vlan - Add vsi membership for given vlan
1707 * @vsi: the vsi being configured
 1708 * @vid: vlan id to be added (0 = untagged only, -1 = any)
1709 **/
1710int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
1711{
1712 struct i40e_mac_filter *f, *add_f;
1713 bool is_netdev, is_vf;
1714 int ret;
1715
1716 is_vf = (vsi->type == I40E_VSI_SRIOV);
1717 is_netdev = !!(vsi->netdev);
1718
1719 if (is_netdev) {
1720 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
1721 is_vf, is_netdev);
1722 if (!add_f) {
1723 dev_info(&vsi->back->pdev->dev,
1724 "Could not add vlan filter %d for %pM\n",
1725 vid, vsi->netdev->dev_addr);
1726 return -ENOMEM;
1727 }
1728 }
1729
1730 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1731 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1732 if (!add_f) {
1733 dev_info(&vsi->back->pdev->dev,
1734 "Could not add vlan filter %d for %pM\n",
1735 vid, f->macaddr);
1736 return -ENOMEM;
1737 }
1738 }
1739
1740 ret = i40e_sync_vsi_filters(vsi);
1741 if (ret) {
1742 dev_info(&vsi->back->pdev->dev,
1743 "Could not sync filters for vid %d\n", vid);
1744 return ret;
1745 }
1746
 1747 /* Now that we've added a vlan tag, check whether it is the first
 1748 * one (i.e. whether a "tag" of -1 exists) and if so replace the -1
 1749 * "tag" with 0, so we now accept untagged and the specified tagged
 1750 * traffic (rather than all tagged and untagged traffic)
1751 */
1752 if (vid > 0) {
1753 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
1754 I40E_VLAN_ANY,
1755 is_vf, is_netdev)) {
1756 i40e_del_filter(vsi, vsi->netdev->dev_addr,
1757 I40E_VLAN_ANY, is_vf, is_netdev);
1758 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
1759 is_vf, is_netdev);
1760 if (!add_f) {
1761 dev_info(&vsi->back->pdev->dev,
1762 "Could not add filter 0 for %pM\n",
1763 vsi->netdev->dev_addr);
1764 return -ENOMEM;
1765 }
1766 }
1767
1768 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1769 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1770 is_vf, is_netdev)) {
1771 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1772 is_vf, is_netdev);
1773 add_f = i40e_add_filter(vsi, f->macaddr,
1774 0, is_vf, is_netdev);
1775 if (!add_f) {
1776 dev_info(&vsi->back->pdev->dev,
1777 "Could not add filter 0 for %pM\n",
1778 f->macaddr);
1779 return -ENOMEM;
1780 }
1781 }
1782 }
1783 ret = i40e_sync_vsi_filters(vsi);
1784 }
1785
1786 return ret;
1787}
1788
1789/**
1790 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1791 * @vsi: the vsi being configured
 1792 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
1793 **/
1794int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1795{
1796 struct net_device *netdev = vsi->netdev;
1797 struct i40e_mac_filter *f, *add_f;
1798 bool is_vf, is_netdev;
1799 int filter_count = 0;
1800 int ret;
1801
1802 is_vf = (vsi->type == I40E_VSI_SRIOV);
1803 is_netdev = !!(netdev);
1804
1805 if (is_netdev)
1806 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
1807
1808 list_for_each_entry(f, &vsi->mac_filter_list, list)
1809 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1810
1811 ret = i40e_sync_vsi_filters(vsi);
1812 if (ret) {
1813 dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
1814 return ret;
1815 }
1816
1817	/* go through all the filters for this VSI; if only the
1818	 * vid == 0 filters remain, there are no other tagged filters,
1819	 * so vid 0 must be replaced with -1. This signifies that from
1820	 * now on we accept any traffic (with any tag present, or untagged)
1821	 */
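	/* Illustrative example (not from the original source): killing
	 * vid 100 when the list holds (dev_addr, 0) and (dev_addr, 100)
	 * leaves only vid 0 filters, which the code below converts back
	 * to a single (dev_addr, I40E_VLAN_ANY) filter.
	 */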
1822 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1823 if (is_netdev) {
1824 if (f->vlan &&
1825 ether_addr_equal(netdev->dev_addr, f->macaddr))
1826 filter_count++;
1827 }
1828
1829 if (f->vlan)
1830 filter_count++;
1831 }
1832
1833 if (!filter_count && is_netdev) {
1834 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
1835 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1836 is_vf, is_netdev);
1837 if (!f) {
1838 dev_info(&vsi->back->pdev->dev,
1839 "Could not add filter %d for %pM\n",
1840 I40E_VLAN_ANY, netdev->dev_addr);
1841 return -ENOMEM;
1842 }
1843 }
1844
1845 if (!filter_count) {
1846 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1847 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
1848 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1849 is_vf, is_netdev);
1850 if (!add_f) {
1851 dev_info(&vsi->back->pdev->dev,
1852 "Could not add filter %d for %pM\n",
1853 I40E_VLAN_ANY, f->macaddr);
1854 return -ENOMEM;
1855 }
1856 }
1857 }
1858
1859 return i40e_sync_vsi_filters(vsi);
1860}
1861
1862/**
1863 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1864 * @netdev: network interface to be adjusted
1865 * @vid: vlan id to be added
1866 **/
1867static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1868 __always_unused __be16 proto, u16 vid)
1869{
1870 struct i40e_netdev_priv *np = netdev_priv(netdev);
1871 struct i40e_vsi *vsi = np->vsi;
1872 int ret;
1873
1874 if (vid > 4095)
1875 return 0;
1876
1877 netdev_info(vsi->netdev, "adding %pM vid=%d\n",
1878 netdev->dev_addr, vid);
1879 /* If the network stack called us with vid = 0, we should
1880 * indicate to i40e_vsi_add_vlan() that we want to receive
1881 * any traffic (i.e. with any vlan tag, or untagged)
1882 */
1883 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1884
1885 if (!ret) {
1886 if (vid < VLAN_N_VID)
1887 set_bit(vid, vsi->active_vlans);
1888 }
1889
1890 return 0;
1891}
1892
1893/**
1894 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1895 * @netdev: network interface to be adjusted
1896 * @vid: vlan id to be removed
1897 **/
1898static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1899 __always_unused __be16 proto, u16 vid)
1900{
1901 struct i40e_netdev_priv *np = netdev_priv(netdev);
1902 struct i40e_vsi *vsi = np->vsi;
1903
1904 netdev_info(vsi->netdev, "removing %pM vid=%d\n",
1905 netdev->dev_addr, vid);
1906	/* the return code is ignored as there is nothing a user
1907	 * can do about a failure to remove, and a log message has
1908	 * already been printed by another function
1909	 */
1910 i40e_vsi_kill_vlan(vsi, vid);
1911
1912 clear_bit(vid, vsi->active_vlans);
1913 return 0;
1914}
1915
1916/**
1917 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
1918 * @vsi: the vsi being brought back up
1919 **/
1920static void i40e_restore_vlan(struct i40e_vsi *vsi)
1921{
1922 u16 vid;
1923
1924 if (!vsi->netdev)
1925 return;
1926
1927 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
1928
1929 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
1930 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
1931 vid);
1932}
1933
1934/**
1935 * i40e_vsi_add_pvid - Add pvid for the VSI
1936 * @vsi: the vsi being adjusted
1937 * @vid: the vlan id to set as a PVID
1938 **/
1939i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1940{
1941 struct i40e_vsi_context ctxt;
1942 i40e_status ret;
1943
1944 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1945 vsi->info.pvid = cpu_to_le16(vid);
1946 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
1947 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
1948
1949 ctxt.seid = vsi->seid;
1950 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1951 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1952 if (ret) {
1953 dev_info(&vsi->back->pdev->dev,
1954 "%s: update vsi failed, aq_err=%d\n",
1955 __func__, vsi->back->hw.aq.asq_last_status);
1956 }
1957
1958 return ret;
1959}
1960
1961/**
1962 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
1963 * @vsi: the vsi being adjusted
1964 *
1965 * Just use the vlan_rx_register() service to put it back to normal
1966 **/
1967void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
1968{
1969 vsi->info.pvid = 0;
1970 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
1971}
1972
1973/**
1974 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
1975 * @vsi: ptr to the VSI
1976 *
1977 * If this function returns with an error, then it's possible one or
1978 * more of the rings is populated (while the rest are not). It is the
1979 * caller's duty to clean those orphaned rings.
1980 *
1981 * Return 0 on success, negative on failure
1982 **/
1983static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
1984{
1985 int i, err = 0;
1986
1987 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
1988 err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
1989
1990 return err;
1991}
1992
1993/**
1994 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
1995 * @vsi: ptr to the VSI
1996 *
1997 * Free VSI's transmit software resources
1998 **/
1999static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2000{
2001 int i;
2002
2003 for (i = 0; i < vsi->num_queue_pairs; i++)
2004 if (vsi->tx_rings[i].desc)
2005 i40e_free_tx_resources(&vsi->tx_rings[i]);
2006}
2007
2008/**
2009 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2010 * @vsi: ptr to the VSI
2011 *
2012 * If this function returns with an error, then it's possible one or
2013 * more of the rings is populated (while the rest are not). It is the
2014 * caller's duty to clean those orphaned rings.
2015 *
2016 * Return 0 on success, negative on failure
2017 **/
2018static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2019{
2020 int i, err = 0;
2021
2022 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2023 err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
2024 return err;
2025}
2026
2027/**
2028 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2029 * @vsi: ptr to the VSI
2030 *
2031 * Free all receive software resources
2032 **/
2033static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2034{
2035 int i;
2036
2037 for (i = 0; i < vsi->num_queue_pairs; i++)
2038 if (vsi->rx_rings[i].desc)
2039 i40e_free_rx_resources(&vsi->rx_rings[i]);
2040}
2041
2042/**
2043 * i40e_configure_tx_ring - Configure a transmit ring context and related registers
2044 * @ring: The Tx ring to configure
2045 *
2046 * Configure the Tx descriptor ring in the HMC context.
2047 **/
2048static int i40e_configure_tx_ring(struct i40e_ring *ring)
2049{
2050 struct i40e_vsi *vsi = ring->vsi;
2051 u16 pf_q = vsi->base_queue + ring->queue_index;
2052 struct i40e_hw *hw = &vsi->back->hw;
2053 struct i40e_hmc_obj_txq tx_ctx;
2054 i40e_status err = 0;
2055 u32 qtx_ctl = 0;
2056
2057	/* some ATR-related Tx ring init */
2058 if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
2059 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2060 ring->atr_count = 0;
2061 } else {
2062 ring->atr_sample_rate = 0;
2063 }
2064
2065 /* initialize XPS */
2066 if (ring->q_vector && ring->netdev &&
2067 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2068 netif_set_xps_queue(ring->netdev,
2069 &ring->q_vector->affinity_mask,
2070 ring->queue_index);
2071
2072 /* clear the context structure first */
2073 memset(&tx_ctx, 0, sizeof(tx_ctx));
2074
2075 tx_ctx.new_context = 1;
2076 tx_ctx.base = (ring->dma / 128);
2077 tx_ctx.qlen = ring->count;
2078 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
2079 I40E_FLAG_FDIR_ATR_ENABLED));
2080
2081 /* As part of VSI creation/update, FW allocates certain
2082 * Tx arbitration queue sets for each TC enabled for
2083 * the VSI. The FW returns the handles to these queue
2084 * sets as part of the response buffer to Add VSI,
2085 * Update VSI, etc. AQ commands. It is expected that
2086 * these queue set handles be associated with the Tx
2087 * queues by the driver as part of the TX queue context
2088 * initialization. This has to be done regardless of
2089 * DCB as by default everything is mapped to TC0.
2090 */
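	/* e.g. a ring left on the default TC (ring->dcb_tc == 0) simply
	 * picks up qs_handle[0] as returned in the Add/Update VSI response
	 */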
2091 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2092 tx_ctx.rdylist_act = 0;
2093
2094 /* clear the context in the HMC */
2095 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2096 if (err) {
2097 dev_info(&vsi->back->pdev->dev,
2098 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2099 ring->queue_index, pf_q, err);
2100 return -ENOMEM;
2101 }
2102
2103 /* set the context in the HMC */
2104 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2105 if (err) {
2106 dev_info(&vsi->back->pdev->dev,
2107 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2108 ring->queue_index, pf_q, err);
2109 return -ENOMEM;
2110 }
2111
2112 /* Now associate this queue with this PCI function */
2113 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2114 qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
2115 & I40E_QTX_CTL_PF_INDX_MASK);
2116 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2117 i40e_flush(hw);
2118
2119 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2120
2121	/* cache the tail offset for easier writes later */
2122 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2123
2124 return 0;
2125}
2126
2127/**
2128 * i40e_configure_rx_ring - Configure a receive ring context
2129 * @ring: The Rx ring to configure
2130 *
2131 * Configure the Rx descriptor ring in the HMC context.
2132 **/
2133static int i40e_configure_rx_ring(struct i40e_ring *ring)
2134{
2135 struct i40e_vsi *vsi = ring->vsi;
2136 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2137 u16 pf_q = vsi->base_queue + ring->queue_index;
2138 struct i40e_hw *hw = &vsi->back->hw;
2139 struct i40e_hmc_obj_rxq rx_ctx;
2140 i40e_status err = 0;
2141
2142 ring->state = 0;
2143
2144 /* clear the context structure first */
2145 memset(&rx_ctx, 0, sizeof(rx_ctx));
2146
2147 ring->rx_buf_len = vsi->rx_buf_len;
2148 ring->rx_hdr_len = vsi->rx_hdr_len;
2149
2150 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2151 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2152
2153 rx_ctx.base = (ring->dma / 128);
2154 rx_ctx.qlen = ring->count;
2155
2156 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2157 set_ring_16byte_desc_enabled(ring);
2158 rx_ctx.dsize = 0;
2159 } else {
2160 rx_ctx.dsize = 1;
2161 }
2162
2163 rx_ctx.dtype = vsi->dtype;
2164 if (vsi->dtype) {
2165 set_ring_ps_enabled(ring);
2166 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2167 I40E_RX_SPLIT_IP |
2168 I40E_RX_SPLIT_TCP_UDP |
2169 I40E_RX_SPLIT_SCTP;
2170 } else {
2171 rx_ctx.hsplit_0 = 0;
2172 }
2173
2174 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2175 (chain_len * ring->rx_buf_len));
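	/* e.g. (illustrative, assuming the HW reports a chain length of 5):
	 * with 2048-byte buffers this caps rxmax at
	 * min(vsi->max_frame, 5 * 2048) = min(vsi->max_frame, 10240)
	 */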
2176 rx_ctx.tphrdesc_ena = 1;
2177 rx_ctx.tphwdesc_ena = 1;
2178 rx_ctx.tphdata_ena = 1;
2179 rx_ctx.tphhead_ena = 1;
2180 rx_ctx.lrxqthresh = 2;
2181 rx_ctx.crcstrip = 1;
2182 rx_ctx.l2tsel = 1;
2183 rx_ctx.showiv = 1;
2184
2185 /* clear the context in the HMC */
2186 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2187 if (err) {
2188 dev_info(&vsi->back->pdev->dev,
2189 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2190 ring->queue_index, pf_q, err);
2191 return -ENOMEM;
2192 }
2193
2194 /* set the context in the HMC */
2195 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2196 if (err) {
2197 dev_info(&vsi->back->pdev->dev,
2198 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2199 ring->queue_index, pf_q, err);
2200 return -ENOMEM;
2201 }
2202
2203 /* cache tail for quicker writes, and clear the reg before use */
2204 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2205 writel(0, ring->tail);
2206
2207 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2208
2209 return 0;
2210}
2211
2212/**
2213 * i40e_vsi_configure_tx - Configure the VSI for Tx
2214 * @vsi: VSI structure describing this set of rings and resources
2215 *
2216 * Configure the Tx VSI for operation.
2217 **/
2218static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2219{
2220 int err = 0;
2221 u16 i;
2222
2223 for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
2224 err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
2225
2226 return err;
2227}
2228
2229/**
2230 * i40e_vsi_configure_rx - Configure the VSI for Rx
2231 * @vsi: the VSI being configured
2232 *
2233 * Configure the Rx VSI for operation.
2234 **/
2235static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2236{
2237 int err = 0;
2238 u16 i;
2239
2240 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2241 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2242 + ETH_FCS_LEN + VLAN_HLEN;
2243 else
2244 vsi->max_frame = I40E_RXBUFFER_2048;
2245
2246 /* figure out correct receive buffer length */
2247 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2248 I40E_FLAG_RX_PS_ENABLED)) {
2249 case I40E_FLAG_RX_1BUF_ENABLED:
2250 vsi->rx_hdr_len = 0;
2251 vsi->rx_buf_len = vsi->max_frame;
2252 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2253 break;
2254 case I40E_FLAG_RX_PS_ENABLED:
2255 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2256 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2257 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2258 break;
2259 default:
2260 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2261 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2262 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2263 break;
2264 }
2265
2266 /* round up for the chip's needs */
2267 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2268 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2269 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2270 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2271
2272 /* set up individual rings */
2273 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2274 err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
2275
2276 return err;
2277}
2278
2279/**
2280 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2281 * @vsi: ptr to the VSI
2282 **/
2283static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2284{
2285 u16 qoffset, qcount;
2286 int i, n;
2287
2288 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2289 return;
2290
2291 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2292 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2293 continue;
2294
2295 qoffset = vsi->tc_config.tc_info[n].qoffset;
2296 qcount = vsi->tc_config.tc_info[n].qcount;
2297 for (i = qoffset; i < (qoffset + qcount); i++) {
2298 struct i40e_ring *rx_ring = &vsi->rx_rings[i];
2299 struct i40e_ring *tx_ring = &vsi->tx_rings[i];
2300 rx_ring->dcb_tc = n;
2301 tx_ring->dcb_tc = n;
2302 }
2303 }
2304}
2305
2306/**
2307 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2308 * @vsi: ptr to the VSI
2309 **/
2310static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2311{
2312 if (vsi->netdev)
2313 i40e_set_rx_mode(vsi->netdev);
2314}
2315
2316/**
2317 * i40e_vsi_configure - Set up the VSI for action
2318 * @vsi: the VSI being configured
2319 **/
2320static int i40e_vsi_configure(struct i40e_vsi *vsi)
2321{
2322 int err;
2323
2324 i40e_set_vsi_rx_mode(vsi);
2325 i40e_restore_vlan(vsi);
2326 i40e_vsi_config_dcb_rings(vsi);
2327 err = i40e_vsi_configure_tx(vsi);
2328 if (!err)
2329 err = i40e_vsi_configure_rx(vsi);
2330
2331 return err;
2332}
2333
2334/**
2335 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2336 * @vsi: the VSI being configured
2337 **/
2338static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2339{
2340 struct i40e_pf *pf = vsi->back;
2341 struct i40e_q_vector *q_vector;
2342 struct i40e_hw *hw = &pf->hw;
2343 u16 vector;
2344 int i, q;
2345 u32 val;
2346 u32 qp;
2347
2348 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2349 * and PFINT_LNKLSTn registers, e.g.:
2350 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2351 */
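	/* e.g. the first queue vector of a VSI whose base_vector is 1
	 * programs PFINT_ITRN(..., 0) and PFINT_LNKLSTN(0), since the
	 * register index is always vector - 1
	 */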
2352 qp = vsi->base_queue;
2353 vector = vsi->base_vector;
2354 q_vector = vsi->q_vectors;
2355 for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
2356 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2357 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2358 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2359 q_vector->rx.itr);
2360 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2361 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2362 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2363 q_vector->tx.itr);
2364
2365 /* Linked list for the queuepairs assigned to this vector */
2366 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2367 for (q = 0; q < q_vector->num_ringpairs; q++) {
2368 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2369 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2370 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2371 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2372 (I40E_QUEUE_TYPE_TX
2373 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2374
2375 wr32(hw, I40E_QINT_RQCTL(qp), val);
2376
2377 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2378 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2379 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2380 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2381 (I40E_QUEUE_TYPE_RX
2382 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2383
2384 /* Terminate the linked list */
2385 if (q == (q_vector->num_ringpairs - 1))
2386 val |= (I40E_QUEUE_END_OF_LIST
2387 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2388
2389 wr32(hw, I40E_QINT_TQCTL(qp), val);
2390 qp++;
2391 }
2392 }
2393
2394 i40e_flush(hw);
2395}
2396
2397/**
2398 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2399 * @hw: ptr to the hardware info
2400 **/
2401static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2402{
2403 u32 val;
2404
2405 /* clear things first */
2406 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2407 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2408
2409 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2410 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2411 I40E_PFINT_ICR0_ENA_GRST_MASK |
2412 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2413 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2414 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
2415 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2416 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2417 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2418
2419 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2420
2421 /* SW_ITR_IDX = 0, but don't change INTENA */
2422 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2423 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2424
2425 /* OTHER_ITR_IDX = 0 */
2426 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2427}
2428
2429/**
2430 * i40e_configure_msi_and_legacy - MSI/legacy mode interrupt config in the HW
2431 * @vsi: the VSI being configured
2432 **/
2433static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2434{
2435 struct i40e_q_vector *q_vector = vsi->q_vectors;
2436 struct i40e_pf *pf = vsi->back;
2437 struct i40e_hw *hw = &pf->hw;
2438 u32 val;
2439
2440 /* set the ITR configuration */
2441 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2442 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2443 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2444 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2445 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2446 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2447
2448 i40e_enable_misc_int_causes(hw);
2449
2450 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2451 wr32(hw, I40E_PFINT_LNKLST0, 0);
2452
2453 /* Associate the queue pair to the vector and enable the q int */
2454 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2455 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2456 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2457
2458 wr32(hw, I40E_QINT_RQCTL(0), val);
2459
2460 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2461 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2462 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2463
2464 wr32(hw, I40E_QINT_TQCTL(0), val);
2465 i40e_flush(hw);
2466}
2467
2468/**
2469 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2470 * @pf: board private structure
2471 **/
2472static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2473{
2474 struct i40e_hw *hw = &pf->hw;
2475 u32 val;
2476
2477 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2478 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2479 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2480
2481 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2482 i40e_flush(hw);
2483}
2484
2485/**
2486 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2487 * @vsi: pointer to a vsi
2488 * @vector: the hardware interrupt vector to enable
2489 **/
2490void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2491{
2492 struct i40e_pf *pf = vsi->back;
2493 struct i40e_hw *hw = &pf->hw;
2494 u32 val;
2495
2496 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2497 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2498 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2499 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2500 i40e_flush(hw);
2501}
2502
2503/**
2504 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2505 * @irq: interrupt number
2506 * @data: pointer to a q_vector
2507 **/
2508static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2509{
2510 struct i40e_q_vector *q_vector = data;
2511
2512 if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
2513 return IRQ_HANDLED;
2514
2515 napi_schedule(&q_vector->napi);
2516
2517 return IRQ_HANDLED;
2518}
2519
2520/**
2521 * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
2522 * @irq: interrupt number
2523 * @data: pointer to a q_vector
2524 **/
2525static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
2526{
2527 struct i40e_q_vector *q_vector = data;
2528
2529 if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
2530 return IRQ_HANDLED;
2531
2532 pr_info("fdir ring cleaning needed\n");
2533
2534 return IRQ_HANDLED;
2535}
2536
2537/**
2538 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2539 * @vsi: the VSI being configured
2540 * @basename: name for the vector
2541 *
2542 * Requests interrupt service from the kernel for this VSI's MSI-X vectors.
2543 **/
2544static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2545{
2546 int q_vectors = vsi->num_q_vectors;
2547 struct i40e_pf *pf = vsi->back;
2548 int base = vsi->base_vector;
2549 int rx_int_idx = 0;
2550 int tx_int_idx = 0;
2551 int vector, err;
2552
2553 for (vector = 0; vector < q_vectors; vector++) {
2554 struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
2555
2556 if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
2557 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2558 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2559 tx_int_idx++;
2560 } else if (q_vector->rx.ring[0]) {
2561 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2562 "%s-%s-%d", basename, "rx", rx_int_idx++);
2563 } else if (q_vector->tx.ring[0]) {
2564 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2565 "%s-%s-%d", basename, "tx", tx_int_idx++);
2566 } else {
2567 /* skip this unused q_vector */
2568 continue;
2569 }
2570 err = request_irq(pf->msix_entries[base + vector].vector,
2571 vsi->irq_handler,
2572 0,
2573 q_vector->name,
2574 q_vector);
2575 if (err) {
2576 dev_info(&pf->pdev->dev,
2577 "%s: request_irq failed, error: %d\n",
2578 __func__, err);
2579 goto free_queue_irqs;
2580 }
2581 /* assign the mask for this irq */
2582 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2583 &q_vector->affinity_mask);
2584 }
2585
2586 return 0;
2587
2588free_queue_irqs:
2589 while (vector) {
2590 vector--;
2591 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2592 NULL);
2593 free_irq(pf->msix_entries[base + vector].vector,
2594 &(vsi->q_vectors[vector]));
2595 }
2596 return err;
2597}
2598
2599/**
2600 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
2601 * @vsi: the VSI being un-configured
2602 **/
2603static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
2604{
2605 struct i40e_pf *pf = vsi->back;
2606 struct i40e_hw *hw = &pf->hw;
2607 int base = vsi->base_vector;
2608 int i;
2609
2610 for (i = 0; i < vsi->num_queue_pairs; i++) {
2611 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
2612 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
2613 }
2614
2615 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2616 for (i = vsi->base_vector;
2617 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2618 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
2619
2620 i40e_flush(hw);
2621 for (i = 0; i < vsi->num_q_vectors; i++)
2622 synchronize_irq(pf->msix_entries[i + base].vector);
2623 } else {
2624 /* Legacy and MSI mode - this stops all interrupt handling */
2625 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
2626 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
2627 i40e_flush(hw);
2628 synchronize_irq(pf->pdev->irq);
2629 }
2630}
2631
2632/**
2633 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
2634 * @vsi: the VSI being configured
2635 **/
2636static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2637{
2638 struct i40e_pf *pf = vsi->back;
2639 int i;
2640
2641 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2642 for (i = vsi->base_vector;
2643 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2644 i40e_irq_dynamic_enable(vsi, i);
2645 } else {
2646 i40e_irq_dynamic_enable_icr0(pf);
2647 }
2648
2649 return 0;
2650}
2651
2652/**
2653 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
2654 * @pf: board private structure
2655 **/
2656static void i40e_stop_misc_vector(struct i40e_pf *pf)
2657{
2658 /* Disable ICR 0 */
2659 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2660 i40e_flush(&pf->hw);
2661}
2662
2663/**
2664 * i40e_intr - MSI/Legacy and non-queue interrupt handler
2665 * @irq: interrupt number
2666 * @data: pointer to the board private structure (struct i40e_pf)
2667 *
2668 * This is the handler used for all MSI/Legacy interrupts, and deals
2669 * with both queue and non-queue interrupts. This is also used in
2670 * MSIX mode to handle the non-queue interrupts.
2671 **/
2672static irqreturn_t i40e_intr(int irq, void *data)
2673{
2674 struct i40e_pf *pf = (struct i40e_pf *)data;
2675 struct i40e_hw *hw = &pf->hw;
2676 u32 icr0, icr0_remaining;
2677 u32 val, ena_mask;
2678
2679 icr0 = rd32(hw, I40E_PFINT_ICR0);
2680
2681 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2682 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2683 return IRQ_NONE;
2684
2685 val = rd32(hw, I40E_PFINT_DYN_CTL0);
2686 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
2687 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2688
2689 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
2690
2691 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
2692 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
2693
2694 /* temporarily disable queue cause for NAPI processing */
2695 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
2696 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2697 wr32(hw, I40E_QINT_RQCTL(0), qval);
2698
2699 qval = rd32(hw, I40E_QINT_TQCTL(0));
2700 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2701 wr32(hw, I40E_QINT_TQCTL(0), qval);
2702 i40e_flush(hw);
2703
2704 if (!test_bit(__I40E_DOWN, &pf->state))
2705 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
2706 }
2707
2708 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
2709 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2710 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
2711 }
2712
2713 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
2714 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2715 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
2716 }
2717
2718 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
2719 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
2720 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
2721 }
2722
2723 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
2724 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
2725 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
2726 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
2727 val = rd32(hw, I40E_GLGEN_RSTAT);
2728 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
2729 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
2730 if (val & I40E_RESET_CORER)
2731 pf->corer_count++;
2732 else if (val & I40E_RESET_GLOBR)
2733 pf->globr_count++;
2734 else if (val & I40E_RESET_EMPR)
2735 pf->empr_count++;
2736 }
2737
2738 /* If a critical error is pending we have no choice but to reset the
2739 * device.
2740 * Report and mask out any remaining unexpected interrupts.
2741 */
2742 icr0_remaining = icr0 & ena_mask;
2743 if (icr0_remaining) {
2744 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
2745 icr0_remaining);
2746 if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
2747 (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
2748 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
2749 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
2750 (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
2751 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
2752 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
2753 } else {
2754 dev_info(&pf->pdev->dev, "device will be reset\n");
2755 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2756 i40e_service_event_schedule(pf);
2757 }
2758 }
2759 ena_mask &= ~icr0_remaining;
2760 }
2761
2762 /* re-enable interrupt causes */
2763 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
2764 i40e_flush(hw);
2765 if (!test_bit(__I40E_DOWN, &pf->state)) {
2766 i40e_service_event_schedule(pf);
2767 i40e_irq_dynamic_enable_icr0(pf);
2768 }
2769
2770 return IRQ_HANDLED;
2771}
2772
2773/**
2774 * map_vector_to_rxq - Assigns the Rx queue to the vector
2775 * @vsi: the VSI being configured
2776 * @v_idx: vector index
2777 * @r_idx: rx queue index
2778 **/
2779static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
2780{
2781 struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
2782 struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
2783
2784 rx_ring->q_vector = q_vector;
2785 q_vector->rx.ring[q_vector->rx.count] = rx_ring;
2786 q_vector->rx.count++;
2787 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2788 q_vector->vsi = vsi;
2789}
2790
2791/**
2792 * map_vector_to_txq - Assigns the Tx queue to the vector
2793 * @vsi: the VSI being configured
2794 * @v_idx: vector index
2795 * @t_idx: tx queue index
2796 **/
2797static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
2798{
2799 struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
2800 struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
2801
2802 tx_ring->q_vector = q_vector;
2803 q_vector->tx.ring[q_vector->tx.count] = tx_ring;
2804 q_vector->tx.count++;
2805 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2806 q_vector->num_ringpairs++;
2807 q_vector->vsi = vsi;
2808}
2809
2810/**
2811 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
2812 * @vsi: the VSI being configured
2813 *
2814 * This function maps descriptor rings to the queue-specific vectors
2815 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2816 * one vector per queue pair, but on a constrained vector budget, we
2817 * group the queue pairs as "efficiently" as possible.
2818 **/
2819static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
2820{
2821 int qp_remaining = vsi->num_queue_pairs;
2822 int q_vectors = vsi->num_q_vectors;
2823 int qp_per_vector;
2824 int v_start = 0;
2825 int qp_idx = 0;
2826
2827 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
2828 * group them so there are multiple queues per vector.
2829 */
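	/* Worked example: 8 queue pairs spread over 3 vectors gives
	 * DIV_ROUND_UP splits of 3, 3 and 2 queue pairs per vector
	 */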
2830 for (; v_start < q_vectors && qp_remaining; v_start++) {
2831 qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
2832 for (; qp_per_vector;
2833 qp_per_vector--, qp_idx++, qp_remaining--) {
2834 map_vector_to_rxq(vsi, v_start, qp_idx);
2835 map_vector_to_txq(vsi, v_start, qp_idx);
2836 }
2837 }
2838}
2839
2840/**
2841 * i40e_vsi_request_irq - Request IRQ from the OS
2842 * @vsi: the VSI being configured
2843 * @basename: name for the vector
2844 **/
2845static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
2846{
2847 struct i40e_pf *pf = vsi->back;
2848 int err;
2849
2850 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2851 err = i40e_vsi_request_irq_msix(vsi, basename);
2852 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
2853 err = request_irq(pf->pdev->irq, i40e_intr, 0,
2854 pf->misc_int_name, pf);
2855 else
2856 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
2857 pf->misc_int_name, pf);
2858
2859 if (err)
2860 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
2861
2862 return err;
2863}
2864
2865#ifdef CONFIG_NET_POLL_CONTROLLER
2866/**
2867 * i40e_netpoll - A Polling 'interrupt' handler
2868 * @netdev: network interface device structure
2869 *
2870 * This is used by netconsole to send skbs without having to re-enable
2871 * interrupts. It's not called while the normal interrupt routine is executing.
2872 **/
2873static void i40e_netpoll(struct net_device *netdev)
2874{
2875 struct i40e_netdev_priv *np = netdev_priv(netdev);
2876 struct i40e_vsi *vsi = np->vsi;
2877 struct i40e_pf *pf = vsi->back;
2878 int i;
2879
2880 /* if interface is down do nothing */
2881 if (test_bit(__I40E_DOWN, &vsi->state))
2882 return;
2883
2884 pf->flags |= I40E_FLAG_IN_NETPOLL;
2885 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2886 for (i = 0; i < vsi->num_q_vectors; i++)
2887 i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
2888 } else {
2889 i40e_intr(pf->pdev->irq, netdev);
2890 }
2891 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
2892}
2893#endif
2894
2895/**
2896 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
2897 * @vsi: the VSI being configured
2898 * @enable: start or stop the rings
2899 **/
2900static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
2901{
2902 struct i40e_pf *pf = vsi->back;
2903 struct i40e_hw *hw = &pf->hw;
2904 int i, j, pf_q;
2905 u32 tx_reg;
2906
2907 pf_q = vsi->base_queue;
2908 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
2909 j = 1000;
2910 do {
2911 usleep_range(1000, 2000);
2912 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
2913 } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
2914 ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
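		/* the loop above spins until the QENA_REQ and QENA_STAT
		 * bits agree, i.e. no previously requested enable/disable
		 * transition is still in flight for this queue
		 */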
2915
2916 if (enable) {
2917			/* is STAT set? */
2918 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
2919 dev_info(&pf->pdev->dev,
2920 "Tx %d already enabled\n", i);
2921 continue;
2922 }
2923 } else {
2924			/* is !STAT set? */
2925 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
2926 dev_info(&pf->pdev->dev,
2927 "Tx %d already disabled\n", i);
2928 continue;
2929 }
2930 }
2931
2932 /* turn on/off the queue */
2933 if (enable)
2934 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2935 I40E_QTX_ENA_QENA_STAT_MASK;
2936 else
2937 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2938
2939 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
2940
2941 /* wait for the change to finish */
2942 for (j = 0; j < 10; j++) {
2943 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
2944 if (enable) {
2945 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
2946 break;
2947 } else {
2948 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
2949 break;
2950 }
2951
2952 udelay(10);
2953 }
2954 if (j >= 10) {
2955 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
2956 pf_q, (enable ? "en" : "dis"));
2957 return -ETIMEDOUT;
2958 }
2959 }
2960
2961 return 0;
2962}
2963
2964/**
2965 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
2966 * @vsi: the VSI being configured
2967 * @enable: start or stop the rings
2968 **/
2969static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
2970{
2971 struct i40e_pf *pf = vsi->back;
2972 struct i40e_hw *hw = &pf->hw;
2973 int i, j, pf_q;
2974 u32 rx_reg;
2975
2976 pf_q = vsi->base_queue;
2977 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
2978 j = 1000;
2979 do {
2980 usleep_range(1000, 2000);
2981 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
2982 } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
2983 ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
2984
2985 if (enable) {
2986			/* is STAT set? */
2987 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
2988 continue;
2989 } else {
2990			/* is !STAT set? */
2991 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
2992 continue;
2993 }
2994
2995 /* turn on/off the queue */
2996 if (enable)
2997 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2998 I40E_QRX_ENA_QENA_STAT_MASK;
2999 else
3000 rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
3001 I40E_QRX_ENA_QENA_STAT_MASK);
3002 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3003
3004 /* wait for the change to finish */
3005 for (j = 0; j < 10; j++) {
3006 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3007
3008 if (enable) {
3009 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3010 break;
3011 } else {
3012 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3013 break;
3014 }
3015
3016 udelay(10);
3017 }
3018 if (j >= 10) {
3019 dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
3020 pf_q, (enable ? "en" : "dis"));
3021 return -ETIMEDOUT;
3022 }
3023 }
3024
3025 return 0;
3026}
3027
3028/**
3029 * i40e_vsi_control_rings - Start or stop a VSI's rings
3030 * @vsi: the VSI being configured
3031 * @request: start (true) or stop (false) the rings
3032 **/
3033static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3034{
3035 int ret;
3036
3037	/* do Rx first for enable and last for disable */
3038 if (request) {
3039 ret = i40e_vsi_control_rx(vsi, request);
3040 if (ret)
3041 return ret;
3042 ret = i40e_vsi_control_tx(vsi, request);
3043 } else {
3044 ret = i40e_vsi_control_tx(vsi, request);
3045 if (ret)
3046 return ret;
3047 ret = i40e_vsi_control_rx(vsi, request);
3048 }
3049
3050 return ret;
3051}
3052
3053/**
3054 * i40e_vsi_free_irq - Free the irq association with the OS
3055 * @vsi: the VSI being configured
3056 **/
3057static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3058{
3059 struct i40e_pf *pf = vsi->back;
3060 struct i40e_hw *hw = &pf->hw;
3061 int base = vsi->base_vector;
3062 u32 val, qp;
3063 int i;
3064
3065 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3066 if (!vsi->q_vectors)
3067 return;
3068
3069 for (i = 0; i < vsi->num_q_vectors; i++) {
3070 u16 vector = i + base;
3071
3072 /* free only the irqs that were actually requested */
3073 if (vsi->q_vectors[i].num_ringpairs == 0)
3074 continue;
3075
3076 /* clear the affinity_mask in the IRQ descriptor */
3077 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3078 NULL);
3079 free_irq(pf->msix_entries[vector].vector,
3080 &vsi->q_vectors[i]);
3081
3082			/* Tear down the interrupt queue linked list
3083			 *
3084			 * We know that the queues come in pairs, always
3085			 * the Rx first, then the Tx. To clear the
3086			 * linked list, stick the EOL value into the
3087			 * next_q field of the registers.
3088			 */
3089 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3090 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3091 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3092 val |= I40E_QUEUE_END_OF_LIST
3093 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3094 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3095
3096 while (qp != I40E_QUEUE_END_OF_LIST) {
3097 u32 next;
3098
3099 val = rd32(hw, I40E_QINT_RQCTL(qp));
3100
3101 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3102 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3103 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3104 I40E_QINT_RQCTL_INTEVENT_MASK);
3105
3106 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3107 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3108
3109 wr32(hw, I40E_QINT_RQCTL(qp), val);
3110
3111 val = rd32(hw, I40E_QINT_TQCTL(qp));
3112
3113 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3114 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3115
3116 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3117 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3118 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3119 I40E_QINT_TQCTL_INTEVENT_MASK);
3120
3121 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3122 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3123
3124 wr32(hw, I40E_QINT_TQCTL(qp), val);
3125 qp = next;
3126 }
3127 }
3128 } else {
3129 free_irq(pf->pdev->irq, pf);
3130
3131 val = rd32(hw, I40E_PFINT_LNKLST0);
3132 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3133 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3134 val |= I40E_QUEUE_END_OF_LIST
3135 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3136 wr32(hw, I40E_PFINT_LNKLST0, val);
3137
3138 val = rd32(hw, I40E_QINT_RQCTL(qp));
3139 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3140 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3141 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3142 I40E_QINT_RQCTL_INTEVENT_MASK);
3143
3144 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3145 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3146
3147 wr32(hw, I40E_QINT_RQCTL(qp), val);
3148
3149 val = rd32(hw, I40E_QINT_TQCTL(qp));
3150
3151 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3152 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3153 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3154 I40E_QINT_TQCTL_INTEVENT_MASK);
3155
3156 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3157 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3158
3159 wr32(hw, I40E_QINT_TQCTL(qp), val);
3160 }
3161}
3162
3163/**
3164 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3165 * @vsi: the VSI being un-configured
3166 *
3167 * This frees the memory allocated to the q_vectors and
3168 * deletes references to the NAPI struct.
3169 **/
3170static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3171{
3172 int v_idx;
3173
3174 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
3175 struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
3176 int r_idx;
3177
3178 if (!q_vector)
3179 continue;
3180
3181 /* disassociate q_vector from rings */
3182 for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
3183 q_vector->tx.ring[r_idx]->q_vector = NULL;
3184 for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
3185 q_vector->rx.ring[r_idx]->q_vector = NULL;
3186
3187 /* only VSI w/ an associated netdev is set up w/ NAPI */
3188 if (vsi->netdev)
3189 netif_napi_del(&q_vector->napi);
3190 }
3191 kfree(vsi->q_vectors);
3192}
3193
3194/**
3195 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3196 * @pf: board private structure
3197 **/
3198static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3199{
3200 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3201 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3202 pci_disable_msix(pf->pdev);
3203 kfree(pf->msix_entries);
3204 pf->msix_entries = NULL;
3205 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3206 pci_disable_msi(pf->pdev);
3207 }
3208 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3209}
3210
3211/**
3212 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3213 * @pf: board private structure
3214 *
3215 * We go through and clear interrupt-specific resources and reset the structure
3216 * to pre-load conditions
3217 **/
3218static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3219{
3220 int i;
3221
3222 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3223 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
3224 if (pf->vsi[i])
3225 i40e_vsi_free_q_vectors(pf->vsi[i]);
3226 i40e_reset_interrupt_capability(pf);
3227}
3228
3229/**
3230 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3231 * @vsi: the VSI being configured
3232 **/
3233static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3234{
3235 int q_idx;
3236
3237 if (!vsi->netdev)
3238 return;
3239
3240 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3241 napi_enable(&vsi->q_vectors[q_idx].napi);
3242}
3243
3244/**
3245 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3246 * @vsi: the VSI being configured
3247 **/
3248static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3249{
3250 int q_idx;
3251
3252 if (!vsi->netdev)
3253 return;
3254
3255 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3256 napi_disable(&vsi->q_vectors[q_idx].napi);
3257}
3258
3259/**
3260 * i40e_quiesce_vsi - Pause a given VSI
3261 * @vsi: the VSI being paused
3262 **/
3263static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3264{
3265 if (test_bit(__I40E_DOWN, &vsi->state))
3266 return;
3267
3268 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3269 if (vsi->netdev && netif_running(vsi->netdev)) {
3270 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3271 } else {
3272 set_bit(__I40E_DOWN, &vsi->state);
3273 i40e_down(vsi);
3274 }
3275}
3276
3277/**
3278 * i40e_unquiesce_vsi - Resume a given VSI
3279 * @vsi: the VSI being resumed
3280 **/
3281static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3282{
3283 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3284 return;
3285
3286 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3287 if (vsi->netdev && netif_running(vsi->netdev))
3288 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3289 else
3290 i40e_up(vsi); /* this clears the DOWN bit */
3291}
3292
3293/**
3294 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3295 * @pf: the PF
3296 **/
3297static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3298{
3299 int v;
3300
3301 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3302 if (pf->vsi[v])
3303 i40e_quiesce_vsi(pf->vsi[v]);
3304 }
3305}
3306
3307/**
3308 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3309 * @pf: the PF
3310 **/
3311static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3312{
3313 int v;
3314
3315 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3316 if (pf->vsi[v])
3317 i40e_unquiesce_vsi(pf->vsi[v]);
3318 }
3319}
3320
3321/**
3322 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
3323 * @dcbcfg: the corresponding DCBx configuration structure
3324 *
3325 * Return the number of TCs from given DCBx configuration
3326 **/
3327static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3328{
3329 int num_tc = 0, i;
3330
3331 /* Scan the ETS Config Priority Table to find
3332 * traffic class enabled for a given priority
3333 * and use the traffic class index to get the
3334 * number of traffic classes enabled
3335 */
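	/* Worked example: a priority table of {0, 0, 1, 1, 2, 0, 0, 0}
	 * has a highest TC index of 2, so 3 traffic classes are
	 * reported after the increment below
	 */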
3336 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3337 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3338 num_tc = dcbcfg->etscfg.prioritytable[i];
3339 }
3340
3341 /* Traffic class index starts from zero so
3342 * increment to return the actual count
3343 */
3344 num_tc++;
3345
3346 return num_tc;
3347}
3348
3349/**
3350 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3351 * @dcbcfg: the corresponding DCBx configuration structure
3352 *
3353 * Return a bitmap of the traffic classes enabled by the given
3354 * DCBx configuration
3355 **/
3356static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3357{
3358 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3359 u8 enabled_tc = 1;
3360 u8 i;
3361
3362 for (i = 0; i < num_tc; i++)
3363 enabled_tc |= 1 << i;
3364
3365 return enabled_tc;
3366}
3367
3368/**
3369 * i40e_pf_get_num_tc - Get the number of traffic classes enabled for the PF
3370 * @pf: PF being queried
3371 *
3372 * Return number of traffic classes enabled for the given PF
3373 **/
3374static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3375{
3376 struct i40e_hw *hw = &pf->hw;
3377 u8 i, enabled_tc;
3378 u8 num_tc = 0;
3379 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3380
3381 /* If DCB is not enabled then always in single TC */
3382 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3383 return 1;
3384
3385	/* In MFP mode, return the count of TCs enabled for this PF */
3386 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3387 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3388 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3389 if (enabled_tc & (1 << i))
3390 num_tc++;
3391 }
3392 return num_tc;
3393 }
3394
3395	/* In SFP mode, the PF gets the TC count from the port's DCBx config */
3396 return i40e_dcb_get_num_tc(dcbcfg);
3397}
3398
3399/**
3400 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3401 * @pf: PF being queried
3402 *
3403 * Return a bitmap for first enabled traffic class for this PF.
3404 **/
3405static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3406{
3407 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3408 u8 i = 0;
3409
3410 if (!enabled_tc)
3411 return 0x1; /* TC0 */
3412
3413 /* Find the first enabled TC */
3414 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3415 if (enabled_tc & (1 << i))
3416 break;
3417 }
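	/* e.g. an enabled_tcmap of 0x0C (TCs 2 and 3) breaks out of the
	 * loop at i == 2, so 0x4 is returned below
	 */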
3418
3419 return 1 << i;
3420}
3421
3422/**
3423 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
3424 * @pf: PF being queried
3425 *
3426 * Return a bitmap for enabled traffic classes for this PF.
3427 **/
3428static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3429{
3430 /* If DCB is not enabled for this PF then just return default TC */
3431 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3432 return i40e_pf_get_default_tc(pf);
3433
3434 /* MFP mode will have enabled TCs set by FW */
3435 if (pf->flags & I40E_FLAG_MFP_ENABLED)
3436 return pf->hw.func_caps.enabled_tcmap;
3437
3438	/* In SFP mode we want the PF to be enabled for all TCs */
3439 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3440}
3441
3442/**
3443 * i40e_vsi_get_bw_info - Query VSI BW Information
3444 * @vsi: the VSI being queried
3445 *
3446 * Returns 0 on success, negative value on failure
3447 **/
3448static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3449{
3450 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
3451 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3452 struct i40e_pf *pf = vsi->back;
3453 struct i40e_hw *hw = &pf->hw;
3454 u32 tc_bw_max;
3455 int ret;
3456 int i;
3457
3458 /* Get the VSI level BW configuration */
3459 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3460 if (ret) {
3461 dev_info(&pf->pdev->dev,
3462 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3463 ret, pf->hw.aq.asq_last_status);
3464 return ret;
3465 }
3466
3467 /* Get the VSI level BW configuration per TC */
3468 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
3469 &bw_ets_config,
3470 NULL);
3471 if (ret) {
3472 dev_info(&pf->pdev->dev,
3473 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3474 ret, pf->hw.aq.asq_last_status);
3475 return ret;
3476 }
3477
3478 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
3479 dev_info(&pf->pdev->dev,
3480 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
3481 bw_config.tc_valid_bits,
3482 bw_ets_config.tc_valid_bits);
3483 /* Still continuing */
3484 }
3485
3486 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
3487 vsi->bw_max_quanta = bw_config.max_bw;
3488 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
3489 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
3490 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3491 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
3492 vsi->bw_ets_limit_credits[i] =
3493 le16_to_cpu(bw_ets_config.credits[i]);
3494 /* 3 bits out of 4 for each TC */
3495 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3496 }
3497 return ret;
3498}
3499
3500/**
3501 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
3502 * @vsi: the VSI being configured
3503 * @enabled_tc: TC bitmap
3504 * @bw_share: BW shared credits per TC
3505 *
3506 * Returns 0 on success, negative value on failure
3507 **/
3508static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi,
3509 u8 enabled_tc,
3510 u8 *bw_share)
3511{
3512 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3513 int i, ret = 0;
3514
3515 bw_data.tc_valid_bits = enabled_tc;
3516 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3517 bw_data.tc_bw_credits[i] = bw_share[i];
3518
3519 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid,
3520 &bw_data, NULL);
3521 if (ret) {
3522 dev_info(&vsi->back->pdev->dev,
3523 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3524 __func__, vsi->back->hw.aq.asq_last_status);
3525 return ret;
3526 }
3527
3528 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3529 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3530
3531 return ret;
3532}
3533
3534/**
3535 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
3536 * @vsi: the VSI being configured
3537 * @enabled_tc: TC map to be enabled
3538 *
3539 **/
3540static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3541{
3542 struct net_device *netdev = vsi->netdev;
3543 struct i40e_pf *pf = vsi->back;
3544 struct i40e_hw *hw = &pf->hw;
3545 u8 netdev_tc = 0;
3546 int i;
3547 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3548
3549 if (!netdev)
3550 return;
3551
3552 if (!enabled_tc) {
3553 netdev_reset_tc(netdev);
3554 return;
3555 }
3556
3557 /* Set up actual enabled TCs on the VSI */
3558 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
3559 return;
3560
3561 /* set per TC queues for the VSI */
3562 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3563		/* Only set TC queues for enabled TCs
3564 *
3565 * e.g. For a VSI that has TC0 and TC3 enabled the
3566 * enabled_tc bitmap would be 0x00001001; the driver
3567 * will set the numtc for netdev as 2 that will be
3568 * referenced by the netdev layer as TC 0 and 1.
3569 */
3570 if (vsi->tc_config.enabled_tc & (1 << i))
3571 netdev_set_tc_queue(netdev,
3572 vsi->tc_config.tc_info[i].netdev_tc,
3573 vsi->tc_config.tc_info[i].qcount,
3574 vsi->tc_config.tc_info[i].qoffset);
3575 }
3576
3577 /* Assign UP2TC map for the VSI */
3578 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3579 /* Get the actual TC# for the UP */
3580 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
3581 /* Get the mapped netdev TC# for the UP */
3582 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
3583 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3584 }
3585}
3586
3587/**
3588 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
3589 * @vsi: the VSI being configured
3590 * @ctxt: the ctxt buffer returned from AQ VSI update param command
3591 **/
3592static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
3593 struct i40e_vsi_context *ctxt)
3594{
3595	/* copy just the sections touched, not the entire info,
3596	 * since not all sections are valid as returned by
3597	 * the update vsi params command
3598	 */
3599 vsi->info.mapping_flags = ctxt->info.mapping_flags;
3600 memcpy(&vsi->info.queue_mapping,
3601 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
3602 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
3603 sizeof(vsi->info.tc_mapping));
3604}
3605
3606/**
3607 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
3608 * @vsi: VSI to be configured
3609 * @enabled_tc: TC bitmap
3610 *
3611 * This configures a particular VSI for the TCs that are mapped to
3612 * the given TC bitmap, using the default (equal) bandwidth share
3613 * for the enabled TCs across VSIs.
3614 *
3615 * NOTE:
3616 * It is expected that the VSI queues have been quiesced before calling
3617 * this function.
3618 **/
3619static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3620{
3621 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
3622 struct i40e_vsi_context ctxt;
3623 int ret = 0;
3624 int i;
3625
3626	/* Nothing to do if the requested TC map matches the current one */
3627 if (vsi->tc_config.enabled_tc == enabled_tc)
3628 return ret;
3629
3630 /* Enable ETS TCs with equal BW Share for now across all VSIs */
3631 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3632 if (enabled_tc & (1 << i))
3633 bw_share[i] = 1;
3634 }
3635
3636 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
3637 if (ret) {
3638 dev_info(&vsi->back->pdev->dev,
3639 "Failed configuring TC map %d for VSI %d\n",
3640 enabled_tc, vsi->seid);
3641 goto out;
3642 }
3643
3644 /* Update Queue Pairs Mapping for currently enabled UPs */
3645 ctxt.seid = vsi->seid;
3646 ctxt.pf_num = vsi->back->hw.pf_id;
3647 ctxt.vf_num = 0;
3648 ctxt.uplink_seid = vsi->uplink_seid;
3649 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3650 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
3651
3652 /* Update the VSI after updating the VSI queue-mapping information */
3653 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3654 if (ret) {
3655 dev_info(&vsi->back->pdev->dev,
3656 "update vsi failed, aq_err=%d\n",
3657 vsi->back->hw.aq.asq_last_status);
3658 goto out;
3659 }
3660 /* update the local VSI info with updated queue map */
3661 i40e_vsi_update_queue_map(vsi, &ctxt);
3662 vsi->info.valid_sections = 0;
3663
3664 /* Update current VSI BW information */
3665 ret = i40e_vsi_get_bw_info(vsi);
3666 if (ret) {
3667 dev_info(&vsi->back->pdev->dev,
3668 "Failed updating vsi bw info, aq_err=%d\n",
3669 vsi->back->hw.aq.asq_last_status);
3670 goto out;
3671 }
3672
3673 /* Update the netdev TC setup */
3674 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
3675out:
3676 return ret;
3677}
3678
3679/**
3680 * i40e_up_complete - Finish the last steps of bringing up a connection
3681 * @vsi: the VSI being configured
3682 **/
3683static int i40e_up_complete(struct i40e_vsi *vsi)
3684{
3685 struct i40e_pf *pf = vsi->back;
3686 int err;
3687
3688 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3689 i40e_vsi_configure_msix(vsi);
3690 else
3691 i40e_configure_msi_and_legacy(vsi);
3692
3693 /* start rings */
3694 err = i40e_vsi_control_rings(vsi, true);
3695 if (err)
3696 return err;
3697
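	/* rings are running: mark the VSI up, then enable NAPI and IRQs */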
3698 clear_bit(__I40E_DOWN, &vsi->state);
3699 i40e_napi_enable_all(vsi);
3700 i40e_vsi_enable_irq(vsi);
3701
3702 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
3703 (vsi->netdev)) {
3704 netif_tx_start_all_queues(vsi->netdev);
3705 netif_carrier_on(vsi->netdev);
3706 }
3707 i40e_service_event_schedule(pf);
3708
3709 return 0;
3710}
3711
3712/**
3713 * i40e_vsi_reinit_locked - Reset the VSI
3714 * @vsi: the VSI being configured
3715 *
3716 * Rebuild the ring structs after some configuration
3717 * has changed, e.g. MTU size.
3718 **/
3719static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
3720{
3721 struct i40e_pf *pf = vsi->back;
3722
3723 WARN_ON(in_interrupt());
3724 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
3725 usleep_range(1000, 2000);
3726 i40e_down(vsi);
3727
3728 /* Give a VF some time to respond to the reset. The
3729 * two second wait is based upon the watchdog cycle in
3730 * the VF driver.
3731 */
3732 if (vsi->type == I40E_VSI_SRIOV)
3733 msleep(2000);
3734 i40e_up(vsi);
3735 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
3736}
3737
3738/**
3739 * i40e_up - Bring the connection back up after being down
3740 * @vsi: the VSI being configured
3741 **/
3742int i40e_up(struct i40e_vsi *vsi)
3743{
3744 int err;
3745
3746 err = i40e_vsi_configure(vsi);
3747 if (!err)
3748 err = i40e_up_complete(vsi);
3749
3750 return err;
3751}
3752
3753/**
3754 * i40e_down - Shutdown the connection processing
3755 * @vsi: the VSI being stopped
3756 **/
3757void i40e_down(struct i40e_vsi *vsi)
3758{
3759 int i;
3760
3761 /* It is assumed that the caller of this function
3762 * sets the vsi->state __I40E_DOWN bit.
3763 */
3764 if (vsi->netdev) {
3765 netif_carrier_off(vsi->netdev);
3766 netif_tx_disable(vsi->netdev);
3767 }
3768 i40e_vsi_disable_irq(vsi);
3769 i40e_vsi_control_rings(vsi, false);
3770 i40e_napi_disable_all(vsi);
3771
3772 for (i = 0; i < vsi->num_queue_pairs; i++) {
3773 i40e_clean_tx_ring(&vsi->tx_rings[i]);
3774 i40e_clean_rx_ring(&vsi->rx_rings[i]);
3775 }
3776}
3777
3778/**
3779 * i40e_setup_tc - configure multiple traffic classes
3780 * @netdev: net device to configure
3781 * @tc: number of traffic classes to enable
3782 **/
3783static int i40e_setup_tc(struct net_device *netdev, u8 tc)
3784{
3785 struct i40e_netdev_priv *np = netdev_priv(netdev);
3786 struct i40e_vsi *vsi = np->vsi;
3787 struct i40e_pf *pf = vsi->back;
3788 u8 enabled_tc = 0;
3789 int ret = -EINVAL;
3790 int i;
3791
3792 /* Check if DCB enabled to continue */
3793 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
3794 netdev_info(netdev, "DCB is not enabled for adapter\n");
3795 goto exit;
3796 }
3797
3798 /* Check if MFP enabled */
3799 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3800 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
3801 goto exit;
3802 }
3803
3804 /* Check whether tc count is within enabled limit */
3805 if (tc > i40e_pf_get_num_tc(pf)) {
3806 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
3807 goto exit;
3808 }
3809
3810 /* Generate TC map for number of tc requested */
3811 for (i = 0; i < tc; i++)
3812 enabled_tc |= (1 << i);
3813
3814 /* Requesting same TC configuration as already enabled */
3815 if (enabled_tc == vsi->tc_config.enabled_tc)
3816 return 0;
3817
3818 /* Quiesce VSI queues */
3819 i40e_quiesce_vsi(vsi);
3820
3821 /* Configure VSI for enabled TCs */
3822 ret = i40e_vsi_config_tc(vsi, enabled_tc);
3823 if (ret) {
3824 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
3825 vsi->seid);
3826 goto exit;
3827 }
3828
3829 /* Unquiesce VSI */
3830 i40e_unquiesce_vsi(vsi);
3831
3832exit:
3833 return ret;
3834}
3835
3836/**
3837 * i40e_open - Called when a network interface is made active
3838 * @netdev: network interface device structure
3839 *
3840 * The open entry point is called when a network interface is made
3841 * active by the system (IFF_UP). At this point all resources needed
3842 * for transmit and receive operations are allocated, the interrupt
3843 * handler is registered with the OS, the netdev watchdog subtask is
3844 * enabled, and the stack is notified that the interface is ready.
3845 *
3846 * Returns 0 on success, negative value on failure
3847 **/
3848static int i40e_open(struct net_device *netdev)
3849{
3850 struct i40e_netdev_priv *np = netdev_priv(netdev);
3851 struct i40e_vsi *vsi = np->vsi;
3852 struct i40e_pf *pf = vsi->back;
3853 char int_name[IFNAMSIZ];
3854 int err;
3855
3856 /* disallow open during test */
3857 if (test_bit(__I40E_TESTING, &pf->state))
3858 return -EBUSY;
3859
3860 netif_carrier_off(netdev);
3861
3862 /* allocate descriptors */
3863 err = i40e_vsi_setup_tx_resources(vsi);
3864 if (err)
3865 goto err_setup_tx;
3866 err = i40e_vsi_setup_rx_resources(vsi);
3867 if (err)
3868 goto err_setup_rx;
3869
3870 err = i40e_vsi_configure(vsi);
3871 if (err)
3872 goto err_setup_rx;
3873
3874 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
3875 dev_driver_string(&pf->pdev->dev), netdev->name);
3876 err = i40e_vsi_request_irq(vsi, int_name);
3877 if (err)
3878 goto err_setup_rx;
3879
3880 err = i40e_up_complete(vsi);
3881 if (err)
3882 goto err_up_complete;
3883
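	/* allow broadcast Rx on LAN-facing VSI types; broadcast is needed
	 * for basics like ARP, and a failure here is logged but not fatal
	 */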
3884 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
3885 err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
3886 if (err)
3887 netdev_info(netdev,
3888 "couldn't set broadcast err %d aq_err %d\n",
3889 err, pf->hw.aq.asq_last_status);
3890 }
3891
3892 return 0;
3893
3894err_up_complete:
3895 i40e_down(vsi);
3896 i40e_vsi_free_irq(vsi);
3897err_setup_rx:
3898 i40e_vsi_free_rx_resources(vsi);
3899err_setup_tx:
3900 i40e_vsi_free_tx_resources(vsi);
3901 if (vsi == pf->vsi[pf->lan_vsi])
3902 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
3903
3904 return err;
3905}
3906
3907/**
3908 * i40e_close - Disables a network interface
3909 * @netdev: network interface device structure
3910 *
3911 * The close entry point is called when an interface is de-activated
3912 * by the OS. The hardware is still under the driver's control, but
3913 * this netdev interface is disabled.
3914 *
3915 * Returns 0, this is not allowed to fail
3916 **/
3917static int i40e_close(struct net_device *netdev)
3918{
3919 struct i40e_netdev_priv *np = netdev_priv(netdev);
3920 struct i40e_vsi *vsi = np->vsi;
3921
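	/* if the DOWN bit was already set, the VSI is already closed */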
3922 if (test_and_set_bit(__I40E_DOWN, &vsi->state))
3923 return 0;
3924
3925 i40e_down(vsi);
3926 i40e_vsi_free_irq(vsi);
3927
3928 i40e_vsi_free_tx_resources(vsi);
3929 i40e_vsi_free_rx_resources(vsi);
3930
3931 return 0;
3932}
3933
3934/**
3935 * i40e_do_reset - Start a PF or Core Reset sequence
3936 * @pf: board private structure
3937 * @reset_flags: which reset is requested
3938 *
3939 * The essential difference in resets is that the PF Reset
3940 * doesn't clear the packet buffers, doesn't reset the PE
3941 * firmware, and doesn't bother the other PFs on the chip.
3942 **/
3943void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
3944{
3945 u32 val;
3946
3947 WARN_ON(in_interrupt());
3948
3949 /* do the biggest reset indicated */
3950 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
3951
3952 /* Request a Global Reset
3953 *
3954 * This will start the chip's countdown to the actual full
3955 * chip reset event, and a warning interrupt to be sent
3956 * to all PFs, including the requestor. Our handler
3957 * for the warning interrupt will deal with the shutdown
3958 * and recovery of the switch setup.
3959 */
3960 dev_info(&pf->pdev->dev, "GlobalR requested\n");
3961 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
3962 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
3963 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
3964
3965 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
3966
3967 /* Request a Core Reset
3968 *
3969 * Same as Global Reset, except does *not* include the MAC/PHY
3970 */
3971 dev_info(&pf->pdev->dev, "CoreR requested\n");
3972 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
3973 val |= I40E_GLGEN_RTRIG_CORER_MASK;
3974 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
3975 i40e_flush(&pf->hw);
3976
3977 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
3978
3979 /* Request a PF Reset
3980 *
3981 * Resets only the PF-specific registers
3982 *
3983 * This goes directly to the tear-down and rebuild of
3984 * the switch, since we need to do all the recovery as
3985 * for the Core Reset.
3986 */
3987 dev_info(&pf->pdev->dev, "PFR requested\n");
3988 i40e_handle_reset_warning(pf);
3989
3990 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
3991 int v;
3992
3993 /* Find the VSI(s) that requested a re-init */
3994 dev_info(&pf->pdev->dev,
3995 "VSI reinit requested\n");
3996 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3997 struct i40e_vsi *vsi = pf->vsi[v];
3998 if (vsi != NULL &&
3999 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
4000 i40e_vsi_reinit_locked(pf->vsi[v]);
4001 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
4002 }
4003 }
4004
4005 /* no further action needed, so return now */
4006 return;
4007 } else {
4008 dev_info(&pf->pdev->dev,
4009 "bad reset request 0x%08x\n", reset_flags);
4010 return;
4011 }
4012}
4013
4014/**
4015 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
4016 * @pf: board private structure
4017 * @e: event info posted on ARQ
4018 *
4019 * Handler for LAN Queue Overflow Event generated by the firmware for PF
4020 * and VF queues
4021 **/
4022static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4023 struct i40e_arq_event_info *e)
4024{
4025 struct i40e_aqc_lan_overflow *data =
4026 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
4027 u32 queue = le32_to_cpu(data->prtdcb_rupto);
4028 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
4029 struct i40e_hw *hw = &pf->hw;
4030 struct i40e_vf *vf;
4031 u16 vf_id;
4032
4033 dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
4034 __func__, queue, qtx_ctl);
4035
4036 /* Queue belongs to VF, find the VF and issue VF reset */
4037 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
4038 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
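		/* pull the absolute VF number out of QTX_CTL and convert it
		 * to this PF's zero-based VF index
		 */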
4039 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
4040 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
4041 vf_id -= hw->func_caps.vf_base_id;
4042 vf = &pf->vf[vf_id];
4043 i40e_vc_notify_vf_reset(vf);
4044 /* Allow VF to process pending reset notification */
4045 msleep(20);
4046 i40e_reset_vf(vf, false);
4047 }
4048}
4049
4050/**
4051 * i40e_service_event_complete - Finish up the service event
4052 * @pf: board private structure
4053 **/
4054static void i40e_service_event_complete(struct i40e_pf *pf)
4055{
4056 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
4057
4058	/* flush memory to make sure state is correct before next watchdog */
4059 smp_mb__before_clear_bit();
4060 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
4061}
4062
4063/**
4064 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4065 * @pf: board private structure
4066 **/
4067static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4068{
4069 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4070 return;
4071
4072 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4073
4074 /* if interface is down do nothing */
4075 if (test_bit(__I40E_DOWN, &pf->state))
4076 return;
4077}
4078
4079/**
4080 * i40e_vsi_link_event - notify VSI of a link event
4081 * @vsi: vsi to be notified
4082 * @link_up: link up or down
4083 **/
4084static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4085{
4086 if (!vsi)
4087 return;
4088
4089 switch (vsi->type) {
4090 case I40E_VSI_MAIN:
4091 if (!vsi->netdev || !vsi->netdev_registered)
4092 break;
4093
4094 if (link_up) {
4095 netif_carrier_on(vsi->netdev);
4096 netif_tx_wake_all_queues(vsi->netdev);
4097 } else {
4098 netif_carrier_off(vsi->netdev);
4099 netif_tx_stop_all_queues(vsi->netdev);
4100 }
4101 break;
4102
4103 case I40E_VSI_SRIOV:
4104 break;
4105
4106 case I40E_VSI_VMDQ2:
4107 case I40E_VSI_CTRL:
4108 case I40E_VSI_MIRROR:
4109 default:
4110 /* there is no notification for other VSIs */
4111 break;
4112 }
4113}
4114
4115/**
4116 * i40e_veb_link_event - notify elements on the veb of a link event
4117 * @veb: veb to be notified
4118 * @link_up: link up or down
4119 **/
4120static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4121{
4122 struct i40e_pf *pf;
4123 int i;
4124
4125 if (!veb || !veb->pf)
4126 return;
4127 pf = veb->pf;
4128
4129 /* depth first... */
4130 for (i = 0; i < I40E_MAX_VEB; i++)
4131 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
4132 i40e_veb_link_event(pf->veb[i], link_up);
4133
4134 /* ... now the local VSIs */
4135 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4136 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4137 i40e_vsi_link_event(pf->vsi[i], link_up);
4138}
4139
4140/**
4141 * i40e_link_event - Update netif_carrier status
4142 * @pf: board private structure
4143 **/
4144static void i40e_link_event(struct i40e_pf *pf)
4145{
4146 bool new_link, old_link;
4147
4148 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
4149 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
4150
4151 if (new_link == old_link)
4152 return;
4153
4154 netdev_info(pf->vsi[pf->lan_vsi]->netdev,
4155 "NIC Link is %s\n", (new_link ? "Up" : "Down"));
4156
4157 /* Notify the base of the switch tree connected to
4158 * the link. Floating VEBs are not notified.
4159 */
4160 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
4161 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
4162 else
4163 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
4164
4165 if (pf->vf)
4166 i40e_vc_notify_link_state(pf);
4167}
4168
4169/**
4170 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
4171 * @pf: board private structure
4172 *
4173 * Set the per-queue flags to request a check for stuck queues in the irq
4174 * clean functions, then force interrupts to be sure the irq clean is called.
4175 **/
4176static void i40e_check_hang_subtask(struct i40e_pf *pf)
4177{
4178 int i, v;
4179
4180 /* If we're down or resetting, just bail */
4181 if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
4182 return;
4183
4184 /* for each VSI/netdev
4185 * for each Tx queue
4186 * set the check flag
4187 * for each q_vector
4188 * force an interrupt
4189 */
4190 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4191 struct i40e_vsi *vsi = pf->vsi[v];
4192 int armed = 0;
4193
4194 if (!pf->vsi[v] ||
4195 test_bit(__I40E_DOWN, &vsi->state) ||
4196 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
4197 continue;
4198
4199 for (i = 0; i < vsi->num_queue_pairs; i++) {
4200 set_check_for_tx_hang(&vsi->tx_rings[i]);
4201 if (test_bit(__I40E_HANG_CHECK_ARMED,
4202 &vsi->tx_rings[i].state))
4203 armed++;
4204 }
4205
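		/* if any ring armed a hang check, fire a SW interrupt so the
		 * irq clean path runs and evaluates the suspect queues
		 */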
4206 if (armed) {
4207 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
4208 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
4209 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
4210 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
4211 } else {
4212 u16 vec = vsi->base_vector - 1;
4213 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
4214 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
4215 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
4216 wr32(&vsi->back->hw,
4217 I40E_PFINT_DYN_CTLN(vec), val);
4218 }
4219 i40e_flush(&vsi->back->hw);
4220 }
4221 }
4222}
4223
4224/**
4225 * i40e_watchdog_subtask - update stats for active netdevs and switch elements
4226 * @pf: board private structure
4227 **/
4228static void i40e_watchdog_subtask(struct i40e_pf *pf)
4229{
4230 int i;
4231
4232 /* if interface is down do nothing */
4233 if (test_bit(__I40E_DOWN, &pf->state) ||
4234 test_bit(__I40E_CONFIG_BUSY, &pf->state))
4235 return;
4236
4237 /* Update the stats for active netdevs so the network stack
4238 * can look at updated numbers whenever it cares to
4239 */
4240 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4241 if (pf->vsi[i] && pf->vsi[i]->netdev)
4242 i40e_update_stats(pf->vsi[i]);
4243
4244 /* Update the stats for the active switching components */
4245 for (i = 0; i < I40E_MAX_VEB; i++)
4246 if (pf->veb[i])
4247 i40e_update_veb_stats(pf->veb[i]);
4248}
4249
4250/**
4251 * i40e_reset_subtask - Set up for resetting the device and driver
4252 * @pf: board private structure
4253 **/
4254static void i40e_reset_subtask(struct i40e_pf *pf)
4255{
4256 u32 reset_flags = 0;
4257
4258 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
4259 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
4260 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
4261 }
4262 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
4263 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
4264 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4265 }
4266 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
4267 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
4268 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
4269 }
4270 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
4271 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
4272 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
4273 }
4274
4275	/* If there's a recovery already waiting, it takes
4276	 * precedence over starting a new reset sequence.
4277 */
4278 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
4279 i40e_handle_reset_warning(pf);
4280 return;
4281 }
4282
4283	/* if a reset was requested, kick it off unless we're already down or resetting */
4284 if (reset_flags &&
4285 !test_bit(__I40E_DOWN, &pf->state) &&
4286 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
4287 i40e_do_reset(pf, reset_flags);
4288}
4289
4290/**
4291 * i40e_handle_link_event - Handle link event
4292 * @pf: board private structure
4293 * @e: event info posted on ARQ
4294 **/
4295static void i40e_handle_link_event(struct i40e_pf *pf,
4296 struct i40e_arq_event_info *e)
4297{
4298 struct i40e_hw *hw = &pf->hw;
4299 struct i40e_aqc_get_link_status *status =
4300 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
4301 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
4302
4303 /* save off old link status information */
4304 memcpy(&pf->hw.phy.link_info_old, hw_link_info,
4305 sizeof(pf->hw.phy.link_info_old));
4306
4307 /* update link status */
4308 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
4309 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
4310 hw_link_info->link_info = status->link_info;
4311 hw_link_info->an_info = status->an_info;
4312 hw_link_info->ext_info = status->ext_info;
4313 hw_link_info->lse_enable =
4314 le16_to_cpu(status->command_flags) &
4315 I40E_AQ_LSE_ENABLE;
4316
4317 /* process the event */
4318 i40e_link_event(pf);
4319
4320 /* Do a new status request to re-enable LSE reporting
4321 * and load new status information into the hw struct,
4322 * then see if the status changed while processing the
4323 * initial event.
4324 */
4325 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
4326 i40e_link_event(pf);
4327}
4328
4329/**
4330 * i40e_clean_adminq_subtask - Clean the AdminQ rings
4331 * @pf: board private structure
4332 **/
4333static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4334{
4335 struct i40e_arq_event_info event;
4336 struct i40e_hw *hw = &pf->hw;
4337 u16 pending, i = 0;
4338 i40e_status ret;
4339 u16 opcode;
4340 u32 val;
4341
4342 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
4343 return;
4344
4345 event.msg_size = I40E_MAX_AQ_BUF_SIZE;
4346 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
4347 if (!event.msg_buf)
4348 return;
4349
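	/* drain the ARQ, bounded by adminq_work_limit events per pass */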
4350 do {
4351 ret = i40e_clean_arq_element(hw, &event, &pending);
4352 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
4353 dev_info(&pf->pdev->dev, "No ARQ event found\n");
4354 break;
4355 } else if (ret) {
4356 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
4357 break;
4358 }
4359
4360 opcode = le16_to_cpu(event.desc.opcode);
4361 switch (opcode) {
4362
4363 case i40e_aqc_opc_get_link_status:
4364 i40e_handle_link_event(pf, &event);
4365 break;
4366 case i40e_aqc_opc_send_msg_to_pf:
4367 ret = i40e_vc_process_vf_msg(pf,
4368 le16_to_cpu(event.desc.retval),
4369 le32_to_cpu(event.desc.cookie_high),
4370 le32_to_cpu(event.desc.cookie_low),
4371 event.msg_buf,
4372 event.msg_size);
4373 break;
4374 case i40e_aqc_opc_lldp_update_mib:
4375 dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4376 break;
4377 case i40e_aqc_opc_event_lan_overflow:
4378 dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
4379 i40e_handle_lan_overflow_event(pf, &event);
4380 break;
4381 default:
4382 dev_info(&pf->pdev->dev,
4383 "ARQ Error: Unknown event %d received\n",
4384 event.desc.opcode);
4385 break;
4386 }
4387 } while (pending && (i++ < pf->adminq_work_limit));
4388
4389 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
4390 /* re-enable Admin queue interrupt cause */
4391 val = rd32(hw, I40E_PFINT_ICR0_ENA);
4392 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4393 wr32(hw, I40E_PFINT_ICR0_ENA, val);
4394 i40e_flush(hw);
4395
4396 kfree(event.msg_buf);
4397}
4398
4399/**
4400 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
4401 * @veb: pointer to the VEB instance
4402 *
4403 * This is a recursive function that first builds the attached VSIs, then
4404 * recurses to build the next layer of VEBs. We track the connections
4405 * through our own index numbers because the SEIDs from the HW could
4406 * change across the reset.
4407 **/
4408static int i40e_reconstitute_veb(struct i40e_veb *veb)
4409{
4410 struct i40e_vsi *ctl_vsi = NULL;
4411 struct i40e_pf *pf = veb->pf;
4412 int v, veb_idx;
4413 int ret;
4414
4415 /* build VSI that owns this VEB, temporarily attached to base VEB */
4416 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
4417 if (pf->vsi[v] &&
4418 pf->vsi[v]->veb_idx == veb->idx &&
4419 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
4420 ctl_vsi = pf->vsi[v];
4421 break;
4422 }
4423 }
4424 if (!ctl_vsi) {
4425 dev_info(&pf->pdev->dev,
4426 "missing owner VSI for veb_idx %d\n", veb->idx);
4427 ret = -ENOENT;
4428 goto end_reconstitute;
4429 }
4430 if (ctl_vsi != pf->vsi[pf->lan_vsi])
4431 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
4432 ret = i40e_add_vsi(ctl_vsi);
4433 if (ret) {
4434 dev_info(&pf->pdev->dev,
4435 "rebuild of owner VSI failed: %d\n", ret);
4436 goto end_reconstitute;
4437 }
4438 i40e_vsi_reset_stats(ctl_vsi);
4439
4440 /* create the VEB in the switch and move the VSI onto the VEB */
4441 ret = i40e_add_veb(veb, ctl_vsi);
4442 if (ret)
4443 goto end_reconstitute;
4444
4445 /* create the remaining VSIs attached to this VEB */
4446 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4447 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
4448 continue;
4449
4450 if (pf->vsi[v]->veb_idx == veb->idx) {
4451 struct i40e_vsi *vsi = pf->vsi[v];
4452 vsi->uplink_seid = veb->seid;
4453 ret = i40e_add_vsi(vsi);
4454 if (ret) {
4455 dev_info(&pf->pdev->dev,
4456 "rebuild of vsi_idx %d failed: %d\n",
4457 v, ret);
4458 goto end_reconstitute;
4459 }
4460 i40e_vsi_reset_stats(vsi);
4461 }
4462 }
4463
4464 /* create any VEBs attached to this VEB - RECURSION */
4465 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
4466 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
4467 pf->veb[veb_idx]->uplink_seid = veb->seid;
4468 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
4469 if (ret)
4470 break;
4471 }
4472 }
4473
4474end_reconstitute:
4475 return ret;
4476}
4477
4478/**
4479 * i40e_get_capabilities - get info about the HW
4480 * @pf: the PF struct
4481 **/
4482static int i40e_get_capabilities(struct i40e_pf *pf)
4483{
4484 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
4485 u16 data_size;
4486 int buf_len;
4487 int err;
4488
4489 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
4490 do {
4491 cap_buf = kzalloc(buf_len, GFP_KERNEL);
4492 if (!cap_buf)
4493 return -ENOMEM;
4494
4495 /* this loads the data into the hw struct for us */
4496 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
4497 &data_size,
4498 i40e_aqc_opc_list_func_capabilities,
4499 NULL);
4500 /* data loaded, buffer no longer needed */
4501 kfree(cap_buf);
4502
4503 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
4504 /* retry with a larger buffer */
4505 buf_len = data_size;
4506 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
4507 dev_info(&pf->pdev->dev,
4508 "capability discovery failed: aq=%d\n",
4509 pf->hw.aq.asq_last_status);
4510 return -ENODEV;
4511 }
4512 } while (err);
4513
4514 if (pf->hw.debug_mask & I40E_DEBUG_USER)
4515 dev_info(&pf->pdev->dev,
4516 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
4517 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
4518 pf->hw.func_caps.num_msix_vectors,
4519 pf->hw.func_caps.num_msix_vectors_vf,
4520 pf->hw.func_caps.fd_filters_guaranteed,
4521 pf->hw.func_caps.fd_filters_best_effort,
4522 pf->hw.func_caps.num_tx_qp,
4523 pf->hw.func_caps.num_vsis);
4524
4525 return 0;
4526}
4527
4528/**
4529 * i40e_fdir_setup - initialize the Flow Director resources
4530 * @pf: board private structure
4531 **/
4532static void i40e_fdir_setup(struct i40e_pf *pf)
4533{
4534 struct i40e_vsi *vsi;
4535 bool new_vsi = false;
4536 int err, i;
4537
4538 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
4539 return;
4540
4541 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
4542
4543 /* find existing or make new FDIR VSI */
4544 vsi = NULL;
4545 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4546 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
4547 vsi = pf->vsi[i];
4548 if (!vsi) {
4549 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
4550 if (!vsi) {
4551 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
4552 pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
4553 return;
4554 }
4555 new_vsi = true;
4556 }
4557 WARN_ON(vsi->base_queue != I40E_FDIR_RING);
4558 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
4559
4560 err = i40e_vsi_setup_tx_resources(vsi);
4561 if (!err)
4562 err = i40e_vsi_setup_rx_resources(vsi);
4563 if (!err)
4564 err = i40e_vsi_configure(vsi);
4565 if (!err && new_vsi) {
4566 char int_name[IFNAMSIZ + 9];
4567 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4568 dev_driver_string(&pf->pdev->dev));
4569 err = i40e_vsi_request_irq(vsi, int_name);
4570 }
4571 if (!err)
4572 err = i40e_up_complete(vsi);
4573
4574 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4575}
4576
4577/**
4578 * i40e_fdir_teardown - release the Flow Director resources
4579 * @pf: board private structure
4580 **/
4581static void i40e_fdir_teardown(struct i40e_pf *pf)
4582{
4583 int i;
4584
4585 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
4586 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
4587 i40e_vsi_release(pf->vsi[i]);
4588 break;
4589 }
4590 }
4591}
4592
4593/**
4594 * i40e_handle_reset_warning - prep for the core to reset
4595 * @pf: board private structure
4596 *
4597 * Close up the VFs and other things in prep for a Core Reset,
4598 * then get ready to rebuild the world.
4599 **/
4600static void i40e_handle_reset_warning(struct i40e_pf *pf)
4601{
4602 struct i40e_driver_version dv;
4603 struct i40e_hw *hw = &pf->hw;
4604 i40e_status ret;
4605 u32 v;
4606
4607 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
4608 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
4609 return;
4610
4611 dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
4612
4613 i40e_vc_notify_reset(pf);
4614
4615 /* quiesce the VSIs and their queues that are not already DOWN */
4616 i40e_pf_quiesce_all_vsi(pf);
4617
4618 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4619 if (pf->vsi[v])
4620 pf->vsi[v]->seid = 0;
4621 }
4622
4623 i40e_shutdown_adminq(&pf->hw);
4624
4625 /* Now we wait for GRST to settle out.
4626 * We don't have to delete the VEBs or VSIs from the hw switch
4627 * because the reset will make them disappear.
4628 */
4629 ret = i40e_pf_reset(hw);
4630 if (ret)
4631 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
4632 pf->pfr_count++;
4633
4634 if (test_bit(__I40E_DOWN, &pf->state))
4635 goto end_core_reset;
4636 dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
4637
4638 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
4639 ret = i40e_init_adminq(&pf->hw);
4640 if (ret) {
4641 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
4642 goto end_core_reset;
4643 }
4644
4645 ret = i40e_get_capabilities(pf);
4646 if (ret) {
4647 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
4648 ret);
4649 goto end_core_reset;
4650 }
4651
4652 /* call shutdown HMC */
4653 ret = i40e_shutdown_lan_hmc(hw);
4654 if (ret) {
4655 dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
4656 goto end_core_reset;
4657 }
4658
4659 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
4660 hw->func_caps.num_rx_qp,
4661 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
4662 if (ret) {
4663 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
4664 goto end_core_reset;
4665 }
4666 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
4667 if (ret) {
4668 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
4669 goto end_core_reset;
4670 }
4671
4672 /* do basic switch setup */
4673 ret = i40e_setup_pf_switch(pf);
4674 if (ret)
4675 goto end_core_reset;
4676
4677	/* Rebuild the VSIs and VEBs that existed before reset.
4678	 * They are still in our local switch element arrays, so we only
4679	 * need to rebuild the switch model in the HW.
4680	 *
4681	 * If there were VEBs but the reconstitution failed, we'll try
4682	 * to recover minimal use by getting the basic PF VSI working.
4683 */
4684 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
4685 dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
4686 /* find the one VEB connected to the MAC, and find orphans */
4687 for (v = 0; v < I40E_MAX_VEB; v++) {
4688 if (!pf->veb[v])
4689 continue;
4690
4691 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
4692 pf->veb[v]->uplink_seid == 0) {
4693 ret = i40e_reconstitute_veb(pf->veb[v]);
4694
4695 if (!ret)
4696 continue;
4697
4698 /* If Main VEB failed, we're in deep doodoo,
4699 * so give up rebuilding the switch and set up
4700 * for minimal rebuild of PF VSI.
4701 * If orphan failed, we'll report the error
4702 * but try to keep going.
4703 */
4704 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
4705 dev_info(&pf->pdev->dev,
4706 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
4707 ret);
4708 pf->vsi[pf->lan_vsi]->uplink_seid
4709 = pf->mac_seid;
4710 break;
4711 } else if (pf->veb[v]->uplink_seid == 0) {
4712 dev_info(&pf->pdev->dev,
4713 "rebuild of orphan VEB failed: %d\n",
4714 ret);
4715 }
4716 }
4717 }
4718 }
4719
4720 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
4721 dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
4722 /* no VEB, so rebuild only the Main VSI */
4723 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
4724 if (ret) {
4725 dev_info(&pf->pdev->dev,
4726 "rebuild of Main VSI failed: %d\n", ret);
4727 goto end_core_reset;
4728 }
4729 }
4730
4731 /* reinit the misc interrupt */
4732 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4733 ret = i40e_setup_misc_vector(pf);
4734
4735 /* restart the VSIs that were rebuilt and running before the reset */
4736 i40e_pf_unquiesce_all_vsi(pf);
4737
4738 /* tell the firmware that we're starting */
4739 dv.major_version = DRV_VERSION_MAJOR;
4740 dv.minor_version = DRV_VERSION_MINOR;
4741 dv.build_version = DRV_VERSION_BUILD;
4742 dv.subbuild_version = 0;
4743 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
4744
4745 dev_info(&pf->pdev->dev, "PF reset done\n");
4746
4747end_core_reset:
4748 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
4749}
4750
4751/**
4752 * i40e_handle_mdd_event - handle a Malicious Driver Detection interrupt
4753 * @pf: pointer to the pf structure
4754 *
4755 * Called from the MDD irq handler to identify possibly malicious VFs
4756 **/
4757static void i40e_handle_mdd_event(struct i40e_pf *pf)
4758{
4759 struct i40e_hw *hw = &pf->hw;
4760 bool mdd_detected = false;
4761 struct i40e_vf *vf;
4762 u32 reg;
4763 int i;
4764
4765 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
4766 return;
4767
4768 /* find what triggered the MDD event */
4769 reg = rd32(hw, I40E_GL_MDET_TX);
4770 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4771 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
4772 >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
4773		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
4774				>> I40E_GL_MDET_TX_EVENT_SHIFT;
4775 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
4776 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
4777 dev_info(&pf->pdev->dev,
4778 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
4779 event, queue, func);
4780 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4781 mdd_detected = true;
4782 }
4783 reg = rd32(hw, I40E_GL_MDET_RX);
4784 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4785 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
4786 >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
4787		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
4788				>> I40E_GL_MDET_RX_EVENT_SHIFT;
4789 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
4790 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
4791 dev_info(&pf->pdev->dev,
4792 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
4793 event, queue, func);
4794 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4795 mdd_detected = true;
4796 }
4797
4798 /* see if one of the VFs needs its hand slapped */
4799 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
4800 vf = &(pf->vf[i]);
4801 reg = rd32(hw, I40E_VP_MDET_TX(i));
4802 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
4803 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
4804 vf->num_mdd_events++;
4805 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
4806 }
4807
4808 reg = rd32(hw, I40E_VP_MDET_RX(i));
4809 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
4810 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
4811 vf->num_mdd_events++;
4812 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
4813 }
4814
4815 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
4816 dev_info(&pf->pdev->dev,
4817 "Too many MDD events on VF %d, disabled\n", i);
4818 dev_info(&pf->pdev->dev,
4819 "Use PF Control I/F to re-enable the VF\n");
4820 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
4821 }
4822 }
4823
4824 /* re-enable mdd interrupt cause */
4825 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
4826 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4827 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4828 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4829 i40e_flush(hw);
4830}
4831
4832/**
4833 * i40e_service_task - Run the driver's async subtasks
4834 * @work: pointer to work_struct containing our data
4835 **/
4836static void i40e_service_task(struct work_struct *work)
4837{
4838 struct i40e_pf *pf = container_of(work,
4839 struct i40e_pf,
4840 service_task);
4841 unsigned long start_time = jiffies;
4842
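	/* reset handling runs first; the rest is routine housekeeping */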
4843 i40e_reset_subtask(pf);
4844 i40e_handle_mdd_event(pf);
4845 i40e_vc_process_vflr_event(pf);
4846 i40e_watchdog_subtask(pf);
4847 i40e_fdir_reinit_subtask(pf);
4848 i40e_check_hang_subtask(pf);
4849 i40e_sync_filters_subtask(pf);
4850 i40e_clean_adminq_subtask(pf);
4851
4852 i40e_service_event_complete(pf);
4853
4854 /* If the tasks have taken longer than one timer cycle or there
4855 * is more work to be done, reschedule the service task now
4856 * rather than wait for the timer to tick again.
4857 */
4858 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
4859 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
4860 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
4861 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
4862 i40e_service_event_schedule(pf);
4863}
4864
4865/**
4866 * i40e_service_timer - timer callback
4867 * @data: pointer to PF struct
4868 **/
4869static void i40e_service_timer(unsigned long data)
4870{
4871 struct i40e_pf *pf = (struct i40e_pf *)data;
4872
4873 mod_timer(&pf->service_timer,
4874 round_jiffies(jiffies + pf->service_timer_period));
4875 i40e_service_event_schedule(pf);
4876}
4877
4878/**
4879 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
4880 * @vsi: the VSI being configured
4881 **/
4882static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
4883{
4884 struct i40e_pf *pf = vsi->back;
4885
4886 switch (vsi->type) {
4887 case I40E_VSI_MAIN:
4888 vsi->alloc_queue_pairs = pf->num_lan_qps;
4889 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4890 I40E_REQ_DESCRIPTOR_MULTIPLE);
4891 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4892 vsi->num_q_vectors = pf->num_lan_msix;
4893 else
4894 vsi->num_q_vectors = 1;
4895
4896 break;
4897
4898 case I40E_VSI_FDIR:
4899 vsi->alloc_queue_pairs = 1;
4900 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
4901 I40E_REQ_DESCRIPTOR_MULTIPLE);
4902 vsi->num_q_vectors = 1;
4903 break;
4904
4905 case I40E_VSI_VMDQ2:
4906 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
4907 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4908 I40E_REQ_DESCRIPTOR_MULTIPLE);
4909 vsi->num_q_vectors = pf->num_vmdq_msix;
4910 break;
4911
4912 case I40E_VSI_SRIOV:
4913 vsi->alloc_queue_pairs = pf->num_vf_qps;
4914 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4915 I40E_REQ_DESCRIPTOR_MULTIPLE);
4916 break;
4917
4918 default:
4919 WARN_ON(1);
4920 return -ENODATA;
4921 }
4922
4923 return 0;
4924}
4925
4926/**
4927 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
4928 * @pf: board private structure
4929 * @type: type of VSI
4930 *
4931 * On error: returns error code (negative)
4932 * On success: returns vsi index in PF (positive)
4933 **/
4934static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
4935{
4936 int ret = -ENODEV;
4937 struct i40e_vsi *vsi;
4938 int vsi_idx;
4939 int i;
4940
4941 /* Need to protect the allocation of the VSIs at the PF level */
4942 mutex_lock(&pf->switch_mutex);
4943
4944 /* VSI list may be fragmented if VSI creation/destruction has
4945 * been happening. We can afford to do a quick scan to look
4946 * for any free VSIs in the list.
4947 *
4948 * find next empty vsi slot, looping back around if necessary
4949 */
4950 i = pf->next_vsi;
4951 while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
4952 i++;
4953 if (i >= pf->hw.func_caps.num_vsis) {
4954 i = 0;
4955 while (i < pf->next_vsi && pf->vsi[i])
4956 i++;
4957 }
4958
4959 if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
4960 vsi_idx = i; /* Found one! */
4961 } else {
4962 ret = -ENODEV;
4963 goto err_alloc_vsi; /* out of VSI slots! */
4964 }
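	/* resume the next allocation search just past this slot */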
4965 pf->next_vsi = ++i;
4966
4967 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
4968 if (!vsi) {
4969 ret = -ENOMEM;
4970 goto err_alloc_vsi;
4971 }
4972 vsi->type = type;
4973 vsi->back = pf;
4974 set_bit(__I40E_DOWN, &vsi->state);
4975 vsi->flags = 0;
4976 vsi->idx = vsi_idx;
4977 vsi->rx_itr_setting = pf->rx_itr_default;
4978 vsi->tx_itr_setting = pf->tx_itr_default;
4979 vsi->netdev_registered = false;
4980 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
4981 INIT_LIST_HEAD(&vsi->mac_filter_list);
4982
4983 i40e_set_num_rings_in_vsi(vsi);
4984
4985 /* Setup default MSIX irq handler for VSI */
4986 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
4987
4988 pf->vsi[vsi_idx] = vsi;
4989 ret = vsi_idx;
4990err_alloc_vsi:
4991 mutex_unlock(&pf->switch_mutex);
4992 return ret;
4993}
4994
4995/**
4996 * i40e_vsi_clear - Deallocate the VSI provided
4997 * @vsi: the VSI being un-configured
4998 **/
4999static int i40e_vsi_clear(struct i40e_vsi *vsi)
5000{
5001 struct i40e_pf *pf;
5002
5003 if (!vsi)
5004 return 0;
5005
5006 if (!vsi->back)
5007 goto free_vsi;
5008 pf = vsi->back;
5009
5010 mutex_lock(&pf->switch_mutex);
5011 if (!pf->vsi[vsi->idx]) {
5012 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
5013 vsi->idx, vsi->idx, vsi, vsi->type);
5014 goto unlock_vsi;
5015 }
5016
5017 if (pf->vsi[vsi->idx] != vsi) {
5018 dev_err(&pf->pdev->dev,
5019 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
5020 pf->vsi[vsi->idx]->idx,
5021 pf->vsi[vsi->idx],
5022 pf->vsi[vsi->idx]->type,
5023 vsi->idx, vsi, vsi->type);
5024 goto unlock_vsi;
5025 }
5026
5027 /* updates the pf for this cleared vsi */
5028 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
5029 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
5030
5031 pf->vsi[vsi->idx] = NULL;
5032 if (vsi->idx < pf->next_vsi)
5033 pf->next_vsi = vsi->idx;
5034
5035unlock_vsi:
5036 mutex_unlock(&pf->switch_mutex);
5037free_vsi:
5038 kfree(vsi);
5039
5040 return 0;
5041}
5042
5043/**
5044 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
5045 * @vsi: the VSI being configured
5046 **/
5047static int i40e_alloc_rings(struct i40e_vsi *vsi)
5048{
5049 struct i40e_pf *pf = vsi->back;
5050 int ret = 0;
5051 int i;
5052
5053 vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
5054 sizeof(struct i40e_ring), GFP_KERNEL);
5055 if (!vsi->rx_rings) {
5056 ret = -ENOMEM;
5057 goto err_alloc_rings;
5058 }
5059
5060 vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
5061 sizeof(struct i40e_ring), GFP_KERNEL);
5062 if (!vsi->tx_rings) {
5063 ret = -ENOMEM;
5064 kfree(vsi->rx_rings);
5065 goto err_alloc_rings;
5066 }
5067
5068 /* Set basic values in the rings to be used later during open() */
5069 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5070 struct i40e_ring *rx_ring = &vsi->rx_rings[i];
5071 struct i40e_ring *tx_ring = &vsi->tx_rings[i];
5072
5073 tx_ring->queue_index = i;
5074 tx_ring->reg_idx = vsi->base_queue + i;
5075 tx_ring->ring_active = false;
5076 tx_ring->vsi = vsi;
5077 tx_ring->netdev = vsi->netdev;
5078 tx_ring->dev = &pf->pdev->dev;
5079 tx_ring->count = vsi->num_desc;
5080 tx_ring->size = 0;
5081 tx_ring->dcb_tc = 0;
5082
5083 rx_ring->queue_index = i;
5084 rx_ring->reg_idx = vsi->base_queue + i;
5085 rx_ring->ring_active = false;
5086 rx_ring->vsi = vsi;
5087 rx_ring->netdev = vsi->netdev;
5088 rx_ring->dev = &pf->pdev->dev;
5089 rx_ring->count = vsi->num_desc;
5090 rx_ring->size = 0;
5091 rx_ring->dcb_tc = 0;
5092 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
5093 set_ring_16byte_desc_enabled(rx_ring);
5094 else
5095 clear_ring_16byte_desc_enabled(rx_ring);
5096 }
5097
5098err_alloc_rings:
5099 return ret;
5100}
5101
5102/**
5103 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5104 * @vsi: the VSI being cleaned
5105 **/
5106static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5107{
5108 if (vsi) {
5109 kfree(vsi->rx_rings);
5110 kfree(vsi->tx_rings);
5111 }
5112
5113 return 0;
5114}
5115
5116/**
5117 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
5118 * @pf: board private structure
5119 * @vectors: the number of MSI-X vectors to request
5120 *
5121 * Returns the number of vectors reserved, or error
5122 **/
5123static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
5124{
5125 int err = 0;
5126
5127 pf->num_msix_entries = 0;
5128 while (vectors >= I40E_MIN_MSIX) {
5129 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
5130 if (err == 0) {
5131 /* good to go */
5132 pf->num_msix_entries = vectors;
5133 break;
5134 } else if (err < 0) {
5135 /* total failure */
5136 dev_info(&pf->pdev->dev,
5137 "MSI-X vector reservation failed: %d\n", err);
5138 vectors = 0;
5139 break;
5140 } else {
5141 /* err > 0 is the hint for retry */
5142 dev_info(&pf->pdev->dev,
5143 "MSI-X vectors wanted %d, retrying with %d\n",
5144 vectors, err);
5145 vectors = err;
5146 }
5147 }
5148
5149 if (vectors > 0 && vectors < I40E_MIN_MSIX) {
5150 dev_info(&pf->pdev->dev,
5151 "Couldn't get enough vectors, only %d available\n",
5152 vectors);
5153 vectors = 0;
5154 }
5155
5156 return vectors;
5157}
5158
5159/**
5160 * i40e_init_msix - Setup the MSIX capability
5161 * @pf: board private structure
5162 *
5163 * Work with the OS to set up the MSIX vectors needed.
5164 *
5165 * Returns 0 on success, negative on failure
5166 **/
5167static int i40e_init_msix(struct i40e_pf *pf)
5168{
5169 i40e_status err = 0;
5170 struct i40e_hw *hw = &pf->hw;
5171 int v_budget, i;
5172 int vec;
5173
5174 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
5175 return -ENODEV;
5176
5177	/* The number of vectors we'll request is made up of:
5178 * - Add 1 for "other" cause for Admin Queue events, etc.
5179 * - The number of LAN queue pairs
5180 * already adjusted for the NUMA node
5181 * assumes symmetric Tx/Rx pairing
5182 * - The number of VMDq pairs
5183 * Once we count this up, try the request.
5184 *
5185 * If we can't get what we want, we'll simplify to nearly nothing
5186 * and try again. If that still fails, we punt.
5187 */
5188 pf->num_lan_msix = pf->num_lan_qps;
5189 pf->num_vmdq_msix = pf->num_vmdq_qps;
5190 v_budget = 1 + pf->num_lan_msix;
5191 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
5192 if (pf->flags & I40E_FLAG_FDIR_ENABLED)
5193 v_budget++;
5194
5195 /* Scale down if necessary, and the rings will share vectors */
5196 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
5197
5198 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
5199 GFP_KERNEL);
5200 if (!pf->msix_entries)
5201 return -ENOMEM;
5202
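	/* label the entries; pci_enable_msix() fills in the actual vectors */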
5203 for (i = 0; i < v_budget; i++)
5204 pf->msix_entries[i].entry = i;
5205 vec = i40e_reserve_msix_vectors(pf, v_budget);
5206 if (vec < I40E_MIN_MSIX) {
5207 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
5208 kfree(pf->msix_entries);
5209 pf->msix_entries = NULL;
5210 return -ENODEV;
5211
5212 } else if (vec == I40E_MIN_MSIX) {
5213 /* Adjust for minimal MSIX use */
5214 dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
5215 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
5216 pf->num_vmdq_vsis = 0;
5217 pf->num_vmdq_qps = 0;
5218 pf->num_vmdq_msix = 0;
5219 pf->num_lan_qps = 1;
5220 pf->num_lan_msix = 1;
5221
5222 } else if (vec != v_budget) {
5223 /* Scale vector usage down */
5224 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
5225 vec--; /* reserve the misc vector */
5226
5227 /* partition out the remaining vectors */
5228 switch (vec) {
5229 case 2:
5230 pf->num_vmdq_vsis = 1;
5231 pf->num_lan_msix = 1;
5232 break;
5233 case 3:
5234 pf->num_vmdq_vsis = 1;
5235 pf->num_lan_msix = 2;
5236 break;
5237 default:
5238 pf->num_lan_msix = min_t(int, (vec / 2),
5239 pf->num_lan_qps);
5240 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
5241 I40E_DEFAULT_NUM_VMDQ_VSI);
5242 break;
5243 }
5244 }
5245
5246 return err;
5247}
5248
5249/**
5250 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
5251 * @vsi: the VSI being configured
5252 *
5253 * We allocate one q_vector per queue interrupt. If allocation fails we
5254 * return -ENOMEM.
5255 **/
5256static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
5257{
5258 struct i40e_pf *pf = vsi->back;
5259 int v_idx, num_q_vectors;
5260
5261 /* if not MSIX, give the one vector only to the LAN VSI */
5262 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5263 num_q_vectors = vsi->num_q_vectors;
5264 else if (vsi == pf->vsi[pf->lan_vsi])
5265 num_q_vectors = 1;
5266 else
5267 return -EINVAL;
5268
5269 vsi->q_vectors = kcalloc(num_q_vectors,
5270 sizeof(struct i40e_q_vector),
5271 GFP_KERNEL);
5272 if (!vsi->q_vectors)
5273 return -ENOMEM;
5274
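	/* wire each q_vector to the VSI and register its NAPI poll routine */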
5275 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
5276 vsi->q_vectors[v_idx].vsi = vsi;
5277 vsi->q_vectors[v_idx].v_idx = v_idx;
5278 cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
5279 if (vsi->netdev)
5280 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
5281 i40e_napi_poll, vsi->work_limit);
5282 }
5283
5284 return 0;
5285}
5286
5287/**
5288 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
5289 * @pf: board private structure to initialize
5290 **/
5291static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
5292{
5293 int err = 0;
5294
5295 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5296 err = i40e_init_msix(pf);
5297 if (err) {
5298 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
5299 I40E_FLAG_MQ_ENABLED |
5300 I40E_FLAG_DCB_ENABLED |
5301 I40E_FLAG_SRIOV_ENABLED |
5302 I40E_FLAG_FDIR_ENABLED |
5303 I40E_FLAG_FDIR_ATR_ENABLED |
5304 I40E_FLAG_VMDQ_ENABLED);
5305
5306 /* rework the queue expectations without MSIX */
5307 i40e_determine_queue_usage(pf);
5308 }
5309 }
5310
5311 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
5312 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
5313 err = pci_enable_msi(pf->pdev);
5314 if (err) {
5315 dev_info(&pf->pdev->dev,
5316 "MSI init failed (%d), trying legacy.\n", err);
5317 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
5318 }
5319 }
5320
5321 /* track first vector for misc interrupts */
5322 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
5323}
5324
5325/**
5326 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
5327 * @pf: board private structure
5328 *
5329 * This sets up the handler for MSIX 0, which is used to manage the
5330 * non-queue interrupts, e.g. AdminQ and errors. This is not used
5331 * when in MSI or Legacy interrupt mode.
5332 **/
5333static int i40e_setup_misc_vector(struct i40e_pf *pf)
5334{
5335 struct i40e_hw *hw = &pf->hw;
5336 int err = 0;
5337
5338 /* Only request the irq if this is the first time through, and
5339 * not when we're rebuilding after a Reset
5340 */
5341 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
5342 err = request_irq(pf->msix_entries[0].vector,
5343 i40e_intr, 0, pf->misc_int_name, pf);
5344 if (err) {
5345 dev_info(&pf->pdev->dev,
5346 "request_irq for msix_misc failed: %d\n", err);
5347 return -EFAULT;
5348 }
5349 }
5350
5351 i40e_enable_misc_int_causes(hw);
5352
5353 /* associate no queues to the misc vector */
5354 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
5355 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
5356
5357 i40e_flush(hw);
5358
5359 i40e_irq_dynamic_enable_icr0(pf);
5360
5361 return err;
5362}
5363
5364/**
5365 * i40e_config_rss - Prepare for RSS if used
5366 * @pf: board private structure
5367 **/
5368static int i40e_config_rss(struct i40e_pf *pf)
5369{
5370 struct i40e_hw *hw = &pf->hw;
5371 u32 lut = 0;
5372 int i, j;
5373 u64 hena;
5374 /* Set of random keys generated using kernel random number generator */
5375 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
5376 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
5377 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
5378 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
5379
5380 /* Fill out hash function seed */
5381 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5382 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
5383
5384 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
5385 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
5386 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
5387 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
5388 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
5389 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
5390 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
5391 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
5392 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
5393 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
5394 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
5395 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
5396 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
5397 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
5398 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
5399
5400 /* Populate the LUT with max no. of queues in round robin fashion */
5401 for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
5402
5403 /* The assumption is that lan qp count will be the highest
5404 * qp count for any PF VSI that needs RSS.
5405 * If multiple VSIs need RSS support, all the qp counts
5406 * for those VSIs should be a power of 2 for RSS to work.
5407 * If LAN VSI is the only consumer for RSS then this requirement
5408 * is not necessary.
5409 */
5410 if (j == pf->rss_size)
5411 j = 0;
5412 /* lut = 4-byte sliding window of 4 lut entries */
5413 lut = (lut << 8) | (j &
5414 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
5415 /* On i = 3, we have 4 entries in lut; write to the register */
5416 if ((i & 3) == 3)
5417 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
5418 }
5419 i40e_flush(hw);
5420
5421 return 0;
5422}
5423
5424/**
5425 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
5426 * @pf: board private structure to initialize
5427 *
5428 * i40e_sw_init initializes the Adapter private data structure.
5429 * Fields are initialized based on PCI device information and
5430 * OS network device settings (MTU size).
5431 **/
5432static int i40e_sw_init(struct i40e_pf *pf)
5433{
5434 int err = 0;
5435 int size;
5436
5437 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
5438 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
5439 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
5440 if (I40E_DEBUG_USER & debug)
5441 pf->hw.debug_mask = debug;
5442 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
5443 I40E_DEFAULT_MSG_ENABLE);
5444 }
5445
5446 /* Set default capability flags */
5447 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
5448 I40E_FLAG_MSI_ENABLED |
5449 I40E_FLAG_MSIX_ENABLED |
5450 I40E_FLAG_RX_PS_ENABLED |
5451 I40E_FLAG_MQ_ENABLED |
5452 I40E_FLAG_RX_1BUF_ENABLED;
5453
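	/* the max RSS size follows the HW LUT entry width; the size actually
	 * used is further capped by the CPU count on the local NUMA node
	 */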
5454 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
5455 if (pf->hw.func_caps.rss) {
5456 pf->flags |= I40E_FLAG_RSS_ENABLED;
5457 pf->rss_size = min_t(int, pf->rss_size_max,
5458 nr_cpus_node(numa_node_id()));
5459 } else {
5460 pf->rss_size = 1;
5461 }
5462
5463 if (pf->hw.func_caps.dcb)
5464 pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
5465 else
5466 pf->num_tc_qps = 0;
5467
5468 if (pf->hw.func_caps.fd) {
5469 /* FW/NVM is not yet fixed in this regard */
5470 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
5471 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
5472 pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
5473 dev_info(&pf->pdev->dev,
5474 "Flow Director ATR mode Enabled\n");
5475 pf->flags |= I40E_FLAG_FDIR_ENABLED;
5476 dev_info(&pf->pdev->dev,
5477 "Flow Director Side Band mode Enabled\n");
5478 pf->fdir_pf_filter_count =
5479 pf->hw.func_caps.fd_filters_guaranteed;
5480 }
5481 } else {
5482 pf->fdir_pf_filter_count = 0;
5483 }
5484
5485 if (pf->hw.func_caps.vmdq) {
5486 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
5487 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
5488 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
5489 }
5490
5491 /* MFP mode enabled */
5492 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
5493 pf->flags |= I40E_FLAG_MFP_ENABLED;
5494 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
5495 }
5496
5497#ifdef CONFIG_PCI_IOV
5498 if (pf->hw.func_caps.num_vfs) {
5499 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
5500 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
5501 pf->num_req_vfs = min_t(int,
5502 pf->hw.func_caps.num_vfs,
5503 I40E_MAX_VF_COUNT);
5504 }
5505#endif /* CONFIG_PCI_IOV */
5506 pf->eeprom_version = 0xDEAD;
5507 pf->lan_veb = I40E_NO_VEB;
5508 pf->lan_vsi = I40E_NO_VSI;
5509
5510 /* set up queue assignment tracking */
5511 size = sizeof(struct i40e_lump_tracking)
5512 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
5513 pf->qp_pile = kzalloc(size, GFP_KERNEL);
5514 if (!pf->qp_pile) {
5515 err = -ENOMEM;
5516 goto sw_init_done;
5517 }
5518 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
5519 pf->qp_pile->search_hint = 0;
5520
5521 /* set up vector assignment tracking */
5522 size = sizeof(struct i40e_lump_tracking)
5523 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
5524 pf->irq_pile = kzalloc(size, GFP_KERNEL);
5525 if (!pf->irq_pile) {
5526 kfree(pf->qp_pile);
5527 err = -ENOMEM;
5528 goto sw_init_done;
5529 }
5530 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
5531 pf->irq_pile->search_hint = 0;
5532
5533 mutex_init(&pf->switch_mutex);
5534
5535sw_init_done:
5536 return err;
5537}
5538
5539/**
5540 * i40e_set_features - set the netdev feature flags
5541 * @netdev: ptr to the netdev being adjusted
5542 * @features: the feature set that the stack is suggesting
5543 **/
5544static int i40e_set_features(struct net_device *netdev,
5545 netdev_features_t features)
5546{
5547 struct i40e_netdev_priv *np = netdev_priv(netdev);
5548 struct i40e_vsi *vsi = np->vsi;
5549
5550 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5551 i40e_vlan_stripping_enable(vsi);
5552 else
5553 i40e_vlan_stripping_disable(vsi);
5554
5555 return 0;
5556}
5557
5558static const struct net_device_ops i40e_netdev_ops = {
5559 .ndo_open = i40e_open,
5560 .ndo_stop = i40e_close,
5561 .ndo_start_xmit = i40e_lan_xmit_frame,
5562 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
5563 .ndo_set_rx_mode = i40e_set_rx_mode,
5564 .ndo_validate_addr = eth_validate_addr,
5565 .ndo_set_mac_address = i40e_set_mac,
5566 .ndo_change_mtu = i40e_change_mtu,
5567 .ndo_tx_timeout = i40e_tx_timeout,
5568 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
5569 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
5570#ifdef CONFIG_NET_POLL_CONTROLLER
5571 .ndo_poll_controller = i40e_netpoll,
5572#endif
5573 .ndo_setup_tc = i40e_setup_tc,
5574 .ndo_set_features = i40e_set_features,
5575 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
5576 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
5577 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
5578 .ndo_get_vf_config = i40e_ndo_get_vf_config,
5579};
5580
5581/**
5582 * i40e_config_netdev - Setup the netdev flags
5583 * @vsi: the VSI being configured
5584 *
5585 * Returns 0 on success, negative value on failure
5586 **/
5587static int i40e_config_netdev(struct i40e_vsi *vsi)
5588{
5589 struct i40e_pf *pf = vsi->back;
5590 struct i40e_hw *hw = &pf->hw;
5591 struct i40e_netdev_priv *np;
5592 struct net_device *netdev;
5593 u8 mac_addr[ETH_ALEN];
5594 int etherdev_size;
5595
5596 etherdev_size = sizeof(struct i40e_netdev_priv);
5597 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
5598 if (!netdev)
5599 return -ENOMEM;
5600
5601 vsi->netdev = netdev;
5602 np = netdev_priv(netdev);
5603 np->vsi = vsi;
5604
5605 netdev->hw_enc_features = NETIF_F_IP_CSUM |
5606 NETIF_F_GSO_UDP_TUNNEL |
5607 NETIF_F_TSO |
5608 NETIF_F_SG;
5609
5610 netdev->features = NETIF_F_SG |
5611 NETIF_F_IP_CSUM |
5612 NETIF_F_SCTP_CSUM |
5613 NETIF_F_HIGHDMA |
5614 NETIF_F_GSO_UDP_TUNNEL |
5615 NETIF_F_HW_VLAN_CTAG_TX |
5616 NETIF_F_HW_VLAN_CTAG_RX |
5617 NETIF_F_HW_VLAN_CTAG_FILTER |
5618 NETIF_F_IPV6_CSUM |
5619 NETIF_F_TSO |
5620 NETIF_F_TSO6 |
5621 NETIF_F_RXCSUM |
5622 NETIF_F_RXHASH |
5623 0;
5624
5625 /* copy netdev features into list of user selectable features */
5626 netdev->hw_features |= netdev->features;
5627
5628 if (vsi->type == I40E_VSI_MAIN) {
5629 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
5630 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
5631 } else {
5632 /* relate the VSI_VMDQ name to the VSI_MAIN name */
5633 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
5634 pf->vsi[pf->lan_vsi]->netdev->name);
5635 random_ether_addr(mac_addr);
5636 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
5637 }
5638
5639 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
5640 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
5641 /* vlan gets same features (except vlan offload)
5642 * after any tweaks for specific VSI types
5643 */
5644 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
5645 NETIF_F_HW_VLAN_CTAG_RX |
5646 NETIF_F_HW_VLAN_CTAG_FILTER);
5647 netdev->priv_flags |= IFF_UNICAST_FLT;
5648 netdev->priv_flags |= IFF_SUPP_NOFCS;
5649 /* Set up netdev TC information */
5650 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
5651
5652 netdev->netdev_ops = &i40e_netdev_ops;
5653 netdev->watchdog_timeo = 5 * HZ;
5654 i40e_set_ethtool_ops(netdev);
5655
5656 return 0;
5657}
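/* How the VMDq name template above expands: the snprintf() leaves a
 * literal "%d" in netdev->name (e.g. "eth0v%d"), which the core turns
 * into a unique unit number at register_netdev() time. A minimal sketch
 * with a hypothetical base name (illustrative only, not built):
 */
#if 0
	char name[IFNAMSIZ];

	snprintf(name, IFNAMSIZ, "%sv%%d", "eth0");	/* name = "eth0v%d" */
	/* register_netdev() then yields eth0v0, eth0v1, ... */
#endif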
5658
5659/**
5660 * i40e_vsi_delete - Delete a VSI from the switch
5661 * @vsi: the VSI being removed
5662 *
5663 * The default (PF) VSI and the software-only FDIR VSI are skipped.
5664 **/
5665static void i40e_vsi_delete(struct i40e_vsi *vsi)
5666{
5667 /* removing the default VSI is not allowed */
5668 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
5669 return;
5670
5671 /* there is no HW VSI for FDIR */
5672 if (vsi->type == I40E_VSI_FDIR)
5673 return;
5674
5675 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
5677}
5678
5679/**
5680 * i40e_add_vsi - Add a VSI to the switch
5681 * @vsi: the VSI being configured
5682 *
5683 * This initializes a VSI context depending on the VSI type to be added and
5684 * passes it down to the add_vsi aq command.
5685 **/
5686static int i40e_add_vsi(struct i40e_vsi *vsi)
5687{
5688 int ret = -ENODEV;
5689 struct i40e_mac_filter *f, *ftmp;
5690 struct i40e_pf *pf = vsi->back;
5691 struct i40e_hw *hw = &pf->hw;
5692 struct i40e_vsi_context ctxt;
5693 u8 enabled_tc = 0x1; /* TC0 enabled */
5694 int f_count = 0;
5695
5696 memset(&ctxt, 0, sizeof(ctxt));
5697 switch (vsi->type) {
5698 case I40E_VSI_MAIN:
5699 /* The PF's main VSI is already set up as part of the
5700 * device initialization, so we'll not bother with
5701 * the add_vsi call, but we will retrieve the current
5702 * VSI context.
5703 */
5704 ctxt.seid = pf->main_vsi_seid;
5705 ctxt.pf_num = pf->hw.pf_id;
5706 ctxt.vf_num = 0;
5707 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
5708 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5709 if (ret) {
5710 dev_info(&pf->pdev->dev,
5711 "couldn't get pf vsi config, err %d, aq_err %d\n",
5712 ret, pf->hw.aq.asq_last_status);
5713 return -ENOENT;
5714 }
5715 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5716 vsi->info.valid_sections = 0;
5717
5718 vsi->seid = ctxt.seid;
5719 vsi->id = ctxt.vsi_number;
5720
5721 enabled_tc = i40e_pf_get_tc_map(pf);
5722
5723 /* In MFP mode, set up the queue map and update the VSI */
5724 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5725 memset(&ctxt, 0, sizeof(ctxt));
5726 ctxt.seid = pf->main_vsi_seid;
5727 ctxt.pf_num = pf->hw.pf_id;
5728 ctxt.vf_num = 0;
5729 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5730 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5731 if (ret) {
5732 dev_info(&pf->pdev->dev,
5733 "update vsi failed, aq_err=%d\n",
5734 pf->hw.aq.asq_last_status);
5735 ret = -ENOENT;
5736 goto err;
5737 }
5738 /* update the local VSI info queue map */
5739 i40e_vsi_update_queue_map(vsi, &ctxt);
5740 vsi->info.valid_sections = 0;
5741 } else {
5742 /* The default/main VSI is only enabled for TC0;
5743 * reconfigure it to enable all TCs that are
5744 * available on the port in SFP mode.
5745 */
5746 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5747 if (ret) {
5748 dev_info(&pf->pdev->dev,
5749 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
5750 enabled_tc, ret,
5751 pf->hw.aq.asq_last_status);
5752 ret = -ENOENT;
5753 }
5754 }
5755 break;
5756
5757 case I40E_VSI_FDIR:
5758 /* no queue mapping or actual HW VSI needed */
5759 vsi->info.valid_sections = 0;
5760 vsi->seid = 0;
5761 vsi->id = 0;
5762 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5763 return 0;
5765
5766 case I40E_VSI_VMDQ2:
5767 ctxt.pf_num = hw->pf_id;
5768 ctxt.vf_num = 0;
5769 ctxt.uplink_seid = vsi->uplink_seid;
5770 ctxt.connection_type = 0x1; /* regular data port */
5771 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5772
5773 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5774
5775 /* This VSI is connected to a VEB so the switch_id
5776 * should be set to zero by default.
5777 */
5778 ctxt.info.switch_id = 0;
5779 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5780 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5781
5782 /* Set up the VSI tx/rx queue map for TC0 only for now */
5783 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5784 break;
5785
5786 case I40E_VSI_SRIOV:
5787 ctxt.pf_num = hw->pf_id;
5788 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
5789 ctxt.uplink_seid = vsi->uplink_seid;
5790 ctxt.connection_type = 0x1; /* regular data port */
5791 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5792
5793 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5794
5795 /* This VSI is connected to a VEB so the switch_id
5796 * should be set to zero by default.
5797 */
5798 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5799
5800 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
5801 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5802 /* Set up the VSI tx/rx queue map for TC0 only for now */
5803 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5804 break;
5805
5806 default:
5807 return -ENODEV;
5808 }
5809
5810 if (vsi->type != I40E_VSI_MAIN) {
5811 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5812 if (ret) {
5813 dev_info(&vsi->back->pdev->dev,
5814 "add vsi failed, aq_err=%d\n",
5815 vsi->back->hw.aq.asq_last_status);
5816 ret = -ENOENT;
5817 goto err;
5818 }
5819 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5820 vsi->info.valid_sections = 0;
5821 vsi->seid = ctxt.seid;
5822 vsi->id = ctxt.vsi_number;
5823 }
5824
5825 /* If macvlan filters already exist, force them to get loaded */
5826 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
5827 f->changed = true;
5828 f_count++;
5829 }
5830 if (f_count) {
5831 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
5832 pf->flags |= I40E_FLAG_FILTER_SYNC;
5833 }
5834
5835 /* Update VSI BW information */
5836 ret = i40e_vsi_get_bw_info(vsi);
5837 if (ret) {
5838 dev_info(&pf->pdev->dev,
5839 "couldn't get vsi bw info, err %d, aq_err %d\n",
5840 ret, pf->hw.aq.asq_last_status);
5841 /* VSI is already added so not tearing that up */
5842 ret = 0;
5843 }
5844
5845err:
5846 return ret;
5847}
5848
5849/**
5850 * i40e_vsi_release - Delete a VSI and free its resources
5851 * @vsi: the VSI being removed
5852 *
5853 * Returns 0 on success or < 0 on error
5854 **/
5855int i40e_vsi_release(struct i40e_vsi *vsi)
5856{
5857 struct i40e_mac_filter *f, *ftmp;
5858 struct i40e_veb *veb = NULL;
5859 struct i40e_pf *pf;
5860 u16 uplink_seid;
5861 int i, n;
5862
5863 pf = vsi->back;
5864
5865 /* release of a VEB-owner or last VSI is not allowed */
5866 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
5867 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
5868 vsi->seid, vsi->uplink_seid);
5869 return -ENODEV;
5870 }
5871 if (vsi == pf->vsi[pf->lan_vsi] &&
5872 !test_bit(__I40E_DOWN, &pf->state)) {
5873 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
5874 return -ENODEV;
5875 }
5876
5877 uplink_seid = vsi->uplink_seid;
5878 if (vsi->type != I40E_VSI_SRIOV) {
5879 if (vsi->netdev_registered) {
5880 vsi->netdev_registered = false;
5881 if (vsi->netdev) {
5882 /* results in a call to i40e_close() */
5883 unregister_netdev(vsi->netdev);
5884 free_netdev(vsi->netdev);
5885 vsi->netdev = NULL;
5886 }
5887 } else {
5888 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
5889 i40e_down(vsi);
5890 i40e_vsi_free_irq(vsi);
5891 i40e_vsi_free_tx_resources(vsi);
5892 i40e_vsi_free_rx_resources(vsi);
5893 }
5894 i40e_vsi_disable_irq(vsi);
5895 }
5896
5897 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
5898 i40e_del_filter(vsi, f->macaddr, f->vlan,
5899 f->is_vf, f->is_netdev);
5900 i40e_sync_vsi_filters(vsi);
5901
5902 i40e_vsi_delete(vsi);
5903 i40e_vsi_free_q_vectors(vsi);
5904 i40e_vsi_clear_rings(vsi);
5905 i40e_vsi_clear(vsi);
5906
5907 /* If this was the last thing on the VEB, except for the
5908 * controlling VSI, remove the VEB, which puts the controlling
5909 * VSI onto the next level down in the switch.
5910 *
5911 * Well, okay, there's one more exception here: don't remove
5912 * the orphan VEBs yet. We'll wait for an explicit remove request
5913 * from up the network stack.
5914 */
5915 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
5916 if (pf->vsi[i] &&
5917 pf->vsi[i]->uplink_seid == uplink_seid &&
5918 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
5919 n++; /* count the VSIs */
5920 }
5921 }
5922 for (i = 0; i < I40E_MAX_VEB; i++) {
5923 if (!pf->veb[i])
5924 continue;
5925 if (pf->veb[i]->uplink_seid == uplink_seid)
5926 n++; /* count the VEBs */
5927 if (pf->veb[i]->seid == uplink_seid)
5928 veb = pf->veb[i];
5929 }
5930 if (n == 0 && veb && veb->uplink_seid != 0)
5931 i40e_veb_release(veb);
5932
5933 return 0;
5934}
5935
5936/**
5937 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
5938 * @vsi: ptr to the VSI
5939 *
5940 * This should only be called after i40e_vsi_mem_alloc() which allocates the
5941 * corresponding SW VSI structure and initializes num_queue_pairs for the
5942 * newly allocated VSI.
5943 *
5944 * Returns 0 on success or negative on failure
5945 **/
5946static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
5947{
5948 int ret = -ENOENT;
5949 struct i40e_pf *pf = vsi->back;
5950
5951 if (vsi->q_vectors) {
5952 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
5953 vsi->seid);
5954 return -EEXIST;
5955 }
5956
5957 if (vsi->base_vector) {
5958 dev_info(&pf->pdev->dev,
5959 "VSI %d has non-zero base vector %d\n",
5960 vsi->seid, vsi->base_vector);
5961 return -EEXIST;
5962 }
5963
5964 ret = i40e_alloc_q_vectors(vsi);
5965 if (ret) {
5966 dev_info(&pf->pdev->dev,
5967 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
5968 vsi->num_q_vectors, vsi->seid, ret);
5969 vsi->num_q_vectors = 0;
5970 goto vector_setup_out;
5971 }
5972
5973 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
5974 vsi->num_q_vectors, vsi->idx);
5975 if (vsi->base_vector < 0) {
5976 dev_info(&pf->pdev->dev,
5977 "failed to get q tracking for VSI %d, err=%d\n",
5978 vsi->seid, vsi->base_vector);
5979 i40e_vsi_free_q_vectors(vsi);
5980 ret = -ENOENT;
5981 goto vector_setup_out;
5982 }
5983
5984vector_setup_out:
5985 return ret;
5986}
5987
5988/**
5989 * i40e_vsi_setup - Set up a VSI by a given type
5990 * @pf: board private structure
5991 * @type: VSI type
5992 * @uplink_seid: the switch element to link to
5993 * @param1: usage depends upon VSI type. For VF types, indicates VF id
5994 *
5995 * This allocates the sw VSI structure and its queue resources, then adds a VSI
5996 * to the identified VEB.
5997 *
5998 * Returns a pointer to the successfully allocated and configured VSI sw struct
5999 * on success, otherwise returns NULL on failure.
6000 **/
6001struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
6002 u16 uplink_seid, u32 param1)
6003{
6004 struct i40e_vsi *vsi = NULL;
6005 struct i40e_veb *veb = NULL;
6006 int ret, i;
6007 int v_idx;
6008
6009 /* The requested uplink_seid must be either
6010 * - the PF's port seid
6011 * no VEB is needed because this is the PF
6012 * or this is a Flow Director special case VSI
6013 * - seid of an existing VEB
6014 * - seid of a VSI that owns an existing VEB
6015 * - seid of a VSI that doesn't own a VEB
6016 * a new VEB is created and the VSI becomes the owner
6017 * - seid of the PF VSI, which is what creates the first VEB
6018 * this is a special case of the previous
6019 *
6020 * Find which uplink_seid we were given and create a new VEB if needed
6021 */
6022 for (i = 0; i < I40E_MAX_VEB; i++) {
6023 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
6024 veb = pf->veb[i];
6025 break;
6026 }
6027 }
6028
6029 if (!veb && uplink_seid != pf->mac_seid) {
6030
6031 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6032 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
6033 vsi = pf->vsi[i];
6034 break;
6035 }
6036 }
6037 if (!vsi) {
6038 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
6039 uplink_seid);
6040 return NULL;
6041 }
6042
6043 if (vsi->uplink_seid == pf->mac_seid)
6044 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
6045 vsi->tc_config.enabled_tc);
6046 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
6047 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
6048 vsi->tc_config.enabled_tc);
6049
6050 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
6051 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
6052 veb = pf->veb[i];
6053 }
6054 if (!veb) {
6055 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
6056 return NULL;
6057 }
6058
6059 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6060 uplink_seid = veb->seid;
6061 }
6062
6063 /* get vsi sw struct */
6064 v_idx = i40e_vsi_mem_alloc(pf, type);
6065 if (v_idx < 0)
6066 goto err_alloc;
6067 vsi = pf->vsi[v_idx];
6068 vsi->type = type;
6069 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
6070
6071 if (type == I40E_VSI_MAIN)
6072 pf->lan_vsi = v_idx;
6073 else if (type == I40E_VSI_SRIOV)
6074 vsi->vf_id = param1;
6075 /* assign it some queues */
6076 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
6077 if (ret < 0) {
6078 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
6079 vsi->seid, ret);
6080 goto err_vsi;
6081 }
6082 vsi->base_queue = ret;
6083
6084 /* get a VSI from the hardware */
6085 vsi->uplink_seid = uplink_seid;
6086 ret = i40e_add_vsi(vsi);
6087 if (ret)
6088 goto err_vsi;
6089
6090 switch (vsi->type) {
6091 /* setup the netdev if needed */
6092 case I40E_VSI_MAIN:
6093 case I40E_VSI_VMDQ2:
6094 ret = i40e_config_netdev(vsi);
6095 if (ret)
6096 goto err_netdev;
6097 ret = register_netdev(vsi->netdev);
6098 if (ret)
6099 goto err_netdev;
6100 vsi->netdev_registered = true;
6101 netif_carrier_off(vsi->netdev);
6102 /* fall through */
6103
6104 case I40E_VSI_FDIR:
6105 /* set up vectors and rings if needed */
6106 ret = i40e_vsi_setup_vectors(vsi);
6107 if (ret)
6108 goto err_msix;
6109
6110 ret = i40e_alloc_rings(vsi);
6111 if (ret)
6112 goto err_rings;
6113
6114 /* map all of the rings to the q_vectors */
6115 i40e_vsi_map_rings_to_vectors(vsi);
6116
6117 i40e_vsi_reset_stats(vsi);
6118 break;
6119
6120 default:
6121 /* no netdev or rings for the other VSI types */
6122 break;
6123 }
6124
6125 return vsi;
6126
6127err_rings:
6128 i40e_vsi_free_q_vectors(vsi);
6129err_msix:
6130 if (vsi->netdev_registered) {
6131 vsi->netdev_registered = false;
6132 unregister_netdev(vsi->netdev);
6133 free_netdev(vsi->netdev);
6134 vsi->netdev = NULL;
6135 }
6136err_netdev:
6137 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
6138err_vsi:
6139 i40e_vsi_clear(vsi);
6140err_alloc:
6141 return NULL;
6142}
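/* A minimal sketch of a typical caller, following the uplink_seid rules
 * described above; the VSI type and uplink are illustrative only, not
 * built:
 */
#if 0
	struct i40e_vsi *vmdq_vsi;

	/* hang a VMDq2 VSI off the LAN VSI; a VEB is created on demand
	 * if the LAN VSI does not already own one
	 */
	vmdq_vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2,
				  pf->vsi[pf->lan_vsi]->seid, 0);
	if (!vmdq_vsi)
		dev_info(&pf->pdev->dev, "VMDq VSI setup failed\n");
#endif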
6143
6144/**
6145 * i40e_veb_get_bw_info - Query VEB BW information
6146 * @veb: the veb to query
6147 *
6148 * Query the Tx scheduler BW configuration data for given VEB
6149 **/
6150static int i40e_veb_get_bw_info(struct i40e_veb *veb)
6151{
6152 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
6153 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
6154 struct i40e_pf *pf = veb->pf;
6155 struct i40e_hw *hw = &pf->hw;
6156 u32 tc_bw_max;
6157 int ret = 0;
6158 int i;
6159
6160 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
6161 &bw_data, NULL);
6162 if (ret) {
6163 dev_info(&pf->pdev->dev,
6164 "query veb bw config failed, aq_err=%d\n",
6165 hw->aq.asq_last_status);
6166 goto out;
6167 }
6168
6169 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
6170 &ets_data, NULL);
6171 if (ret) {
6172 dev_info(&pf->pdev->dev,
6173 "query veb bw ets config failed, aq_err=%d\n",
6174 hw->aq.asq_last_status);
6175 goto out;
6176 }
6177
6178 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
6179 veb->bw_max_quanta = ets_data.tc_bw_max;
6180 veb->is_abs_credits = bw_data.absolute_credits_enable;
6181 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
6182 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
6183 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6184 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
6185 veb->bw_tc_limit_credits[i] =
6186 le16_to_cpu(bw_data.tc_bw_limits[i]);
6187 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
6188 }
6189
6190out:
6191 return ret;
6192}
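/* Worked example of the tc_bw_max unpacking above: the two little-endian
 * 16-bit response words are combined into one u32 and each TC's 3-bit
 * max quanta sits at a 4-bit stride. With a hypothetical value
 * (illustrative only, not built):
 */
#if 0
	u32 tc_bw_max = 0x00000321;	/* hypothetical response words */

	/* TC0 = (tc_bw_max >> 0) & 0x7 = 1
	 * TC1 = (tc_bw_max >> 4) & 0x7 = 2
	 * TC2 = (tc_bw_max >> 8) & 0x7 = 3
	 */
#endif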
6193
6194/**
6195 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
6196 * @pf: board private structure
6197 *
6198 * On error: returns error code (negative)
6199 * On success: returns veb index in PF (non-negative)
6200 **/
6201static int i40e_veb_mem_alloc(struct i40e_pf *pf)
6202{
6203 int ret = -ENOENT;
6204 struct i40e_veb *veb;
6205 int i;
6206
6207 /* Need to protect the allocation of switch elements at the PF level */
6208 mutex_lock(&pf->switch_mutex);
6209
6210 /* VEB list may be fragmented if VEB creation/destruction has
6211 * been happening. We can afford to do a quick scan to look
6212 * for any free slots in the list.
6213 *
6214 * find the next empty veb slot
6215 */
6216 i = 0;
6217 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
6218 i++;
6219 if (i >= I40E_MAX_VEB) {
6220 ret = -ENOMEM;
6221 goto err_alloc_veb; /* out of VEB slots! */
6222 }
6223
6224 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
6225 if (!veb) {
6226 ret = -ENOMEM;
6227 goto err_alloc_veb;
6228 }
6229 veb->pf = pf;
6230 veb->idx = i;
6231 veb->enabled_tc = 1;
6232
6233 pf->veb[i] = veb;
6234 ret = i;
6235err_alloc_veb:
6236 mutex_unlock(&pf->switch_mutex);
6237 return ret;
6238}
6239
6240/**
6241 * i40e_switch_branch_release - Delete a branch of the switch tree
6242 * @branch: where to start deleting
6243 *
6244 * This uses recursion to find the tips of the branch to be
6245 * removed, deleting them until we work back up to and can delete this VEB.
6246 **/
6247static void i40e_switch_branch_release(struct i40e_veb *branch)
6248{
6249 struct i40e_pf *pf = branch->pf;
6250 u16 branch_seid = branch->seid;
6251 u16 veb_idx = branch->idx;
6252 int i;
6253
6254 /* release any VEBs on this VEB - RECURSION */
6255 for (i = 0; i < I40E_MAX_VEB; i++) {
6256 if (!pf->veb[i])
6257 continue;
6258 if (pf->veb[i]->uplink_seid == branch->seid)
6259 i40e_switch_branch_release(pf->veb[i]);
6260 }
6261
6262 /* Release the VSIs on this VEB, but not the owner VSI.
6263 *
6264 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
6265 * the VEB itself, so don't use (*branch) after this loop.
6266 */
6267 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6268 if (!pf->vsi[i])
6269 continue;
6270 if (pf->vsi[i]->uplink_seid == branch_seid &&
6271 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6272 i40e_vsi_release(pf->vsi[i]);
6273 }
6274 }
6275
6276 /* There's one corner case where the VEB might not have been
6277 * removed, so double check it here and remove it if needed.
6278 * This case happens if the veb was created from the debugfs
6279 * commands and no VSIs were added to it.
6280 */
6281 if (pf->veb[veb_idx])
6282 i40e_veb_release(pf->veb[veb_idx]);
6283}
6284
6285/**
6286 * i40e_veb_clear - remove veb struct
6287 * @veb: the veb to remove
6288 **/
6289static void i40e_veb_clear(struct i40e_veb *veb)
6290{
6291 if (!veb)
6292 return;
6293
6294 if (veb->pf) {
6295 struct i40e_pf *pf = veb->pf;
6296
6297 mutex_lock(&pf->switch_mutex);
6298 if (pf->veb[veb->idx] == veb)
6299 pf->veb[veb->idx] = NULL;
6300 mutex_unlock(&pf->switch_mutex);
6301 }
6302
6303 kfree(veb);
6304}
6305
6306/**
6307 * i40e_veb_release - Delete a VEB and free its resources
6308 * @veb: the VEB being removed
6309 **/
6310void i40e_veb_release(struct i40e_veb *veb)
6311{
6312 struct i40e_vsi *vsi = NULL;
6313 struct i40e_pf *pf;
6314 int i, n = 0;
6315
6316 pf = veb->pf;
6317
6318 /* find the remaining VSI and check for extras */
6319 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6320 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
6321 n++;
6322 vsi = pf->vsi[i];
6323 }
6324 }
6325 if (n != 1) {
6326 dev_info(&pf->pdev->dev,
6327 "can't remove VEB %d with %d VSIs left\n",
6328 veb->seid, n);
6329 return;
6330 }
6331
6332 /* move the remaining VSI to uplink veb */
6333 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
6334 if (veb->uplink_seid) {
6335 vsi->uplink_seid = veb->uplink_seid;
6336 if (veb->uplink_seid == pf->mac_seid)
6337 vsi->veb_idx = I40E_NO_VEB;
6338 else
6339 vsi->veb_idx = veb->veb_idx;
6340 } else {
6341 /* floating VEB */
6342 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6343 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
6344 }
6345
6346 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
6347 i40e_veb_clear(veb);
6350}
6351
6352/**
6353 * i40e_add_veb - create the VEB in the switch
6354 * @veb: the VEB to be instantiated
6355 * @vsi: the controlling VSI
6356 **/
6357static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
6358{
6359 bool is_default = (vsi->idx == vsi->back->lan_vsi);
6360 int ret;
6361
6362 /* get a VEB from the hardware */
6363 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
6364 veb->enabled_tc, is_default, &veb->seid, NULL);
6365 if (ret) {
6366 dev_info(&veb->pf->pdev->dev,
6367 "couldn't add VEB, err %d, aq_err %d\n",
6368 ret, veb->pf->hw.aq.asq_last_status);
6369 return -EPERM;
6370 }
6371
6372 /* get statistics counter */
6373 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
6374 &veb->stats_idx, NULL, NULL, NULL);
6375 if (ret) {
6376 dev_info(&veb->pf->pdev->dev,
6377 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
6378 ret, veb->pf->hw.aq.asq_last_status);
6379 return -EPERM;
6380 }
6381 ret = i40e_veb_get_bw_info(veb);
6382 if (ret) {
6383 dev_info(&veb->pf->pdev->dev,
6384 "couldn't get VEB bw info, err %d, aq_err %d\n",
6385 ret, veb->pf->hw.aq.asq_last_status);
6386 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
6387 return -ENOENT;
6388 }
6389
6390 vsi->uplink_seid = veb->seid;
6391 vsi->veb_idx = veb->idx;
6392 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6393
6394 return 0;
6395}
6396
6397/**
6398 * i40e_veb_setup - Set up a VEB
6399 * @pf: board private structure
6400 * @flags: VEB setup flags
6401 * @uplink_seid: the switch element to link to
6402 * @vsi_seid: the initial VSI seid
6403 * @enabled_tc: Enabled TC bit-map
6404 *
6405 * This allocates the sw VEB structure and links it into the switch
6406 * It is possible and legal for this to be a duplicate of an already
6407 * existing VEB. It is also possible for both uplink and vsi seids
6408 * to be zero, in order to create a floating VEB.
6409 *
6410 * Returns pointer to the successfully allocated VEB sw struct on
6411 * success, otherwise returns NULL on failure.
6412 **/
6413struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
6414 u16 uplink_seid, u16 vsi_seid,
6415 u8 enabled_tc)
6416{
6417 struct i40e_veb *veb, *uplink_veb = NULL;
6418 int vsi_idx, veb_idx;
6419 int ret;
6420
6421 /* if one seid is 0, the other must be 0 to create a floating relay */
6422 if ((uplink_seid == 0 || vsi_seid == 0) &&
6423 (uplink_seid + vsi_seid != 0)) {
6424 dev_info(&pf->pdev->dev,
6425 "one, not both seid's are 0: uplink=%d vsi=%d\n",
6426 uplink_seid, vsi_seid);
6427 return NULL;
6428 }
6429
6430 /* make sure there is such a vsi and uplink */
6431 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
6432 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
6433 break;
6434 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
6435 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
6436 vsi_seid);
6437 return NULL;
6438 }
6439
6440 if (uplink_seid && uplink_seid != pf->mac_seid) {
6441 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6442 if (pf->veb[veb_idx] &&
6443 pf->veb[veb_idx]->seid == uplink_seid) {
6444 uplink_veb = pf->veb[veb_idx];
6445 break;
6446 }
6447 }
6448 if (!uplink_veb) {
6449 dev_info(&pf->pdev->dev,
6450 "uplink seid %d not found\n", uplink_seid);
6451 return NULL;
6452 }
6453 }
6454
6455 /* get veb sw struct */
6456 veb_idx = i40e_veb_mem_alloc(pf);
6457 if (veb_idx < 0)
6458 goto err_alloc;
6459 veb = pf->veb[veb_idx];
6460 veb->flags = flags;
6461 veb->uplink_seid = uplink_seid;
6462 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
6463 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
6464
6465 /* create the VEB in the switch */
6466 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
6467 if (ret)
6468 goto err_veb;
6469
6470 return veb;
6471
6472err_veb:
6473 i40e_veb_clear(veb);
6474err_alloc:
6475 return NULL;
6476}
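/* A sketch of the two call shapes the comment above permits; the
 * arguments are illustrative only, not built:
 */
#if 0
	struct i40e_veb *veb;

	/* a VEB uplinked to the MAC, seeded with the LAN VSI */
	veb = i40e_veb_setup(pf, 0, pf->mac_seid,
			     pf->vsi[pf->lan_vsi]->seid, 0x1);

	/* both seids zero: a floating VEB with no uplink yet */
	veb = i40e_veb_setup(pf, 0, 0, 0, 0x1);
#endif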
6477
6478/**
6479 * i40e_setup_pf_switch_element - set pf vars based on switch type
6480 * @pf: board private structure
6481 * @ele: element we are building info from
6482 * @num_reported: total number of elements
6483 * @printconfig: should we print the contents
6484 *
6485 * helper function to assist in extracting a few useful SEID values.
6486 **/
6487static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
6488 struct i40e_aqc_switch_config_element_resp *ele,
6489 u16 num_reported, bool printconfig)
6490{
6491 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
6492 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
6493 u8 element_type = ele->element_type;
6494 u16 seid = le16_to_cpu(ele->seid);
6495
6496 if (printconfig)
6497 dev_info(&pf->pdev->dev,
6498 "type=%d seid=%d uplink=%d downlink=%d\n",
6499 element_type, seid, uplink_seid, downlink_seid);
6500
6501 switch (element_type) {
6502 case I40E_SWITCH_ELEMENT_TYPE_MAC:
6503 pf->mac_seid = seid;
6504 break;
6505 case I40E_SWITCH_ELEMENT_TYPE_VEB:
6506 /* Main VEB? */
6507 if (uplink_seid != pf->mac_seid)
6508 break;
6509 if (pf->lan_veb == I40E_NO_VEB) {
6510 int v;
6511
6512 /* find existing or else empty VEB */
6513 for (v = 0; v < I40E_MAX_VEB; v++) {
6514 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
6515 pf->lan_veb = v;
6516 break;
6517 }
6518 }
6519 if (pf->lan_veb == I40E_NO_VEB) {
6520 v = i40e_veb_mem_alloc(pf);
6521 if (v < 0)
6522 break;
6523 pf->lan_veb = v;
6524 }
6525 }
6526
6527 pf->veb[pf->lan_veb]->seid = seid;
6528 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
6529 pf->veb[pf->lan_veb]->pf = pf;
6530 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
6531 break;
6532 case I40E_SWITCH_ELEMENT_TYPE_VSI:
6533 if (num_reported != 1)
6534 break;
6535 /* This is immediately after a reset so we can assume this is
6536 * the PF's VSI
6537 */
6538 pf->mac_seid = uplink_seid;
6539 pf->pf_seid = downlink_seid;
6540 pf->main_vsi_seid = seid;
6541 if (printconfig)
6542 dev_info(&pf->pdev->dev,
6543 "pf_seid=%d main_vsi_seid=%d\n",
6544 pf->pf_seid, pf->main_vsi_seid);
6545 break;
6546 case I40E_SWITCH_ELEMENT_TYPE_PF:
6547 case I40E_SWITCH_ELEMENT_TYPE_VF:
6548 case I40E_SWITCH_ELEMENT_TYPE_EMP:
6549 case I40E_SWITCH_ELEMENT_TYPE_BMC:
6550 case I40E_SWITCH_ELEMENT_TYPE_PE:
6551 case I40E_SWITCH_ELEMENT_TYPE_PA:
6552 /* ignore these for now */
6553 break;
6554 default:
6555 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
6556 element_type, seid);
6557 break;
6558 }
6559}
6560
6561/**
6562 * i40e_fetch_switch_configuration - Get switch config from firmware
6563 * @pf: board private structure
6564 * @printconfig: should we print the contents
6565 *
6566 * Get the current switch configuration from the device and
6567 * extract a few useful SEID values.
6568 **/
6569int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
6570{
6571 struct i40e_aqc_get_switch_config_resp *sw_config;
6572 u16 next_seid = 0;
6573 int ret = 0;
6574 u8 *aq_buf;
6575 int i;
6576
6577 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
6578 if (!aq_buf)
6579 return -ENOMEM;
6580
6581 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
6582 do {
6583 u16 num_reported, num_total;
6584
6585 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
6586 I40E_AQ_LARGE_BUF,
6587 &next_seid, NULL);
6588 if (ret) {
6589 dev_info(&pf->pdev->dev,
6590 "get switch config failed %d aq_err=%x\n",
6591 ret, pf->hw.aq.asq_last_status);
6592 kfree(aq_buf);
6593 return -ENOENT;
6594 }
6595
6596 num_reported = le16_to_cpu(sw_config->header.num_reported);
6597 num_total = le16_to_cpu(sw_config->header.num_total);
6598
6599 if (printconfig)
6600 dev_info(&pf->pdev->dev,
6601 "header: %d reported %d total\n",
6602 num_reported, num_total);
6603
6604 if (num_reported) {
6605 int sz = sizeof(*sw_config) * num_reported;
6606
6607 kfree(pf->sw_config);
6608 pf->sw_config = kzalloc(sz, GFP_KERNEL);
6609 if (pf->sw_config)
6610 memcpy(pf->sw_config, sw_config, sz);
6611 }
6612
6613 for (i = 0; i < num_reported; i++) {
6614 struct i40e_aqc_switch_config_element_resp *ele =
6615 &sw_config->element[i];
6616
6617 i40e_setup_pf_switch_element(pf, ele, num_reported,
6618 printconfig);
6619 }
6620 } while (next_seid != 0);
6621
6622 kfree(aq_buf);
6623 return ret;
6624}
6625
6626/**
6627 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
6628 * @pf: board private structure
6629 *
6630 * Returns 0 on success, negative value on failure
6631 **/
6632static int i40e_setup_pf_switch(struct i40e_pf *pf)
6633{
6634 int ret;
6635
6636 /* find out what's out there already */
6637 ret = i40e_fetch_switch_configuration(pf, false);
6638 if (ret) {
6639 dev_info(&pf->pdev->dev,
6640 "couldn't fetch switch config, err %d, aq_err %d\n",
6641 ret, pf->hw.aq.asq_last_status);
6642 return ret;
6643 }
6644 i40e_pf_reset_stats(pf);
6645
6646 /* fdir VSI must happen first to be sure it gets queue 0, but only
6647 * if there is enough room for the fdir VSI
6648 */
6649 if (pf->num_lan_qps > 1)
6650 i40e_fdir_setup(pf);
6651
6652 /* first time setup */
6653 if (pf->lan_vsi == I40E_NO_VSI) {
6654 struct i40e_vsi *vsi = NULL;
6655 u16 uplink_seid;
6656
6657 /* Set up the PF VSI associated with the PF's main VSI
6658 * that is already in the HW switch
6659 */
6660 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6661 uplink_seid = pf->veb[pf->lan_veb]->seid;
6662 else
6663 uplink_seid = pf->mac_seid;
6664
6665 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
6666 if (!vsi) {
6667 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
6668 i40e_fdir_teardown(pf);
6669 return -EAGAIN;
6670 }
6671 /* accommodate kcompat by copying the main VSI queue count
6672 * into the pf, since this newer code pushes the pf queue
6673 * info down a level into a VSI
6674 */
6675 pf->num_rx_queues = vsi->alloc_queue_pairs;
6676 pf->num_tx_queues = vsi->alloc_queue_pairs;
6677 } else {
6678 /* force a reset of TC and queue layout configurations */
6679 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
6680 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
6681 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
6682 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
6683 }
6684 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
6685
6686 /* Set up static PF queue filter control settings */
6687 ret = i40e_setup_pf_filter_control(pf);
6688 if (ret) {
6689 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
6690 ret);
6691 /* Failure here should not stop continuing other steps */
6692 }
6693
6694 /* enable RSS in the HW, even for only one queue, as the stack can use
6695 * the hash
6696 */
6697 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
6698 i40e_config_rss(pf);
6699
6700 /* fill in link information and enable LSE reporting */
6701 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
6702 i40e_link_event(pf);
6703
6704 /* Initialize user-specific link properties */
6705 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
6706 I40E_AQ_AN_COMPLETED) ? true : false);
6707 pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
6708 if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
6709 (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
6710 pf->hw.fc.current_mode = I40E_FC_FULL;
6711 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
6712 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
6713 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
6714 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
6715 else
6716 pf->hw.fc.current_mode = I40E_FC_DEFAULT;
6717
6718 return ret;
6719}
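/* Summary of the pause-bit resolution above:
 *
 *	PAUSE_TX  PAUSE_RX	fc.current_mode
 *	    1	      1		I40E_FC_FULL
 *	    1	      0		I40E_FC_TX_PAUSE
 *	    0	      1		I40E_FC_RX_PAUSE
 *	    0	      0		I40E_FC_DEFAULT
 */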
6720
6721/**
6722 * i40e_set_rss_size - helper to set rss_size
6723 * @pf: board private structure
6724 * @queues_left: how many queues are available
6725 **/
6726static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
6727{
6728 int num_tc0;
6729
6730 num_tc0 = min_t(int, queues_left, pf->rss_size_max);
6731 num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
6732 num_tc0 = rounddown_pow_of_two(num_tc0);
6733
6734 return num_tc0;
6735}
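/* Worked example of the sizing above, assuming queues_left = 24,
 * rss_size_max = 64 and 6 CPUs on the local node (illustrative only,
 * not built):
 */
#if 0
	int num_tc0;

	num_tc0 = min_t(int, 24, 64);		/* -> 24 */
	num_tc0 = min_t(int, 24, 6);		/* -> 6  */
	num_tc0 = rounddown_pow_of_two(6);	/* -> 4 RSS queues */
#endif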
6736
6737/**
6738 * i40e_determine_queue_usage - Work out queue distribution
6739 * @pf: board private structure
6740 **/
6741static void i40e_determine_queue_usage(struct i40e_pf *pf)
6742{
6743 int accum_tc_size;
6744 int queues_left;
6745
6746 pf->num_lan_qps = 0;
6747 pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
6748 accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
6749
6750 /* Find the max queues to be put into basic use. We'll always be
6751 * using TC0, whether or not DCB is running, and TC0 will get the
6752 * big RSS set.
6753 */
6754 queues_left = pf->hw.func_caps.num_tx_qp;
6755
6756 if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
6757 (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
6758 !(pf->flags & (I40E_FLAG_RSS_ENABLED |
6759 I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
6760 (queues_left == 1)) {
6761
6762 /* one qp for PF, no queues for anything else */
6763 queues_left = 0;
6764 pf->rss_size = pf->num_lan_qps = 1;
6765
6766 /* make sure all the fancies are disabled */
6767 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
6768 I40E_FLAG_MQ_ENABLED |
6769 I40E_FLAG_FDIR_ENABLED |
6770 I40E_FLAG_FDIR_ATR_ENABLED |
6771 I40E_FLAG_DCB_ENABLED |
6772 I40E_FLAG_SRIOV_ENABLED |
6773 I40E_FLAG_VMDQ_ENABLED);
6774
6775 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6776 !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6777 !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6778
6779 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6780
6781 queues_left -= pf->rss_size;
6782 pf->num_lan_qps = pf->rss_size;
6783
6784 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6785 !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6786 (pf->flags & I40E_FLAG_DCB_ENABLED)) {
6787
6788 /* save num_tc_qps queues each for TCs 1 thru 7 and the rest
6789 * are set up for RSS in TC0
6790 */
6791 queues_left -= accum_tc_size;
6792
6793 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6794
6795 queues_left -= pf->rss_size;
6796 if (queues_left < 0) {
6797 dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
6798 return;
6799 }
6800
6801 pf->num_lan_qps = pf->rss_size + accum_tc_size;
6802
6803 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6804 (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6805 !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6806
6807 queues_left -= 1; /* save 1 queue for FD */
6808
6809 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6810
6811 queues_left -= pf->rss_size;
6812 if (queues_left < 0) {
6813 dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
6814 return;
6815 }
6816
6817 pf->num_lan_qps = pf->rss_size;
6818
6819 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6820 (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6821 (pf->flags & I40E_FLAG_DCB_ENABLED)) {
6822
6823 /* save 1 queue for flow director,
6824 * num_tc_qps queues each for TCs 1 thru 7,
6825 * and the rest are set up for RSS in TC0
6826 */
6827 queues_left -= 1;
6828 queues_left -= accum_tc_size;
6829
6830 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6831 queues_left -= pf->rss_size;
6832 if (queues_left < 0) {
6833 dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
6834 return;
6835 }
6836
6837 pf->num_lan_qps = pf->rss_size + accum_tc_size;
6838
6839 } else {
6840 dev_info(&pf->pdev->dev,
6841 "Invalid configuration, flags=0x%08llx\n", pf->flags);
6842 return;
6843 }
6844
6845 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
6846 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
6847 pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
6848 pf->num_vf_qps));
6849 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
6850 }
6851
6852 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
6853 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
6854 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
6855 (queues_left / pf->num_vmdq_qps));
6856 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
6857 }
6860}
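/* Worked example for the RSS + FDIR + DCB branch above, assuming
 * num_tx_qp = 64, num_tc_qps = 8 and an i40e_set_rss_size() result
 * of 4 (illustrative only, not built):
 */
#if 0
	int accum_tc_size, queues_left;

	accum_tc_size = 7 * 8;		/* 56 qps held back for TCs 1-7	*/
	queues_left = 64 - 1 - 56;	/* 7 left after the FD queue	*/
	pf->num_lan_qps = 4 + 56;	/* 60; 3 qps remain for VFs/VMDq */
#endif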
6861
6862/**
6863 * i40e_setup_pf_filter_control - Setup PF static filter control
6864 * @pf: PF to be setup
6865 *
6866 * i40e_setup_pf_filter_control sets up a pf's initial filter control
6867 * settings. If PE/FCoE are enabled then it will also set the per-PF
6868 * filter sizes required for them. It also enables Flow Director,
6869 * ethertype and macvlan type filter settings for the pf.
6870 *
6871 * Returns 0 on success, negative on failure
6872 **/
6873static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
6874{
6875 struct i40e_filter_control_settings *settings = &pf->filter_settings;
6876
6877 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
6878
6879 /* Flow Director is enabled */
6880 if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
6881 settings->enable_fdir = true;
6882
6883 /* Ethtype and MACVLAN filters enabled for PF */
6884 settings->enable_ethtype = true;
6885 settings->enable_macvlan = true;
6886
6887 if (i40e_set_filter_control(&pf->hw, settings))
6888 return -ENOENT;
6889
6890 return 0;
6891}
6892
6893/**
6894 * i40e_probe - Device initialization routine
6895 * @pdev: PCI device information struct
6896 * @ent: entry in i40e_pci_tbl
6897 *
6898 * i40e_probe initializes a pf identified by a pci_dev structure.
6899 * The OS initialization, configuring of the pf private structure,
6900 * and a hardware reset occur.
6901 *
6902 * Returns 0 on success, negative on failure
6903 **/
6904static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6905{
6906 struct i40e_driver_version dv;
6907 struct i40e_pf *pf;
6908 struct i40e_hw *hw;
6909 int err = 0;
6910 u32 len;
6911
6912 err = pci_enable_device_mem(pdev);
6913 if (err)
6914 return err;
6915
6916 /* set up for high or low dma */
6917 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
6918 /* coherent mask for the same size will always succeed if
6919 * dma_set_mask does
6920 */
6921 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
6922 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
6923 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
6924 } else {
6925 err = -EIO;
6926 dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
6927 goto err_dma;
6928 }
6929
6930 /* set up pci connections */
6931 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
6932 IORESOURCE_MEM), i40e_driver_name);
6933 if (err) {
6934 dev_info(&pdev->dev,
6935 "pci_request_selected_regions failed %d\n", err);
6936 goto err_pci_reg;
6937 }
6938
6939 pci_enable_pcie_error_reporting(pdev);
6940 pci_set_master(pdev);
6941
6942 /* Now that we have a PCI connection, we need to do the
6943 * low level device setup. This is primarily setting up
6944 * the Admin Queue structures and then querying for the
6945 * device's current profile information.
6946 */
6947 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
6948 if (!pf) {
6949 err = -ENOMEM;
6950 goto err_pf_alloc;
6951 }
6952 pf->next_vsi = 0;
6953 pf->pdev = pdev;
6954 set_bit(__I40E_DOWN, &pf->state);
6955
6956 hw = &pf->hw;
6957 hw->back = pf;
6958 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6959 pci_resource_len(pdev, 0));
6960 if (!hw->hw_addr) {
6961 err = -EIO;
6962 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
6963 (unsigned int)pci_resource_start(pdev, 0),
6964 (unsigned int)pci_resource_len(pdev, 0), err);
6965 goto err_ioremap;
6966 }
6967 hw->vendor_id = pdev->vendor;
6968 hw->device_id = pdev->device;
6969 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
6970 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6971 hw->subsystem_device_id = pdev->subsystem_device;
6972 hw->bus.device = PCI_SLOT(pdev->devfn);
6973 hw->bus.func = PCI_FUNC(pdev->devfn);
6974
6975 /* Reset here to make sure all is clean and to define PF 'n' */
6976 err = i40e_pf_reset(hw);
6977 if (err) {
6978 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
6979 goto err_pf_reset;
6980 }
6981 pf->pfr_count++;
6982
6983 hw->aq.num_arq_entries = I40E_AQ_LEN;
6984 hw->aq.num_asq_entries = I40E_AQ_LEN;
6985 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
6986 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
6987 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
6988 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
6989 "%s-pf%d:misc",
6990 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
6991
6992 err = i40e_init_shared_code(hw);
6993 if (err) {
6994 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
6995 goto err_pf_reset;
6996 }
6997
6998 err = i40e_init_adminq(hw);
6999 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
7000 if (err) {
7001 dev_info(&pdev->dev,
7002 "init_adminq failed: %d expecting API %02x.%02x\n",
7003 err,
7004 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
7005 goto err_pf_reset;
7006 }
7007
7008 err = i40e_get_capabilities(pf);
7009 if (err)
7010 goto err_adminq_setup;
7011
7012 err = i40e_sw_init(pf);
7013 if (err) {
7014 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
7015 goto err_sw_init;
7016 }
7017
7018 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7019 hw->func_caps.num_rx_qp,
7020 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
7021 if (err) {
7022 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
7023 goto err_init_lan_hmc;
7024 }
7025
7026 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7027 if (err) {
7028 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
7029 err = -ENOENT;
7030 goto err_configure_lan_hmc;
7031 }
7032
7033 i40e_get_mac_addr(hw, hw->mac.addr);
7034 if (i40e_validate_mac_addr(hw->mac.addr)) {
7035 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
7036 err = -EIO;
7037 goto err_mac_addr;
7038 }
7039 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
7040 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
7041
7042 pci_set_drvdata(pdev, pf);
7043 pci_save_state(pdev);
7044
7045 /* set up periodic task facility */
7046 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
7047 pf->service_timer_period = HZ;
7048
7049 INIT_WORK(&pf->service_task, i40e_service_task);
7050 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
7051 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
7052 pf->link_check_timeout = jiffies;
7053
7054 /* set up the main switch operations */
7055 i40e_determine_queue_usage(pf);
7056 i40e_init_interrupt_scheme(pf);
7057
7058 /* Set up the *vsi struct based on the number of VSIs in the HW,
7059 * and set up our local tracking of the MAIN PF vsi.
7060 */
7061 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
7062 pf->vsi = kzalloc(len, GFP_KERNEL);
7063 if (!pf->vsi) {
err = -ENOMEM;
7064 goto err_switch_setup;
}
7065
7066 err = i40e_setup_pf_switch(pf);
7067 if (err) {
7068 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
7069 goto err_vsis;
7070 }
7071
7072 /* The main driver is (mostly) up and happy. We need to set this state
7073 * before setting up the misc vector or we get a race and the vector
7074 * ends up disabled forever.
7075 */
7076 clear_bit(__I40E_DOWN, &pf->state);
7077
7078 /* In case of MSIX we are going to setup the misc vector right here
7079 * to handle admin queue events etc. In case of legacy and MSI
7080 * the misc functionality and queue processing is combined in
7081 * the same vector and that gets setup at open.
7082 */
7083 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7084 err = i40e_setup_misc_vector(pf);
7085 if (err) {
7086 dev_info(&pdev->dev,
7087 "setup of misc vector failed: %d\n", err);
7088 goto err_vsis;
7089 }
7090 }
7091
7092 /* prep for VF support */
7093 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
7094 (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
7095 u32 val;
7096
7097 /* disable link interrupts for VFs */
7098 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
7099 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
7100 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
7101 i40e_flush(hw);
7102 }
7103
7104 i40e_dbg_pf_init(pf);
7105
7106 /* tell the firmware that we're starting */
7107 dv.major_version = DRV_VERSION_MAJOR;
7108 dv.minor_version = DRV_VERSION_MINOR;
7109 dv.build_version = DRV_VERSION_BUILD;
7110 dv.subbuild_version = 0;
7111 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7112
7113 /* since everything's happy, start the service_task timer */
7114 mod_timer(&pf->service_timer,
7115 round_jiffies(jiffies + pf->service_timer_period));
7116
7117 return 0;
7118
7119 /* Unwind what we've done if something failed in the setup */
7120err_vsis:
7121 set_bit(__I40E_DOWN, &pf->state);
7122err_switch_setup:
7123 i40e_clear_interrupt_scheme(pf);
7124 kfree(pf->vsi);
7125 del_timer_sync(&pf->service_timer);
7126err_mac_addr:
7127err_configure_lan_hmc:
7128 (void)i40e_shutdown_lan_hmc(hw);
7129err_init_lan_hmc:
7130 kfree(pf->qp_pile);
7131 kfree(pf->irq_pile);
7132err_sw_init:
7133err_adminq_setup:
7134 (void)i40e_shutdown_adminq(hw);
7135err_pf_reset:
7136 iounmap(hw->hw_addr);
7137err_ioremap:
7138 kfree(pf);
7139err_pf_alloc:
7140 pci_disable_pcie_error_reporting(pdev);
7141 pci_release_selected_regions(pdev,
7142 pci_select_bars(pdev, IORESOURCE_MEM));
7143err_pci_reg:
7144err_dma:
7145 pci_disable_device(pdev);
7146 return err;
7147}
7148
7149/**
7150 * i40e_remove - Device removal routine
7151 * @pdev: PCI device information struct
7152 *
7153 * i40e_remove is called by the PCI subsystem to alert the driver
7154 * that it should release a PCI device. This could be caused by a
7155 * Hot-Plug event, or because the driver is going to be removed from
7156 * memory.
7157 **/
7158static void i40e_remove(struct pci_dev *pdev)
7159{
7160 struct i40e_pf *pf = pci_get_drvdata(pdev);
7161 i40e_status ret_code;
7162 u32 reg;
7163 int i;
7164
7165 i40e_dbg_pf_exit(pf);
7166
7167 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
7168 i40e_free_vfs(pf);
7169 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
7170 }
7171
7172 /* no more scheduling of any task */
7173 set_bit(__I40E_DOWN, &pf->state);
7174 del_timer_sync(&pf->service_timer);
7175 cancel_work_sync(&pf->service_task);
7176
7177 i40e_fdir_teardown(pf);
7178
7179 /* If there is a switch structure or any orphans, remove them.
7180 * This will leave only the PF's VSI remaining.
7181 */
7182 for (i = 0; i < I40E_MAX_VEB; i++) {
7183 if (!pf->veb[i])
7184 continue;
7185
7186 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
7187 pf->veb[i]->uplink_seid == 0)
7188 i40e_switch_branch_release(pf->veb[i]);
7189 }
7190
7191 /* Now we can shutdown the PF's VSI, just before we kill
7192 * adminq and hmc.
7193 */
7194 if (pf->vsi[pf->lan_vsi])
7195 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
7196
7197 i40e_stop_misc_vector(pf);
7198 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7199 synchronize_irq(pf->msix_entries[0].vector);
7200 free_irq(pf->msix_entries[0].vector, pf);
7201 }
7202
7203 /* shutdown and destroy the HMC */
7204 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
7205 if (ret_code)
7206 dev_warn(&pdev->dev,
7207 "Failed to destroy the HMC resources: %d\n", ret_code);
7208
7209 /* shutdown the adminq */
7210 i40e_aq_queue_shutdown(&pf->hw, true);
7211 ret_code = i40e_shutdown_adminq(&pf->hw);
7212 if (ret_code)
7213 dev_warn(&pdev->dev,
7214 "Failed to destroy the Admin Queue resources: %d\n",
7215 ret_code);
7216
7217 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
7218 i40e_clear_interrupt_scheme(pf);
7219 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7220 if (pf->vsi[i]) {
7221 i40e_vsi_clear_rings(pf->vsi[i]);
7222 i40e_vsi_clear(pf->vsi[i]);
7223 pf->vsi[i] = NULL;
7224 }
7225 }
7226
7227 for (i = 0; i < I40E_MAX_VEB; i++) {
7228 kfree(pf->veb[i]);
7229 pf->veb[i] = NULL;
7230 }
7231
7232 kfree(pf->qp_pile);
7233 kfree(pf->irq_pile);
7234 kfree(pf->sw_config);
7235 kfree(pf->vsi);
7236
7237 /* force a PF reset to clean anything leftover */
7238 reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
7239 wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
7240 i40e_flush(&pf->hw);
7241
7242 iounmap(pf->hw.hw_addr);
7243 kfree(pf);
7244 pci_release_selected_regions(pdev,
7245 pci_select_bars(pdev, IORESOURCE_MEM));
7246
7247 pci_disable_pcie_error_reporting(pdev);
7248 pci_disable_device(pdev);
7249}
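/* Teardown ordering in i40e_remove() above, for reference: stop the
 * service task and timer, tear down FDIR, release switch branches and
 * then the PF VSI, free the misc vector, shut down the HMC and admin
 * queue, free the tracking structures, and finish with a PF reset.
 */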
7250
7251/**
7252 * i40e_pci_error_detected - warning that something funky happened in PCI land
7253 * @pdev: PCI device information struct
7254 * @error: the channel state describing the type of PCI error
7255 * Called to warn that something happened and the error handling steps
7256 * are in progress. Allows the driver to quiesce things and be ready for
7257 * remediation.
7258 **/
7259static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
7260 enum pci_channel_state error)
7261{
7262 struct i40e_pf *pf = pci_get_drvdata(pdev);
7263
7264 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
7265
7266 /* shutdown all operations */
7267 i40e_pf_quiesce_all_vsi(pf);
7268
7269 /* Request a slot reset */
7270 return PCI_ERS_RESULT_NEED_RESET;
7271}
7272
7273/**
7274 * i40e_pci_error_slot_reset - a PCI slot reset just happened
7275 * @pdev: PCI device information struct
7276 *
7277 * Called to find if the driver can work with the device now that
7278 * the pci slot has been reset. If a basic connection seems good
7279 * (registers are readable and have sane content) then return a
7280 * happy little PCI_ERS_RESULT_xxx.
7281 **/
7282static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
7283{
7284 struct i40e_pf *pf = pci_get_drvdata(pdev);
7285 pci_ers_result_t result;
7286 int err;
7287 u32 reg;
7288
7289 dev_info(&pdev->dev, "%s\n", __func__);
7290 if (pci_enable_device_mem(pdev)) {
7291 dev_info(&pdev->dev,
7292 "Cannot re-enable PCI device after reset.\n");
7293 result = PCI_ERS_RESULT_DISCONNECT;
7294 } else {
7295 pci_set_master(pdev);
7296 pci_restore_state(pdev);
7297 pci_save_state(pdev);
7298 pci_wake_from_d3(pdev, false);
7299
7300 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7301 if (reg == 0)
7302 result = PCI_ERS_RESULT_RECOVERED;
7303 else
7304 result = PCI_ERS_RESULT_DISCONNECT;
7305 }
7306
7307 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7308 if (err) {
7309 dev_info(&pdev->dev,
7310 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7311 err);
7312 /* non-fatal, continue */
7313 }
7314
7315 return result;
7316}
7317
7318/**
7319 * i40e_pci_error_resume - restart operations after PCI error recovery
7320 * @pdev: PCI device information struct
7321 *
7322 * Called to allow the driver to bring things back up after PCI error
7323 * and/or reset recovery has finished.
7324 **/
7325static void i40e_pci_error_resume(struct pci_dev *pdev)
7326{
7327 struct i40e_pf *pf = pci_get_drvdata(pdev);
7328
7329 dev_info(&pdev->dev, "%s\n", __func__);
7330 i40e_handle_reset_warning(pf);
7331}
7332
7333static const struct pci_error_handlers i40e_err_handler = {
7334 .error_detected = i40e_pci_error_detected,
7335 .slot_reset = i40e_pci_error_slot_reset,
7336 .resume = i40e_pci_error_resume,
7337};
7338
7339static struct pci_driver i40e_driver = {
7340 .name = i40e_driver_name,
7341 .id_table = i40e_pci_tbl,
7342 .probe = i40e_probe,
7343 .remove = i40e_remove,
7344 .err_handler = &i40e_err_handler,
7345 .sriov_configure = i40e_pci_sriov_configure,
7346};
7347
7348/**
7349 * i40e_init_module - Driver registration routine
7350 *
7351 * i40e_init_module is the first routine called when the driver is
7352 * loaded. All it does is register with the PCI subsystem.
7353 **/
7354static int __init i40e_init_module(void)
7355{
7356 pr_info("%s: %s - version %s\n", i40e_driver_name,
7357 i40e_driver_string, i40e_driver_version_str);
7358 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
7359 i40e_dbg_init();
7360 return pci_register_driver(&i40e_driver);
7361}
7362module_init(i40e_init_module);
7363
7364/**
7365 * i40e_exit_module - Driver exit cleanup routine
7366 *
7367 * i40e_exit_module is called just before the driver is removed
7368 * from memory.
7369 **/
7370static void __exit i40e_exit_module(void)
7371{
7372 pci_unregister_driver(&i40e_driver);
7373 i40e_dbg_exit();
7374}
7375module_exit(i40e_exit_module);