Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_main.c')
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_main.c	484
1 file changed, 314 insertions(+), 170 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 221aa4795017..be15938ba213 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 11
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -347,14 +347,53 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
 **/
 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
-					     struct rtnl_link_stats64 *storage)
+					     struct rtnl_link_stats64 *stats)
 {
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
+	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+	int i;
+
+	rcu_read_lock();
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		struct i40e_ring *tx_ring, *rx_ring;
+		u64 bytes, packets;
+		unsigned int start;
+
+		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+		if (!tx_ring)
+			continue;
 
-	*storage = *i40e_get_vsi_stats_struct(vsi);
+		do {
+			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+			packets = tx_ring->stats.packets;
+			bytes = tx_ring->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+		stats->tx_packets += packets;
+		stats->tx_bytes += bytes;
+		rx_ring = &tx_ring[1];
 
-	return storage;
+		do {
+			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+			packets = rx_ring->stats.packets;
+			bytes = rx_ring->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+
+		stats->rx_packets += packets;
+		stats->rx_bytes += bytes;
+	}
+	rcu_read_unlock();
+
+	/* following stats updated by ixgbe_watchdog_task() */
+	stats->multicast = vsi_stats->multicast;
+	stats->tx_errors = vsi_stats->tx_errors;
+	stats->tx_dropped = vsi_stats->tx_dropped;
+	stats->rx_errors = vsi_stats->rx_errors;
+	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
+	stats->rx_length_errors = vsi_stats->rx_length_errors;
+
+	return stats;
 }
 
 /**
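
[Annotation] The do/while loops added above are the kernel's u64_stats read protocol: on 32-bit hosts a 64-bit counter cannot be loaded atomically, so the reader retries whenever the writer's sequence count moved mid-read. A minimal standalone sketch of both sides of the protocol (the foo_ring type and foo_* functions are hypothetical, not part of this patch):

	#include <linux/types.h>
	#include <linux/u64_stats_sync.h>

	struct foo_ring {
		struct u64_stats_sync syncp;
		u64 packets;
		u64 bytes;
	};

	/* writer (e.g. a Tx/Rx clean path): publish a consistent update */
	static void foo_stats_add(struct foo_ring *ring, u64 pkts, u64 len)
	{
		u64_stats_update_begin(&ring->syncp);
		ring->packets += pkts;
		ring->bytes += len;
		u64_stats_update_end(&ring->syncp);
	}

	/* reader (e.g. an ndo_get_stats64 path): retry if a writer interleaved */
	static void foo_stats_read(struct foo_ring *ring, u64 *pkts, u64 *len)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			*pkts = ring->packets;
			*len = ring->bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
	}

On 64-bit kernels the begin/retry helpers compile away, so the loop costs nothing where the race cannot happen.
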
@@ -376,10 +415,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
 	if (vsi->rx_rings)
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			memset(&vsi->rx_rings[i].rx_stats, 0 ,
-			       sizeof(vsi->rx_rings[i].rx_stats));
-			memset(&vsi->tx_rings[i].tx_stats, 0,
-			       sizeof(vsi->tx_rings[i].tx_stats));
+			memset(&vsi->rx_rings[i]->stats, 0 ,
+			       sizeof(vsi->rx_rings[i]->stats));
+			memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+			       sizeof(vsi->rx_rings[i]->rx_stats));
+			memset(&vsi->tx_rings[i]->stats, 0 ,
+			       sizeof(vsi->tx_rings[i]->stats));
+			memset(&vsi->tx_rings[i]->tx_stats, 0,
+			       sizeof(vsi->tx_rings[i]->tx_stats));
 		}
 	vsi->stat_offsets_loaded = false;
 }
@@ -598,7 +641,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			struct i40e_ring *ring = &vsi->tx_rings[i];
+			struct i40e_ring *ring = vsi->tx_rings[i];
 			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
 		}
 	}
@@ -652,7 +695,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			struct i40e_ring *ring = &vsi->tx_rings[i];
+			struct i40e_ring *ring = vsi->tx_rings[i];
 
 			tc = ring->dcb_tc;
 			if (xoff[tc])
@@ -704,21 +747,38 @@ void i40e_update_stats(struct i40e_vsi *vsi)
 	tx_restart = tx_busy = 0;
 	rx_page = 0;
 	rx_buf = 0;
+	rcu_read_lock();
 	for (q = 0; q < vsi->num_queue_pairs; q++) {
 		struct i40e_ring *p;
+		u64 bytes, packets;
+		unsigned int start;
 
-		p = &vsi->rx_rings[q];
-		rx_b += p->rx_stats.bytes;
-		rx_p += p->rx_stats.packets;
-		rx_buf += p->rx_stats.alloc_rx_buff_failed;
-		rx_page += p->rx_stats.alloc_rx_page_failed;
+		/* locate Tx ring */
+		p = ACCESS_ONCE(vsi->tx_rings[q]);
 
-		p = &vsi->tx_rings[q];
-		tx_b += p->tx_stats.bytes;
-		tx_p += p->tx_stats.packets;
+		do {
+			start = u64_stats_fetch_begin_bh(&p->syncp);
+			packets = p->stats.packets;
+			bytes = p->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+		tx_b += bytes;
+		tx_p += packets;
 		tx_restart += p->tx_stats.restart_queue;
 		tx_busy += p->tx_stats.tx_busy;
+
+		/* Rx queue is part of the same block as Tx queue */
+		p = &p[1];
+		do {
+			start = u64_stats_fetch_begin_bh(&p->syncp);
+			packets = p->stats.packets;
+			bytes = p->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+		rx_b += bytes;
+		rx_p += packets;
+		rx_buf += p->rx_stats.alloc_rx_buff_failed;
+		rx_page += p->rx_stats.alloc_rx_page_failed;
 	}
+	rcu_read_unlock();
 	vsi->tx_restart = tx_restart;
 	vsi->tx_busy = tx_busy;
 	vsi->rx_page_failed = rx_page;
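
[Annotation] The "p = &p[1]" step relies on a layout guarantee introduced later in this patch: i40e_alloc_rings() now obtains each queue pair as one two-element allocation with the Tx ring first and the Rx ring directly behind it, so stepping one struct i40e_ring past a Tx pointer lands on its Rx partner. Sketch of that contract:

	/* layout set up in i40e_alloc_rings() below */
	struct i40e_ring *tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
	struct i40e_ring *rx_ring = &tx_ring[1];	/* second element of the same block */

One ACCESS_ONCE() load of the Tx pointer under rcu_read_lock() therefore reaches both rings of the pair.
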
@@ -1988,7 +2048,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
 	int i, err = 0;
 
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-		err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
 
 	return err;
 }
@@ -2004,8 +2064,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
 	int i;
 
 	for (i = 0; i < vsi->num_queue_pairs; i++)
-		if (vsi->tx_rings[i].desc)
-			i40e_free_tx_resources(&vsi->tx_rings[i]);
+		if (vsi->tx_rings[i]->desc)
+			i40e_free_tx_resources(vsi->tx_rings[i]);
 }
 
 /**
@@ -2023,7 +2083,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
 	int i, err = 0;
 
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-		err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
 	return err;
 }
 
@@ -2038,8 +2098,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
 	int i;
 
 	for (i = 0; i < vsi->num_queue_pairs; i++)
-		if (vsi->rx_rings[i].desc)
-			i40e_free_rx_resources(&vsi->rx_rings[i]);
+		if (vsi->rx_rings[i]->desc)
+			i40e_free_rx_resources(vsi->rx_rings[i]);
 }
 
 /**
@@ -2114,8 +2174,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 
 	/* Now associate this queue with this PCI function */
 	qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
-	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
-		    & I40E_QTX_CTL_PF_INDX_MASK);
+	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+		    I40E_QTX_CTL_PF_INDX_MASK);
 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
 	i40e_flush(hw);
 
@@ -2223,8 +2283,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
 	int err = 0;
 	u16 i;
 
-	for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
-		err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
 
 	return err;
 }
@@ -2274,7 +2334,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-		err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
 
 	return err;
 }
@@ -2298,8 +2358,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
 		qoffset = vsi->tc_config.tc_info[n].qoffset;
 		qcount = vsi->tc_config.tc_info[n].qcount;
 		for (i = qoffset; i < (qoffset + qcount); i++) {
-			struct i40e_ring *rx_ring = &vsi->rx_rings[i];
-			struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+			struct i40e_ring *rx_ring = vsi->rx_rings[i];
+			struct i40e_ring *tx_ring = vsi->tx_rings[i];
 			rx_ring->dcb_tc = n;
 			tx_ring->dcb_tc = n;
 		}
@@ -2354,8 +2414,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 	 */
 	qp = vsi->base_queue;
 	vector = vsi->base_vector;
-	q_vector = vsi->q_vectors;
-	for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+		q_vector = vsi->q_vectors[i];
 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
 		q_vector->rx.latency_range = I40E_LOW_LATENCY;
 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2435,7 +2495,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
 **/
 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 {
-	struct i40e_q_vector *q_vector = vsi->q_vectors;
+	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	u32 val;
@@ -2472,7 +2532,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
 * @pf: board private structure
 **/
-static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
 {
	struct i40e_hw *hw = &pf->hw;
	u32 val;
@@ -2500,7 +2560,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
 	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
 	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-	i40e_flush(hw);
+	/* skip the flush */
 }
 
 /**
@@ -2512,7 +2572,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
 {
 	struct i40e_q_vector *q_vector = data;
 
-	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
 		return IRQ_HANDLED;
 
 	napi_schedule(&q_vector->napi);
@@ -2529,7 +2589,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
 {
 	struct i40e_q_vector *q_vector = data;
 
-	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
 		return IRQ_HANDLED;
 
 	pr_info("fdir ring cleaning needed\n");
@@ -2554,16 +2614,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 	int vector, err;
 
 	for (vector = 0; vector < q_vectors; vector++) {
-		struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
 
-		if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+		if (q_vector->tx.ring && q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
 			tx_int_idx++;
-		} else if (q_vector->rx.ring[0]) {
+		} else if (q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
-		} else if (q_vector->tx.ring[0]) {
+		} else if (q_vector->tx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
 		} else {
@@ -2611,8 +2671,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
 	int i;
 
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
-		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
 	}
 
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2649,6 +2709,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
 		i40e_irq_dynamic_enable_icr0(pf);
 	}
 
+	i40e_flush(&pf->hw);
 	return 0;
 }
 
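
[Annotation] Dropping i40e_flush() from i40e_irq_dynamic_enable() (the "/* skip the flush */" change above) and adding one i40e_flush(&pf->hw) here turns n read-backs into one: wr32() writes are posted and may sit in buffers until a read forces them to the device, so flushing once after the whole bank of vectors is programmed is sufficient. A sketch of the resulting caller pattern (loop bounds illustrative):

	for (i = 0; i < vsi->num_q_vectors; i++)
		i40e_irq_dynamic_enable(vsi, i + vsi->base_vector);	/* no flush inside */
	i40e_flush(&pf->hw);	/* one read-back pushes out all posted writes */
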
@@ -2681,14 +2742,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
 	icr0 = rd32(hw, I40E_PFINT_ICR0);
 
-	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
-	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
-		return IRQ_NONE;
-
 	val = rd32(hw, I40E_PFINT_DYN_CTL0);
 	val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
 
+	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
+	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+		return IRQ_NONE;
+
 	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
 
 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -2702,10 +2763,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
 		qval = rd32(hw, I40E_QINT_TQCTL(0));
 		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
 		wr32(hw, I40E_QINT_TQCTL(0), qval);
-		i40e_flush(hw);
 
 		if (!test_bit(__I40E_DOWN, &pf->state))
-			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
 	}
 
 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2764,7 +2824,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
 	/* re-enable interrupt causes */
 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-	i40e_flush(hw);
 	if (!test_bit(__I40E_DOWN, &pf->state)) {
 		i40e_service_event_schedule(pf);
 		i40e_irq_dynamic_enable_icr0(pf);
@@ -2774,40 +2833,26 @@ static irqreturn_t i40e_intr(int irq, void *data)
 }
 
 /**
- * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
  * @vsi: the VSI being configured
  * @v_idx: vector index
- * @r_idx: rx queue index
+ * @qp_idx: queue pair index
  **/
-static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
 {
-	struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
-	struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
-
-	rx_ring->q_vector = q_vector;
-	q_vector->rx.ring[q_vector->rx.count] = rx_ring;
-	q_vector->rx.count++;
-	q_vector->rx.latency_range = I40E_LOW_LATENCY;
-	q_vector->vsi = vsi;
-}
-
-/**
- * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
- * @vsi: the VSI being configured
- * @v_idx: vector index
- * @t_idx: tx queue index
- **/
-static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
-{
-	struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
-	struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
 
 	tx_ring->q_vector = q_vector;
-	q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+	tx_ring->next = q_vector->tx.ring;
+	q_vector->tx.ring = tx_ring;
 	q_vector->tx.count++;
-	q_vector->tx.latency_range = I40E_LOW_LATENCY;
-	q_vector->num_ringpairs++;
-	q_vector->vsi = vsi;
+
+	rx_ring->q_vector = q_vector;
+	rx_ring->next = q_vector->rx.ring;
+	q_vector->rx.ring = rx_ring;
+	q_vector->rx.count++;
 }
 
 /**
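
[Annotation] map_vector_to_qp() replaces the old fixed-size ring[] arrays in each ring container with a singly linked list threaded through ring->next, pushed at the head. The i40e_for_each_ring() iterator used by later hunks lives in i40e_txrx.h (outside this diff) and presumably walks that list along these lines (assumed shape, not copied from the header):

	/* assumed definition; the real macro is in i40e_txrx.h */
	#define i40e_for_each_ring(pos, head) \
		for (pos = (head).ring; pos != NULL; pos = pos->next)

Head insertion means rings are visited in reverse qp_idx order, which is harmless since every caller simply visits all rings on the vector.
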
@@ -2823,7 +2868,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 {
 	int qp_remaining = vsi->num_queue_pairs;
 	int q_vectors = vsi->num_q_vectors;
-	int qp_per_vector;
+	int num_ringpairs;
 	int v_start = 0;
 	int qp_idx = 0;
 
@@ -2831,11 +2876,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 	 * group them so there are multiple queues per vector.
 	 */
 	for (; v_start < q_vectors && qp_remaining; v_start++) {
-		qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
-		for (; qp_per_vector;
-		     qp_per_vector--, qp_idx++, qp_remaining--) {
-			map_vector_to_rxq(vsi, v_start, qp_idx);
-			map_vector_to_txq(vsi, v_start, qp_idx);
+		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+		q_vector->num_ringpairs = num_ringpairs;
+
+		q_vector->rx.count = 0;
+		q_vector->tx.count = 0;
+		q_vector->rx.ring = NULL;
+		q_vector->tx.ring = NULL;
+
+		while (num_ringpairs--) {
+			map_vector_to_qp(vsi, v_start, qp_idx);
+			qp_idx++;
+			qp_remaining--;
 		}
 	}
 }
@@ -2887,7 +2942,7 @@ static void i40e_netpoll(struct net_device *netdev)
 	pf->flags |= I40E_FLAG_IN_NETPOLL;
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 		for (i = 0; i < vsi->num_q_vectors; i++)
-			i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
 	} else {
 		i40e_intr(pf->pdev->irq, netdev);
 	}
@@ -3073,14 +3128,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 			u16 vector = i + base;
 
 			/* free only the irqs that were actually requested */
-			if (vsi->q_vectors[i].num_ringpairs == 0)
+			if (vsi->q_vectors[i]->num_ringpairs == 0)
 				continue;
 
 			/* clear the affinity_mask in the IRQ descriptor */
 			irq_set_affinity_hint(pf->msix_entries[vector].vector,
 					      NULL);
 			free_irq(pf->msix_entries[vector].vector,
-				 &vsi->q_vectors[i]);
+				 vsi->q_vectors[i]);
 
 			/* Tear down the interrupt queue link list
 			 *
@@ -3164,6 +3219,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+	struct i40e_ring *ring;
+
+	if (!q_vector)
+		return;
+
+	/* disassociate q_vector from rings */
+	i40e_for_each_ring(ring, q_vector->tx)
+		ring->q_vector = NULL;
+
+	i40e_for_each_ring(ring, q_vector->rx)
+		ring->q_vector = NULL;
+
+	/* only VSI w/ an associated netdev is set up w/ NAPI */
+	if (vsi->netdev)
+		netif_napi_del(&q_vector->napi);
+
+	vsi->q_vectors[v_idx] = NULL;
+
+	kfree_rcu(q_vector, rcu);
+}
+
+/**
 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI being un-configured
 *
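
[Annotation] kfree_rcu() is what lets the stats paths read q_vectors and rings under nothing stronger than rcu_read_lock(): the memory is returned to the allocator only after a grace period, i.e. after every reader that could still hold the pointer has left its critical section. The structure only needs to embed the callback head; a self-contained sketch with a hypothetical struct foo (i40e_q_vector is assumed to gain an equivalent rcu member in the companion header, outside this diff):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int payload;
		struct rcu_head rcu;	/* storage for the deferred-free callback */
	};

	static void foo_release(struct foo *f)
	{
		kfree_rcu(f, rcu);	/* freed only after current RCU readers finish */
	}
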
@@ -3174,24 +3262,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
 {
 	int v_idx;
 
-	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
-		struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
-		int r_idx;
-
-		if (!q_vector)
-			continue;
-
-		/* disassociate q_vector from rings */
-		for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
-			q_vector->tx.ring[r_idx]->q_vector = NULL;
-		for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
-			q_vector->rx.ring[r_idx]->q_vector = NULL;
-
-		/* only VSI w/ an associated netdev is set up w/ NAPI */
-		if (vsi->netdev)
-			netif_napi_del(&q_vector->napi);
-	}
-	kfree(vsi->q_vectors);
+	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+		i40e_free_q_vector(vsi, v_idx);
 }
 
 /**
3197/** 3269/**
@@ -3241,7 +3313,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3241 return; 3313 return;
3242 3314
3243 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3315 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3244 napi_enable(&vsi->q_vectors[q_idx].napi); 3316 napi_enable(&vsi->q_vectors[q_idx]->napi);
3245} 3317}
3246 3318
3247/** 3319/**
@@ -3256,7 +3328,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3256 return; 3328 return;
3257 3329
3258 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3330 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3259 napi_disable(&vsi->q_vectors[q_idx].napi); 3331 napi_disable(&vsi->q_vectors[q_idx]->napi);
3260} 3332}
3261 3333
3262/** 3334/**
@@ -3703,8 +3775,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
 
 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
 	    (vsi->netdev)) {
+		netdev_info(vsi->netdev, "NIC Link is Up\n");
 		netif_tx_start_all_queues(vsi->netdev);
 		netif_carrier_on(vsi->netdev);
+	} else if (vsi->netdev) {
+		netdev_info(vsi->netdev, "NIC Link is Down\n");
 	}
 	i40e_service_event_schedule(pf);
 
@@ -3772,8 +3847,8 @@ void i40e_down(struct i40e_vsi *vsi)
 	i40e_napi_disable_all(vsi);
 
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		i40e_clean_tx_ring(&vsi->tx_rings[i]);
-		i40e_clean_rx_ring(&vsi->rx_rings[i]);
+		i40e_clean_tx_ring(vsi->tx_rings[i]);
+		i40e_clean_rx_ring(vsi->rx_rings[i]);
 	}
 }
 
@@ -4153,8 +4228,9 @@ static void i40e_link_event(struct i40e_pf *pf)
 	if (new_link == old_link)
 		return;
 
-	netdev_info(pf->vsi[pf->lan_vsi]->netdev,
-		    "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+	if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+		netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+			    "NIC Link is %s\n", (new_link ? "Up" : "Down"));
 
 	/* Notify the base of the switch tree connected to
 	 * the link. Floating VEBs are not notified.
@@ -4199,9 +4275,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			set_check_for_tx_hang(&vsi->tx_rings[i]);
+			set_check_for_tx_hang(vsi->tx_rings[i]);
 			if (test_bit(__I40E_HANG_CHECK_ARMED,
-				     &vsi->tx_rings[i].state))
+				     &vsi->tx_rings[i]->state))
 				armed++;
 		}
 
@@ -4537,7 +4613,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf)
 	bool new_vsi = false;
 	int err, i;
 
-	if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+	if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
+			   I40E_FLAG_FDIR_ATR_ENABLED)))
 		return;
 
 	pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@@ -4937,6 +5014,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 {
 	int ret = -ENODEV;
 	struct i40e_vsi *vsi;
+	int sz_vectors;
+	int sz_rings;
 	int vsi_idx;
 	int i;
 
@@ -4962,14 +5041,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 		vsi_idx = i;             /* Found one! */
 	} else {
 		ret = -ENODEV;
-		goto err_alloc_vsi;  /* out of VSI slots! */
+		goto unlock_pf;  /* out of VSI slots! */
 	}
 	pf->next_vsi = ++i;
 
 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
 	if (!vsi) {
 		ret = -ENOMEM;
-		goto err_alloc_vsi;
+		goto unlock_pf;
 	}
 	vsi->type = type;
 	vsi->back = pf;
@@ -4982,14 +5061,40 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
 	INIT_LIST_HEAD(&vsi->mac_filter_list);
 
-	i40e_set_num_rings_in_vsi(vsi);
+	ret = i40e_set_num_rings_in_vsi(vsi);
+	if (ret)
+		goto err_rings;
+
+	/* allocate memory for ring pointers */
+	sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+	vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
+	if (!vsi->tx_rings) {
+		ret = -ENOMEM;
+		goto err_rings;
+	}
+	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+	/* allocate memory for q_vector pointers */
+	sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+	vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
+	if (!vsi->q_vectors) {
+		ret = -ENOMEM;
+		goto err_vectors;
+	}
 
 	/* Setup default MSIX irq handler for VSI */
 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
 
 	pf->vsi[vsi_idx] = vsi;
 	ret = vsi_idx;
-err_alloc_vsi:
+	goto unlock_pf;
+
+err_vectors:
+	kfree(vsi->tx_rings);
+err_rings:
+	pf->next_vsi = i - 1;
+	kfree(vsi);
+unlock_pf:
 	mutex_unlock(&pf->switch_mutex);
 	return ret;
 }
@@ -5030,6 +5135,10 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+	/* free the ring and vector containers */
+	kfree(vsi->q_vectors);
+	kfree(vsi->tx_rings);
+
 	pf->vsi[vsi->idx] = NULL;
 	if (vsi->idx < pf->next_vsi)
 		pf->next_vsi = vsi->idx;
@@ -5043,34 +5152,40 @@ free_vsi:
 }
 
 /**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+	int i;
+
+	if (vsi->tx_rings[0])
+		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+			kfree_rcu(vsi->tx_rings[i], rcu);
+			vsi->tx_rings[i] = NULL;
+			vsi->rx_rings[i] = NULL;
+		}
+
+	return 0;
+}
+
+/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
 static int i40e_alloc_rings(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
-	int ret = 0;
 	int i;
 
-	vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
-				sizeof(struct i40e_ring), GFP_KERNEL);
-	if (!vsi->rx_rings) {
-		ret = -ENOMEM;
-		goto err_alloc_rings;
-	}
-
-	vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
-				sizeof(struct i40e_ring), GFP_KERNEL);
-	if (!vsi->tx_rings) {
-		ret = -ENOMEM;
-		kfree(vsi->rx_rings);
-		goto err_alloc_rings;
-	}
-
 	/* Set basic values in the rings to be used later during open() */
 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-		struct i40e_ring *rx_ring = &vsi->rx_rings[i];
-		struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+		struct i40e_ring *tx_ring;
+		struct i40e_ring *rx_ring;
+
+		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+		if (!tx_ring)
+			goto err_out;
 
 		tx_ring->queue_index = i;
 		tx_ring->reg_idx = vsi->base_queue + i;
@@ -5081,7 +5196,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 		tx_ring->count = vsi->num_desc;
 		tx_ring->size = 0;
 		tx_ring->dcb_tc = 0;
+		vsi->tx_rings[i] = tx_ring;
 
+		rx_ring = &tx_ring[1];
 		rx_ring->queue_index = i;
 		rx_ring->reg_idx = vsi->base_queue + i;
 		rx_ring->ring_active = false;
@@ -5095,24 +5212,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 			set_ring_16byte_desc_enabled(rx_ring);
 		else
 			clear_ring_16byte_desc_enabled(rx_ring);
+		vsi->rx_rings[i] = rx_ring;
 	}
 
-err_alloc_rings:
-	return ret;
-}
-
-/**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
- **/
-static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
-{
-	if (vsi) {
-		kfree(vsi->rx_rings);
-		kfree(vsi->tx_rings);
-	}
-
 	return 0;
+
+err_out:
+	i40e_vsi_clear_rings(vsi);
+	return -ENOMEM;
 }
 
 /**
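
[Annotation] Because each queue pair is now one allocation, teardown must free it exactly once through the Tx pointer: i40e_vsi_clear_rings() above kfree_rcu()s tx_rings[i] and merely NULLs rx_rings[i]. A sketch of the lifetime rule (names illustrative):

	struct i40e_ring *tx = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
	struct i40e_ring *rx = &tx[1];	/* not a separate allocation */

	kfree(tx);	/* releases both rings of the pair */
	/* kfree(rx) would be a bug: rx is not an allocation base pointer */
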
@@ -5249,6 +5356,38 @@ static int i40e_init_msix(struct i40e_pf *pf)
 }
 
 /**
+ * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+	struct i40e_q_vector *q_vector;
+
+	/* allocate q_vector */
+	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	q_vector->vsi = vsi;
+	q_vector->v_idx = v_idx;
+	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+	if (vsi->netdev)
+		netif_napi_add(vsi->netdev, &q_vector->napi,
+			       i40e_napi_poll, vsi->work_limit);
+
+	q_vector->rx.latency_range = I40E_LOW_LATENCY;
+	q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
+	/* tie q_vector and vsi together */
+	vsi->q_vectors[v_idx] = q_vector;
+
+	return 0;
+}
+
+/**
 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
@@ -5259,6 +5398,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
 	int v_idx, num_q_vectors;
+	int err;
 
 	/* if not MSIX, give the one vector only to the LAN VSI */
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5268,22 +5408,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
 	else
 		return -EINVAL;
 
-	vsi->q_vectors = kcalloc(num_q_vectors,
-				 sizeof(struct i40e_q_vector),
-				 GFP_KERNEL);
-	if (!vsi->q_vectors)
-		return -ENOMEM;
-
 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-		vsi->q_vectors[v_idx].vsi = vsi;
-		vsi->q_vectors[v_idx].v_idx = v_idx;
-		cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
-		if (vsi->netdev)
-			netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
-				       i40e_napi_poll, vsi->work_limit);
+		err = i40e_alloc_q_vector(vsi, v_idx);
+		if (err)
+			goto err_out;
 	}
 
 	return 0;
+
+err_out:
+	while (v_idx--)
+		i40e_free_q_vector(vsi, v_idx);
+
+	return err;
 }
 
 /**
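
[Annotation] The err_out path above is the standard unwind-on-partial-failure idiom: v_idx holds the index that failed, so "while (v_idx--)" releases exactly the vectors that were successfully allocated, in reverse order, and nothing else. Generic shape (alloc_one/free_one are illustrative):

	int i, err;

	for (i = 0; i < n; i++) {
		err = alloc_one(i);
		if (err)
			goto err_out;
	}
	return 0;

	err_out:
		while (i--)	/* steps back over the ones that succeeded */
			free_one(i);
		return err;
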
@@ -5297,7 +5434,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 		err = i40e_init_msix(pf);
 		if (err) {
-			pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+				       I40E_FLAG_RSS_ENABLED |
 				       I40E_FLAG_MQ_ENABLED |
 				       I40E_FLAG_DCB_ENABLED |
 				       I40E_FLAG_SRIOV_ENABLED |
@@ -5312,14 +5450,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 
 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
 	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+		dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
 		err = pci_enable_msi(pf->pdev);
 		if (err) {
-			dev_info(&pf->pdev->dev,
-				 "MSI init failed (%d), trying legacy.\n", err);
+			dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
 			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
 		}
 	}
 
+	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+		dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+
 	/* track first vector for misc interrupts */
 	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
 }
@@ -5950,7 +6091,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
 	int ret = -ENOENT;
 	struct i40e_pf *pf = vsi->back;
 
-	if (vsi->q_vectors) {
+	if (vsi->q_vectors[0]) {
 		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
 			 vsi->seid);
 		return -EEXIST;
@@ -5972,8 +6113,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
 		goto vector_setup_out;
 	}
 
-	vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
-					 vsi->num_q_vectors, vsi->idx);
+	if (vsi->num_q_vectors)
+		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+						 vsi->num_q_vectors, vsi->idx);
 	if (vsi->base_vector < 0) {
 		dev_info(&pf->pdev->dev,
 			 "failed to get q tracking for VSI %d, err=%d\n",
@@ -7062,8 +7204,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
 	pf->vsi = kzalloc(len, GFP_KERNEL);
-	if (!pf->vsi)
+	if (!pf->vsi) {
+		err = -ENOMEM;
 		goto err_switch_setup;
+	}
 
 	err = i40e_setup_pf_switch(pf);
 	if (err) {