author    Don Skidmore <donald.c.skidmore@intel.com>  2014-01-16 05:30:08 -0500
committer David S. Miller <davem@davemloft.net>       2014-01-16 18:34:24 -0500
commit    87e70ab9088fc81e02bffa30e71eb1a18891608b (patch)
tree      fc8ccdfbbfc49f3d89814f7ca494872c0c7426fb /drivers/net/ethernet/intel/ixgbevf
parent    27ae296716e527e5e5f0b54bc92739a2864d2b9a (diff)
ixgbevf: Convert ring storage from a pointer to an array to an array of pointers
This changes how we store the ring arrays in the adapter struct. We used to keep a pointer to a single contiguous array of rings; we now keep an array of pointers, one per ring. This will allow us to support multiple queues on multiple nodes: at some point each ring could be reallocated so that it sits on its local node if needed.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Don Skidmore <donald.c.skidmore@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
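For orientation before reading the hunks, here is a minimal user-space C sketch of the storage change. It is illustrative only: "struct ring", calloc/free and the helper names stand in for the driver's struct ixgbevf_ring, kzalloc/kfree and ixgbevf_alloc_queues(); the node-local placement mentioned in the comment is the future possibility the commit message refers to, not something this patch does.

/*
 * Sketch: pointer-to-array vs. array-of-pointers ring storage.
 * calloc/free stand in for kzalloc/kfree; not driver code.
 */
#include <stdlib.h>

#define MAX_TX_QUEUES 8

struct ring { int count; /* ... descriptor-ring fields ... */ };

struct adapter_old {
	struct ring *tx_ring;                /* one contiguous block for all queues */
	int num_tx_queues;
};

struct adapter_new {
	struct ring *tx_ring[MAX_TX_QUEUES]; /* one pointer per queue */
	int num_tx_queues;
};

/* Old scheme: a single allocation; rings addressed as tx_ring[i]. */
static int alloc_old(struct adapter_old *a)
{
	a->tx_ring = calloc(a->num_tx_queues, sizeof(*a->tx_ring));
	return a->tx_ring ? 0 : -1;
}

/*
 * New scheme: each ring is allocated on its own, so a ring could later be
 * placed near the node that services its queue (e.g. a node-aware
 * allocation in the kernel); unwind the partial allocations on failure.
 */
static int alloc_new(struct adapter_new *a)
{
	int i;

	for (i = 0; i < a->num_tx_queues; i++) {
		a->tx_ring[i] = calloc(1, sizeof(*a->tx_ring[i]));
		if (!a->tx_ring[i])
			goto err;
	}
	return 0;
err:
	while (i--) {
		free(a->tx_ring[i]);
		a->tx_ring[i] = NULL;
	}
	return -1;
}

int main(void)
{
	struct adapter_new a = { .num_tx_queues = 4 };
	return alloc_new(&a);
}

The unwind loop in alloc_new() mirrors the err_allocation path that the patch adds to ixgbevf_alloc_queues() below.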
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf')
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c        30
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h         4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c   150
3 files changed, 98 insertions, 86 deletions
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 54d9acef9c4e..515ba4e29760 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -286,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].count = new_tx_count;
+			adapter->tx_ring[i]->count = new_tx_count;
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].count = new_rx_count;
+			adapter->rx_ring[i]->count = new_rx_count;
 		adapter->tx_ring_count = new_tx_count;
 		adapter->rx_ring_count = new_rx_count;
 		goto clear_reset;
@@ -303,7 +303,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		/* clone ring and setup updated count */
-		tx_ring[i] = adapter->tx_ring[i];
+		tx_ring[i] = *adapter->tx_ring[i];
 		tx_ring[i].count = new_tx_count;
 		err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
 		if (!err)
@@ -329,7 +329,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		/* clone ring and setup updated count */
-		rx_ring[i] = adapter->rx_ring[i];
+		rx_ring[i] = *adapter->rx_ring[i];
 		rx_ring[i].count = new_rx_count;
 		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
 		if (!err)
@@ -352,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	/* Tx */
 	if (tx_ring) {
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			ixgbevf_free_tx_resources(adapter,
-						  &adapter->tx_ring[i]);
-			adapter->tx_ring[i] = tx_ring[i];
+			ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
+			*adapter->tx_ring[i] = tx_ring[i];
 		}
 		adapter->tx_ring_count = new_tx_count;
 
@@ -365,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	/* Rx */
 	if (rx_ring) {
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			ixgbevf_free_rx_resources(adapter,
-						  &adapter->rx_ring[i]);
-			adapter->rx_ring[i] = rx_ring[i];
+			ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
+			*adapter->rx_ring[i] = rx_ring[i];
 		}
 		adapter->rx_ring_count = new_rx_count;
 
@@ -413,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
 		tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rx_yields += adapter->rx_ring[i].bp_yields;
-		rx_cleaned += adapter->rx_ring[i].bp_cleaned;
-		rx_yields += adapter->rx_ring[i].bp_yields;
+		rx_yields += adapter->rx_ring[i]->bp_yields;
+		rx_cleaned += adapter->rx_ring[i]->bp_cleaned;
+		rx_yields += adapter->rx_ring[i]->bp_yields;
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		tx_yields += adapter->tx_ring[i].bp_yields;
-		tx_cleaned += adapter->tx_ring[i].bp_cleaned;
-		tx_yields += adapter->tx_ring[i].bp_yields;
+		tx_yields += adapter->tx_ring[i]->bp_yields;
+		tx_cleaned += adapter->tx_ring[i]->bp_cleaned;
+		tx_yields += adapter->tx_ring[i]->bp_yields;
 	}
 
 	adapter->bp_rx_yields = rx_yields;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index d7cec1d59821..0547e40980cb 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -327,7 +327,7 @@ struct ixgbevf_adapter {
 	u32 eims_other;
 
 	/* TX */
-	struct ixgbevf_ring *tx_ring;	/* One per active queue */
+	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
 	int num_tx_queues;
 	u64 restart_queue;
 	u64 hw_csum_tx_good;
@@ -337,7 +337,7 @@ struct ixgbevf_adapter {
 	u32 tx_timeout_count;
 
 	/* RX */
-	struct ixgbevf_ring *rx_ring;	/* One per active queue */
+	struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
 	int num_rx_queues;
 	u64 hw_csum_rx_error;
 	u64 hw_rx_no_dma_resources;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 74af295ae019..202fc47e00d2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -848,8 +848,8 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
 {
 	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	a->rx_ring[r_idx].next = q_vector->rx.ring;
-	q_vector->rx.ring = &a->rx_ring[r_idx];
+	a->rx_ring[r_idx]->next = q_vector->rx.ring;
+	q_vector->rx.ring = a->rx_ring[r_idx];
 	q_vector->rx.count++;
 }
 
@@ -858,8 +858,8 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
 {
 	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	a->tx_ring[t_idx].next = q_vector->tx.ring;
-	q_vector->tx.ring = &a->tx_ring[t_idx];
+	a->tx_ring[t_idx]->next = q_vector->tx.ring;
+	q_vector->tx.ring = a->tx_ring[t_idx];
 	q_vector->tx.count++;
 }
 
@@ -1100,7 +1100,7 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
+		struct ixgbevf_ring *ring = adapter->tx_ring[i];
 		j = ring->reg_idx;
 		tdba = ring->dma;
 		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -1130,7 +1130,7 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 srrctl;
 
-	rx_ring = &adapter->rx_ring[index];
+	rx_ring = adapter->rx_ring[index];
 
 	srrctl = IXGBE_SRRCTL_DROP_EN;
 
@@ -1188,7 +1188,7 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
 		rx_buf_len = IXGBEVF_RXBUFFER_10K;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
 }
 
 /**
@@ -1212,7 +1212,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+		struct ixgbevf_ring *ring = adapter->rx_ring[i];
 		rdba = ring->dma;
 		j = ring->reg_idx;
 		rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
@@ -1389,7 +1389,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 
 	if (num_tcs > 1) {
 		/* update default Tx ring register index */
-		adapter->tx_ring[0].reg_idx = def_q;
+		adapter->tx_ring[0]->reg_idx = def_q;
 
 		/* we need as many queues as traffic classes */
 		num_rx_queues = num_tcs;
@@ -1421,7 +1421,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 	ixgbevf_configure_tx(adapter);
 	ixgbevf_configure_rx(adapter);
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+		struct ixgbevf_ring *ring = adapter->rx_ring[i];
 		ixgbevf_alloc_rx_buffers(adapter, ring,
 					 ixgbevf_desc_unused(ring));
 	}
@@ -1429,24 +1429,23 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 
 #define IXGBEVF_MAX_RX_DESC_POLL 10
 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-					 int rxr)
+					 struct ixgbevf_ring *ring)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
 	u32 rxdctl;
-	int j = adapter->rx_ring[rxr].reg_idx;
+	u8 reg_idx = ring->reg_idx;
 
 	do {
 		usleep_range(1000, 2000);
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
 	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 
 	if (!wait_loop)
 		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
-		       rxr);
+		       reg_idx);
 
-	ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
-				(adapter->rx_ring[rxr].count - 1));
+	ixgbevf_release_rx_desc(ring, ring->count - 1);
 }
 
 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
@@ -1541,7 +1540,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 	u32 txdctl, rxdctl;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
+		j = adapter->tx_ring[i]->reg_idx;
 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
 		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
 		txdctl |= (8 << 16);
@@ -1549,14 +1548,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
+		j = adapter->tx_ring[i]->reg_idx;
 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
 		txdctl |= IXGBE_TXDCTL_ENABLE;
 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
 	}
 
 	for (i = 0; i < num_rx_rings; i++) {
-		j = adapter->rx_ring[i].reg_idx;
+		j = adapter->rx_ring[i]->reg_idx;
 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
 		if (hw->mac.type == ixgbe_mac_X540_vf) {
@@ -1565,7 +1564,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 				IXGBE_RXDCTL_RLPML_EN);
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
-		ixgbevf_rx_desc_queue_enable(adapter, i);
+		ixgbevf_rx_desc_queue_enable(adapter, adapter->rx_ring[i]);
 	}
 
 	ixgbevf_configure_msix(adapter);
@@ -1686,7 +1685,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+		ixgbevf_clean_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -1698,7 +1697,7 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+		ixgbevf_clean_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 void ixgbevf_down(struct ixgbevf_adapter *adapter)
@@ -1713,7 +1712,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
 	/* disable all enabled rx queues */
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
+		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
 	netif_tx_disable(netdev);
 
@@ -1734,7 +1733,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
 	/* disable transmits in the hardware now that interrupts are off */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
+		j = adapter->tx_ring[i]->reg_idx;
 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
 				(txdctl & ~IXGBE_TXDCTL_ENABLE));
@@ -1875,40 +1874,50 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
 **/
 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
 {
-	int i;
+	struct ixgbevf_ring *ring;
+	int rx = 0, tx = 0;
 
-	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!adapter->tx_ring)
-		goto err_tx_ring_allocation;
+	for (; tx < adapter->num_tx_queues; tx++) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring)
+			goto err_allocation;
 
-	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!adapter->rx_ring)
-		goto err_rx_ring_allocation;
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+		ring->count = adapter->tx_ring_count;
+		ring->queue_index = tx;
+		ring->reg_idx = tx;
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i].count = adapter->tx_ring_count;
-		adapter->tx_ring[i].queue_index = i;
-		/* reg_idx may be remapped later by DCB config */
-		adapter->tx_ring[i].reg_idx = i;
-		adapter->tx_ring[i].dev = &adapter->pdev->dev;
-		adapter->tx_ring[i].netdev = adapter->netdev;
+		adapter->tx_ring[tx] = ring;
 	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].count = adapter->rx_ring_count;
-		adapter->rx_ring[i].queue_index = i;
-		adapter->rx_ring[i].reg_idx = i;
-		adapter->rx_ring[i].dev = &adapter->pdev->dev;
-		adapter->rx_ring[i].netdev = adapter->netdev;
+	for (; rx < adapter->num_rx_queues; rx++) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring)
+			goto err_allocation;
+
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		ring->count = adapter->rx_ring_count;
+		ring->queue_index = rx;
+		ring->reg_idx = rx;
+
+		adapter->rx_ring[rx] = ring;
 	}
 
 	return 0;
 
-err_rx_ring_allocation:
-	kfree(adapter->tx_ring);
-err_tx_ring_allocation:
+err_allocation:
+	while (tx) {
+		kfree(adapter->tx_ring[--tx]);
+		adapter->tx_ring[tx] = NULL;
+	}
+
+	while (rx) {
+		kfree(adapter->rx_ring[--rx]);
+		adapter->rx_ring[rx] = NULL;
+	}
 	return -ENOMEM;
 }
 
@@ -2099,6 +2108,17 @@ err_set_interrupt:
 **/
 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
 {
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		kfree(adapter->tx_ring[i]);
+		adapter->tx_ring[i] = NULL;
+	}
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		kfree(adapter->rx_ring[i]);
+		adapter->rx_ring[i] = NULL;
+	}
+
 	adapter->num_tx_queues = 0;
 	adapter->num_rx_queues = 0;
 
@@ -2229,11 +2249,11 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		adapter->hw_csum_rx_error +=
-			adapter->rx_ring[i].hw_csum_rx_error;
+			adapter->rx_ring[i]->hw_csum_rx_error;
 		adapter->hw_csum_rx_good +=
-			adapter->rx_ring[i].hw_csum_rx_good;
-		adapter->rx_ring[i].hw_csum_rx_error = 0;
-		adapter->rx_ring[i].hw_csum_rx_good = 0;
+			adapter->rx_ring[i]->hw_csum_rx_good;
+		adapter->rx_ring[i]->hw_csum_rx_error = 0;
+		adapter->rx_ring[i]->hw_csum_rx_good = 0;
 	}
 }
 
@@ -2413,10 +2433,8 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		if (adapter->tx_ring[i].desc)
-			ixgbevf_free_tx_resources(adapter,
-						  &adapter->tx_ring[i]);
-
+		if (adapter->tx_ring[i]->desc)
+			ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
 }
 
 /**
@@ -2471,7 +2489,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		err = ixgbevf_setup_tx_resources(adapter, adapter->tx_ring[i]);
 		if (!err)
 			continue;
 		hw_dbg(&adapter->hw,
@@ -2533,7 +2551,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
 		if (!err)
 			continue;
 		hw_dbg(&adapter->hw,
@@ -2577,9 +2595,8 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		if (adapter->rx_ring[i].desc)
-			ixgbevf_free_rx_resources(adapter,
-						  &adapter->rx_ring[i]);
+		if (adapter->rx_ring[i]->desc)
+			ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -3069,7 +3086,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 	}
 
-	tx_ring = &adapter->tx_ring[r_idx];
+	tx_ring = adapter->tx_ring[r_idx];
 
 	/*
 	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3282,7 +3299,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		ring = &adapter->rx_ring[i];
+		ring = adapter->rx_ring[i];
 		do {
 			start = u64_stats_fetch_begin_bh(&ring->syncp);
 			bytes = ring->total_bytes;
@@ -3293,7 +3310,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		ring = &adapter->tx_ring[i];
+		ring = adapter->tx_ring[i];
 		do {
 			start = u64_stats_fetch_begin_bh(&ring->syncp);
 			bytes = ring->total_bytes;
@@ -3528,9 +3545,6 @@ static void ixgbevf_remove(struct pci_dev *pdev)
 
 	hw_dbg(&adapter->hw, "Remove complete\n");
 
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
-
 	free_netdev(netdev);
 
 	pci_disable_device(pdev);