author	PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>	2010-02-03 09:19:12 -0500
committer	David S. Miller <davem@davemloft.net>	2010-02-03 22:48:35 -0500
commit	4a0b9ca015bae64df7d97c9e0a1d33159b36e69f (patch)
tree	5289dc2752eaeec08282a94008ff76c980a3f645 /drivers/net/ixgbe
parent	1a6c14a2c7c313c584f26730e67f062f474bb744 (diff)
ixgbe: Make descriptor ring allocations NUMA-aware
This patch allocates the ring structures themselves on each NUMA node along
with the buffer_info structures. This way we don't allocate the entire ring
memory on a single node in one big block, thus reducing NUMA node memory
crosstalk.

Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
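The core of the change is the per-ring allocation loop in ixgbe_alloc_queues() (shown in full in the diff below). A minimal sketch of the Tx half of that pattern, with error unwinding elided: when no NUMA node is pinned (adapter->node == -1), the loop walks the online nodes round-robin so successive rings land on different nodes, and it prefers node-local memory but falls back to any node rather than failing the allocation.

	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring;

		if (orig_node == -1) {
			/* advance to the next online node, wrapping around */
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		/* try node-local first, then fall back to any node */
		ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
		ring->numa_node = adapter->node; /* reused later by vmalloc_node() */
		adapter->tx_ring[i] = ring;
	}

	adapter->node = orig_node; /* restore the adapter's original node */

The stored ring->numa_node is what lets ixgbe_setup_tx_resources() later place each ring's buffer_info array on the same node via vmalloc_node(size, tx_ring->numa_node).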
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--	drivers/net/ixgbe/ixgbe.h	  5
-rw-r--r--	drivers/net/ixgbe/ixgbe_ethtool.c	 71
-rw-r--r--	drivers/net/ixgbe/ixgbe_fcoe.c	  4
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	276
4 files changed, 189 insertions, 167 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 33b79e812b4d..bffbe0d52d33 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -175,6 +175,7 @@ struct ixgbe_ring {
 
 	struct ixgbe_queue_stats stats;
 	unsigned long reinit_state;
+	int numa_node;
 	u64 rsc_count; /* stat for coalesced packets */
 	u64 rsc_flush; /* stats for flushed packets */
 	u32 restart_queue; /* track tx queue restarts */
@@ -293,7 +294,7 @@ struct ixgbe_adapter {
 	u16 eitr_high;
 
 	/* TX */
-	struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 	int num_tx_queues;
 	u32 tx_timeout_count;
 	bool detect_tx_hung;
@@ -302,7 +303,7 @@ struct ixgbe_adapter {
 	u64 lsc_int;
 
 	/* RX */
-	struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
 	int num_rx_queues;
 	int num_rx_pools;		/* == num_rx_queues in 82598 */
 	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 1525c86cbccf..07a9410c08d4 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -834,8 +834,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
 			       struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_ring *tx_ring = adapter->tx_ring;
-	struct ixgbe_ring *rx_ring = adapter->rx_ring;
+	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
 
 	ring->rx_max_pending = IXGBE_MAX_RXD;
 	ring->tx_max_pending = IXGBE_MAX_TXD;
@@ -867,8 +867,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 	new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
 	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
 
-	if ((new_tx_count == adapter->tx_ring->count) &&
-	    (new_rx_count == adapter->rx_ring->count)) {
+	if ((new_tx_count == adapter->tx_ring[0]->count) &&
+	    (new_rx_count == adapter->rx_ring[0]->count)) {
 		/* nothing to do */
 		return 0;
 	}
@@ -878,25 +878,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].count = new_tx_count;
+			adapter->tx_ring[i]->count = new_tx_count;
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].count = new_rx_count;
+			adapter->rx_ring[i]->count = new_rx_count;
 		adapter->tx_ring_count = new_tx_count;
 		adapter->rx_ring_count = new_rx_count;
-		goto err_setup;
+		goto clear_reset;
 	}
 
-	temp_tx_ring = kcalloc(adapter->num_tx_queues,
-			       sizeof(struct ixgbe_ring), GFP_KERNEL);
+	temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
 	if (!temp_tx_ring) {
 		err = -ENOMEM;
-		goto err_setup;
+		goto clear_reset;
 	}
 
 	if (new_tx_count != adapter->tx_ring_count) {
-		memcpy(temp_tx_ring, adapter->tx_ring,
-		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_tx_queues; i++) {
+			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+			       sizeof(struct ixgbe_ring));
 			temp_tx_ring[i].count = new_tx_count;
 			err = ixgbe_setup_tx_resources(adapter,
 						       &temp_tx_ring[i]);
@@ -904,28 +903,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 				while (i) {
 					i--;
 					ixgbe_free_tx_resources(adapter,
 								&temp_tx_ring[i]);
 				}
-				goto err_setup;
+				goto clear_reset;
 			}
 		}
 		need_update = true;
 	}
 
-	temp_rx_ring = kcalloc(adapter->num_rx_queues,
-			       sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if ((!temp_rx_ring) && (need_update)) {
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
-		kfree(temp_tx_ring);
+	temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+	if (!temp_rx_ring) {
 		err = -ENOMEM;
 		goto err_setup;
 	}
 
 	if (new_rx_count != adapter->rx_ring_count) {
-		memcpy(temp_rx_ring, adapter->rx_ring,
-		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
+			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+			       sizeof(struct ixgbe_ring));
 			temp_rx_ring[i].count = new_rx_count;
 			err = ixgbe_setup_rx_resources(adapter,
 						       &temp_rx_ring[i]);
@@ -947,22 +942,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 
 		/* tx */
 		if (new_tx_count != adapter->tx_ring_count) {
-			kfree(adapter->tx_ring);
-			adapter->tx_ring = temp_tx_ring;
-			temp_tx_ring = NULL;
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				ixgbe_free_tx_resources(adapter,
+							adapter->tx_ring[i]);
+				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+				       sizeof(struct ixgbe_ring));
+			}
 			adapter->tx_ring_count = new_tx_count;
 		}
 
 		/* rx */
 		if (new_rx_count != adapter->rx_ring_count) {
-			kfree(adapter->rx_ring);
-			adapter->rx_ring = temp_rx_ring;
-			temp_rx_ring = NULL;
+			for (i = 0; i < adapter->num_rx_queues; i++) {
+				ixgbe_free_rx_resources(adapter,
+							adapter->rx_ring[i]);
+				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+				       sizeof(struct ixgbe_ring));
+			}
 			adapter->rx_ring_count = new_rx_count;
 		}
 		ixgbe_up(adapter);
 	}
+
+	vfree(temp_rx_ring);
 err_setup:
+	vfree(temp_tx_ring);
+clear_reset:
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 	return err;
 }
@@ -1007,13 +1012,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 	for (j = 0; j < adapter->num_tx_queues; j++) {
-		queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+		queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
 		for (k = 0; k < stat_count; k++)
 			data[i + k] = queue_stat[k];
 		i += k;
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
-		queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+		queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
 		for (k = 0; k < stat_count; k++)
 			data[i + k] = queue_stat[k];
 		i += k;
@@ -1627,7 +1632,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	reg_data |= IXGBE_RXDCTL_ENABLE;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		int j = adapter->rx_ring[0].reg_idx;
+		int j = adapter->rx_ring[0]->reg_idx;
 		u32 k;
 		for (k = 0; k < 10; k++) {
 			if (IXGBE_READ_REG(&adapter->hw,
@@ -2011,7 +2016,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
 
 	/* only valid if in constant ITR mode */
 	switch (adapter->rx_itr_setting) {
@@ -2064,7 +2069,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		return -EINVAL;
 
 	if (ec->tx_max_coalesced_frames_irq)
-		adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+		adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
 
 	if (ec->rx_coalesce_usecs > 1) {
 		/* check the limits */
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index e9a20c88c155..4123dec0dfb7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -525,7 +525,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
 			fcoe_i = f->mask + i % f->indices;
 			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
-			fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
@@ -533,7 +533,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	} else {
 		/* Use single rx queue for FCoE */
 		fcoe_i = f->mask;
-		fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
 				IXGBE_ETQS_QUEUE_EN |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 87e1aa5ba788..2a3c8315e357 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -494,7 +494,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 {
 	u32 rxctrl;
 	int cpu = get_cpu();
-	int q = rx_ring - adapter->rx_ring;
+	int q = rx_ring->reg_idx;
 
 	if (rx_ring->cpu != cpu) {
 		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
@@ -522,7 +522,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
 {
 	u32 txctrl;
 	int cpu = get_cpu();
-	int q = tx_ring - adapter->tx_ring;
+	int q = tx_ring->reg_idx;
 	struct ixgbe_hw *hw = &adapter->hw;
 
 	if (tx_ring->cpu != cpu) {
@@ -556,12 +556,12 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i].cpu = -1;
-		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+		adapter->tx_ring[i]->cpu = -1;
+		ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
 	}
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].cpu = -1;
-		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+		adapter->rx_ring[i]->cpu = -1;
+		ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
 	}
 }
 
@@ -1032,7 +1032,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 				       adapter->num_rx_queues);
 
 		for (i = 0; i < q_vector->rxr_count; i++) {
-			j = adapter->rx_ring[r_idx].reg_idx;
+			j = adapter->rx_ring[r_idx]->reg_idx;
 			ixgbe_set_ivar(adapter, 0, j, v_idx);
 			r_idx = find_next_bit(q_vector->rxr_idx,
 					      adapter->num_rx_queues,
@@ -1042,7 +1042,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 				       adapter->num_tx_queues);
 
 		for (i = 0; i < q_vector->txr_count; i++) {
-			j = adapter->tx_ring[r_idx].reg_idx;
+			j = adapter->tx_ring[r_idx]->reg_idx;
 			ixgbe_set_ivar(adapter, 1, j, v_idx);
 			r_idx = find_next_bit(q_vector->txr_idx,
 					      adapter->num_tx_queues,
@@ -1182,7 +1182,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
-		tx_ring = &(adapter->tx_ring[r_idx]);
+		tx_ring = adapter->tx_ring[r_idx];
 		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
 					   q_vector->tx_itr,
 					   tx_ring->total_packets,
@@ -1197,7 +1197,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
+		rx_ring = adapter->rx_ring[r_idx];
 		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
 					   q_vector->rx_itr,
 					   rx_ring->total_packets,
@@ -1319,7 +1319,7 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 			netif_tx_stop_all_queues(netdev);
 			for (i = 0; i < adapter->num_tx_queues; i++) {
 				struct ixgbe_ring *tx_ring =
-							&adapter->tx_ring[i];
+							adapter->tx_ring[i];
 				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
 						       &tx_ring->reinit_state))
 					schedule_work(&adapter->fdir_reinit_task);
@@ -1378,7 +1378,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
-		tx_ring = &(adapter->tx_ring[r_idx]);
+		tx_ring = adapter->tx_ring[r_idx];
 		tx_ring->total_bytes = 0;
 		tx_ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1406,7 +1406,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
+		rx_ring = adapter->rx_ring[r_idx];
 		rx_ring->total_bytes = 0;
 		rx_ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1436,7 +1436,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
-		ring = &(adapter->tx_ring[r_idx]);
+		ring = adapter->tx_ring[r_idx];
 		ring->total_bytes = 0;
 		ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1445,7 +1445,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
-		ring = &(adapter->rx_ring[r_idx]);
+		ring = adapter->rx_ring[r_idx];
 		ring->total_bytes = 0;
 		ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1476,7 +1476,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 	long r_idx;
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
+	rx_ring = adapter->rx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 		ixgbe_update_rx_dca(adapter, rx_ring);
@@ -1517,7 +1517,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
-		ring = &(adapter->tx_ring[r_idx]);
+		ring = adapter->tx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 			ixgbe_update_tx_dca(adapter, ring);
@@ -1533,7 +1533,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 	budget = max(budget, 1);
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
-		ring = &(adapter->rx_ring[r_idx]);
+		ring = adapter->rx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 			ixgbe_update_rx_dca(adapter, ring);
@@ -1544,7 +1544,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 	}
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	ring = &(adapter->rx_ring[r_idx]);
+	ring = adapter->rx_ring[r_idx];
 	/* If all Rx work done, exit the polling mode */
 	if (work_done < budget) {
 		napi_complete(napi);
@@ -1577,7 +1577,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
 	long r_idx;
 
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	tx_ring = &(adapter->tx_ring[r_idx]);
+	tx_ring = adapter->tx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 		ixgbe_update_tx_dca(adapter, tx_ring);
@@ -1762,8 +1762,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 	u8 current_itr;
 	u32 new_itr = q_vector->eitr;
-	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
-	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
+	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
 
 	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
 					    q_vector->tx_itr,
@@ -1875,10 +1875,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 	ixgbe_check_fan_failure(adapter, eicr);
 
 	if (napi_schedule_prep(&(q_vector->napi))) {
-		adapter->tx_ring[0].total_packets = 0;
-		adapter->tx_ring[0].total_bytes = 0;
-		adapter->rx_ring[0].total_packets = 0;
-		adapter->rx_ring[0].total_bytes = 0;
+		adapter->tx_ring[0]->total_packets = 0;
+		adapter->tx_ring[0]->total_bytes = 0;
+		adapter->rx_ring[0]->total_packets = 0;
+		adapter->rx_ring[0]->total_bytes = 0;
 		/* would disable interrupts here but EIAM disabled it */
 		__napi_schedule(&(q_vector->napi));
 	}
@@ -2010,7 +2010,7 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct ixgbe_ring *ring = &adapter->tx_ring[i];
+		struct ixgbe_ring *ring = adapter->tx_ring[i];
 		j = ring->reg_idx;
 		tdba = ring->dma;
 		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -2020,8 +2020,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
-		adapter->tx_ring[i].head = IXGBE_TDH(j);
-		adapter->tx_ring[i].tail = IXGBE_TDT(j);
+		adapter->tx_ring[i]->head = IXGBE_TDH(j);
+		adapter->tx_ring[i]->tail = IXGBE_TDT(j);
 		/*
 		 * Disable Tx Head Writeback RO bit, since this hoses
 		 * bookkeeping if things aren't delivered in order.
@@ -2168,7 +2168,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
 	u32 rscctrl;
 	int rx_buf_len;
 
-	rx_ring = &adapter->rx_ring[index];
+	rx_ring = adapter->rx_ring[index];
 	j = rx_ring->reg_idx;
 	rx_buf_len = rx_ring->rx_buf_len;
 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
@@ -2266,7 +2266,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 #endif
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
-	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+	rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
 	/* disable receives while setting up the descriptors */
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -2276,7 +2276,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rx_ring = &adapter->rx_ring[i];
+		rx_ring = adapter->rx_ring[i];
 		rdba = rx_ring->dma;
 		j = rx_ring->reg_idx;
 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
@@ -2483,7 +2483,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
 	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			u32 ctrl;
-			j = adapter->rx_ring[i].reg_idx;
+			j = adapter->rx_ring[i]->reg_idx;
 			ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
 			ctrl |= IXGBE_RXDCTL_VME;
 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
@@ -2646,7 +2646,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 	ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
+		j = adapter->tx_ring[i]->reg_idx;
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 		/* PThresh workaround for Tx hang with DFP enabled. */
 		txdctl |= 32;
@@ -2663,7 +2663,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		j = adapter->rx_ring[i].reg_idx;
+		j = adapter->rx_ring[i]->reg_idx;
 		vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 		vlnctrl |= IXGBE_RXDCTL_VME;
 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -2703,7 +2703,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 #endif /* IXGBE_FCOE */
 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].atr_sample_rate =
+			adapter->tx_ring[i]->atr_sample_rate =
 						       adapter->atr_sample_rate;
 		ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
 	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
@@ -2713,8 +2713,8 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 	ixgbe_configure_tx(adapter);
 	ixgbe_configure_rx(adapter);
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
-				       (adapter->rx_ring[i].count - 1));
+		ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
+				       (adapter->rx_ring[i]->count - 1));
 }
 
 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -2797,7 +2797,7 @@ link_cfg_out:
 static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 					      int rxr)
 {
-	int j = adapter->rx_ring[rxr].reg_idx;
+	int j = adapter->rx_ring[rxr]->reg_idx;
 	int k;
 
 	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
@@ -2811,8 +2811,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 		DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
 			"not set within the polling period\n", rxr);
 	}
-	ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
-			      (adapter->rx_ring[rxr].count - 1));
+	ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
+			      (adapter->rx_ring[rxr]->count - 1));
 }
 
 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -2899,7 +2899,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
+		j = adapter->tx_ring[i]->reg_idx;
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
 		txdctl |= (8 << 16);
@@ -2913,7 +2913,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
 	}
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
+		j = adapter->tx_ring[i]->reg_idx;
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 		txdctl |= IXGBE_TXDCTL_ENABLE;
 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
@@ -2932,7 +2932,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	}
 
 	for (i = 0; i < num_rx_rings; i++) {
-		j = adapter->rx_ring[i].reg_idx;
+		j = adapter->rx_ring[i]->reg_idx;
 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 		/* enable PTHRESH=32 descriptors (half the internal cache)
 		 * and HTHRESH=0 descriptors (to minimize latency on fetch),
@@ -3006,7 +3006,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		set_bit(__IXGBE_FDIR_INIT_DONE,
-			&(adapter->tx_ring[i].reinit_state));
+			&(adapter->tx_ring[i]->reinit_state));
 
 	/* enable transmits */
 	netif_tx_start_all_queues(netdev);
@@ -3177,7 +3177,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+		ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -3189,7 +3189,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+		ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3240,7 +3240,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
 	/* disable transmits in the hardware now that interrupts are off */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
+		j = adapter->tx_ring[i]->reg_idx;
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
 				(txdctl & ~IXGBE_TXDCTL_ENABLE));
@@ -3280,13 +3280,13 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
 
 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
-		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
-		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+		ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
+		ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
 	}
 #endif
 
-	tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
-	ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
+	tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
+	ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
 
 	if (!tx_clean_complete)
 		work_done = budget;
@@ -3574,9 +3574,9 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].reg_idx = i;
+			adapter->rx_ring[i]->reg_idx = i;
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].reg_idx = i;
+			adapter->tx_ring[i]->reg_idx = i;
 		ret = true;
 	} else {
 		ret = false;
@@ -3603,8 +3603,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 		/* the number of queues is assumed to be symmetric */
 		for (i = 0; i < dcb_i; i++) {
-			adapter->rx_ring[i].reg_idx = i << 3;
-			adapter->tx_ring[i].reg_idx = i << 2;
+			adapter->rx_ring[i]->reg_idx = i << 3;
+			adapter->tx_ring[i]->reg_idx = i << 2;
 		}
 		ret = true;
 	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -3622,18 +3622,18 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 			 * Rx TC0-TC7 are offset by 16 queues each
 			 */
 			for (i = 0; i < 3; i++) {
-				adapter->tx_ring[i].reg_idx = i << 5;
-				adapter->rx_ring[i].reg_idx = i << 4;
+				adapter->tx_ring[i]->reg_idx = i << 5;
+				adapter->rx_ring[i]->reg_idx = i << 4;
 			}
 			for ( ; i < 5; i++) {
-				adapter->tx_ring[i].reg_idx =
+				adapter->tx_ring[i]->reg_idx =
 					((i + 2) << 4);
-				adapter->rx_ring[i].reg_idx = i << 4;
+				adapter->rx_ring[i]->reg_idx = i << 4;
 			}
 			for ( ; i < dcb_i; i++) {
-				adapter->tx_ring[i].reg_idx =
+				adapter->tx_ring[i]->reg_idx =
 					((i + 8) << 3);
-				adapter->rx_ring[i].reg_idx = i << 4;
+				adapter->rx_ring[i]->reg_idx = i << 4;
 			}
 
 			ret = true;
@@ -3646,12 +3646,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 			 *
 			 * Rx TC0-TC3 are offset by 32 queues each
 			 */
-			adapter->tx_ring[0].reg_idx = 0;
-			adapter->tx_ring[1].reg_idx = 64;
-			adapter->tx_ring[2].reg_idx = 96;
-			adapter->tx_ring[3].reg_idx = 112;
+			adapter->tx_ring[0]->reg_idx = 0;
+			adapter->tx_ring[1]->reg_idx = 64;
+			adapter->tx_ring[2]->reg_idx = 96;
+			adapter->tx_ring[3]->reg_idx = 112;
 			for (i = 0 ; i < dcb_i; i++)
-				adapter->rx_ring[i].reg_idx = i << 5;
+				adapter->rx_ring[i]->reg_idx = i << 5;
 
 			ret = true;
 		} else {
@@ -3684,9 +3684,9 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
 	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
 	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].reg_idx = i;
+			adapter->rx_ring[i]->reg_idx = i;
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].reg_idx = i;
+			adapter->tx_ring[i]->reg_idx = i;
 		ret = true;
 	}
 
@@ -3714,8 +3714,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 
 		ixgbe_cache_ring_dcb(adapter);
 		/* find out queues in TC for FCoE */
-		fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
-		fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
+		fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+		fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
 		/*
 		 * In 82599, the number of Tx queues for each traffic
 		 * class for both 8-TC and 4-TC modes are:
@@ -3746,8 +3746,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 			fcoe_tx_i = f->mask;
 		}
 		for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-			adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
-			adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
+			adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+			adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
 		}
 		ret = true;
 	}
@@ -3765,8 +3765,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
  */
 static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
 {
-	adapter->rx_ring[0].reg_idx = adapter->num_vfs * 2;
-	adapter->tx_ring[0].reg_idx = adapter->num_vfs * 2;
+	adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
+	adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
 	if (adapter->num_vfs)
 		return true;
 	else
@@ -3787,8 +3787,8 @@ static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
 static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 {
 	/* start with default case */
-	adapter->rx_ring[0].reg_idx = 0;
-	adapter->tx_ring[0].reg_idx = 0;
+	adapter->rx_ring[0]->reg_idx = 0;
+	adapter->tx_ring[0]->reg_idx = 0;
 
 	if (ixgbe_cache_ring_sriov(adapter))
 		return;
@@ -3821,33 +3821,63 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 {
 	int i;
-
-	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-				   sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if (!adapter->tx_ring)
-		goto err_tx_ring_allocation;
-
-	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-				   sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if (!adapter->rx_ring)
-		goto err_rx_ring_allocation;
+	int orig_node = adapter->node;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i].count = adapter->tx_ring_count;
-		adapter->tx_ring[i].queue_index = i;
+		struct ixgbe_ring *ring = adapter->tx_ring[i];
+		if (orig_node == -1) {
+			int cur_node = next_online_node(adapter->node);
+			if (cur_node == MAX_NUMNODES)
+				cur_node = first_online_node;
+			adapter->node = cur_node;
+		}
+		ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+				    adapter->node);
+		if (!ring)
+			ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+		if (!ring)
+			goto err_tx_ring_allocation;
+		ring->count = adapter->tx_ring_count;
+		ring->queue_index = i;
+		ring->numa_node = adapter->node;
+
+		adapter->tx_ring[i] = ring;
 	}
 
+	/* Restore the adapter's original node */
+	adapter->node = orig_node;
+
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].count = adapter->rx_ring_count;
-		adapter->rx_ring[i].queue_index = i;
+		struct ixgbe_ring *ring = adapter->rx_ring[i];
+		if (orig_node == -1) {
+			int cur_node = next_online_node(adapter->node);
+			if (cur_node == MAX_NUMNODES)
+				cur_node = first_online_node;
+			adapter->node = cur_node;
+		}
+		ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+				    adapter->node);
+		if (!ring)
+			ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+		if (!ring)
+			goto err_rx_ring_allocation;
+		ring->count = adapter->rx_ring_count;
+		ring->queue_index = i;
+		ring->numa_node = adapter->node;
+
+		adapter->rx_ring[i] = ring;
 	}
 
+	/* Restore the adapter's original node */
+	adapter->node = orig_node;
+
 	ixgbe_cache_ring_register(adapter);
 
 	return 0;
 
 err_rx_ring_allocation:
-	kfree(adapter->tx_ring);
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		kfree(adapter->tx_ring[i]);
 err_tx_ring_allocation:
 	return -ENOMEM;
 }
@@ -4077,10 +4107,16 @@ err_set_interrupt:
  **/
 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
 {
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
-	adapter->tx_ring = NULL;
-	adapter->rx_ring = NULL;
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		kfree(adapter->tx_ring[i]);
+		adapter->tx_ring[i] = NULL;
+	}
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		kfree(adapter->rx_ring[i]);
+		adapter->rx_ring[i] = NULL;
+	}
 
 	ixgbe_free_q_vectors(adapter);
 	ixgbe_reset_interrupt_capability(adapter);
@@ -4272,7 +4308,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 	int size;
 
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
-	tx_ring->tx_buffer_info = vmalloc_node(size, adapter->node);
+	tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
 	if (!tx_ring->tx_buffer_info)
 		tx_ring->tx_buffer_info = vmalloc(size);
 	if (!tx_ring->tx_buffer_info)
@@ -4314,25 +4350,15 @@ err:
 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 {
 	int i, err = 0;
-	int orig_node = adapter->node;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		if (orig_node == -1) {
-			int cur_node = next_online_node(adapter->node);
-			if (cur_node == MAX_NUMNODES)
-				cur_node = first_online_node;
-			adapter->node = cur_node;
-		}
-		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
 		if (!err)
 			continue;
 		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
 		break;
 	}
 
-	/* reset the node back to its starting value */
-	adapter->node = orig_node;
-
 	return err;
 }
 
@@ -4396,25 +4422,15 @@ alloc_failed:
 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 {
 	int i, err = 0;
-	int orig_node = adapter->node;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		if (orig_node == -1) {
-			int cur_node = next_online_node(adapter->node);
-			if (cur_node == MAX_NUMNODES)
-				cur_node = first_online_node;
-			adapter->node = cur_node;
-		}
-		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
 		if (!err)
 			continue;
 		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
 		break;
 	}
 
-	/* reset the node back to its starting value */
-	adapter->node = orig_node;
-
 	return err;
 }
 
@@ -4451,8 +4467,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		if (adapter->tx_ring[i].desc)
-			ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (adapter->tx_ring[i]->desc)
+			ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
 }
 
 /**
@@ -4488,8 +4504,8 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		if (adapter->rx_ring[i].desc)
-			ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (adapter->rx_ring[i]->desc)
+			ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -4766,8 +4782,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 			adapter->hw_rx_no_dma_resources +=
 				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			rsc_count += adapter->rx_ring[i].rsc_count;
-			rsc_flush += adapter->rx_ring[i].rsc_flush;
+			rsc_count += adapter->rx_ring[i]->rsc_count;
+			rsc_flush += adapter->rx_ring[i]->rsc_flush;
 		}
 		adapter->rsc_total_count = rsc_count;
 		adapter->rsc_total_flush = rsc_flush;
@@ -4775,11 +4791,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 
 	/* gather some stats to the adapter struct that are per queue */
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		restart_queue += adapter->tx_ring[i].restart_queue;
+		restart_queue += adapter->tx_ring[i]->restart_queue;
 	adapter->restart_queue = restart_queue;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+		non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
 	adapter->non_eop_descs = non_eop_descs;
 
 	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -5018,7 +5034,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
 	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
 			set_bit(__IXGBE_FDIR_INIT_DONE,
-				&(adapter->tx_ring[i].reinit_state));
+				&(adapter->tx_ring[i]->reinit_state));
 	} else {
 		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
 			"ignored adding FDIR ATR filters \n");
@@ -5120,7 +5136,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 
 	if (!netif_carrier_ok(netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			tx_ring = &adapter->tx_ring[i];
+			tx_ring = adapter->tx_ring[i];
 			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
 				some_tx_pending = 1;
 				break;
@@ -5622,7 +5638,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 		}
 	}
 
-	tx_ring = &adapter->tx_ring[skb->queue_mapping];
+	tx_ring = adapter->tx_ring[skb->queue_mapping];
 
 	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
 	    (skb->protocol == htons(ETH_P_FCOE))) {