about summary refs log tree commit diff stats
path: root/drivers/net/ixgbe/ixgbe_ethtool.c
diff options
context:
space:
mode:
author: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com> 2010-02-03 09:19:12 -0500
committer: David S. Miller <davem@davemloft.net> 2010-02-03 22:48:35 -0500
commit4a0b9ca015bae64df7d97c9e0a1d33159b36e69f (patch)
tree5289dc2752eaeec08282a94008ff76c980a3f645 /drivers/net/ixgbe/ixgbe_ethtool.c
parent1a6c14a2c7c313c584f26730e67f062f474bb744 (diff)
ixgbe: Make descriptor ring allocations NUMA-aware
This patch allocates the ring structures themselves on each NUMA node along with the buffer_info structures. This way we don't allocate the entire ring memory on a single node in one big block, thus reducing NUMA node memory crosstalk.

Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_ethtool.c')
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 71
1 file changed, 38 insertions(+), 33 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 1525c86cbccf..07a9410c08d4 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -834,8 +834,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
834 struct ethtool_ringparam *ring) 834 struct ethtool_ringparam *ring)
835{ 835{
836 struct ixgbe_adapter *adapter = netdev_priv(netdev); 836 struct ixgbe_adapter *adapter = netdev_priv(netdev);
837 struct ixgbe_ring *tx_ring = adapter->tx_ring; 837 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
838 struct ixgbe_ring *rx_ring = adapter->rx_ring; 838 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
839 839
840 ring->rx_max_pending = IXGBE_MAX_RXD; 840 ring->rx_max_pending = IXGBE_MAX_RXD;
841 ring->tx_max_pending = IXGBE_MAX_TXD; 841 ring->tx_max_pending = IXGBE_MAX_TXD;
@@ -867,8 +867,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
867 new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); 867 new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
868 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 868 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
869 869
870 if ((new_tx_count == adapter->tx_ring->count) && 870 if ((new_tx_count == adapter->tx_ring[0]->count) &&
871 (new_rx_count == adapter->rx_ring->count)) { 871 (new_rx_count == adapter->rx_ring[0]->count)) {
872 /* nothing to do */ 872 /* nothing to do */
873 return 0; 873 return 0;
874 } 874 }
@@ -878,25 +878,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
878 878
879 if (!netif_running(adapter->netdev)) { 879 if (!netif_running(adapter->netdev)) {
880 for (i = 0; i < adapter->num_tx_queues; i++) 880 for (i = 0; i < adapter->num_tx_queues; i++)
881 adapter->tx_ring[i].count = new_tx_count; 881 adapter->tx_ring[i]->count = new_tx_count;
882 for (i = 0; i < adapter->num_rx_queues; i++) 882 for (i = 0; i < adapter->num_rx_queues; i++)
883 adapter->rx_ring[i].count = new_rx_count; 883 adapter->rx_ring[i]->count = new_rx_count;
884 adapter->tx_ring_count = new_tx_count; 884 adapter->tx_ring_count = new_tx_count;
885 adapter->rx_ring_count = new_rx_count; 885 adapter->rx_ring_count = new_rx_count;
886 goto err_setup; 886 goto clear_reset;
887 } 887 }
888 888
889 temp_tx_ring = kcalloc(adapter->num_tx_queues, 889 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
890 sizeof(struct ixgbe_ring), GFP_KERNEL);
891 if (!temp_tx_ring) { 890 if (!temp_tx_ring) {
892 err = -ENOMEM; 891 err = -ENOMEM;
893 goto err_setup; 892 goto clear_reset;
894 } 893 }
895 894
896 if (new_tx_count != adapter->tx_ring_count) { 895 if (new_tx_count != adapter->tx_ring_count) {
897 memcpy(temp_tx_ring, adapter->tx_ring,
898 adapter->num_tx_queues * sizeof(struct ixgbe_ring));
899 for (i = 0; i < adapter->num_tx_queues; i++) { 896 for (i = 0; i < adapter->num_tx_queues; i++) {
897 memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
898 sizeof(struct ixgbe_ring));
900 temp_tx_ring[i].count = new_tx_count; 899 temp_tx_ring[i].count = new_tx_count;
901 err = ixgbe_setup_tx_resources(adapter, 900 err = ixgbe_setup_tx_resources(adapter,
902 &temp_tx_ring[i]); 901 &temp_tx_ring[i]);
@@ -904,28 +903,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
904 while (i) { 903 while (i) {
905 i--; 904 i--;
906 ixgbe_free_tx_resources(adapter, 905 ixgbe_free_tx_resources(adapter,
907 &temp_tx_ring[i]); 906 &temp_tx_ring[i]);
908 } 907 }
909 goto err_setup; 908 goto clear_reset;
910 } 909 }
911 } 910 }
912 need_update = true; 911 need_update = true;
913 } 912 }
914 913
915 temp_rx_ring = kcalloc(adapter->num_rx_queues, 914 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
916 sizeof(struct ixgbe_ring), GFP_KERNEL); 915 if (!temp_rx_ring) {
917 if ((!temp_rx_ring) && (need_update)) {
918 for (i = 0; i < adapter->num_tx_queues; i++)
919 ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
920 kfree(temp_tx_ring);
921 err = -ENOMEM; 916 err = -ENOMEM;
922 goto err_setup; 917 goto err_setup;
923 } 918 }
924 919
925 if (new_rx_count != adapter->rx_ring_count) { 920 if (new_rx_count != adapter->rx_ring_count) {
926 memcpy(temp_rx_ring, adapter->rx_ring,
927 adapter->num_rx_queues * sizeof(struct ixgbe_ring));
928 for (i = 0; i < adapter->num_rx_queues; i++) { 921 for (i = 0; i < adapter->num_rx_queues; i++) {
922 memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
923 sizeof(struct ixgbe_ring));
929 temp_rx_ring[i].count = new_rx_count; 924 temp_rx_ring[i].count = new_rx_count;
930 err = ixgbe_setup_rx_resources(adapter, 925 err = ixgbe_setup_rx_resources(adapter,
931 &temp_rx_ring[i]); 926 &temp_rx_ring[i]);
@@ -947,22 +942,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
947 942
948 /* tx */ 943 /* tx */
949 if (new_tx_count != adapter->tx_ring_count) { 944 if (new_tx_count != adapter->tx_ring_count) {
950 kfree(adapter->tx_ring); 945 for (i = 0; i < adapter->num_tx_queues; i++) {
951 adapter->tx_ring = temp_tx_ring; 946 ixgbe_free_tx_resources(adapter,
952 temp_tx_ring = NULL; 947 adapter->tx_ring[i]);
948 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
949 sizeof(struct ixgbe_ring));
950 }
953 adapter->tx_ring_count = new_tx_count; 951 adapter->tx_ring_count = new_tx_count;
954 } 952 }
955 953
956 /* rx */ 954 /* rx */
957 if (new_rx_count != adapter->rx_ring_count) { 955 if (new_rx_count != adapter->rx_ring_count) {
958 kfree(adapter->rx_ring); 956 for (i = 0; i < adapter->num_rx_queues; i++) {
959 adapter->rx_ring = temp_rx_ring; 957 ixgbe_free_rx_resources(adapter,
960 temp_rx_ring = NULL; 958 adapter->rx_ring[i]);
959 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
960 sizeof(struct ixgbe_ring));
961 }
961 adapter->rx_ring_count = new_rx_count; 962 adapter->rx_ring_count = new_rx_count;
962 } 963 }
963 ixgbe_up(adapter); 964 ixgbe_up(adapter);
964 } 965 }
966
967 vfree(temp_rx_ring);
965err_setup: 968err_setup:
969 vfree(temp_tx_ring);
970clear_reset:
966 clear_bit(__IXGBE_RESETTING, &adapter->state); 971 clear_bit(__IXGBE_RESETTING, &adapter->state);
967 return err; 972 return err;
968} 973}
@@ -1007,13 +1012,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1007 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1012 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1008 } 1013 }
1009 for (j = 0; j < adapter->num_tx_queues; j++) { 1014 for (j = 0; j < adapter->num_tx_queues; j++) {
1010 queue_stat = (u64 *)&adapter->tx_ring[j].stats; 1015 queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
1011 for (k = 0; k < stat_count; k++) 1016 for (k = 0; k < stat_count; k++)
1012 data[i + k] = queue_stat[k]; 1017 data[i + k] = queue_stat[k];
1013 i += k; 1018 i += k;
1014 } 1019 }
1015 for (j = 0; j < adapter->num_rx_queues; j++) { 1020 for (j = 0; j < adapter->num_rx_queues; j++) {
1016 queue_stat = (u64 *)&adapter->rx_ring[j].stats; 1021 queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
1017 for (k = 0; k < stat_count; k++) 1022 for (k = 0; k < stat_count; k++)
1018 data[i + k] = queue_stat[k]; 1023 data[i + k] = queue_stat[k];
1019 i += k; 1024 i += k;
@@ -1627,7 +1632,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1627 reg_data |= IXGBE_RXDCTL_ENABLE; 1632 reg_data |= IXGBE_RXDCTL_ENABLE;
1628 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data); 1633 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
1629 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1634 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1630 int j = adapter->rx_ring[0].reg_idx; 1635 int j = adapter->rx_ring[0]->reg_idx;
1631 u32 k; 1636 u32 k;
1632 for (k = 0; k < 10; k++) { 1637 for (k = 0; k < 10; k++) {
1633 if (IXGBE_READ_REG(&adapter->hw, 1638 if (IXGBE_READ_REG(&adapter->hw,
@@ -2011,7 +2016,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
2011{ 2016{
2012 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2017 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2013 2018
2014 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit; 2019 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
2015 2020
2016 /* only valid if in constant ITR mode */ 2021 /* only valid if in constant ITR mode */
2017 switch (adapter->rx_itr_setting) { 2022 switch (adapter->rx_itr_setting) {
@@ -2064,7 +2069,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2064 return -EINVAL; 2069 return -EINVAL;
2065 2070
2066 if (ec->tx_max_coalesced_frames_irq) 2071 if (ec->tx_max_coalesced_frames_irq)
2067 adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq; 2072 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
2068 2073
2069 if (ec->rx_coalesce_usecs > 1) { 2074 if (ec->rx_coalesce_usecs > 1) {
2070 /* check the limits */ 2075 /* check the limits */