about summary refs log tree commit diff stats
path: root/drivers/net/ethernet
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2012-09-12 03:09:51 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2012-10-23 00:17:19 -0400
commit1f4702aa25f4cdb80999fbe392ff9af5c0fb7969 (patch)
tree78bf5d5a5771108dc3453e4b6807a5c938a49c72 /drivers/net/ethernet
parentde52a12c29a1037b397d36866fd205bbc4347455 (diff)
ixgbe: Fix possible memory leak in ixgbe_set_ringparam
We were not correctly freeing the temporary rings on error in ixgbe_set_ringparam. In order to correct this I am unwinding a number of changes that were made in order to get things back to the original working form with modification for the current ring layouts.

This approach has multiple advantages including a smaller memory footprint, and the fact that the interface is stopped while we are allocating the rings meaning that there is less potential for some sort of memory corruption on the ring.

The only disadvantage I see with this approach is that on an Rx allocation failure we will report an error and only update the Tx rings. However the adapter should be fully functional in this state and the likelihood of such an error is very low. In addition it is not unreasonable to expect the user to need to recheck the ring configuration should they experience an error setting the ring sizes.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c102
1 file changed, 50 insertions(+), 52 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 56b20d17d0e4..872c3374ddfa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -887,24 +887,23 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
887 struct ethtool_ringparam *ring) 887 struct ethtool_ringparam *ring)
888{ 888{
889 struct ixgbe_adapter *adapter = netdev_priv(netdev); 889 struct ixgbe_adapter *adapter = netdev_priv(netdev);
890 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 890 struct ixgbe_ring *temp_ring;
891 int i, err = 0; 891 int i, err = 0;
892 u32 new_rx_count, new_tx_count; 892 u32 new_rx_count, new_tx_count;
893 bool need_update = false;
894 893
895 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 894 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
896 return -EINVAL; 895 return -EINVAL;
897 896
898 new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD); 897 new_tx_count = clamp_t(u32, ring->tx_pending,
899 new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD); 898 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
900 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
901
902 new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
903 new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
904 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 899 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
905 900
906 if ((new_tx_count == adapter->tx_ring[0]->count) && 901 new_rx_count = clamp_t(u32, ring->rx_pending,
907 (new_rx_count == adapter->rx_ring[0]->count)) { 902 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
903 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
904
905 if ((new_tx_count == adapter->tx_ring_count) &&
906 (new_rx_count == adapter->rx_ring_count)) {
908 /* nothing to do */ 907 /* nothing to do */
909 return 0; 908 return 0;
910 } 909 }
@@ -922,81 +921,80 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
922 goto clear_reset; 921 goto clear_reset;
923 } 922 }
924 923
925 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 924 /* allocate temporary buffer to store rings in */
926 if (!temp_tx_ring) { 925 i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
926 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
927
928 if (!temp_ring) {
927 err = -ENOMEM; 929 err = -ENOMEM;
928 goto clear_reset; 930 goto clear_reset;
929 } 931 }
930 932
933 ixgbe_down(adapter);
934
935 /*
936 * Setup new Tx resources and free the old Tx resources in that order.
937 * We can then assign the new resources to the rings via a memcpy.
938 * The advantage to this approach is that we are guaranteed to still
939 * have resources even in the case of an allocation failure.
940 */
931 if (new_tx_count != adapter->tx_ring_count) { 941 if (new_tx_count != adapter->tx_ring_count) {
932 for (i = 0; i < adapter->num_tx_queues; i++) { 942 for (i = 0; i < adapter->num_tx_queues; i++) {
933 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 943 memcpy(&temp_ring[i], adapter->tx_ring[i],
934 sizeof(struct ixgbe_ring)); 944 sizeof(struct ixgbe_ring));
935 temp_tx_ring[i].count = new_tx_count; 945
936 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); 946 temp_ring[i].count = new_tx_count;
947 err = ixgbe_setup_tx_resources(&temp_ring[i]);
937 if (err) { 948 if (err) {
938 while (i) { 949 while (i) {
939 i--; 950 i--;
940 ixgbe_free_tx_resources(&temp_tx_ring[i]); 951 ixgbe_free_tx_resources(&temp_ring[i]);
941 } 952 }
942 goto clear_reset; 953 goto err_setup;
943 } 954 }
944 } 955 }
945 need_update = true;
946 }
947 956
948 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 957 for (i = 0; i < adapter->num_tx_queues; i++) {
949 if (!temp_rx_ring) { 958 ixgbe_free_tx_resources(adapter->tx_ring[i]);
950 err = -ENOMEM; 959
951 goto err_setup; 960 memcpy(adapter->tx_ring[i], &temp_ring[i],
961 sizeof(struct ixgbe_ring));
962 }
963
964 adapter->tx_ring_count = new_tx_count;
952 } 965 }
953 966
967 /* Repeat the process for the Rx rings if needed */
954 if (new_rx_count != adapter->rx_ring_count) { 968 if (new_rx_count != adapter->rx_ring_count) {
955 for (i = 0; i < adapter->num_rx_queues; i++) { 969 for (i = 0; i < adapter->num_rx_queues; i++) {
956 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 970 memcpy(&temp_ring[i], adapter->rx_ring[i],
957 sizeof(struct ixgbe_ring)); 971 sizeof(struct ixgbe_ring));
958 temp_rx_ring[i].count = new_rx_count; 972
959 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); 973 temp_ring[i].count = new_rx_count;
974 err = ixgbe_setup_rx_resources(&temp_ring[i]);
960 if (err) { 975 if (err) {
961 while (i) { 976 while (i) {
962 i--; 977 i--;
963 ixgbe_free_rx_resources(&temp_rx_ring[i]); 978 ixgbe_free_rx_resources(&temp_ring[i]);
964 } 979 }
965 goto err_setup; 980 goto err_setup;
966 } 981 }
982
967 } 983 }
968 need_update = true;
969 }
970 984
971 /* if rings need to be updated, here's the place to do it in one shot */ 985 for (i = 0; i < adapter->num_rx_queues; i++) {
972 if (need_update) { 986 ixgbe_free_rx_resources(adapter->rx_ring[i]);
973 ixgbe_down(adapter);
974 987
975 /* tx */ 988 memcpy(adapter->rx_ring[i], &temp_ring[i],
976 if (new_tx_count != adapter->tx_ring_count) { 989 sizeof(struct ixgbe_ring));
977 for (i = 0; i < adapter->num_tx_queues; i++) {
978 ixgbe_free_tx_resources(adapter->tx_ring[i]);
979 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
980 sizeof(struct ixgbe_ring));
981 }
982 adapter->tx_ring_count = new_tx_count;
983 } 990 }
984 991
985 /* rx */ 992 adapter->rx_ring_count = new_rx_count;
986 if (new_rx_count != adapter->rx_ring_count) {
987 for (i = 0; i < adapter->num_rx_queues; i++) {
988 ixgbe_free_rx_resources(adapter->rx_ring[i]);
989 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
990 sizeof(struct ixgbe_ring));
991 }
992 adapter->rx_ring_count = new_rx_count;
993 }
994 ixgbe_up(adapter);
995 } 993 }
996 994
997 vfree(temp_rx_ring);
998err_setup: 995err_setup:
999 vfree(temp_tx_ring); 996 ixgbe_up(adapter);
997 vfree(temp_ring);
1000clear_reset: 998clear_reset:
1001 clear_bit(__IXGBE_RESETTING, &adapter->state); 999 clear_bit(__IXGBE_RESETTING, &adapter->state);
1002 return err; 1000 return err;