author     Alexander Duyck <alexander.h.duyck@intel.com>   2008-11-20 03:48:10 -0500
committer  David S. Miller <davem@davemloft.net>           2008-11-20 03:48:10 -0500
commit     68fd991020fdf51bc94327d288ae4ae5d0b8dced
tree       ff3f911ef077496ac3a4374c57ee97fe581590f5 /drivers/net/igb/igb_ethtool.c
parent     b2d565365e02947699a8e3d594275662a8d00646
igb: Fix tx/rx_ring_count parameters for igb on suspend/resume/ring resize
When suspending the device, the ring structures are freed, which causes the driver to
lose track of the descriptor counts. To resolve this we move the ring count out of the
ring structure and store it in the adapter struct.
In addition to resolving the suspend/resume issue, this patch also addresses the uneven
ring sizes that could be left across multiple queues when a memory allocation error
occurred partway through a resize.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
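
The core of the change is a build-aside-and-swap scheme for the descriptor rings. Below is a condensed sketch of the TX side (the RX path is symmetric); the igb_resize_tx_rings() wrapper is hypothetical and is shown only for illustration, since in the patch the same logic lives inline in igb_set_ringparam(). The new rings are allocated into a scratch array first, so a failed allocation leaves the existing rings untouched, and the requested count is recorded in adapter->tx_ring_count, which survives the ring structures being freed on suspend.

/* Hypothetical helper sketching the swap scheme used by the patch;
 * in the patch itself this logic is inline in igb_set_ringparam(). */
static int igb_resize_tx_rings(struct igb_adapter *adapter, u32 new_tx_count)
{
	struct igb_ring *temp_ring;
	int i, err = 0;

	temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
	if (!temp_ring)
		return -ENOMEM;

	/* Build the resized rings off to the side; the live rings are not
	 * touched yet, so a failed allocation leaves the adapter as it was. */
	memcpy(temp_ring, adapter->tx_ring,
	       adapter->num_tx_queues * sizeof(struct igb_ring));
	for (i = 0; i < adapter->num_tx_queues; i++) {
		temp_ring[i].count = new_tx_count;
		err = igb_setup_tx_resources(adapter, &temp_ring[i]);
		if (err) {
			while (i--)
				igb_free_tx_resources(&temp_ring[i]);
			goto out;
		}
	}

	/* All allocations succeeded: free the old rings, copy the new set in,
	 * and record the count in the adapter struct so it survives the ring
	 * structures being freed across suspend/resume. */
	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(&adapter->tx_ring[i]);
	memcpy(adapter->tx_ring, temp_ring,
	       adapter->num_tx_queues * sizeof(struct igb_ring));
	adapter->tx_ring_count = new_tx_count;
out:
	vfree(temp_ring);
	return err;
}

The resize itself is still requested through the normal ethtool ring interface (e.g. ethtool -G <iface> rx 1024 tx 1024), which lands in igb_set_ringparam().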
Diffstat (limited to 'drivers/net/igb/igb_ethtool.c')
-rw-r--r--  drivers/net/igb/igb_ethtool.c  102
1 file changed, 49 insertions, 53 deletions
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 9b9066c5b0e..a661159a097 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -714,15 +714,13 @@ static void igb_get_ringparam(struct net_device *netdev,
 			      struct ethtool_ringparam *ring)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct igb_ring *tx_ring = adapter->tx_ring;
-	struct igb_ring *rx_ring = adapter->rx_ring;
 
 	ring->rx_max_pending = IGB_MAX_RXD;
 	ring->tx_max_pending = IGB_MAX_TXD;
 	ring->rx_mini_max_pending = 0;
 	ring->rx_jumbo_max_pending = 0;
-	ring->rx_pending = rx_ring->count;
-	ring->tx_pending = tx_ring->count;
+	ring->rx_pending = adapter->rx_ring_count;
+	ring->tx_pending = adapter->tx_ring_count;
 	ring->rx_mini_pending = 0;
 	ring->rx_jumbo_pending = 0;
 }
@@ -731,12 +729,9 @@ static int igb_set_ringparam(struct net_device *netdev,
 			     struct ethtool_ringparam *ring)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct igb_buffer *old_buf;
-	struct igb_buffer *old_rx_buf;
-	void *old_desc;
+	struct igb_ring *temp_ring;
 	int i, err;
-	u32 new_rx_count, new_tx_count, old_size;
-	dma_addr_t old_dma;
 
+	u32 new_rx_count, new_tx_count;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
@@ -749,12 +744,19 @@ static int igb_set_ringparam(struct net_device *netdev,
 	new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
 	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
-	if ((new_tx_count == adapter->tx_ring->count) &&
-	    (new_rx_count == adapter->rx_ring->count)) {
+	if ((new_tx_count == adapter->tx_ring_count) &&
+	    (new_rx_count == adapter->rx_ring_count)) {
 		/* nothing to do */
 		return 0;
 	}
 
+	if (adapter->num_tx_queues > adapter->num_rx_queues)
+		temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
+	else
+		temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
+	if (!temp_ring)
+		return -ENOMEM;
+
 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
 		msleep(1);
 
@@ -766,62 +768,55 @@ static int igb_set_ringparam(struct net_device *netdev,
 	 * because the ISRs in MSI-X mode get passed pointers
 	 * to the tx and rx ring structs.
 	 */
-	if (new_tx_count != adapter->tx_ring->count) {
+	if (new_tx_count != adapter->tx_ring_count) {
+		memcpy(temp_ring, adapter->tx_ring,
+		       adapter->num_tx_queues * sizeof(struct igb_ring));
+
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			/* Save existing descriptor ring */
-			old_buf = adapter->tx_ring[i].buffer_info;
-			old_desc = adapter->tx_ring[i].desc;
-			old_size = adapter->tx_ring[i].size;
-			old_dma = adapter->tx_ring[i].dma;
-			/* Try to allocate a new one */
-			adapter->tx_ring[i].buffer_info = NULL;
-			adapter->tx_ring[i].desc = NULL;
-			adapter->tx_ring[i].count = new_tx_count;
-			err = igb_setup_tx_resources(adapter,
-						     &adapter->tx_ring[i]);
+			temp_ring[i].count = new_tx_count;
+			err = igb_setup_tx_resources(adapter, &temp_ring[i]);
 			if (err) {
-				/* Restore the old one so at least
-				   the adapter still works, even if
-				   we failed the request */
-				adapter->tx_ring[i].buffer_info = old_buf;
-				adapter->tx_ring[i].desc = old_desc;
-				adapter->tx_ring[i].size = old_size;
-				adapter->tx_ring[i].dma = old_dma;
+				while (i) {
+					i--;
+					igb_free_tx_resources(&temp_ring[i]);
+				}
 				goto err_setup;
 			}
-			/* Free the old buffer manually */
-			vfree(old_buf);
-			pci_free_consistent(adapter->pdev, old_size,
-					    old_desc, old_dma);
 		}
+
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			igb_free_tx_resources(&adapter->tx_ring[i]);
+
+		memcpy(adapter->tx_ring, temp_ring,
+		       adapter->num_tx_queues * sizeof(struct igb_ring));
+
+		adapter->tx_ring_count = new_tx_count;
 	}
 
 	if (new_rx_count != adapter->rx_ring->count) {
-		for (i = 0; i < adapter->num_rx_queues; i++) {
+		memcpy(temp_ring, adapter->rx_ring,
+		       adapter->num_rx_queues * sizeof(struct igb_ring));
 
-			old_rx_buf = adapter->rx_ring[i].buffer_info;
-			old_desc = adapter->rx_ring[i].desc;
-			old_size = adapter->rx_ring[i].size;
-			old_dma = adapter->rx_ring[i].dma;
-
-			adapter->rx_ring[i].buffer_info = NULL;
-			adapter->rx_ring[i].desc = NULL;
-			adapter->rx_ring[i].dma = 0;
-			adapter->rx_ring[i].count = new_rx_count;
-			err = igb_setup_rx_resources(adapter,
-						     &adapter->rx_ring[i]);
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			temp_ring[i].count = new_rx_count;
+			err = igb_setup_rx_resources(adapter, &temp_ring[i]);
 			if (err) {
-				adapter->rx_ring[i].buffer_info = old_rx_buf;
-				adapter->rx_ring[i].desc = old_desc;
-				adapter->rx_ring[i].size = old_size;
-				adapter->rx_ring[i].dma = old_dma;
+				while (i) {
+					i--;
+					igb_free_rx_resources(&temp_ring[i]);
+				}
 				goto err_setup;
 			}
 
-			vfree(old_rx_buf);
-			pci_free_consistent(adapter->pdev, old_size, old_desc,
-					    old_dma);
 		}
+
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			igb_free_rx_resources(&adapter->rx_ring[i]);
+
+		memcpy(adapter->rx_ring, temp_ring,
+		       adapter->num_rx_queues * sizeof(struct igb_ring));
+
+		adapter->rx_ring_count = new_rx_count;
 	}
 
 	err = 0;
@@ -830,6 +825,7 @@ err_setup:
 	igb_up(adapter);
 
 	clear_bit(__IGB_RESETTING, &adapter->state);
+	vfree(temp_ring);
 	return err;
 }
 