diff options
author | Alexander Duyck <alexander.h.duyck@intel.com> | 2012-02-08 02:50:14 -0500 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2012-03-14 03:30:17 -0400 |
commit | bdda1a61c1dbb1e3a4f76c944b7c1e3b3731c861 (patch) | |
tree | 477442309851ea292875671921d3aa85b655a4cd /drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |
parent | 5edc4465826bf5be4da678822b0c0e35666b6d67 (diff) |
ixgbe: Do not disable read relaxed ordering when DCA is enabled
A previous fix had gone through and disabled relaxed ordering for Rx
descriptor read fetching. This was not necessary as this functions
correctly and has no ill effects on the system.
In addition several of the defines used for the DCA control registers were
incorrect in that they indicated descriptor effects when they actually had
an impact on either data or header write back. As such I have updated these
to correctly reflect either DATA or HEAD.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 71 |
1 files changed, 38 insertions, 33 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 95240ab29177..ac3532446594 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -860,63 +860,68 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | |||
860 | } | 860 | } |
861 | 861 | ||
862 | #ifdef CONFIG_IXGBE_DCA | 862 | #ifdef CONFIG_IXGBE_DCA |
863 | static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, | 863 | static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, |
864 | struct ixgbe_ring *rx_ring, | 864 | struct ixgbe_ring *tx_ring, |
865 | int cpu) | 865 | int cpu) |
866 | { | 866 | { |
867 | struct ixgbe_hw *hw = &adapter->hw; | 867 | struct ixgbe_hw *hw = &adapter->hw; |
868 | u32 rxctrl; | 868 | u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); |
869 | u8 reg_idx = rx_ring->reg_idx; | 869 | u16 reg_offset; |
870 | 870 | ||
871 | rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx)); | ||
872 | switch (hw->mac.type) { | 871 | switch (hw->mac.type) { |
873 | case ixgbe_mac_82598EB: | 872 | case ixgbe_mac_82598EB: |
874 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; | 873 | reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); |
875 | rxctrl |= dca3_get_tag(rx_ring->dev, cpu); | ||
876 | break; | 874 | break; |
877 | case ixgbe_mac_82599EB: | 875 | case ixgbe_mac_82599EB: |
878 | case ixgbe_mac_X540: | 876 | case ixgbe_mac_X540: |
879 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; | 877 | reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); |
880 | rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) << | 878 | txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599; |
881 | IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); | ||
882 | break; | 879 | break; |
883 | default: | 880 | default: |
884 | break; | 881 | /* for unknown hardware do not write register */ |
882 | return; | ||
885 | } | 883 | } |
886 | rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; | 884 | |
887 | rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; | 885 | /* |
888 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); | 886 | * We can enable relaxed ordering for reads, but not writes when |
889 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); | 887 | * DCA is enabled. This is due to a known issue in some chipsets |
888 | * which will cause the DCA tag to be cleared. | ||
889 | */ | ||
890 | txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN | | ||
891 | IXGBE_DCA_TXCTRL_DATA_RRO_EN | | ||
892 | IXGBE_DCA_TXCTRL_DESC_DCA_EN; | ||
893 | |||
894 | IXGBE_WRITE_REG(hw, reg_offset, txctrl); | ||
890 | } | 895 | } |
891 | 896 | ||
892 | static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, | 897 | static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, |
893 | struct ixgbe_ring *tx_ring, | 898 | struct ixgbe_ring *rx_ring, |
894 | int cpu) | 899 | int cpu) |
895 | { | 900 | { |
896 | struct ixgbe_hw *hw = &adapter->hw; | 901 | struct ixgbe_hw *hw = &adapter->hw; |
897 | u32 txctrl; | 902 | u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu); |
898 | u8 reg_idx = tx_ring->reg_idx; | 903 | u8 reg_idx = rx_ring->reg_idx; |
904 | |||
899 | 905 | ||
900 | switch (hw->mac.type) { | 906 | switch (hw->mac.type) { |
901 | case ixgbe_mac_82598EB: | ||
902 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx)); | ||
903 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; | ||
904 | txctrl |= dca3_get_tag(tx_ring->dev, cpu); | ||
905 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | ||
906 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); | ||
907 | break; | ||
908 | case ixgbe_mac_82599EB: | 907 | case ixgbe_mac_82599EB: |
909 | case ixgbe_mac_X540: | 908 | case ixgbe_mac_X540: |
910 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); | 909 | rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599; |
911 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; | ||
912 | txctrl |= (dca3_get_tag(tx_ring->dev, cpu) << | ||
913 | IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); | ||
914 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | ||
915 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); | ||
916 | break; | 910 | break; |
917 | default: | 911 | default: |
918 | break; | 912 | break; |
919 | } | 913 | } |
914 | |||
915 | /* | ||
916 | * We can enable relaxed ordering for reads, but not writes when | ||
917 | * DCA is enabled. This is due to a known issue in some chipsets | ||
918 | * which will cause the DCA tag to be cleared. | ||
919 | */ | ||
920 | rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | | ||
921 | IXGBE_DCA_RXCTRL_DATA_DCA_EN | | ||
922 | IXGBE_DCA_RXCTRL_DESC_DCA_EN; | ||
923 | |||
924 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); | ||
920 | } | 925 | } |
921 | 926 | ||
922 | static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) | 927 | static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) |
@@ -991,8 +996,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) | |||
991 | 996 | ||
992 | return 0; | 997 | return 0; |
993 | } | 998 | } |
994 | #endif /* CONFIG_IXGBE_DCA */ | ||
995 | 999 | ||
1000 | #endif /* CONFIG_IXGBE_DCA */ | ||
996 | static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, | 1001 | static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, |
997 | union ixgbe_adv_rx_desc *rx_desc, | 1002 | union ixgbe_adv_rx_desc *rx_desc, |
998 | struct sk_buff *skb) | 1003 | struct sk_buff *skb) |