diff options
author | Alexander Duyck <alexander.h.duyck@intel.com> | 2012-05-18 02:34:02 -0400 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2012-07-17 22:09:13 -0400 |
commit | 671c0adb5cb5f3acdc93527e54cf1e379fc980b1 (patch) | |
tree | dbfb0fc2d0c841058dae547c2a4eafafa4a88a87 | |
parent | 4ae63730bb420610cb99ed152d6daa35236cc9e9 (diff) |
ixgbe: Cleanup logic for MRQC and MTQC configuration
This change is meant to make the code much more readable for MTQC and MRQC
configuration.
The big change is that I simplified much of the logic so that we are
essentially handling just 4 cases and their variants. In the cases where
RSS is disabled we are actually just programming the RETA table with all
1s resulting in a single queue RSS. In the case of SR-IOV I am treating
that as a subset of VMDq. This all results in the following configuration
for the hardware:
DCB
En Dis
VMDq En VMDQ/DCB VMDq/RSS
Dis DCB/RSS RSS
Cc: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 116 |
1 files changed, 66 insertions, 50 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 32c8cd649cb0..2b4b79178858 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -2719,8 +2719,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, | |||
2719 | static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) | 2719 | static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) |
2720 | { | 2720 | { |
2721 | struct ixgbe_hw *hw = &adapter->hw; | 2721 | struct ixgbe_hw *hw = &adapter->hw; |
2722 | u32 rttdcs; | 2722 | u32 rttdcs, mtqc; |
2723 | u32 reg; | ||
2724 | u8 tcs = netdev_get_num_tc(adapter->netdev); | 2723 | u8 tcs = netdev_get_num_tc(adapter->netdev); |
2725 | 2724 | ||
2726 | if (hw->mac.type == ixgbe_mac_82598EB) | 2725 | if (hw->mac.type == ixgbe_mac_82598EB) |
@@ -2732,28 +2731,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) | |||
2732 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); | 2731 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); |
2733 | 2732 | ||
2734 | /* set transmit pool layout */ | 2733 | /* set transmit pool layout */ |
2735 | switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | 2734 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { |
2736 | case (IXGBE_FLAG_SRIOV_ENABLED): | 2735 | mtqc = IXGBE_MTQC_VT_ENA; |
2737 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, | 2736 | if (tcs > 4) |
2738 | (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); | 2737 | mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; |
2739 | break; | 2738 | else if (tcs > 1) |
2740 | default: | 2739 | mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; |
2741 | if (!tcs) | 2740 | else if (adapter->ring_feature[RING_F_RSS].indices == 4) |
2742 | reg = IXGBE_MTQC_64Q_1PB; | 2741 | mtqc |= IXGBE_MTQC_32VF; |
2743 | else if (tcs <= 4) | ||
2744 | reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; | ||
2745 | else | 2742 | else |
2746 | reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; | 2743 | mtqc |= IXGBE_MTQC_64VF; |
2744 | } else { | ||
2745 | if (tcs > 4) | ||
2746 | mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; | ||
2747 | else if (tcs > 1) | ||
2748 | mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; | ||
2749 | else | ||
2750 | mtqc = IXGBE_MTQC_64Q_1PB; | ||
2751 | } | ||
2747 | 2752 | ||
2748 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); | 2753 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); |
2749 | 2754 | ||
2750 | /* Enable Security TX Buffer IFG for multiple pb */ | 2755 | /* Enable Security TX Buffer IFG for multiple pb */ |
2751 | if (tcs) { | 2756 | if (tcs) { |
2752 | reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); | 2757 | u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); |
2753 | reg |= IXGBE_SECTX_DCB; | 2758 | sectx |= IXGBE_SECTX_DCB; |
2754 | IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); | 2759 | IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx); |
2755 | } | ||
2756 | break; | ||
2757 | } | 2760 | } |
2758 | 2761 | ||
2759 | /* re-enable the arbiter */ | 2762 | /* re-enable the arbiter */ |
@@ -2886,11 +2889,18 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | |||
2886 | u32 mrqc = 0, reta = 0; | 2889 | u32 mrqc = 0, reta = 0; |
2887 | u32 rxcsum; | 2890 | u32 rxcsum; |
2888 | int i, j; | 2891 | int i, j; |
2889 | u8 tcs = netdev_get_num_tc(adapter->netdev); | 2892 | u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; |
2890 | int maxq = adapter->ring_feature[RING_F_RSS].indices; | 2893 | |
2894 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | ||
2895 | rss_i = 1; | ||
2891 | 2896 | ||
2892 | if (tcs) | 2897 | /* |
2893 | maxq = min(maxq, adapter->num_tx_queues / tcs); | 2898 | * Program table for at least 2 queues w/ SR-IOV so that VFs can |
2899 | * make full use of any rings they may have. We will use the | ||
2900 | * PSRTYPE register to control how many rings we use within the PF. | ||
2901 | */ | ||
2902 | if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) | ||
2903 | rss_i = 2; | ||
2894 | 2904 | ||
2895 | /* Fill out hash function seeds */ | 2905 | /* Fill out hash function seeds */ |
2896 | for (i = 0; i < 10; i++) | 2906 | for (i = 0; i < 10; i++) |
@@ -2898,7 +2908,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | |||
2898 | 2908 | ||
2899 | /* Fill out redirection table */ | 2909 | /* Fill out redirection table */ |
2900 | for (i = 0, j = 0; i < 128; i++, j++) { | 2910 | for (i = 0, j = 0; i < 128; i++, j++) { |
2901 | if (j == maxq) | 2911 | if (j == rss_i) |
2902 | j = 0; | 2912 | j = 0; |
2903 | /* reta = 4-byte sliding window of | 2913 | /* reta = 4-byte sliding window of |
2904 | * 0x00..(indices-1)(indices-1)00..etc. */ | 2914 | * 0x00..(indices-1)(indices-1)00..etc. */ |
@@ -2912,35 +2922,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | |||
2912 | rxcsum |= IXGBE_RXCSUM_PCSD; | 2922 | rxcsum |= IXGBE_RXCSUM_PCSD; |
2913 | IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); | 2923 | IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); |
2914 | 2924 | ||
2915 | if (adapter->hw.mac.type == ixgbe_mac_82598EB && | 2925 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
2916 | (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { | 2926 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) |
2917 | mrqc = IXGBE_MRQC_RSSEN; | 2927 | mrqc = IXGBE_MRQC_RSSEN; |
2918 | } else { | 2928 | } else { |
2919 | int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED | 2929 | u8 tcs = netdev_get_num_tc(adapter->netdev); |
2920 | | IXGBE_FLAG_SRIOV_ENABLED); | 2930 | |
2921 | 2931 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | |
2922 | switch (mask) { | 2932 | if (tcs > 4) |
2923 | case (IXGBE_FLAG_RSS_ENABLED): | 2933 | mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ |
2924 | if (!tcs) | 2934 | else if (tcs > 1) |
2925 | mrqc = IXGBE_MRQC_RSSEN; | 2935 | mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ |
2926 | else if (tcs <= 4) | 2936 | else if (adapter->ring_feature[RING_F_RSS].indices == 4) |
2927 | mrqc = IXGBE_MRQC_RTRSS4TCEN; | 2937 | mrqc = IXGBE_MRQC_VMDQRSS32EN; |
2928 | else | 2938 | else |
2939 | mrqc = IXGBE_MRQC_VMDQRSS64EN; | ||
2940 | } else { | ||
2941 | if (tcs > 4) | ||
2929 | mrqc = IXGBE_MRQC_RTRSS8TCEN; | 2942 | mrqc = IXGBE_MRQC_RTRSS8TCEN; |
2930 | break; | 2943 | else if (tcs > 1) |
2931 | case (IXGBE_FLAG_SRIOV_ENABLED): | 2944 | mrqc = IXGBE_MRQC_RTRSS4TCEN; |
2932 | mrqc = IXGBE_MRQC_VMDQEN; | 2945 | else |
2933 | break; | 2946 | mrqc = IXGBE_MRQC_RSSEN; |
2934 | default: | ||
2935 | break; | ||
2936 | } | 2947 | } |
2937 | } | 2948 | } |
2938 | 2949 | ||
2939 | /* Perform hash on these packet types */ | 2950 | /* Perform hash on these packet types */ |
2940 | mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | 2951 | mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | |
2941 | | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2952 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | |
2942 | | IXGBE_MRQC_RSS_FIELD_IPV6 | 2953 | IXGBE_MRQC_RSS_FIELD_IPV6 | |
2943 | | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; | 2954 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; |
2944 | 2955 | ||
2945 | if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) | 2956 | if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) |
2946 | mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; | 2957 | mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; |
@@ -3103,8 +3114,13 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) | |||
3103 | if (hw->mac.type == ixgbe_mac_82598EB) | 3114 | if (hw->mac.type == ixgbe_mac_82598EB) |
3104 | return; | 3115 | return; |
3105 | 3116 | ||
3106 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) | 3117 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
3107 | psrtype |= (adapter->num_rx_queues_per_pool << 29); | 3118 | int rss_i = adapter->ring_feature[RING_F_RSS].indices; |
3119 | if (rss_i > 3) | ||
3120 | psrtype |= 2 << 29; | ||
3121 | else if (rss_i > 1) | ||
3122 | psrtype |= 1 << 29; | ||
3123 | } | ||
3108 | 3124 | ||
3109 | for (p = 0; p < adapter->num_rx_pools; p++) | 3125 | for (p = 0; p < adapter->num_rx_pools; p++) |
3110 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), | 3126 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), |