diff options
author | Alexander Duyck <alexander.h.duyck@intel.com> | 2012-09-24 20:31:27 -0400 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2012-10-19 07:42:13 -0400 |
commit | 6a05004a8a65f187fa5493821d3115397209ae76 (patch) | |
tree | f575d670280b2b3eb140d05a4917b02fdd504150 /drivers/net/ethernet/intel | |
parent | 0c2cc02e571aee1f2193a004508d4d604eff6a8f (diff) |
igb: Split igb_update_dca into separate Tx and Rx functions
This change splits igb_update_dca into two halves, one
for Rx and one for Tx. The primary advantage of this is readability.
In addition, I am enabling relaxed ordering for reads from hardware,
since this is supported on all of the igb parts.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r-- | drivers/net/ethernet/intel/igb/e1000_82575.h | 3 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/igb/igb_main.c | 80 |
2 files changed, 52 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index e85c453f5428..44b76b3b6816 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h | |||
@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc { | |||
172 | #define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ | 172 | #define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ |
173 | #define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ | 173 | #define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ |
174 | #define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ | 174 | #define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ |
175 | #define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ | ||
175 | 176 | ||
176 | #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ | 177 | #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ |
177 | #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ | 178 | #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ |
179 | #define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ | ||
178 | #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ | 180 | #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ |
181 | #define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ | ||
179 | 182 | ||
180 | /* Additional DCA related definitions, note change in position of CPUID */ | 183 | /* Additional DCA related definitions, note change in position of CPUID */ |
181 | #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ | 184 | #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index e7b102723481..87abb5735852 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -4851,45 +4851,63 @@ static irqreturn_t igb_msix_ring(int irq, void *data) | |||
4851 | } | 4851 | } |
4852 | 4852 | ||
4853 | #ifdef CONFIG_IGB_DCA | 4853 | #ifdef CONFIG_IGB_DCA |
4854 | static void igb_update_tx_dca(struct igb_adapter *adapter, | ||
4855 | struct igb_ring *tx_ring, | ||
4856 | int cpu) | ||
4857 | { | ||
4858 | struct e1000_hw *hw = &adapter->hw; | ||
4859 | u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); | ||
4860 | |||
4861 | if (hw->mac.type != e1000_82575) | ||
4862 | txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; | ||
4863 | |||
4864 | /* | ||
4865 | * We can enable relaxed ordering for reads, but not writes when | ||
4866 | * DCA is enabled. This is due to a known issue in some chipsets | ||
4867 | * which will cause the DCA tag to be cleared. | ||
4868 | */ | ||
4869 | txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN | | ||
4870 | E1000_DCA_TXCTRL_DATA_RRO_EN | | ||
4871 | E1000_DCA_TXCTRL_DESC_DCA_EN; | ||
4872 | |||
4873 | wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); | ||
4874 | } | ||
4875 | |||
4876 | static void igb_update_rx_dca(struct igb_adapter *adapter, | ||
4877 | struct igb_ring *rx_ring, | ||
4878 | int cpu) | ||
4879 | { | ||
4880 | struct e1000_hw *hw = &adapter->hw; | ||
4881 | u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); | ||
4882 | |||
4883 | if (hw->mac.type != e1000_82575) | ||
4884 | rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; | ||
4885 | |||
4886 | /* | ||
4887 | * We can enable relaxed ordering for reads, but not writes when | ||
4888 | * DCA is enabled. This is due to a known issue in some chipsets | ||
4889 | * which will cause the DCA tag to be cleared. | ||
4890 | */ | ||
4891 | rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | | ||
4892 | E1000_DCA_RXCTRL_DESC_DCA_EN; | ||
4893 | |||
4894 | wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); | ||
4895 | } | ||
4896 | |||
4854 | static void igb_update_dca(struct igb_q_vector *q_vector) | 4897 | static void igb_update_dca(struct igb_q_vector *q_vector) |
4855 | { | 4898 | { |
4856 | struct igb_adapter *adapter = q_vector->adapter; | 4899 | struct igb_adapter *adapter = q_vector->adapter; |
4857 | struct e1000_hw *hw = &adapter->hw; | ||
4858 | int cpu = get_cpu(); | 4900 | int cpu = get_cpu(); |
4859 | 4901 | ||
4860 | if (q_vector->cpu == cpu) | 4902 | if (q_vector->cpu == cpu) |
4861 | goto out_no_update; | 4903 | goto out_no_update; |
4862 | 4904 | ||
4863 | if (q_vector->tx.ring) { | 4905 | if (q_vector->tx.ring) |
4864 | int q = q_vector->tx.ring->reg_idx; | 4906 | igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); |
4865 | u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); | 4907 | |
4866 | if (hw->mac.type == e1000_82575) { | 4908 | if (q_vector->rx.ring) |
4867 | dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; | 4909 | igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); |
4868 | dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | 4910 | |
4869 | } else { | ||
4870 | dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; | ||
4871 | dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
4872 | E1000_DCA_TXCTRL_CPUID_SHIFT; | ||
4873 | } | ||
4874 | dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; | ||
4875 | wr32(E1000_DCA_TXCTRL(q), dca_txctrl); | ||
4876 | } | ||
4877 | if (q_vector->rx.ring) { | ||
4878 | int q = q_vector->rx.ring->reg_idx; | ||
4879 | u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); | ||
4880 | if (hw->mac.type == e1000_82575) { | ||
4881 | dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; | ||
4882 | dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | ||
4883 | } else { | ||
4884 | dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; | ||
4885 | dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
4886 | E1000_DCA_RXCTRL_CPUID_SHIFT; | ||
4887 | } | ||
4888 | dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; | ||
4889 | dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; | ||
4890 | dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; | ||
4891 | wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); | ||
4892 | } | ||
4893 | q_vector->cpu = cpu; | 4911 | q_vector->cpu = cpu; |
4894 | out_no_update: | 4912 | out_no_update: |
4895 | put_cpu(); | 4913 | put_cpu(); |