aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ixgbe/ixgbe_main.c
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2010-08-19 09:38:57 -0400
committerDavid S. Miller <davem@davemloft.net>2010-08-19 19:44:24 -0400
commitf5b4a52e81d46b51ba07a983036739dc0c2c6c1c (patch)
tree3806736fb55d55aa486f9c9d2d70565509658698 /drivers/net/ixgbe/ixgbe_main.c
parent7367096a43259c9b461ec3120dcaaf674a092132 (diff)
ixgbe: Move virtualization config into a separate function
This change moves the configuration that was done in configure_rx into a separate virtualization configuration function.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c84
1 file changed, 46 insertions(+), 38 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index f8cdc992f24d..d358beff57fc 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2696,6 +2696,48 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
2696 psrtype); 2696 psrtype);
2697} 2697}
2698 2698
2699static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
2700{
2701 struct ixgbe_hw *hw = &adapter->hw;
2702 u32 gcr_ext;
2703 u32 vt_reg_bits;
2704 u32 reg_offset, vf_shift;
2705 u32 vmdctl;
2706
2707 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2708 return;
2709
2710 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2711 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
2712 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
2713 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2714
2715 vf_shift = adapter->num_vfs % 32;
2716 reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
2717
2718 /* Enable only the PF's pool for Tx/Rx */
2719 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2720 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
2721 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2722 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
2723 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2724
2725 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
2726 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2727
2728 /*
2729 * Set up VF register offsets for selected VT Mode,
2730 * i.e. 32 or 64 VFs for SR-IOV
2731 */
2732 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2733 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
2734 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
2735 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
2736
2737 /* enable Tx loopback for VF/PF communication */
2738 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2739}
2740
2699static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) 2741static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
2700{ 2742{
2701 struct ixgbe_hw *hw = &adapter->hw; 2743 struct ixgbe_hw *hw = &adapter->hw;
@@ -2820,7 +2862,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2820 struct ixgbe_ring *rx_ring; 2862 struct ixgbe_ring *rx_ring;
2821 int i; 2863 int i;
2822 u32 rxctrl; 2864 u32 rxctrl;
2823 u32 gcr_ext;
2824 2865
2825 /* disable receives while setting up the descriptors */ 2866 /* disable receives while setting up the descriptors */
2826 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2867 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -2829,6 +2870,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2829 ixgbe_setup_psrtype(adapter); 2870 ixgbe_setup_psrtype(adapter);
2830 ixgbe_setup_rdrxctl(adapter); 2871 ixgbe_setup_rdrxctl(adapter);
2831 2872
2873 /* Program MRQC for the distribution of queues */
2874 ixgbe_setup_mrqc(adapter);
2875 ixgbe_configure_virtualization(adapter);
2876
2832 /* set_rx_buffer_len must be called before ring initialization */ 2877 /* set_rx_buffer_len must be called before ring initialization */
2833 ixgbe_set_rx_buffer_len(adapter); 2878 ixgbe_set_rx_buffer_len(adapter);
2834 2879
@@ -2843,43 +2888,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2843 ixgbe_configure_rscctl(adapter, rx_ring); 2888 ixgbe_configure_rscctl(adapter, rx_ring);
2844 } 2889 }
2845 2890
2846 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2847 u32 vt_reg_bits;
2848 u32 reg_offset, vf_shift;
2849 u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2850 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
2851 | IXGBE_VT_CTL_REPLEN;
2852 vt_reg_bits |= (adapter->num_vfs <<
2853 IXGBE_VT_CTL_POOL_SHIFT);
2854 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2855 IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
2856
2857 vf_shift = adapter->num_vfs % 32;
2858 reg_offset = adapter->num_vfs / 32;
2859 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
2860 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
2861 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
2862 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
2863 /* Enable only the PF's pool for Tx/Rx */
2864 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2865 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2866 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2867 }
2868
2869 /* Program MRQC for the distribution of queues */
2870 ixgbe_setup_mrqc(adapter);
2871
2872 if (adapter->num_vfs) {
2873 /* Map PF MAC address in RAR Entry 0 to first pool
2874 * following VFs */
2875 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2876
2877 /* Set up VF register offsets for selected VT Mode, i.e.
2878 * 64 VFs for SR-IOV */
2879 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2880 gcr_ext |= IXGBE_GCR_EXT_SRIOV;
2881 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
2882 }
2883} 2891}
2884 2892
2885static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 2893static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)