author	Greg Rose <gregory.v.rose@intel.com>	2010-01-08 21:26:46 -0500
committer	David S. Miller <davem@davemloft.net>	2010-01-10 16:34:26 -0500
commit	1cdd1ec8784399eef55a60887a45f3f46a1c240a
tree	ce27da2c207b6eeb900be46b9867bdce8ccb3503 /drivers/net/ixgbe
parent	096a58fdec72335d9cbee94bd10b312c5f14f8af
ixgbe: Add SR-IOV features to main module
Adds SR-IOV features supported by the 82599 controller to the main driver
module. If the CONFIG_PCI_IOV kernel option is selected then the SR-IOV
features are enabled. Use the max_vfs module option to allocate up to 63
virtual functions per physical port.

Signed-off-by: Greg Rose <gregory.v.rose@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
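For orientation (this note is not part of the patch): with the driver built as a
module against a kernel that has CONFIG_PCI_IOV enabled, the new parameter is
passed at load time. A hedged usage sketch, assuming an 82599-based port:

    # Load the PF driver and request 8 VFs per port;
    # the driver clamps any request above 63 down to 63.
    modprobe ixgbe max_vfs=8

    # The virtual functions then appear as their own PCI devices
    # (exact device names depend on the local pci.ids database).
    lspci | grep -i "virtual function"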
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c | 278
1 file changed, 268 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 233c3917427e..a938dba7bdfe 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -45,6 +45,7 @@
 #include "ixgbe.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
@@ -124,6 +125,13 @@ static struct notifier_block dca_notifier = {
 };
 #endif
 
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+		 "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
@@ -131,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 gcr;
+	u32 gpie;
+	u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+	/* disable iov and allow time for transactions to clear */
+	pci_disable_sriov(adapter->pdev);
+#endif
+
+	/* turn off device IOV mode */
+	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+	/* set default pool back to 0 */
+	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+	/* take a breather then clean up driver data */
+	msleep(100);
+	if (adapter->vfinfo)
+		kfree(adapter->vfinfo);
+	adapter->vfinfo = NULL;
+
+	adapter->num_vfs = 0;
+	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 {
 	u32 ctrl_ext;
@@ -1020,7 +1063,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
 	/* set up to autoclear timer, and the vectors */
 	mask = IXGBE_EIMS_ENABLE_MASK;
-	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+	if (adapter->num_vfs)
+		mask &= ~(IXGBE_EIMS_OTHER |
+			  IXGBE_EIMS_MAILBOX |
+			  IXGBE_EIMS_LSC);
+	else
+		mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
 }
 
@@ -1249,6 +1297,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 	if (eicr & IXGBE_EICR_LSC)
 		ixgbe_check_lsc(adapter);
 
+	if (eicr & IXGBE_EICR_MAILBOX)
+		ixgbe_msg_task(adapter);
+
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		ixgbe_check_fan_failure(adapter, eicr);
 
@@ -1763,6 +1814,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 		mask |= IXGBE_EIMS_ECC;
 		mask |= IXGBE_EIMS_GPI_SDP1;
 		mask |= IXGBE_EIMS_GPI_SDP2;
+		if (adapter->num_vfs)
+			mask |= IXGBE_EIMS_MAILBOX;
 	}
 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
 	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1771,6 +1824,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
 	ixgbe_irq_enable_queues(adapter, ~0);
 	IXGBE_WRITE_FLUSH(&adapter->hw);
+
+	if (adapter->num_vfs > 32) {
+		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+	}
 }
 
 /**
@@ -1900,6 +1958,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+		if (adapter->num_vfs > 32)
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
 	}
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1984,18 +2044,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		u32 rttdcs;
+		u32 mask;
 
 		/* disable the arbiter while setting MTQC */
 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 
-		/* We enable 8 traffic classes, DCB only */
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-			IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
-					IXGBE_MTQC_8TC_8TQ));
-		else
+		/* set transmit pool layout */
+		mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+		switch (adapter->flags & mask) {
+
+		case (IXGBE_FLAG_SRIOV_ENABLED):
+			IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+					(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+			break;
+
+		case (IXGBE_FLAG_DCB_ENABLED):
+			/* We enable 8 traffic classes, DCB only */
+			IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+					(IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+			break;
+
+		default:
 			IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+			break;
+		}
 
 		/* re-eable the arbiter */
 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2054,12 +2128,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_IXGBE_DCB
 				 | IXGBE_FLAG_DCB_ENABLED
 #endif
+				 | IXGBE_FLAG_SRIOV_ENABLED
 				);
 
 	switch (mask) {
 	case (IXGBE_FLAG_RSS_ENABLED):
 		mrqc = IXGBE_MRQC_RSSEN;
 		break;
+	case (IXGBE_FLAG_SRIOV_ENABLED):
+		mrqc = IXGBE_MRQC_VMDQEN;
+		break;
 #ifdef CONFIG_IXGBE_DCB
 	case (IXGBE_FLAG_DCB_ENABLED):
 		mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2140,7 +2218,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	int rx_buf_len;
 
 	/* Decide whether to use packet split mode or not */
-	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+	/* Do not use packet split if we're in SR-IOV Mode */
+	if (!adapter->num_vfs)
+		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
 	/* Set the RX buffer length according to the mode */
 	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2152,7 +2232,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 				IXGBE_PSRTYPE_IPV4HDR |
 				IXGBE_PSRTYPE_IPV6HDR |
 				IXGBE_PSRTYPE_L2HDR;
-			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+			IXGBE_WRITE_REG(hw,
+					IXGBE_PSRTYPE(adapter->num_vfs),
+					psrtype);
 		}
 	} else {
 		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2238,6 +2320,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
 	}
 
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		u32 vt_reg_bits;
+		u32 reg_offset, vf_shift;
+		u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+		vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+			| IXGBE_VT_CTL_REPLEN;
+		vt_reg_bits |= (adapter->num_vfs <<
+				IXGBE_VT_CTL_POOL_SHIFT);
+		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+		IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+		vf_shift = adapter->num_vfs % 32;
+		reg_offset = adapter->num_vfs / 32;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+		/* Enable only the PF's pool for Tx/Rx */
+		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+		IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+		ixgbe_set_vmolr(hw, adapter->num_vfs);
+	}
+
 	/* Program MRQC for the distribution of queues */
 	mrqc = ixgbe_setup_mrqc(adapter);
 
@@ -2269,6 +2375,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
+	if (adapter->num_vfs) {
+		u32 reg;
+
+		/* Map PF MAC address in RAR Entry 0 to first pool
+		 * following VFs */
+		hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+		/* Set up VF register offsets for selected VT Mode, i.e.
+		 * 64 VFs for SR-IOV */
+		reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+		reg |= IXGBE_GCR_EXT_SRIOV;
+		IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+	}
+
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2449,6 +2569,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 		addr_list = netdev->mc_list->dmi_addr;
 	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
 					ixgbe_addr_list_itr);
+	if (adapter->num_vfs)
+		ixgbe_restore_vf_multicasts(adapter);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2709,6 +2831,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		/* MSI only */
 		gpie = 0;
 	}
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+		gpie |= IXGBE_GPIE_VTMODE_64;
+	}
 	/* XXX: to interrupt immediately for EICS writes, enable this */
 	/* gpie |= IXGBE_GPIE_EIMEN; */
 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2783,6 +2909,18 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 		txdctl |= IXGBE_TXDCTL_ENABLE;
 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			int wait_loop = 10;
+			/* poll for Tx Enable ready */
+			do {
+				msleep(1);
+				txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+			} while (--wait_loop &&
+				 !(txdctl & IXGBE_TXDCTL_ENABLE));
+			if (!wait_loop)
+				DPRINTK(DRV, ERR, "Could not enable "
+					"Tx Queue %d\n", j);
+		}
 	}
 
 	for (i = 0; i < num_rx_rings; i++) {
@@ -2918,7 +3056,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 	}
 
 	/* reprogram the RAR[0] in case user changed it. */
-	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+			    IXGBE_RAH_AV);
 }
 
 /**
@@ -3286,6 +3425,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 }
 
 #endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+	return false;
+}
+
 /*
  * ixgbe_set_num_queues: Allocate queues for device, feature dependant
  * @adapter: board private structure to initialize
@@ -3299,6 +3451,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
+	/* Start with base case */
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_pools = adapter->num_rx_queues;
+	adapter->num_rx_queues_per_pool = 1;
+
+	if (ixgbe_set_sriov_queues(adapter))
+		return;
+
 #ifdef IXGBE_FCOE
 	if (ixgbe_set_fcoe_queues(adapter))
 		goto done;
@@ -3570,6 +3731,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 
 #endif /* IXGBE_FCOE */
 /**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+	adapter->rx_ring[0].reg_idx = adapter->num_vfs * 2;
+	adapter->tx_ring[0].reg_idx = adapter->num_vfs * 2;
+	if (adapter->num_vfs)
+		return true;
+	else
+		return false;
+}
+
+/**
  * ixgbe_cache_ring_register - Descriptor ring to register mapping
  * @adapter: board private structure to initialize
 *
@@ -3586,6 +3765,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 	adapter->rx_ring[0].reg_idx = 0;
 	adapter->tx_ring[0].reg_idx = 0;
 
+	if (ixgbe_cache_ring_sriov(adapter))
+		return;
+
 #ifdef IXGBE_FCOE
 	if (ixgbe_cache_ring_fcoe(adapter))
 		return;
@@ -3695,6 +3877,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 	adapter->atr_sample_rate = 0;
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		ixgbe_disable_sriov(adapter);
+
 	ixgbe_set_num_queues(adapter);
 
 	err = pci_enable_msi(adapter->pdev);
@@ -5474,7 +5659,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+			    IXGBE_RAH_AV);
 
 	return 0;
 }
@@ -5607,6 +5793,61 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 #endif /* IXGBE_FCOE */
 };
 
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+				     const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+	struct ixgbe_hw *hw = &adapter->hw;
+	int err;
+
+	if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+		return;
+
+	/* The 82599 supports up to 64 VFs per physical function
+	 * but this implementation limits allocation to 63 so that
+	 * basic networking resources are still available to the
+	 * physical function
+	 */
+	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+	if (err) {
+		DPRINTK(PROBE, ERR,
+			"Failed to enable PCI sriov: %d\n", err);
+		goto err_novfs;
+	}
+	/* If call to enable VFs succeeded then allocate memory
+	 * for per VF control structures.
+	 */
+	adapter->vfinfo =
+		kcalloc(adapter->num_vfs,
+			sizeof(struct vf_data_storage), GFP_KERNEL);
+	if (adapter->vfinfo) {
+		/* Now that we're sure SR-IOV is enabled
+		 * and memory allocated set up the mailbox parameters
+		 */
+		ixgbe_init_mbx_params_pf(hw);
+		memcpy(&hw->mbx.ops, ii->mbx_ops,
+		       sizeof(hw->mbx.ops));
+
+		/* Disable RSC when in SR-IOV mode */
+		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+				     IXGBE_FLAG2_RSC_ENABLED);
+		return;
+	}
+
+	/* Oh oh */
+	DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for VF "
+		"Data Storage - SRIOV disabled\n");
+	pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+	adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
+
 /**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -5781,6 +6022,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 		goto err_sw_init;
 	}
 
+	ixgbe_probe_vf(adapter, ii);
+
 	netdev->features = NETIF_F_SG |
 			   NETIF_F_IP_CSUM |
 			   NETIF_F_HW_VLAN_TX |
@@ -5801,6 +6044,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
 	netdev->vlan_features |= NETIF_F_SG;
 
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+				    IXGBE_FLAG_DCB_ENABLED);
 	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
 		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
 
@@ -5927,6 +6173,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 		ixgbe_setup_dca(adapter);
 	}
 #endif
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+			adapter->num_vfs);
+		for (i = 0; i < adapter->num_vfs; i++)
+			ixgbe_vf_configuration(pdev, (i | 0x10000000));
+	}
+
 	/* add san mac addr to netdev */
 	ixgbe_add_sanmac_netdev(netdev);
 
@@ -5939,6 +6192,8 @@ err_register:
 	ixgbe_clear_interrupt_scheme(adapter);
 err_sw_init:
 err_eeprom:
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		ixgbe_disable_sriov(adapter);
 	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
 	del_timer_sync(&adapter->sfp_timer);
 	cancel_work_sync(&adapter->sfp_task);
@@ -6007,6 +6262,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);
 
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		ixgbe_disable_sriov(adapter);
+
 	ixgbe_clear_interrupt_scheme(adapter);
 
 	ixgbe_release_hw_control(adapter);