author    Alexander Duyck <alexander.h.duyck@intel.com>    2009-02-19 23:39:44 -0500
committer David S. Miller <davem@davemloft.net>            2009-02-20 03:22:53 -0500
commit    e173952257d7a3d3c64de3039d9fc02d1fbf49c3 (patch)
tree      39952d57329428d90b26389931bdb183ec2dd009 /drivers/net
parent    1bfaf07bb1d7201d3c6cb984bccd9c2416e19b6c (diff)
igb: add pf side of VMDq support
Add the PF portion of VMDq support. This provides enough support so
that VMDq is enabled and the PF is functional without enabling VFs.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
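As a rough illustration of the receive-side behaviour this patch introduces (not part of the commit itself), the sketch below shows how the driver ends up selecting the MRQC multiple-receive-queue mode once VFs can be allocated: VMDq paired with 2-queue RSS when the PF has multiple RX queues, plain VMDq for a single queue, and the pre-existing 4-queue RSS mode when no VFs exist. The helper name pick_mrqc and the standalone main() are hypothetical and exist only for this example; the register constants are taken from the diff below.

#include <stdio.h>
#include <stdint.h>

/* MRQC mode values copied from the e1000_82575.h hunk of this patch */
#define E1000_MRQC_ENABLE_RSS_4Q       0x00000002
#define E1000_MRQC_ENABLE_VMDQ         0x00000003
#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q  0x00000005

/* Mirrors the branches added to igb_configure_rx() in this patch.
 * A return value of 0 stands for "MRQC left untouched". */
static uint32_t pick_mrqc(unsigned int vfs_allocated_count,
                          unsigned int num_rx_queues)
{
        if (num_rx_queues > 1)
                return vfs_allocated_count ? E1000_MRQC_ENABLE_VMDQ_RSS_2Q
                                           : E1000_MRQC_ENABLE_RSS_4Q;
        return vfs_allocated_count ? E1000_MRQC_ENABLE_VMDQ : 0;
}

int main(void)
{
        printf("no VFs, 4 rx queues: MRQC = 0x%08x\n",
               (unsigned int)pick_mrqc(0, 4));
        printf("7 VFs,  2 rx queues: MRQC = 0x%08x\n",
               (unsigned int)pick_mrqc(7, 2));
        printf("7 VFs,  1 rx queue : MRQC = 0x%08x\n",
               (unsigned int)pick_mrqc(7, 1));
        return 0;
}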
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/igb/e1000_82575.h    |  26
-rw-r--r--  drivers/net/igb/e1000_defines.h  |   2
-rw-r--r--  drivers/net/igb/e1000_regs.h     |   7
-rw-r--r--  drivers/net/igb/igb.h            |   3
-rw-r--r--  drivers/net/igb/igb_ethtool.c    |   2
-rw-r--r--  drivers/net/igb/igb_main.c       | 174
6 files changed, 195 insertions(+), 19 deletions(-)
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 49b41c92a8c8..116714f346bb 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -40,8 +40,11 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
 #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT         2  /* Shift _left_ */
 #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF        0x02000000
 #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS  0x0A000000
+#define E1000_SRRCTL_DROP_EN                    0x80000000
 
 #define E1000_MRQC_ENABLE_RSS_4Q            0x00000002
+#define E1000_MRQC_ENABLE_VMDQ              0x00000003
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q       0x00000005
 #define E1000_MRQC_RSS_FIELD_IPV4_UDP       0x00400000
 #define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
@@ -159,4 +162,27 @@ struct e1000_adv_tx_context_desc {
 #define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
 #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
 
+/* Easy defines for setting default pool, would normally be left a zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK  (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC         (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL   (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN         (1 << 30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE        0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE       0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE       0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE      0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE       0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM        0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME       0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN    0x40000000 /* Vlan stripping enable */
+
+#define ALL_QUEUES   0xFFFF
+
+
 #endif
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 5a32a7004e0a..d7613db78000 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -399,6 +399,8 @@
 #define E1000_RAH_AV            0x80000000 /* Receive descriptor valid */
 #define E1000_RAL_MAC_ADDR_LEN  4
 #define E1000_RAH_MAC_ADDR_LEN  2
+#define E1000_RAH_POOL_MASK     0x03FC0000
+#define E1000_RAH_POOL_1        0x00040000
 
 /* Error Codes */
 #define E1000_ERR_NVM   1
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 95ed8ec15770..5d00c864d106 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -292,7 +292,7 @@ enum {
 #define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
                         (0x054E4 + ((_i - 16) * 8)))
 #define E1000_VFTA     0x05600 /* VLAN Filter Table Array - RW Array */
-#define E1000_VMD_CTL  0x0581C /* VMDq Control - RW */
+#define E1000_VT_CTL   0x0581C /* VMDq Control - RW */
 #define E1000_WUC      0x05800 /* Wakeup Control - RW */
 #define E1000_WUFC     0x05808 /* Wakeup Filter Control - RW */
 #define E1000_WUS      0x05810 /* Wakeup Status - RO */
@@ -320,6 +320,11 @@ enum {
 #define E1000_RETA(_i)   (0x05C00 + ((_i) * 4))
 #define E1000_RSSRK(_i)  (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
 
+/* VT Registers */
+#define E1000_QDE        0x02408 /* Queue Drop Enable - RW */
+/* These act per VF so an array friendly macro is used */
+#define E1000_VMOLR(_n)  (0x05AD0 + (4 * (_n)))
+
 #define wr32(reg, value) (writel(value, hw->hw_addr + reg))
 #define rd32(reg) (readl(hw->hw_addr + reg))
 #define wrfl() ((void)rd32(E1000_STATUS))
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3d3e5f6cd313..d925f7dd7fb2 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -88,8 +88,7 @@ struct igb_adapter;
 #define IGB_RXBUFFER_2048  2048
 #define IGB_RXBUFFER_16384 16384
 
-/* Packet Buffer allocations */
-
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE       16
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 31f9a64773ff..34a8a0fadf2d 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -398,7 +398,7 @@ static void igb_get_regs(struct net_device *netdev,
         regs_buff[34] = rd32(E1000_RLPML);
         regs_buff[35] = rd32(E1000_RFCTL);
         regs_buff[36] = rd32(E1000_MRQC);
-        regs_buff[37] = rd32(E1000_VMD_CTL);
+        regs_buff[37] = rd32(E1000_VT_CTL);
 
         /* Transmit */
         regs_buff[38] = rd32(E1000_TCTL);
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 0dcc0c109b9d..c7c7eeba3366 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -122,6 +122,10 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
 static void igb_vlan_rx_add_vid(struct net_device *, u16);
 static void igb_vlan_rx_kill_vid(struct net_device *, u16);
 static void igb_restore_vlan(struct igb_adapter *);
+static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
+static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
+static inline void igb_set_vmolr(struct e1000_hw *, int);
+static inline void igb_set_vf_rlpml(struct igb_adapter *, int, int);
 
 static int igb_suspend(struct pci_dev *, pm_message_t);
 #ifdef CONFIG_PM
@@ -888,6 +892,9 @@ int igb_up(struct igb_adapter *adapter)
         if (adapter->msix_entries)
                 igb_configure_msix(adapter);
 
+        igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
+        igb_set_vmolr(hw, adapter->vfs_allocated_count);
+
         /* Clear any pending interrupts. */
         rd32(E1000_ICR);
         igb_irq_enable(adapter);
@@ -1617,6 +1624,9 @@ static int igb_open(struct net_device *netdev)
          * clean_rx handler before we do so. */
         igb_configure(adapter);
 
+        igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
+        igb_set_vmolr(hw, adapter->vfs_allocated_count);
+
         err = igb_request_irq(adapter);
         if (err)
                 goto err_req_irq;
@@ -1797,10 +1807,11 @@ static void igb_configure_tx(struct igb_adapter *adapter)
                 wr32(E1000_DCA_TXCTRL(j), txctrl);
         }
 
-        /* Use the default values for the Tx Inter Packet Gap (IPG) timer */
+        /* disable queue 0 to prevent tail bump w/o re-configuration */
+        if (adapter->vfs_allocated_count)
+                wr32(E1000_TXDCTL(0), 0);
 
         /* Program the Transmit Control Register */
-
         tctl = rd32(E1000_TCTL);
         tctl &= ~E1000_TCTL_CT;
         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
@@ -1954,6 +1965,30 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
                 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
         }
 
+        /* Attention!!! For SR-IOV PF driver operations you must enable
+         * queue drop for all VF and PF queues to prevent head of line blocking
+         * if an un-trusted VF does not provide descriptors to hardware.
+         */
+        if (adapter->vfs_allocated_count) {
+                u32 vmolr;
+
+                j = adapter->rx_ring[0].reg_idx;
+
+                /* set all queue drop enable bits */
+                wr32(E1000_QDE, ALL_QUEUES);
+                srrctl |= E1000_SRRCTL_DROP_EN;
+
+                /* disable queue 0 to prevent tail write w/o re-config */
+                wr32(E1000_RXDCTL(0), 0);
+
+                vmolr = rd32(E1000_VMOLR(j));
+                if (rctl & E1000_RCTL_LPE)
+                        vmolr |= E1000_VMOLR_LPE;
+                if (adapter->num_rx_queues > 0)
+                        vmolr |= E1000_VMOLR_RSSE;
+                wr32(E1000_VMOLR(j), vmolr);
+        }
+
         for (i = 0; i < adapter->num_rx_queues; i++) {
                 j = adapter->rx_ring[i].reg_idx;
                 wr32(E1000_SRRCTL(j), srrctl);
@@ -1963,6 +1998,54 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 }
 
 /**
+ * igb_rlpml_set - set maximum receive packet size
+ * @adapter: board private structure
+ *
+ * Configure maximum receivable packet size.
+ **/
+static void igb_rlpml_set(struct igb_adapter *adapter)
+{
+        u32 max_frame_size = adapter->max_frame_size;
+        struct e1000_hw *hw = &adapter->hw;
+        u16 pf_id = adapter->vfs_allocated_count;
+
+        if (adapter->vlgrp)
+                max_frame_size += VLAN_TAG_SIZE;
+
+        /* if vfs are enabled we set RLPML to the largest possible request
+         * size and set the VMOLR RLPML to the size we need */
+        if (pf_id) {
+                igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
+                max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
+        }
+
+        wr32(E1000_RLPML, max_frame_size);
+}
+
+/**
+ * igb_configure_vt_default_pool - Configure VT default pool
+ * @adapter: board private structure
+ *
+ * Configure the default pool
+ **/
+static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
+{
+        struct e1000_hw *hw = &adapter->hw;
+        u16 pf_id = adapter->vfs_allocated_count;
+        u32 vtctl;
+
+        /* not in sr-iov mode - do nothing */
+        if (!pf_id)
+                return;
+
+        vtctl = rd32(E1000_VT_CTL);
+        vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+                   E1000_VT_CTL_DISABLE_DEF_POOL);
+        vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+        wr32(E1000_VT_CTL, vtctl);
+}
+
+/**
  * igb_configure_rx - Configure receive Unit after Reset
  * @adapter: board private structure
 *
@@ -2033,8 +2116,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
                         writel(reta.dword,
                                hw->hw_addr + E1000_RETA(0) + (j & ~3));
                 }
-
-                mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+                if (adapter->vfs_allocated_count)
+                        mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+                else
+                        mrqc = E1000_MRQC_ENABLE_RSS_4Q;
 
                 /* Fill out hash function seeds */
                 for (j = 0; j < 10; j++)
@@ -2059,6 +2144,9 @@ static void igb_configure_rx(struct igb_adapter *adapter)
                 rxcsum |= E1000_RXCSUM_PCSD;
                 wr32(E1000_RXCSUM, rxcsum);
         } else {
+                /* Enable multi-queue for sr-iov */
+                if (adapter->vfs_allocated_count)
+                        wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
                 /* Enable Receive Checksum Offload for TCP and UDP */
                 rxcsum = rd32(E1000_RXCSUM);
                 if (adapter->rx_csum)
@@ -2069,11 +2157,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
                 wr32(E1000_RXCSUM, rxcsum);
         }
 
-        if (adapter->vlgrp)
-                wr32(E1000_RLPML,
-                     adapter->max_frame_size + VLAN_TAG_SIZE);
-        else
-                wr32(E1000_RLPML, adapter->max_frame_size);
+        /* Set the default pool for the PF's first queue */
+        igb_configure_vt_default_pool(adapter);
+
+        igb_rlpml_set(adapter);
 
         /* Enable Receives */
         wr32(E1000_RCTL, rctl);
@@ -2303,6 +2390,8 @@ static int igb_set_mac(struct net_device *netdev, void *p)
 
         hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
 
+        igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
+
         return 0;
 }
 
@@ -2362,7 +2451,11 @@ static void igb_set_multi(struct net_device *netdev)
                 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
                 mc_ptr = mc_ptr->next;
         }
-        igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count);
+        igb_update_mc_addr_list(hw, mta_list, i,
+                                adapter->vfs_allocated_count + 1,
+                                mac->rar_entry_count);
+
+        igb_set_mc_list_pools(adapter, i, mac->rar_entry_count);
         kfree(mta_list);
 }
 
@@ -3222,7 +3315,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
                 return -EINVAL;
         }
 
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
         if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
                 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
                 return -EINVAL;
@@ -3256,6 +3348,12 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 #else
         adapter->rx_buffer_len = PAGE_SIZE / 2;
 #endif
+
+        /* if sr-iov is enabled we need to force buffer size to 1K or larger */
+        if (adapter->vfs_allocated_count &&
+            (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
+                adapter->rx_buffer_len = IGB_RXBUFFER_1024;
+
         /* adjust allocation if LPE protects us, and we aren't using SBP */
         if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
             (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
@@ -4462,8 +4560,6 @@ static void igb_vlan_rx_register(struct net_device *netdev,
                 rctl &= ~E1000_RCTL_CFIEN;
                 wr32(E1000_RCTL, rctl);
                 igb_update_mng_vlan(adapter);
-                wr32(E1000_RLPML,
-                     adapter->max_frame_size + VLAN_TAG_SIZE);
         } else {
                 /* disable VLAN tag insert/strip */
                 ctrl = rd32(E1000_CTRL);
@@ -4474,10 +4570,10 @@ static void igb_vlan_rx_register(struct net_device *netdev,
                         igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
                         adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
                 }
-                wr32(E1000_RLPML,
-                     adapter->max_frame_size);
         }
 
+        igb_rlpml_set(adapter);
+
         if (!test_bit(__IGB_DOWN, &adapter->state))
                 igb_irq_enable(adapter);
 }
@@ -4841,4 +4937,52 @@ static void igb_io_resume(struct pci_dev *pdev)
         igb_get_hw_control(adapter);
 }
 
+static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
+{
+        u32 reg_data;
+
+        reg_data = rd32(E1000_VMOLR(vfn));
+        reg_data |= E1000_VMOLR_BAM |    /* Accept broadcast */
+                    E1000_VMOLR_ROPE |   /* Accept packets matched in UTA */
+                    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
+                    E1000_VMOLR_AUPE |   /* Accept untagged packets */
+                    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+        wr32(E1000_VMOLR(vfn), reg_data);
+}
+
+static inline void igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+                                    int vfn)
+{
+        struct e1000_hw *hw = &adapter->hw;
+        u32 vmolr;
+
+        vmolr = rd32(E1000_VMOLR(vfn));
+        vmolr &= ~E1000_VMOLR_RLPML_MASK;
+        vmolr |= size | E1000_VMOLR_LPE;
+        wr32(E1000_VMOLR(vfn), vmolr);
+}
+
+static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
+{
+        u32 reg_data;
+
+        reg_data = rd32(E1000_RAH(entry));
+        reg_data &= ~E1000_RAH_POOL_MASK;
+        reg_data |= E1000_RAH_POOL_1 << pool;
+        wr32(E1000_RAH(entry), reg_data);
+}
+
+static void igb_set_mc_list_pools(struct igb_adapter *adapter,
+                                  int entry_count, u16 total_rar_filters)
+{
+        struct e1000_hw *hw = &adapter->hw;
+        int i = adapter->vfs_allocated_count + 1;
+
+        if ((i + entry_count) < total_rar_filters)
+                total_rar_filters = i + entry_count;
+
+        for (; i < total_rar_filters; i++)
+                igb_set_rah_pool(hw, adapter->vfs_allocated_count, i);
+}
+
 /* igb_main.c */