author	Alexander Duyck <alexander.h.duyck@intel.com>	2009-10-27 11:53:25 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-28 04:20:34 -0400
commit	06cf2666c7f5cc4ba4bf2687d041c61ada76fa3c
tree	50c63c9a271e0da49430a7fb99083e59c1210113 /drivers/net/igb/igb_main.c
parent	e694e964fc1241b4981873bdccce70438d5f0394
igb: move the multiple receive queue configuration into separate function
This patch moves the multiple receive queue configuration out of igb_configure_rx and into a separate function, igb_setup_mrqc. The multiple receive queue configuration can be done just prior to enabling the RX, which allows us to separate queue enablement from receive queue layout configuration.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
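The shape of the change is easiest to see as the call ordering it establishes in igb_configure(): the queue layout (MRQC/RETA/RSS key) is programmed alongside the other control-register setup, while igb_configure_rx() is left with per-ring enablement only. A compilable stub sketch of that ordering (stand-in types and printf stubs only, not driver code):

	#include <stdio.h>

	struct igb_adapter { int num_rx_queues; };

	/* stubs standing in for the real configuration steps */
	static void igb_setup_tctl(struct igb_adapter *a)   { puts("tctl"); }
	static void igb_setup_mrqc(struct igb_adapter *a)   { puts("mrqc: RSS/VMDq layout"); }
	static void igb_setup_rctl(struct igb_adapter *a)   { puts("rctl"); }
	static void igb_configure_tx(struct igb_adapter *a) { puts("tx rings"); }
	static void igb_configure_rx(struct igb_adapter *a) { puts("rx rings only"); }

	static void igb_configure(struct igb_adapter *adapter)
	{
		igb_setup_tctl(adapter);
		igb_setup_mrqc(adapter);	/* added by this patch */
		igb_setup_rctl(adapter);
		igb_configure_tx(adapter);
		igb_configure_rx(adapter);
	}

	int main(void)
	{
		struct igb_adapter a = { .num_rx_queues = 4 };
		igb_configure(&a);
		return 0;
	}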
Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--	drivers/net/igb/igb_main.c	217
1 file changed, 111 insertions(+), 106 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 3dc8e88c5188..ea0560484dc7 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -82,6 +82,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
 void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
@@ -1115,6 +1116,7 @@ static void igb_configure(struct igb_adapter *adapter)
 	igb_restore_vlan(adapter);
 
 	igb_setup_tctl(adapter);
+	igb_setup_mrqc(adapter);
 	igb_setup_rctl(adapter);
 
 	igb_configure_tx(adapter);
@@ -1157,7 +1159,6 @@ int igb_up(struct igb_adapter *adapter)
 	if (adapter->msix_entries)
 		igb_configure_msix(adapter);
 
-	igb_vmm_control(adapter);
 	igb_set_vmolr(hw, adapter->vfs_allocated_count);
 
 	/* Clear any pending interrupts. */
@@ -1928,7 +1929,6 @@ static int igb_open(struct net_device *netdev)
 	 * clean_rx handler before we do so. */
 	igb_configure(adapter);
 
-	igb_vmm_control(adapter);
 	igb_set_vmolr(hw, adapter->vfs_allocated_count);
 
 	err = igb_request_irq(adapter);
@@ -2217,6 +2217,111 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 }
 
 /**
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 mrqc, rxcsum;
+	u32 j, num_rx_queues, shift = 0, shift2 = 0;
+	union e1000_reta {
+		u32 dword;
+		u8 bytes[4];
+	} reta;
+	static const u8 rsshash[40] = {
+		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
+		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
+		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
+		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+
+	/* Fill out hash function seeds */
+	for (j = 0; j < 10; j++) {
+		u32 rsskey = rsshash[(j * 4)];
+		rsskey |= rsshash[(j * 4) + 1] << 8;
+		rsskey |= rsshash[(j * 4) + 2] << 16;
+		rsskey |= rsshash[(j * 4) + 3] << 24;
+		array_wr32(E1000_RSSRK(0), j, rsskey);
+	}
+
+	num_rx_queues = adapter->num_rx_queues;
+
+	if (adapter->vfs_allocated_count) {
+		/* 82575 and 82576 supports 2 RSS queues for VMDq */
+		switch (hw->mac.type) {
+		case e1000_82576:
+			shift = 3;
+			num_rx_queues = 2;
+			break;
+		case e1000_82575:
+			shift = 2;
+			shift2 = 6;
+		default:
+			break;
+		}
+	} else {
+		if (hw->mac.type == e1000_82575)
+			shift = 6;
+	}
+
+	for (j = 0; j < (32 * 4); j++) {
+		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+		if (shift2)
+			reta.bytes[j & 3] |= num_rx_queues << shift2;
+		if ((j & 3) == 3)
+			wr32(E1000_RETA(j >> 2), reta.dword);
+	}
+
+	/*
+	 * Disable raw packet checksumming so that RSS hash is placed in
+	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
+	 * offloads as they are enabled by default
+	 */
+	rxcsum = rd32(E1000_RXCSUM);
+	rxcsum |= E1000_RXCSUM_PCSD;
+
+	if (adapter->hw.mac.type >= e1000_82576)
+		/* Enable Receive Checksum Offload for SCTP */
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+
+	/* Don't need to set TUOFL or IPOFL, they default to 1 */
+	wr32(E1000_RXCSUM, rxcsum);
+
+	/* If VMDq is enabled then we set the appropriate mode for that, else
+	 * we default to RSS so that an RSS hash is calculated per packet even
+	 * if we are only using one queue */
+	if (adapter->vfs_allocated_count) {
+		if (hw->mac.type > e1000_82575) {
+			/* Set the default pool for the PF's first queue */
+			u32 vtctl = rd32(E1000_VT_CTL);
+			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+				   E1000_VT_CTL_DISABLE_DEF_POOL);
+			vtctl |= adapter->vfs_allocated_count <<
+				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+			wr32(E1000_VT_CTL, vtctl);
+		}
+		if (adapter->num_rx_queues > 1)
+			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+		else
+			mrqc = E1000_MRQC_ENABLE_VMDQ;
+	} else {
+		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+	}
+	igb_vmm_control(adapter);
+
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+		 E1000_MRQC_RSS_FIELD_IPV4_TCP);
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
+		 E1000_MRQC_RSS_FIELD_IPV6_TCP);
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
+		 E1000_MRQC_RSS_FIELD_IPV6_UDP);
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
+		 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+	wr32(E1000_MRQC, mrqc);
+}
+
+/**
  * igb_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
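The two packing loops in igb_setup_mrqc() above are self-contained enough to run on the host: the 40-byte RSS key folds into ten little-endian 32-bit RSSRK words, and 128 one-byte redirection entries pack four at a time into 32 RETA dwords. A minimal sketch, with array_wr32()/wr32() replaced by printf and the shift values taken from the 82576 VMDq case in the patch:

	#include <stdio.h>
	#include <stdint.h>

	static const uint8_t rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	int main(void)
	{
		union {
			uint32_t dword;
			uint8_t bytes[4];
		} reta;
		uint32_t num_rx_queues = 2;	/* 82576 + VMDq: RSS limited to 2 queues */
		uint32_t shift = 3, shift2 = 0;	/* shift2 = 6 only in the 82575 VMDq case */
		uint32_t j;

		/* Fill out hash function seeds: four key bytes per RSSRK word */
		for (j = 0; j < 10; j++) {
			uint32_t rsskey = rsshash[(j * 4)];
			rsskey |= rsshash[(j * 4) + 1] << 8;
			rsskey |= rsshash[(j * 4) + 2] << 16;
			rsskey |= (uint32_t)rsshash[(j * 4) + 3] << 24;
			printf("RSSRK[%u] = 0x%08x\n", (unsigned)j, (unsigned)rsskey);
		}

		/* Spread entries round-robin; flush each dword once all four
		 * byte slots are filled */
		for (j = 0; j < 32 * 4; j++) {
			reta.bytes[j & 3] = (j % num_rx_queues) << shift;
			if (shift2)
				reta.bytes[j & 3] |= num_rx_queues << shift2;
			if ((j & 3) == 3)
				printf("RETA[%u] = 0x%08x\n",
				       (unsigned)(j >> 2), (unsigned)reta.dword);
		}
		return 0;
	}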
@@ -2298,29 +2403,6 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
 }
 
 /**
- * igb_configure_vt_default_pool - Configure VT default pool
- * @adapter: board private structure
- *
- * Configure the default pool
- **/
-static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	u16 pf_id = adapter->vfs_allocated_count;
-	u32 vtctl;
-
-	/* not in sr-iov mode - do nothing */
-	if (!pf_id)
-		return;
-
-	vtctl = rd32(E1000_VT_CTL);
-	vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
-		   E1000_VT_CTL_DISABLE_DEF_POOL);
-	vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
-	wr32(E1000_VT_CTL, vtctl);
-}
-
-/**
  * igb_configure_rx_ring - Configure a receive ring after Reset
  * @adapter: board private structure
  * @ring: receive ring to be configured
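The deleted helper's read-modify-write of E1000_VT_CTL now lives inline in igb_setup_mrqc() (note the inline version additionally gates the write on mac.type > e1000_82575). A host-side sketch of that read-modify-write pattern; the shift/mask/bit values below are illustrative placeholders for this demo, not quoted from e1000_defines.h:

	#include <stdio.h>
	#include <stdint.h>

	#define VT_CTL_DEFAULT_POOL_SHIFT 7	/* assumed for this demo */
	#define VT_CTL_DEFAULT_POOL_MASK  (0x7u << VT_CTL_DEFAULT_POOL_SHIFT)
	#define VT_CTL_DISABLE_DEF_POOL   (1u << 29)	/* assumed for this demo */

	int main(void)
	{
		uint32_t vt_ctl = 0xdeadbeef;	/* stand-in for rd32(E1000_VT_CTL) */
		uint16_t pf_id = 4;		/* adapter->vfs_allocated_count */

		/* clear the default-pool field and its disable bit, then
		 * point the default pool at the pool index after the VFs */
		vt_ctl &= ~(VT_CTL_DEFAULT_POOL_MASK | VT_CTL_DISABLE_DEF_POOL);
		vt_ctl |= (uint32_t)pf_id << VT_CTL_DEFAULT_POOL_SHIFT;

		printf("VT_CTL = 0x%08x\n", vt_ctl);	/* stand-in for wr32() */
		return 0;
	}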
@@ -2391,85 +2473,8 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
 **/
 static void igb_configure_rx(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
-	u32 rctl, rxcsum;
 	int i;
 
-	/* disable receives while setting up the descriptors */
-	rctl = rd32(E1000_RCTL);
-	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-	wrfl();
-	mdelay(10);
-
-	if (adapter->itr_setting > 3)
-		wr32(E1000_ITR, adapter->itr);
-
-	/* Setup the HW Rx Head and Tail Descriptor Pointers and
-	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
-
-	if (adapter->num_rx_queues > 1) {
-		u32 random[10];
-		u32 mrqc;
-		u32 j, shift;
-		union e1000_reta {
-			u32 dword;
-			u8 bytes[4];
-		} reta;
-
-		get_random_bytes(&random[0], 40);
-
-		if (hw->mac.type >= e1000_82576)
-			shift = 0;
-		else
-			shift = 6;
-		for (j = 0; j < (32 * 4); j++) {
-			reta.bytes[j & 3] =
-				adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
-			if ((j & 3) == 3)
-				writel(reta.dword,
-				       hw->hw_addr + E1000_RETA(0) + (j & ~3));
-		}
-		if (adapter->vfs_allocated_count)
-			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
-		else
-			mrqc = E1000_MRQC_ENABLE_RSS_4Q;
-
-		/* Fill out hash function seeds */
-		for (j = 0; j < 10; j++)
-			array_wr32(E1000_RSSRK(0), j, random[j]);
-
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
-			 E1000_MRQC_RSS_FIELD_IPV6_TCP);
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
-			 E1000_MRQC_RSS_FIELD_IPV6_UDP);
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
-			 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
-
-		wr32(E1000_MRQC, mrqc);
-	} else if (adapter->vfs_allocated_count) {
-		/* Enable multi-queue for sr-iov */
-		wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
-	}
-
-	/* Enable Receive Checksum Offload for TCP and UDP */
-	rxcsum = rd32(E1000_RXCSUM);
-	/* Disable raw packet checksumming */
-	rxcsum |= E1000_RXCSUM_PCSD;
-
-	if (adapter->hw.mac.type == e1000_82576)
-		/* Enable Receive Checksum Offload for SCTP */
-		rxcsum |= E1000_RXCSUM_CRCOFL;
-
-	/* Don't need to set TUOFL or IPOFL, they default to 1 */
-	wr32(E1000_RXCSUM, rxcsum);
-
-	/* Set the default pool for the PF's first queue */
-	igb_configure_vt_default_pool(adapter);
-
 	/* set UTA to appropriate mode */
 	igb_set_uta(adapter);
 
@@ -2477,10 +2482,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
 			 adapter->vfs_allocated_count);
 
-	igb_rlpml_set(adapter);
-
-	/* Enable Receives */
-	wr32(E1000_RCTL, rctl);
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /**
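Assembled from the context and added lines in the two hunks above, the post-patch igb_configure_rx() reduces to UTA/RAR setup plus the per-ring loop. A compilable reconstruction with stand-in types and printf stubs (the real code takes the MAC address from adapter->hw.mac.addr; any comment lines not visible in the diff context are omitted):

	#include <stdio.h>

	struct igb_ring { int reg_idx; };
	struct igb_adapter {
		int num_rx_queues;
		struct igb_ring rx_ring[4];
		int vfs_allocated_count;
		unsigned char mac_addr[6];	/* stand-in for hw.mac.addr */
	};

	static void igb_set_uta(struct igb_adapter *a) { puts("UTA mode"); }
	static void igb_rar_set_qsel(struct igb_adapter *a, unsigned char *addr,
				     int idx, int pool)
	{ printf("RAR[%d] -> pool %d\n", idx, pool); }
	static void igb_configure_rx_ring(struct igb_adapter *a, struct igb_ring *r)
	{ printf("ring %d: head/tail/base/len\n", r->reg_idx); }

	static void igb_configure_rx(struct igb_adapter *adapter)
	{
		int i;

		/* set UTA to appropriate mode */
		igb_set_uta(adapter);

		igb_rar_set_qsel(adapter, adapter->mac_addr, 0,
				 adapter->vfs_allocated_count);

		/* Setup the HW Rx Head and Tail Descriptor Pointers and
		 * the Base and Length of the Rx Descriptor Ring */
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
	}

	int main(void)
	{
		struct igb_adapter a = { .num_rx_queues = 2,
					 .rx_ring = { {0}, {1} } };
		igb_configure_rx(&a);
		return 0;
	}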