 drivers/net/igb/igb.h         |   8
 drivers/net/igb/igb_ethtool.c |  17
 drivers/net/igb/igb_main.c    | 169
 3 files changed, 113 insertions(+), 81 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index c25ca17d3228..56de7ec15b46 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -47,7 +47,9 @@ struct igb_adapter;
 
 #define IGB_MIN_DYN_ITR 3000
 #define IGB_MAX_DYN_ITR 96000
-#define IGB_START_ITR 6000
+
+/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
+#define IGB_START_ITR 648
 
 #define IGB_DYN_ITR_PACKET_THRESHOLD 2
 #define IGB_DYN_ITR_LENGTH_LOW 200
@@ -170,9 +172,10 @@ struct igb_ring {
                 };
                 /* RX */
                 struct {
-                        int no_itr_adjust;
                         struct igb_queue_stats rx_stats;
                         struct napi_struct napi;
+                        int set_itr;
+                        struct igb_ring *buddy;
 #ifdef CONFIG_IGB_LRO
                         struct net_lro_mgr lro_mgr;
                         bool lro_used;
@@ -219,7 +222,6 @@ struct igb_adapter {
         u32 itr_setting;
         u16 tx_itr;
         u16 rx_itr;
-        int set_itr;
 
         struct work_struct reset_task;
         struct work_struct watchdog_task;
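
Note on the igb.h hunk above: IGB_START_ITR changes unit from an interrupt rate (6000 ints/s) to a register-formatted interval (648). Both encodings describe the same EITR programming: the old driver converted at write time with 1000000000 / (rate * 256), while the new constant folds the conversion in up front using ~1024 ns ticks shifted left by two bits, as the in-source comment says. Below is a minimal standalone sketch of that arithmetic; the helper names and the exact EITR bit layout are illustrative assumptions, not driver code.

#include <stdio.h>

/* Hypothetical helpers that only reproduce the arithmetic visible in the
 * patch; the precise EITR field layout is an assumption taken from the
 * in-driver comment, not from hardware documentation. */

/* Old scheme: ITR kept as interrupts/second, converted when written. */
static unsigned int eitr_from_rate_old(unsigned int ints_per_sec)
{
        return 1000000000u / (ints_per_sec * 256u);
}

/* New scheme: the register-ready value is stored directly.  One tick is
 * ~1024 ns and the interval field starts at bit 2, hence the << 2. */
static unsigned int eitr_from_rate_new(unsigned int ints_per_sec)
{
        return (1000000000u / (ints_per_sec * 1024u)) << 2;
}

int main(void)
{
        /* 651 vs. 648: the two formulas agree up to integer rounding. */
        printf("old write for 6000 ints/s: %u\n", eitr_from_rate_old(6000));
        printf("new IGB_START_ITR:         %u\n", eitr_from_rate_new(6000));
        return 0;
}
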
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 7db183093768..11aee1309951 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1861,6 +1861,8 @@ static int igb_set_coalesce(struct net_device *netdev,
                             struct ethtool_coalesce *ec)
 {
         struct igb_adapter *adapter = netdev_priv(netdev);
+        struct e1000_hw *hw = &adapter->hw;
+        int i;
 
         if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
             ((ec->rx_coalesce_usecs > 3) &&
@@ -1869,13 +1871,16 @@ static int igb_set_coalesce(struct net_device *netdev,
                 return -EINVAL;
 
         /* convert to rate of irq's per second */
-        if (ec->rx_coalesce_usecs <= 3)
+        if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
                 adapter->itr_setting = ec->rx_coalesce_usecs;
-        else
-                adapter->itr_setting = (1000000 / ec->rx_coalesce_usecs);
+                adapter->itr = IGB_START_ITR;
+        } else {
+                adapter->itr_setting = ec->rx_coalesce_usecs << 2;
+                adapter->itr = adapter->itr_setting;
+        }
 
-        if (netif_running(netdev))
-                igb_reinit_locked(adapter);
+        for (i = 0; i < adapter->num_rx_queues; i++)
+                wr32(adapter->rx_ring[i].itr_register, adapter->itr);
 
         return 0;
 }
@@ -1888,7 +1893,7 @@ static int igb_get_coalesce(struct net_device *netdev,
         if (adapter->itr_setting <= 3)
                 ec->rx_coalesce_usecs = adapter->itr_setting;
         else
-                ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
+                ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
 
         return 0;
 }
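
With ITR values now kept in register units, the ethtool hooks above convert rx_coalesce_usecs with a shift instead of a rate division: values of 1-3 select the adaptive modes and reset the starting ITR, anything larger is treated as microseconds and stored as usecs << 2 (get_coalesce reverses it with >> 2). A self-contained sketch of that round trip follows; the struct and function names are invented stand-ins, not struct igb_adapter or driver functions.

#include <stdio.h>

#define IGB_START_ITR 648       /* from the igb.h hunk above */

/* Stand-in for the handful of igb_adapter fields the conversion touches;
 * the real driver also writes the value to every RX ring's EITR register. */
struct coalesce_state {
        unsigned int itr_setting;
        unsigned int itr;
};

static void set_coalesce_usecs(struct coalesce_state *s, unsigned int usecs)
{
        if (usecs && usecs <= 3) {
                s->itr_setting = usecs;         /* adaptive-mode selector */
                s->itr = IGB_START_ITR;         /* restart AIM from the default */
        } else {
                s->itr_setting = usecs << 2;    /* microseconds -> register units */
                s->itr = s->itr_setting;
        }
}

static unsigned int get_coalesce_usecs(const struct coalesce_state *s)
{
        return (s->itr_setting <= 3) ? s->itr_setting : s->itr_setting >> 2;
}

int main(void)
{
        struct coalesce_state s = { 0, 0 };

        set_coalesce_usecs(&s, 125);    /* roughly 8000 ints/s */
        printf("stored itr %u, reported %u usecs\n", s.itr, get_coalesce_usecs(&s));
        return 0;
}
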
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 2a5303c311bc..aaed129f4ca0 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -255,6 +255,8 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
                 return -ENOMEM;
         }
 
+        adapter->rx_ring->buddy = adapter->tx_ring;
+
         for (i = 0; i < adapter->num_tx_queues; i++) {
                 struct igb_ring *ring = &(adapter->tx_ring[i]);
                 ring->adapter = adapter;
@@ -375,7 +377,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
                 igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
                 adapter->eims_enable_mask |= tx_ring->eims_value;
                 if (tx_ring->itr_val)
-                        writel(1000000000 / (tx_ring->itr_val * 256),
+                        writel(tx_ring->itr_val,
                                hw->hw_addr + tx_ring->itr_register);
                 else
                         writel(1, hw->hw_addr + tx_ring->itr_register);
@@ -383,10 +385,11 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 
         for (i = 0; i < adapter->num_rx_queues; i++) {
                 struct igb_ring *rx_ring = &adapter->rx_ring[i];
+                rx_ring->buddy = 0;
                 igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
                 adapter->eims_enable_mask |= rx_ring->eims_value;
                 if (rx_ring->itr_val)
-                        writel(1000000000 / (rx_ring->itr_val * 256),
+                        writel(rx_ring->itr_val,
                                hw->hw_addr + rx_ring->itr_register);
                 else
                         writel(1, hw->hw_addr + rx_ring->itr_register);
@@ -449,7 +452,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
                 if (err)
                         goto out;
                 ring->itr_register = E1000_EITR(0) + (vector << 2);
-                ring->itr_val = adapter->itr;
+                ring->itr_val = 976; /* ~4000 ints/sec */
                 vector++;
         }
         for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -1898,8 +1901,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
         mdelay(10);
 
         if (adapter->itr_setting > 3)
-                wr32(E1000_ITR,
-                     1000000000 / (adapter->itr * 256));
+                wr32(E1000_ITR, adapter->itr);
 
         /* Setup the HW Rx Head and Tail Descriptor Pointers and
          * the Base and Length of the Rx Descriptor Ring */
@@ -2463,38 +2465,60 @@ enum latency_range {
 };
 
 
-static void igb_lower_rx_eitr(struct igb_adapter *adapter,
-                              struct igb_ring *rx_ring)
+/**
+ * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ *
+ * Stores a new ITR value based strictly on packet size.  This
+ * algorithm is less sophisticated than that used in igb_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings.  The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see igb_param.c)
+ * NOTE:  This function is called only when operating in a multiqueue
+ *        receive environment.
+ * @rx_ring: pointer to ring
+ **/
+static void igb_update_ring_itr(struct igb_ring *rx_ring)
 {
-        struct e1000_hw *hw = &adapter->hw;
-        int new_val;
+        int new_val = rx_ring->itr_val;
+        int avg_wire_size = 0;
+        struct igb_adapter *adapter = rx_ring->adapter;
 
-        new_val = rx_ring->itr_val / 2;
-        if (new_val < IGB_MIN_DYN_ITR)
-                new_val = IGB_MIN_DYN_ITR;
+        if (!rx_ring->total_packets)
+                goto clear_counts; /* no packets, so don't do anything */
 
-        if (new_val != rx_ring->itr_val) {
-                rx_ring->itr_val = new_val;
-                wr32(rx_ring->itr_register,
-                     1000000000 / (new_val * 256));
+        /* For non-gigabit speeds, just fix the interrupt rate at 4000
+         * ints/sec - ITR timer value of 120 ticks.
+         */
+        if (adapter->link_speed != SPEED_1000) {
+                new_val = 120;
+                goto set_itr_val;
         }
-}
+        avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
 
-static void igb_raise_rx_eitr(struct igb_adapter *adapter,
-                              struct igb_ring *rx_ring)
-{
-        struct e1000_hw *hw = &adapter->hw;
-        int new_val;
+        /* Add 24 bytes to size to account for CRC, preamble, and gap */
+        avg_wire_size += 24;
 
-        new_val = rx_ring->itr_val * 2;
-        if (new_val > IGB_MAX_DYN_ITR)
-                new_val = IGB_MAX_DYN_ITR;
+        /* Don't starve jumbo frames */
+        avg_wire_size = min(avg_wire_size, 3000);
 
+        /* Give a little boost to mid-size frames */
+        if ((avg_wire_size > 300) && (avg_wire_size < 1200))
+                new_val = avg_wire_size / 3;
+        else
+                new_val = avg_wire_size / 2;
+
+set_itr_val:
         if (new_val != rx_ring->itr_val) {
                 rx_ring->itr_val = new_val;
-                wr32(rx_ring->itr_register,
-                     1000000000 / (new_val * 256));
+                rx_ring->set_itr = 1;
         }
+clear_counts:
+        rx_ring->total_bytes = 0;
+        rx_ring->total_packets = 0;
 }
 
 /**
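
The heuristic in igb_update_ring_itr above reduces to a pure function of the byte and packet counts seen since the last interrupt plus the link speed. The sketch below restates it outside the driver so the thresholds are easy to follow; the constants come from the hunk above, while the function name and harness around them are only illustrative.

#include <stdio.h>

#define SPEED_1000 1000         /* stand-in for the kernel's link-speed constant */

static int ring_itr_from_stats(unsigned long total_bytes,
                               unsigned long total_packets,
                               int link_speed, int cur_itr)
{
        int avg_wire_size;

        if (!total_packets)
                return cur_itr;                 /* nothing received, keep the old value */

        if (link_speed != SPEED_1000)
                return 120;                     /* fixed ~4000 ints/s below gigabit */

        avg_wire_size = total_bytes / total_packets;
        avg_wire_size += 24;                    /* CRC, preamble and inter-frame gap */
        if (avg_wire_size > 3000)
                avg_wire_size = 3000;           /* don't starve jumbo frames */

        /* mid-size frames get a somewhat higher interrupt rate */
        if (avg_wire_size > 300 && avg_wire_size < 1200)
                return avg_wire_size / 3;

        return avg_wire_size / 2;
}

int main(void)
{
        /* e.g. 64-byte and 1500-byte workloads at gigabit speed */
        printf("small packets -> EITR %d\n",
               ring_itr_from_stats(64 * 1000, 1000, SPEED_1000, 648));
        printf("full frames   -> EITR %d\n",
               ring_itr_from_stats(1500 * 100, 100, SPEED_1000, 648));
        return 0;
}
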
@@ -2561,8 +2585,7 @@ update_itr_done:
         return retval;
 }
 
-static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register,
-                        int rx_only)
+static void igb_set_itr(struct igb_adapter *adapter)
 {
         u16 current_itr;
         u32 new_itr = adapter->itr;
@@ -2578,26 +2601,23 @@ static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register,
                                             adapter->rx_itr,
                                             adapter->rx_ring->total_packets,
                                             adapter->rx_ring->total_bytes);
-        /* conservative mode (itr 3) eliminates the lowest_latency setting */
-        if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
-                adapter->rx_itr = low_latency;
 
-        if (!rx_only) {
+        if (adapter->rx_ring->buddy) {
                 adapter->tx_itr = igb_update_itr(adapter,
                                             adapter->tx_itr,
                                             adapter->tx_ring->total_packets,
                                             adapter->tx_ring->total_bytes);
-                /* conservative mode (itr 3) eliminates the
-                 * lowest_latency setting */
-                if (adapter->itr_setting == 3 &&
-                    adapter->tx_itr == lowest_latency)
-                        adapter->tx_itr = low_latency;
 
                 current_itr = max(adapter->rx_itr, adapter->tx_itr);
         } else {
                 current_itr = adapter->rx_itr;
         }
 
+        /* conservative mode (itr 3) eliminates the lowest_latency setting */
+        if (adapter->itr_setting == 3 &&
+            current_itr == lowest_latency)
+                current_itr = low_latency;
+
         switch (current_itr) {
         /* counts and packets in update_itr are dependent on these numbers */
         case lowest_latency:
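
After the rework above, igb_set_itr picks one latency class for the shared vector: the RX estimate alone when the ring has no TX buddy, otherwise the larger of the two classes (further toward bulk), with conservative mode (itr_setting == 3) then clamping lowest_latency up to low_latency. A compact restatement of that selection; the enum values mirror the driver's latency_range and pick_latency_class() is an invented name for this sketch, not a driver function.

/* Selection logic from igb_set_itr() above, restated standalone. */
enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
};

static enum latency_range pick_latency_class(enum latency_range rx_itr,
                                             enum latency_range tx_itr,
                                             int has_tx_buddy,
                                             unsigned int itr_setting)
{
        enum latency_range current_itr;

        /* with a buddy TX ring the vector serves both directions, so take
         * the class further toward bulk */
        current_itr = has_tx_buddy ? (rx_itr > tx_itr ? rx_itr : tx_itr)
                                   : rx_itr;

        /* conservative mode (itr 3) eliminates the lowest_latency setting */
        if (itr_setting == 3 && current_itr == lowest_latency)
                current_itr = low_latency;

        return current_itr;
}
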
@@ -2614,6 +2634,13 @@ static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register,
         }
 
 set_itr_now:
+        adapter->rx_ring->total_bytes = 0;
+        adapter->rx_ring->total_packets = 0;
+        if (adapter->rx_ring->buddy) {
+                adapter->rx_ring->buddy->total_bytes = 0;
+                adapter->rx_ring->buddy->total_packets = 0;
+        }
+
         if (new_itr != adapter->itr) {
                 /* this attempts to bias the interrupt rate towards Bulk
                  * by adding intermediate steps when interrupt rate is
@@ -2628,7 +2655,8 @@ set_itr_now:
                  * ends up being correct.
                  */
                 adapter->itr = new_itr;
-                adapter->set_itr = 1;
+                adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
+                adapter->rx_ring->set_itr = 1;
         }
 
         return;
@@ -2979,6 +3007,7 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
                 /* this is a hard error */
                 return NETDEV_TX_BUSY;
         }
+        skb_orphan(skb);
 
         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
                 tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -3312,8 +3341,6 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
         struct igb_adapter *adapter = tx_ring->adapter;
         struct e1000_hw *hw = &adapter->hw;
 
-        if (!tx_ring->itr_val)
-                wr32(E1000_EIMC, tx_ring->eims_value);
 #ifdef CONFIG_DCA
         if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                 igb_update_tx_dca(tx_ring);
@@ -3332,21 +3359,36 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
         return IRQ_HANDLED;
 }
 
+static void igb_write_itr(struct igb_ring *ring)
+{
+        struct e1000_hw *hw = &ring->adapter->hw;
+        if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
+                switch (hw->mac.type) {
+                case e1000_82576:
+                        wr32(ring->itr_register,
+                             ring->itr_val |
+                             0x80000000);
+                        break;
+                default:
+                        wr32(ring->itr_register,
+                             ring->itr_val |
+                             (ring->itr_val << 16));
+                        break;
+                }
+                ring->set_itr = 0;
+        }
+}
+
 static irqreturn_t igb_msix_rx(int irq, void *data)
 {
         struct igb_ring *rx_ring = data;
         struct igb_adapter *adapter = rx_ring->adapter;
-        struct e1000_hw *hw = &adapter->hw;
 
         /* Write the ITR value calculated at the end of the
          * previous interrupt.
          */
 
-        if (adapter->set_itr) {
-                wr32(rx_ring->itr_register,
-                     1000000000 / (rx_ring->itr_val * 256));
-                adapter->set_itr = 0;
-        }
+        igb_write_itr(rx_ring);
 
         if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
                 __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
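
igb_write_itr above becomes the single point where a deferred ITR update reaches the hardware: per the patch, the 82576 takes the new interval with bit 31 set, while older MACs get the value mirrored into the upper half of the register. A small sketch of just the word being composed; the enum and helper are stand-ins, and what the hardware does with those bits is not restated here.

#include <stdint.h>
#include <stdio.h>

/* Stand-in MAC types; only the 82576 case is distinguished by the patch. */
enum mac_type { MAC_82575, MAC_82576 };

/* Register word written by igb_write_itr() in the hunk above. */
static uint32_t eitr_word(enum mac_type mac, uint32_t itr_val)
{
        if (mac == MAC_82576)
                return itr_val | 0x80000000u;   /* interval plus bit 31 */
        return itr_val | (itr_val << 16);       /* interval mirrored high and low */
}

int main(void)
{
        printf("82576: 0x%08x\n", eitr_word(MAC_82576, 648));
        printf("82575: 0x%08x\n", eitr_word(MAC_82575, 648));
        return 0;
}
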
@@ -3493,13 +3535,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
         /* read ICR disables interrupts using IAM */
         u32 icr = rd32(E1000_ICR);
 
-        /* Write the ITR value calculated at the end of the
-         * previous interrupt.
-         */
-        if (adapter->set_itr) {
-                wr32(E1000_ITR, 1000000000 / (adapter->itr * 256));
-                adapter->set_itr = 0;
-        }
+        igb_write_itr(adapter->rx_ring);
 
         if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                 hw->mac.get_link_status = 1;
@@ -3529,13 +3565,7 @@ static irqreturn_t igb_intr(int irq, void *data)
         if (!icr)
                 return IRQ_NONE;  /* Not our interrupt */
 
-        /* Write the ITR value calculated at the end of the
-         * previous interrupt.
-         */
-        if (adapter->set_itr) {
-                wr32(E1000_ITR, 1000000000 / (adapter->itr * 256));
-                adapter->set_itr = 0;
-        }
+        igb_write_itr(adapter->rx_ring);
 
         /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
          * not set, then the adapter didn't send an interrupt */
@@ -3585,7 +3615,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
         if ((tx_clean_complete && (work_done < budget)) ||
             !netif_running(netdev)) {
                 if (adapter->itr_setting & 3)
-                        igb_set_itr(adapter, E1000_ITR, false);
+                        igb_set_itr(adapter);
                 netif_rx_complete(netdev, napi);
                 if (!test_bit(__IGB_DOWN, &adapter->state))
                         igb_irq_enable(adapter);
@@ -3619,15 +3649,11 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
 quit_polling:
                 netif_rx_complete(netdev, napi);
 
-                wr32(E1000_EIMS, rx_ring->eims_value);
-                if ((adapter->itr_setting & 3) && !rx_ring->no_itr_adjust &&
-                    (rx_ring->total_packets > IGB_DYN_ITR_PACKET_THRESHOLD)) {
-                        int mean_size = rx_ring->total_bytes /
-                                        rx_ring->total_packets;
-                        if (mean_size < IGB_DYN_ITR_LENGTH_LOW)
-                                igb_raise_rx_eitr(adapter, rx_ring);
-                        else if (mean_size > IGB_DYN_ITR_LENGTH_HIGH)
-                                igb_lower_rx_eitr(adapter, rx_ring);
+                if (adapter->itr_setting & 3) {
+                        if (adapter->num_rx_queues == 1)
+                                igb_set_itr(adapter);
+                        else
+                                igb_update_ring_itr(rx_ring);
                 }
 
                 if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -3972,7 +3998,6 @@ send_up:
                         dev_kfree_skb_irq(skb);
                         goto next_desc;
                 }
-                rx_ring->no_itr_adjust |= (staterr & E1000_RXD_STAT_DYNINT);
 
                 total_bytes += skb->len;
                 total_packets++;