author     PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>   2008-06-27 14:00:39 -0400
committer  Jeff Garzik <jgarzik@redhat.com>                  2008-07-04 08:47:01 -0400
commit     844290e56067aed0a54142d756565abb9614136c (patch)
tree       c6c122522c64d2c383ccc7bb2389a0c713de7bb1 /drivers/net/igb
parent     662d7205b3db0bf9ebcae31f30ed72a1bceb47af (diff)
igb: add NAPI Rx queue support
Update the NAPI implementation to use the new napi_struct infrastructure.
This removes the need for multiple net_device structs to implement
multiqueue NAPI.
Signed-off-by: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
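[Editorial note] For context, the per-ring model this patch adopts gives each Rx ring its own napi_struct and poll routine on the single real net_device, instead of registering NAPI through multiple net_device structs. The following is a minimal, illustrative sketch of that pattern using the 2.6.26-era NAPI API; my_ring, my_poll and my_clean_rx are hypothetical names and are not part of igb (the driver's actual poll routines are igb_clean and igb_clean_rx_ring_msix, shown in the diff below).

```c
#include <linux/netdevice.h>

/* Hypothetical per-queue ring: one napi_struct per Rx queue. */
struct my_ring {
	struct napi_struct napi;
	struct net_device *netdev;
	/* ... descriptor ring state ... */
};

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	int work_done = 0;

	/* Service only this ring, up to 'budget' packets, e.g.:
	 * work_done = my_clean_rx(ring, budget);
	 */

	if (work_done < budget) {
		/* 2.6.26-era completion call (later renamed napi_complete) */
		netif_rx_complete(ring->netdev, napi);
		/* re-enable this ring's interrupt (its EIMS bit) here */
	}
	return work_done;
}

/* Registration at init time: one call per Rx ring, no extra net_devices:
 * netif_napi_add(netdev, &adapter->rx_ring[i].napi, my_poll, 64);
 */
```

In the patch itself, igb_alloc_queues() performs the per-ring netif_napi_add(), and igb_request_msix() overrides ring->napi.poll with the MSI-X-specific routine.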
Diffstat (limited to 'drivers/net/igb')
-rw-r--r--  drivers/net/igb/igb.h       |   2
-rw-r--r--  drivers/net/igb/igb_main.c  | 148
2 files changed, 61 insertions, 89 deletions
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 0eecb8b2abd2..2c48eec17660 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -150,6 +150,7 @@ struct igb_ring {
 	u16 itr_register;
 	u16 cpu;
 
+	int queue_index;
 	unsigned int total_bytes;
 	unsigned int total_packets;
 
@@ -265,6 +266,7 @@ struct igb_adapter {
 	int msg_enable;
 	struct msix_entry *msix_entries;
 	u32 eims_enable_mask;
+	u32 eims_other;
 
 	/* to not mess up cache alignment, always add to the bottom */
 	unsigned long state;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index e5dff61f162e..7bc6fae182a7 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -227,12 +227,11 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
 		ring->adapter = adapter;
+		ring->queue_index = i;
 		ring->itr_register = E1000_ITR;
 
-		if (!ring->napi.poll)
-			netif_napi_add(adapter->netdev, &ring->napi, igb_clean,
-				       adapter->napi.weight /
-				       adapter->num_rx_queues);
+		/* set a default napi handler for each rx_ring */
+		netif_napi_add(adapter->netdev, &ring->napi, igb_clean, 64);
 	}
 	return 0;
 }
@@ -300,9 +299,6 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 	array_wr32(E1000_MSIXBM(0), vector++,
 		   E1000_EIMS_OTHER);
 
-	/* disable IAM for ICR interrupt bits */
-	wr32(E1000_IAM, 0);
-
 	tmp = rd32(E1000_CTRL_EXT);
 	/* enable MSI-X PBA support*/
 	tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -313,6 +309,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 
 	wr32(E1000_CTRL_EXT, tmp);
 	adapter->eims_enable_mask |= E1000_EIMS_OTHER;
+	adapter->eims_other = E1000_EIMS_OTHER;
 
 	wrfl();
 }
@@ -355,6 +352,9 @@ static int igb_request_msix(struct igb_adapter *adapter)
 			goto out;
 		ring->itr_register = E1000_EITR(0) + (vector << 2);
 		ring->itr_val = adapter->itr;
+		/* overwrite the poll routine for MSIX, we've already done
+		 * netif_napi_add */
+		ring->napi.poll = &igb_clean_rx_ring_msix;
 		vector++;
 	}
 
@@ -363,9 +363,6 @@ static int igb_request_msix(struct igb_adapter *adapter)
 	if (err)
 		goto out;
 
-	adapter->napi.poll = igb_clean_rx_ring_msix;
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		adapter->rx_ring[i].napi.poll = adapter->napi.poll;
 	igb_configure_msix(adapter);
 	return 0;
 out:
@@ -434,12 +431,8 @@ static int igb_request_irq(struct igb_adapter *adapter)
 
 	if (adapter->msix_entries) {
 		err = igb_request_msix(adapter);
-		if (!err) {
-			/* enable IAM, auto-mask,
-			 * DO NOT USE EIAM or IAM in legacy mode */
-			wr32(E1000_IAM, IMS_ENABLE_MASK);
+		if (!err)
 			goto request_done;
-		}
 		/* fall back to MSI */
 		igb_reset_interrupt_capability(adapter);
 		if (!pci_enable_msi(adapter->pdev))
@@ -448,7 +441,11 @@ static int igb_request_irq(struct igb_adapter *adapter)
 		igb_free_all_rx_resources(adapter);
 		adapter->num_rx_queues = 1;
 		igb_alloc_queues(adapter);
+	} else {
+		wr32(E1000_MSIXBM(0), (E1000_EICR_RX_QUEUE0 |
+				       E1000_EIMS_OTHER));
 	}
+
 	if (adapter->msi_enabled) {
 		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
 				  netdev->name, netdev);
@@ -500,9 +497,12 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 
 	if (adapter->msix_entries) {
+		wr32(E1000_EIAM, 0);
 		wr32(E1000_EIMC, ~0);
 		wr32(E1000_EIAC, 0);
 	}
+
+	wr32(E1000_IAM, 0);
 	wr32(E1000_IMC, ~0);
 	wrfl();
 	synchronize_irq(adapter->pdev->irq);
@@ -517,13 +517,14 @@ static void igb_irq_enable(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 
 	if (adapter->msix_entries) {
-		wr32(E1000_EIMS,
-				adapter->eims_enable_mask);
-		wr32(E1000_EIAC,
-				adapter->eims_enable_mask);
+		wr32(E1000_EIAC, adapter->eims_enable_mask);
+		wr32(E1000_EIAM, adapter->eims_enable_mask);
+		wr32(E1000_EIMS, adapter->eims_enable_mask);
 		wr32(E1000_IMS, E1000_IMS_LSC);
-	} else
+	} else {
 		wr32(E1000_IMS, IMS_ENABLE_MASK);
+		wr32(E1000_IAM, IMS_ENABLE_MASK);
+	}
 }
 
 static void igb_update_mng_vlan(struct igb_adapter *adapter)
@@ -661,13 +662,10 @@ int igb_up(struct igb_adapter *adapter)
 
 	clear_bit(__IGB_DOWN, &adapter->state);
 
-	napi_enable(&adapter->napi);
-
-	if (adapter->msix_entries) {
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			napi_enable(&adapter->rx_ring[i].napi);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_ring[i].napi);
+	if (adapter->msix_entries)
 		igb_configure_msix(adapter);
-	}
 
 	/* Clear any pending interrupts. */
 	rd32(E1000_ICR);
@@ -704,11 +702,9 @@ void igb_down(struct igb_adapter *adapter)
 	wrfl();
 	msleep(10);
 
-	napi_disable(&adapter->napi);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_disable(&adapter->rx_ring[i].napi);
 
-	if (adapter->msix_entries)
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			napi_disable(&adapter->rx_ring[i].napi);
 	igb_irq_disable(adapter);
 
 	del_timer_sync(&adapter->watchdog_timer);
@@ -933,7 +929,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	igb_set_ethtool_ops(netdev);
 	netdev->tx_timeout = &igb_tx_timeout;
 	netdev->watchdog_timeo = 5 * HZ;
-	netif_napi_add(netdev, &adapter->napi, igb_clean, 64);
 	netdev->vlan_rx_register = igb_vlan_rx_register;
 	netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
 	netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
@@ -1298,15 +1293,14 @@ static int igb_open(struct net_device *netdev)
 	/* From here on the code is the same as igb_up() */
 	clear_bit(__IGB_DOWN, &adapter->state);
 
-	napi_enable(&adapter->napi);
-	if (adapter->msix_entries)
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			napi_enable(&adapter->rx_ring[i].napi);
-
-	igb_irq_enable(adapter);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_ring[i].napi);
 
 	/* Clear any pending interrupts. */
 	rd32(E1000_ICR);
+
+	igb_irq_enable(adapter);
+
 	/* Fire a link status change interrupt to start the watchdog. */
 	wr32(E1000_ICS, E1000_ICS_LSC);
 
@@ -1534,8 +1528,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 	rx_ring->pending_skb = NULL;
 
 	rx_ring->adapter = adapter;
-	/* FIXME: do we want to setup ring->napi->poll here? */
-	rx_ring->napi.poll = adapter->napi.poll;
 
 	return 0;
 
@@ -3034,26 +3026,19 @@ static irqreturn_t igb_msix_other(int irq, void *data)
 	struct net_device *netdev = data;
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 eicr;
-	/* disable interrupts from the "other" bit, avoid re-entry */
-	wr32(E1000_EIMC, E1000_EIMS_OTHER);
-
-	eicr = rd32(E1000_EICR);
+	u32 icr = rd32(E1000_ICR);
 
-	if (eicr & E1000_EIMS_OTHER) {
-		u32 icr = rd32(E1000_ICR);
-		/* reading ICR causes bit 31 of EICR to be cleared */
-		if (!(icr & E1000_ICR_LSC))
-			goto no_link_interrupt;
-		hw->mac.get_link_status = 1;
-		/* guard against interrupt when we're going down */
-		if (!test_bit(__IGB_DOWN, &adapter->state))
-			mod_timer(&adapter->watchdog_timer, jiffies + 1);
-	}
+	/* reading ICR causes bit 31 of EICR to be cleared */
+	if (!(icr & E1000_ICR_LSC))
+		goto no_link_interrupt;
+	hw->mac.get_link_status = 1;
+	/* guard against interrupt when we're going down */
+	if (!test_bit(__IGB_DOWN, &adapter->state))
+		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 
 no_link_interrupt:
 	wr32(E1000_IMS, E1000_IMS_LSC);
-	wr32(E1000_EIMS, E1000_EIMS_OTHER);
+	wr32(E1000_EIMS, adapter->eims_other);
 
 	return IRQ_HANDLED;
 }
@@ -3084,21 +3069,19 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
 	struct igb_adapter *adapter = rx_ring->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 
-	if (!rx_ring->itr_val)
-		wr32(E1000_EIMC, rx_ring->eims_value);
+	/* Write the ITR value calculated at the end of the
+	 * previous interrupt.
+	 */
 
-	if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) {
-		rx_ring->total_bytes = 0;
-		rx_ring->total_packets = 0;
-		rx_ring->no_itr_adjust = 0;
-		__netif_rx_schedule(adapter->netdev, &rx_ring->napi);
-	} else {
-		if (!rx_ring->no_itr_adjust) {
-			igb_lower_rx_eitr(adapter, rx_ring);
-			rx_ring->no_itr_adjust = 1;
-		}
+	if (adapter->set_itr) {
+		wr32(rx_ring->itr_register,
+		     1000000000 / (rx_ring->itr_val * 256));
+		adapter->set_itr = 0;
 	}
 
+	if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
+		__netif_rx_schedule(adapter->netdev, &rx_ring->napi);
+
 	return IRQ_HANDLED;
 }
 
@@ -3112,7 +3095,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
 {
 	struct net_device *netdev = data;
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct napi_struct *napi = &adapter->napi;
 	struct e1000_hw *hw = &adapter->hw;
 	/* read ICR disables interrupts using IAM */
 	u32 icr = rd32(E1000_ICR);
@@ -3121,25 +3103,17 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
 	 * previous interrupt.
 	 */
 	if (adapter->set_itr) {
-		wr32(E1000_ITR,
-			1000000000 / (adapter->itr * 256));
+		wr32(E1000_ITR, 1000000000 / (adapter->itr * 256));
 		adapter->set_itr = 0;
 	}
 
-	/* read ICR disables interrupts using IAM */
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->mac.get_link_status = 1;
 		if (!test_bit(__IGB_DOWN, &adapter->state))
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	if (netif_rx_schedule_prep(netdev, napi)) {
-		adapter->tx_ring->total_bytes = 0;
-		adapter->tx_ring->total_packets = 0;
-		adapter->rx_ring->total_bytes = 0;
-		adapter->rx_ring->total_packets = 0;
-		__netif_rx_schedule(netdev, napi);
-	}
+	netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
 
 	return IRQ_HANDLED;
 }
@@ -3153,7 +3127,6 @@ static irqreturn_t igb_intr(int irq, void *data)
 {
 	struct net_device *netdev = data;
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct napi_struct *napi = &adapter->napi;
 	struct e1000_hw *hw = &adapter->hw;
 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
 	 * need for the IMC write */
@@ -3166,8 +3139,7 @@ static irqreturn_t igb_intr(int irq, void *data)
 	 * previous interrupt.
 	 */
 	if (adapter->set_itr) {
-		wr32(E1000_ITR,
-			1000000000 / (adapter->itr * 256));
+		wr32(E1000_ITR, 1000000000 / (adapter->itr * 256));
 		adapter->set_itr = 0;
 	}
 
@@ -3185,13 +3157,7 @@ static irqreturn_t igb_intr(int irq, void *data)
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	if (netif_rx_schedule_prep(netdev, napi)) {
-		adapter->tx_ring->total_bytes = 0;
-		adapter->rx_ring->total_bytes = 0;
-		adapter->tx_ring->total_packets = 0;
-		adapter->rx_ring->total_packets = 0;
-		__netif_rx_schedule(netdev, napi);
-	}
+	netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
 
 	return IRQ_HANDLED;
 }
@@ -3274,6 +3240,10 @@ quit_polling:
 		else if (mean_size > IGB_DYN_ITR_LENGTH_HIGH)
 			igb_lower_rx_eitr(adapter, rx_ring);
 	}
+
+	if (!test_bit(__IGB_DOWN, &adapter->state))
+		wr32(E1000_EIMS, rx_ring->eims_value);
+
 	return 0;
 }
 