author	Auke Kok <auke-jan.h.kok@intel.com>	2006-07-14 19:14:23 -0400
committer	Auke Kok <auke-jan.h.kok@intel.com>	2006-07-14 19:14:23 -0400
commit	d3d9e484b2ca502c87156b69fa6b8f8fd5fa18a0 (patch)
tree	25cc750bb3a88599645c0415110e8ed9a6086f64 /drivers
parent	22e1170310ec6afa41e0dc7ac9dfac735d82dcab (diff)
e1000: Redo netpoll fix to address community concerns
The original suggested fix for netpoll was found to be racy on SMP
kernels. While it is highly unlikely that this race would ever be seen
in the real world due to current netpoll usage models, we implemented
this updated fix to address concerns.

Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
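The core of the updated fix is the try-lock idiom visible in the
e1000_clean() hunk below: every CPU that enters the poll routine races
for tx_queue_lock, and a CPU that fails the trylock simply skips Tx
cleanup, because a failed acquisition means another CPU is already
cleaning tx_ring[0]. Below is a minimal userspace sketch of that idiom,
with pthreads standing in for the kernel spinlock API; the names
clean_tx_ring and poll_cpu are illustrative, not the driver's.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for adapter->tx_queue_lock. */
static pthread_mutex_t tx_queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int tx_ring_work = 1000;        /* pretend descriptors to clean */

/* Illustrative stand-in for e1000_clean_tx_irq(); only ever called
 * with tx_queue_lock held, so tx_ring_work needs no other protection. */
static int clean_tx_ring(void)
{
        int cleaned = 0;
        while (tx_ring_work > 0) {
                tx_ring_work--;
                cleaned++;
        }
        return cleaned;
}

/* Each "CPU" polls: whoever wins the trylock cleans the ring; a loser
 * moves on instead of blocking, exactly as in the patch. */
static void *poll_cpu(void *arg)
{
        long cpu = (long)arg;

        if (pthread_mutex_trylock(&tx_queue_lock) == 0) {
                int cleaned = clean_tx_ring();
                pthread_mutex_unlock(&tx_queue_lock);
                printf("cpu %ld cleaned %d descriptors\n", cpu, cleaned);
        } else {
                printf("cpu %ld skipped: ring already being cleaned\n", cpu);
        }
        return NULL;
}

int main(void)
{
        pthread_t threads[4];

        for (long i = 0; i < 4; i++)
                pthread_create(&threads[i], NULL, poll_cpu, (void *)i);
        for (int i = 0; i < 4; i++)
                pthread_join(threads[i], NULL);
        return 0;
}

Losing the race is harmless here: the descriptors get cleaned either
way, just by a different CPU, which is why a trylock (rather than a
blocking lock) is sufficient.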
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/e1000/e1000_main.c	37
1 file changed, 15 insertions(+), 22 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6d3d41934503..1c6bcad5b910 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3387,8 +3387,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 		E1000_WRITE_REG(hw, IMC, ~0);
 		E1000_WRITE_FLUSH(hw);
 	}
-	if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
-		__netif_rx_schedule(&adapter->polling_netdev[0]);
+	if (likely(netif_rx_schedule_prep(netdev)))
+		__netif_rx_schedule(netdev);
 	else
 		e1000_irq_enable(adapter);
 #else
@@ -3431,34 +3431,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 {
 	struct e1000_adapter *adapter;
 	int work_to_do = min(*budget, poll_dev->quota);
-	int tx_cleaned = 0, i = 0, work_done = 0;
+	int tx_cleaned = 0, work_done = 0;
 
 	/* Must NOT use netdev_priv macro here. */
 	adapter = poll_dev->priv;
 
 	/* Keep link state information with original netdev */
-	if (!netif_carrier_ok(adapter->netdev))
+	if (!netif_carrier_ok(poll_dev))
 		goto quit_polling;
 
-	while (poll_dev != &adapter->polling_netdev[i]) {
-		i++;
-		BUG_ON(i == adapter->num_rx_queues);
+	/* e1000_clean is called per-cpu.  This lock protects
+	 * tx_ring[0] from being cleaned by multiple cpus
+	 * simultaneously.  A failure obtaining the lock means
+	 * tx_ring[0] is currently being cleaned anyway. */
+	if (spin_trylock(&adapter->tx_queue_lock)) {
+		tx_cleaned = e1000_clean_tx_irq(adapter,
+						&adapter->tx_ring[0]);
+		spin_unlock(&adapter->tx_queue_lock);
 	}
 
-	if (likely(adapter->num_tx_queues == 1)) {
-		/* e1000_clean is called per-cpu.  This lock protects
-		 * tx_ring[0] from being cleaned by multiple cpus
-		 * simultaneously.  A failure obtaining the lock means
-		 * tx_ring[0] is currently being cleaned anyway. */
-		if (spin_trylock(&adapter->tx_queue_lock)) {
-			tx_cleaned = e1000_clean_tx_irq(adapter,
-							&adapter->tx_ring[0]);
-			spin_unlock(&adapter->tx_queue_lock);
-		}
-	} else
-		tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
-
-	adapter->clean_rx(adapter, &adapter->rx_ring[i],
+	adapter->clean_rx(adapter, &adapter->rx_ring[0],
 			  &work_done, work_to_do);
 
 	*budget -= work_done;
@@ -3466,7 +3458,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 
 	/* If no Tx and not enough Rx work done, exit the polling mode */
 	if ((!tx_cleaned && (work_done == 0)) ||
-	   !netif_running(adapter->netdev)) {
+	   !netif_running(poll_dev)) {
 quit_polling:
 		netif_rx_complete(poll_dev);
 		e1000_irq_enable(adapter);
@@ -4752,6 +4744,7 @@ static void
 e1000_netpoll(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev, NULL);
 	e1000_clean_tx_irq(adapter, adapter->tx_ring);
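For context on the e1000_intr() hunk above: netif_rx_schedule_prep()
atomically tests and sets the device's "already scheduled for polling"
state, and only a caller that wins that test goes on to
__netif_rx_schedule(), so the device lands on the per-CPU poll list at
most once however many paths (hard interrupt, netpoll) fire. A minimal
C11 sketch of that test-and-set split; rx_schedule_prep and
schedule_poll are illustrative stand-ins, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Per-device "already on the poll list" bit, playing the role of the
 * scheduled-state flag tested by netif_rx_schedule_prep(). */
static atomic_flag rx_scheduled = ATOMIC_FLAG_INIT;

/* Stand-in for __netif_rx_schedule(): in the kernel this would queue
 * the device for polling and raise the receive softirq. */
static void schedule_poll(const char *who)
{
        printf("%s: device queued for polling\n", who);
}

/* Mirrors the prep/schedule split: the atomic test-and-set guarantees
 * concurrent callers queue the device at most once. */
static bool rx_schedule_prep(void)
{
        return !atomic_flag_test_and_set(&rx_scheduled);
}

int main(void)
{
        /* Two back-to-back "interrupts": only the first wins the flag. */
        if (rx_schedule_prep())
                schedule_poll("irq");
        if (rx_schedule_prep())
                schedule_poll("netpoll");   /* skipped: already scheduled */

        /* Poll completion clears the bit, as netif_rx_complete() does
         * in the quit_polling path above; scheduling works again. */
        atomic_flag_clear(&rx_scheduled);
        if (rx_schedule_prep())
                schedule_poll("next irq");
        return 0;
}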