Diffstat (limited to 'drivers/net/forcedeth.c')
 drivers/net/forcedeth.c | 27 ++++++++++++++++-----------
 1 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cfbb7aacfe94..70ddf1acfd88 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -992,7 +992,7 @@ static void nv_enable_irq(struct net_device *dev)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			enable_irq(dev->irq);
+			enable_irq(np->pci_dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
@@ -1008,7 +1008,7 @@ static void nv_disable_irq(struct net_device *dev)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			disable_irq(dev->irq);
+			disable_irq(np->pci_dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
@@ -1607,7 +1607,7 @@ static void nv_do_rx_refill(unsigned long data)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			disable_irq(dev->irq);
+			disable_irq(np->pci_dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -1625,7 +1625,7 @@ static void nv_do_rx_refill(unsigned long data)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			enable_irq(dev->irq);
+			enable_irq(np->pci_dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -2408,13 +2408,13 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	u32 vlanflags = 0;
-	u32 rx_processed_cnt = 0;
+	int rx_work = 0;
 	struct sk_buff *skb;
 	int len;
 
 	while((np->get_rx.ex != np->put_rx.ex) &&
 	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
-	      (rx_processed_cnt++ < limit)) {
+	      (rx_work < limit)) {
 
 		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
 			dev->name, flags);
@@ -2517,9 +2517,11 @@ next_pkt:
 			np->get_rx.ex = np->first_rx.ex;
 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
 			np->get_rx_ctx = np->first_rx_ctx;
+
+		rx_work++;
 	}
 
-	return rx_processed_cnt;
+	return rx_work;
 }
 
 static void set_bufsize(struct net_device *dev)
@@ -3558,10 +3560,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
 		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
 			np->msi_flags |= NV_MSI_ENABLED;
+			dev->irq = np->pci_dev->irq;
 			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
 				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
 				pci_disable_msi(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_ENABLED;
+				dev->irq = np->pci_dev->irq;
 				goto out_err;
 			}
 
@@ -3624,7 +3628,7 @@ static void nv_do_nic_poll(unsigned long data)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			disable_irq_lockdep(dev->irq);
+			disable_irq_lockdep(np->pci_dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -3642,6 +3646,8 @@ static void nv_do_nic_poll(unsigned long data)
 	}
 	np->nic_poll_irq = 0;
 
+	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */
+
 	if (np->recover_error) {
 		np->recover_error = 0;
 		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
@@ -3678,7 +3684,6 @@ static void nv_do_nic_poll(unsigned long data)
 		}
 	}
 
-	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
 
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
@@ -3691,7 +3696,7 @@ static void nv_do_nic_poll(unsigned long data)
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			enable_irq_lockdep(dev->irq);
+			enable_irq_lockdep(np->pci_dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
 			nv_nic_irq_rx(0, dev);
@@ -4948,7 +4953,7 @@ static int nv_close(struct net_device *dev)
 #ifdef CONFIG_FORCEDETH_NAPI
 	napi_disable(&np->napi);
 #endif
-	synchronize_irq(dev->irq);
+	synchronize_irq(np->pci_dev->irq);
 
 	del_timer_sync(&np->oom_kick);
 	del_timer_sync(&np->nic_poll);
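
The recurring change above, replacing dev->irq with np->pci_dev->irq, follows from one rule: pci_enable_msi() rewrites pci_dev->irq to the MSI vector and pci_disable_msi() restores the legacy INTx line, so a value cached in dev->irq before the MSI state changed can go stale. The following is a minimal, hypothetical sketch of that pattern (the helper name example_setup_irq and its error handling are invented for illustration and are not forcedeth code); it mirrors what the nv_request_irq hunk does: always request the interrupt from pci_dev->irq and refresh the cached dev->irq whenever MSI is enabled or rolled back.

/*
 * Illustrative sketch only -- hypothetical helper, not part of forcedeth.
 * The IRQ number is always taken from pci_dev->irq, which
 * pci_enable_msi()/pci_disable_msi() keep up to date, and dev->irq is
 * refreshed so later readers of the cached copy see the current vector.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

static int example_setup_irq(struct net_device *dev, struct pci_dev *pdev,
                             irq_handler_t handler)
{
        if (pci_enable_msi(pdev) == 0) {
                /* pci_enable_msi() changed pdev->irq to the MSI vector. */
                dev->irq = pdev->irq;
                if (request_irq(pdev->irq, handler, IRQF_SHARED,
                                dev->name, dev) != 0) {
                        /* Roll back; pdev->irq reverts to the INTx line. */
                        pci_disable_msi(pdev);
                        dev->irq = pdev->irq;
                        return -EBUSY;
                }
                return 0;
        }

        /* MSI unavailable: fall back to the legacy (INTx) interrupt. */
        return request_irq(pdev->irq, handler, IRQF_SHARED, dev->name, dev);
}

In this sketch, teardown would free_irq(pdev->irq, dev) and then call pci_disable_msi(pdev) if MSI had been enabled, again reading the vector from pci_dev->irq rather than from a stale cached copy.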