author     Michael Chan <mchan@broadcom.com>      2005-04-25 18:17:17 -0400
committer  David S. Miller <davem@davemloft.net>  2005-04-25 18:17:17 -0400
commit     04237dddd14375fce1df4bfb1be92a35aa1c247f
tree       d2c8e35c19f7cac7a1d3e5a7bc55d76835d2a7e0
parent     52f6d697dc0f2c039e8413e780b0f45ddf8161fc
[TG3]: Fix tg3_restart_ints()
tg3_restart_ints() is called to re-enable interrupts after tg3_poll() has finished all the work. It calls tg3_cond_int() to force an interrupt if the status block's updated bit is set. The updated bit will be set whenever there is a new status block update during tg3_poll(), which can happen very often. The worst part is that even when all the work has been processed, the updated bit remains set and an interrupt is forced unnecessarily.

The fix is to call tg3_has_work() instead, to determine whether new work has been posted before forcing an interrupt. The way the interrupt is forced is also changed to use "coalesce_now" instead of "SETINT"; the former is generally a safer way to force the interrupt. The first parameter to tg3_has_work(), which was unused, is also deleted.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/tg3.c | 53
 1 file changed, 28 insertions(+), 25 deletions(-)
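For illustration, here is a minimal, self-contained user-space sketch of the check the patch centralizes in tg3_has_work(): pending work is detected from the hardware ring indices and the link-change bit rather than from the status block's "updated" bit. The struct layouts, constant values, and names used here (has_work, struct nic, struct hw_status) are simplified stand-ins invented for the sketch, not the driver's real definitions; only the control flow mirrors the diff below.

#include <stdio.h>

#define SD_STATUS_LINK_CHG	0x00000002	/* stand-in bit value, not the real one */
#define FLAG_USE_LINKCHG_REG	0x00000001	/* stand-in for TG3_FLAG_USE_LINKCHG_REG */
#define FLAG_POLL_SERDES	0x00000004	/* stand-in for TG3_FLAG_POLL_SERDES */

struct hw_status {			/* simplified stand-in for struct tg3_hw_status */
	unsigned int status;		/* link-change event bits */
	unsigned int tx_consumer;	/* hardware TX consumer index */
	unsigned int rx_producer;	/* hardware RX return-ring producer index */
};

struct nic {				/* simplified stand-in for struct tg3 */
	unsigned int flags;
	struct hw_status *hw_status;
	unsigned int tx_cons;		/* driver's last-seen TX consumer */
	unsigned int rx_rcb_ptr;	/* driver's last-seen RX producer */
};

/* Mirrors the new tg3_has_work(): decide from the ring indices and the link
 * status whether unprocessed work exists, instead of trusting the status
 * block's "updated" bit. */
static unsigned int has_work(const struct nic *tp)
{
	const struct hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* link events arrive through the status block only when the driver
	 * is not polling the link via registers or serdes */
	if (!(tp->flags & (FLAG_USE_LINKCHG_REG | FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* RX/TX work exists only if the hardware indices moved past ours */
	if (sblk->tx_consumer != tp->tx_cons ||
	    sblk->rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

int main(void)
{
	struct hw_status sblk = { .status = 0, .tx_consumer = 5, .rx_producer = 9 };
	struct nic tp = { .flags = 0, .hw_status = &sblk, .tx_cons = 5, .rx_rcb_ptr = 9 };

	/* all work already processed: no interrupt would be forced */
	printf("work pending: %u\n", has_work(&tp));

	/* hardware posted one more RX packet: an interrupt would be forced */
	sblk.rx_producer = 10;
	printf("work pending: %u\n", has_work(&tp));
	return 0;
}

With this check in place, the patched tg3_restart_ints() returns quietly when nothing is pending and writes HOSTCC_MODE with the coalesce-now bits only when work actually exists, which is the safer "coalesce_now" path described in the commit message.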
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 92b0e4975135..903d0ced7ddb 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -426,9 +426,30 @@ static void tg3_enable_ints(struct tg3 *tp)
 	tg3_cond_int(tp);
 }
 
+static inline unsigned int tg3_has_work(struct tg3 *tp)
+{
+	struct tg3_hw_status *sblk = tp->hw_status;
+	unsigned int work_exists = 0;
+
+	/* check for phy events */
+	if (!(tp->tg3_flags &
+	      (TG3_FLAG_USE_LINKCHG_REG |
+	       TG3_FLAG_POLL_SERDES))) {
+		if (sblk->status & SD_STATUS_LINK_CHG)
+			work_exists = 1;
+	}
+	/* check for RX/TX work to do */
+	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
+	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
+		work_exists = 1;
+
+	return work_exists;
+}
+
 /* tg3_restart_ints
- * similar to tg3_enable_ints, but it can return without flushing the
- * PIO write which reenables interrupts
+ * similar to tg3_enable_ints, but it accurately determines whether there
+ * is new work pending and can return without flushing the PIO write
+ * which reenables interrupts
  */
 static void tg3_restart_ints(struct tg3 *tp)
 {
@@ -437,7 +458,9 @@ static void tg3_restart_ints(struct tg3 *tp)
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
 	mmiowb();
 
-	tg3_cond_int(tp);
+	if (tg3_has_work(tp))
+		tw32(HOSTCC_MODE, tp->coalesce_mode |
+		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
 
 static inline void tg3_netif_stop(struct tg3 *tp)
@@ -2891,26 +2914,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	return (done ? 0 : 1);
 }
 
-static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
-{
-	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned int work_exists = 0;
-
-	/* check for phy events */
-	if (!(tp->tg3_flags &
-	      (TG3_FLAG_USE_LINKCHG_REG |
-	       TG3_FLAG_POLL_SERDES))) {
-		if (sblk->status & SD_STATUS_LINK_CHG)
-			work_exists = 1;
-	}
-	/* check for RX/TX work to do */
-	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
-	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
-		work_exists = 1;
-
-	return work_exists;
-}
-
 /* MSI ISR - No need to check for interrupt sharing and no need to
  * flush status block and interrupt mailbox. PCI ordering rules
  * guarantee that MSI will arrive after the status block.
@@ -2934,7 +2937,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 	sblk->status &= ~SD_STATUS_UPDATED;
 
-	if (likely(tg3_has_work(dev, tp)))
+	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);	/* schedule NAPI poll */
 	else {
 		/* no work, re-enable interrupts
@@ -2981,7 +2984,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 	sblk->status &= ~SD_STATUS_UPDATED;
 
-	if (likely(tg3_has_work(dev, tp)))
+	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);	/* schedule NAPI poll */
 	else {
 		/* no work, shared interrupt perhaps? re-enable