author     Michael Chan <mchan@broadcom.com>    2006-05-26 20:48:07 -0400
committer  David S. Miller <davem@sunset.davemloft.net>    2006-06-18 00:26:26 -0400
commit     df3e6548186f0baa727cd6d3a492891854bd31f2 (patch)
tree       525617b51451b730b0723d57258c6a8bf7f53d09 /drivers/net
parent     30b6c28d2aca4669f2e609ad5d77ea2a6cf0dd3a (diff)
[TG3]: Add recovery logic when MMIOs are re-ordered
Add recovery logic when we suspect that the system is re-ordering MMIOs. Re-ordered MMIOs to the send mailbox can cause bogus tx completions and hit BUG_ON() in the tx completion path.

tg3 already has logic to handle re-ordered MMIOs by flushing the MMIOs that must be strictly ordered (such as the send mailbox). Determining when to enable the flush is currently a manual process of adding known chipsets to a list.

The new code replaces the BUG_ON() in the tx completion path with a call to tg3_tx_recover(). It sets the TG3_FLAG_MBOX_WRITE_REORDER flag and resets the chip later in the workqueue to recover and start flushing MMIOs to the mailbox. A message reporting the problem is printed. We will then decide whether or not to add the host bridge to the list of chipsets that do re-ordering.

We may add some additional code later to print the host bridge's ID so that the user can report it more easily.

The assumption that re-ordering can only happen on x86 systems is also removed.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
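The recovery path works by swapping the driver's mailbox accessors for variants that flush each write. The snippet below is a minimal sketch of that flushing idiom, outside the patch itself: post the MMIO write, then read the register back so the host bridge cannot let later MMIOs pass it. The struct and function names (nic_priv, nic_write_mbox, nic_write_mbox_flush) are hypothetical stand-ins for illustration, not the tg3 accessors referenced in the diff.

#include <linux/io.h>
#include <linux/types.h>

struct nic_priv {
        void __iomem *regs;     /* BAR mapping obtained from pci_iomap() */
};

/* Plain posted write: a re-ordering host bridge may let later MMIOs
 * overtake it, which is what leads to the bogus tx completions. */
static void nic_write_mbox(struct nic_priv *np, u32 off, u32 val)
{
        writel(val, np->regs + off);
}

/* Flushing write: the read-back forces the posted write to complete
 * at the device before any subsequent MMIO is issued. */
static void nic_write_mbox_flush(struct nic_priv *np, u32 off, u32 val)
{
        writel(val, np->regs + off);
        readl(np->regs + off);
}

The flush costs an extra PCI read per mailbox update, which is why the driver only enables it when re-ordering is known or, with this patch, detected at runtime.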
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/tg3.c   53
-rw-r--r--  drivers/net/tg3.h    5
2 files changed, 49 insertions, 9 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index cb0ebf83c843..9e61df607413 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -2967,6 +2967,29 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
         return err;
 }
 
+/* This is called whenever we suspect that the system chipset is re-
+ * ordering the sequence of MMIO to the tx send mailbox. The symptom
+ * is bogus tx completions. We try to recover by setting the
+ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
+ * in the workqueue.
+ */
+static void tg3_tx_recover(struct tg3 *tp)
+{
+        BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
+               tp->write32_tx_mbox == tg3_write_indirect_mbox);
+
+        printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
+               "mapped I/O cycles to the network device, attempting to "
+               "recover. Please report the problem to the driver maintainer "
+               "and include system chipset information.\n", tp->dev->name);
+
+        spin_lock(&tp->lock);
+        spin_lock(&tp->tx_lock);
+        tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
+        spin_unlock(&tp->tx_lock);
+        spin_unlock(&tp->lock);
+}
+
 /* Tigon3 never reports partial packet sends. So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
@@ -2979,9 +3002,13 @@ static void tg3_tx(struct tg3 *tp)
         while (sw_idx != hw_idx) {
                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
                 struct sk_buff *skb = ri->skb;
-                int i;
+                int i, tx_bug = 0;
+
+                if (unlikely(skb == NULL)) {
+                        tg3_tx_recover(tp);
+                        return;
+                }
 
-                BUG_ON(skb == NULL);
                 pci_unmap_single(tp->pdev,
                                  pci_unmap_addr(ri, mapping),
                                  skb_headlen(skb),
@@ -2992,10 +3019,9 @@ static void tg3_tx(struct tg3 *tp)
                 sw_idx = NEXT_TX(sw_idx);
 
                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                        BUG_ON(sw_idx == hw_idx);
-
                         ri = &tp->tx_buffers[sw_idx];
-                        BUG_ON(ri->skb != NULL);
+                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
+                                tx_bug = 1;
 
                         pci_unmap_page(tp->pdev,
                                        pci_unmap_addr(ri, mapping),
@@ -3006,6 +3032,11 @@ static void tg3_tx(struct tg3 *tp)
                 }
 
                 dev_kfree_skb(skb);
+
+                if (unlikely(tx_bug)) {
+                        tg3_tx_recover(tp);
+                        return;
+                }
         }
 
         tp->tx_cons = sw_idx;
@@ -3333,6 +3364,11 @@ static int tg3_poll(struct net_device *netdev, int *budget)
         /* run TX completion thread */
         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
                 tg3_tx(tp);
+                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
+                        netif_rx_complete(netdev);
+                        schedule_work(&tp->reset_task);
+                        return 0;
+                }
         }
 
         /* run RX thread, within the bounds set by NAPI.
@@ -3581,6 +3617,13 @@ static void tg3_reset_task(void *_data)
         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 
+        if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
+                tp->write32_tx_mbox = tg3_write32_tx_mbox;
+                tp->write32_rx_mbox = tg3_write_flush_reg32;
+                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+                tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
+        }
+
         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
         tg3_init_hw(tp, 1);
 
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ff0faab94bd5..35669e18065c 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2155,11 +2155,7 @@ struct tg3 {
 #define TG3_FLAG_ENABLE_ASF             0x00000020
 #define TG3_FLAG_5701_REG_WRITE_BUG     0x00000040
 #define TG3_FLAG_POLL_SERDES            0x00000080
-#if defined(CONFIG_X86)
 #define TG3_FLAG_MBOX_WRITE_REORDER     0x00000100
-#else
-#define TG3_FLAG_MBOX_WRITE_REORDER     0 /* disables code too */
-#endif
 #define TG3_FLAG_PCIX_TARGET_HWBUG      0x00000200
 #define TG3_FLAG_WOL_SPEED_100MB        0x00000400
 #define TG3_FLAG_WOL_ENABLE             0x00000800
@@ -2172,6 +2168,7 @@ struct tg3 {
 #define TG3_FLAG_PCI_HIGH_SPEED         0x00040000
 #define TG3_FLAG_PCI_32BIT              0x00080000
 #define TG3_FLAG_SRAM_USE_CONFIG        0x00100000
+#define TG3_FLAG_TX_RECOVERY_PENDING    0x00200000
 #define TG3_FLAG_SERDES_WOL_CAP         0x00400000
 #define TG3_FLAG_JUMBO_RING_ENABLE      0x00800000
 #define TG3_FLAG_10_100_ONLY            0x01000000