Diffstat (limited to 'drivers/net/tg3.c')
 -rw-r--r--  drivers/net/tg3.c | 1383
 1 file changed, 1084 insertions(+), 299 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f79b02e80e75..7e371b1209a1 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -7,7 +7,12 @@
  * Copyright (C) 2005 Broadcom Corporation.
  *
  * Firmware is:
- *	Copyright (C) 2000-2003 Broadcom Corporation.
+ *	Derived from proprietary unpublished source code,
+ *	Copyright (C) 2000-2003 Broadcom Corporation.
+ *
+ *	Permission is hereby granted for the distribution of this firmware
+ *	data in hexadecimal or equivalent format, provided this copyright
+ *	notice is accompanying it.
  */
 
 #include <linux/config.h>
@@ -61,8 +66,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.27"
-#define DRV_MODULE_RELDATE	"May 5, 2005"
+#define DRV_MODULE_VERSION	"3.32"
+#define DRV_MODULE_RELDATE	"June 24, 2005"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -133,6 +138,8 @@
 /* number of ETHTOOL_GSTATS u64's */
 #define TG3_NUM_STATS	(sizeof(struct tg3_ethtool_stats)/sizeof(u64))
 
+#define TG3_NUM_TEST	6
+
 static char version[] __devinitdata =
 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
@@ -206,6 +213,8 @@ static struct pci_device_id tg3_pci_tbl[] = {
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
@@ -314,15 +323,24 @@ static struct {
 	{ "nic_tx_threshold_hit" }
 };
 
+static struct {
+	const char string[ETH_GSTRING_LEN];
+} ethtool_test_keys[TG3_NUM_TEST] = {
+	{ "nvram test     (online) " },
+	{ "link test      (online) " },
+	{ "register test  (offline)" },
+	{ "memory test    (offline)" },
+	{ "loopback test  (offline)" },
+	{ "interrupt test (offline)" },
+};
+
 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 {
 	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&tp->indirect_lock, flags);
+		spin_lock_bh(&tp->indirect_lock);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
-		spin_unlock_irqrestore(&tp->indirect_lock, flags);
+		spin_unlock_bh(&tp->indirect_lock);
 	} else {
 		writel(val, tp->regs + off);
 		if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
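A note on the locking conversion in the hunks above and below: indirect_lock moves from spin_lock_irqsave() to spin_lock_bh() because the rest of this patch makes hard-IRQ processing lockless (see the tg3_start_xmit comment further down: "IRQ processing runs lockless"). With no interrupt handler left that can take indirect_lock, disabling bottom halves is sufficient to serialize the PCI config-space register window, and it is cheaper than masking interrupts.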
@@ -333,12 +351,10 @@ static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
 {
 	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&tp->indirect_lock, flags);
+		spin_lock_bh(&tp->indirect_lock);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
-		spin_unlock_irqrestore(&tp->indirect_lock, flags);
+		spin_unlock_bh(&tp->indirect_lock);
 	} else {
 		void __iomem *dest = tp->regs + off;
 		writel(val, dest);
@@ -378,28 +394,24 @@ static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
 
 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->indirect_lock, flags);
+	spin_lock_bh(&tp->indirect_lock);
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 
 	/* Always leave this as zero. */
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
-	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	spin_unlock_bh(&tp->indirect_lock);
 }
 
 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->indirect_lock, flags);
+	spin_lock_bh(&tp->indirect_lock);
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 
 	/* Always leave this as zero. */
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
-	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	spin_unlock_bh(&tp->indirect_lock);
 }
 
 static void tg3_disable_ints(struct tg3 *tp)
@@ -418,11 +430,14 @@ static inline void tg3_cond_int(struct tg3 *tp)
 
 static void tg3_enable_ints(struct tg3 *tp)
 {
+	tp->irq_sync = 0;
+	wmb();
+
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+		     (tp->last_tag << 24));
 	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
-
 	tg3_cond_int(tp);
 }
 
@@ -455,16 +470,23 @@ static void tg3_restart_ints(struct tg3 *tp)
 {
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+		     tp->last_tag << 24);
 	mmiowb();
 
-	if (tg3_has_work(tp))
+	/* When doing tagged status, this work check is unnecessary.
+	 * The last_tag we write above tells the chip which piece of
+	 * work we've completed.
+	 */
+	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+	    tg3_has_work(tp))
 		tw32(HOSTCC_MODE, tp->coalesce_mode |
 		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
 
 static inline void tg3_netif_stop(struct tg3 *tp)
 {
+	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
 	netif_poll_disable(tp->dev);
 	netif_tx_disable(tp->dev);
 }
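The last_tag writes above are the heart of the new tagged-status mode. With TG3_FLAG_TAGGED_STATUS set, the chip stamps each status-block update with an incrementing 8-bit tag, and the host acknowledges completed work by echoing the tag it last processed into bits 31:24 of the interrupt mailbox; the chip only re-interrupts while it holds a newer tag, which is why the racy tg3_has_work() poke becomes unnecessary. A toy userspace model of that handshake (hypothetical names, illustration only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Toy model: the chip bumps produced_tag on every status-block update;
 * the host acks by echoing the tag it consumed (the driver's
 * tw32_mailbox(..., tp->last_tag << 24)).  An interrupt stays pending
 * only while the chip holds a tag the host has not yet echoed.
 */
struct toy_status_block { uint8_t produced_tag; };

static uint8_t echoed_tag;	/* host's last mailbox write */

static int irq_pending(const struct toy_status_block *sb)
{
	return sb->produced_tag != echoed_tag;
}

int main(void)
{
	struct toy_status_block sb = { 0 };

	sb.produced_tag++;				/* chip posts new work */
	printf("pending=%d\n", irq_pending(&sb));	/* prints 1 */
	echoed_tag = sb.produced_tag;			/* host finishes, acks */
	printf("pending=%d\n", irq_pending(&sb));	/* prints 0 */
	return 0;
}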
@@ -477,7 +499,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
 	 * (such as after tg3_init_hw)
 	 */
 	netif_poll_enable(tp->dev);
-	tg3_cond_int(tp);
+	tp->hw_status->status |= SD_STATUS_UPDATED;
+	tg3_enable_ints(tp);
 }
 
 static void tg3_switch_clocks(struct tg3 *tp)
@@ -2500,7 +2523,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 		if (netif_carrier_ok(tp->dev)) {
 			tw32(HOSTCC_STAT_COAL_TICKS,
-			     DEFAULT_STAT_COAL_TICKS);
+			     tp->coal.stats_block_coalesce_usecs);
 		} else {
 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
 		}
@@ -2551,7 +2574,7 @@ static void tg3_tx(struct tg3 *tp)
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
-		dev_kfree_skb_irq(skb);
+		dev_kfree_skb(skb);
 	}
 
 	tp->tx_cons = sw_idx;
@@ -2857,11 +2880,8 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 {
 	struct tg3 *tp = netdev_priv(netdev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
 	int done;
 
-	spin_lock_irqsave(&tp->lock, flags);
-
 	/* handle link change and other phy events */
 	if (!(tp->tg3_flags &
 	      (TG3_FLAG_USE_LINKCHG_REG |
@@ -2869,7 +2889,9 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 		if (sblk->status & SD_STATUS_LINK_CHG) {
 			sblk->status = SD_STATUS_UPDATED |
 				(sblk->status & ~SD_STATUS_LINK_CHG);
+			spin_lock(&tp->lock);
 			tg3_setup_phy(tp, 0);
+			spin_unlock(&tp->lock);
 		}
 	}
 
@@ -2880,13 +2902,10 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 		spin_unlock(&tp->tx_lock);
 	}
 
-	spin_unlock_irqrestore(&tp->lock, flags);
-
 	/* run RX thread, within the bounds set by NAPI.
 	 * All RX "locking" is done by ensuring outside
 	 * code synchronizes with dev->poll()
 	 */
-	done = 1;
 	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
 		int orig_budget = *budget;
 		int work_done;
@@ -2898,22 +2917,59 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
 		*budget -= work_done;
 		netdev->quota -= work_done;
-
-		if (work_done >= orig_budget)
-			done = 0;
 	}
 
+	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+		tp->last_tag = sblk->status_tag;
+	rmb();
+	sblk->status &= ~SD_STATUS_UPDATED;
+
 	/* if no more work, tell net stack and NIC we're done */
+	done = !tg3_has_work(tp);
 	if (done) {
-		spin_lock_irqsave(&tp->lock, flags);
-		__netif_rx_complete(netdev);
+		spin_lock(&tp->lock);
+		netif_rx_complete(netdev);
 		tg3_restart_ints(tp);
-		spin_unlock_irqrestore(&tp->lock, flags);
+		spin_unlock(&tp->lock);
 	}
 
 	return (done ? 0 : 1);
 }
 
+static void tg3_irq_quiesce(struct tg3 *tp)
+{
+	BUG_ON(tp->irq_sync);
+
+	tp->irq_sync = 1;
+	smp_mb();
+
+	synchronize_irq(tp->pdev->irq);
+}
+
+static inline int tg3_irq_sync(struct tg3 *tp)
+{
+	return tp->irq_sync;
+}
+
+/* Fully shutdown all tg3 driver activity elsewhere in the system.
+ * If irq_sync is non-zero, then the IRQ handler must be synchronized
+ * with as well.  Most of the time, this is not necessary except when
+ * shutting down the device.
+ */
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
+{
+	if (irq_sync)
+		tg3_irq_quiesce(tp);
+	spin_lock_bh(&tp->lock);
+	spin_lock(&tp->tx_lock);
+}
+
+static inline void tg3_full_unlock(struct tg3 *tp)
+{
+	spin_unlock(&tp->tx_lock);
+	spin_unlock_bh(&tp->lock);
+}
+
 /* MSI ISR - No need to check for interrupt sharing and no need to
  * flush status block and interrupt mailbox. PCI ordering rules
  * guarantee that MSI will arrive after the status block.
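Taken together, tg3_irq_quiesce(), tg3_full_lock() and tg3_full_unlock() define the shutdown discipline the rest of this patch converts callers to. Assembled from the call sites below (tg3_reset_task, tg3_set_ringparam and friends), the expected shape is roughly:

	tg3_netif_stop(tp);		/* freeze NAPI poll and TX queue   */
	tg3_full_lock(tp, 1);		/* set irq_sync, synchronize_irq() */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_init_hw(tp);
	tg3_netif_start(tp);		/* tg3_enable_ints() clears irq_sync */
	tg3_full_unlock(tp);

An ISR that fires during the quiesced window still acks the chip but sees tg3_irq_sync() non-zero and bails out before touching driver state.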
@@ -2923,31 +2979,28 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	struct net_device *dev = dev_id;
 	struct tg3 *tp = netdev_priv(dev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->lock, flags);
 
 	/*
-	 * writing any value to intr-mbox-0 clears PCI INTA# and
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 	 * chip-internal interrupt pending events.
-	 * writing non-zero to intr-mbox-0 additional tells the
+	 * Writing non-zero to intr-mbox-0 additional tells the
 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 	 * event coalescing.
 	 */
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+	tp->last_tag = sblk->status_tag;
+	rmb();
+	if (tg3_irq_sync(tp))
+		goto out;
 	sblk->status &= ~SD_STATUS_UPDATED;
-
 	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);		/* schedule NAPI poll */
 	else {
-		/* no work, re-enable interrupts
-		 */
+		/* No work, re-enable interrupts. */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-			     0x00000000);
+			     tp->last_tag << 24);
 	}
-
-	spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
 	return IRQ_RETVAL(1);
 }
 
@@ -2956,10 +3009,50 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	struct net_device *dev = dev_id;
 	struct tg3 *tp = netdev_priv(dev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
 	unsigned int handled = 1;
 
-	spin_lock_irqsave(&tp->lock, flags);
+	/* In INTx mode, it is possible for the interrupt to arrive at
+	 * the CPU before the status block posted prior to the interrupt.
+	 * Reading the PCI State register will confirm whether the
+	 * interrupt is ours and will flush the status block.
+	 */
+	if ((sblk->status & SD_STATUS_UPDATED) ||
+	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+		/*
+		 * Writing any value to intr-mbox-0 clears PCI INTA# and
+		 * chip-internal interrupt pending events.
+		 * Writing non-zero to intr-mbox-0 additional tells the
+		 * NIC to stop sending us irqs, engaging "in-intr-handler"
+		 * event coalescing.
+		 */
+		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+			     0x00000001);
+		if (tg3_irq_sync(tp))
+			goto out;
+		sblk->status &= ~SD_STATUS_UPDATED;
+		if (likely(tg3_has_work(tp)))
+			netif_rx_schedule(dev);		/* schedule NAPI poll */
+		else {
+			/* No work, shared interrupt perhaps?  re-enable
+			 * interrupts, and flush that PCI write
+			 */
+			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+				     0x00000000);
+			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+		}
+	} else {	/* shared interrupt */
+		handled = 0;
+	}
+out:
+	return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	struct tg3 *tp = netdev_priv(dev);
+	struct tg3_hw_status *sblk = tp->hw_status;
+	unsigned int handled = 1;
 
 	/* In INTx mode, it is possible for the interrupt to arrive at
 	 * the CPU before the status block posted prior to the interrupt.
@@ -2977,13 +3070,11 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 		 */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
-		/*
-		 * Flush PCI write.  This also guarantees that our
-		 * status block has been flushed to host memory.
-		 */
-		tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+		tp->last_tag = sblk->status_tag;
+		rmb();
+		if (tg3_irq_sync(tp))
+			goto out;
 		sblk->status &= ~SD_STATUS_UPDATED;
-
 		if (likely(tg3_has_work(tp)))
 			netif_rx_schedule(dev);		/* schedule NAPI poll */
 		else {
@@ -2991,15 +3082,13 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 			 * interrupts, and flush that PCI write
 			 */
 			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-				     0x00000000);
+				     tp->last_tag << 24);
 			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 		}
 	} else {	/* shared interrupt */
 		handled = 0;
 	}
-
-	spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
 	return IRQ_RETVAL(handled);
 }
 
@@ -3020,7 +3109,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
 }
 
 static int tg3_init_hw(struct tg3 *);
-static int tg3_halt(struct tg3 *, int);
+static int tg3_halt(struct tg3 *, int, int);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void tg3_poll_controller(struct net_device *dev)
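The extra tg3_halt() argument visible in the prototype above threads the reset "kind" through to the firmware handshake: tg3_write_sig_pre_reset(), tg3_write_sig_legacy() and tg3_write_sig_post_reset() now receive the caller's kind instead of a hard-coded RESET_KIND_SHUTDOWN (see the tg3_halt hunk below; every call site visible in this diff passes RESET_KIND_SHUTDOWN).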
@@ -3038,19 +3127,17 @@ static void tg3_reset_task(void *_data)
 
 	tg3_netif_stop(tp);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 
 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 
-	tg3_halt(tp, 0);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 	tg3_init_hw(tp);
 
 	tg3_netif_start(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	if (restart_timer)
 		mod_timer(&tp->timer, jiffies + 1);
@@ -3156,39 +3243,21 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int i;
 	u32 len, entry, base_flags, mss;
 	int would_hit_hwbug;
-	unsigned long flags;
 
 	len = skb_headlen(skb);
 
 	/* No BH disabling for tx_lock here.  We are running in BH disabled
 	 * context and TX reclaim runs via tp->poll inside of a software
-	 * interrupt.  Rejoice!
-	 *
-	 * Actually, things are not so simple.  If we are to take a hw
-	 * IRQ here, we can deadlock, consider:
-	 *
-	 *       CPU1           CPU2
-	 *   tg3_start_xmit
-	 *   take tp->tx_lock
-	 *                   tg3_timer
-	 *                   take tp->lock
-	 *   tg3_interrupt
-	 *   spin on tp->lock
-	 *                   spin on tp->tx_lock
-	 *
-	 * So we really do need to disable interrupts when taking
-	 * tx_lock here.
+	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
+	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	local_irq_save(flags);
-	if (!spin_trylock(&tp->tx_lock)) {
-		local_irq_restore(flags);
+	if (!spin_trylock(&tp->tx_lock))
 		return NETDEV_TX_LOCKED;
-	}
 
 	/* This is a hard error, log it. */
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		netif_stop_queue(dev);
-		spin_unlock_irqrestore(&tp->tx_lock, flags);
+		spin_unlock(&tp->tx_lock);
 		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 		       dev->name);
 		return NETDEV_TX_BUSY;
@@ -3353,7 +3422,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 out_unlock:
 	mmiowb();
-	spin_unlock_irqrestore(&tp->tx_lock, flags);
+	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
 
@@ -3387,10 +3456,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	}
 
 	tg3_netif_stop(tp);
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
 
-	tg3_halt(tp, 1);
+	tg3_full_lock(tp, 1);
+
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 
 	tg3_set_mtu(dev, tp, new_mtu);
 
@@ -3398,8 +3467,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 
 	tg3_netif_start(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -4081,19 +4149,19 @@ static void tg3_stop_fw(struct tg3 *tp)
 }
 
 /* tp->lock is held. */
-static int tg3_halt(struct tg3 *tp, int silent)
+static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
 	int err;
 
 	tg3_stop_fw(tp);
 
-	tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
+	tg3_write_sig_pre_reset(tp, kind);
 
 	tg3_abort_hw(tp, silent);
 	err = tg3_chip_reset(tp);
 
-	tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
-	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+	tg3_write_sig_legacy(tp, kind);
+	tg3_write_sig_post_reset(tp, kind);
 
 	if (err)
 		return err;
@@ -4307,7 +4375,12 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
 	 */
 	tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
 
+	/* It is possible that bootcode is still loading at this point.
+	 * Get the nvram lock first before halting the cpu.
+	 */
+	tg3_nvram_lock(tp);
 	err = tg3_halt_cpu(tp, cpu_base);
+	tg3_nvram_unlock(tp);
 	if (err)
 		goto out;
 
@@ -5015,9 +5088,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
 
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	__tg3_set_mac_addr(tp);
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return 0;
 }
@@ -5044,6 +5117,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 }
 
 static void __tg3_set_rx_mode(struct net_device *);
+static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+	tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+	tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+	tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+	tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+	}
+	tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+	tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+		u32 val = ec->stats_block_coalesce_usecs;
+
+		if (!netif_carrier_ok(tp->dev))
+			val = 0;
+
+		tw32(HOSTCC_STAT_COAL_TICKS, val);
+	}
+}
 
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp)
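tg3_set_coalesce() centralizes what used to be hard-coded host-coalescing writes in tg3_reset_hw() (next hunk). A sketch of how a tp->coal-style parameter block maps onto the registers, with illustrative values (struct ethtool_coalesce is the kernel's own; the register comments follow the function above):

	struct ethtool_coalesce ec = {
		.rx_coalesce_usecs          = 20,	/* HOSTCC_RXCOL_TICKS  */
		.tx_coalesce_usecs          = 72,	/* HOSTCC_TXCOL_TICKS  */
		.rx_max_coalesced_frames    = 5,	/* HOSTCC_RXMAX_FRAMES */
		.tx_max_coalesced_frames    = 53,	/* HOSTCC_TXMAX_FRAMES */
		/* HOSTCC_STAT_COAL_TICKS; forced to 0 while the link is down */
		.stats_block_coalesce_usecs = 1000000,
	};

	tg3_set_coalesce(tp, &ec);	/* caller holds tp->lock */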
@@ -5366,16 +5460,7 @@ static int tg3_reset_hw(struct tg3 *tp)
 		udelay(10);
 	}
 
-	tw32(HOSTCC_RXCOL_TICKS, 0);
-	tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
-	tw32(HOSTCC_RXMAX_FRAMES, 1);
-	tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
-		tw32(HOSTCC_RXCOAL_TICK_INT, 0);
-		tw32(HOSTCC_TXCOAL_TICK_INT, 0);
-	}
-	tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
-	tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+	tg3_set_coalesce(tp, &tp->coal);
 
 	/* set status block DMA address */
 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5388,8 +5473,6 @@ static int tg3_reset_hw(struct tg3 *tp)
 	 * the tg3_periodic_fetch_stats call there, and
 	 * tg3_get_stats to see how this works for 5705/5750 chips.
 	 */
-	tw32(HOSTCC_STAT_COAL_TICKS,
-	     DEFAULT_STAT_COAL_TICKS);
 	tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 	     ((u64) tp->stats_mapping >> 32));
 	tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5445,7 +5528,8 @@ static int tg3_reset_hw(struct tg3 *tp)
 	udelay(100);
 
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
-	tr32(MAILBOX_INTERRUPT_0);
+	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+	tp->last_tag = 0;
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -5643,9 +5727,6 @@ static int tg3_reset_hw(struct tg3 *tp)
 
 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 
-	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
-		tg3_enable_ints(tp);
-
 	return 0;
 }
 
@@ -5718,36 +5799,35 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
 static void tg3_timer(unsigned long __opaque)
 {
 	struct tg3 *tp = (struct tg3 *) __opaque;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tp->lock, flags);
-	spin_lock(&tp->tx_lock);
+	spin_lock(&tp->lock);
 
-	/* All of this garbage is because when using non-tagged
-	 * IRQ status the mailbox/status_block protocol the chip
-	 * uses with the cpu is race prone.
-	 */
-	if (tp->hw_status->status & SD_STATUS_UPDATED) {
-		tw32(GRC_LOCAL_CTRL,
-		     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
-	} else {
-		tw32(HOSTCC_MODE, tp->coalesce_mode |
-		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
-	}
+	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+		/* All of this garbage is because when using non-tagged
+		 * IRQ status the mailbox/status_block protocol the chip
+		 * uses with the cpu is race prone.
+		 */
+		if (tp->hw_status->status & SD_STATUS_UPDATED) {
+			tw32(GRC_LOCAL_CTRL,
+			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+		} else {
+			tw32(HOSTCC_MODE, tp->coalesce_mode |
+			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+		}
 
-	if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-		tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irqrestore(&tp->lock, flags);
-		schedule_work(&tp->reset_task);
-		return;
+		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+			spin_unlock(&tp->lock);
+			schedule_work(&tp->reset_task);
+			return;
+		}
 	}
 
-	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
-		tg3_periodic_fetch_stats(tp);
-
 	/* This part only runs once per second. */
 	if (!--tp->timer_counter) {
+		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+			tg3_periodic_fetch_stats(tp);
+
 		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
 			u32 mac_stat;
 			int phy_event;
@@ -5805,8 +5885,7 @@ static void tg3_timer(unsigned long __opaque)
 		tp->asf_counter = tp->asf_multiplier;
 	}
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irqrestore(&tp->lock, flags);
+	spin_unlock(&tp->lock);
 
 	tp->timer.expires = jiffies + tp->timer_offset;
 	add_timer(&tp->timer);
@@ -5818,6 +5897,9 @@ static int tg3_test_interrupt(struct tg3 *tp)
 	int err, i;
 	u32 int_mbox = 0;
 
+	if (!netif_running(dev))
+		return -ENODEV;
+
 	tg3_disable_ints(tp);
 
 	free_irq(tp->pdev->irq, dev);
@@ -5846,9 +5928,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		err = request_irq(tp->pdev->irq, tg3_msi,
 				  SA_SAMPLE_RANDOM, dev->name, dev);
-	else
-		err = request_irq(tp->pdev->irq, tg3_interrupt,
+	else {
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
+		err = request_irq(tp->pdev->irq, fn,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 
 	if (err)
 		return err;
@@ -5900,23 +5986,26 @@ static int tg3_test_msi(struct tg3 *tp)
 
 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 
-	err = request_irq(tp->pdev->irq, tg3_interrupt,
-			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	{
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
 
+		err = request_irq(tp->pdev->irq, fn,
+				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 	if (err)
 		return err;
 
 	/* Need to reset the chip because the MSI cycle may have terminated
 	 * with Master Abort.
 	 */
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 
-	tg3_halt(tp, 1);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	err = tg3_init_hw(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	if (err)
 		free_irq(tp->pdev->irq, dev);
@@ -5929,14 +6018,12 @@ static int tg3_open(struct net_device *dev)
 	struct tg3 *tp = netdev_priv(dev);
 	int err;
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tg3_disable_ints(tp);
 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	/* The placement of this call is tied
 	 * to the setup and use of Host TX descriptors.
@@ -5948,7 +6035,13 @@ static int tg3_open(struct net_device *dev)
 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
-		if (pci_enable_msi(tp->pdev) == 0) {
+		/* All MSI supporting chips should support tagged
+		 * status.  Assert that this is the case.
+		 */
+		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+			       "Not using MSI.\n", tp->dev->name);
+		} else if (pci_enable_msi(tp->pdev) == 0) {
 			u32 msi_mode;
 
 			msi_mode = tr32(MSGINT_MODE);
@@ -5959,9 +6052,14 @@ static int tg3_open(struct net_device *dev)
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		err = request_irq(tp->pdev->irq, tg3_msi,
 				  SA_SAMPLE_RANDOM, dev->name, dev);
-	else
-		err = request_irq(tp->pdev->irq, tg3_interrupt,
+	else {
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
+
+		err = request_irq(tp->pdev->irq, fn,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 
 	if (err) {
 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -5972,17 +6070,23 @@ static int tg3_open(struct net_device *dev)
 		return err;
 	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	err = tg3_init_hw(tp);
 	if (err) {
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_free_rings(tp);
 	} else {
-		tp->timer_offset = HZ / 10;
-		tp->timer_counter = tp->timer_multiplier = 10;
-		tp->asf_counter = tp->asf_multiplier = (10 * 120);
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			tp->timer_offset = HZ;
+		else
+			tp->timer_offset = HZ / 10;
+
+		BUG_ON(tp->timer_offset > HZ);
+		tp->timer_counter = tp->timer_multiplier =
+			(HZ / tp->timer_offset);
+		tp->asf_counter = tp->asf_multiplier =
+			((HZ / tp->timer_offset) * 120);
 
 		init_timer(&tp->timer);
 		tp->timer.expires = jiffies + tp->timer_offset;
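The timer arithmetic above is worth working through once. With HZ = 1000: under tagged status timer_offset = HZ, a one-second tick, so timer_counter = HZ / timer_offset = 1; without tagged status timer_offset = HZ / 10, a 100 ms tick, and timer_counter = 10. Either way the "once per second" section of tg3_timer() still runs once per second, and asf_counter = (HZ / timer_offset) * 120 keeps the ASF heartbeat at one poke every 120 seconds, matching the old hard-coded 10 * 120.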
@@ -5990,8 +6094,7 @@ static int tg3_open(struct net_device *dev)
 		tp->timer.function = tg3_timer;
 	}
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	if (err) {
 		free_irq(tp->pdev->irq, dev);
@@ -6005,34 +6108,31 @@ static int tg3_open(struct net_device *dev)
 
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
 		err = tg3_test_msi(tp);
+
 		if (err) {
-			spin_lock_irq(&tp->lock);
-			spin_lock(&tp->tx_lock);
+			tg3_full_lock(tp, 0);
 
 			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
 				pci_disable_msi(tp->pdev);
 				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 			}
-			tg3_halt(tp, 1);
+			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 			tg3_free_rings(tp);
 			tg3_free_consistent(tp);
 
-			spin_unlock(&tp->tx_lock);
-			spin_unlock_irq(&tp->lock);
+			tg3_full_unlock(tp);
 
 			return err;
 		}
 	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	add_timer(&tp->timer);
 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
 	tg3_enable_ints(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	netif_start_queue(dev);
 
@@ -6278,23 +6378,21 @@ static int tg3_close(struct net_device *dev)
 
 	del_timer_sync(&tp->timer);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 #if 0
 	tg3_dump_state(tp);
 #endif
 
 	tg3_disable_ints(tp);
 
-	tg3_halt(tp, 1);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	tg3_free_rings(tp);
 	tp->tg3_flags &=
 		~(TG3_FLAG_INIT_COMPLETE |
 		  TG3_FLAG_GOT_SERDES_FLOWCTL);
 	netif_carrier_off(tp->dev);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	free_irq(tp->pdev->irq, dev);
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -6331,16 +6429,15 @@ static unsigned long calc_crc_errors(struct tg3 *tp)
 	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
-		unsigned long flags;
 		u32 val;
 
-		spin_lock_irqsave(&tp->lock, flags);
+		spin_lock_bh(&tp->lock);
 		if (!tg3_readphy(tp, 0x1e, &val)) {
 			tg3_writephy(tp, 0x1e, val | 0x8000);
 			tg3_readphy(tp, 0x14, &val);
 		} else
 			val = 0;
-		spin_unlock_irqrestore(&tp->lock, flags);
+		spin_unlock_bh(&tp->lock);
 
 		tp->phy_crc_errors += val;
 
@@ -6602,11 +6699,9 @@ static void tg3_set_rx_mode(struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 	__tg3_set_rx_mode(dev);
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 }
 
 #define TG3_REGDUMP_LEN		(32 * 1024)
@@ -6628,8 +6723,7 @@ static void tg3_get_regs(struct net_device *dev,
 
 	memset(p, 0, TG3_REGDUMP_LEN);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 #define __GET_REG32(reg)	(*(p)++ = tr32(reg))
 #define GET_REG32_LOOP(base,len)		\
@@ -6679,8 +6773,7 @@ do {	p = (u32 *)(orig_p + (reg)); \
 #undef GET_REG32_LOOP
 #undef GET_REG32_1
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 }
 
 static int tg3_get_eeprom_len(struct net_device *dev)
@@ -6856,8 +6949,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 			return -EINVAL;
 	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tp->link_config.autoneg = cmd->autoneg;
 	if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -6873,8 +6965,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (netif_running(dev))
 		tg3_setup_phy(tp, 1);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -6910,12 +7001,12 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
 		return -EINVAL;
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	if (wol->wolopts & WAKE_MAGIC)
 		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
 	else
 		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return 0;
 }
@@ -6955,7 +7046,7 @@ static int tg3_nway_reset(struct net_device *dev)
 	if (!netif_running(dev))
 		return -EAGAIN;
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	r = -EINVAL;
 	tg3_readphy(tp, MII_BMCR, &bmcr);
 	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
@@ -6963,7 +7054,7 @@ static int tg3_nway_reset(struct net_device *dev)
 		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
 		r = 0;
 	}
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return r;
 }
@@ -6985,17 +7076,19 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
 	struct tg3 *tp = netdev_priv(dev);
+	int irq_sync = 0;
 
 	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
 	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
 		return -EINVAL;
 
-	if (netif_running(dev))
+	if (netif_running(dev)) {
 		tg3_netif_stop(tp);
+		irq_sync = 1;
+	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, irq_sync);
 
 	tp->rx_pending = ering->rx_pending;
 
@@ -7006,13 +7099,12 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 	tp->tx_pending = ering->tx_pending;
 
 	if (netif_running(dev)) {
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_init_hw(tp);
 		tg3_netif_start(tp);
 	}
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -7029,12 +7121,15 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 {
 	struct tg3 *tp = netdev_priv(dev);
+	int irq_sync = 0;
 
-	if (netif_running(dev))
+	if (netif_running(dev)) {
 		tg3_netif_stop(tp);
+		irq_sync = 1;
+	}
+
+	tg3_full_lock(tp, irq_sync);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
 	if (epause->autoneg)
 		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 	else
@@ -7049,12 +7144,12 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
 
 	if (netif_running(dev)) {
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_init_hw(tp);
 		tg3_netif_start(tp);
 	}
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -7075,12 +7170,12 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data)
 		return 0;
 	}
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	if (data)
 		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
 	else
 		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return 0;
 }
@@ -7108,12 +7203,20 @@ static int tg3_get_stats_count (struct net_device *dev)
 	return TG3_NUM_STATS;
 }
 
+static int tg3_get_test_count (struct net_device *dev)
+{
+	return TG3_NUM_TEST;
+}
+
 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
 {
 	switch (stringset) {
 	case ETH_SS_STATS:
 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
 		break;
+	case ETH_SS_TEST:
+		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+		break;
 	default:
 		WARN_ON(1);	/* we need a WARN() */
 		break;
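tg3_get_test_count() and the new ETH_SS_TEST case are the ethtool plumbing for the self-tests that follow: the six results the self-test entry point returns (presumably wired into tg3_ethtool_ops beyond the truncated hunk below) line up one-to-one with ethtool_test_keys[], which is what userspace `ethtool -t` prints next to each PASS/FAIL.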
@@ -7127,6 +7230,517 @@ static void tg3_get_ethtool_stats (struct net_device *dev,
7127 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); 7230 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7128} 7231}
7129 7232
7233#define NVRAM_TEST_SIZE 0x100
7234
7235static int tg3_test_nvram(struct tg3 *tp)
7236{
7237 u32 *buf, csum;
7238 int i, j, err = 0;
7239
7240 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7241 if (buf == NULL)
7242 return -ENOMEM;
7243
7244 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7245 u32 val;
7246
7247 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7248 break;
7249 buf[j] = cpu_to_le32(val);
7250 }
7251 if (i < NVRAM_TEST_SIZE)
7252 goto out;
7253
7254 err = -EIO;
7255 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7256 goto out;
7257
7258 /* Bootstrap checksum at offset 0x10 */
7259 csum = calc_crc((unsigned char *) buf, 0x10);
7260 if(csum != cpu_to_le32(buf[0x10/4]))
7261 goto out;
7262
7263 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7264 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7265 if (csum != cpu_to_le32(buf[0xfc/4]))
7266 goto out;
7267
7268 err = 0;
7269
7270out:
7271 kfree(buf);
7272 return err;
7273}
7274
7275#define TG3_SERDES_TIMEOUT_SEC 2
7276#define TG3_COPPER_TIMEOUT_SEC 6
7277
7278static int tg3_test_link(struct tg3 *tp)
7279{
7280 int i, max;
7281
7282 if (!netif_running(tp->dev))
7283 return -ENODEV;
7284
7285 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7286 max = TG3_SERDES_TIMEOUT_SEC;
7287 else
7288 max = TG3_COPPER_TIMEOUT_SEC;
7289
7290 for (i = 0; i < max; i++) {
7291 if (netif_carrier_ok(tp->dev))
7292 return 0;
7293
7294 if (msleep_interruptible(1000))
7295 break;
7296 }
7297
7298 return -EIO;
7299}
7300
7301/* Only test the commonly used registers */
7302static int tg3_test_registers(struct tg3 *tp)
7303{
7304 int i, is_5705;
7305 u32 offset, read_mask, write_mask, val, save_val, read_val;
7306 static struct {
7307 u16 offset;
7308 u16 flags;
7309#define TG3_FL_5705 0x1
7310#define TG3_FL_NOT_5705 0x2
7311#define TG3_FL_NOT_5788 0x4
7312 u32 read_mask;
7313 u32 write_mask;
7314 } reg_tbl[] = {
7315 /* MAC Control Registers */
7316 { MAC_MODE, TG3_FL_NOT_5705,
7317 0x00000000, 0x00ef6f8c },
7318 { MAC_MODE, TG3_FL_5705,
7319 0x00000000, 0x01ef6b8c },
7320 { MAC_STATUS, TG3_FL_NOT_5705,
7321 0x03800107, 0x00000000 },
7322 { MAC_STATUS, TG3_FL_5705,
7323 0x03800100, 0x00000000 },
7324 { MAC_ADDR_0_HIGH, 0x0000,
7325 0x00000000, 0x0000ffff },
7326 { MAC_ADDR_0_LOW, 0x0000,
7327 0x00000000, 0xffffffff },
7328 { MAC_RX_MTU_SIZE, 0x0000,
7329 0x00000000, 0x0000ffff },
7330 { MAC_TX_MODE, 0x0000,
7331 0x00000000, 0x00000070 },
7332 { MAC_TX_LENGTHS, 0x0000,
7333 0x00000000, 0x00003fff },
7334 { MAC_RX_MODE, TG3_FL_NOT_5705,
7335 0x00000000, 0x000007fc },
7336 { MAC_RX_MODE, TG3_FL_5705,
7337 0x00000000, 0x000007dc },
7338 { MAC_HASH_REG_0, 0x0000,
7339 0x00000000, 0xffffffff },
7340 { MAC_HASH_REG_1, 0x0000,
7341 0x00000000, 0xffffffff },
7342 { MAC_HASH_REG_2, 0x0000,
7343 0x00000000, 0xffffffff },
7344 { MAC_HASH_REG_3, 0x0000,
7345 0x00000000, 0xffffffff },
7346
7347 /* Receive Data and Receive BD Initiator Control Registers. */
7348 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7349 0x00000000, 0xffffffff },
7350 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7351 0x00000000, 0xffffffff },
7352 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7353 0x00000000, 0x00000003 },
7354 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7355 0x00000000, 0xffffffff },
7356 { RCVDBDI_STD_BD+0, 0x0000,
7357 0x00000000, 0xffffffff },
7358 { RCVDBDI_STD_BD+4, 0x0000,
7359 0x00000000, 0xffffffff },
7360 { RCVDBDI_STD_BD+8, 0x0000,
7361 0x00000000, 0xffff0002 },
7362 { RCVDBDI_STD_BD+0xc, 0x0000,
7363 0x00000000, 0xffffffff },
7364
7365 /* Receive BD Initiator Control Registers. */
7366 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7367 0x00000000, 0xffffffff },
7368 { RCVBDI_STD_THRESH, TG3_FL_5705,
7369 0x00000000, 0x000003ff },
7370 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7371 0x00000000, 0xffffffff },
7372
7373 /* Host Coalescing Control Registers. */
7374 { HOSTCC_MODE, TG3_FL_NOT_5705,
7375 0x00000000, 0x00000004 },
7376 { HOSTCC_MODE, TG3_FL_5705,
7377 0x00000000, 0x000000f6 },
7378 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7379 0x00000000, 0xffffffff },
7380 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7381 0x00000000, 0x000003ff },
7382 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7383 0x00000000, 0xffffffff },
7384 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7385 0x00000000, 0x000003ff },
7386 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7387 0x00000000, 0xffffffff },
7388 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7389 0x00000000, 0x000000ff },
7390 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7391 0x00000000, 0xffffffff },
7392 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7393 0x00000000, 0x000000ff },
7394 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7395 0x00000000, 0xffffffff },
7396 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7397 0x00000000, 0xffffffff },
7398 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7399 0x00000000, 0xffffffff },
7400 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7401 0x00000000, 0x000000ff },
7402 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7403 0x00000000, 0xffffffff },
7404 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7405 0x00000000, 0x000000ff },
7406 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7407 0x00000000, 0xffffffff },
7408 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7409 0x00000000, 0xffffffff },
7410 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7411 0x00000000, 0xffffffff },
7412 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7413 0x00000000, 0xffffffff },
7414 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7415 0x00000000, 0xffffffff },
7416 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7417 0xffffffff, 0x00000000 },
7418 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7419 0xffffffff, 0x00000000 },
7420
7421 /* Buffer Manager Control Registers. */
7422 { BUFMGR_MB_POOL_ADDR, 0x0000,
7423 0x00000000, 0x007fff80 },
7424 { BUFMGR_MB_POOL_SIZE, 0x0000,
7425 0x00000000, 0x007fffff },
7426 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7427 0x00000000, 0x0000003f },
7428 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7429 0x00000000, 0x000001ff },
7430 { BUFMGR_MB_HIGH_WATER, 0x0000,
7431 0x00000000, 0x000001ff },
7432 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7433 0xffffffff, 0x00000000 },
7434 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7435 0xffffffff, 0x00000000 },
7436
7437 /* Mailbox Registers */
7438 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7439 0x00000000, 0x000001ff },
7440 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7441 0x00000000, 0x000001ff },
7442 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7443 0x00000000, 0x000007ff },
7444 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7445 0x00000000, 0x000001ff },
7446
7447 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7448 };
7449
7450 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7451 is_5705 = 1;
7452 else
7453 is_5705 = 0;
7454
7455 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7456 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7457 continue;
7458
7459 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7460 continue;
7461
7462 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7463 (reg_tbl[i].flags & TG3_FL_NOT_5788))
7464 continue;
7465
7466 offset = (u32) reg_tbl[i].offset;
7467 read_mask = reg_tbl[i].read_mask;
7468 write_mask = reg_tbl[i].write_mask;
7469
7470 /* Save the original register content */
7471 save_val = tr32(offset);
7472
7473 /* Determine the read-only value. */
7474 read_val = save_val & read_mask;
7475
7476 /* Write zero to the register, then make sure the read-only bits
7477 * are not changed and the read/write bits are all zeros.
7478 */
7479 tw32(offset, 0);
7480
7481 val = tr32(offset);
7482
7483 /* Test the read-only and read/write bits. */
7484 if (((val & read_mask) != read_val) || (val & write_mask))
7485 goto out;
7486
7487 /* Write ones to all the bits defined by read_mask and
7488 * write_mask, then make sure the read-only bits are not
7489 * changed and the read/write bits are all ones.
7490 */
7491 tw32(offset, read_mask | write_mask);
7492
7493 val = tr32(offset);
7494
7495 /* Test the read-only bits. */
7496 if ((val & read_mask) != read_val)
7497 goto out;
7498
7499 /* Test the read/write bits. */
7500 if ((val & write_mask) != write_mask)
7501 goto out;
7502
7503 tw32(offset, save_val);
7504 }
7505
7506 return 0;
7507
7508out:
7509 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7510 tw32(offset, save_val);
7511 return -EIO;
7512}
7513
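A minimal standalone sketch of the same mask-driven probe used by the register test above, assuming a fake register with read-only bits 0x00ff and read/write bits 0xff00; every name below is invented for the illustration and none of it is driver code:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x0042;             /* RO field preset to 0x42 */

static uint32_t rd(void) { return fake_reg; }

static void wr(uint32_t v)                     /* only the RW bits latch */
{
	fake_reg = (fake_reg & 0x00ffu) | (v & 0xff00u);
}

static int probe(uint32_t read_mask, uint32_t write_mask)
{
	uint32_t ro = rd() & read_mask;        /* read-only value to keep */

	wr(0);                                 /* probe 1: write zero */
	if ((rd() & read_mask) != ro || (rd() & write_mask))
		return -1;                     /* RO moved or RW stuck high */

	wr(read_mask | write_mask);            /* probe 2: write all ones */
	if ((rd() & read_mask) != ro || (rd() & write_mask) != write_mask)
		return -1;                     /* RO moved or RW stuck low */
	return 0;
}

int main(void)
{
	printf("register probe: %s\n",
	       probe(0x00ffu, 0xff00u) ? "FAILED" : "ok");
	return 0;
}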
7514static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7515{
7516 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7517 int i;
7518 u32 j;
7519
7520 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7521 for (j = 0; j < len; j += 4) {
7522 u32 val;
7523
7524 tg3_write_mem(tp, offset + j, test_pattern[i]);
7525 tg3_read_mem(tp, offset + j, &val);
7526 if (val != test_pattern[i])
7527 return -EIO;
7528 }
7529 }
7530 return 0;
7531}
7532
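The three fixed patterns above (all zeros, all ones, 0xaa55a55a) catch stuck-at faults; a walking-ones pass would additionally catch adjacent data lines shorted together. A hypothetical extension, not part of the driver, reusing the driver's tg3_write_mem/tg3_read_mem accessors:

static int tg3_do_mem_test_walk(struct tg3 *tp, u32 offset, u32 len)
{
	int bit;
	u32 j;

	for (bit = 0; bit < 32; bit++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, 1U << bit);
			tg3_read_mem(tp, offset + j, &val);
			if (val != (1U << bit))
				return -EIO;
		}
	}
	return 0;
}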
7533static int tg3_test_memory(struct tg3 *tp)
7534{
7535 static struct mem_entry {
7536 u32 offset;
7537 u32 len;
7538 } mem_tbl_570x[] = {
7539 { 0x00000000, 0x01000},
7540 { 0x00002000, 0x1c000},
7541 { 0xffffffff, 0x00000}
7542 }, mem_tbl_5705[] = {
7543 { 0x00000100, 0x0000c},
7544 { 0x00000200, 0x00008},
7545 { 0x00000b50, 0x00400},
7546 { 0x00004000, 0x00800},
7547 { 0x00006000, 0x01000},
7548 { 0x00008000, 0x02000},
7549 { 0x00010000, 0x0e000},
7550 { 0xffffffff, 0x00000}
7551 };
7552 struct mem_entry *mem_tbl;
7553 int err = 0;
7554 int i;
7555
7556 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7557 mem_tbl = mem_tbl_5705;
7558 else
7559 mem_tbl = mem_tbl_570x;
7560
7561 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7562 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7563 mem_tbl[i].len)) != 0)
7564 break;
7565 }
7566
7567 return err;
7568}
7569
7570static int tg3_test_loopback(struct tg3 *tp)
7571{
7572 u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7573 u32 desc_idx;
7574 struct sk_buff *skb, *rx_skb;
7575 u8 *tx_data;
7576 dma_addr_t map;
7577 int num_pkts, tx_len, rx_len, i, err;
7578 struct tg3_rx_buffer_desc *desc;
7579
7580 if (!netif_running(tp->dev))
7581 return -ENODEV;
7582
7583 err = -EIO;
7584
7585 tg3_abort_hw(tp, 1);
7586
7587 tg3_reset_hw(tp);
7588
7589 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7590 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7591 MAC_MODE_PORT_MODE_GMII;
7592 tw32(MAC_MODE, mac_mode);
7593
7594 tx_len = 1514;
7595 skb = dev_alloc_skb(tx_len);
7596 tx_data = skb_put(skb, tx_len);
7597 memcpy(tx_data, tp->dev->dev_addr, 6);
7598 memset(tx_data + 6, 0x0, 8);
7599
7600 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7601
7602 for (i = 14; i < tx_len; i++)
7603 tx_data[i] = (u8) (i & 0xff);
7604
7605 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7606
7607 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7608 HOSTCC_MODE_NOW);
7609
7610 udelay(10);
7611
7612 rx_start_idx = tp->hw_status->idx[0].rx_producer;
7613
7614 send_idx = 0;
7615 num_pkts = 0;
7616
7617 tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7618
7619 send_idx++;
7620 num_pkts++;
7621
7622 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7623 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7624
7625 udelay(10);
7626
7627 for (i = 0; i < 10; i++) {
7628 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7629 HOSTCC_MODE_NOW);
7630
7631 udelay(10);
7632
7633 tx_idx = tp->hw_status->idx[0].tx_consumer;
7634 rx_idx = tp->hw_status->idx[0].rx_producer;
7635 if ((tx_idx == send_idx) &&
7636 (rx_idx == (rx_start_idx + num_pkts)))
7637 break;
7638 }
7639
7640 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7641 dev_kfree_skb(skb);
7642
7643 if (tx_idx != send_idx)
7644 goto out;
7645
7646 if (rx_idx != rx_start_idx + num_pkts)
7647 goto out;
7648
7649 desc = &tp->rx_rcb[rx_start_idx];
7650 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7651 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7652 if (opaque_key != RXD_OPAQUE_RING_STD)
7653 goto out;
7654
7655 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7656 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7657 goto out;
7658
7659 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7660 if (rx_len != tx_len)
7661 goto out;
7662
7663 rx_skb = tp->rx_std_buffers[desc_idx].skb;
7664
7665 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7666 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7667
7668 for (i = 14; i < tx_len; i++) {
7669 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7670 goto out;
7671 }
7672 err = 0;
7673
7674 /* tg3_free_rings will unmap and free the rx_skb */
7675out:
7676 return err;
7677}
7678
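For reference, the loopback frame built above is a 14-byte Ethernet header, with the destination set to the NIC's own MAC address and the source and EtherType fields zeroed, followed by an incrementing byte pattern that the receive side checks byte-for-byte. The same fill, factored into a hypothetical helper:

static void tg3_fill_loopback_frame(u8 *buf, const u8 *mac, int len)
{
	int i;

	memcpy(buf, mac, 6);		/* destination: the NIC's own MAC */
	memset(buf + 6, 0, 8);		/* source MAC + EtherType zeroed */
	for (i = 14; i < len; i++)	/* payload: i & 0xff, checked on RX */
		buf[i] = (u8) (i & 0xff);
}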
7679static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7680 u64 *data)
7681{
7682 struct tg3 *tp = netdev_priv(dev);
7683
7684 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
7685
7686 if (tg3_test_nvram(tp) != 0) {
7687 etest->flags |= ETH_TEST_FL_FAILED;
7688 data[0] = 1;
7689 }
7690 if (tg3_test_link(tp) != 0) {
7691 etest->flags |= ETH_TEST_FL_FAILED;
7692 data[1] = 1;
7693 }
7694 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7695 int irq_sync = 0;
7696
7697 if (netif_running(dev)) {
7698 tg3_netif_stop(tp);
7699 irq_sync = 1;
7700 }
7701
7702 tg3_full_lock(tp, irq_sync);
7703
7704 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
7705 tg3_nvram_lock(tp);
7706 tg3_halt_cpu(tp, RX_CPU_BASE);
7707 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7708 tg3_halt_cpu(tp, TX_CPU_BASE);
7709 tg3_nvram_unlock(tp);
7710
7711 if (tg3_test_registers(tp) != 0) {
7712 etest->flags |= ETH_TEST_FL_FAILED;
7713 data[2] = 1;
7714 }
7715 if (tg3_test_memory(tp) != 0) {
7716 etest->flags |= ETH_TEST_FL_FAILED;
7717 data[3] = 1;
7718 }
7719 if (tg3_test_loopback(tp) != 0) {
7720 etest->flags |= ETH_TEST_FL_FAILED;
7721 data[4] = 1;
7722 }
7723
7724 tg3_full_unlock(tp);
7725
7726 if (tg3_test_interrupt(tp) != 0) {
7727 etest->flags |= ETH_TEST_FL_FAILED;
7728 data[5] = 1;
7729 }
7730
7731 tg3_full_lock(tp, 0);
7732
7733 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7734 if (netif_running(dev)) {
7735 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7736 tg3_init_hw(tp);
7737 tg3_netif_start(tp);
7738 }
7739
7740 tg3_full_unlock(tp);
7741 }
7742}
7743
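tg3_self_test is reached through the SIOCETHTOOL ioctl, the same path the ethtool utility's -t option uses. A minimal userspace sketch, assuming the interface is named eth0 and that six results come back (matching TG3_NUM_TEST); error handling is kept to the bare minimum:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int i, num = 6;			/* assumed equal to TG3_NUM_TEST */
	struct ethtool_test *test;
	struct ifreq ifr;

	test = calloc(1, sizeof(*test) + num * sizeof(__u64));
	if (fd < 0 || !test)
		return 1;

	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* run the offline tests too */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *) test;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}
	for (i = 0; i < num; i++)
		printf("test %d: %s\n", i, test->data[i] ? "FAILED" : "ok");
	printf("overall: %s\n",
	       (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");
	return 0;
}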
7130static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 7744static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7131{ 7745{
7132 struct mii_ioctl_data *data = if_mii(ifr); 7746 struct mii_ioctl_data *data = if_mii(ifr);
@@ -7144,9 +7758,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7144 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 7758 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7145 break; /* We have no PHY */ 7759 break; /* We have no PHY */
7146 7760
7147 spin_lock_irq(&tp->lock); 7761 spin_lock_bh(&tp->lock);
7148 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); 7762 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7149 spin_unlock_irq(&tp->lock); 7763 spin_unlock_bh(&tp->lock);
7150 7764
7151 data->val_out = mii_regval; 7765 data->val_out = mii_regval;
7152 7766
@@ -7160,9 +7774,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7160 if (!capable(CAP_NET_ADMIN)) 7774 if (!capable(CAP_NET_ADMIN))
7161 return -EPERM; 7775 return -EPERM;
7162 7776
7163 spin_lock_irq(&tp->lock); 7777 spin_lock_bh(&tp->lock);
7164 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); 7778 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7165 spin_unlock_irq(&tp->lock); 7779 spin_unlock_bh(&tp->lock);
7166 7780
7167 return err; 7781 return err;
7168 7782
@@ -7178,31 +7792,35 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7178{ 7792{
7179 struct tg3 *tp = netdev_priv(dev); 7793 struct tg3 *tp = netdev_priv(dev);
7180 7794
7181 spin_lock_irq(&tp->lock); 7795 tg3_full_lock(tp, 0);
7182 spin_lock(&tp->tx_lock);
7183 7796
7184 tp->vlgrp = grp; 7797 tp->vlgrp = grp;
7185 7798
7186 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ 7799 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7187 __tg3_set_rx_mode(dev); 7800 __tg3_set_rx_mode(dev);
7188 7801
7189 spin_unlock(&tp->tx_lock); 7802 tg3_full_unlock(tp);
7190 spin_unlock_irq(&tp->lock);
7191} 7803}
7192 7804
7193static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 7805static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7194{ 7806{
7195 struct tg3 *tp = netdev_priv(dev); 7807 struct tg3 *tp = netdev_priv(dev);
7196 7808
7197 spin_lock_irq(&tp->lock); 7809 tg3_full_lock(tp, 0);
7198 spin_lock(&tp->tx_lock);
7199 if (tp->vlgrp) 7810 if (tp->vlgrp)
7200 tp->vlgrp->vlan_devices[vid] = NULL; 7811 tp->vlgrp->vlan_devices[vid] = NULL;
7201 spin_unlock(&tp->tx_lock); 7812 tg3_full_unlock(tp);
7202 spin_unlock_irq(&tp->lock);
7203} 7813}
7204#endif 7814#endif
7205 7815
7816static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7817{
7818 struct tg3 *tp = netdev_priv(dev);
7819
7820 memcpy(ec, &tp->coal, sizeof(*ec));
7821 return 0;
7822}
7823
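Userspace reads this structure back via the ETHTOOL_GCOALESCE command, which is what "ethtool -c eth0" prints. A condensed sketch along the same lines as the self-test example above, again assuming an interface named eth0:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *) &ec;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}
	printf("rx-usecs %u rx-frames %u tx-usecs %u tx-frames %u\n",
	       ec.rx_coalesce_usecs, ec.rx_max_coalesced_frames,
	       ec.tx_coalesce_usecs, ec.tx_max_coalesced_frames);
	return 0;
}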
7206static struct ethtool_ops tg3_ethtool_ops = { 7824static struct ethtool_ops tg3_ethtool_ops = {
7207 .get_settings = tg3_get_settings, 7825 .get_settings = tg3_get_settings,
7208 .set_settings = tg3_set_settings, 7826 .set_settings = tg3_set_settings,
@@ -7232,9 +7850,12 @@ static struct ethtool_ops tg3_ethtool_ops = {
7232 .get_tso = ethtool_op_get_tso, 7850 .get_tso = ethtool_op_get_tso,
7233 .set_tso = tg3_set_tso, 7851 .set_tso = tg3_set_tso,
7234#endif 7852#endif
7853 .self_test_count = tg3_get_test_count,
7854 .self_test = tg3_self_test,
7235 .get_strings = tg3_get_strings, 7855 .get_strings = tg3_get_strings,
7236 .get_stats_count = tg3_get_stats_count, 7856 .get_stats_count = tg3_get_stats_count,
7237 .get_ethtool_stats = tg3_get_ethtool_stats, 7857 .get_ethtool_stats = tg3_get_ethtool_stats,
7858 .get_coalesce = tg3_get_coalesce,
7238}; 7859};
7239 7860
7240static void __devinit tg3_get_eeprom_size(struct tg3 *tp) 7861static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -7914,6 +8535,16 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
7914 8535
7915 case NIC_SRAM_DATA_CFG_LED_MODE_MAC: 8536 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7916 tp->led_ctrl = LED_CTRL_MODE_MAC; 8537 tp->led_ctrl = LED_CTRL_MODE_MAC;
8538
8539 /* Default to PHY_1_MODE if 0 (MAC_MODE) is read;
8540 * some older 5700/5701 bootcode reports it that way.
8541 */
8542 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8543 ASIC_REV_5700 ||
8544 GET_ASIC_REV(tp->pci_chip_rev_id) ==
8545 ASIC_REV_5701)
8546 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8547
7917 break; 8548 break;
7918 8549
7919 case SHASTA_EXT_LED_SHARED: 8550 case SHASTA_EXT_LED_SHARED:
@@ -8422,15 +9053,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
8422 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 9053 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8423 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 9054 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8424 9055
8425 /* Only 5701 and later support tagged irq status mode.
8426 * Also, 5788 chips cannot use tagged irq status.
8427 *
8428 * However, since we are using NAPI avoid tagged irq status
8429 * because the interrupt condition is more difficult to
8430 * fully clear in that mode.
8431 */
8432 tp->coalesce_mode = 0; 9056 tp->coalesce_mode = 0;
8433
8434 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && 9057 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8435 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) 9058 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8436 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 9059 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -8494,6 +9117,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
8494 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 9117 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8495 tp->tg3_flags2 |= TG3_FLG2_IS_5788; 9118 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8496 9119
9120 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9121 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9122 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9123 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9124 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9125 HOSTCC_MODE_CLRTICK_TXBD);
9126
9127 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9128 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9129 tp->misc_host_ctrl);
9130 }
9131
8497 /* these are limited to 10/100 only */ 9132 /* these are limited to 10/100 only */
8498 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 9133 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8499 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 9134 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -8671,6 +9306,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
8671 return 0; 9306 return 0;
8672} 9307}
8673 9308
9309#define BOUNDARY_SINGLE_CACHELINE 1
9310#define BOUNDARY_MULTI_CACHELINE 2
9311
9312static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9313{
9314 int cacheline_size;
9315 u8 byte;
9316 int goal;
9317
9318 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9319 if (byte == 0)
9320 cacheline_size = 1024;
9321 else
9322 cacheline_size = (int) byte * 4;
9323
9324 /* On 5703 and later chips, the boundary bits have no
9325 * effect.
9326 */
9327 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9328 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9329 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9330 goto out;
9331
9332#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9333 goal = BOUNDARY_MULTI_CACHELINE;
9334#else
9335#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9336 goal = BOUNDARY_SINGLE_CACHELINE;
9337#else
9338 goal = 0;
9339#endif
9340#endif
9341
9342 if (!goal)
9343 goto out;
9344
9345 /* PCI controllers on most RISC systems tend to disconnect
9346 * when a device tries to burst across a cache-line boundary.
9347 * Therefore, letting tg3 do so just wastes PCI bandwidth.
9348 *
9349 * Unfortunately, for PCI-E there are only limited
9350 * write-side controls for this, and thus for reads
9351 * we will still get the disconnects. We'll also waste
9352 * these PCI cycles for both read and write on chips
9353 * other than 5700 and 5701, which do not implement the
9354 * boundary bits.
9355 */
9356 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9357 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9358 switch (cacheline_size) {
9359 case 16:
9360 case 32:
9361 case 64:
9362 case 128:
9363 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9364 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9365 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9366 } else {
9367 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9368 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9369 }
9370 break;
9371
9372 case 256:
9373 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9374 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9375 break;
9376
9377 default:
9378 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9379 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9380 break;
9381 }
9382 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9383 switch (cacheline_size) {
9384 case 16:
9385 case 32:
9386 case 64:
9387 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9388 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9389 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9390 break;
9391 }
9392 /* fallthrough */
9393 case 128:
9394 default:
9395 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9396 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9397 break;
9398 }
9399 } else {
9400 switch (cacheline_size) {
9401 case 16:
9402 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9403 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9404 DMA_RWCTRL_WRITE_BNDRY_16);
9405 break;
9406 }
9407 /* fallthrough */
9408 case 32:
9409 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9410 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9411 DMA_RWCTRL_WRITE_BNDRY_32);
9412 break;
9413 }
9414 /* fallthrough */
9415 case 64:
9416 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9417 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9418 DMA_RWCTRL_WRITE_BNDRY_64);
9419 break;
9420 }
9421 /* fallthrough */
9422 case 128:
9423 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9424 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9425 DMA_RWCTRL_WRITE_BNDRY_128);
9426 break;
9427 }
9428 /* fallthrough */
9429 case 256:
9430 val |= (DMA_RWCTRL_READ_BNDRY_256 |
9431 DMA_RWCTRL_WRITE_BNDRY_256);
9432 break;
9433 case 512:
9434 val |= (DMA_RWCTRL_READ_BNDRY_512 |
9435 DMA_RWCTRL_WRITE_BNDRY_512);
9436 break;
9437 case 1024:
9438 default:
9439 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9440 DMA_RWCTRL_WRITE_BNDRY_1024);
9441 break;
9442 }
9443 }
9444
9445out:
9446 return val;
9447}
9448
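The decode at the top of tg3_calc_dma_bndry is worth restating: PCI_CACHE_LINE_SIZE counts 32-bit words, so a raw readback of 0x10 means 16 * 4 = 64 bytes, and 0 is treated as unknown and defaulted to 1024 bytes. As a hypothetical one-line helper, not in the driver:

static int pci_cacheline_bytes(u8 raw)
{
	return raw ? (int) raw * 4 : 1024;	/* register counts dwords */
}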
8674static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) 9449static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8675{ 9450{
8676 struct tg3_internal_buffer_desc test_desc; 9451 struct tg3_internal_buffer_desc test_desc;
@@ -8752,12 +9527,12 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
8752 return ret; 9527 return ret;
8753} 9528}
8754 9529
8755#define TEST_BUFFER_SIZE 0x400 9530#define TEST_BUFFER_SIZE 0x2000
8756 9531
8757static int __devinit tg3_test_dma(struct tg3 *tp) 9532static int __devinit tg3_test_dma(struct tg3 *tp)
8758{ 9533{
8759 dma_addr_t buf_dma; 9534 dma_addr_t buf_dma;
8760 u32 *buf; 9535 u32 *buf, saved_dma_rwctrl;
8761 int ret; 9536 int ret;
8762 9537
8763 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 9538 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -8769,46 +9544,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8769 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 9544 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8770 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 9545 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8771 9546
8772#ifndef CONFIG_X86 9547 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
8773 {
8774 u8 byte;
8775 int cacheline_size;
8776 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8777
8778 if (byte == 0)
8779 cacheline_size = 1024;
8780 else
8781 cacheline_size = (int) byte * 4;
8782
8783 switch (cacheline_size) {
8784 case 16:
8785 case 32:
8786 case 64:
8787 case 128:
8788 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8789 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8790 tp->dma_rwctrl |=
8791 DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8792 break;
8793 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8794 tp->dma_rwctrl &=
8795 ~(DMA_RWCTRL_PCI_WRITE_CMD);
8796 tp->dma_rwctrl |=
8797 DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8798 break;
8799 }
8800 /* fallthrough */
8801 case 256:
8802 if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8803 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8804 tp->dma_rwctrl |=
8805 DMA_RWCTRL_WRITE_BNDRY_256;
8806 else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8807 tp->dma_rwctrl |=
8808 DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8809 };
8810 }
8811#endif
8812 9548
8813 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 9549 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8814 /* DMA read watermark not used on PCIE */ 9550 /* DMA read watermark not used on PCIE */
@@ -8827,7 +9563,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8827 if (ccval == 0x6 || ccval == 0x7) 9563 if (ccval == 0x6 || ccval == 0x7)
8828 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 9564 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8829 9565
8830 /* Set bit 23 to renable PCIX hw bug fix */ 9566 /* Set bit 23 to enable PCIX hw bug fix */
8831 tp->dma_rwctrl |= 0x009f0000; 9567 tp->dma_rwctrl |= 0x009f0000;
8832 } else { 9568 } else {
8833 tp->dma_rwctrl |= 0x001b000f; 9569 tp->dma_rwctrl |= 0x001b000f;
@@ -8868,6 +9604,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8868 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) 9604 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8869 goto out; 9605 goto out;
8870 9606
9607 /* It is best to perform the DMA test with the maximum write
9608 * burst size to expose the 5700/5701 write DMA bug.
9609 */
9610 saved_dma_rwctrl = tp->dma_rwctrl;
9611 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9612 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9613
8871 while (1) { 9614 while (1) {
8872 u32 *p = buf, i; 9615 u32 *p = buf, i;
8873 9616
@@ -8906,8 +9649,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8906 if (p[i] == i) 9649 if (p[i] == i)
8907 continue; 9650 continue;
8908 9651
8909 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) == 9652 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
8910 DMA_RWCTRL_WRITE_BNDRY_DISAB) { 9653 DMA_RWCTRL_WRITE_BNDRY_16) {
9654 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
8911 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 9655 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8912 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 9656 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8913 break; 9657 break;
@@ -8924,6 +9668,28 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8924 break; 9668 break;
8925 } 9669 }
8926 } 9670 }
9671 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9672 DMA_RWCTRL_WRITE_BNDRY_16) {
9673 static struct pci_device_id dma_wait_state_chipsets[] = {
9674 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
9675 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
9676 { },
9677 };
9678
9679 /* The DMA test passed without adjusting the DMA boundary,
9680 * so now look for chipsets that are known to expose the
9681 * DMA bug without failing the test.
9682 */
9683 if (pci_dev_present(dma_wait_state_chipsets)) {
9684 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9685 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
9686 }
9687 else
9688 /* Safe to use the calculated DMA boundary. */
9689 tp->dma_rwctrl = saved_dma_rwctrl;
9690
9691 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9692 }
8927 9693
8928out: 9694out:
8929 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); 9695 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@@ -9011,6 +9777,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
9011 return peer; 9777 return peer;
9012} 9778}
9013 9779
9780static void __devinit tg3_init_coal(struct tg3 *tp)
9781{
9782 struct ethtool_coalesce *ec = &tp->coal;
9783
9784 memset(ec, 0, sizeof(*ec));
9785 ec->cmd = ETHTOOL_GCOALESCE;
9786 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
9787 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
9788 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
9789 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
9790 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
9791 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
9792 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
9793 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
9794 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
9795
9796 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
9797 HOSTCC_MODE_CLRTICK_TXBD)) {
9798 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
9799 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
9800 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
9801 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
9802 }
9803}
9804
9014static int __devinit tg3_init_one(struct pci_dev *pdev, 9805static int __devinit tg3_init_one(struct pci_dev *pdev,
9015 const struct pci_device_id *ent) 9806 const struct pci_device_id *ent)
9016{ 9807{
@@ -9232,7 +10023,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
9232 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 10023 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9233 pci_save_state(tp->pdev); 10024 pci_save_state(tp->pdev);
9234 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); 10025 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
9235 tg3_halt(tp, 1); 10026 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9236 } 10027 }
9237 10028
9238 err = tg3_test_dma(tp); 10029 err = tg3_test_dma(tp);
@@ -9256,6 +10047,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
9256 /* flow control autonegotiation is default behavior */ 10047 /* flow control autonegotiation is default behavior */
9257 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 10048 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9258 10049
10050 tg3_init_coal(tp);
10051
9259 err = register_netdev(dev); 10052 err = register_netdev(dev);
9260 if (err) { 10053 if (err) {
9261 printk(KERN_ERR PFX "Cannot register net device, " 10054 printk(KERN_ERR PFX "Cannot register net device, "
@@ -9298,6 +10091,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
9298 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0, 10091 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
9299 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0, 10092 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
9300 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); 10093 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10094 printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10095 dev->name, tp->dma_rwctrl);
9301 10096
9302 return 0; 10097 return 0;
9303 10098
@@ -9345,24 +10140,19 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9345 10140
9346 del_timer_sync(&tp->timer); 10141 del_timer_sync(&tp->timer);
9347 10142
9348 spin_lock_irq(&tp->lock); 10143 tg3_full_lock(tp, 1);
9349 spin_lock(&tp->tx_lock);
9350 tg3_disable_ints(tp); 10144 tg3_disable_ints(tp);
9351 spin_unlock(&tp->tx_lock); 10145 tg3_full_unlock(tp);
9352 spin_unlock_irq(&tp->lock);
9353 10146
9354 netif_device_detach(dev); 10147 netif_device_detach(dev);
9355 10148
9356 spin_lock_irq(&tp->lock); 10149 tg3_full_lock(tp, 0);
9357 spin_lock(&tp->tx_lock); 10150 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9358 tg3_halt(tp, 1); 10151 tg3_full_unlock(tp);
9359 spin_unlock(&tp->tx_lock);
9360 spin_unlock_irq(&tp->lock);
9361 10152
9362 err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); 10153 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9363 if (err) { 10154 if (err) {
9364 spin_lock_irq(&tp->lock); 10155 tg3_full_lock(tp, 0);
9365 spin_lock(&tp->tx_lock);
9366 10156
9367 tg3_init_hw(tp); 10157 tg3_init_hw(tp);
9368 10158
@@ -9372,8 +10162,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9372 netif_device_attach(dev); 10162 netif_device_attach(dev);
9373 tg3_netif_start(tp); 10163 tg3_netif_start(tp);
9374 10164
9375 spin_unlock(&tp->tx_lock); 10165 tg3_full_unlock(tp);
9376 spin_unlock_irq(&tp->lock);
9377 } 10166 }
9378 10167
9379 return err; 10168 return err;
@@ -9396,20 +10185,16 @@ static int tg3_resume(struct pci_dev *pdev)
9396 10185
9397 netif_device_attach(dev); 10186 netif_device_attach(dev);
9398 10187
9399 spin_lock_irq(&tp->lock); 10188 tg3_full_lock(tp, 0);
9400 spin_lock(&tp->tx_lock);
9401 10189
9402 tg3_init_hw(tp); 10190 tg3_init_hw(tp);
9403 10191
9404 tp->timer.expires = jiffies + tp->timer_offset; 10192 tp->timer.expires = jiffies + tp->timer_offset;
9405 add_timer(&tp->timer); 10193 add_timer(&tp->timer);
9406 10194
9407 tg3_enable_ints(tp);
9408
9409 tg3_netif_start(tp); 10195 tg3_netif_start(tp);
9410 10196
9411 spin_unlock(&tp->tx_lock); 10197 tg3_full_unlock(tp);
9412 spin_unlock_irq(&tp->lock);
9413 10198
9414 return 0; 10199 return 0;
9415} 10200}