Diffstat (limited to 'drivers/net/tg3.c')
 drivers/net/tg3.c | 1116
 1 file changed, 950 insertions(+), 166 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 058c70c6f1ac..fc9b5cd957aa 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -61,8 +61,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.26"
-#define DRV_MODULE_RELDATE	"April 24, 2005"
+#define DRV_MODULE_VERSION	"3.29"
+#define DRV_MODULE_RELDATE	"May 23, 2005"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -133,6 +133,8 @@
 /* number of ETHTOOL_GSTATS u64's */
 #define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))
 
+#define TG3_NUM_TEST		6
+
 static char version[] __devinitdata =
 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
@@ -206,6 +208,8 @@ static struct pci_device_id tg3_pci_tbl[] = {
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
@@ -314,6 +318,17 @@ static struct {
314 { "nic_tx_threshold_hit" } 318 { "nic_tx_threshold_hit" }
315}; 319};
316 320
321static struct {
322 const char string[ETH_GSTRING_LEN];
323} ethtool_test_keys[TG3_NUM_TEST] = {
324 { "nvram test (online) " },
325 { "link test (online) " },
326 { "register test (offline)" },
327 { "memory test (offline)" },
328 { "loopback test (offline)" },
329 { "interrupt test (offline)" },
330};
331
317static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 332static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318{ 333{
319 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { 334 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
@@ -420,7 +435,8 @@ static void tg3_enable_ints(struct tg3 *tp)
 {
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+		     (tp->last_tag << 24));
 	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 
 	tg3_cond_int(tp);
@@ -455,10 +471,16 @@ static void tg3_restart_ints(struct tg3 *tp)
 {
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+		     tp->last_tag << 24);
 	mmiowb();
 
-	if (tg3_has_work(tp))
+	/* When doing tagged status, this work check is unnecessary.
+	 * The last_tag we write above tells the chip which piece of
+	 * work we've completed.
+	 */
+	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+	    tg3_has_work(tp))
 		tw32(HOSTCC_MODE, tp->coalesce_mode |
 		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
@@ -2500,7 +2522,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 		if (netif_carrier_ok(tp->dev)) {
 			tw32(HOSTCC_STAT_COAL_TICKS,
-			     DEFAULT_STAT_COAL_TICKS);
+			     tp->coal.stats_block_coalesce_usecs);
 		} else {
 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
 		}
@@ -2886,7 +2908,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	 * All RX "locking" is done by ensuring outside
 	 * code synchronizes with dev->poll()
 	 */
-	done = 1;
 	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
 		int orig_budget = *budget;
 		int work_done;
@@ -2898,12 +2919,14 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
 		*budget -= work_done;
 		netdev->quota -= work_done;
-
-		if (work_done >= orig_budget)
-			done = 0;
 	}
 
+	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+		tp->last_tag = sblk->status_tag;
+	rmb();
+
 	/* if no more work, tell net stack and NIC we're done */
+	done = !tg3_has_work(tp);
 	if (done) {
 		spin_lock_irqsave(&tp->lock, flags);
 		__netif_rx_complete(netdev);
@@ -2928,22 +2951,21 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	spin_lock_irqsave(&tp->lock, flags);
 
 	/*
-	 * writing any value to intr-mbox-0 clears PCI INTA# and
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 	 * chip-internal interrupt pending events.
-	 * writing non-zero to intr-mbox-0 additional tells the
+	 * Writing non-zero to intr-mbox-0 additional tells the
 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 	 * event coalescing.
 	 */
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+	tp->last_tag = sblk->status_tag;
 	sblk->status &= ~SD_STATUS_UPDATED;
-
 	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);		/* schedule NAPI poll */
 	else {
-		/* no work, re-enable interrupts
-		 */
+		/* No work, re-enable interrupts. */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-			     0x00000000);
+			     tp->last_tag << 24);
 	}
 
 	spin_unlock_irqrestore(&tp->lock, flags);
@@ -2969,21 +2991,62 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	if ((sblk->status & SD_STATUS_UPDATED) ||
 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 		/*
-		 * writing any value to intr-mbox-0 clears PCI INTA# and
+		 * Writing any value to intr-mbox-0 clears PCI INTA# and
 		 * chip-internal interrupt pending events.
-		 * writing non-zero to intr-mbox-0 additional tells the
+		 * Writing non-zero to intr-mbox-0 additional tells the
 		 * NIC to stop sending us irqs, engaging "in-intr-handler"
 		 * event coalescing.
 		 */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 			     0x00000001);
+		sblk->status &= ~SD_STATUS_UPDATED;
+		if (likely(tg3_has_work(tp)))
+			netif_rx_schedule(dev);		/* schedule NAPI poll */
+		else {
+			/* No work, shared interrupt perhaps?  re-enable
+			 * interrupts, and flush that PCI write
+			 */
+			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+				     0x00000000);
+			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+		}
+	} else {	/* shared interrupt */
+		handled = 0;
+	}
+
+	spin_unlock_irqrestore(&tp->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	struct tg3 *tp = netdev_priv(dev);
+	struct tg3_hw_status *sblk = tp->hw_status;
+	unsigned long flags;
+	unsigned int handled = 1;
+
+	spin_lock_irqsave(&tp->lock, flags);
+
+	/* In INTx mode, it is possible for the interrupt to arrive at
+	 * the CPU before the status block posted prior to the interrupt.
+	 * Reading the PCI State register will confirm whether the
+	 * interrupt is ours and will flush the status block.
+	 */
+	if ((sblk->status & SD_STATUS_UPDATED) ||
+	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 		/*
-		 * Flush PCI write.  This also guarantees that our
-		 * status block has been flushed to host memory.
+		 * writing any value to intr-mbox-0 clears PCI INTA# and
+		 * chip-internal interrupt pending events.
+		 * writing non-zero to intr-mbox-0 additional tells the
+		 * NIC to stop sending us irqs, engaging "in-intr-handler"
+		 * event coalescing.
 		 */
-		tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+			     0x00000001);
+		tp->last_tag = sblk->status_tag;
 		sblk->status &= ~SD_STATUS_UPDATED;
-
 		if (likely(tg3_has_work(tp)))
 			netif_rx_schedule(dev);		/* schedule NAPI poll */
 		else {
@@ -2991,7 +3054,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 			 * interrupts, and flush that PCI write
 			 */
 			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-				     0x00000000);
+				     tp->last_tag << 24);
 			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 		}
 	} else {	/* shared interrupt */
@@ -3020,7 +3083,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
 }
 
 static int tg3_init_hw(struct tg3 *);
-static int tg3_halt(struct tg3 *);
+static int tg3_halt(struct tg3 *, int, int);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void tg3_poll_controller(struct net_device *dev)
@@ -3044,7 +3107,7 @@ static void tg3_reset_task(void *_data)
 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 
-	tg3_halt(tp);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 	tg3_init_hw(tp);
 
 	tg3_netif_start(tp);
@@ -3390,7 +3453,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	spin_lock_irq(&tp->lock);
 	spin_lock(&tp->tx_lock);
 
-	tg3_halt(tp);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 
 	tg3_set_mtu(dev, tp, new_mtu);
 
@@ -3657,7 +3720,7 @@ err_out:
 /* To stop a block, clear the enable bit and poll till it
  * clears.  tp->lock is held.
  */
-static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
+static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
 {
 	unsigned int i;
 	u32 val;
@@ -3690,7 +3753,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
 			break;
 	}
 
-	if (i == MAX_WAIT_CNT) {
+	if (i == MAX_WAIT_CNT && !silent) {
 		printk(KERN_ERR PFX "tg3_stop_block timed out, "
 		       "ofs=%lx enable_bit=%x\n",
 		       ofs, enable_bit);
@@ -3701,7 +3764,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
 }
 
 /* tp->lock is held. */
-static int tg3_abort_hw(struct tg3 *tp)
+static int tg3_abort_hw(struct tg3 *tp, int silent)
 {
 	int i, err;
 
@@ -3711,22 +3774,20 @@ static int tg3_abort_hw(struct tg3 *tp)
 	tw32_f(MAC_RX_MODE, tp->rx_mode);
 	udelay(10);
 
-	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
-	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
-	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
-	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
-	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
-	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
+	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
 
-	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
-	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
-	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
-	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
-	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
-	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
-	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
-	if (err)
-		goto out;
+	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
 
 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
 	tw32_f(MAC_MODE, tp->mac_mode);
@@ -3744,27 +3805,24 @@ static int tg3_abort_hw(struct tg3 *tp)
 		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
 		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
 		       tp->dev->name, tr32(MAC_TX_MODE));
-		return -ENODEV;
+		err |= -ENODEV;
 	}
 
-	err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
-	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
-	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
+	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
 
 	tw32(FTQ_RESET, 0xffffffff);
 	tw32(FTQ_RESET, 0x00000000);
 
-	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
-	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
-	if (err)
-		goto out;
+	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
 
 	if (tp->hw_status)
 		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
 	if (tp->hw_stats)
 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
-out:
 	return err;
 }
 
@@ -4086,19 +4144,19 @@ static void tg3_stop_fw(struct tg3 *tp)
 }
 
 /* tp->lock is held. */
-static int tg3_halt(struct tg3 *tp)
+static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
 	int err;
 
 	tg3_stop_fw(tp);
 
-	tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
+	tg3_write_sig_pre_reset(tp, kind);
 
-	tg3_abort_hw(tp);
+	tg3_abort_hw(tp, silent);
 	err = tg3_chip_reset(tp);
 
-	tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
-	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+	tg3_write_sig_legacy(tp, kind);
+	tg3_write_sig_post_reset(tp, kind);
 
 	if (err)
 		return err;
@@ -4312,7 +4370,12 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
 	 */
 	tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
 
+	/* It is possible that bootcode is still loading at this point.
+	 * Get the nvram lock first before halting the cpu.
+	 */
+	tg3_nvram_lock(tp);
 	err = tg3_halt_cpu(tp, cpu_base);
+	tg3_nvram_unlock(tp);
 	if (err)
 		goto out;
 
@@ -5049,6 +5112,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 }
 
 static void __tg3_set_rx_mode(struct net_device *);
+static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+	tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+	tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+	tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+	tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+	}
+	tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+	tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+		u32 val = ec->stats_block_coalesce_usecs;
+
+		if (!netif_carrier_ok(tp->dev))
+			val = 0;
+
+		tw32(HOSTCC_STAT_COAL_TICKS, val);
+	}
+}
 
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp)
@@ -5063,9 +5147,7 @@ static int tg3_reset_hw(struct tg3 *tp)
 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
 
 	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
-		err = tg3_abort_hw(tp);
-		if (err)
-			return err;
+		tg3_abort_hw(tp, 1);
 	}
 
 	err = tg3_chip_reset(tp);
@@ -5373,16 +5455,7 @@ static int tg3_reset_hw(struct tg3 *tp)
 		udelay(10);
 	}
 
-	tw32(HOSTCC_RXCOL_TICKS, 0);
-	tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
-	tw32(HOSTCC_RXMAX_FRAMES, 1);
-	tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
-		tw32(HOSTCC_RXCOAL_TICK_INT, 0);
-		tw32(HOSTCC_TXCOAL_TICK_INT, 0);
-	}
-	tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
-	tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+	tg3_set_coalesce(tp, &tp->coal);
 
 	/* set status block DMA address */
 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5395,8 +5468,6 @@ static int tg3_reset_hw(struct tg3 *tp)
 	 * the tg3_periodic_fetch_stats call there, and
 	 * tg3_get_stats to see how this works for 5705/5750 chips.
 	 */
-	tw32(HOSTCC_STAT_COAL_TICKS,
-	     DEFAULT_STAT_COAL_TICKS);
 	tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 	     ((u64) tp->stats_mapping >> 32));
 	tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5452,7 +5523,8 @@ static int tg3_reset_hw(struct tg3 *tp)
 	udelay(100);
 
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
-	tr32(MAILBOX_INTERRUPT_0);
+	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+	tp->last_tag = 0;
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -5730,31 +5802,33 @@ static void tg3_timer(unsigned long __opaque)
 	spin_lock_irqsave(&tp->lock, flags);
 	spin_lock(&tp->tx_lock);
 
-	/* All of this garbage is because when using non-tagged
-	 * IRQ status the mailbox/status_block protocol the chip
-	 * uses with the cpu is race prone.
-	 */
-	if (tp->hw_status->status & SD_STATUS_UPDATED) {
-		tw32(GRC_LOCAL_CTRL,
-		     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
-	} else {
-		tw32(HOSTCC_MODE, tp->coalesce_mode |
-		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
-	}
+	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+		/* All of this garbage is because when using non-tagged
+		 * IRQ status the mailbox/status_block protocol the chip
+		 * uses with the cpu is race prone.
+		 */
+		if (tp->hw_status->status & SD_STATUS_UPDATED) {
+			tw32(GRC_LOCAL_CTRL,
+			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+		} else {
+			tw32(HOSTCC_MODE, tp->coalesce_mode |
+			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+		}
 
-	if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-		tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irqrestore(&tp->lock, flags);
-		schedule_work(&tp->reset_task);
-		return;
+		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+			spin_unlock(&tp->tx_lock);
+			spin_unlock_irqrestore(&tp->lock, flags);
+			schedule_work(&tp->reset_task);
+			return;
+		}
 	}
 
-	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
-		tg3_periodic_fetch_stats(tp);
-
 	/* This part only runs once per second. */
 	if (!--tp->timer_counter) {
+		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+			tg3_periodic_fetch_stats(tp);
+
 		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
 			u32 mac_stat;
 			int phy_event;
@@ -5825,6 +5899,9 @@ static int tg3_test_interrupt(struct tg3 *tp)
 	int err, i;
 	u32 int_mbox = 0;
 
+	if (!netif_running(dev))
+		return -ENODEV;
+
 	tg3_disable_ints(tp);
 
 	free_irq(tp->pdev->irq, dev);
@@ -5853,9 +5930,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		err = request_irq(tp->pdev->irq, tg3_msi,
 				  SA_SAMPLE_RANDOM, dev->name, dev);
-	else
-		err = request_irq(tp->pdev->irq, tg3_interrupt,
+	else {
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
+		err = request_irq(tp->pdev->irq, fn,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 
 	if (err)
 		return err;
@@ -5907,9 +5988,14 @@ static int tg3_test_msi(struct tg3 *tp)
 
 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 
-	err = request_irq(tp->pdev->irq, tg3_interrupt,
-			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	{
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
 
+		err = request_irq(tp->pdev->irq, fn,
+				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 	if (err)
 		return err;
 
@@ -5919,7 +6005,7 @@ static int tg3_test_msi(struct tg3 *tp)
 	spin_lock_irq(&tp->lock);
 	spin_lock(&tp->tx_lock);
 
-	tg3_halt(tp);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	err = tg3_init_hw(tp);
 
 	spin_unlock(&tp->tx_lock);
@@ -5955,7 +6041,13 @@ static int tg3_open(struct net_device *dev)
 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
-		if (pci_enable_msi(tp->pdev) == 0) {
+		/* All MSI supporting chips should support tagged
+		 * status.  Assert that this is the case.
+		 */
+		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+			       "Not using MSI.\n", tp->dev->name);
+		} else if (pci_enable_msi(tp->pdev) == 0) {
 			u32 msi_mode;
 
 			msi_mode = tr32(MSGINT_MODE);
@@ -5966,9 +6058,14 @@ static int tg3_open(struct net_device *dev)
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		err = request_irq(tp->pdev->irq, tg3_msi,
 				  SA_SAMPLE_RANDOM, dev->name, dev);
-	else
-		err = request_irq(tp->pdev->irq, tg3_interrupt,
+	else {
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
+
+		err = request_irq(tp->pdev->irq, fn,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 
 	if (err) {
 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -5984,12 +6081,19 @@ static int tg3_open(struct net_device *dev)
 
 	err = tg3_init_hw(tp);
 	if (err) {
-		tg3_halt(tp);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_free_rings(tp);
 	} else {
-		tp->timer_offset = HZ / 10;
-		tp->timer_counter = tp->timer_multiplier = 10;
-		tp->asf_counter = tp->asf_multiplier = (10 * 120);
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			tp->timer_offset = HZ;
+		else
+			tp->timer_offset = HZ / 10;
+
+		BUG_ON(tp->timer_offset > HZ);
+		tp->timer_counter = tp->timer_multiplier =
+			(HZ / tp->timer_offset);
+		tp->asf_counter = tp->asf_multiplier =
+			((HZ / tp->timer_offset) * 120);
 
 		init_timer(&tp->timer);
 		tp->timer.expires = jiffies + tp->timer_offset;
@@ -6012,6 +6116,7 @@ static int tg3_open(struct net_device *dev)
 
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
 		err = tg3_test_msi(tp);
+
 		if (err) {
 			spin_lock_irq(&tp->lock);
 			spin_lock(&tp->tx_lock);
@@ -6020,7 +6125,7 @@ static int tg3_open(struct net_device *dev)
 			pci_disable_msi(tp->pdev);
 			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 		}
-		tg3_halt(tp);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_free_rings(tp);
 		tg3_free_consistent(tp);
 
@@ -6293,7 +6398,7 @@ static int tg3_close(struct net_device *dev)
 
 	tg3_disable_ints(tp);
 
-	tg3_halt(tp);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	tg3_free_rings(tp);
 	tp->tg3_flags &=
 		~(TG3_FLAG_INIT_COMPLETE |
@@ -7013,7 +7118,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 	tp->tx_pending = ering->tx_pending;
 
 	if (netif_running(dev)) {
-		tg3_halt(tp);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_init_hw(tp);
 		tg3_netif_start(tp);
 	}
@@ -7056,7 +7161,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
 
 	if (netif_running(dev)) {
-		tg3_halt(tp);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_init_hw(tp);
 		tg3_netif_start(tp);
 	}
@@ -7115,12 +7220,20 @@ static int tg3_get_stats_count (struct net_device *dev)
 	return TG3_NUM_STATS;
 }
 
+static int tg3_get_test_count (struct net_device *dev)
+{
+	return TG3_NUM_TEST;
+}
+
 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
 {
 	switch (stringset) {
 	case ETH_SS_STATS:
 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
 		break;
+	case ETH_SS_TEST:
+		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+		break;
 	default:
 		WARN_ON(1);	/* we need a WARN() */
 		break;
@@ -7134,6 +7247,516 @@ static void tg3_get_ethtool_stats (struct net_device *dev,
 	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
 }
 
+#define NVRAM_TEST_SIZE 0x100
+
+static int tg3_test_nvram(struct tg3 *tp)
+{
+	u32 *buf, csum;
+	int i, j, err = 0;
+
+	buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
+		u32 val;
+
+		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
+			break;
+		buf[j] = cpu_to_le32(val);
+	}
+	if (i < NVRAM_TEST_SIZE)
+		goto out;
+
+	err = -EIO;
+	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
+		goto out;
+
+	/* Bootstrap checksum at offset 0x10 */
+	csum = calc_crc((unsigned char *) buf, 0x10);
+	if(csum != cpu_to_le32(buf[0x10/4]))
+		goto out;
+
+	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
+	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
+	if (csum != cpu_to_le32(buf[0xfc/4]))
+		goto out;
+
+	err = 0;
+
+out:
+	kfree(buf);
+	return err;
+}
+
+#define TG3_SERDES_TIMEOUT_SEC	2
+#define TG3_COPPER_TIMEOUT_SEC	6
+
+static int tg3_test_link(struct tg3 *tp)
+{
+	int i, max;
+
+	if (!netif_running(tp->dev))
+		return -ENODEV;
+
+	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+		max = TG3_SERDES_TIMEOUT_SEC;
+	else
+		max = TG3_COPPER_TIMEOUT_SEC;
+
+	for (i = 0; i < max; i++) {
+		if (netif_carrier_ok(tp->dev))
+			return 0;
+
+		if (msleep_interruptible(1000))
+			break;
+	}
+
+	return -EIO;
+}
+
+/* Only test the commonly used registers */
+static int tg3_test_registers(struct tg3 *tp)
+{
+	int i, is_5705;
+	u32 offset, read_mask, write_mask, val, save_val, read_val;
+	static struct {
+		u16 offset;
+		u16 flags;
+#define TG3_FL_5705	0x1
+#define TG3_FL_NOT_5705	0x2
+#define TG3_FL_NOT_5788	0x4
+		u32 read_mask;
+		u32 write_mask;
+	} reg_tbl[] = {
+		/* MAC Control Registers */
+		{ MAC_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x00ef6f8c },
+		{ MAC_MODE, TG3_FL_5705,
+			0x00000000, 0x01ef6b8c },
+		{ MAC_STATUS, TG3_FL_NOT_5705,
+			0x03800107, 0x00000000 },
+		{ MAC_STATUS, TG3_FL_5705,
+			0x03800100, 0x00000000 },
+		{ MAC_ADDR_0_HIGH, 0x0000,
+			0x00000000, 0x0000ffff },
+		{ MAC_ADDR_0_LOW, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_RX_MTU_SIZE, 0x0000,
+			0x00000000, 0x0000ffff },
+		{ MAC_TX_MODE, 0x0000,
+			0x00000000, 0x00000070 },
+		{ MAC_TX_LENGTHS, 0x0000,
+			0x00000000, 0x00003fff },
+		{ MAC_RX_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x000007fc },
+		{ MAC_RX_MODE, TG3_FL_5705,
+			0x00000000, 0x000007dc },
+		{ MAC_HASH_REG_0, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_1, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_2, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_3, 0x0000,
+			0x00000000, 0xffffffff },
+
+		/* Receive Data and Receive BD Initiator Control Registers. */
+		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
+			0x00000000, 0x00000003 },
+		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+0, 0x0000,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+4, 0x0000,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+8, 0x0000,
+			0x00000000, 0xffff0002 },
+		{ RCVDBDI_STD_BD+0xc, 0x0000,
+			0x00000000, 0xffffffff },
+
+		/* Receive BD Initiator Control Registers. */
+		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVBDI_STD_THRESH, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+
+		/* Host Coalescing Control Registers. */
+		{ HOSTCC_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x00000004 },
+		{ HOSTCC_MODE, TG3_FL_5705,
+			0x00000000, 0x000000f6 },
+		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
+			0xffffffff, 0x00000000 },
+		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
+			0xffffffff, 0x00000000 },
+
+		/* Buffer Manager Control Registers. */
+		{ BUFMGR_MB_POOL_ADDR, 0x0000,
+			0x00000000, 0x007fff80 },
+		{ BUFMGR_MB_POOL_SIZE, 0x0000,
+			0x00000000, 0x007fffff },
+		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+			0x00000000, 0x0000003f },
+		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
+			0x00000000, 0x000001ff },
+		{ BUFMGR_MB_HIGH_WATER, 0x0000,
+			0x00000000, 0x000001ff },
+		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
+			0xffffffff, 0x00000000 },
+		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+			0xffffffff, 0x00000000 },
+
+		/* Mailbox Registers */
+		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+			0x00000000, 0x000001ff },
+		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
+			0x00000000, 0x000001ff },
+		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
+			0x00000000, 0x000007ff },
+		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
+			0x00000000, 0x000001ff },
+
+		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
+	};
+
+	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+		is_5705 = 1;
+	else
+		is_5705 = 0;
+
+	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+			continue;
+
+		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
+			continue;
+
+		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
+			continue;
+
+		offset = (u32) reg_tbl[i].offset;
+		read_mask = reg_tbl[i].read_mask;
+		write_mask = reg_tbl[i].write_mask;
+
+		/* Save the original register content */
+		save_val = tr32(offset);
+
+		/* Determine the read-only value. */
+		read_val = save_val & read_mask;
+
+		/* Write zero to the register, then make sure the read-only bits
+		 * are not changed and the read/write bits are all zeros.
+		 */
+		tw32(offset, 0);
+
+		val = tr32(offset);
+
+		/* Test the read-only and read/write bits. */
+		if (((val & read_mask) != read_val) || (val & write_mask))
+			goto out;
+
+		/* Write ones to all the bits defined by RdMask and WrMask, then
+		 * make sure the read-only bits are not changed and the
+		 * read/write bits are all ones.
+		 */
+		tw32(offset, read_mask | write_mask);
+
+		val = tr32(offset);
+
+		/* Test the read-only bits. */
+		if ((val & read_mask) != read_val)
+			goto out;
+
+		/* Test the read/write bits. */
+		if ((val & write_mask) != write_mask)
+			goto out;
+
+		tw32(offset, save_val);
+	}
+
+	return 0;
+
+out:
+	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
+	tw32(offset, save_val);
+	return -EIO;
+}
+
+static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+{
+	static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+	int i;
+	u32 j;
+
+	for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
+		for (j = 0; j < len; j += 4) {
+			u32 val;
+
+			tg3_write_mem(tp, offset + j, test_pattern[i]);
+			tg3_read_mem(tp, offset + j, &val);
+			if (val != test_pattern[i])
+				return -EIO;
+		}
+	}
+	return 0;
+}
+
+static int tg3_test_memory(struct tg3 *tp)
+{
+	static struct mem_entry {
+		u32 offset;
+		u32 len;
+	} mem_tbl_570x[] = {
+		{ 0x00000000, 0x01000},
+		{ 0x00002000, 0x1c000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5705[] = {
+		{ 0x00000100, 0x0000c},
+		{ 0x00000200, 0x00008},
+		{ 0x00000b50, 0x00400},
+		{ 0x00004000, 0x00800},
+		{ 0x00006000, 0x01000},
+		{ 0x00008000, 0x02000},
+		{ 0x00010000, 0x0e000},
+		{ 0xffffffff, 0x00000}
+	};
+	struct mem_entry *mem_tbl;
+	int err = 0;
+	int i;
+
+	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+		mem_tbl = mem_tbl_5705;
+	else
+		mem_tbl = mem_tbl_570x;
+
+	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
+		    mem_tbl[i].len)) != 0)
+			break;
+	}
+
+	return err;
+}
+
+static int tg3_test_loopback(struct tg3 *tp)
+{
+	u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
+	u32 desc_idx;
+	struct sk_buff *skb, *rx_skb;
+	u8 *tx_data;
+	dma_addr_t map;
+	int num_pkts, tx_len, rx_len, i, err;
+	struct tg3_rx_buffer_desc *desc;
+
+	if (!netif_running(tp->dev))
+		return -ENODEV;
+
+	err = -EIO;
+
+	tg3_abort_hw(tp, 1);
+
+	/* Clearing this flag to keep interrupts disabled */
+	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+	tg3_reset_hw(tp);
+
+	mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+		   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
+		   MAC_MODE_PORT_MODE_GMII;
+	tw32(MAC_MODE, mac_mode);
+
+	tx_len = 1514;
+	skb = dev_alloc_skb(tx_len);
+	tx_data = skb_put(skb, tx_len);
+	memcpy(tx_data, tp->dev->dev_addr, 6);
+	memset(tx_data + 6, 0x0, 8);
+
+	tw32(MAC_RX_MTU_SIZE, tx_len + 4);
+
+	for (i = 14; i < tx_len; i++)
+		tx_data[i] = (u8) (i & 0xff);
+
+	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+
+	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+	       HOSTCC_MODE_NOW);
+
+	udelay(10);
+
+	rx_start_idx = tp->hw_status->idx[0].rx_producer;
+
+	send_idx = 0;
+	num_pkts = 0;
+
+	tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
+
+	send_idx++;
+	num_pkts++;
+
+	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
+	tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
+
+	udelay(10);
+
+	for (i = 0; i < 10; i++) {
+		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+		       HOSTCC_MODE_NOW);
+
+		udelay(10);
+
+		tx_idx = tp->hw_status->idx[0].tx_consumer;
+		rx_idx = tp->hw_status->idx[0].rx_producer;
+		if ((tx_idx == send_idx) &&
+		    (rx_idx == (rx_start_idx + num_pkts)))
+			break;
+	}
+
+	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+	dev_kfree_skb(skb);
+
+	if (tx_idx != send_idx)
+		goto out;
+
+	if (rx_idx != rx_start_idx + num_pkts)
+		goto out;
+
+	desc = &tp->rx_rcb[rx_start_idx];
+	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+	if (opaque_key != RXD_OPAQUE_RING_STD)
+		goto out;
+
+	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+		goto out;
+
+	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
+	if (rx_len != tx_len)
+		goto out;
+
+	rx_skb = tp->rx_std_buffers[desc_idx].skb;
+
+	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
+	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
+
+	for (i = 14; i < tx_len; i++) {
+		if (*(rx_skb->data + i) != (u8) (i & 0xff))
+			goto out;
+	}
+	err = 0;
+
+	/* tg3_free_rings will unmap and free the rx_skb */
+out:
+	return err;
+}
+
+static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
+			  u64 *data)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
+
+	if (tg3_test_nvram(tp) != 0) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		data[0] = 1;
+	}
+	if (tg3_test_link(tp) != 0) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		data[1] = 1;
+	}
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		if (netif_running(dev))
+			tg3_netif_stop(tp);
+
+		spin_lock_irq(&tp->lock);
+		spin_lock(&tp->tx_lock);
+
+		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
+		tg3_nvram_lock(tp);
+		tg3_halt_cpu(tp, RX_CPU_BASE);
+		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+			tg3_halt_cpu(tp, TX_CPU_BASE);
+		tg3_nvram_unlock(tp);
+
+		if (tg3_test_registers(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[2] = 1;
+		}
+		if (tg3_test_memory(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[3] = 1;
+		}
+		if (tg3_test_loopback(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[4] = 1;
+		}
+
+		spin_unlock(&tp->tx_lock);
+		spin_unlock_irq(&tp->lock);
+		if (tg3_test_interrupt(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[5] = 1;
+		}
+		spin_lock_irq(&tp->lock);
+		spin_lock(&tp->tx_lock);
+
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		if (netif_running(dev)) {
+			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+			tg3_init_hw(tp);
+			tg3_netif_start(tp);
+		}
+		spin_unlock(&tp->tx_lock);
+		spin_unlock_irq(&tp->lock);
+	}
+}
+
 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *data = if_mii(ifr);
@@ -7210,6 +7833,14 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 }
 #endif
 
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	memcpy(ec, &tp->coal, sizeof(*ec));
+	return 0;
+}
+
 static struct ethtool_ops tg3_ethtool_ops = {
 	.get_settings		= tg3_get_settings,
 	.set_settings		= tg3_set_settings,
@@ -7239,9 +7870,12 @@ static struct ethtool_ops tg3_ethtool_ops = {
 	.get_tso		= ethtool_op_get_tso,
 	.set_tso		= tg3_set_tso,
 #endif
+	.self_test_count	= tg3_get_test_count,
+	.self_test		= tg3_self_test,
 	.get_strings		= tg3_get_strings,
 	.get_stats_count	= tg3_get_stats_count,
 	.get_ethtool_stats	= tg3_get_ethtool_stats,
+	.get_coalesce		= tg3_get_coalesce,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -8429,15 +9063,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
 		tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
 
-	/* Only 5701 and later support tagged irq status mode.
-	 * Also, 5788 chips cannot use tagged irq status.
-	 *
-	 * However, since we are using NAPI avoid tagged irq status
-	 * because the interrupt condition is more difficult to
-	 * fully clear in that mode.
-	 */
 	tp->coalesce_mode = 0;
-
 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -8501,6 +9127,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
 		tp->tg3_flags2 |= TG3_FLG2_IS_5788;
 
+	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
+		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
+	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+				      HOSTCC_MODE_CLRTICK_TXBD);
+
+		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+				       tp->misc_host_ctrl);
+	}
+
 	/* these are limited to 10/100 only */
 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -8678,6 +9316,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 	return 0;
 }
 
+#define BOUNDARY_SINGLE_CACHELINE	1
+#define BOUNDARY_MULTI_CACHELINE	2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+	int cacheline_size;
+	u8 byte;
+	int goal;
+
+	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+	if (byte == 0)
+		cacheline_size = 1024;
+	else
+		cacheline_size = (int) byte * 4;
+
+	/* On 5703 and later chips, the boundary bits have no
+	 * effect.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+		goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+	goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+	goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+	goal = 0;
+#endif
+#endif
+
+	if (!goal)
+		goto out;
+
+	/* PCI controllers on most RISC systems tend to disconnect
+	 * when a device tries to burst across a cache-line boundary.
+	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
+	 *
+	 * Unfortunately, for PCI-E there are only limited
+	 * write-side controls for this, and thus for reads
+	 * we will still get the disconnects.  We'll also waste
+	 * these PCI cycles for both read and write for chips
+	 * other than 5700 and 5701 which do not implement the
+	 * boundary bits.
+	 */
+	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+			} else {
+				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			}
+			break;
+
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+			break;
+
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			break;
+		};
+	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+				break;
+			}
+			/* fallthrough */
+		case 128:
+		default:
+			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+			break;
+		};
+	} else {
+		switch (cacheline_size) {
+		case 16:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_16 |
+					DMA_RWCTRL_WRITE_BNDRY_16);
+				break;
+			}
+			/* fallthrough */
+		case 32:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_32 |
+					DMA_RWCTRL_WRITE_BNDRY_32);
+				break;
+			}
+			/* fallthrough */
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_64 |
+					DMA_RWCTRL_WRITE_BNDRY_64);
+				break;
+			}
+			/* fallthrough */
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128 |
+					DMA_RWCTRL_WRITE_BNDRY_128);
+				break;
+			}
+			/* fallthrough */
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256 |
+				DMA_RWCTRL_WRITE_BNDRY_256);
+			break;
+		case 512:
+			val |= (DMA_RWCTRL_READ_BNDRY_512 |
+				DMA_RWCTRL_WRITE_BNDRY_512);
+			break;
+		case 1024:
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+				DMA_RWCTRL_WRITE_BNDRY_1024);
+			break;
+		};
+	}
+
+out:
+	return val;
+}
+
 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
 {
 	struct tg3_internal_buffer_desc test_desc;
@@ -8759,12 +9537,12 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
 	return ret;
 }
 
-#define TEST_BUFFER_SIZE	0x400
+#define TEST_BUFFER_SIZE	0x2000
 
 static int __devinit tg3_test_dma(struct tg3 *tp)
 {
 	dma_addr_t buf_dma;
-	u32 *buf;
+	u32 *buf, saved_dma_rwctrl;
 	int ret;
 
 	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -8776,46 +9554,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
 
-#ifndef CONFIG_X86
-	{
-		u8 byte;
-		int cacheline_size;
-		pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
-
-		if (byte == 0)
-			cacheline_size = 1024;
-		else
-			cacheline_size = (int) byte * 4;
-
-		switch (cacheline_size) {
-		case 16:
-		case 32:
-		case 64:
-		case 128:
-			if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-			    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
-				break;
-			} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-				tp->dma_rwctrl &=
-					~(DMA_RWCTRL_PCI_WRITE_CMD);
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
-				break;
-			}
-			/* fallthrough */
-		case 256:
-			if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-			    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_256;
-			else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
-		};
-	}
-#endif
+	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
 		/* DMA read watermark not used on PCIE */
@@ -8834,7 +9573,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 		if (ccval == 0x6 || ccval == 0x7)
 			tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
-		/* Set bit 23 to renable PCIX hw bug fix */
+		/* Set bit 23 to enable PCIX hw bug fix */
 		tp->dma_rwctrl |= 0x009f0000;
 	} else {
 		tp->dma_rwctrl |= 0x001b000f;
@@ -8875,6 +9614,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
 		goto out;
 
+	/* It is best to perform DMA test with maximum write burst size
+	 * to expose the 5700/5701 write DMA bug.
+	 */
+	saved_dma_rwctrl = tp->dma_rwctrl;
+	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
 	while (1) {
 		u32 *p = buf, i;
 
@@ -8913,8 +9659,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 			if (p[i] == i)
 				continue;
 
-			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
-			    DMA_RWCTRL_WRITE_BNDRY_DISAB) {
+			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+			    DMA_RWCTRL_WRITE_BNDRY_16) {
+				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 				break;
@@ -8931,6 +9678,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 			break;
 		}
 	}
+	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+	    DMA_RWCTRL_WRITE_BNDRY_16) {
+		/* DMA test passed without adjusting DMA boundary,
+		 * just restore the calculated DMA boundary
+		 */
+		tp->dma_rwctrl = saved_dma_rwctrl;
+		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+	}
 
 out:
 	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@@ -9018,6 +9773,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
 	return peer;
 }
 
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+	struct ethtool_coalesce *ec = &tp->coal;
+
+	memset(ec, 0, sizeof(*ec));
+	ec->cmd = ETHTOOL_GCOALESCE;
+	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+				 HOSTCC_MODE_CLRTICK_TXBD)) {
+		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+	}
+}
+
 static int __devinit tg3_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -9239,7 +10019,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 		pci_save_state(tp->pdev);
 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
-		tg3_halt(tp);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	}
 
 	err = tg3_test_dma(tp);
@@ -9263,6 +10043,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	/* flow control autonegotiation is default behavior */
 	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 
+	tg3_init_coal(tp);
+
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_ERR PFX "Cannot register net device, "
@@ -9305,6 +10087,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
 	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
 	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+	printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
+	       dev->name, tp->dma_rwctrl);
 
 	return 0;
 
@@ -9362,7 +10146,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	spin_lock_irq(&tp->lock);
 	spin_lock(&tp->tx_lock);
-	tg3_halt(tp);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);
 