path: root/drivers/net/tg3.c
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--  drivers/net/tg3.c | 1090
1 file changed, 955 insertions(+), 135 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f79b02e80e75..a0b8848049c9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -7,7 +7,12 @@
  * Copyright (C) 2005 Broadcom Corporation.
  *
  * Firmware is:
- * Copyright (C) 2000-2003 Broadcom Corporation.
+ * Derived from proprietary unpublished source code,
+ * Copyright (C) 2000-2003 Broadcom Corporation.
+ *
+ * Permission is hereby granted for the distribution of this firmware
+ * data in hexadecimal or equivalent format, provided this copyright
+ * notice is accompanying it.
  */
 
 #include <linux/config.h>
@@ -61,8 +66,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.27"
-#define DRV_MODULE_RELDATE	"May 5, 2005"
+#define DRV_MODULE_VERSION	"3.31"
+#define DRV_MODULE_RELDATE	"June 8, 2005"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -133,6 +138,8 @@
 /* number of ETHTOOL_GSTATS u64's */
 #define TG3_NUM_STATS	(sizeof(struct tg3_ethtool_stats)/sizeof(u64))
 
+#define TG3_NUM_TEST	6
+
 static char version[] __devinitdata =
 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
@@ -206,6 +213,8 @@ static struct pci_device_id tg3_pci_tbl[] = {
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
@@ -314,6 +323,17 @@ static struct {
314 { "nic_tx_threshold_hit" } 323 { "nic_tx_threshold_hit" }
315}; 324};
316 325
326static struct {
327 const char string[ETH_GSTRING_LEN];
328} ethtool_test_keys[TG3_NUM_TEST] = {
329 { "nvram test (online) " },
330 { "link test (online) " },
331 { "register test (offline)" },
332 { "memory test (offline)" },
333 { "loopback test (offline)" },
334 { "interrupt test (offline)" },
335};
336
317static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 337static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318{ 338{
319 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { 339 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
@@ -420,7 +440,8 @@ static void tg3_enable_ints(struct tg3 *tp)
 {
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+		     (tp->last_tag << 24));
 	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 
 	tg3_cond_int(tp);
@@ -455,10 +476,16 @@ static void tg3_restart_ints(struct tg3 *tp)
 {
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+		     tp->last_tag << 24);
 	mmiowb();
 
-	if (tg3_has_work(tp))
+	/* When doing tagged status, this work check is unnecessary.
+	 * The last_tag we write above tells the chip which piece of
+	 * work we've completed.
+	 */
+	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+	    tg3_has_work(tp))
 		tw32(HOSTCC_MODE, tp->coalesce_mode |
 		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
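
A sketch of the tagged-status handshake the two hunks above introduce (illustrative only, not part of the patch; the helper name is invented): the chip stamps every status block update with an incrementing tag, and the driver acknowledges work by echoing the tag of the last block it processed into bits 31:24 of the interrupt mailbox. If the chip has since produced a newer tag, it simply re-interrupts, which is why the explicit tg3_has_work() re-check can be skipped in tagged mode.

    /* Hypothetical helper, condensing the ack pattern used above. */
    static void ack_tagged_status(struct tg3 *tp)
    {
            /* remember the tag of the status block just consumed ... */
            tp->last_tag = tp->hw_status->status_tag;
            /* ... and echo it back; a stale tag makes the chip re-interrupt */
            tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                         tp->last_tag << 24);
    }
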
@@ -2500,7 +2527,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 		if (netif_carrier_ok(tp->dev)) {
 			tw32(HOSTCC_STAT_COAL_TICKS,
-			     DEFAULT_STAT_COAL_TICKS);
+			     tp->coal.stats_block_coalesce_usecs);
 		} else {
 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
 		}
@@ -2886,7 +2913,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	 * All RX "locking" is done by ensuring outside
 	 * code synchronizes with dev->poll()
 	 */
-	done = 1;
 	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
 		int orig_budget = *budget;
 		int work_done;
@@ -2898,12 +2924,14 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
 		*budget -= work_done;
 		netdev->quota -= work_done;
-
-		if (work_done >= orig_budget)
-			done = 0;
 	}
 
+	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+		tp->last_tag = sblk->status_tag;
+	rmb();
+
 	/* if no more work, tell net stack and NIC we're done */
+	done = !tg3_has_work(tp);
 	if (done) {
 		spin_lock_irqsave(&tp->lock, flags);
 		__netif_rx_complete(netdev);
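
The ordering in this hunk is deliberate; a condensed view of the patched poll path, assuming tagged status is active:

    /* 1. drain RX work up to the NAPI budget;
     * 2. snapshot sblk->status_tag into tp->last_tag;
     * 3. rmb() so the tag snapshot is ordered against the status-block
     *    reads it acknowledges;
     * 4. recompute done = !tg3_has_work(tp) only now, so work that
     *    arrived during the drain keeps the poll scheduled instead of
     *    being silently dropped.
     */
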
@@ -2928,22 +2956,21 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	spin_lock_irqsave(&tp->lock, flags);
 
 	/*
-	 * writing any value to intr-mbox-0 clears PCI INTA# and
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 	 * chip-internal interrupt pending events.
-	 * writing non-zero to intr-mbox-0 additional tells the
+	 * Writing non-zero to intr-mbox-0 additional tells the
 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 	 * event coalescing.
 	 */
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+	tp->last_tag = sblk->status_tag;
 	sblk->status &= ~SD_STATUS_UPDATED;
-
 	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);		/* schedule NAPI poll */
 	else {
-		/* no work, re-enable interrupts
-		 */
+		/* No work, re-enable interrupts. */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-			     0x00000000);
+			     tp->last_tag << 24);
 	}
 
 	spin_unlock_irqrestore(&tp->lock, flags);
@@ -2969,21 +2996,62 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	if ((sblk->status & SD_STATUS_UPDATED) ||
 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 		/*
-		 * writing any value to intr-mbox-0 clears PCI INTA# and
+		 * Writing any value to intr-mbox-0 clears PCI INTA# and
 		 * chip-internal interrupt pending events.
-		 * writing non-zero to intr-mbox-0 additional tells the
+		 * Writing non-zero to intr-mbox-0 additional tells the
 		 * NIC to stop sending us irqs, engaging "in-intr-handler"
 		 * event coalescing.
 		 */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 			     0x00000001);
+		sblk->status &= ~SD_STATUS_UPDATED;
+		if (likely(tg3_has_work(tp)))
+			netif_rx_schedule(dev);		/* schedule NAPI poll */
+		else {
+			/* No work, shared interrupt perhaps?  re-enable
+			 * interrupts, and flush that PCI write
+			 */
+			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+				     0x00000000);
+			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+		}
+	} else {	/* shared interrupt */
+		handled = 0;
+	}
+
+	spin_unlock_irqrestore(&tp->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	struct tg3 *tp = netdev_priv(dev);
+	struct tg3_hw_status *sblk = tp->hw_status;
+	unsigned long flags;
+	unsigned int handled = 1;
+
+	spin_lock_irqsave(&tp->lock, flags);
+
+	/* In INTx mode, it is possible for the interrupt to arrive at
+	 * the CPU before the status block posted prior to the interrupt.
+	 * Reading the PCI State register will confirm whether the
+	 * interrupt is ours and will flush the status block.
+	 */
+	if ((sblk->status & SD_STATUS_UPDATED) ||
+	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 		/*
-		 * Flush PCI write.  This also guarantees that our
-		 * status block has been flushed to host memory.
+		 * writing any value to intr-mbox-0 clears PCI INTA# and
+		 * chip-internal interrupt pending events.
+		 * writing non-zero to intr-mbox-0 additional tells the
+		 * NIC to stop sending us irqs, engaging "in-intr-handler"
+		 * event coalescing.
 		 */
-		tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+			     0x00000001);
+		tp->last_tag = sblk->status_tag;
 		sblk->status &= ~SD_STATUS_UPDATED;
-
 		if (likely(tg3_has_work(tp)))
 			netif_rx_schedule(dev);		/* schedule NAPI poll */
 		else {
@@ -2991,7 +3059,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 			 * interrupts, and flush that PCI write
 			 */
 			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-				     0x00000000);
+				     tp->last_tag << 24);
 			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 		}
 	} else {	/* shared interrupt */
@@ -3020,7 +3088,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
 }
 
 static int tg3_init_hw(struct tg3 *);
-static int tg3_halt(struct tg3 *, int);
+static int tg3_halt(struct tg3 *, int, int);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void tg3_poll_controller(struct net_device *dev)
@@ -3044,7 +3112,7 @@ static void tg3_reset_task(void *_data)
 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 
-	tg3_halt(tp, 0);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 	tg3_init_hw(tp);
 
 	tg3_netif_start(tp);
@@ -3390,7 +3458,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	spin_lock_irq(&tp->lock);
 	spin_lock(&tp->tx_lock);
 
-	tg3_halt(tp, 1);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 
 	tg3_set_mtu(dev, tp, new_mtu);
 
@@ -4081,19 +4149,19 @@ static void tg3_stop_fw(struct tg3 *tp)
 }
 
 /* tp->lock is held. */
-static int tg3_halt(struct tg3 *tp, int silent)
+static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
 	int err;
 
 	tg3_stop_fw(tp);
 
-	tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
+	tg3_write_sig_pre_reset(tp, kind);
 
 	tg3_abort_hw(tp, silent);
 	err = tg3_chip_reset(tp);
 
-	tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
-	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+	tg3_write_sig_legacy(tp, kind);
+	tg3_write_sig_post_reset(tp, kind);
 
 	if (err)
 		return err;
@@ -4307,7 +4375,12 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
 	 */
 	tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
 
+	/* It is possible that bootcode is still loading at this point.
+	 * Get the nvram lock first before halting the cpu.
+	 */
+	tg3_nvram_lock(tp);
 	err = tg3_halt_cpu(tp, cpu_base);
+	tg3_nvram_unlock(tp);
 	if (err)
 		goto out;
 
@@ -5044,6 +5117,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 }
 
 static void __tg3_set_rx_mode(struct net_device *);
+static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+	tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+	tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+	tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+	tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+	}
+	tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+	tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+		u32 val = ec->stats_block_coalesce_usecs;
+
+		if (!netif_carrier_ok(tp->dev))
+			val = 0;
+
+		tw32(HOSTCC_STAT_COAL_TICKS, val);
+	}
+}
 
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp)
@@ -5366,16 +5460,7 @@ static int tg3_reset_hw(struct tg3 *tp)
 		udelay(10);
 	}
 
-	tw32(HOSTCC_RXCOL_TICKS, 0);
-	tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
-	tw32(HOSTCC_RXMAX_FRAMES, 1);
-	tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
-		tw32(HOSTCC_RXCOAL_TICK_INT, 0);
-		tw32(HOSTCC_TXCOAL_TICK_INT, 0);
-	}
-	tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
-	tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+	tg3_set_coalesce(tp, &tp->coal);
 
 	/* set status block DMA address */
 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5388,8 +5473,6 @@ static int tg3_reset_hw(struct tg3 *tp)
 	 * the tg3_periodic_fetch_stats call there, and
 	 * tg3_get_stats to see how this works for 5705/5750 chips.
 	 */
-	tw32(HOSTCC_STAT_COAL_TICKS,
-	     DEFAULT_STAT_COAL_TICKS);
 	tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 	     ((u64) tp->stats_mapping >> 32));
 	tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5445,7 +5528,8 @@ static int tg3_reset_hw(struct tg3 *tp)
 	udelay(100);
 
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
-	tr32(MAILBOX_INTERRUPT_0);
+	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+	tp->last_tag = 0;
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -5723,31 +5807,33 @@ static void tg3_timer(unsigned long __opaque)
 	spin_lock_irqsave(&tp->lock, flags);
 	spin_lock(&tp->tx_lock);
 
-	/* All of this garbage is because when using non-tagged
-	 * IRQ status the mailbox/status_block protocol the chip
-	 * uses with the cpu is race prone.
-	 */
-	if (tp->hw_status->status & SD_STATUS_UPDATED) {
-		tw32(GRC_LOCAL_CTRL,
-		     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
-	} else {
-		tw32(HOSTCC_MODE, tp->coalesce_mode |
-		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
-	}
+	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+		/* All of this garbage is because when using non-tagged
+		 * IRQ status the mailbox/status_block protocol the chip
+		 * uses with the cpu is race prone.
+		 */
+		if (tp->hw_status->status & SD_STATUS_UPDATED) {
+			tw32(GRC_LOCAL_CTRL,
+			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+		} else {
+			tw32(HOSTCC_MODE, tp->coalesce_mode |
+			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+		}
 
-	if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-		tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irqrestore(&tp->lock, flags);
-		schedule_work(&tp->reset_task);
-		return;
+		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+			spin_unlock(&tp->tx_lock);
+			spin_unlock_irqrestore(&tp->lock, flags);
+			schedule_work(&tp->reset_task);
+			return;
+		}
 	}
 
-	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
-		tg3_periodic_fetch_stats(tp);
-
 	/* This part only runs once per second. */
 	if (!--tp->timer_counter) {
+		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+			tg3_periodic_fetch_stats(tp);
+
 		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
 			u32 mac_stat;
 			int phy_event;
@@ -5818,6 +5904,9 @@ static int tg3_test_interrupt(struct tg3 *tp)
 	int err, i;
 	u32 int_mbox = 0;
 
+	if (!netif_running(dev))
+		return -ENODEV;
+
 	tg3_disable_ints(tp);
 
 	free_irq(tp->pdev->irq, dev);
@@ -5846,9 +5935,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		err = request_irq(tp->pdev->irq, tg3_msi,
 				  SA_SAMPLE_RANDOM, dev->name, dev);
-	else
-		err = request_irq(tp->pdev->irq, tg3_interrupt,
+	else {
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
+		err = request_irq(tp->pdev->irq, fn,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 
 	if (err)
 		return err;
@@ -5900,9 +5993,14 @@ static int tg3_test_msi(struct tg3 *tp)
 
 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 
-	err = request_irq(tp->pdev->irq, tg3_interrupt,
-			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	{
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
 
+		err = request_irq(tp->pdev->irq, fn,
+				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 	if (err)
 		return err;
 
@@ -5912,7 +6010,7 @@ static int tg3_test_msi(struct tg3 *tp)
 	spin_lock_irq(&tp->lock);
 	spin_lock(&tp->tx_lock);
 
-	tg3_halt(tp, 1);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	err = tg3_init_hw(tp);
 
 	spin_unlock(&tp->tx_lock);
@@ -5948,7 +6046,13 @@ static int tg3_open(struct net_device *dev)
 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
-		if (pci_enable_msi(tp->pdev) == 0) {
+		/* All MSI supporting chips should support tagged
+		 * status.  Assert that this is the case.
+		 */
+		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+			       "Not using MSI.\n", tp->dev->name);
+		} else if (pci_enable_msi(tp->pdev) == 0) {
 			u32 msi_mode;
 
 			msi_mode = tr32(MSGINT_MODE);
@@ -5959,9 +6063,14 @@ static int tg3_open(struct net_device *dev)
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		err = request_irq(tp->pdev->irq, tg3_msi,
 				  SA_SAMPLE_RANDOM, dev->name, dev);
-	else
-		err = request_irq(tp->pdev->irq, tg3_interrupt,
+	else {
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
+
+		err = request_irq(tp->pdev->irq, fn,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 
 	if (err) {
 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -5977,12 +6086,19 @@ static int tg3_open(struct net_device *dev)
 
 	err = tg3_init_hw(tp);
 	if (err) {
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_free_rings(tp);
 	} else {
-		tp->timer_offset = HZ / 10;
-		tp->timer_counter = tp->timer_multiplier = 10;
-		tp->asf_counter = tp->asf_multiplier = (10 * 120);
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			tp->timer_offset = HZ;
+		else
+			tp->timer_offset = HZ / 10;
+
+		BUG_ON(tp->timer_offset > HZ);
+		tp->timer_counter = tp->timer_multiplier =
+			(HZ / tp->timer_offset);
+		tp->asf_counter = tp->asf_multiplier =
+			((HZ / tp->timer_offset) * 120);
 
 		init_timer(&tp->timer);
 		tp->timer.expires = jiffies + tp->timer_offset;
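
Worked example of the timer arithmetic above, assuming HZ = 1000: with tagged status, timer_offset = HZ, so timer_counter = HZ / timer_offset = 1 and the once-per-second body runs on every timer tick; without it, timer_offset = HZ / 10 = 100 jiffies and timer_counter = 10, preserving the old ten-ticks-per-second cadence. In both cases asf_counter = (HZ / timer_offset) * 120 spans roughly 120 seconds, matching the previously hard-coded 10 * 120.
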
@@ -6005,6 +6121,7 @@ static int tg3_open(struct net_device *dev)
 
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
 		err = tg3_test_msi(tp);
+
 		if (err) {
 			spin_lock_irq(&tp->lock);
 			spin_lock(&tp->tx_lock);
@@ -6013,7 +6130,7 @@ static int tg3_open(struct net_device *dev)
 			pci_disable_msi(tp->pdev);
 			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 		}
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_free_rings(tp);
 		tg3_free_consistent(tp);
 
@@ -6286,7 +6403,7 @@ static int tg3_close(struct net_device *dev)
 
 	tg3_disable_ints(tp);
 
-	tg3_halt(tp, 1);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	tg3_free_rings(tp);
 	tp->tg3_flags &=
 		~(TG3_FLAG_INIT_COMPLETE |
@@ -7006,7 +7123,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 	tp->tx_pending = ering->tx_pending;
 
 	if (netif_running(dev)) {
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_init_hw(tp);
 		tg3_netif_start(tp);
 	}
@@ -7049,7 +7166,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
 
 	if (netif_running(dev)) {
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_init_hw(tp);
 		tg3_netif_start(tp);
 	}
@@ -7108,12 +7225,20 @@ static int tg3_get_stats_count (struct net_device *dev)
 	return TG3_NUM_STATS;
 }
 
+static int tg3_get_test_count (struct net_device *dev)
+{
+	return TG3_NUM_TEST;
+}
+
 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
 {
 	switch (stringset) {
 	case ETH_SS_STATS:
 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
 		break;
+	case ETH_SS_TEST:
+		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+		break;
 	default:
 		WARN_ON(1);	/* we need a WARN() */
 		break;
@@ -7127,6 +7252,516 @@ static void tg3_get_ethtool_stats (struct net_device *dev,
 	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
 }
 
+#define NVRAM_TEST_SIZE 0x100
+
+static int tg3_test_nvram(struct tg3 *tp)
+{
+	u32 *buf, csum;
+	int i, j, err = 0;
+
+	buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
+		u32 val;
+
+		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
+			break;
+		buf[j] = cpu_to_le32(val);
+	}
+	if (i < NVRAM_TEST_SIZE)
+		goto out;
+
+	err = -EIO;
+	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
+		goto out;
+
+	/* Bootstrap checksum at offset 0x10 */
+	csum = calc_crc((unsigned char *) buf, 0x10);
+	if(csum != cpu_to_le32(buf[0x10/4]))
+		goto out;
+
+	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
+	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
+	if (csum != cpu_to_le32(buf[0xfc/4]))
+		goto out;
+
+	err = 0;
+
+out:
+	kfree(buf);
+	return err;
+}
+
+#define TG3_SERDES_TIMEOUT_SEC	2
+#define TG3_COPPER_TIMEOUT_SEC	6
+
+static int tg3_test_link(struct tg3 *tp)
+{
+	int i, max;
+
+	if (!netif_running(tp->dev))
+		return -ENODEV;
+
+	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+		max = TG3_SERDES_TIMEOUT_SEC;
+	else
+		max = TG3_COPPER_TIMEOUT_SEC;
+
+	for (i = 0; i < max; i++) {
+		if (netif_carrier_ok(tp->dev))
+			return 0;
+
+		if (msleep_interruptible(1000))
+			break;
+	}
+
+	return -EIO;
+}
+
+/* Only test the commonly used registers */
+static int tg3_test_registers(struct tg3 *tp)
+{
+	int i, is_5705;
+	u32 offset, read_mask, write_mask, val, save_val, read_val;
+	static struct {
+		u16 offset;
+		u16 flags;
+#define TG3_FL_5705	0x1
+#define TG3_FL_NOT_5705	0x2
+#define TG3_FL_NOT_5788	0x4
+		u32 read_mask;
+		u32 write_mask;
+	} reg_tbl[] = {
+		/* MAC Control Registers */
+		{ MAC_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x00ef6f8c },
+		{ MAC_MODE, TG3_FL_5705,
+			0x00000000, 0x01ef6b8c },
+		{ MAC_STATUS, TG3_FL_NOT_5705,
+			0x03800107, 0x00000000 },
+		{ MAC_STATUS, TG3_FL_5705,
+			0x03800100, 0x00000000 },
+		{ MAC_ADDR_0_HIGH, 0x0000,
+			0x00000000, 0x0000ffff },
+		{ MAC_ADDR_0_LOW, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_RX_MTU_SIZE, 0x0000,
+			0x00000000, 0x0000ffff },
+		{ MAC_TX_MODE, 0x0000,
+			0x00000000, 0x00000070 },
+		{ MAC_TX_LENGTHS, 0x0000,
+			0x00000000, 0x00003fff },
+		{ MAC_RX_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x000007fc },
+		{ MAC_RX_MODE, TG3_FL_5705,
+			0x00000000, 0x000007dc },
+		{ MAC_HASH_REG_0, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_1, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_2, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_3, 0x0000,
+			0x00000000, 0xffffffff },
+
+		/* Receive Data and Receive BD Initiator Control Registers. */
+		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
+			0x00000000, 0x00000003 },
+		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+0, 0x0000,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+4, 0x0000,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+8, 0x0000,
+			0x00000000, 0xffff0002 },
+		{ RCVDBDI_STD_BD+0xc, 0x0000,
+			0x00000000, 0xffffffff },
+
+		/* Receive BD Initiator Control Registers. */
+		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVBDI_STD_THRESH, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+
+		/* Host Coalescing Control Registers. */
+		{ HOSTCC_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x00000004 },
+		{ HOSTCC_MODE, TG3_FL_5705,
+			0x00000000, 0x000000f6 },
+		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
+			0xffffffff, 0x00000000 },
+		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
+			0xffffffff, 0x00000000 },
+
+		/* Buffer Manager Control Registers. */
+		{ BUFMGR_MB_POOL_ADDR, 0x0000,
+			0x00000000, 0x007fff80 },
+		{ BUFMGR_MB_POOL_SIZE, 0x0000,
+			0x00000000, 0x007fffff },
+		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+			0x00000000, 0x0000003f },
+		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
+			0x00000000, 0x000001ff },
+		{ BUFMGR_MB_HIGH_WATER, 0x0000,
+			0x00000000, 0x000001ff },
+		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
+			0xffffffff, 0x00000000 },
+		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+			0xffffffff, 0x00000000 },
+
+		/* Mailbox Registers */
+		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+			0x00000000, 0x000001ff },
+		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
+			0x00000000, 0x000001ff },
+		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
+			0x00000000, 0x000007ff },
+		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
+			0x00000000, 0x000001ff },
+
+		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
+	};
+
+	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+		is_5705 = 1;
+	else
+		is_5705 = 0;
+
+	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+			continue;
+
+		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
+			continue;
+
+		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
+			continue;
+
+		offset = (u32) reg_tbl[i].offset;
+		read_mask = reg_tbl[i].read_mask;
+		write_mask = reg_tbl[i].write_mask;
+
+		/* Save the original register content */
+		save_val = tr32(offset);
+
+		/* Determine the read-only value. */
+		read_val = save_val & read_mask;
+
+		/* Write zero to the register, then make sure the read-only bits
+		 * are not changed and the read/write bits are all zeros.
+		 */
+		tw32(offset, 0);
+
+		val = tr32(offset);
+
+		/* Test the read-only and read/write bits. */
+		if (((val & read_mask) != read_val) || (val & write_mask))
+			goto out;
+
+		/* Write ones to all the bits defined by RdMask and WrMask, then
+		 * make sure the read-only bits are not changed and the
+		 * read/write bits are all ones.
+		 */
+		tw32(offset, read_mask | write_mask);
+
+		val = tr32(offset);
+
+		/* Test the read-only bits. */
+		if ((val & read_mask) != read_val)
+			goto out;
+
+		/* Test the read/write bits. */
+		if ((val & write_mask) != write_mask)
+			goto out;
+
+		tw32(offset, save_val);
+	}
+
+	return 0;
+
+out:
+	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
+	tw32(offset, save_val);
+	return -EIO;
+}
+
+static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+{
+	static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+	int i;
+	u32 j;
+
+	for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
+		for (j = 0; j < len; j += 4) {
+			u32 val;
+
+			tg3_write_mem(tp, offset + j, test_pattern[i]);
+			tg3_read_mem(tp, offset + j, &val);
+			if (val != test_pattern[i])
+				return -EIO;
+		}
+	}
+	return 0;
+}
+
+static int tg3_test_memory(struct tg3 *tp)
+{
+	static struct mem_entry {
+		u32 offset;
+		u32 len;
+	} mem_tbl_570x[] = {
+		{ 0x00000000, 0x01000},
+		{ 0x00002000, 0x1c000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5705[] = {
+		{ 0x00000100, 0x0000c},
+		{ 0x00000200, 0x00008},
+		{ 0x00000b50, 0x00400},
+		{ 0x00004000, 0x00800},
+		{ 0x00006000, 0x01000},
+		{ 0x00008000, 0x02000},
+		{ 0x00010000, 0x0e000},
+		{ 0xffffffff, 0x00000}
+	};
+	struct mem_entry *mem_tbl;
+	int err = 0;
+	int i;
+
+	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+		mem_tbl = mem_tbl_5705;
+	else
+		mem_tbl = mem_tbl_570x;
+
+	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
+		    mem_tbl[i].len)) != 0)
+			break;
+	}
+
+	return err;
+}
+
+static int tg3_test_loopback(struct tg3 *tp)
+{
+	u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
+	u32 desc_idx;
+	struct sk_buff *skb, *rx_skb;
+	u8 *tx_data;
+	dma_addr_t map;
+	int num_pkts, tx_len, rx_len, i, err;
+	struct tg3_rx_buffer_desc *desc;
+
+	if (!netif_running(tp->dev))
+		return -ENODEV;
+
+	err = -EIO;
+
+	tg3_abort_hw(tp, 1);
+
+	/* Clearing this flag to keep interrupts disabled */
+	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+	tg3_reset_hw(tp);
+
+	mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+		   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
+		   MAC_MODE_PORT_MODE_GMII;
+	tw32(MAC_MODE, mac_mode);
+
+	tx_len = 1514;
+	skb = dev_alloc_skb(tx_len);
+	tx_data = skb_put(skb, tx_len);
+	memcpy(tx_data, tp->dev->dev_addr, 6);
+	memset(tx_data + 6, 0x0, 8);
+
+	tw32(MAC_RX_MTU_SIZE, tx_len + 4);
+
+	for (i = 14; i < tx_len; i++)
+		tx_data[i] = (u8) (i & 0xff);
+
+	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+
+	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+	       HOSTCC_MODE_NOW);
+
+	udelay(10);
+
+	rx_start_idx = tp->hw_status->idx[0].rx_producer;
+
+	send_idx = 0;
+	num_pkts = 0;
+
+	tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
+
+	send_idx++;
+	num_pkts++;
+
+	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
+	tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
+
+	udelay(10);
+
+	for (i = 0; i < 10; i++) {
+		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+		       HOSTCC_MODE_NOW);
+
+		udelay(10);
+
+		tx_idx = tp->hw_status->idx[0].tx_consumer;
+		rx_idx = tp->hw_status->idx[0].rx_producer;
+		if ((tx_idx == send_idx) &&
+		    (rx_idx == (rx_start_idx + num_pkts)))
+			break;
+	}
+
+	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+	dev_kfree_skb(skb);
+
+	if (tx_idx != send_idx)
+		goto out;
+
+	if (rx_idx != rx_start_idx + num_pkts)
+		goto out;
+
+	desc = &tp->rx_rcb[rx_start_idx];
+	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+	if (opaque_key != RXD_OPAQUE_RING_STD)
+		goto out;
+
+	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+		goto out;
+
+	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
+	if (rx_len != tx_len)
+		goto out;
+
+	rx_skb = tp->rx_std_buffers[desc_idx].skb;
+
+	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
+	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
+
+	for (i = 14; i < tx_len; i++) {
+		if (*(rx_skb->data + i) != (u8) (i & 0xff))
+			goto out;
+	}
+	err = 0;
+
+	/* tg3_free_rings will unmap and free the rx_skb */
+out:
+	return err;
+}
+
+static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
+			  u64 *data)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
+
+	if (tg3_test_nvram(tp) != 0) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		data[0] = 1;
+	}
+	if (tg3_test_link(tp) != 0) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		data[1] = 1;
+	}
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		if (netif_running(dev))
+			tg3_netif_stop(tp);
+
+		spin_lock_irq(&tp->lock);
+		spin_lock(&tp->tx_lock);
+
+		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
+		tg3_nvram_lock(tp);
+		tg3_halt_cpu(tp, RX_CPU_BASE);
+		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+			tg3_halt_cpu(tp, TX_CPU_BASE);
+		tg3_nvram_unlock(tp);
+
+		if (tg3_test_registers(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[2] = 1;
+		}
+		if (tg3_test_memory(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[3] = 1;
+		}
+		if (tg3_test_loopback(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[4] = 1;
+		}
+
+		spin_unlock(&tp->tx_lock);
+		spin_unlock_irq(&tp->lock);
+		if (tg3_test_interrupt(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[5] = 1;
+		}
+		spin_lock_irq(&tp->lock);
+		spin_lock(&tp->tx_lock);
+
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		if (netif_running(dev)) {
+			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+			tg3_init_hw(tp);
+			tg3_netif_start(tp);
+		}
+		spin_unlock(&tp->tx_lock);
+		spin_unlock_irq(&tp->lock);
+	}
+}
+
 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *data = if_mii(ifr);
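
A hypothetical user-space sketch of driving the new self-test path via the SIOCETHTOOL ioctl (the same route the ethtool utility's "-t" option takes). The interface name "eth0", the exact output format, and the error handling are assumptions, not part of the patch; only struct ethtool_test, ETHTOOL_TEST, and the flag bits come from the kernel ABI:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/types.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct {
                    struct ethtool_test hdr;
                    __u64 results[6];       /* matches TG3_NUM_TEST */
            } test;
            struct ifreq ifr;
            int sock, i;

            sock = socket(AF_INET, SOCK_DGRAM, 0);
            if (sock < 0)
                    return 1;

            memset(&test, 0, sizeof(test));
            test.hdr.cmd = ETHTOOL_TEST;
            test.hdr.flags = ETH_TEST_FL_OFFLINE;   /* also run offline tests */

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */
            ifr.ifr_data = (char *) &test;

            if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
                    return 1;

            /* a non-zero slot marks a failure, in ethtool_test_keys order */
            for (i = 0; i < 6; i++)
                    printf("test %d: %s\n", i, test.results[i] ? "FAIL" : "PASS");
            return (test.hdr.flags & ETH_TEST_FL_FAILED) ? 1 : 0;
    }
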
@@ -7203,6 +7838,14 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 }
 #endif
 
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	memcpy(ec, &tp->coal, sizeof(*ec));
+	return 0;
+}
+
 static struct ethtool_ops tg3_ethtool_ops = {
 	.get_settings		= tg3_get_settings,
 	.set_settings		= tg3_set_settings,
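
The matching read side for the new get_coalesce hook, again as a hedged user-space sketch reusing the headers and socket from the sketch above (this is roughly what "ethtool -c ethX" issues; the wrapper name is an assumption):

    static int get_coalesce_params(int sock, const char *ifname,
                                   struct ethtool_coalesce *ec)
    {
            struct ifreq ifr;

            memset(ec, 0, sizeof(*ec));
            ec->cmd = ETHTOOL_GCOALESCE;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *) ec;

            return ioctl(sock, SIOCETHTOOL, &ifr);  /* 0 on success */
    }
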
@@ -7232,9 +7875,12 @@ static struct ethtool_ops tg3_ethtool_ops = {
 	.get_tso		= ethtool_op_get_tso,
 	.set_tso		= tg3_set_tso,
 #endif
+	.self_test_count	= tg3_get_test_count,
+	.self_test		= tg3_self_test,
 	.get_strings		= tg3_get_strings,
 	.get_stats_count	= tg3_get_stats_count,
 	.get_ethtool_stats	= tg3_get_ethtool_stats,
+	.get_coalesce		= tg3_get_coalesce,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -7914,6 +8560,16 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 
 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
 			tp->led_ctrl = LED_CTRL_MODE_MAC;
+
+			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
+			 * read on some older 5700/5701 bootcode.
+			 */
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+			    ASIC_REV_5700 ||
+			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
+			    ASIC_REV_5701)
+				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
 			break;
 
 		case SHASTA_EXT_LED_SHARED:
@@ -8422,15 +9078,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
 		tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
 
-	/* Only 5701 and later support tagged irq status mode.
-	 * Also, 5788 chips cannot use tagged irq status.
-	 *
-	 * However, since we are using NAPI avoid tagged irq status
-	 * because the interrupt condition is more difficult to
-	 * fully clear in that mode.
-	 */
 	tp->coalesce_mode = 0;
-
 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -8494,6 +9142,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
 		tp->tg3_flags2 |= TG3_FLG2_IS_5788;
 
+	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
+		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
+	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+				      HOSTCC_MODE_CLRTICK_TXBD);
+
+		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+				       tp->misc_host_ctrl);
+	}
+
 	/* these are limited to 10/100 only */
 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -8671,6 +9331,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 	return 0;
 }
 
+#define BOUNDARY_SINGLE_CACHELINE	1
+#define BOUNDARY_MULTI_CACHELINE	2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+	int cacheline_size;
+	u8 byte;
+	int goal;
+
+	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+	if (byte == 0)
+		cacheline_size = 1024;
+	else
+		cacheline_size = (int) byte * 4;
+
+	/* On 5703 and later chips, the boundary bits have no
+	 * effect.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+		goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+	goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+	goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+	goal = 0;
+#endif
+#endif
+
+	if (!goal)
+		goto out;
+
+	/* PCI controllers on most RISC systems tend to disconnect
+	 * when a device tries to burst across a cache-line boundary.
+	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
+	 *
+	 * Unfortunately, for PCI-E there are only limited
+	 * write-side controls for this, and thus for reads
+	 * we will still get the disconnects.  We'll also waste
+	 * these PCI cycles for both read and write for chips
+	 * other than 5700 and 5701 which do not implement the
+	 * boundary bits.
+	 */
+	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+			} else {
+				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			}
+			break;
+
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+			break;
+
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			break;
+		};
+	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+				break;
+			}
+			/* fallthrough */
+		case 128:
+		default:
+			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+			break;
+		};
+	} else {
+		switch (cacheline_size) {
+		case 16:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_16 |
+					DMA_RWCTRL_WRITE_BNDRY_16);
+				break;
+			}
+			/* fallthrough */
+		case 32:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_32 |
+					DMA_RWCTRL_WRITE_BNDRY_32);
+				break;
+			}
+			/* fallthrough */
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_64 |
+					DMA_RWCTRL_WRITE_BNDRY_64);
+				break;
+			}
+			/* fallthrough */
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128 |
+					DMA_RWCTRL_WRITE_BNDRY_128);
+				break;
+			}
+			/* fallthrough */
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256 |
+				DMA_RWCTRL_WRITE_BNDRY_256);
+			break;
+		case 512:
+			val |= (DMA_RWCTRL_READ_BNDRY_512 |
+				DMA_RWCTRL_WRITE_BNDRY_512);
+			break;
+		case 1024:
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+				DMA_RWCTRL_WRITE_BNDRY_1024);
+			break;
+		};
+	}
+
+out:
+	return val;
+}
+
 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
 {
 	struct tg3_internal_buffer_desc test_desc;
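
One detail worth calling out in the function added above: the PCI cache line size register counts 32-bit words, not bytes. A minimal sketch of the conversion, with an invented helper name:

    /* e.g. cfg = 0x10 -> 16 words -> 64 bytes; 0 means "unset" and is
     * treated pessimistically as 1024 bytes, as tg3_calc_dma_bndry does.
     */
    static int pci_cacheline_bytes(u8 cfg)
    {
            return cfg ? (int) cfg * 4 : 1024;
    }
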
@@ -8752,12 +9552,12 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
 	return ret;
 }
 
-#define TEST_BUFFER_SIZE	0x400
+#define TEST_BUFFER_SIZE	0x2000
 
 static int __devinit tg3_test_dma(struct tg3 *tp)
 {
 	dma_addr_t buf_dma;
-	u32 *buf;
+	u32 *buf, saved_dma_rwctrl;
 	int ret;
 
 	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -8769,46 +9569,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
 
-#ifndef CONFIG_X86
-	{
-		u8 byte;
-		int cacheline_size;
-		pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
-
-		if (byte == 0)
-			cacheline_size = 1024;
-		else
-			cacheline_size = (int) byte * 4;
-
-		switch (cacheline_size) {
-		case 16:
-		case 32:
-		case 64:
-		case 128:
-			if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-			    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
-				break;
-			} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-				tp->dma_rwctrl &=
-					~(DMA_RWCTRL_PCI_WRITE_CMD);
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
-				break;
-			}
-			/* fallthrough */
-		case 256:
-			if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-			    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_256;
-			else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
-		};
-	}
-#endif
+	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
 		/* DMA read watermark not used on PCIE */
@@ -8827,7 +9588,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 		if (ccval == 0x6 || ccval == 0x7)
 			tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
-		/* Set bit 23 to renable PCIX hw bug fix */
+		/* Set bit 23 to enable PCIX hw bug fix */
 		tp->dma_rwctrl |= 0x009f0000;
 	} else {
 		tp->dma_rwctrl |= 0x001b000f;
@@ -8868,6 +9629,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
 		goto out;
 
+	/* It is best to perform DMA test with maximum write burst size
+	 * to expose the 5700/5701 write DMA bug.
+	 */
+	saved_dma_rwctrl = tp->dma_rwctrl;
+	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
 	while (1) {
 		u32 *p = buf, i;
 
@@ -8906,8 +9674,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 			if (p[i] == i)
 				continue;
 
-			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
-			    DMA_RWCTRL_WRITE_BNDRY_DISAB) {
+			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+			    DMA_RWCTRL_WRITE_BNDRY_16) {
+				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 				break;
@@ -8924,6 +9693,28 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 			break;
 		}
 	}
+	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+	    DMA_RWCTRL_WRITE_BNDRY_16) {
+		static struct pci_device_id dma_wait_state_chipsets[] = {
+			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
+				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
+			{ },
+		};
+
+		/* DMA test passed without adjusting DMA boundary,
+		 * now look for chipsets that are known to expose the
+		 * DMA bug without failing the test.
+		 */
+		if (pci_dev_present(dma_wait_state_chipsets)) {
+			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+		}
+		else
+			/* Safe to use the calculated DMA boundary. */
+			tp->dma_rwctrl = saved_dma_rwctrl;
+
+		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+	}
 
 out:
 	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@@ -9011,6 +9802,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
 	return peer;
 }
 
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+	struct ethtool_coalesce *ec = &tp->coal;
+
+	memset(ec, 0, sizeof(*ec));
+	ec->cmd = ETHTOOL_GCOALESCE;
+	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+				 HOSTCC_MODE_CLRTICK_TXBD)) {
+		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+	}
+}
+
 static int __devinit tg3_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -9232,7 +10048,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 		pci_save_state(tp->pdev);
 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	}
 
 	err = tg3_test_dma(tp);
@@ -9256,6 +10072,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	/* flow control autonegotiation is default behavior */
 	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 
+	tg3_init_coal(tp);
+
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_ERR PFX "Cannot register net device, "
@@ -9298,6 +10116,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
 	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
 	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+	printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
+	       dev->name, tp->dma_rwctrl);
 
 	return 0;
 
@@ -9355,7 +10175,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	spin_lock_irq(&tp->lock);
 	spin_lock(&tp->tx_lock);
-	tg3_halt(tp, 1);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);
 