Diffstat (limited to 'drivers/net/tg3.c')
 drivers/net/tg3.c | 1059 +++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 925 insertions(+), 134 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f79b02e80e75..fc9b5cd957aa 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -61,8 +61,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.27"
-#define DRV_MODULE_RELDATE	"May 5, 2005"
+#define DRV_MODULE_VERSION	"3.29"
+#define DRV_MODULE_RELDATE	"May 23, 2005"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -133,6 +133,8 @@
133/* number of ETHTOOL_GSTATS u64's */ 133/* number of ETHTOOL_GSTATS u64's */
134#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) 134#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135 135
136#define TG3_NUM_TEST 6
137
136static char version[] __devinitdata = 138static char version[] __devinitdata =
137 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 139 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138 140
@@ -206,6 +208,8 @@ static struct pci_device_id tg3_pci_tbl[] = {
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
@@ -314,6 +318,17 @@ static struct {
 	{ "nic_tx_threshold_hit" }
 };
 
+static struct {
+	const char string[ETH_GSTRING_LEN];
+} ethtool_test_keys[TG3_NUM_TEST] = {
+	{ "nvram test     (online) " },
+	{ "link test      (online) " },
+	{ "register test  (offline)" },
+	{ "memory test    (offline)" },
+	{ "loopback test  (offline)" },
+	{ "interrupt test (offline)" },
+};
+
 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 {
 	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
@@ -420,7 +435,8 @@ static void tg3_enable_ints(struct tg3 *tp)
 {
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+		     (tp->last_tag << 24));
 	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 
 	tg3_cond_int(tp);
@@ -455,10 +471,16 @@ static void tg3_restart_ints(struct tg3 *tp)
 {
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+		     tp->last_tag << 24);
 	mmiowb();
 
-	if (tg3_has_work(tp))
+	/* When doing tagged status, this work check is unnecessary.
+	 * The last_tag we write above tells the chip which piece of
+	 * work we've completed.
+	 */
+	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+	    tg3_has_work(tp))
 		tw32(HOSTCC_MODE, tp->coalesce_mode |
 		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
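The comment in tg3_restart_ints above is the heart of the tagged-status scheme: the NIC bumps an 8-bit status_tag each time it posts a new status block, and the driver acknowledges by echoing the last tag it processed into bits 31:24 of interrupt mailbox 0. If new work arrived in the meantime the echoed tag is stale and the chip simply re-raises the interrupt, which is why the explicit tg3_has_work() check can be skipped. A toy model of that handshake in plain user-space C (a sketch, not driver code; all names here are illustrative):

#include <stdio.h>

static unsigned int nic_status_tag;     /* bumped by the "hardware" */
static unsigned int last_tag;           /* last tag the "driver" saw */

static void nic_produce_work(void)
{
        nic_status_tag++;               /* new status block posted */
}

static int nic_ack_is_current(unsigned int mbox_val)
{
        /* NIC compares the echoed tag in bits 31:24 with its own */
        return ((mbox_val >> 24) & 0xff) == (nic_status_tag & 0xff);
}

int main(void)
{
        nic_produce_work();
        last_tag = nic_status_tag;      /* poll loop caught up here */
        nic_produce_work();             /* ...but new work races in */

        printf("ack %s\n", nic_ack_is_current(last_tag << 24) ?
               "current: NIC goes idle" : "stale: NIC re-interrupts");
        return 0;
}

Running it exercises the stale-ack case, the exact situation where the non-tagged protocol needed the racy SD_STATUS_UPDATED dance that the rest of this patch removes.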
@@ -2500,7 +2522,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 		if (netif_carrier_ok(tp->dev)) {
 			tw32(HOSTCC_STAT_COAL_TICKS,
-			     DEFAULT_STAT_COAL_TICKS);
+			     tp->coal.stats_block_coalesce_usecs);
 		} else {
 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
 		}
@@ -2886,7 +2908,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	 * All RX "locking" is done by ensuring outside
 	 * code synchronizes with dev->poll()
 	 */
-	done = 1;
 	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
 		int orig_budget = *budget;
 		int work_done;
@@ -2898,12 +2919,14 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
 		*budget -= work_done;
 		netdev->quota -= work_done;
-
-		if (work_done >= orig_budget)
-			done = 0;
 	}
 
+	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+		tp->last_tag = sblk->status_tag;
+	rmb();
+
 	/* if no more work, tell net stack and NIC we're done */
+	done = !tg3_has_work(tp);
 	if (done) {
 		spin_lock_irqsave(&tp->lock, flags);
 		__netif_rx_complete(netdev);
@@ -2928,22 +2951,21 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	spin_lock_irqsave(&tp->lock, flags);
 
 	/*
-	 * writing any value to intr-mbox-0 clears PCI INTA# and
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 	 * chip-internal interrupt pending events.
-	 * writing non-zero to intr-mbox-0 additional tells the
+	 * Writing non-zero to intr-mbox-0 additional tells the
 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 	 * event coalescing.
 	 */
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+	tp->last_tag = sblk->status_tag;
 	sblk->status &= ~SD_STATUS_UPDATED;
-
 	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);		/* schedule NAPI poll */
 	else {
-		/* no work, re-enable interrupts
-		 */
+		/* No work, re-enable interrupts. */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-			     0x00000000);
+			     tp->last_tag << 24);
 	}
 
 	spin_unlock_irqrestore(&tp->lock, flags);
@@ -2969,21 +2991,62 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	if ((sblk->status & SD_STATUS_UPDATED) ||
 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 		/*
-		 * writing any value to intr-mbox-0 clears PCI INTA# and
+		 * Writing any value to intr-mbox-0 clears PCI INTA# and
 		 * chip-internal interrupt pending events.
-		 * writing non-zero to intr-mbox-0 additional tells the
+		 * Writing non-zero to intr-mbox-0 additional tells the
 		 * NIC to stop sending us irqs, engaging "in-intr-handler"
 		 * event coalescing.
 		 */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 			     0x00000001);
+		sblk->status &= ~SD_STATUS_UPDATED;
+		if (likely(tg3_has_work(tp)))
+			netif_rx_schedule(dev);		/* schedule NAPI poll */
+		else {
+			/* No work, shared interrupt perhaps?  re-enable
+			 * interrupts, and flush that PCI write
+			 */
+			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+				     0x00000000);
+			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+		}
+	} else {	/* shared interrupt */
+		handled = 0;
+	}
+
+	spin_unlock_irqrestore(&tp->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	struct tg3 *tp = netdev_priv(dev);
+	struct tg3_hw_status *sblk = tp->hw_status;
+	unsigned long flags;
+	unsigned int handled = 1;
+
+	spin_lock_irqsave(&tp->lock, flags);
+
+	/* In INTx mode, it is possible for the interrupt to arrive at
+	 * the CPU before the status block posted prior to the interrupt.
+	 * Reading the PCI State register will confirm whether the
+	 * interrupt is ours and will flush the status block.
+	 */
+	if ((sblk->status & SD_STATUS_UPDATED) ||
+	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 		/*
-		 * Flush PCI write. This also guarantees that our
-		 * status block has been flushed to host memory.
+		 * writing any value to intr-mbox-0 clears PCI INTA# and
+		 * chip-internal interrupt pending events.
+		 * writing non-zero to intr-mbox-0 additional tells the
+		 * NIC to stop sending us irqs, engaging "in-intr-handler"
+		 * event coalescing.
 		 */
-		tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+			     0x00000001);
+		tp->last_tag = sblk->status_tag;
 		sblk->status &= ~SD_STATUS_UPDATED;
-
 		if (likely(tg3_has_work(tp)))
 			netif_rx_schedule(dev);		/* schedule NAPI poll */
 		else {
@@ -2991,7 +3054,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 			 * interrupts, and flush that PCI write
 			 */
 			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-				     0x00000000);
+				     tp->last_tag << 24);
 			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 		}
 	} else {	/* shared interrupt */
@@ -3020,7 +3083,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
 }
 
 static int tg3_init_hw(struct tg3 *);
-static int tg3_halt(struct tg3 *, int);
+static int tg3_halt(struct tg3 *, int, int);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void tg3_poll_controller(struct net_device *dev)
@@ -3044,7 +3107,7 @@ static void tg3_reset_task(void *_data)
 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 
-	tg3_halt(tp, 0);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 	tg3_init_hw(tp);
 
 	tg3_netif_start(tp);
@@ -3390,7 +3453,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	spin_lock_irq(&tp->lock);
 	spin_lock(&tp->tx_lock);
 
-	tg3_halt(tp, 1);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 
 	tg3_set_mtu(dev, tp, new_mtu);
 
@@ -4081,19 +4144,19 @@ static void tg3_stop_fw(struct tg3 *tp)
 }
 
 /* tp->lock is held. */
-static int tg3_halt(struct tg3 *tp, int silent)
+static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
 	int err;
 
 	tg3_stop_fw(tp);
 
-	tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
+	tg3_write_sig_pre_reset(tp, kind);
 
 	tg3_abort_hw(tp, silent);
 	err = tg3_chip_reset(tp);
 
-	tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
-	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+	tg3_write_sig_legacy(tp, kind);
+	tg3_write_sig_post_reset(tp, kind);
 
 	if (err)
 		return err;
@@ -4307,7 +4370,12 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
 	 */
 	tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
 
+	/* It is possible that bootcode is still loading at this point.
+	 * Get the nvram lock first before halting the cpu.
+	 */
+	tg3_nvram_lock(tp);
 	err = tg3_halt_cpu(tp, cpu_base);
+	tg3_nvram_unlock(tp);
 	if (err)
 		goto out;
 
@@ -5044,6 +5112,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 }
 
 static void __tg3_set_rx_mode(struct net_device *);
+static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+	tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+	tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+	tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+	tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+	}
+	tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+	tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+		u32 val = ec->stats_block_coalesce_usecs;
+
+		if (!netif_carrier_ok(tp->dev))
+			val = 0;
+
+		tw32(HOSTCC_STAT_COAL_TICKS, val);
+	}
+}
 
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp)
@@ -5366,16 +5455,7 @@ static int tg3_reset_hw(struct tg3 *tp)
 		udelay(10);
 	}
 
-	tw32(HOSTCC_RXCOL_TICKS, 0);
-	tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
-	tw32(HOSTCC_RXMAX_FRAMES, 1);
-	tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
-		tw32(HOSTCC_RXCOAL_TICK_INT, 0);
-		tw32(HOSTCC_TXCOAL_TICK_INT, 0);
-	}
-	tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
-	tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+	tg3_set_coalesce(tp, &tp->coal);
 
 	/* set status block DMA address */
 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5388,8 +5468,6 @@ static int tg3_reset_hw(struct tg3 *tp)
 	 * the tg3_periodic_fetch_stats call there, and
 	 * tg3_get_stats to see how this works for 5705/5750 chips.
 	 */
-	tw32(HOSTCC_STAT_COAL_TICKS,
-	     DEFAULT_STAT_COAL_TICKS);
 	tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 	     ((u64) tp->stats_mapping >> 32));
 	tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5445,7 +5523,8 @@ static int tg3_reset_hw(struct tg3 *tp)
 	udelay(100);
 
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
-	tr32(MAILBOX_INTERRUPT_0);
+	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+	tp->last_tag = 0;
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -5723,31 +5802,33 @@ static void tg3_timer(unsigned long __opaque)
 	spin_lock_irqsave(&tp->lock, flags);
 	spin_lock(&tp->tx_lock);
 
-	/* All of this garbage is because when using non-tagged
-	 * IRQ status the mailbox/status_block protocol the chip
-	 * uses with the cpu is race prone.
-	 */
-	if (tp->hw_status->status & SD_STATUS_UPDATED) {
-		tw32(GRC_LOCAL_CTRL,
-		     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
-	} else {
-		tw32(HOSTCC_MODE, tp->coalesce_mode |
-		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
-	}
+	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+		/* All of this garbage is because when using non-tagged
+		 * IRQ status the mailbox/status_block protocol the chip
+		 * uses with the cpu is race prone.
+		 */
+		if (tp->hw_status->status & SD_STATUS_UPDATED) {
+			tw32(GRC_LOCAL_CTRL,
+			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+		} else {
+			tw32(HOSTCC_MODE, tp->coalesce_mode |
+			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+		}
 
-	if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-		tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irqrestore(&tp->lock, flags);
-		schedule_work(&tp->reset_task);
-		return;
+		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+			spin_unlock(&tp->tx_lock);
+			spin_unlock_irqrestore(&tp->lock, flags);
+			schedule_work(&tp->reset_task);
+			return;
+		}
 	}
 
-	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
-		tg3_periodic_fetch_stats(tp);
-
 	/* This part only runs once per second. */
 	if (!--tp->timer_counter) {
+		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+			tg3_periodic_fetch_stats(tp);
+
 		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
 			u32 mac_stat;
 			int phy_event;
@@ -5818,6 +5899,9 @@ static int tg3_test_interrupt(struct tg3 *tp)
 	int err, i;
 	u32 int_mbox = 0;
 
+	if (!netif_running(dev))
+		return -ENODEV;
+
 	tg3_disable_ints(tp);
 
 	free_irq(tp->pdev->irq, dev);
@@ -5846,9 +5930,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		err = request_irq(tp->pdev->irq, tg3_msi,
 				  SA_SAMPLE_RANDOM, dev->name, dev);
-	else
-		err = request_irq(tp->pdev->irq, tg3_interrupt,
+	else {
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
+		err = request_irq(tp->pdev->irq, fn,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 
 	if (err)
 		return err;
@@ -5900,9 +5988,14 @@ static int tg3_test_msi(struct tg3 *tp)
 
 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 
-	err = request_irq(tp->pdev->irq, tg3_interrupt,
-			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	{
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
 
+		err = request_irq(tp->pdev->irq, fn,
+				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 	if (err)
 		return err;
 
@@ -5912,7 +6005,7 @@ static int tg3_test_msi(struct tg3 *tp)
 	spin_lock_irq(&tp->lock);
 	spin_lock(&tp->tx_lock);
 
-	tg3_halt(tp, 1);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	err = tg3_init_hw(tp);
 
 	spin_unlock(&tp->tx_lock);
@@ -5948,7 +6041,13 @@ static int tg3_open(struct net_device *dev)
 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
-		if (pci_enable_msi(tp->pdev) == 0) {
+		/* All MSI supporting chips should support tagged
+		 * status.  Assert that this is the case.
+		 */
+		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+			       "Not using MSI.\n", tp->dev->name);
+		} else if (pci_enable_msi(tp->pdev) == 0) {
 			u32 msi_mode;
 
 			msi_mode = tr32(MSGINT_MODE);
@@ -5959,9 +6058,14 @@ static int tg3_open(struct net_device *dev)
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		err = request_irq(tp->pdev->irq, tg3_msi,
 				  SA_SAMPLE_RANDOM, dev->name, dev);
-	else
-		err = request_irq(tp->pdev->irq, tg3_interrupt,
+	else {
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
+
+		err = request_irq(tp->pdev->irq, fn,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 
 	if (err) {
 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -5977,12 +6081,19 @@ static int tg3_open(struct net_device *dev)
 
 	err = tg3_init_hw(tp);
 	if (err) {
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_free_rings(tp);
 	} else {
-		tp->timer_offset = HZ / 10;
-		tp->timer_counter = tp->timer_multiplier = 10;
-		tp->asf_counter = tp->asf_multiplier = (10 * 120);
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			tp->timer_offset = HZ;
+		else
+			tp->timer_offset = HZ / 10;
+
+		BUG_ON(tp->timer_offset > HZ);
+		tp->timer_counter = tp->timer_multiplier =
+			(HZ / tp->timer_offset);
+		tp->asf_counter = tp->asf_multiplier =
+			((HZ / tp->timer_offset) * 120);
 
 		init_timer(&tp->timer);
 		tp->timer.expires = jiffies + tp->timer_offset;
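The timer setup above derives every counter from timer_offset instead of hard-coding a 10 Hz tick: in tagged-status mode the tick can slow to once per second because the timer no longer has to paper over the mailbox race. A standalone check of the arithmetic (HZ = 1000 is an assumption here; any HZ value that divides evenly behaves the same way):

#include <stdio.h>

#define HZ 1000 /* assumed; a common kernel config value */

int main(void)
{
        int tagged;

        for (tagged = 0; tagged <= 1; tagged++) {
                /* mirror the driver's computation */
                int timer_offset = tagged ? HZ : HZ / 10;
                int timer_counter = HZ / timer_offset;
                int asf_counter = (HZ / timer_offset) * 120;

                printf("%stagged: tick every %d ms, 1s section after %d "
                       "ticks, ASF after %d ticks (%d s)\n",
                       tagged ? "" : "non-", timer_offset * 1000 / HZ,
                       timer_counter, asf_counter,
                       asf_counter * timer_offset / HZ);
        }
        return 0;
}

Both modes still run the once-per-second section at 1 Hz and the ASF heartbeat every 120 seconds; only the raw tick rate differs.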
@@ -6005,6 +6116,7 @@ static int tg3_open(struct net_device *dev)
 
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
 		err = tg3_test_msi(tp);
+
 		if (err) {
 			spin_lock_irq(&tp->lock);
 			spin_lock(&tp->tx_lock);
@@ -6013,7 +6125,7 @@ static int tg3_open(struct net_device *dev)
 			pci_disable_msi(tp->pdev);
 			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 		}
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_free_rings(tp);
 		tg3_free_consistent(tp);
 
@@ -6286,7 +6398,7 @@ static int tg3_close(struct net_device *dev)
 
 	tg3_disable_ints(tp);
 
-	tg3_halt(tp, 1);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	tg3_free_rings(tp);
 	tp->tg3_flags &=
 		~(TG3_FLAG_INIT_COMPLETE |
@@ -7006,7 +7118,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 	tp->tx_pending = ering->tx_pending;
 
 	if (netif_running(dev)) {
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_init_hw(tp);
 		tg3_netif_start(tp);
 	}
@@ -7049,7 +7161,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 	tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
 
 	if (netif_running(dev)) {
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_init_hw(tp);
 		tg3_netif_start(tp);
 	}
@@ -7108,12 +7220,20 @@ static int tg3_get_stats_count (struct net_device *dev)
 	return TG3_NUM_STATS;
 }
 
+static int tg3_get_test_count (struct net_device *dev)
+{
+	return TG3_NUM_TEST;
+}
+
 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
 {
 	switch (stringset) {
 	case ETH_SS_STATS:
 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
 		break;
+	case ETH_SS_TEST:
+		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+		break;
 	default:
 		WARN_ON(1);	/* we need a WARN() */
 		break;
@@ -7127,6 +7247,516 @@ static void tg3_get_ethtool_stats (struct net_device *dev,
 	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
 }
 
+#define NVRAM_TEST_SIZE 0x100
+
+static int tg3_test_nvram(struct tg3 *tp)
+{
+	u32 *buf, csum;
+	int i, j, err = 0;
+
+	buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
+		u32 val;
+
+		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
+			break;
+		buf[j] = cpu_to_le32(val);
+	}
+	if (i < NVRAM_TEST_SIZE)
+		goto out;
+
+	err = -EIO;
+	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
+		goto out;
+
+	/* Bootstrap checksum at offset 0x10 */
+	csum = calc_crc((unsigned char *) buf, 0x10);
+	if(csum != cpu_to_le32(buf[0x10/4]))
+		goto out;
+
+	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
+	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
+	if (csum != cpu_to_le32(buf[0xfc/4]))
+		goto out;
+
+	err = 0;
+
+out:
+	kfree(buf);
+	return err;
+}
+
+#define TG3_SERDES_TIMEOUT_SEC	2
+#define TG3_COPPER_TIMEOUT_SEC	6
+
+static int tg3_test_link(struct tg3 *tp)
+{
+	int i, max;
+
+	if (!netif_running(tp->dev))
+		return -ENODEV;
+
+	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+		max = TG3_SERDES_TIMEOUT_SEC;
+	else
+		max = TG3_COPPER_TIMEOUT_SEC;
+
+	for (i = 0; i < max; i++) {
+		if (netif_carrier_ok(tp->dev))
+			return 0;
+
+		if (msleep_interruptible(1000))
+			break;
+	}
+
+	return -EIO;
+}
+
+/* Only test the commonly used registers */
+static int tg3_test_registers(struct tg3 *tp)
+{
+	int i, is_5705;
+	u32 offset, read_mask, write_mask, val, save_val, read_val;
+	static struct {
+		u16 offset;
+		u16 flags;
+#define TG3_FL_5705	0x1
+#define TG3_FL_NOT_5705	0x2
+#define TG3_FL_NOT_5788	0x4
+		u32 read_mask;
+		u32 write_mask;
+	} reg_tbl[] = {
+		/* MAC Control Registers */
+		{ MAC_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x00ef6f8c },
+		{ MAC_MODE, TG3_FL_5705,
+			0x00000000, 0x01ef6b8c },
+		{ MAC_STATUS, TG3_FL_NOT_5705,
+			0x03800107, 0x00000000 },
+		{ MAC_STATUS, TG3_FL_5705,
+			0x03800100, 0x00000000 },
+		{ MAC_ADDR_0_HIGH, 0x0000,
+			0x00000000, 0x0000ffff },
+		{ MAC_ADDR_0_LOW, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_RX_MTU_SIZE, 0x0000,
+			0x00000000, 0x0000ffff },
+		{ MAC_TX_MODE, 0x0000,
+			0x00000000, 0x00000070 },
+		{ MAC_TX_LENGTHS, 0x0000,
+			0x00000000, 0x00003fff },
+		{ MAC_RX_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x000007fc },
+		{ MAC_RX_MODE, TG3_FL_5705,
+			0x00000000, 0x000007dc },
+		{ MAC_HASH_REG_0, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_1, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_2, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_3, 0x0000,
+			0x00000000, 0xffffffff },
+
+		/* Receive Data and Receive BD Initiator Control Registers. */
+		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
+			0x00000000, 0x00000003 },
+		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+0, 0x0000,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+4, 0x0000,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+8, 0x0000,
+			0x00000000, 0xffff0002 },
+		{ RCVDBDI_STD_BD+0xc, 0x0000,
+			0x00000000, 0xffffffff },
+
+		/* Receive BD Initiator Control Registers. */
+		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVBDI_STD_THRESH, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+
+		/* Host Coalescing Control Registers. */
+		{ HOSTCC_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x00000004 },
+		{ HOSTCC_MODE, TG3_FL_5705,
+			0x00000000, 0x000000f6 },
+		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
+			0xffffffff, 0x00000000 },
+		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
+			0xffffffff, 0x00000000 },
+
+		/* Buffer Manager Control Registers. */
+		{ BUFMGR_MB_POOL_ADDR, 0x0000,
+			0x00000000, 0x007fff80 },
+		{ BUFMGR_MB_POOL_SIZE, 0x0000,
+			0x00000000, 0x007fffff },
+		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+			0x00000000, 0x0000003f },
+		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
+			0x00000000, 0x000001ff },
+		{ BUFMGR_MB_HIGH_WATER, 0x0000,
+			0x00000000, 0x000001ff },
+		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
+			0xffffffff, 0x00000000 },
+		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+			0xffffffff, 0x00000000 },
+
+		/* Mailbox Registers */
+		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+			0x00000000, 0x000001ff },
+		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
+			0x00000000, 0x000001ff },
+		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
+			0x00000000, 0x000007ff },
+		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
+			0x00000000, 0x000001ff },
+
+		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
+	};
+
+	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+		is_5705 = 1;
+	else
+		is_5705 = 0;
+
+	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+			continue;
+
+		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
+			continue;
+
+		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
+			continue;
+
+		offset = (u32) reg_tbl[i].offset;
+		read_mask = reg_tbl[i].read_mask;
+		write_mask = reg_tbl[i].write_mask;
+
+		/* Save the original register content */
+		save_val = tr32(offset);
+
+		/* Determine the read-only value. */
+		read_val = save_val & read_mask;
+
+		/* Write zero to the register, then make sure the read-only bits
+		 * are not changed and the read/write bits are all zeros.
+		 */
+		tw32(offset, 0);
+
+		val = tr32(offset);
+
+		/* Test the read-only and read/write bits. */
+		if (((val & read_mask) != read_val) || (val & write_mask))
+			goto out;
+
+		/* Write ones to all the bits defined by RdMask and WrMask, then
+		 * make sure the read-only bits are not changed and the
+		 * read/write bits are all ones.
+		 */
+		tw32(offset, read_mask | write_mask);
+
+		val = tr32(offset);
+
+		/* Test the read-only bits. */
+		if ((val & read_mask) != read_val)
+			goto out;
+
+		/* Test the read/write bits. */
+		if ((val & write_mask) != write_mask)
+			goto out;
+
+		tw32(offset, save_val);
+	}
+
+	return 0;
+
+out:
+	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
+	tw32(offset, save_val);
+	return -EIO;
+}
+
+static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+{
+	static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+	int i;
+	u32 j;
+
+	for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
+		for (j = 0; j < len; j += 4) {
+			u32 val;
+
+			tg3_write_mem(tp, offset + j, test_pattern[i]);
+			tg3_read_mem(tp, offset + j, &val);
+			if (val != test_pattern[i])
+				return -EIO;
+		}
+	}
+	return 0;
+}
+
+static int tg3_test_memory(struct tg3 *tp)
+{
+	static struct mem_entry {
+		u32 offset;
+		u32 len;
+	} mem_tbl_570x[] = {
+		{ 0x00000000, 0x01000},
+		{ 0x00002000, 0x1c000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5705[] = {
+		{ 0x00000100, 0x0000c},
+		{ 0x00000200, 0x00008},
+		{ 0x00000b50, 0x00400},
+		{ 0x00004000, 0x00800},
+		{ 0x00006000, 0x01000},
+		{ 0x00008000, 0x02000},
+		{ 0x00010000, 0x0e000},
+		{ 0xffffffff, 0x00000}
+	};
+	struct mem_entry *mem_tbl;
+	int err = 0;
+	int i;
+
+	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+		mem_tbl = mem_tbl_5705;
+	else
+		mem_tbl = mem_tbl_570x;
+
+	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
+		    mem_tbl[i].len)) != 0)
+			break;
+	}
+
+	return err;
+}
+
+static int tg3_test_loopback(struct tg3 *tp)
+{
+	u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
+	u32 desc_idx;
+	struct sk_buff *skb, *rx_skb;
+	u8 *tx_data;
+	dma_addr_t map;
+	int num_pkts, tx_len, rx_len, i, err;
+	struct tg3_rx_buffer_desc *desc;
+
+	if (!netif_running(tp->dev))
+		return -ENODEV;
+
+	err = -EIO;
+
+	tg3_abort_hw(tp, 1);
+
+	/* Clearing this flag to keep interrupts disabled */
+	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+	tg3_reset_hw(tp);
+
+	mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+		   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
+		   MAC_MODE_PORT_MODE_GMII;
+	tw32(MAC_MODE, mac_mode);
+
+	tx_len = 1514;
+	skb = dev_alloc_skb(tx_len);
+	tx_data = skb_put(skb, tx_len);
+	memcpy(tx_data, tp->dev->dev_addr, 6);
+	memset(tx_data + 6, 0x0, 8);
+
+	tw32(MAC_RX_MTU_SIZE, tx_len + 4);
+
+	for (i = 14; i < tx_len; i++)
+		tx_data[i] = (u8) (i & 0xff);
+
+	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+
+	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+	       HOSTCC_MODE_NOW);
+
+	udelay(10);
+
+	rx_start_idx = tp->hw_status->idx[0].rx_producer;
+
+	send_idx = 0;
+	num_pkts = 0;
+
+	tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
+
+	send_idx++;
+	num_pkts++;
+
+	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
+	tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
+
+	udelay(10);
+
+	for (i = 0; i < 10; i++) {
+		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+		       HOSTCC_MODE_NOW);
+
+		udelay(10);
+
+		tx_idx = tp->hw_status->idx[0].tx_consumer;
+		rx_idx = tp->hw_status->idx[0].rx_producer;
+		if ((tx_idx == send_idx) &&
+		    (rx_idx == (rx_start_idx + num_pkts)))
+			break;
+	}
+
+	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+	dev_kfree_skb(skb);
+
+	if (tx_idx != send_idx)
+		goto out;
+
+	if (rx_idx != rx_start_idx + num_pkts)
+		goto out;
+
+	desc = &tp->rx_rcb[rx_start_idx];
+	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+	if (opaque_key != RXD_OPAQUE_RING_STD)
+		goto out;
+
+	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+		goto out;
+
+	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
+	if (rx_len != tx_len)
+		goto out;
+
+	rx_skb = tp->rx_std_buffers[desc_idx].skb;
+
+	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
+	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
+
+	for (i = 14; i < tx_len; i++) {
+		if (*(rx_skb->data + i) != (u8) (i & 0xff))
+			goto out;
+	}
+	err = 0;
+
+	/* tg3_free_rings will unmap and free the rx_skb */
+out:
+	return err;
+}
+
+static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
+			  u64 *data)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
+
+	if (tg3_test_nvram(tp) != 0) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		data[0] = 1;
+	}
+	if (tg3_test_link(tp) != 0) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		data[1] = 1;
+	}
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		if (netif_running(dev))
+			tg3_netif_stop(tp);
+
+		spin_lock_irq(&tp->lock);
+		spin_lock(&tp->tx_lock);
+
+		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
+		tg3_nvram_lock(tp);
+		tg3_halt_cpu(tp, RX_CPU_BASE);
+		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+			tg3_halt_cpu(tp, TX_CPU_BASE);
+		tg3_nvram_unlock(tp);
+
+		if (tg3_test_registers(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[2] = 1;
+		}
+		if (tg3_test_memory(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[3] = 1;
+		}
+		if (tg3_test_loopback(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[4] = 1;
+		}
+
+		spin_unlock(&tp->tx_lock);
+		spin_unlock_irq(&tp->lock);
+		if (tg3_test_interrupt(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[5] = 1;
+		}
+		spin_lock_irq(&tp->lock);
+		spin_lock(&tp->tx_lock);
+
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		if (netif_running(dev)) {
+			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+			tg3_init_hw(tp);
+			tg3_netif_start(tp);
+		}
+		spin_unlock(&tp->tx_lock);
+		spin_unlock_irq(&tp->lock);
+	}
+}
+
 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *data = if_mii(ifr);
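All of the tests in the block above are dispatched from tg3_self_test, which user space reaches through the standard ETHTOOL_TEST ioctl (this is what `ethtool -t` issues). A hedged sketch of a minimal caller; the interface name "eth0" and the hard-coded result count of 6 (TG3_NUM_TEST) are assumptions for illustration, and a robust caller would read the count from ETHTOOL_GDRVINFO's testinfo_len first:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        struct ethtool_test *test;
        int fd, i, count = 6;   /* TG3_NUM_TEST; assumed, see lead-in */

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        /* struct ethtool_test ends in a flexible u64 result array */
        test = calloc(1, sizeof(*test) + count * sizeof(__u64));
        test->cmd = ETHTOOL_TEST;
        test->flags = ETH_TEST_FL_OFFLINE;      /* run offline tests too */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */
        ifr.ifr_data = (char *) test;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
                printf("self-test %s\n",
                       (test->flags & ETH_TEST_FL_FAILED) ?
                       "FAILED" : "PASSED");
                for (i = 0; i < count; i++)
                        printf("  result %d: %llu\n", i,
                               (unsigned long long) test->data[i]);
        }
        free(test);
        return 0;
}

The per-test result slots printed here line up with the ethtool_test_keys strings registered earlier in the patch.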
@@ -7203,6 +7833,14 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 }
 #endif
 
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	memcpy(ec, &tp->coal, sizeof(*ec));
+	return 0;
+}
+
 static struct ethtool_ops tg3_ethtool_ops = {
 	.get_settings		= tg3_get_settings,
 	.set_settings		= tg3_set_settings,
@@ -7232,9 +7870,12 @@ static struct ethtool_ops tg3_ethtool_ops = {
 	.get_tso		= ethtool_op_get_tso,
 	.set_tso		= tg3_set_tso,
 #endif
+	.self_test_count	= tg3_get_test_count,
+	.self_test		= tg3_self_test,
 	.get_strings		= tg3_get_strings,
 	.get_stats_count	= tg3_get_stats_count,
 	.get_ethtool_stats	= tg3_get_ethtool_stats,
+	.get_coalesce		= tg3_get_coalesce,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
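With .get_coalesce wired into tg3_ethtool_ops, the defaults that tg3_init_coal stores in tp->coal (added later in this patch) become queryable via ETHTOOL_GCOALESCE, i.e. `ethtool -c ethX`. A minimal user-space reader, as a sketch (the interface name is an assumption):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_coalesce ecoal = { .cmd = ETHTOOL_GCOALESCE };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */
        ifr.ifr_data = (char *) &ecoal;

        /* the driver answers this by memcpy'ing tp->coal back to us */
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("rx-usecs %u rx-frames %u tx-usecs %u tx-frames %u\n",
                       ecoal.rx_coalesce_usecs,
                       ecoal.rx_max_coalesced_frames,
                       ecoal.tx_coalesce_usecs,
                       ecoal.tx_max_coalesced_frames);
        return 0;
}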
@@ -8422,15 +9063,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
 		tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
 
-	/* Only 5701 and later support tagged irq status mode.
-	 * Also, 5788 chips cannot use tagged irq status.
-	 *
-	 * However, since we are using NAPI avoid tagged irq status
-	 * because the interrupt condition is more difficult to
-	 * fully clear in that mode.
-	 */
 	tp->coalesce_mode = 0;
-
 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -8494,6 +9127,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
 		tp->tg3_flags2 |= TG3_FLG2_IS_5788;
 
+	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
+		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
+	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+				      HOSTCC_MODE_CLRTICK_TXBD);
+
+		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+				       tp->misc_host_ctrl);
+	}
+
 	/* these are limited to 10/100 only */
 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -8671,6 +9316,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 	return 0;
 }
 
+#define BOUNDARY_SINGLE_CACHELINE	1
+#define BOUNDARY_MULTI_CACHELINE	2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+	int cacheline_size;
+	u8 byte;
+	int goal;
+
+	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+	if (byte == 0)
+		cacheline_size = 1024;
+	else
+		cacheline_size = (int) byte * 4;
+
+	/* On 5703 and later chips, the boundary bits have no
+	 * effect.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+		goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+	goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+	goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+	goal = 0;
+#endif
+#endif
+
+	if (!goal)
+		goto out;
+
+	/* PCI controllers on most RISC systems tend to disconnect
+	 * when a device tries to burst across a cache-line boundary.
+	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
+	 *
+	 * Unfortunately, for PCI-E there are only limited
+	 * write-side controls for this, and thus for reads
+	 * we will still get the disconnects.  We'll also waste
+	 * these PCI cycles for both read and write for chips
+	 * other than 5700 and 5701 which do not implement the
+	 * boundary bits.
+	 */
+	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+			} else {
+				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			}
+			break;
+
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+			break;
+
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			break;
+		};
+	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+				break;
+			}
+			/* fallthrough */
+		case 128:
+		default:
+			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+			break;
+		};
+	} else {
+		switch (cacheline_size) {
+		case 16:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_16 |
+					DMA_RWCTRL_WRITE_BNDRY_16);
+				break;
+			}
+			/* fallthrough */
+		case 32:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_32 |
+					DMA_RWCTRL_WRITE_BNDRY_32);
+				break;
+			}
+			/* fallthrough */
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_64 |
+					DMA_RWCTRL_WRITE_BNDRY_64);
+				break;
+			}
+			/* fallthrough */
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128 |
+					DMA_RWCTRL_WRITE_BNDRY_128);
+				break;
+			}
+			/* fallthrough */
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256 |
+				DMA_RWCTRL_WRITE_BNDRY_256);
+			break;
+		case 512:
+			val |= (DMA_RWCTRL_READ_BNDRY_512 |
+				DMA_RWCTRL_WRITE_BNDRY_512);
+			break;
+		case 1024:
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+				DMA_RWCTRL_WRITE_BNDRY_1024);
+			break;
+		};
+	}
+
+out:
+	return val;
+}
+
 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
 {
 	struct tg3_internal_buffer_desc test_desc;
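tg3_calc_dma_bndry keys everything off PCI_CACHE_LINE_SIZE, which the PCI spec encodes in units of 32-bit words; hence the `byte * 4` conversion to bytes, with a raw value of 0 treated as "unset, assume 1024". A small standalone illustration of that decoding (the sample register values are made up):

#include <stdio.h>

/* PCI_CACHE_LINE_SIZE (config offset 0x0c) counts 32-bit words. */
static int cacheline_bytes(unsigned char raw)
{
        return raw ? raw * 4 : 1024;    /* 0 means unset: assume the worst */
}

int main(void)
{
        unsigned char samples[] = { 0x00, 0x08, 0x10, 0x20 };   /* made up */
        int i;

        for (i = 0; i < 4; i++)
                printf("raw 0x%02x -> %d-byte cache line\n",
                       samples[i], cacheline_bytes(samples[i]));
        return 0;
}

So 0x10, a typical x86 value, decodes to a 64-byte cache line, which on the non-PCI-X/non-PCIe path above selects the 64-byte read/write boundary when a single-cacheline goal is in effect.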
@@ -8752,12 +9537,12 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
 	return ret;
 }
 
-#define TEST_BUFFER_SIZE	0x400
+#define TEST_BUFFER_SIZE	0x2000
 
 static int __devinit tg3_test_dma(struct tg3 *tp)
 {
 	dma_addr_t buf_dma;
-	u32 *buf;
+	u32 *buf, saved_dma_rwctrl;
 	int ret;
 
 	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -8769,46 +9554,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
 
-#ifndef CONFIG_X86
-	{
-		u8 byte;
-		int cacheline_size;
-		pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
-
-		if (byte == 0)
-			cacheline_size = 1024;
-		else
-			cacheline_size = (int) byte * 4;
-
-		switch (cacheline_size) {
-		case 16:
-		case 32:
-		case 64:
-		case 128:
-			if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-			    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
-				break;
-			} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-				tp->dma_rwctrl &=
-					~(DMA_RWCTRL_PCI_WRITE_CMD);
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
-				break;
-			}
-			/* fallthrough */
-		case 256:
-			if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-			    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_256;
-			else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
-		};
-	}
-#endif
+	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
 		/* DMA read watermark not used on PCIE */
@@ -8827,7 +9573,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 		if (ccval == 0x6 || ccval == 0x7)
 			tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
-		/* Set bit 23 to renable PCIX hw bug fix */
+		/* Set bit 23 to enable PCIX hw bug fix */
 		tp->dma_rwctrl |= 0x009f0000;
 	} else {
 		tp->dma_rwctrl |= 0x001b000f;
@@ -8868,6 +9614,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
 		goto out;
 
+	/* It is best to perform DMA test with maximum write burst size
+	 * to expose the 5700/5701 write DMA bug.
+	 */
+	saved_dma_rwctrl = tp->dma_rwctrl;
+	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
 	while (1) {
 		u32 *p = buf, i;
 
@@ -8906,8 +9659,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 			if (p[i] == i)
 				continue;
 
-			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
-			    DMA_RWCTRL_WRITE_BNDRY_DISAB) {
+			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+			    DMA_RWCTRL_WRITE_BNDRY_16) {
+				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 				break;
@@ -8924,6 +9678,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 			break;
 		}
 	}
+	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+	    DMA_RWCTRL_WRITE_BNDRY_16) {
+		/* DMA test passed without adjusting DMA boundary,
+		 * just restore the calculated DMA boundary
+		 */
+		tp->dma_rwctrl = saved_dma_rwctrl;
+		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+	}
 
 out:
 	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@@ -9011,6 +9773,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
 	return peer;
 }
 
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+	struct ethtool_coalesce *ec = &tp->coal;
+
+	memset(ec, 0, sizeof(*ec));
+	ec->cmd = ETHTOOL_GCOALESCE;
+	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+				 HOSTCC_MODE_CLRTICK_TXBD)) {
+		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+	}
+}
+
 static int __devinit tg3_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -9232,7 +10019,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 		pci_save_state(tp->pdev);
 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
-		tg3_halt(tp, 1);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	}
 
 	err = tg3_test_dma(tp);
@@ -9256,6 +10043,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	/* flow control autonegotiation is default behavior */
 	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 
+	tg3_init_coal(tp);
+
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_ERR PFX "Cannot register net device, "
@@ -9298,6 +10087,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
 	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
 	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+	printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
+	       dev->name, tp->dma_rwctrl);
 
 	return 0;
 
@@ -9355,7 +10146,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	spin_lock_irq(&tp->lock);
 	spin_lock(&tp->tx_lock);
-	tg3_halt(tp, 1);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	spin_unlock(&tp->tx_lock);
 	spin_unlock_irq(&tp->lock);
 