Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--  drivers/net/tg3.c  226
1 files changed, 106 insertions, 120 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 49ad60b72657..b2ddd4522a87 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.58"
-#define DRV_MODULE_RELDATE	"May 22, 2006"
+#define DRV_MODULE_VERSION	"3.60"
+#define DRV_MODULE_RELDATE	"June 17, 2006"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -229,6 +229,8 @@ static struct pci_device_id tg3_pci_tbl[] = {
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
@@ -2965,6 +2967,27 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 	return err;
 }
 
+/* This is called whenever we suspect that the system chipset is re-
+ * ordering the sequence of MMIO to the tx send mailbox. The symptom
+ * is bogus tx completions. We try to recover by setting the
+ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
+ * in the workqueue.
+ */
+static void tg3_tx_recover(struct tg3 *tp)
+{
+	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
+	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
+
+	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
+	       "mapped I/O cycles to the network device, attempting to "
+	       "recover. Please report the problem to the driver maintainer "
+	       "and include system chipset information.\n", tp->dev->name);
+
+	spin_lock(&tp->lock);
+	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
+	spin_unlock(&tp->lock);
+}
+
 /* Tigon3 never reports partial packet sends. So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
@@ -2977,9 +3000,13 @@ static void tg3_tx(struct tg3 *tp)
 	while (sw_idx != hw_idx) {
 		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
 		struct sk_buff *skb = ri->skb;
-		int i;
+		int i, tx_bug = 0;
+
+		if (unlikely(skb == NULL)) {
+			tg3_tx_recover(tp);
+			return;
+		}
 
-		BUG_ON(skb == NULL);
 		pci_unmap_single(tp->pdev,
 				 pci_unmap_addr(ri, mapping),
 				 skb_headlen(skb),
@@ -2990,10 +3017,9 @@ static void tg3_tx(struct tg3 *tp)
 		sw_idx = NEXT_TX(sw_idx);
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			BUG_ON(sw_idx == hw_idx);
-
 			ri = &tp->tx_buffers[sw_idx];
-			BUG_ON(ri->skb != NULL);
+			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
+				tx_bug = 1;
 
 			pci_unmap_page(tp->pdev,
 				       pci_unmap_addr(ri, mapping),
@@ -3004,6 +3030,11 @@ static void tg3_tx(struct tg3 *tp)
 		}
 
 		dev_kfree_skb(skb);
+
+		if (unlikely(tx_bug)) {
+			tg3_tx_recover(tp);
+			return;
+		}
 	}
 
 	tp->tx_cons = sw_idx;
@@ -3331,6 +3362,11 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	/* run TX completion thread */
 	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
 		tg3_tx(tp);
+		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
+			netif_rx_complete(netdev);
+			schedule_work(&tp->reset_task);
+			return 0;
+		}
 	}
 
 	/* run RX thread, within the bounds set by NAPI.
@@ -3391,12 +3427,10 @@ static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
 	if (irq_sync)
 		tg3_irq_quiesce(tp);
 	spin_lock_bh(&tp->lock);
-	spin_lock(&tp->tx_lock);
 }
 
 static inline void tg3_full_unlock(struct tg3 *tp)
 {
-	spin_unlock(&tp->tx_lock);
 	spin_unlock_bh(&tp->lock);
 }
 
@@ -3579,6 +3613,13 @@ static void tg3_reset_task(void *_data)
 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 
+	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
+		tp->write32_tx_mbox = tg3_write32_tx_mbox;
+		tp->write32_rx_mbox = tg3_write_flush_reg32;
+		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
+	}
+
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 	tg3_init_hw(tp, 1);
 
@@ -3718,14 +3759,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	len = skb_headlen(skb);
 
-	/* No BH disabling for tx_lock here. We are running in BH disabled
-	 * context and TX reclaim runs via tp->poll inside of a software
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->poll inside of a software
 	 * interrupt. Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
-	if (!spin_trylock(&tp->tx_lock))
-		return NETDEV_TX_LOCKED;
-
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
@@ -3734,7 +3772,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
 			       "queue awake!\n", dev->name);
 		}
-		spin_unlock(&tp->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -3817,15 +3854,16 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
+		spin_lock(&tp->tx_lock);
 		netif_stop_queue(dev);
 		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
 	mmiowb();
-	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
 
@@ -3844,14 +3882,11 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 
 	len = skb_headlen(skb);
 
-	/* No BH disabling for tx_lock here. We are running in BH disabled
-	 * context and TX reclaim runs via tp->poll inside of a software
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->poll inside of a software
 	 * interrupt. Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
-	if (!spin_trylock(&tp->tx_lock))
-		return NETDEV_TX_LOCKED;
-
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
@@ -3860,7 +3895,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
 			       "queue awake!\n", dev->name);
 		}
-		spin_unlock(&tp->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -3998,15 +4032,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
+		spin_lock(&tp->tx_lock);
 		netif_stop_queue(dev);
 		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
 	mmiowb();
-	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
 
@@ -4485,9 +4520,8 @@ static void tg3_disable_nvram_access(struct tg3 *tp)
 /* tp->lock is held. */
 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
 {
-	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
-		tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
-			      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
 
 	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
 		switch (kind) {
@@ -4568,13 +4602,12 @@ static int tg3_chip_reset(struct tg3 *tp)
 	void (*write_op)(struct tg3 *, u32, u32);
 	int i;
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
-		tg3_nvram_lock(tp);
-		/* No matching tg3_nvram_unlock() after this because
-		 * chip reset below will undo the nvram lock.
-		 */
-		tp->nvram_lock_cnt = 0;
-	}
+	tg3_nvram_lock(tp);
+
+	/* No matching tg3_nvram_unlock() after this because
+	 * chip reset below will undo the nvram lock.
+	 */
+	tp->nvram_lock_cnt = 0;
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
@@ -4727,20 +4760,25 @@ static int tg3_chip_reset(struct tg3 *tp)
 	tw32_f(MAC_MODE, 0);
 	udelay(40);
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
-		/* Wait for firmware initialization to complete. */
-		for (i = 0; i < 100000; i++) {
-			tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
-			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
-				break;
-			udelay(10);
-		}
-		if (i >= 100000) {
-			printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
-			       "firmware will not restart magic=%08x\n",
-			       tp->dev->name, val);
-			return -ENODEV;
-		}
+	/* Wait for firmware initialization to complete. */
+	for (i = 0; i < 100000; i++) {
+		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+			break;
+		udelay(10);
+	}
+
+	/* Chip might not be fitted with firmware. Some Sun onboard
+	 * parts are configured like that. So don't signal the timeout
+	 * of the above loop as an error, but do report the lack of
+	 * running firmware once.
+	 */
+	if (i >= 100000 &&
+	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
+		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
+
+		printk(KERN_INFO PFX "%s: No firmware running.\n",
+		       tp->dev->name);
 	}
 
 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
@@ -9075,9 +9113,6 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
 {
 	int j;
 
-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
-		return;
-
 	tw32_f(GRC_EEPROM_ADDR,
 	     (EEPROM_ADDR_FSM_RESET |
 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -9210,11 +9245,6 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
 {
 	int ret;
 
-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
-		printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
-		return -EINVAL;
-	}
-
 	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
 		return tg3_nvram_read_using_eeprom(tp, offset, val);
 
@@ -9447,11 +9477,6 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
 {
 	int ret;
 
-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
-		printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
-		return -EINVAL;
-	}
-
 	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
@@ -9578,15 +9603,19 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
 			       tp->misc_host_ctrl);
 
+	/* The memory arbiter has to be enabled in order for SRAM accesses
+	 * to succeed. Normally on powerup the tg3 chip firmware will make
+	 * sure it is enabled, but other entities such as system netboot
+	 * code might disable it.
+	 */
+	val = tr32(MEMARB_MODE);
+	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
 	tp->phy_id = PHY_ID_INVALID;
 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
 
-	/* Do not even try poking around in here on Sun parts. */
-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
-		/* All SUN chips are built-in LOMs. */
-		tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
-		return;
-	}
+	/* Assume an onboard device by default. */
+	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
 
 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
@@ -9686,6 +9715,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 
 	if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
 		tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
+	else
+		tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
 
 	if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
 		tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
@@ -9834,16 +9865,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
 	int i;
 	u32 magic;
 
-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
-		/* Sun decided not to put the necessary bits in the
-		 * NVRAM of their onboard tg3 parts :(
-		 */
-		strcpy(tp->board_part_number, "Sun 570X");
-		return;
-	}
-
 	if (tg3_nvram_read_swab(tp, 0x0, &magic))
-		return;
+		goto out_not_found;
 
 	if (magic == TG3_EEPROM_MAGIC) {
 		for (i = 0; i < 256; i += 4) {
@@ -9874,6 +9897,9 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
 			break;
 		msleep(1);
 	}
+	if (!(tmp16 & 0x8000))
+		goto out_not_found;
+
 	pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
 			      &tmp);
 	tmp = cpu_to_le32(tmp);
@@ -9965,37 +9991,6 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
 	}
 }
 
-#ifdef CONFIG_SPARC64
-static int __devinit tg3_is_sun_570X(struct tg3 *tp)
-{
-	struct pci_dev *pdev = tp->pdev;
-	struct pcidev_cookie *pcp = pdev->sysdata;
-
-	if (pcp != NULL) {
-		int node = pcp->prom_node;
-		u32 venid;
-		int err;
-
-		err = prom_getproperty(node, "subsystem-vendor-id",
-				       (char *) &venid, sizeof(venid));
-		if (err == 0 || err == -1)
-			return 0;
-		if (venid == PCI_VENDOR_ID_SUN)
-			return 1;
-
-		/* TG3 chips onboard the SunBlade-2500 don't have the
-		 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
-		 * are distinguishable from non-Sun variants by being
-		 * named "network" by the firmware. Non-Sun cards will
-		 * show up as being named "ethernet".
-		 */
-		if (!strcmp(pcp->prom_name, "network"))
-			return 1;
-	}
-	return 0;
-}
-#endif
-
 static int __devinit tg3_get_invariants(struct tg3 *tp)
 {
 	static struct pci_device_id write_reorder_chipsets[] = {
@@ -10012,11 +10007,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	u16 pci_cmd;
 	int err;
 
-#ifdef CONFIG_SPARC64
-	if (tg3_is_sun_570X(tp))
-		tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
-#endif
-
 	/* Force memory write invalidate off. If we leave it on,
 	 * then on 5700_BX chips we have to enable a workaround.
 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
@@ -10312,8 +10302,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	if (tp->write32 == tg3_write_indirect_reg32 ||
 	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) ||
-	    (tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
 		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
 
 	/* Get eeprom hw config before calling tg3_set_power_state().
@@ -10594,8 +10583,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 #endif
 
 	mac_offset = 0x7c;
-	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
-	     !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
+	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
 			mac_offset = 0xcc;
@@ -10622,8 +10610,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 	}
 	if (!addr_ok) {
 		/* Next, try NVRAM. */
-		if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
-		    !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
+		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
 		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
 			dev->dev_addr[0] = ((hi >> 16) & 0xff);
 			dev->dev_addr[1] = ((hi >> 24) & 0xff);
@@ -11291,7 +11278,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	dev->features |= NETIF_F_LLTX;
 #if TG3_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	dev->vlan_rx_register = tg3_vlan_rx_register;