Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r-- | drivers/net/tg3.c | 2137 |
1 files changed, 1359 insertions, 778 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ba5d3fe753b6..ecc41cffb470 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@ | |||
4 | * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) | 4 | * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) |
5 | * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) | 5 | * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) |
6 | * Copyright (C) 2004 Sun Microsystems Inc. | 6 | * Copyright (C) 2004 Sun Microsystems Inc. |
7 | * Copyright (C) 2005-2009 Broadcom Corporation. | 7 | * Copyright (C) 2005-2010 Broadcom Corporation. |
8 | * | 8 | * |
9 | * Firmware is: | 9 | * Firmware is: |
10 | * Derived from proprietary unpublished source code, | 10 | * Derived from proprietary unpublished source code, |
@@ -67,9 +67,8 @@ | |||
67 | #include "tg3.h" | 67 | #include "tg3.h" |
68 | 68 | ||
69 | #define DRV_MODULE_NAME "tg3" | 69 | #define DRV_MODULE_NAME "tg3" |
70 | #define PFX DRV_MODULE_NAME ": " | 70 | #define DRV_MODULE_VERSION "3.108" |
71 | #define DRV_MODULE_VERSION "3.102" | 71 | #define DRV_MODULE_RELDATE "February 17, 2010" |
72 | #define DRV_MODULE_RELDATE "September 1, 2009" | ||
73 | 72 | ||
74 | #define TG3_DEF_MAC_MODE 0 | 73 | #define TG3_DEF_MAC_MODE 0 |
75 | #define TG3_DEF_RX_MODE 0 | 74 | #define TG3_DEF_RX_MODE 0 |
@@ -137,6 +136,12 @@ | |||
137 | #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) | 136 | #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) |
138 | #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) | 137 | #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) |
139 | 138 | ||
139 | #define TG3_RX_STD_BUFF_RING_SIZE \ | ||
140 | (sizeof(struct ring_info) * TG3_RX_RING_SIZE) | ||
141 | |||
142 | #define TG3_RX_JMB_BUFF_RING_SIZE \ | ||
143 | (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) | ||
144 | |||
140 | /* minimum number of free TX descriptors required to wake up TX process */ | 145 | /* minimum number of free TX descriptors required to wake up TX process */ |
141 | #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) | 146 | #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) |
142 | 147 | ||
@@ -152,7 +157,7 @@ | |||
152 | #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" | 157 | #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" |
153 | 158 | ||
154 | static char version[] __devinitdata = | 159 | static char version[] __devinitdata = |
155 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 160 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; |
156 | 161 | ||
157 | MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); | 162 | MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); |
158 | MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); | 163 | MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); |
@@ -168,7 +173,7 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ | |||
168 | module_param(tg3_debug, int, 0); | 173 | module_param(tg3_debug, int, 0); |
169 | MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); | 174 | MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); |
170 | 175 | ||
171 | static struct pci_device_id tg3_pci_tbl[] = { | 176 | static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { |
172 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, | 177 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, |
173 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, | 178 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, |
174 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, | 179 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, |
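The hunk above converts the open-coded pci_device_id array to the DEFINE_PCI_DEVICE_TABLE() helper. A minimal usage sketch, assuming the 2.6.33-era definition in <linux/pci.h> (the macro expands to a const, __devinitconst-qualified struct pci_device_id array); example_tbl is a hypothetical name used only for illustration:

    static DEFINE_PCI_DEVICE_TABLE(example_tbl) = {
            {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
            { }     /* all-zero sentinel terminates the table */
    };
    MODULE_DEVICE_TABLE(pci, example_tbl);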
@@ -235,6 +240,15 @@ static struct pci_device_id tg3_pci_tbl[] = { | |||
235 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, | 240 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, |
236 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, | 241 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, |
237 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, | 242 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, |
243 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, | ||
244 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, | ||
245 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)}, | ||
246 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, | ||
247 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, | ||
248 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, | ||
249 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, | ||
250 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, | ||
251 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, | ||
238 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, | 252 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, |
239 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, | 253 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, |
240 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, | 254 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, |
@@ -396,7 +410,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) | |||
396 | TG3_64BIT_REG_LOW, val); | 410 | TG3_64BIT_REG_LOW, val); |
397 | return; | 411 | return; |
398 | } | 412 | } |
399 | if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { | 413 | if (off == TG3_RX_STD_PROD_IDX_REG) { |
400 | pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + | 414 | pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + |
401 | TG3_64BIT_REG_LOW, val); | 415 | TG3_64BIT_REG_LOW, val); |
402 | return; | 416 | return; |
@@ -627,7 +641,6 @@ static void tg3_disable_ints(struct tg3 *tp) | |||
627 | static void tg3_enable_ints(struct tg3 *tp) | 641 | static void tg3_enable_ints(struct tg3 *tp) |
628 | { | 642 | { |
629 | int i; | 643 | int i; |
630 | u32 coal_now = 0; | ||
631 | 644 | ||
632 | tp->irq_sync = 0; | 645 | tp->irq_sync = 0; |
633 | wmb(); | 646 | wmb(); |
@@ -635,13 +648,14 @@ static void tg3_enable_ints(struct tg3 *tp) | |||
635 | tw32(TG3PCI_MISC_HOST_CTRL, | 648 | tw32(TG3PCI_MISC_HOST_CTRL, |
636 | (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); | 649 | (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); |
637 | 650 | ||
651 | tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; | ||
638 | for (i = 0; i < tp->irq_cnt; i++) { | 652 | for (i = 0; i < tp->irq_cnt; i++) { |
639 | struct tg3_napi *tnapi = &tp->napi[i]; | 653 | struct tg3_napi *tnapi = &tp->napi[i]; |
640 | tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); | 654 | tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); |
641 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) | 655 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) |
642 | tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); | 656 | tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); |
643 | 657 | ||
644 | coal_now |= tnapi->coal_now; | 658 | tp->coal_now |= tnapi->coal_now; |
645 | } | 659 | } |
646 | 660 | ||
647 | /* Force an initial interrupt */ | 661 | /* Force an initial interrupt */ |
@@ -649,8 +663,9 @@ static void tg3_enable_ints(struct tg3 *tp) | |||
649 | (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) | 663 | (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) |
650 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); | 664 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); |
651 | else | 665 | else |
652 | tw32(HOSTCC_MODE, tp->coalesce_mode | | 666 | tw32(HOSTCC_MODE, tp->coal_now); |
653 | HOSTCC_MODE_ENABLE | coal_now); | 667 | |
668 | tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now); | ||
654 | } | 669 | } |
655 | 670 | ||
656 | static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) | 671 | static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) |
@@ -937,18 +952,19 @@ static void tg3_mdio_config_5785(struct tg3 *tp) | |||
937 | u32 val; | 952 | u32 val; |
938 | struct phy_device *phydev; | 953 | struct phy_device *phydev; |
939 | 954 | ||
940 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 955 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
941 | switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { | 956 | switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { |
942 | case TG3_PHY_ID_BCM50610: | 957 | case PHY_ID_BCM50610: |
958 | case PHY_ID_BCM50610M: | ||
943 | val = MAC_PHYCFG2_50610_LED_MODES; | 959 | val = MAC_PHYCFG2_50610_LED_MODES; |
944 | break; | 960 | break; |
945 | case TG3_PHY_ID_BCMAC131: | 961 | case PHY_ID_BCMAC131: |
946 | val = MAC_PHYCFG2_AC131_LED_MODES; | 962 | val = MAC_PHYCFG2_AC131_LED_MODES; |
947 | break; | 963 | break; |
948 | case TG3_PHY_ID_RTL8211C: | 964 | case PHY_ID_RTL8211C: |
949 | val = MAC_PHYCFG2_RTL8211C_LED_MODES; | 965 | val = MAC_PHYCFG2_RTL8211C_LED_MODES; |
950 | break; | 966 | break; |
951 | case TG3_PHY_ID_RTL8201E: | 967 | case PHY_ID_RTL8201E: |
952 | val = MAC_PHYCFG2_RTL8201E_LED_MODES; | 968 | val = MAC_PHYCFG2_RTL8201E_LED_MODES; |
953 | break; | 969 | break; |
954 | default: | 970 | default: |
@@ -967,7 +983,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp) | |||
967 | return; | 983 | return; |
968 | } | 984 | } |
969 | 985 | ||
970 | if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) | 986 | if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) |
971 | val |= MAC_PHYCFG2_EMODE_MASK_MASK | | 987 | val |= MAC_PHYCFG2_EMODE_MASK_MASK | |
972 | MAC_PHYCFG2_FMODE_MASK_MASK | | 988 | MAC_PHYCFG2_FMODE_MASK_MASK | |
973 | MAC_PHYCFG2_GMODE_MASK_MASK | | 989 | MAC_PHYCFG2_GMODE_MASK_MASK | |
@@ -980,7 +996,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp) | |||
980 | val = tr32(MAC_PHYCFG1); | 996 | val = tr32(MAC_PHYCFG1); |
981 | val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | | 997 | val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | |
982 | MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); | 998 | MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); |
983 | if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) { | 999 | if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { |
984 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) | 1000 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) |
985 | val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; | 1001 | val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; |
986 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) | 1002 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) |
@@ -998,7 +1014,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp) | |||
998 | MAC_RGMII_MODE_TX_ENABLE | | 1014 | MAC_RGMII_MODE_TX_ENABLE | |
999 | MAC_RGMII_MODE_TX_LOWPWR | | 1015 | MAC_RGMII_MODE_TX_LOWPWR | |
1000 | MAC_RGMII_MODE_TX_RESET); | 1016 | MAC_RGMII_MODE_TX_RESET); |
1001 | if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) { | 1017 | if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { |
1002 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) | 1018 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) |
1003 | val |= MAC_RGMII_MODE_RX_INT_B | | 1019 | val |= MAC_RGMII_MODE_RX_INT_B | |
1004 | MAC_RGMII_MODE_RX_QUALITY | | 1020 | MAC_RGMII_MODE_RX_QUALITY | |
@@ -1018,6 +1034,17 @@ static void tg3_mdio_start(struct tg3 *tp) | |||
1018 | tw32_f(MAC_MI_MODE, tp->mi_mode); | 1034 | tw32_f(MAC_MI_MODE, tp->mi_mode); |
1019 | udelay(80); | 1035 | udelay(80); |
1020 | 1036 | ||
1037 | if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && | ||
1038 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | ||
1039 | tg3_mdio_config_5785(tp); | ||
1040 | } | ||
1041 | |||
1042 | static int tg3_mdio_init(struct tg3 *tp) | ||
1043 | { | ||
1044 | int i; | ||
1045 | u32 reg; | ||
1046 | struct phy_device *phydev; | ||
1047 | |||
1021 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 1048 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { |
1022 | u32 funcnum, is_serdes; | 1049 | u32 funcnum, is_serdes; |
1023 | 1050 | ||
@@ -1027,22 +1054,15 @@ static void tg3_mdio_start(struct tg3 *tp) | |||
1027 | else | 1054 | else |
1028 | tp->phy_addr = 1; | 1055 | tp->phy_addr = 1; |
1029 | 1056 | ||
1030 | is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; | 1057 | if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) |
1058 | is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; | ||
1059 | else | ||
1060 | is_serdes = tr32(TG3_CPMU_PHY_STRAP) & | ||
1061 | TG3_CPMU_PHY_STRAP_IS_SERDES; | ||
1031 | if (is_serdes) | 1062 | if (is_serdes) |
1032 | tp->phy_addr += 7; | 1063 | tp->phy_addr += 7; |
1033 | } else | 1064 | } else |
1034 | tp->phy_addr = PHY_ADDR; | 1065 | tp->phy_addr = TG3_PHY_MII_ADDR; |
1035 | |||
1036 | if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && | ||
1037 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | ||
1038 | tg3_mdio_config_5785(tp); | ||
1039 | } | ||
1040 | |||
1041 | static int tg3_mdio_init(struct tg3 *tp) | ||
1042 | { | ||
1043 | int i; | ||
1044 | u32 reg; | ||
1045 | struct phy_device *phydev; | ||
1046 | 1066 | ||
1047 | tg3_mdio_start(tp); | 1067 | tg3_mdio_start(tp); |
1048 | 1068 | ||
@@ -1062,7 +1082,7 @@ static int tg3_mdio_init(struct tg3 *tp) | |||
1062 | tp->mdio_bus->read = &tg3_mdio_read; | 1082 | tp->mdio_bus->read = &tg3_mdio_read; |
1063 | tp->mdio_bus->write = &tg3_mdio_write; | 1083 | tp->mdio_bus->write = &tg3_mdio_write; |
1064 | tp->mdio_bus->reset = &tg3_mdio_reset; | 1084 | tp->mdio_bus->reset = &tg3_mdio_reset; |
1065 | tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR); | 1085 | tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); |
1066 | tp->mdio_bus->irq = &tp->mdio_irq[0]; | 1086 | tp->mdio_bus->irq = &tp->mdio_irq[0]; |
1067 | 1087 | ||
1068 | for (i = 0; i < PHY_MAX_ADDR; i++) | 1088 | for (i = 0; i < PHY_MAX_ADDR; i++) |
@@ -1078,39 +1098,45 @@ static int tg3_mdio_init(struct tg3 *tp) | |||
1078 | 1098 | ||
1079 | i = mdiobus_register(tp->mdio_bus); | 1099 | i = mdiobus_register(tp->mdio_bus); |
1080 | if (i) { | 1100 | if (i) { |
1081 | printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n", | 1101 | netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i); |
1082 | tp->dev->name, i); | ||
1083 | mdiobus_free(tp->mdio_bus); | 1102 | mdiobus_free(tp->mdio_bus); |
1084 | return i; | 1103 | return i; |
1085 | } | 1104 | } |
1086 | 1105 | ||
1087 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 1106 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
1088 | 1107 | ||
1089 | if (!phydev || !phydev->drv) { | 1108 | if (!phydev || !phydev->drv) { |
1090 | printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name); | 1109 | netdev_warn(tp->dev, "No PHY devices\n"); |
1091 | mdiobus_unregister(tp->mdio_bus); | 1110 | mdiobus_unregister(tp->mdio_bus); |
1092 | mdiobus_free(tp->mdio_bus); | 1111 | mdiobus_free(tp->mdio_bus); |
1093 | return -ENODEV; | 1112 | return -ENODEV; |
1094 | } | 1113 | } |
1095 | 1114 | ||
1096 | switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { | 1115 | switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { |
1097 | case TG3_PHY_ID_BCM57780: | 1116 | case PHY_ID_BCM57780: |
1098 | phydev->interface = PHY_INTERFACE_MODE_GMII; | 1117 | phydev->interface = PHY_INTERFACE_MODE_GMII; |
1118 | phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; | ||
1099 | break; | 1119 | break; |
1100 | case TG3_PHY_ID_BCM50610: | 1120 | case PHY_ID_BCM50610: |
1101 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) | 1121 | case PHY_ID_BCM50610M: |
1122 | phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | | ||
1123 | PHY_BRCM_RX_REFCLK_UNUSED | | ||
1124 | PHY_BRCM_DIS_TXCRXC_NOENRGY | | ||
1125 | PHY_BRCM_AUTO_PWRDWN_ENABLE; | ||
1126 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE) | ||
1102 | phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; | 1127 | phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; |
1103 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) | 1128 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) |
1104 | phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; | 1129 | phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; |
1105 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) | 1130 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) |
1106 | phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; | 1131 | phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; |
1107 | /* fallthru */ | 1132 | /* fallthru */ |
1108 | case TG3_PHY_ID_RTL8211C: | 1133 | case PHY_ID_RTL8211C: |
1109 | phydev->interface = PHY_INTERFACE_MODE_RGMII; | 1134 | phydev->interface = PHY_INTERFACE_MODE_RGMII; |
1110 | break; | 1135 | break; |
1111 | case TG3_PHY_ID_RTL8201E: | 1136 | case PHY_ID_RTL8201E: |
1112 | case TG3_PHY_ID_BCMAC131: | 1137 | case PHY_ID_BCMAC131: |
1113 | phydev->interface = PHY_INTERFACE_MODE_MII; | 1138 | phydev->interface = PHY_INTERFACE_MODE_MII; |
1139 | phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; | ||
1114 | tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; | 1140 | tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; |
1115 | break; | 1141 | break; |
1116 | } | 1142 | } |
@@ -1224,27 +1250,22 @@ static void tg3_ump_link_report(struct tg3 *tp) | |||
1224 | static void tg3_link_report(struct tg3 *tp) | 1250 | static void tg3_link_report(struct tg3 *tp) |
1225 | { | 1251 | { |
1226 | if (!netif_carrier_ok(tp->dev)) { | 1252 | if (!netif_carrier_ok(tp->dev)) { |
1227 | if (netif_msg_link(tp)) | 1253 | netif_info(tp, link, tp->dev, "Link is down\n"); |
1228 | printk(KERN_INFO PFX "%s: Link is down.\n", | ||
1229 | tp->dev->name); | ||
1230 | tg3_ump_link_report(tp); | 1254 | tg3_ump_link_report(tp); |
1231 | } else if (netif_msg_link(tp)) { | 1255 | } else if (netif_msg_link(tp)) { |
1232 | printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", | 1256 | netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", |
1233 | tp->dev->name, | 1257 | (tp->link_config.active_speed == SPEED_1000 ? |
1234 | (tp->link_config.active_speed == SPEED_1000 ? | 1258 | 1000 : |
1235 | 1000 : | 1259 | (tp->link_config.active_speed == SPEED_100 ? |
1236 | (tp->link_config.active_speed == SPEED_100 ? | 1260 | 100 : 10)), |
1237 | 100 : 10)), | 1261 | (tp->link_config.active_duplex == DUPLEX_FULL ? |
1238 | (tp->link_config.active_duplex == DUPLEX_FULL ? | 1262 | "full" : "half")); |
1239 | "full" : "half")); | 1263 | |
1240 | 1264 | netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", | |
1241 | printk(KERN_INFO PFX | 1265 | (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? |
1242 | "%s: Flow control is %s for TX and %s for RX.\n", | 1266 | "on" : "off", |
1243 | tp->dev->name, | 1267 | (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? |
1244 | (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? | 1268 | "on" : "off"); |
1245 | "on" : "off", | ||
1246 | (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? | ||
1247 | "on" : "off"); | ||
1248 | tg3_ump_link_report(tp); | 1269 | tg3_ump_link_report(tp); |
1249 | } | 1270 | } |
1250 | } | 1271 | } |
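This hunk is part of the driver-wide move from printk(KERN_* PFX ...) to the netdev_*()/netif_*() helpers, which is also why the PFX macro disappears near the top of the file. A short sketch of the two forms used here (speed and duplex are placeholder variables for illustration); netif_info() tests the driver's msg_enable bitmap itself, so the link-down path no longer needs an explicit netif_msg_link() check, while the link-up path keeps its check because netdev_info() does not filter:

    netif_info(tp, link, tp->dev, "Link is down\n");    /* gated by NETIF_MSG_LINK */
    netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                speed, duplex);                         /* no msg_enable filtering */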
@@ -1311,7 +1332,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) | |||
1311 | u32 old_tx_mode = tp->tx_mode; | 1332 | u32 old_tx_mode = tp->tx_mode; |
1312 | 1333 | ||
1313 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) | 1334 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) |
1314 | autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg; | 1335 | autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; |
1315 | else | 1336 | else |
1316 | autoneg = tp->link_config.autoneg; | 1337 | autoneg = tp->link_config.autoneg; |
1317 | 1338 | ||
@@ -1348,7 +1369,7 @@ static void tg3_adjust_link(struct net_device *dev) | |||
1348 | u8 oldflowctrl, linkmesg = 0; | 1369 | u8 oldflowctrl, linkmesg = 0; |
1349 | u32 mac_mode, lcl_adv, rmt_adv; | 1370 | u32 mac_mode, lcl_adv, rmt_adv; |
1350 | struct tg3 *tp = netdev_priv(dev); | 1371 | struct tg3 *tp = netdev_priv(dev); |
1351 | struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 1372 | struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
1352 | 1373 | ||
1353 | spin_lock_bh(&tp->lock); | 1374 | spin_lock_bh(&tp->lock); |
1354 | 1375 | ||
@@ -1363,8 +1384,11 @@ static void tg3_adjust_link(struct net_device *dev) | |||
1363 | 1384 | ||
1364 | if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) | 1385 | if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) |
1365 | mac_mode |= MAC_MODE_PORT_MODE_MII; | 1386 | mac_mode |= MAC_MODE_PORT_MODE_MII; |
1366 | else | 1387 | else if (phydev->speed == SPEED_1000 || |
1388 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) | ||
1367 | mac_mode |= MAC_MODE_PORT_MODE_GMII; | 1389 | mac_mode |= MAC_MODE_PORT_MODE_GMII; |
1390 | else | ||
1391 | mac_mode |= MAC_MODE_PORT_MODE_MII; | ||
1368 | 1392 | ||
1369 | if (phydev->duplex == DUPLEX_HALF) | 1393 | if (phydev->duplex == DUPLEX_HALF) |
1370 | mac_mode |= MAC_MODE_HALF_DUPLEX; | 1394 | mac_mode |= MAC_MODE_HALF_DUPLEX; |
@@ -1434,13 +1458,13 @@ static int tg3_phy_init(struct tg3 *tp) | |||
1434 | /* Bring the PHY back to a known state. */ | 1458 | /* Bring the PHY back to a known state. */ |
1435 | tg3_bmcr_reset(tp); | 1459 | tg3_bmcr_reset(tp); |
1436 | 1460 | ||
1437 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 1461 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
1438 | 1462 | ||
1439 | /* Attach the MAC to the PHY. */ | 1463 | /* Attach the MAC to the PHY. */ |
1440 | phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, | 1464 | phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, |
1441 | phydev->dev_flags, phydev->interface); | 1465 | phydev->dev_flags, phydev->interface); |
1442 | if (IS_ERR(phydev)) { | 1466 | if (IS_ERR(phydev)) { |
1443 | printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name); | 1467 | netdev_err(tp->dev, "Could not attach to PHY\n"); |
1444 | return PTR_ERR(phydev); | 1468 | return PTR_ERR(phydev); |
1445 | } | 1469 | } |
1446 | 1470 | ||
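For context, the phylib attach sequence that these PHY_ADDR to TG3_PHY_MII_ADDR hunks keep touching looks roughly like this once assembled (a sketch with error handling trimmed; the 2.6.33-era phy_connect() signature takes the bus-id string, the adjust-link callback, the dev_flags hints and the interface mode):

    struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

    phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
                         phydev->dev_flags, phydev->interface);
    if (IS_ERR(phydev)) {
            netdev_err(tp->dev, "Could not attach to PHY\n");
            return PTR_ERR(phydev);
    }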
@@ -1461,7 +1485,7 @@ static int tg3_phy_init(struct tg3 *tp) | |||
1461 | SUPPORTED_Asym_Pause); | 1485 | SUPPORTED_Asym_Pause); |
1462 | break; | 1486 | break; |
1463 | default: | 1487 | default: |
1464 | phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]); | 1488 | phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); |
1465 | return -EINVAL; | 1489 | return -EINVAL; |
1466 | } | 1490 | } |
1467 | 1491 | ||
@@ -1479,7 +1503,7 @@ static void tg3_phy_start(struct tg3 *tp) | |||
1479 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 1503 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
1480 | return; | 1504 | return; |
1481 | 1505 | ||
1482 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 1506 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
1483 | 1507 | ||
1484 | if (tp->link_config.phy_is_low_power) { | 1508 | if (tp->link_config.phy_is_low_power) { |
1485 | tp->link_config.phy_is_low_power = 0; | 1509 | tp->link_config.phy_is_low_power = 0; |
@@ -1499,13 +1523,13 @@ static void tg3_phy_stop(struct tg3 *tp) | |||
1499 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 1523 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
1500 | return; | 1524 | return; |
1501 | 1525 | ||
1502 | phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]); | 1526 | phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); |
1503 | } | 1527 | } |
1504 | 1528 | ||
1505 | static void tg3_phy_fini(struct tg3 *tp) | 1529 | static void tg3_phy_fini(struct tg3 *tp) |
1506 | { | 1530 | { |
1507 | if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { | 1531 | if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { |
1508 | phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]); | 1532 | phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); |
1509 | tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; | 1533 | tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; |
1510 | } | 1534 | } |
1511 | } | 1535 | } |
@@ -1540,7 +1564,9 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) | |||
1540 | { | 1564 | { |
1541 | u32 reg; | 1565 | u32 reg; |
1542 | 1566 | ||
1543 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 1567 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || |
1568 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && | ||
1569 | (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) | ||
1544 | return; | 1570 | return; |
1545 | 1571 | ||
1546 | if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { | 1572 | if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { |
@@ -1915,6 +1941,10 @@ static int tg3_phy_reset(struct tg3 *tp) | |||
1915 | } | 1941 | } |
1916 | } | 1942 | } |
1917 | 1943 | ||
1944 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && | ||
1945 | (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) | ||
1946 | return 0; | ||
1947 | |||
1918 | tg3_phy_apply_otp(tp); | 1948 | tg3_phy_apply_otp(tp); |
1919 | 1949 | ||
1920 | if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) | 1950 | if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) |
@@ -1958,7 +1988,7 @@ out: | |||
1958 | } | 1988 | } |
1959 | /* Set Extended packet length bit (bit 14) on all chips that */ | 1989 | /* Set Extended packet length bit (bit 14) on all chips that */ |
1960 | /* support jumbo frames */ | 1990 | /* support jumbo frames */ |
1961 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { | 1991 | if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { |
1962 | /* Cannot do read-modify-write on 5401 */ | 1992 | /* Cannot do read-modify-write on 5401 */ |
1963 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); | 1993 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); |
1964 | } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 1994 | } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { |
@@ -1995,7 +2025,9 @@ static void tg3_frob_aux_power(struct tg3 *tp) | |||
1995 | { | 2025 | { |
1996 | struct tg3 *tp_peer = tp; | 2026 | struct tg3 *tp_peer = tp; |
1997 | 2027 | ||
1998 | if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0) | 2028 | /* The GPIOs do something completely different on 57765. */ |
2029 | if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || | ||
2030 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
1999 | return; | 2031 | return; |
2000 | 2032 | ||
2001 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 2033 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || |
@@ -2108,7 +2140,7 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) | |||
2108 | { | 2140 | { |
2109 | if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) | 2141 | if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) |
2110 | return 1; | 2142 | return 1; |
2111 | else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) { | 2143 | else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { |
2112 | if (speed != SPEED_10) | 2144 | if (speed != SPEED_10) |
2113 | return 1; | 2145 | return 1; |
2114 | } else if (speed == SPEED_10) | 2146 | } else if (speed == SPEED_10) |
@@ -2149,6 +2181,26 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) | |||
2149 | tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); | 2181 | tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); |
2150 | udelay(40); | 2182 | udelay(40); |
2151 | return; | 2183 | return; |
2184 | } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { | ||
2185 | u32 phytest; | ||
2186 | if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { | ||
2187 | u32 phy; | ||
2188 | |||
2189 | tg3_writephy(tp, MII_ADVERTISE, 0); | ||
2190 | tg3_writephy(tp, MII_BMCR, | ||
2191 | BMCR_ANENABLE | BMCR_ANRESTART); | ||
2192 | |||
2193 | tg3_writephy(tp, MII_TG3_FET_TEST, | ||
2194 | phytest | MII_TG3_FET_SHADOW_EN); | ||
2195 | if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { | ||
2196 | phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; | ||
2197 | tg3_writephy(tp, | ||
2198 | MII_TG3_FET_SHDW_AUXMODE4, | ||
2199 | phy); | ||
2200 | } | ||
2201 | tg3_writephy(tp, MII_TG3_FET_TEST, phytest); | ||
2202 | } | ||
2203 | return; | ||
2152 | } else if (do_low_power) { | 2204 | } else if (do_low_power) { |
2153 | tg3_writephy(tp, MII_TG3_EXT_CTRL, | 2205 | tg3_writephy(tp, MII_TG3_EXT_CTRL, |
2154 | MII_TG3_EXT_CTRL_FORCE_LED_OFF); | 2206 | MII_TG3_EXT_CTRL_FORCE_LED_OFF); |
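The new FET branch above uses the usual shadow-register idiom for these PHYs: read MII_TG3_FET_TEST, set the shadow-enable bit to expose the shadowed register bank, update the shadowed register, then restore the original test value. Condensed from the hunk:

    if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
            tg3_writephy(tp, MII_TG3_FET_TEST, phytest | MII_TG3_FET_SHADOW_EN);
            if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy))
                    tg3_writephy(tp, MII_TG3_FET_SHDW_AUXMODE4,
                                 phy | MII_TG3_FET_SHDW_AUXMODE4_SBPD);
            tg3_writephy(tp, MII_TG3_FET_TEST, phytest);    /* leave shadow mode */
    }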
@@ -2218,7 +2270,7 @@ static void tg3_nvram_unlock(struct tg3 *tp) | |||
2218 | static void tg3_enable_nvram_access(struct tg3 *tp) | 2270 | static void tg3_enable_nvram_access(struct tg3 *tp) |
2219 | { | 2271 | { |
2220 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 2272 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
2221 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { | 2273 | !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { |
2222 | u32 nvaccess = tr32(NVRAM_ACCESS); | 2274 | u32 nvaccess = tr32(NVRAM_ACCESS); |
2223 | 2275 | ||
2224 | tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); | 2276 | tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); |
@@ -2229,7 +2281,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp) | |||
2229 | static void tg3_disable_nvram_access(struct tg3 *tp) | 2281 | static void tg3_disable_nvram_access(struct tg3 *tp) |
2230 | { | 2282 | { |
2231 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 2283 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
2232 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { | 2284 | !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { |
2233 | u32 nvaccess = tr32(NVRAM_ACCESS); | 2285 | u32 nvaccess = tr32(NVRAM_ACCESS); |
2234 | 2286 | ||
2235 | tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); | 2287 | tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); |
@@ -2441,8 +2493,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2441 | break; | 2493 | break; |
2442 | 2494 | ||
2443 | default: | 2495 | default: |
2444 | printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n", | 2496 | netdev_err(tp->dev, "Invalid power state (D%d) requested\n", |
2445 | tp->dev->name, state); | 2497 | state); |
2446 | return -EINVAL; | 2498 | return -EINVAL; |
2447 | } | 2499 | } |
2448 | 2500 | ||
@@ -2474,7 +2526,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2474 | struct phy_device *phydev; | 2526 | struct phy_device *phydev; |
2475 | u32 phyid, advertising; | 2527 | u32 phyid, advertising; |
2476 | 2528 | ||
2477 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 2529 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
2478 | 2530 | ||
2479 | tp->link_config.phy_is_low_power = 1; | 2531 | tp->link_config.phy_is_low_power = 1; |
2480 | 2532 | ||
@@ -2504,11 +2556,11 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2504 | phy_start_aneg(phydev); | 2556 | phy_start_aneg(phydev); |
2505 | 2557 | ||
2506 | phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; | 2558 | phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; |
2507 | if (phyid != TG3_PHY_ID_BCMAC131) { | 2559 | if (phyid != PHY_ID_BCMAC131) { |
2508 | phyid &= TG3_PHY_OUI_MASK; | 2560 | phyid &= PHY_BCM_OUI_MASK; |
2509 | if (phyid == TG3_PHY_OUI_1 || | 2561 | if (phyid == PHY_BCM_OUI_1 || |
2510 | phyid == TG3_PHY_OUI_2 || | 2562 | phyid == PHY_BCM_OUI_2 || |
2511 | phyid == TG3_PHY_OUI_3) | 2563 | phyid == PHY_BCM_OUI_3) |
2512 | do_low_power = true; | 2564 | do_low_power = true; |
2513 | } | 2565 | } |
2514 | } | 2566 | } |
@@ -3018,7 +3070,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) | |||
3018 | if (force_reset) | 3070 | if (force_reset) |
3019 | tg3_phy_reset(tp); | 3071 | tg3_phy_reset(tp); |
3020 | 3072 | ||
3021 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { | 3073 | if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { |
3022 | tg3_readphy(tp, MII_BMSR, &bmsr); | 3074 | tg3_readphy(tp, MII_BMSR, &bmsr); |
3023 | if (tg3_readphy(tp, MII_BMSR, &bmsr) || | 3075 | if (tg3_readphy(tp, MII_BMSR, &bmsr) || |
3024 | !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) | 3076 | !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) |
@@ -3039,7 +3091,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) | |||
3039 | } | 3091 | } |
3040 | } | 3092 | } |
3041 | 3093 | ||
3042 | if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 && | 3094 | if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == |
3095 | TG3_PHY_REV_BCM5401_B0 && | ||
3043 | !(bmsr & BMSR_LSTATUS) && | 3096 | !(bmsr & BMSR_LSTATUS) && |
3044 | tp->link_config.active_speed == SPEED_1000) { | 3097 | tp->link_config.active_speed == SPEED_1000) { |
3045 | err = tg3_phy_reset(tp); | 3098 | err = tg3_phy_reset(tp); |
@@ -3194,7 +3247,7 @@ relink: | |||
3194 | /* ??? Without this setting Netgear GA302T PHY does not | 3247 | /* ??? Without this setting Netgear GA302T PHY does not |
3195 | * ??? send/receive packets... | 3248 | * ??? send/receive packets... |
3196 | */ | 3249 | */ |
3197 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 && | 3250 | if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && |
3198 | tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { | 3251 | tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { |
3199 | tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; | 3252 | tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; |
3200 | tw32_f(MAC_MI_MODE, tp->mi_mode); | 3253 | tw32_f(MAC_MI_MODE, tp->mi_mode); |
@@ -3243,15 +3296,6 @@ relink: | |||
3243 | pci_write_config_word(tp->pdev, | 3296 | pci_write_config_word(tp->pdev, |
3244 | tp->pcie_cap + PCI_EXP_LNKCTL, | 3297 | tp->pcie_cap + PCI_EXP_LNKCTL, |
3245 | newlnkctl); | 3298 | newlnkctl); |
3246 | } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) { | ||
3247 | u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL); | ||
3248 | if (tp->link_config.active_speed == SPEED_100 || | ||
3249 | tp->link_config.active_speed == SPEED_10) | ||
3250 | newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; | ||
3251 | else | ||
3252 | newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN; | ||
3253 | if (newreg != oldreg) | ||
3254 | tw32(TG3_PCIE_LNKCTL, newreg); | ||
3255 | } | 3299 | } |
3256 | 3300 | ||
3257 | if (current_link_up != netif_carrier_ok(tp->dev)) { | 3301 | if (current_link_up != netif_carrier_ok(tp->dev)) { |
@@ -3918,7 +3962,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) | |||
3918 | tw32_f(MAC_MODE, tp->mac_mode); | 3962 | tw32_f(MAC_MODE, tp->mac_mode); |
3919 | udelay(40); | 3963 | udelay(40); |
3920 | 3964 | ||
3921 | if (tp->phy_id == PHY_ID_BCM8002) | 3965 | if (tp->phy_id == TG3_PHY_ID_BCM8002) |
3922 | tg3_init_bcm8002(tp); | 3966 | tg3_init_bcm8002(tp); |
3923 | 3967 | ||
3924 | /* Enable link change event even when serdes polling. */ | 3968 | /* Enable link change event even when serdes polling. */ |
@@ -4291,10 +4335,8 @@ static void tg3_tx_recover(struct tg3 *tp) | |||
4291 | BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || | 4335 | BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || |
4292 | tp->write32_tx_mbox == tg3_write_indirect_mbox); | 4336 | tp->write32_tx_mbox == tg3_write_indirect_mbox); |
4293 | 4337 | ||
4294 | printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-" | 4338 | netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n" |
4295 | "mapped I/O cycles to the network device, attempting to " | 4339 | "Please report the problem to the driver maintainer and include system chipset information.\n"); |
4296 | "recover. Please report the problem to the driver maintainer " | ||
4297 | "and include system chipset information.\n", tp->dev->name); | ||
4298 | 4340 | ||
4299 | spin_lock(&tp->lock); | 4341 | spin_lock(&tp->lock); |
4300 | tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; | 4342 | tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; |
@@ -4320,13 +4362,13 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4320 | struct netdev_queue *txq; | 4362 | struct netdev_queue *txq; |
4321 | int index = tnapi - tp->napi; | 4363 | int index = tnapi - tp->napi; |
4322 | 4364 | ||
4323 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) | 4365 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) |
4324 | index--; | 4366 | index--; |
4325 | 4367 | ||
4326 | txq = netdev_get_tx_queue(tp->dev, index); | 4368 | txq = netdev_get_tx_queue(tp->dev, index); |
4327 | 4369 | ||
4328 | while (sw_idx != hw_idx) { | 4370 | while (sw_idx != hw_idx) { |
4329 | struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; | 4371 | struct ring_info *ri = &tnapi->tx_buffers[sw_idx]; |
4330 | struct sk_buff *skb = ri->skb; | 4372 | struct sk_buff *skb = ri->skb; |
4331 | int i, tx_bug = 0; | 4373 | int i, tx_bug = 0; |
4332 | 4374 | ||
@@ -4335,7 +4377,10 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4335 | return; | 4377 | return; |
4336 | } | 4378 | } |
4337 | 4379 | ||
4338 | skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE); | 4380 | pci_unmap_single(tp->pdev, |
4381 | pci_unmap_addr(ri, mapping), | ||
4382 | skb_headlen(skb), | ||
4383 | PCI_DMA_TODEVICE); | ||
4339 | 4384 | ||
4340 | ri->skb = NULL; | 4385 | ri->skb = NULL; |
4341 | 4386 | ||
@@ -4345,6 +4390,11 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4345 | ri = &tnapi->tx_buffers[sw_idx]; | 4390 | ri = &tnapi->tx_buffers[sw_idx]; |
4346 | if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) | 4391 | if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) |
4347 | tx_bug = 1; | 4392 | tx_bug = 1; |
4393 | |||
4394 | pci_unmap_page(tp->pdev, | ||
4395 | pci_unmap_addr(ri, mapping), | ||
4396 | skb_shinfo(skb)->frags[i].size, | ||
4397 | PCI_DMA_TODEVICE); | ||
4348 | sw_idx = NEXT_TX(sw_idx); | 4398 | sw_idx = NEXT_TX(sw_idx); |
4349 | } | 4399 | } |
4350 | 4400 | ||
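These two hunks replace the removed skb_dma_unmap() call with explicit per-buffer unmaps: the linear head through pci_unmap_single() and each paged fragment through pci_unmap_page(), using the DMA address stashed in the ring_info when the buffer was mapped. Condensed sketch (ring-index advancement elided):

    pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
                     skb_headlen(skb), PCI_DMA_TODEVICE);
    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
            ri = &tnapi->tx_buffers[sw_idx];        /* entry for fragment i */
            pci_unmap_page(tp->pdev, pci_unmap_addr(ri, mapping),
                           skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE);
    }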
@@ -4375,6 +4425,17 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4375 | } | 4425 | } |
4376 | } | 4426 | } |
4377 | 4427 | ||
4428 | static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) | ||
4429 | { | ||
4430 | if (!ri->skb) | ||
4431 | return; | ||
4432 | |||
4433 | pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping), | ||
4434 | map_sz, PCI_DMA_FROMDEVICE); | ||
4435 | dev_kfree_skb_any(ri->skb); | ||
4436 | ri->skb = NULL; | ||
4437 | } | ||
4438 | |||
4378 | /* Returns size of skb allocated or < 0 on error. | 4439 | /* Returns size of skb allocated or < 0 on error. |
4379 | * | 4440 | * |
4380 | * We only need to fill in the address because the other members | 4441 | * We only need to fill in the address because the other members |
@@ -4386,16 +4447,14 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4386 | * buffers the cpu only reads the last cacheline of the RX descriptor | 4447 | * buffers the cpu only reads the last cacheline of the RX descriptor |
4387 | * (to fetch the error flags, vlan tag, checksum, and opaque cookie). | 4448 | * (to fetch the error flags, vlan tag, checksum, and opaque cookie). |
4388 | */ | 4449 | */ |
4389 | static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, | 4450 | static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, |
4390 | int src_idx, u32 dest_idx_unmasked) | 4451 | u32 opaque_key, u32 dest_idx_unmasked) |
4391 | { | 4452 | { |
4392 | struct tg3 *tp = tnapi->tp; | ||
4393 | struct tg3_rx_buffer_desc *desc; | 4453 | struct tg3_rx_buffer_desc *desc; |
4394 | struct ring_info *map, *src_map; | 4454 | struct ring_info *map, *src_map; |
4395 | struct sk_buff *skb; | 4455 | struct sk_buff *skb; |
4396 | dma_addr_t mapping; | 4456 | dma_addr_t mapping; |
4397 | int skb_size, dest_idx; | 4457 | int skb_size, dest_idx; |
4398 | struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | ||
4399 | 4458 | ||
4400 | src_map = NULL; | 4459 | src_map = NULL; |
4401 | switch (opaque_key) { | 4460 | switch (opaque_key) { |
@@ -4403,8 +4462,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, | |||
4403 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; | 4462 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; |
4404 | desc = &tpr->rx_std[dest_idx]; | 4463 | desc = &tpr->rx_std[dest_idx]; |
4405 | map = &tpr->rx_std_buffers[dest_idx]; | 4464 | map = &tpr->rx_std_buffers[dest_idx]; |
4406 | if (src_idx >= 0) | ||
4407 | src_map = &tpr->rx_std_buffers[src_idx]; | ||
4408 | skb_size = tp->rx_pkt_map_sz; | 4465 | skb_size = tp->rx_pkt_map_sz; |
4409 | break; | 4466 | break; |
4410 | 4467 | ||
@@ -4412,8 +4469,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, | |||
4412 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; | 4469 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; |
4413 | desc = &tpr->rx_jmb[dest_idx].std; | 4470 | desc = &tpr->rx_jmb[dest_idx].std; |
4414 | map = &tpr->rx_jmb_buffers[dest_idx]; | 4471 | map = &tpr->rx_jmb_buffers[dest_idx]; |
4415 | if (src_idx >= 0) | ||
4416 | src_map = &tpr->rx_jmb_buffers[src_idx]; | ||
4417 | skb_size = TG3_RX_JMB_MAP_SZ; | 4472 | skb_size = TG3_RX_JMB_MAP_SZ; |
4418 | break; | 4473 | break; |
4419 | 4474 | ||
@@ -4435,13 +4490,14 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, | |||
4435 | 4490 | ||
4436 | mapping = pci_map_single(tp->pdev, skb->data, skb_size, | 4491 | mapping = pci_map_single(tp->pdev, skb->data, skb_size, |
4437 | PCI_DMA_FROMDEVICE); | 4492 | PCI_DMA_FROMDEVICE); |
4493 | if (pci_dma_mapping_error(tp->pdev, mapping)) { | ||
4494 | dev_kfree_skb(skb); | ||
4495 | return -EIO; | ||
4496 | } | ||
4438 | 4497 | ||
4439 | map->skb = skb; | 4498 | map->skb = skb; |
4440 | pci_unmap_addr_set(map, mapping, mapping); | 4499 | pci_unmap_addr_set(map, mapping, mapping); |
4441 | 4500 | ||
4442 | if (src_map != NULL) | ||
4443 | src_map->skb = NULL; | ||
4444 | |||
4445 | desc->addr_hi = ((u64)mapping >> 32); | 4501 | desc->addr_hi = ((u64)mapping >> 32); |
4446 | desc->addr_lo = ((u64)mapping & 0xffffffff); | 4502 | desc->addr_lo = ((u64)mapping & 0xffffffff); |
4447 | 4503 | ||
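The added pci_dma_mapping_error() test is the standard guard for streaming DMA: pci_map_single() can fail (for instance when IOMMU space is exhausted), so the returned handle must be checked before the descriptor is handed to the hardware. The pattern as used above:

    mapping = pci_map_single(tp->pdev, skb->data, skb_size, PCI_DMA_FROMDEVICE);
    if (pci_dma_mapping_error(tp->pdev, mapping)) {
            dev_kfree_skb(skb);
            return -EIO;
    }
    pci_unmap_addr_set(map, mapping, mapping);      /* saved for the later unmap */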
@@ -4452,30 +4508,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, | |||
4452 | * members of the RX descriptor are invariant. See notes above | 4508 | * members of the RX descriptor are invariant. See notes above |
4453 | * tg3_alloc_rx_skb for full details. | 4509 | * tg3_alloc_rx_skb for full details. |
4454 | */ | 4510 | */ |
4455 | static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, | 4511 | static void tg3_recycle_rx(struct tg3_napi *tnapi, |
4456 | int src_idx, u32 dest_idx_unmasked) | 4512 | struct tg3_rx_prodring_set *dpr, |
4513 | u32 opaque_key, int src_idx, | ||
4514 | u32 dest_idx_unmasked) | ||
4457 | { | 4515 | { |
4458 | struct tg3 *tp = tnapi->tp; | 4516 | struct tg3 *tp = tnapi->tp; |
4459 | struct tg3_rx_buffer_desc *src_desc, *dest_desc; | 4517 | struct tg3_rx_buffer_desc *src_desc, *dest_desc; |
4460 | struct ring_info *src_map, *dest_map; | 4518 | struct ring_info *src_map, *dest_map; |
4461 | int dest_idx; | 4519 | int dest_idx; |
4462 | struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | 4520 | struct tg3_rx_prodring_set *spr = &tp->prodring[0]; |
4463 | 4521 | ||
4464 | switch (opaque_key) { | 4522 | switch (opaque_key) { |
4465 | case RXD_OPAQUE_RING_STD: | 4523 | case RXD_OPAQUE_RING_STD: |
4466 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; | 4524 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; |
4467 | dest_desc = &tpr->rx_std[dest_idx]; | 4525 | dest_desc = &dpr->rx_std[dest_idx]; |
4468 | dest_map = &tpr->rx_std_buffers[dest_idx]; | 4526 | dest_map = &dpr->rx_std_buffers[dest_idx]; |
4469 | src_desc = &tpr->rx_std[src_idx]; | 4527 | src_desc = &spr->rx_std[src_idx]; |
4470 | src_map = &tpr->rx_std_buffers[src_idx]; | 4528 | src_map = &spr->rx_std_buffers[src_idx]; |
4471 | break; | 4529 | break; |
4472 | 4530 | ||
4473 | case RXD_OPAQUE_RING_JUMBO: | 4531 | case RXD_OPAQUE_RING_JUMBO: |
4474 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; | 4532 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; |
4475 | dest_desc = &tpr->rx_jmb[dest_idx].std; | 4533 | dest_desc = &dpr->rx_jmb[dest_idx].std; |
4476 | dest_map = &tpr->rx_jmb_buffers[dest_idx]; | 4534 | dest_map = &dpr->rx_jmb_buffers[dest_idx]; |
4477 | src_desc = &tpr->rx_jmb[src_idx].std; | 4535 | src_desc = &spr->rx_jmb[src_idx].std; |
4478 | src_map = &tpr->rx_jmb_buffers[src_idx]; | 4536 | src_map = &spr->rx_jmb_buffers[src_idx]; |
4479 | break; | 4537 | break; |
4480 | 4538 | ||
4481 | default: | 4539 | default: |
@@ -4488,6 +4546,11 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, | |||
4488 | dest_desc->addr_hi = src_desc->addr_hi; | 4546 | dest_desc->addr_hi = src_desc->addr_hi; |
4489 | dest_desc->addr_lo = src_desc->addr_lo; | 4547 | dest_desc->addr_lo = src_desc->addr_lo; |
4490 | 4548 | ||
4549 | /* Ensure that the update to the skb happens after the physical | ||
4550 | * addresses have been transferred to the new BD location. | ||
4551 | */ | ||
4552 | smp_wmb(); | ||
4553 | |||
4491 | src_map->skb = NULL; | 4554 | src_map->skb = NULL; |
4492 | } | 4555 | } |
4493 | 4556 | ||
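The smp_wmb() added here makes the copied buffer-descriptor address visible before the source skb pointer is cleared; it pairs with the smp_rmb() calls in the new tg3_rx_prodring_xfer() helper further down. The generic publish/consume ordering being relied on, as a sketch (shared, payload and ready are illustrative names, not tg3 fields):

    /* producer */
    shared->payload = value;
    smp_wmb();                      /* payload visible before the flag */
    shared->ready = 1;

    /* consumer */
    if (shared->ready) {
            smp_rmb();              /* read the flag before the payload */
            use(shared->payload);
    }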
@@ -4519,10 +4582,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4519 | { | 4582 | { |
4520 | struct tg3 *tp = tnapi->tp; | 4583 | struct tg3 *tp = tnapi->tp; |
4521 | u32 work_mask, rx_std_posted = 0; | 4584 | u32 work_mask, rx_std_posted = 0; |
4585 | u32 std_prod_idx, jmb_prod_idx; | ||
4522 | u32 sw_idx = tnapi->rx_rcb_ptr; | 4586 | u32 sw_idx = tnapi->rx_rcb_ptr; |
4523 | u16 hw_idx; | 4587 | u16 hw_idx; |
4524 | int received; | 4588 | int received; |
4525 | struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | 4589 | struct tg3_rx_prodring_set *tpr = tnapi->prodring; |
4526 | 4590 | ||
4527 | hw_idx = *(tnapi->rx_rcb_prod_idx); | 4591 | hw_idx = *(tnapi->rx_rcb_prod_idx); |
4528 | /* | 4592 | /* |
@@ -4532,7 +4596,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4532 | rmb(); | 4596 | rmb(); |
4533 | work_mask = 0; | 4597 | work_mask = 0; |
4534 | received = 0; | 4598 | received = 0; |
4599 | std_prod_idx = tpr->rx_std_prod_idx; | ||
4600 | jmb_prod_idx = tpr->rx_jmb_prod_idx; | ||
4535 | while (sw_idx != hw_idx && budget > 0) { | 4601 | while (sw_idx != hw_idx && budget > 0) { |
4602 | struct ring_info *ri; | ||
4536 | struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; | 4603 | struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; |
4537 | unsigned int len; | 4604 | unsigned int len; |
4538 | struct sk_buff *skb; | 4605 | struct sk_buff *skb; |
@@ -4542,16 +4609,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4542 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; | 4609 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; |
4543 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; | 4610 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; |
4544 | if (opaque_key == RXD_OPAQUE_RING_STD) { | 4611 | if (opaque_key == RXD_OPAQUE_RING_STD) { |
4545 | struct ring_info *ri = &tpr->rx_std_buffers[desc_idx]; | 4612 | ri = &tp->prodring[0].rx_std_buffers[desc_idx]; |
4546 | dma_addr = pci_unmap_addr(ri, mapping); | 4613 | dma_addr = pci_unmap_addr(ri, mapping); |
4547 | skb = ri->skb; | 4614 | skb = ri->skb; |
4548 | post_ptr = &tpr->rx_std_ptr; | 4615 | post_ptr = &std_prod_idx; |
4549 | rx_std_posted++; | 4616 | rx_std_posted++; |
4550 | } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { | 4617 | } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { |
4551 | struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx]; | 4618 | ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; |
4552 | dma_addr = pci_unmap_addr(ri, mapping); | 4619 | dma_addr = pci_unmap_addr(ri, mapping); |
4553 | skb = ri->skb; | 4620 | skb = ri->skb; |
4554 | post_ptr = &tpr->rx_jmb_ptr; | 4621 | post_ptr = &jmb_prod_idx; |
4555 | } else | 4622 | } else |
4556 | goto next_pkt_nopost; | 4623 | goto next_pkt_nopost; |
4557 | 4624 | ||
@@ -4560,7 +4627,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4560 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && | 4627 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && |
4561 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { | 4628 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { |
4562 | drop_it: | 4629 | drop_it: |
4563 | tg3_recycle_rx(tnapi, opaque_key, | 4630 | tg3_recycle_rx(tnapi, tpr, opaque_key, |
4564 | desc_idx, *post_ptr); | 4631 | desc_idx, *post_ptr); |
4565 | drop_it_no_recycle: | 4632 | drop_it_no_recycle: |
4566 | /* Other statistics kept track of by card. */ | 4633 | /* Other statistics kept track of by card. */ |
@@ -4571,28 +4638,34 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4571 | len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - | 4638 | len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - |
4572 | ETH_FCS_LEN; | 4639 | ETH_FCS_LEN; |
4573 | 4640 | ||
4574 | if (len > RX_COPY_THRESHOLD | 4641 | if (len > RX_COPY_THRESHOLD && |
4575 | && tp->rx_offset == NET_IP_ALIGN | 4642 | tp->rx_offset == NET_IP_ALIGN) { |
4576 | /* rx_offset will likely not equal NET_IP_ALIGN | 4643 | /* rx_offset will likely not equal NET_IP_ALIGN |
4577 | * if this is a 5701 card running in PCI-X mode | 4644 | * if this is a 5701 card running in PCI-X mode |
4578 | * [see tg3_get_invariants()] | 4645 | * [see tg3_get_invariants()] |
4579 | */ | 4646 | */ |
4580 | ) { | ||
4581 | int skb_size; | 4647 | int skb_size; |
4582 | 4648 | ||
4583 | skb_size = tg3_alloc_rx_skb(tnapi, opaque_key, | 4649 | skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, |
4584 | desc_idx, *post_ptr); | 4650 | *post_ptr); |
4585 | if (skb_size < 0) | 4651 | if (skb_size < 0) |
4586 | goto drop_it; | 4652 | goto drop_it; |
4587 | 4653 | ||
4588 | pci_unmap_single(tp->pdev, dma_addr, skb_size, | 4654 | pci_unmap_single(tp->pdev, dma_addr, skb_size, |
4589 | PCI_DMA_FROMDEVICE); | 4655 | PCI_DMA_FROMDEVICE); |
4590 | 4656 | ||
4657 | /* Ensure that the update to the skb happens | ||
4658 | * after the usage of the old DMA mapping. | ||
4659 | */ | ||
4660 | smp_wmb(); | ||
4661 | |||
4662 | ri->skb = NULL; | ||
4663 | |||
4591 | skb_put(skb, len); | 4664 | skb_put(skb, len); |
4592 | } else { | 4665 | } else { |
4593 | struct sk_buff *copy_skb; | 4666 | struct sk_buff *copy_skb; |
4594 | 4667 | ||
4595 | tg3_recycle_rx(tnapi, opaque_key, | 4668 | tg3_recycle_rx(tnapi, tpr, opaque_key, |
4596 | desc_idx, *post_ptr); | 4669 | desc_idx, *post_ptr); |
4597 | 4670 | ||
4598 | copy_skb = netdev_alloc_skb(tp->dev, | 4671 | copy_skb = netdev_alloc_skb(tp->dev, |
@@ -4642,10 +4715,9 @@ next_pkt: | |||
4642 | (*post_ptr)++; | 4715 | (*post_ptr)++; |
4643 | 4716 | ||
4644 | if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { | 4717 | if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { |
4645 | u32 idx = *post_ptr % TG3_RX_RING_SIZE; | 4718 | tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; |
4646 | 4719 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, | |
4647 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + | 4720 | tpr->rx_std_prod_idx); |
4648 | TG3_64BIT_REG_LOW, idx); | ||
4649 | work_mask &= ~RXD_OPAQUE_RING_STD; | 4721 | work_mask &= ~RXD_OPAQUE_RING_STD; |
4650 | rx_std_posted = 0; | 4722 | rx_std_posted = 0; |
4651 | } | 4723 | } |
@@ -4665,33 +4737,46 @@ next_pkt_nopost: | |||
4665 | tw32_rx_mbox(tnapi->consmbox, sw_idx); | 4737 | tw32_rx_mbox(tnapi->consmbox, sw_idx); |
4666 | 4738 | ||
4667 | /* Refill RX ring(s). */ | 4739 | /* Refill RX ring(s). */ |
4668 | if (work_mask & RXD_OPAQUE_RING_STD) { | 4740 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { |
4669 | sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE; | 4741 | if (work_mask & RXD_OPAQUE_RING_STD) { |
4670 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, | 4742 | tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; |
4671 | sw_idx); | 4743 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, |
4672 | } | 4744 | tpr->rx_std_prod_idx); |
4673 | if (work_mask & RXD_OPAQUE_RING_JUMBO) { | 4745 | } |
4674 | sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE; | 4746 | if (work_mask & RXD_OPAQUE_RING_JUMBO) { |
4675 | tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, | 4747 | tpr->rx_jmb_prod_idx = jmb_prod_idx % |
4676 | sw_idx); | 4748 | TG3_RX_JUMBO_RING_SIZE; |
4749 | tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, | ||
4750 | tpr->rx_jmb_prod_idx); | ||
4751 | } | ||
4752 | mmiowb(); | ||
4753 | } else if (work_mask) { | ||
4754 | /* rx_std_buffers[] and rx_jmb_buffers[] entries must be | ||
4755 | * updated before the producer indices can be updated. | ||
4756 | */ | ||
4757 | smp_wmb(); | ||
4758 | |||
4759 | tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; | ||
4760 | tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE; | ||
4761 | |||
4762 | if (tnapi != &tp->napi[1]) | ||
4763 | napi_schedule(&tp->napi[1].napi); | ||
4677 | } | 4764 | } |
4678 | mmiowb(); | ||
4679 | 4765 | ||
4680 | return received; | 4766 | return received; |
4681 | } | 4767 | } |
4682 | 4768 | ||
4683 | static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | 4769 | static void tg3_poll_link(struct tg3 *tp) |
4684 | { | 4770 | { |
4685 | struct tg3 *tp = tnapi->tp; | ||
4686 | struct tg3_hw_status *sblk = tnapi->hw_status; | ||
4687 | |||
4688 | /* handle link change and other phy events */ | 4771 | /* handle link change and other phy events */ |
4689 | if (!(tp->tg3_flags & | 4772 | if (!(tp->tg3_flags & |
4690 | (TG3_FLAG_USE_LINKCHG_REG | | 4773 | (TG3_FLAG_USE_LINKCHG_REG | |
4691 | TG3_FLAG_POLL_SERDES))) { | 4774 | TG3_FLAG_POLL_SERDES))) { |
4775 | struct tg3_hw_status *sblk = tp->napi[0].hw_status; | ||
4776 | |||
4692 | if (sblk->status & SD_STATUS_LINK_CHG) { | 4777 | if (sblk->status & SD_STATUS_LINK_CHG) { |
4693 | sblk->status = SD_STATUS_UPDATED | | 4778 | sblk->status = SD_STATUS_UPDATED | |
4694 | (sblk->status & ~SD_STATUS_LINK_CHG); | 4779 | (sblk->status & ~SD_STATUS_LINK_CHG); |
4695 | spin_lock(&tp->lock); | 4780 | spin_lock(&tp->lock); |
4696 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 4781 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
4697 | tw32_f(MAC_STATUS, | 4782 | tw32_f(MAC_STATUS, |
@@ -4705,6 +4790,134 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | |||
4705 | spin_unlock(&tp->lock); | 4790 | spin_unlock(&tp->lock); |
4706 | } | 4791 | } |
4707 | } | 4792 | } |
4793 | } | ||
4794 | |||
4795 | static int tg3_rx_prodring_xfer(struct tg3 *tp, | ||
4796 | struct tg3_rx_prodring_set *dpr, | ||
4797 | struct tg3_rx_prodring_set *spr) | ||
4798 | { | ||
4799 | u32 si, di, cpycnt, src_prod_idx; | ||
4800 | int i, err = 0; | ||
4801 | |||
4802 | while (1) { | ||
4803 | src_prod_idx = spr->rx_std_prod_idx; | ||
4804 | |||
4805 | /* Make sure updates to the rx_std_buffers[] entries and the | ||
4806 | * standard producer index are seen in the correct order. | ||
4807 | */ | ||
4808 | smp_rmb(); | ||
4809 | |||
4810 | if (spr->rx_std_cons_idx == src_prod_idx) | ||
4811 | break; | ||
4812 | |||
4813 | if (spr->rx_std_cons_idx < src_prod_idx) | ||
4814 | cpycnt = src_prod_idx - spr->rx_std_cons_idx; | ||
4815 | else | ||
4816 | cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx; | ||
4817 | |||
4818 | cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx); | ||
4819 | |||
4820 | si = spr->rx_std_cons_idx; | ||
4821 | di = dpr->rx_std_prod_idx; | ||
4822 | |||
4823 | for (i = di; i < di + cpycnt; i++) { | ||
4824 | if (dpr->rx_std_buffers[i].skb) { | ||
4825 | cpycnt = i - di; | ||
4826 | err = -ENOSPC; | ||
4827 | break; | ||
4828 | } | ||
4829 | } | ||
4830 | |||
4831 | if (!cpycnt) | ||
4832 | break; | ||
4833 | |||
4834 | /* Ensure that updates to the rx_std_buffers ring and the | ||
4835 | * shadowed hardware producer ring from tg3_recycle_skb() are | ||
4836 | * ordered correctly WRT the skb check above. | ||
4837 | */ | ||
4838 | smp_rmb(); | ||
4839 | |||
4840 | memcpy(&dpr->rx_std_buffers[di], | ||
4841 | &spr->rx_std_buffers[si], | ||
4842 | cpycnt * sizeof(struct ring_info)); | ||
4843 | |||
4844 | for (i = 0; i < cpycnt; i++, di++, si++) { | ||
4845 | struct tg3_rx_buffer_desc *sbd, *dbd; | ||
4846 | sbd = &spr->rx_std[si]; | ||
4847 | dbd = &dpr->rx_std[di]; | ||
4848 | dbd->addr_hi = sbd->addr_hi; | ||
4849 | dbd->addr_lo = sbd->addr_lo; | ||
4850 | } | ||
4851 | |||
4852 | spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) % | ||
4853 | TG3_RX_RING_SIZE; | ||
4854 | dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) % | ||
4855 | TG3_RX_RING_SIZE; | ||
4856 | } | ||
4857 | |||
4858 | while (1) { | ||
4859 | src_prod_idx = spr->rx_jmb_prod_idx; | ||
4860 | |||
4861 | /* Make sure updates to the rx_jmb_buffers[] entries and | ||
4862 | * the jumbo producer index are seen in the correct order. | ||
4863 | */ | ||
4864 | smp_rmb(); | ||
4865 | |||
4866 | if (spr->rx_jmb_cons_idx == src_prod_idx) | ||
4867 | break; | ||
4868 | |||
4869 | if (spr->rx_jmb_cons_idx < src_prod_idx) | ||
4870 | cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; | ||
4871 | else | ||
4872 | cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx; | ||
4873 | |||
4874 | cpycnt = min(cpycnt, | ||
4875 | TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx); | ||
4876 | |||
4877 | si = spr->rx_jmb_cons_idx; | ||
4878 | di = dpr->rx_jmb_prod_idx; | ||
4879 | |||
4880 | for (i = di; i < di + cpycnt; i++) { | ||
4881 | if (dpr->rx_jmb_buffers[i].skb) { | ||
4882 | cpycnt = i - di; | ||
4883 | err = -ENOSPC; | ||
4884 | break; | ||
4885 | } | ||
4886 | } | ||
4887 | |||
4888 | if (!cpycnt) | ||
4889 | break; | ||
4890 | |||
4891 | /* Ensure that updates to the rx_jmb_buffers ring and the | ||
4892 | * shadowed hardware producer ring from tg3_recycle_skb() are | ||
4893 | * ordered correctly WRT the skb check above. | ||
4894 | */ | ||
4895 | smp_rmb(); | ||
4896 | |||
4897 | memcpy(&dpr->rx_jmb_buffers[di], | ||
4898 | &spr->rx_jmb_buffers[si], | ||
4899 | cpycnt * sizeof(struct ring_info)); | ||
4900 | |||
4901 | for (i = 0; i < cpycnt; i++, di++, si++) { | ||
4902 | struct tg3_rx_buffer_desc *sbd, *dbd; | ||
4903 | sbd = &spr->rx_jmb[si].std; | ||
4904 | dbd = &dpr->rx_jmb[di].std; | ||
4905 | dbd->addr_hi = sbd->addr_hi; | ||
4906 | dbd->addr_lo = sbd->addr_lo; | ||
4907 | } | ||
4908 | |||
4909 | spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) % | ||
4910 | TG3_RX_JUMBO_RING_SIZE; | ||
4911 | dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) % | ||
4912 | TG3_RX_JUMBO_RING_SIZE; | ||
4913 | } | ||
4914 | |||
4915 | return err; | ||
4916 | } | ||
4917 | |||
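The new tg3_rx_prodring_xfer() above copies posted RX buffers from a per-vector producer ring back into the ring the hardware actually consumes, stopping either at a wrap boundary or at a destination slot that is still occupied, and returning -ENOSPC in the latter case. Below is a minimal, single-threaded user-space model of that copy logic, with hypothetical types and names rather than the driver's; the driver additionally issues smp_rmb() so the index reads order correctly against the buffer contents, which a single-threaded sketch can omit.

#include <string.h>

#define RING_SIZE 512

struct buf { void *skb; };              /* stand-in for struct ring_info */

struct ring {
	struct buf bufs[RING_SIZE];
	unsigned int cons, prod;        /* consumer/producer indices, < RING_SIZE */
};

/* Copy posted entries from src into dst, stopping at a wrap boundary or at
 * an already-occupied destination slot.  Returns -1 if dst ran out of room. */
static int ring_xfer(struct ring *dst, struct ring *src)
{
	int err = 0;

	while (src->cons != src->prod) {
		unsigned int cnt, i, si = src->cons, di = dst->prod;

		if (si < src->prod)
			cnt = src->prod - si;         /* contiguous up to prod */
		else
			cnt = RING_SIZE - si;         /* up to the wrap point  */
		if (cnt > RING_SIZE - di)
			cnt = RING_SIZE - di;         /* don't wrap dst either */

		for (i = 0; i < cnt; i++) {
			if (dst->bufs[di + i].skb) {  /* dst slot still busy   */
				cnt = i;
				err = -1;
				break;
			}
		}
		if (!cnt)
			break;

		memcpy(&dst->bufs[di], &src->bufs[si], cnt * sizeof(struct buf));
		src->cons = (si + cnt) % RING_SIZE;
		dst->prod = (di + cnt) % RING_SIZE;
	}

	return err;
}
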
4918 | static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | ||
4919 | { | ||
4920 | struct tg3 *tp = tnapi->tp; | ||
4708 | 4921 | ||
4709 | /* run TX completion thread */ | 4922 | /* run TX completion thread */ |
4710 | if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { | 4923 | if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { |
@@ -4720,6 +4933,76 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | |||
4720 | if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) | 4933 | if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) |
4721 | work_done += tg3_rx(tnapi, budget - work_done); | 4934 | work_done += tg3_rx(tnapi, budget - work_done); |
4722 | 4935 | ||
4936 | if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { | ||
4937 | struct tg3_rx_prodring_set *dpr = &tp->prodring[0]; | ||
4938 | int i, err = 0; | ||
4939 | u32 std_prod_idx = dpr->rx_std_prod_idx; | ||
4940 | u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; | ||
4941 | |||
4942 | for (i = 1; i < tp->irq_cnt; i++) | ||
4943 | err |= tg3_rx_prodring_xfer(tp, dpr, | ||
4944 | tp->napi[i].prodring); | ||
4945 | |||
4946 | wmb(); | ||
4947 | |||
4948 | if (std_prod_idx != dpr->rx_std_prod_idx) | ||
4949 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, | ||
4950 | dpr->rx_std_prod_idx); | ||
4951 | |||
4952 | if (jmb_prod_idx != dpr->rx_jmb_prod_idx) | ||
4953 | tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, | ||
4954 | dpr->rx_jmb_prod_idx); | ||
4955 | |||
4956 | mmiowb(); | ||
4957 | |||
4958 | if (err) | ||
4959 | tw32_f(HOSTCC_MODE, tp->coal_now); | ||
4960 | } | ||
4961 | |||
4962 | return work_done; | ||
4963 | } | ||
4964 | |||
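In the RSS branch added to tg3_poll_work(), vector 1 drains every other vector's producer ring into prodring[0], writes the standard/jumbo producer mailboxes only if the corresponding index actually advanced, and forces another interrupt via HOSTCC_MODE when a transfer could not be fully placed. A hedged skeleton of that "doorbell only on change" pattern, with stubbed helpers standing in for tg3_rx_prodring_xfer() and tw32_rx_mbox():

#include <stdint.h>

#define NVEC 4

struct prodring { uint32_t std_prod, jmb_prod; };

/* Stub for the per-vector transfer; the driver's tg3_rx_prodring_xfer()
 * returns -ENOSPC when a destination slot is still occupied. */
static int xfer(struct prodring *dst, struct prodring *src)
{
	(void)dst; (void)src;
	return 0;
}

/* Hypothetical mailbox writes and interrupt kick, not real register I/O. */
static void kick_std(uint32_t idx) { (void)idx; }
static void kick_jmb(uint32_t idx) { (void)idx; }
static void force_interrupt(void) { }

static void rss_refill(struct prodring rings[NVEC])
{
	struct prodring *dpr = &rings[0];
	uint32_t std = dpr->std_prod, jmb = dpr->jmb_prod;
	int i, err = 0;

	for (i = 1; i < NVEC; i++)
		err |= xfer(dpr, &rings[i]);

	if (std != dpr->std_prod)
		kick_std(dpr->std_prod);      /* producer moved: tell the NIC */
	if (jmb != dpr->jmb_prod)
		kick_jmb(dpr->jmb_prod);

	if (err)                              /* couldn't place everything */
		force_interrupt();            /* come back and retry soon  */
}
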
4965 | static int tg3_poll_msix(struct napi_struct *napi, int budget) | ||
4966 | { | ||
4967 | struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); | ||
4968 | struct tg3 *tp = tnapi->tp; | ||
4969 | int work_done = 0; | ||
4970 | struct tg3_hw_status *sblk = tnapi->hw_status; | ||
4971 | |||
4972 | while (1) { | ||
4973 | work_done = tg3_poll_work(tnapi, work_done, budget); | ||
4974 | |||
4975 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | ||
4976 | goto tx_recovery; | ||
4977 | |||
4978 | if (unlikely(work_done >= budget)) | ||
4979 | break; | ||
4980 | |||
4981 | /* tp->last_tag is used in tg3_restart_ints() below | ||
4982 | * to tell the hw how much work has been processed, | ||
4983 | * so we must read it before checking for more work. | ||
4984 | */ | ||
4985 | tnapi->last_tag = sblk->status_tag; | ||
4986 | tnapi->last_irq_tag = tnapi->last_tag; | ||
4987 | rmb(); | ||
4988 | |||
4989 | /* check for RX/TX work to do */ | ||
4990 | if (sblk->idx[0].tx_consumer == tnapi->tx_cons && | ||
4991 | *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) { | ||
4992 | napi_complete(napi); | ||
4993 | /* Reenable interrupts. */ | ||
4994 | tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); | ||
4995 | mmiowb(); | ||
4996 | break; | ||
4997 | } | ||
4998 | } | ||
4999 | |||
5000 | return work_done; | ||
5001 | |||
5002 | tx_recovery: | ||
5003 | /* work_done is guaranteed to be less than budget. */ | ||
5004 | napi_complete(napi); | ||
5005 | schedule_work(&tp->reset_task); | ||
4723 | return work_done; | 5006 | return work_done; |
4724 | } | 5007 | } |
4725 | 5008 | ||
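tg3_poll_msix() follows the usual NAPI contract: do up to budget worth of work, latch the status tag before the final re-check, and complete/re-enable the interrupt only when both the TX consumer and the RX return ring show nothing pending. A simplified single-vector model of that loop, using toy helpers and a fake status block rather than the driver's structures:

#include <stdio.h>

struct status { unsigned int tag, tx_cons, rx_prod; };

struct napi_ctx {
	struct status *sblk;    /* stand-in for the DMA'd status block     */
	unsigned int tx_cons;   /* TX descriptors already reclaimed        */
	unsigned int rx_ptr;    /* RX return-ring entries already handled  */
	unsigned int last_tag;
};

/* Toy work function: advance our shadow indices toward the status block,
 * charging one unit of budget per entry.  The driver calls tg3_tx() and
 * tg3_rx() here instead. */
static int do_work(struct napi_ctx *c, int done, int budget)
{
	while (done < budget && c->tx_cons != c->sblk->tx_cons) {
		c->tx_cons++;
		done++;
	}
	while (done < budget && c->rx_ptr != c->sblk->rx_prod) {
		c->rx_ptr++;
		done++;
	}
	return done;
}

static void complete_and_unmask(struct napi_ctx *c)
{
	printf("napi_complete(), re-enable IRQ acking tag %u\n", c->last_tag);
}

static int poll(struct napi_ctx *c, int budget)
{
	int work_done = 0;

	for (;;) {
		work_done = do_work(c, work_done, budget);
		if (work_done >= budget)
			break;                  /* stay in polling mode */

		/* Latch the tag *before* the final emptiness check, so the
		 * interrupt ack tells hardware how much was serviced. */
		c->last_tag = c->sblk->tag;

		if (c->sblk->tx_cons == c->tx_cons &&
		    c->sblk->rx_prod == c->rx_ptr) {
			complete_and_unmask(c);
			break;
		}
	}
	return work_done;
}

int main(void)
{
	struct status s = { .tag = 7, .tx_cons = 3, .rx_prod = 5 };
	struct napi_ctx c = { .sblk = &s };

	printf("work done: %d\n", poll(&c, 64));
	return 0;
}
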
@@ -4731,6 +5014,8 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
4731 | struct tg3_hw_status *sblk = tnapi->hw_status; | 5014 | struct tg3_hw_status *sblk = tnapi->hw_status; |
4732 | 5015 | ||
4733 | while (1) { | 5016 | while (1) { |
5017 | tg3_poll_link(tp); | ||
5018 | |||
4734 | work_done = tg3_poll_work(tnapi, work_done, budget); | 5019 | work_done = tg3_poll_work(tnapi, work_done, budget); |
4735 | 5020 | ||
4736 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | 5021 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) |
@@ -4975,8 +5260,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy) | |||
4975 | 5260 | ||
4976 | err = tg3_init_hw(tp, reset_phy); | 5261 | err = tg3_init_hw(tp, reset_phy); |
4977 | if (err) { | 5262 | if (err) { |
4978 | printk(KERN_ERR PFX "%s: Failed to re-initialize device, " | 5263 | netdev_err(tp->dev, "Failed to re-initialize device, aborting\n"); |
4979 | "aborting.\n", tp->dev->name); | ||
4980 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 5264 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
4981 | tg3_full_unlock(tp); | 5265 | tg3_full_unlock(tp); |
4982 | del_timer_sync(&tp->timer); | 5266 | del_timer_sync(&tp->timer); |
@@ -4995,7 +5279,7 @@ static void tg3_poll_controller(struct net_device *dev) | |||
4995 | struct tg3 *tp = netdev_priv(dev); | 5279 | struct tg3 *tp = netdev_priv(dev); |
4996 | 5280 | ||
4997 | for (i = 0; i < tp->irq_cnt; i++) | 5281 | for (i = 0; i < tp->irq_cnt; i++) |
4998 | tg3_interrupt(tp->napi[i].irq_vec, dev); | 5282 | tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); |
4999 | } | 5283 | } |
5000 | #endif | 5284 | #endif |
5001 | 5285 | ||
@@ -5049,10 +5333,10 @@ out: | |||
5049 | 5333 | ||
5050 | static void tg3_dump_short_state(struct tg3 *tp) | 5334 | static void tg3_dump_short_state(struct tg3 *tp) |
5051 | { | 5335 | { |
5052 | printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n", | 5336 | netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n", |
5053 | tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS)); | 5337 | tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS)); |
5054 | printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n", | 5338 | netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n", |
5055 | tr32(RDMAC_STATUS), tr32(WDMAC_STATUS)); | 5339 | tr32(RDMAC_STATUS), tr32(WDMAC_STATUS)); |
5056 | } | 5340 | } |
5057 | 5341 | ||
5058 | static void tg3_tx_timeout(struct net_device *dev) | 5342 | static void tg3_tx_timeout(struct net_device *dev) |
@@ -5060,8 +5344,7 @@ static void tg3_tx_timeout(struct net_device *dev) | |||
5060 | struct tg3 *tp = netdev_priv(dev); | 5344 | struct tg3 *tp = netdev_priv(dev); |
5061 | 5345 | ||
5062 | if (netif_msg_tx_err(tp)) { | 5346 | if (netif_msg_tx_err(tp)) { |
5063 | printk(KERN_ERR PFX "%s: transmit timed out, resetting\n", | 5347 | netdev_err(dev, "transmit timed out, resetting\n"); |
5064 | dev->name); | ||
5065 | tg3_dump_short_state(tp); | 5348 | tg3_dump_short_state(tp); |
5066 | } | 5349 | } |
5067 | 5350 | ||
@@ -5093,11 +5376,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, | |||
5093 | static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); | 5376 | static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); |
5094 | 5377 | ||
5095 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ | 5378 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ |
5096 | static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, | 5379 | static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, |
5097 | u32 last_plus_one, u32 *start, | 5380 | struct sk_buff *skb, u32 last_plus_one, |
5098 | u32 base_flags, u32 mss) | 5381 | u32 *start, u32 base_flags, u32 mss) |
5099 | { | 5382 | { |
5100 | struct tg3_napi *tnapi = &tp->napi[0]; | 5383 | struct tg3 *tp = tnapi->tp; |
5101 | struct sk_buff *new_skb; | 5384 | struct sk_buff *new_skb; |
5102 | dma_addr_t new_addr = 0; | 5385 | dma_addr_t new_addr = 0; |
5103 | u32 entry = *start; | 5386 | u32 entry = *start; |
@@ -5118,16 +5401,21 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, | |||
5118 | } else { | 5401 | } else { |
5119 | /* New SKB is guaranteed to be linear. */ | 5402 | /* New SKB is guaranteed to be linear. */ |
5120 | entry = *start; | 5403 | entry = *start; |
5121 | ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE); | 5404 | new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, |
5122 | new_addr = skb_shinfo(new_skb)->dma_head; | 5405 | PCI_DMA_TODEVICE); |
5406 | /* Make sure the mapping succeeded */ | ||
5407 | if (pci_dma_mapping_error(tp->pdev, new_addr)) { | ||
5408 | ret = -1; | ||
5409 | dev_kfree_skb(new_skb); | ||
5410 | new_skb = NULL; | ||
5123 | 5411 | ||
5124 | /* Make sure new skb does not cross any 4G boundaries. | 5412 | /* Make sure new skb does not cross any 4G boundaries. |
5125 | * Drop the packet if it does. | 5413 | * Drop the packet if it does. |
5126 | */ | 5414 | */ |
5127 | if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) { | 5415 | } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && |
5128 | if (!ret) | 5416 | tg3_4g_overflow_test(new_addr, new_skb->len)) { |
5129 | skb_dma_unmap(&tp->pdev->dev, new_skb, | 5417 | pci_unmap_single(tp->pdev, new_addr, new_skb->len, |
5130 | DMA_TO_DEVICE); | 5418 | PCI_DMA_TODEVICE); |
5131 | ret = -1; | 5419 | ret = -1; |
5132 | dev_kfree_skb(new_skb); | 5420 | dev_kfree_skb(new_skb); |
5133 | new_skb = NULL; | 5421 | new_skb = NULL; |
@@ -5141,15 +5429,28 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, | |||
5141 | /* Now clean up the sw ring entries. */ | 5429 | /* Now clean up the sw ring entries. */ |
5142 | i = 0; | 5430 | i = 0; |
5143 | while (entry != last_plus_one) { | 5431 | while (entry != last_plus_one) { |
5432 | int len; | ||
5433 | |||
5144 | if (i == 0) | 5434 | if (i == 0) |
5145 | tnapi->tx_buffers[entry].skb = new_skb; | 5435 | len = skb_headlen(skb); |
5146 | else | 5436 | else |
5437 | len = skb_shinfo(skb)->frags[i-1].size; | ||
5438 | |||
5439 | pci_unmap_single(tp->pdev, | ||
5440 | pci_unmap_addr(&tnapi->tx_buffers[entry], | ||
5441 | mapping), | ||
5442 | len, PCI_DMA_TODEVICE); | ||
5443 | if (i == 0) { | ||
5444 | tnapi->tx_buffers[entry].skb = new_skb; | ||
5445 | pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | ||
5446 | new_addr); | ||
5447 | } else { | ||
5147 | tnapi->tx_buffers[entry].skb = NULL; | 5448 | tnapi->tx_buffers[entry].skb = NULL; |
5449 | } | ||
5148 | entry = NEXT_TX(entry); | 5450 | entry = NEXT_TX(entry); |
5149 | i++; | 5451 | i++; |
5150 | } | 5452 | } |
5151 | 5453 | ||
5152 | skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE); | ||
5153 | dev_kfree_skb(skb); | 5454 | dev_kfree_skb(skb); |
5154 | 5455 | ||
5155 | return ret; | 5456 | return ret; |
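The workaround above exists because some tg3 chips cannot DMA a buffer that straddles a 4 GiB boundary (or, on others, one that needs more than 40 address bits), so the driver copies the packet into a freshly allocated linear skb and re-maps it. The boundary check itself is plain address arithmetic; the stand-alone version below captures the core idea only and is not the driver's exact expression (tg3_4g_overflow_test() also reserves a few bytes of slack):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if [addr, addr + len) crosses a 4 GiB boundary, i.e. the top 32 bits
 * of the first and last byte addresses differ. */
static bool crosses_4g(uint64_t addr, uint32_t len)
{
	return (addr >> 32) != ((addr + len - 1) >> 32);
}

int main(void)
{
	printf("%d\n", crosses_4g(0xfffffff0ULL, 64));    /* 1: straddles 4 GiB */
	printf("%d\n", crosses_4g(0x100000000ULL, 64));   /* 0: entirely above  */
	return 0;
}
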
@@ -5179,21 +5480,22 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry, | |||
5179 | } | 5480 | } |
5180 | 5481 | ||
5181 | /* hard_start_xmit for devices that don't have any bugs and | 5482 | /* hard_start_xmit for devices that don't have any bugs and |
5182 | * support TG3_FLG2_HW_TSO_2 only. | 5483 | * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. |
5183 | */ | 5484 | */ |
5184 | static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | 5485 | static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, |
5185 | struct net_device *dev) | 5486 | struct net_device *dev) |
5186 | { | 5487 | { |
5187 | struct tg3 *tp = netdev_priv(dev); | 5488 | struct tg3 *tp = netdev_priv(dev); |
5188 | u32 len, entry, base_flags, mss; | 5489 | u32 len, entry, base_flags, mss; |
5189 | struct skb_shared_info *sp; | ||
5190 | dma_addr_t mapping; | 5490 | dma_addr_t mapping; |
5191 | struct tg3_napi *tnapi; | 5491 | struct tg3_napi *tnapi; |
5192 | struct netdev_queue *txq; | 5492 | struct netdev_queue *txq; |
5493 | unsigned int i, last; | ||
5494 | |||
5193 | 5495 | ||
5194 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 5496 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
5195 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; | 5497 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; |
5196 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) | 5498 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) |
5197 | tnapi++; | 5499 | tnapi++; |
5198 | 5500 | ||
5199 | /* We are running in BH disabled context with netif_tx_lock | 5501 | /* We are running in BH disabled context with netif_tx_lock |
@@ -5206,8 +5508,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | |||
5206 | netif_tx_stop_queue(txq); | 5508 | netif_tx_stop_queue(txq); |
5207 | 5509 | ||
5208 | /* This is a hard error, log it. */ | 5510 | /* This is a hard error, log it. */ |
5209 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " | 5511 | netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); |
5210 | "queue awake!\n", dev->name); | ||
5211 | } | 5512 | } |
5212 | return NETDEV_TX_BUSY; | 5513 | return NETDEV_TX_BUSY; |
5213 | } | 5514 | } |
@@ -5238,7 +5539,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | |||
5238 | hdrlen = ip_tcp_len + tcp_opt_len; | 5539 | hdrlen = ip_tcp_len + tcp_opt_len; |
5239 | } | 5540 | } |
5240 | 5541 | ||
5241 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 5542 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { |
5242 | mss |= (hdrlen & 0xc) << 12; | 5543 | mss |= (hdrlen & 0xc) << 12; |
5243 | if (hdrlen & 0x10) | 5544 | if (hdrlen & 0x10) |
5244 | base_flags |= 0x00000010; | 5545 | base_flags |= 0x00000010; |
@@ -5260,20 +5561,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | |||
5260 | (vlan_tx_tag_get(skb) << 16)); | 5561 | (vlan_tx_tag_get(skb) << 16)); |
5261 | #endif | 5562 | #endif |
5262 | 5563 | ||
5263 | if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) { | 5564 | len = skb_headlen(skb); |
5565 | |||
5566 | /* Queue skb data, a.k.a. the main skb fragment. */ | ||
5567 | mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
5568 | if (pci_dma_mapping_error(tp->pdev, mapping)) { | ||
5264 | dev_kfree_skb(skb); | 5569 | dev_kfree_skb(skb); |
5265 | goto out_unlock; | 5570 | goto out_unlock; |
5266 | } | 5571 | } |
5267 | 5572 | ||
5268 | sp = skb_shinfo(skb); | ||
5269 | |||
5270 | mapping = sp->dma_head; | ||
5271 | |||
5272 | tnapi->tx_buffers[entry].skb = skb; | 5573 | tnapi->tx_buffers[entry].skb = skb; |
5574 | pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); | ||
5273 | 5575 | ||
5274 | len = skb_headlen(skb); | 5576 | if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && |
5275 | |||
5276 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && | ||
5277 | !mss && skb->len > ETH_DATA_LEN) | 5577 | !mss && skb->len > ETH_DATA_LEN) |
5278 | base_flags |= TXD_FLAG_JMB_PKT; | 5578 | base_flags |= TXD_FLAG_JMB_PKT; |
5279 | 5579 | ||
@@ -5284,15 +5584,21 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | |||
5284 | 5584 | ||
5285 | /* Now loop through additional data fragments, and queue them. */ | 5585 | /* Now loop through additional data fragments, and queue them. */ |
5286 | if (skb_shinfo(skb)->nr_frags > 0) { | 5586 | if (skb_shinfo(skb)->nr_frags > 0) { |
5287 | unsigned int i, last; | ||
5288 | |||
5289 | last = skb_shinfo(skb)->nr_frags - 1; | 5587 | last = skb_shinfo(skb)->nr_frags - 1; |
5290 | for (i = 0; i <= last; i++) { | 5588 | for (i = 0; i <= last; i++) { |
5291 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 5589 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
5292 | 5590 | ||
5293 | len = frag->size; | 5591 | len = frag->size; |
5294 | mapping = sp->dma_maps[i]; | 5592 | mapping = pci_map_page(tp->pdev, |
5593 | frag->page, | ||
5594 | frag->page_offset, | ||
5595 | len, PCI_DMA_TODEVICE); | ||
5596 | if (pci_dma_mapping_error(tp->pdev, mapping)) | ||
5597 | goto dma_error; | ||
5598 | |||
5295 | tnapi->tx_buffers[entry].skb = NULL; | 5599 | tnapi->tx_buffers[entry].skb = NULL; |
5600 | pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | ||
5601 | mapping); | ||
5296 | 5602 | ||
5297 | tg3_set_txd(tnapi, entry, mapping, len, | 5603 | tg3_set_txd(tnapi, entry, mapping, len, |
5298 | base_flags, (i == last) | (mss << 1)); | 5604 | base_flags, (i == last) | (mss << 1)); |
@@ -5315,6 +5621,27 @@ out_unlock: | |||
5315 | mmiowb(); | 5621 | mmiowb(); |
5316 | 5622 | ||
5317 | return NETDEV_TX_OK; | 5623 | return NETDEV_TX_OK; |
5624 | |||
5625 | dma_error: | ||
5626 | last = i; | ||
5627 | entry = tnapi->tx_prod; | ||
5628 | tnapi->tx_buffers[entry].skb = NULL; | ||
5629 | pci_unmap_single(tp->pdev, | ||
5630 | pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), | ||
5631 | skb_headlen(skb), | ||
5632 | PCI_DMA_TODEVICE); | ||
5633 | for (i = 0; i <= last; i++) { | ||
5634 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
5635 | entry = NEXT_TX(entry); | ||
5636 | |||
5637 | pci_unmap_page(tp->pdev, | ||
5638 | pci_unmap_addr(&tnapi->tx_buffers[entry], | ||
5639 | mapping), | ||
5640 | frag->size, PCI_DMA_TODEVICE); | ||
5641 | } | ||
5642 | |||
5643 | dev_kfree_skb(skb); | ||
5644 | return NETDEV_TX_OK; | ||
5318 | } | 5645 | } |
5319 | 5646 | ||
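tg3_start_xmit() now maps the linear head with pci_map_single() and each page fragment with pci_map_page(), recording every mapping so the new dma_error path can walk back and release whatever had already succeeded before dropping the packet. The shape of that unwind is easy to get wrong; here is a self-contained model of the intended pattern (fake map/unmap helpers, no PCI involved, and it unmaps only what actually succeeded):

#include <stdbool.h>
#include <stddef.h>

struct frag { size_t len; };

/* Fake mapping layer: pretend every fourth mapping fails. */
static int map_calls;
static bool fake_map(size_t len, unsigned long *handle)
{
	(void)len;
	if (++map_calls % 4 == 0)
		return false;
	*handle = (unsigned long)map_calls;   /* opaque cookie, not a DMA address */
	return true;
}
static void fake_unmap(unsigned long handle, size_t len)
{
	(void)handle; (void)len;
}

/* Map head + fragments; handles[] must hold nfrags + 1 entries.
 * On failure, unmap everything mapped so far and return -1. */
static int map_packet(size_t headlen, const struct frag *frags, int nfrags,
		      unsigned long *handles)
{
	int i;

	if (!fake_map(headlen, &handles[0]))
		return -1;

	for (i = 0; i < nfrags; i++) {
		if (!fake_map(frags[i].len, &handles[i + 1]))
			goto unwind;
	}
	return 0;

unwind:
	fake_unmap(handles[0], headlen);      /* the linear head             */
	while (--i >= 0)                      /* then the frags that mapped  */
		fake_unmap(handles[i + 1], frags[i].len);
	return -1;
}
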
5320 | static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, | 5647 | static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, |
@@ -5362,12 +5689,17 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5362 | { | 5689 | { |
5363 | struct tg3 *tp = netdev_priv(dev); | 5690 | struct tg3 *tp = netdev_priv(dev); |
5364 | u32 len, entry, base_flags, mss; | 5691 | u32 len, entry, base_flags, mss; |
5365 | struct skb_shared_info *sp; | ||
5366 | int would_hit_hwbug; | 5692 | int would_hit_hwbug; |
5367 | dma_addr_t mapping; | 5693 | dma_addr_t mapping; |
5368 | struct tg3_napi *tnapi = &tp->napi[0]; | 5694 | struct tg3_napi *tnapi; |
5695 | struct netdev_queue *txq; | ||
5696 | unsigned int i, last; | ||
5369 | 5697 | ||
5370 | len = skb_headlen(skb); | 5698 | |
5699 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | ||
5700 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; | ||
5701 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | ||
5702 | tnapi++; | ||
5371 | 5703 | ||
5372 | /* We are running in BH disabled context with netif_tx_lock | 5704 | /* We are running in BH disabled context with netif_tx_lock |
5373 | * and TX reclaim runs via tp->napi.poll inside of a software | 5705 | * and TX reclaim runs via tp->napi.poll inside of a software |
@@ -5375,12 +5707,11 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5375 | * no IRQ context deadlocks to worry about either. Rejoice! | 5707 | * no IRQ context deadlocks to worry about either. Rejoice! |
5376 | */ | 5708 | */ |
5377 | if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { | 5709 | if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { |
5378 | if (!netif_queue_stopped(dev)) { | 5710 | if (!netif_tx_queue_stopped(txq)) { |
5379 | netif_stop_queue(dev); | 5711 | netif_tx_stop_queue(txq); |
5380 | 5712 | ||
5381 | /* This is a hard error, log it. */ | 5713 | /* This is a hard error, log it. */ |
5382 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " | 5714 | netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); |
5383 | "queue awake!\n", dev->name); | ||
5384 | } | 5715 | } |
5385 | return NETDEV_TX_BUSY; | 5716 | return NETDEV_TX_BUSY; |
5386 | } | 5717 | } |
@@ -5389,10 +5720,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5389 | base_flags = 0; | 5720 | base_flags = 0; |
5390 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 5721 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
5391 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | 5722 | base_flags |= TXD_FLAG_TCPUDP_CSUM; |
5392 | mss = 0; | 5723 | |
5393 | if ((mss = skb_shinfo(skb)->gso_size) != 0) { | 5724 | if ((mss = skb_shinfo(skb)->gso_size) != 0) { |
5394 | struct iphdr *iph; | 5725 | struct iphdr *iph; |
5395 | int tcp_opt_len, ip_tcp_len, hdr_len; | 5726 | u32 tcp_opt_len, ip_tcp_len, hdr_len; |
5396 | 5727 | ||
5397 | if (skb_header_cloned(skb) && | 5728 | if (skb_header_cloned(skb) && |
5398 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 5729 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { |
@@ -5423,8 +5754,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5423 | IPPROTO_TCP, | 5754 | IPPROTO_TCP, |
5424 | 0); | 5755 | 0); |
5425 | 5756 | ||
5426 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || | 5757 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { |
5427 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { | 5758 | mss |= (hdr_len & 0xc) << 12; |
5759 | if (hdr_len & 0x10) | ||
5760 | base_flags |= 0x00000010; | ||
5761 | base_flags |= (hdr_len & 0x3e0) << 5; | ||
5762 | } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) | ||
5763 | mss |= hdr_len << 9; | ||
5764 | else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || | ||
5765 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | ||
5428 | if (tcp_opt_len || iph->ihl > 5) { | 5766 | if (tcp_opt_len || iph->ihl > 5) { |
5429 | int tsflags; | 5767 | int tsflags; |
5430 | 5768 | ||
@@ -5446,22 +5784,35 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5446 | (vlan_tx_tag_get(skb) << 16)); | 5784 | (vlan_tx_tag_get(skb) << 16)); |
5447 | #endif | 5785 | #endif |
5448 | 5786 | ||
5449 | if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) { | 5787 | if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && |
5788 | !mss && skb->len > ETH_DATA_LEN) | ||
5789 | base_flags |= TXD_FLAG_JMB_PKT; | ||
5790 | |||
5791 | len = skb_headlen(skb); | ||
5792 | |||
5793 | mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
5794 | if (pci_dma_mapping_error(tp->pdev, mapping)) { | ||
5450 | dev_kfree_skb(skb); | 5795 | dev_kfree_skb(skb); |
5451 | goto out_unlock; | 5796 | goto out_unlock; |
5452 | } | 5797 | } |
5453 | 5798 | ||
5454 | sp = skb_shinfo(skb); | ||
5455 | |||
5456 | mapping = sp->dma_head; | ||
5457 | |||
5458 | tnapi->tx_buffers[entry].skb = skb; | 5799 | tnapi->tx_buffers[entry].skb = skb; |
5800 | pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); | ||
5459 | 5801 | ||
5460 | would_hit_hwbug = 0; | 5802 | would_hit_hwbug = 0; |
5461 | 5803 | ||
5462 | if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) | 5804 | if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) |
5463 | would_hit_hwbug = 1; | 5805 | would_hit_hwbug = 1; |
5464 | else if (tg3_4g_overflow_test(mapping, len)) | 5806 | |
5807 | if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && | ||
5808 | tg3_4g_overflow_test(mapping, len)) | ||
5809 | would_hit_hwbug = 1; | ||
5810 | |||
5811 | if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && | ||
5812 | tg3_40bit_overflow_test(tp, mapping, len)) | ||
5813 | would_hit_hwbug = 1; | ||
5814 | |||
5815 | if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) | ||
5465 | would_hit_hwbug = 1; | 5816 | would_hit_hwbug = 1; |
5466 | 5817 | ||
5467 | tg3_set_txd(tnapi, entry, mapping, len, base_flags, | 5818 | tg3_set_txd(tnapi, entry, mapping, len, base_flags, |
@@ -5471,21 +5822,32 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5471 | 5822 | ||
5472 | /* Now loop through additional data fragments, and queue them. */ | 5823 | /* Now loop through additional data fragments, and queue them. */ |
5473 | if (skb_shinfo(skb)->nr_frags > 0) { | 5824 | if (skb_shinfo(skb)->nr_frags > 0) { |
5474 | unsigned int i, last; | ||
5475 | |||
5476 | last = skb_shinfo(skb)->nr_frags - 1; | 5825 | last = skb_shinfo(skb)->nr_frags - 1; |
5477 | for (i = 0; i <= last; i++) { | 5826 | for (i = 0; i <= last; i++) { |
5478 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 5827 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
5479 | 5828 | ||
5480 | len = frag->size; | 5829 | len = frag->size; |
5481 | mapping = sp->dma_maps[i]; | 5830 | mapping = pci_map_page(tp->pdev, |
5831 | frag->page, | ||
5832 | frag->page_offset, | ||
5833 | len, PCI_DMA_TODEVICE); | ||
5482 | 5834 | ||
5483 | tnapi->tx_buffers[entry].skb = NULL; | 5835 | tnapi->tx_buffers[entry].skb = NULL; |
5836 | pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | ||
5837 | mapping); | ||
5838 | if (pci_dma_mapping_error(tp->pdev, mapping)) | ||
5839 | goto dma_error; | ||
5840 | |||
5841 | if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && | ||
5842 | len <= 8) | ||
5843 | would_hit_hwbug = 1; | ||
5484 | 5844 | ||
5485 | if (tg3_4g_overflow_test(mapping, len)) | 5845 | if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && |
5846 | tg3_4g_overflow_test(mapping, len)) | ||
5486 | would_hit_hwbug = 1; | 5847 | would_hit_hwbug = 1; |
5487 | 5848 | ||
5488 | if (tg3_40bit_overflow_test(tp, mapping, len)) | 5849 | if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && |
5850 | tg3_40bit_overflow_test(tp, mapping, len)) | ||
5489 | would_hit_hwbug = 1; | 5851 | would_hit_hwbug = 1; |
5490 | 5852 | ||
5491 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 5853 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
@@ -5509,7 +5871,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5509 | /* If the workaround fails due to memory/mapping | 5871 | /* If the workaround fails due to memory/mapping |
5510 | * failure, silently drop this packet. | 5872 | * failure, silently drop this packet. |
5511 | */ | 5873 | */ |
5512 | if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, | 5874 | if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one, |
5513 | &start, base_flags, mss)) | 5875 | &start, base_flags, mss)) |
5514 | goto out_unlock; | 5876 | goto out_unlock; |
5515 | 5877 | ||
@@ -5517,19 +5879,40 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5517 | } | 5879 | } |
5518 | 5880 | ||
5519 | /* Packets are ready, update Tx producer idx local and on card. */ | 5881 | /* Packets are ready, update Tx producer idx local and on card. */ |
5520 | tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry); | 5882 | tw32_tx_mbox(tnapi->prodmbox, entry); |
5521 | 5883 | ||
5522 | tnapi->tx_prod = entry; | 5884 | tnapi->tx_prod = entry; |
5523 | if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { | 5885 | if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { |
5524 | netif_stop_queue(dev); | 5886 | netif_tx_stop_queue(txq); |
5525 | if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) | 5887 | if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) |
5526 | netif_wake_queue(tp->dev); | 5888 | netif_tx_wake_queue(txq); |
5527 | } | 5889 | } |
5528 | 5890 | ||
5529 | out_unlock: | 5891 | out_unlock: |
5530 | mmiowb(); | 5892 | mmiowb(); |
5531 | 5893 | ||
5532 | return NETDEV_TX_OK; | 5894 | return NETDEV_TX_OK; |
5895 | |||
5896 | dma_error: | ||
5897 | last = i; | ||
5898 | entry = tnapi->tx_prod; | ||
5899 | tnapi->tx_buffers[entry].skb = NULL; | ||
5900 | pci_unmap_single(tp->pdev, | ||
5901 | pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), | ||
5902 | skb_headlen(skb), | ||
5903 | PCI_DMA_TODEVICE); | ||
5904 | for (i = 0; i <= last; i++) { | ||
5905 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
5906 | entry = NEXT_TX(entry); | ||
5907 | |||
5908 | pci_unmap_page(tp->pdev, | ||
5909 | pci_unmap_addr(&tnapi->tx_buffers[entry], | ||
5910 | mapping), | ||
5911 | frag->size, PCI_DMA_TODEVICE); | ||
5912 | } | ||
5913 | |||
5914 | dev_kfree_skb(skb); | ||
5915 | return NETDEV_TX_OK; | ||
5533 | } | 5916 | } |
5534 | 5917 | ||
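Both transmit paths stop the per-queue netdev queue when fewer than MAX_SKB_FRAGS + 1 descriptors remain and wake it only once TG3_TX_WAKEUP_THRESH (a quarter of the pending ring) is free again, which keeps the queue from flapping between stopped and awake on every completion. A toy model of that hysteresis, with an arbitrary ring size and hypothetical stop/wake hooks in place of netif_tx_stop_queue()/netif_tx_wake_queue():

#include <stdbool.h>

#define RING_SIZE     512
#define MAX_FRAGS     17                      /* ~ MAX_SKB_FRAGS + 1 in the driver */
#define WAKE_THRESH   (RING_SIZE / 4)

struct txq {
	unsigned int prod, cons;
	bool stopped;
};

static unsigned int tx_avail(const struct txq *q)
{
	return RING_SIZE - ((q->prod - q->cons) % RING_SIZE);
}

/* Called after queuing a packet that consumed 'descs' descriptors. */
static void after_xmit(struct txq *q, unsigned int descs)
{
	q->prod = (q->prod + descs) % RING_SIZE;
	if (tx_avail(q) <= MAX_FRAGS) {
		q->stopped = true;            /* netif_tx_stop_queue()            */
		if (tx_avail(q) > WAKE_THRESH)
			q->stopped = false;   /* raced with completions: wake again */
	}
}

/* Called from the completion path after reclaiming 'descs' descriptors. */
static void after_reclaim(struct txq *q, unsigned int descs)
{
	q->cons = (q->cons + descs) % RING_SIZE;
	if (q->stopped && tx_avail(q) > WAKE_THRESH)
		q->stopped = false;           /* netif_tx_wake_queue() */
}
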
5535 | static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, | 5918 | static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, |
@@ -5594,36 +5977,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp, | |||
5594 | struct tg3_rx_prodring_set *tpr) | 5977 | struct tg3_rx_prodring_set *tpr) |
5595 | { | 5978 | { |
5596 | int i; | 5979 | int i; |
5597 | struct ring_info *rxp; | ||
5598 | |||
5599 | for (i = 0; i < TG3_RX_RING_SIZE; i++) { | ||
5600 | rxp = &tpr->rx_std_buffers[i]; | ||
5601 | 5980 | ||
5602 | if (rxp->skb == NULL) | 5981 | if (tpr != &tp->prodring[0]) { |
5603 | continue; | 5982 | for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; |
5983 | i = (i + 1) % TG3_RX_RING_SIZE) | ||
5984 | tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], | ||
5985 | tp->rx_pkt_map_sz); | ||
5986 | |||
5987 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | ||
5988 | for (i = tpr->rx_jmb_cons_idx; | ||
5989 | i != tpr->rx_jmb_prod_idx; | ||
5990 | i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) { | ||
5991 | tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], | ||
5992 | TG3_RX_JMB_MAP_SZ); | ||
5993 | } | ||
5994 | } | ||
5604 | 5995 | ||
5605 | pci_unmap_single(tp->pdev, | 5996 | return; |
5606 | pci_unmap_addr(rxp, mapping), | ||
5607 | tp->rx_pkt_map_sz, | ||
5608 | PCI_DMA_FROMDEVICE); | ||
5609 | dev_kfree_skb_any(rxp->skb); | ||
5610 | rxp->skb = NULL; | ||
5611 | } | 5997 | } |
5612 | 5998 | ||
5613 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 5999 | for (i = 0; i < TG3_RX_RING_SIZE; i++) |
5614 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { | 6000 | tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], |
5615 | rxp = &tpr->rx_jmb_buffers[i]; | 6001 | tp->rx_pkt_map_sz); |
5616 | |||
5617 | if (rxp->skb == NULL) | ||
5618 | continue; | ||
5619 | 6002 | ||
5620 | pci_unmap_single(tp->pdev, | 6003 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { |
5621 | pci_unmap_addr(rxp, mapping), | 6004 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) |
5622 | TG3_RX_JMB_MAP_SZ, | 6005 | tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], |
5623 | PCI_DMA_FROMDEVICE); | 6006 | TG3_RX_JMB_MAP_SZ); |
5624 | dev_kfree_skb_any(rxp->skb); | ||
5625 | rxp->skb = NULL; | ||
5626 | } | ||
5627 | } | 6007 | } |
5628 | } | 6008 | } |
5629 | 6009 | ||
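The reworked tg3_rx_prodring_free() treats the default ring and the per-vector rings differently: the default ring is freed slot by slot in full, while for the per-vector rings only the entries between the consumer and producer index can still hold an skb, so it walks just that live window with modular increments. A compact model of the windowed loop, with a hypothetical free helper:

#include <stddef.h>

#define RING_SIZE 512

struct slot { void *skb; };

static void free_one(struct slot *s)
{
	s->skb = NULL;          /* the driver unmaps and frees the skb here */
}

/* Free only the entries the ring could still own: [cons, prod), wrapping. */
static void free_live_window(struct slot *ring, unsigned int cons,
			     unsigned int prod)
{
	unsigned int i;

	for (i = cons; i != prod; i = (i + 1) % RING_SIZE)
		free_one(&ring[i]);
}
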
@@ -5638,7 +6018,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
5638 | struct tg3_rx_prodring_set *tpr) | 6018 | struct tg3_rx_prodring_set *tpr) |
5639 | { | 6019 | { |
5640 | u32 i, rx_pkt_dma_sz; | 6020 | u32 i, rx_pkt_dma_sz; |
5641 | struct tg3_napi *tnapi = &tp->napi[0]; | 6021 | |
6022 | tpr->rx_std_cons_idx = 0; | ||
6023 | tpr->rx_std_prod_idx = 0; | ||
6024 | tpr->rx_jmb_cons_idx = 0; | ||
6025 | tpr->rx_jmb_prod_idx = 0; | ||
6026 | |||
6027 | if (tpr != &tp->prodring[0]) { | ||
6028 | memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE); | ||
6029 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) | ||
6030 | memset(&tpr->rx_jmb_buffers[0], 0, | ||
6031 | TG3_RX_JMB_BUFF_RING_SIZE); | ||
6032 | goto done; | ||
6033 | } | ||
5642 | 6034 | ||
5643 | /* Zero out all descriptors. */ | 6035 | /* Zero out all descriptors. */ |
5644 | memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); | 6036 | memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); |
@@ -5665,12 +6057,9 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
5665 | 6057 | ||
5666 | /* Now allocate fresh SKBs for each rx ring. */ | 6058 | /* Now allocate fresh SKBs for each rx ring. */ |
5667 | for (i = 0; i < tp->rx_pending; i++) { | 6059 | for (i = 0; i < tp->rx_pending; i++) { |
5668 | if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) { | 6060 | if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { |
5669 | printk(KERN_WARNING PFX | 6061 | netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n", |
5670 | "%s: Using a smaller RX standard ring, " | 6062 | i, tp->rx_pending); |
5671 | "only %d out of %d buffers were allocated " | ||
5672 | "successfully.\n", | ||
5673 | tp->dev->name, i, tp->rx_pending); | ||
5674 | if (i == 0) | 6063 | if (i == 0) |
5675 | goto initfail; | 6064 | goto initfail; |
5676 | tp->rx_pending = i; | 6065 | tp->rx_pending = i; |
@@ -5683,31 +6072,28 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
5683 | 6072 | ||
5684 | memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES); | 6073 | memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES); |
5685 | 6074 | ||
5686 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { | 6075 | if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)) |
5687 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { | 6076 | goto done; |
5688 | struct tg3_rx_buffer_desc *rxd; | ||
5689 | 6077 | ||
5690 | rxd = &tpr->rx_jmb[i].std; | 6078 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { |
5691 | rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; | 6079 | struct tg3_rx_buffer_desc *rxd; |
5692 | rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | | ||
5693 | RXD_FLAG_JUMBO; | ||
5694 | rxd->opaque = (RXD_OPAQUE_RING_JUMBO | | ||
5695 | (i << RXD_OPAQUE_INDEX_SHIFT)); | ||
5696 | } | ||
5697 | 6080 | ||
5698 | for (i = 0; i < tp->rx_jumbo_pending; i++) { | 6081 | rxd = &tpr->rx_jmb[i].std; |
5699 | if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO, | 6082 | rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; |
5700 | -1, i) < 0) { | 6083 | rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | |
5701 | printk(KERN_WARNING PFX | 6084 | RXD_FLAG_JUMBO; |
5702 | "%s: Using a smaller RX jumbo ring, " | 6085 | rxd->opaque = (RXD_OPAQUE_RING_JUMBO | |
5703 | "only %d out of %d buffers were " | 6086 | (i << RXD_OPAQUE_INDEX_SHIFT)); |
5704 | "allocated successfully.\n", | 6087 | } |
5705 | tp->dev->name, i, tp->rx_jumbo_pending); | 6088 | |
5706 | if (i == 0) | 6089 | for (i = 0; i < tp->rx_jumbo_pending; i++) { |
5707 | goto initfail; | 6090 | if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { |
5708 | tp->rx_jumbo_pending = i; | 6091 | netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n", |
5709 | break; | 6092 | i, tp->rx_jumbo_pending); |
5710 | } | 6093 | if (i == 0) |
6094 | goto initfail; | ||
6095 | tp->rx_jumbo_pending = i; | ||
6096 | break; | ||
5711 | } | 6097 | } |
5712 | } | 6098 | } |
5713 | 6099 | ||
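tg3_rx_prodring_alloc() still pre-posts rx_pending (and rx_jumbo_pending) buffers, but on a partial allocation failure it logs once, shrinks the pending count to however many buffers it did get, and gives up entirely only when not even the first buffer could be allocated. That "shrink instead of fail" loop looks like this in isolation, with a hypothetical allocator in place of tg3_alloc_rx_skb():

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for tg3_alloc_rx_skb(): returns 0 on success, -1 on failure. */
static int alloc_rx_buffer(unsigned int index)
{
	(void)index;
	return rand() % 16 ? 0 : -1;      /* fail occasionally for demonstration */
}

/* Post 'wanted' buffers; return how many were actually posted, or -1 if
 * the very first allocation failed (fatal for the ring). */
static int fill_ring(unsigned int wanted)
{
	unsigned int i;

	for (i = 0; i < wanted; i++) {
		if (alloc_rx_buffer(i) < 0) {
			if (i == 0)
				return -1;
			fprintf(stderr,
				"using a smaller RX ring, only %u of %u buffers allocated\n",
				i, wanted);
			return (int)i;    /* caller shrinks rx_pending to this */
		}
	}
	return (int)wanted;
}
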
@@ -5741,8 +6127,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp, | |||
5741 | static int tg3_rx_prodring_init(struct tg3 *tp, | 6127 | static int tg3_rx_prodring_init(struct tg3 *tp, |
5742 | struct tg3_rx_prodring_set *tpr) | 6128 | struct tg3_rx_prodring_set *tpr) |
5743 | { | 6129 | { |
5744 | tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) * | 6130 | tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL); |
5745 | TG3_RX_RING_SIZE, GFP_KERNEL); | ||
5746 | if (!tpr->rx_std_buffers) | 6131 | if (!tpr->rx_std_buffers) |
5747 | return -ENOMEM; | 6132 | return -ENOMEM; |
5748 | 6133 | ||
@@ -5752,8 +6137,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp, | |||
5752 | goto err_out; | 6137 | goto err_out; |
5753 | 6138 | ||
5754 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 6139 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { |
5755 | tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) * | 6140 | tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE, |
5756 | TG3_RX_JUMBO_RING_SIZE, | ||
5757 | GFP_KERNEL); | 6141 | GFP_KERNEL); |
5758 | if (!tpr->rx_jmb_buffers) | 6142 | if (!tpr->rx_jmb_buffers) |
5759 | goto err_out; | 6143 | goto err_out; |
@@ -5790,8 +6174,9 @@ static void tg3_free_rings(struct tg3 *tp) | |||
5790 | continue; | 6174 | continue; |
5791 | 6175 | ||
5792 | for (i = 0; i < TG3_TX_RING_SIZE; ) { | 6176 | for (i = 0; i < TG3_TX_RING_SIZE; ) { |
5793 | struct tx_ring_info *txp; | 6177 | struct ring_info *txp; |
5794 | struct sk_buff *skb; | 6178 | struct sk_buff *skb; |
6179 | unsigned int k; | ||
5795 | 6180 | ||
5796 | txp = &tnapi->tx_buffers[i]; | 6181 | txp = &tnapi->tx_buffers[i]; |
5797 | skb = txp->skb; | 6182 | skb = txp->skb; |
@@ -5801,17 +6186,28 @@ static void tg3_free_rings(struct tg3 *tp) | |||
5801 | continue; | 6186 | continue; |
5802 | } | 6187 | } |
5803 | 6188 | ||
5804 | skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE); | 6189 | pci_unmap_single(tp->pdev, |
5805 | 6190 | pci_unmap_addr(txp, mapping), | |
6191 | skb_headlen(skb), | ||
6192 | PCI_DMA_TODEVICE); | ||
5806 | txp->skb = NULL; | 6193 | txp->skb = NULL; |
5807 | 6194 | ||
5808 | i += skb_shinfo(skb)->nr_frags + 1; | 6195 | i++; |
6196 | |||
6197 | for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) { | ||
6198 | txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; | ||
6199 | pci_unmap_page(tp->pdev, | ||
6200 | pci_unmap_addr(txp, mapping), | ||
6201 | skb_shinfo(skb)->frags[k].size, | ||
6202 | PCI_DMA_TODEVICE); | ||
6203 | i++; | ||
6204 | } | ||
5809 | 6205 | ||
5810 | dev_kfree_skb_any(skb); | 6206 | dev_kfree_skb_any(skb); |
5811 | } | 6207 | } |
5812 | } | ||
5813 | 6208 | ||
5814 | tg3_rx_prodring_free(tp, &tp->prodring[0]); | 6209 | tg3_rx_prodring_free(tp, &tp->prodring[j]); |
6210 | } | ||
5815 | } | 6211 | } |
5816 | 6212 | ||
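With skb_dma_unmap() gone, tg3_free_rings() has to undo each mapping individually: one pci_unmap_single() for the linear head at the skb's slot, then one pci_unmap_page() per fragment in the slots that follow, advancing the ring index as it goes. A sketch of that walk with fake bookkeeping, not the driver's structures:

#include <stddef.h>

#define TX_RING_SIZE 512

struct tx_slot {
	void *skb;            /* non-NULL only on the head slot of a packet */
	unsigned long dma;    /* cookie recorded when the slot was mapped   */
	size_t len;
};

static void fake_unmap(unsigned long dma, size_t len) { (void)dma; (void)len; }

/* Release one queued packet starting at *idx; advances *idx past it. */
static void free_tx_packet(struct tx_slot *ring, unsigned int *idx,
			   unsigned int nfrags)
{
	unsigned int i = *idx, k;

	fake_unmap(ring[i].dma, ring[i].len);     /* the linear head */
	ring[i].skb = NULL;
	i = (i + 1) % TX_RING_SIZE;

	for (k = 0; k < nfrags; k++) {            /* one slot per fragment */
		fake_unmap(ring[i].dma, ring[i].len);
		i = (i + 1) % TX_RING_SIZE;
	}

	*idx = i;                                 /* the skb itself is freed after this */
}
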
5817 | /* Initialize tx/rx rings for packet processing. | 6213 | /* Initialize tx/rx rings for packet processing. |
@@ -5845,9 +6241,14 @@ static int tg3_init_rings(struct tg3 *tp) | |||
5845 | tnapi->rx_rcb_ptr = 0; | 6241 | tnapi->rx_rcb_ptr = 0; |
5846 | if (tnapi->rx_rcb) | 6242 | if (tnapi->rx_rcb) |
5847 | memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); | 6243 | memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); |
6244 | |||
6245 | if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) { | ||
6246 | tg3_free_rings(tp); | ||
6247 | return -ENOMEM; | ||
6248 | } | ||
5848 | } | 6249 | } |
5849 | 6250 | ||
5850 | return tg3_rx_prodring_alloc(tp, &tp->prodring[0]); | 6251 | return 0; |
5851 | } | 6252 | } |
5852 | 6253 | ||
5853 | /* | 6254 | /* |
@@ -5891,7 +6292,8 @@ static void tg3_free_consistent(struct tg3 *tp) | |||
5891 | tp->hw_stats = NULL; | 6292 | tp->hw_stats = NULL; |
5892 | } | 6293 | } |
5893 | 6294 | ||
5894 | tg3_rx_prodring_fini(tp, &tp->prodring[0]); | 6295 | for (i = 0; i < tp->irq_cnt; i++) |
6296 | tg3_rx_prodring_fini(tp, &tp->prodring[i]); | ||
5895 | } | 6297 | } |
5896 | 6298 | ||
5897 | /* | 6299 | /* |
@@ -5902,8 +6304,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
5902 | { | 6304 | { |
5903 | int i; | 6305 | int i; |
5904 | 6306 | ||
5905 | if (tg3_rx_prodring_init(tp, &tp->prodring[0])) | 6307 | for (i = 0; i < tp->irq_cnt; i++) { |
5906 | return -ENOMEM; | 6308 | if (tg3_rx_prodring_init(tp, &tp->prodring[i])) |
6309 | goto err_out; | ||
6310 | } | ||
5907 | 6311 | ||
5908 | tp->hw_stats = pci_alloc_consistent(tp->pdev, | 6312 | tp->hw_stats = pci_alloc_consistent(tp->pdev, |
5909 | sizeof(struct tg3_hw_stats), | 6313 | sizeof(struct tg3_hw_stats), |
@@ -5926,6 +6330,24 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
5926 | memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); | 6330 | memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); |
5927 | sblk = tnapi->hw_status; | 6331 | sblk = tnapi->hw_status; |
5928 | 6332 | ||
6333 | /* If multivector TSS is enabled, vector 0 does not handle | ||
6334 | * tx interrupts. Don't allocate any resources for it. | ||
6335 | */ | ||
6336 | if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) || | ||
6337 | (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) { | ||
6338 | tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * | ||
6339 | TG3_TX_RING_SIZE, | ||
6340 | GFP_KERNEL); | ||
6341 | if (!tnapi->tx_buffers) | ||
6342 | goto err_out; | ||
6343 | |||
6344 | tnapi->tx_ring = pci_alloc_consistent(tp->pdev, | ||
6345 | TG3_TX_RING_BYTES, | ||
6346 | &tnapi->tx_desc_mapping); | ||
6347 | if (!tnapi->tx_ring) | ||
6348 | goto err_out; | ||
6349 | } | ||
6350 | |||
5929 | /* | 6351 | /* |
5930 | * When RSS is enabled, the status block format changes | 6352 | * When RSS is enabled, the status block format changes |
5931 | * slightly. The "rx_jumbo_consumer", "reserved", | 6353 | * slightly. The "rx_jumbo_consumer", "reserved", |
@@ -5947,6 +6369,8 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
5947 | break; | 6369 | break; |
5948 | } | 6370 | } |
5949 | 6371 | ||
6372 | tnapi->prodring = &tp->prodring[i]; | ||
6373 | |||
5950 | /* | 6374 | /* |
5951 | * If multivector RSS is enabled, vector 0 does not handle | 6375 | * If multivector RSS is enabled, vector 0 does not handle |
5952 | * rx or tx interrupts. Don't allocate any resources for it. | 6376 | * rx or tx interrupts. Don't allocate any resources for it. |
@@ -5961,17 +6385,6 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
5961 | goto err_out; | 6385 | goto err_out; |
5962 | 6386 | ||
5963 | memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); | 6387 | memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); |
5964 | |||
5965 | tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) * | ||
5966 | TG3_TX_RING_SIZE, GFP_KERNEL); | ||
5967 | if (!tnapi->tx_buffers) | ||
5968 | goto err_out; | ||
5969 | |||
5970 | tnapi->tx_ring = pci_alloc_consistent(tp->pdev, | ||
5971 | TG3_TX_RING_BYTES, | ||
5972 | &tnapi->tx_desc_mapping); | ||
5973 | if (!tnapi->tx_ring) | ||
5974 | goto err_out; | ||
5975 | } | 6388 | } |
5976 | 6389 | ||
5977 | return 0; | 6390 | return 0; |
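The allocation change above encodes a simple ownership rule: without TSS, only vector 0 gets a TX ring and tx_buffers; with TSS enabled, vector 0 handles no TX at all and every other vector gets its own TX resources (mirroring the RSS rule for RX a few lines later). The condition (!i && !tss) || (i && tss) reduces to a one-liner:

#include <stdbool.h>

/* True if MSI-X vector 'i' should own TX descriptors and buffers. */
static bool vector_handles_tx(int i, bool tss_enabled)
{
	return tss_enabled ? (i != 0) : (i == 0);
}

With tss_enabled false this reproduces the pre-patch behaviour, where vector 0 alone carried the TX ring.
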
@@ -6020,8 +6433,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int | |||
6020 | } | 6433 | } |
6021 | 6434 | ||
6022 | if (i == MAX_WAIT_CNT && !silent) { | 6435 | if (i == MAX_WAIT_CNT && !silent) { |
6023 | printk(KERN_ERR PFX "tg3_stop_block timed out, " | 6436 | pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", |
6024 | "ofs=%lx enable_bit=%x\n", | ||
6025 | ofs, enable_bit); | 6437 | ofs, enable_bit); |
6026 | return -ENODEV; | 6438 | return -ENODEV; |
6027 | } | 6439 | } |
@@ -6068,9 +6480,8 @@ static int tg3_abort_hw(struct tg3 *tp, int silent) | |||
6068 | break; | 6480 | break; |
6069 | } | 6481 | } |
6070 | if (i >= MAX_WAIT_CNT) { | 6482 | if (i >= MAX_WAIT_CNT) { |
6071 | printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, " | 6483 | netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n", |
6072 | "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n", | 6484 | __func__, tr32(MAC_TX_MODE)); |
6073 | tp->dev->name, tr32(MAC_TX_MODE)); | ||
6074 | err |= -ENODEV; | 6485 | err |= -ENODEV; |
6075 | } | 6486 | } |
6076 | 6487 | ||
@@ -6291,8 +6702,14 @@ static int tg3_poll_fw(struct tg3 *tp) | |||
6291 | !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { | 6702 | !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { |
6292 | tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; | 6703 | tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; |
6293 | 6704 | ||
6294 | printk(KERN_INFO PFX "%s: No firmware running.\n", | 6705 | netdev_info(tp->dev, "No firmware running\n"); |
6295 | tp->dev->name); | 6706 | } |
6707 | |||
6708 | if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { | ||
6709 | /* The 57765 A0 needs a little more | ||
6710 | * time to do some important work. | ||
6711 | */ | ||
6712 | mdelay(10); | ||
6296 | } | 6713 | } |
6297 | 6714 | ||
6298 | return 0; | 6715 | return 0; |
@@ -6580,10 +6997,35 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
6580 | 6997 | ||
6581 | tg3_mdio_start(tp); | 6998 | tg3_mdio_start(tp); |
6582 | 6999 | ||
7000 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { | ||
7001 | u8 phy_addr; | ||
7002 | |||
7003 | phy_addr = tp->phy_addr; | ||
7004 | tp->phy_addr = TG3_PHY_PCIE_ADDR; | ||
7005 | |||
7006 | tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, | ||
7007 | TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT); | ||
7008 | val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL | | ||
7009 | TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL | | ||
7010 | TG3_PCIEPHY_TX0CTRL1_NB_EN; | ||
7011 | tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val); | ||
7012 | udelay(10); | ||
7013 | |||
7014 | tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, | ||
7015 | TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT); | ||
7016 | val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN | | ||
7017 | TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN; | ||
7018 | tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val); | ||
7019 | udelay(10); | ||
7020 | |||
7021 | tp->phy_addr = phy_addr; | ||
7022 | } | ||
7023 | |||
6583 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && | 7024 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && |
6584 | tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && | 7025 | tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && |
6585 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && | 7026 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && |
6586 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { | 7027 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && |
7028 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { | ||
6587 | val = tr32(0x7c00); | 7029 | val = tr32(0x7c00); |
6588 | 7030 | ||
6589 | tw32(0x7c00, val | (1 << 25)); | 7031 | tw32(0x7c00, val | (1 << 25)); |
@@ -6688,10 +7130,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) | |||
6688 | } | 7130 | } |
6689 | 7131 | ||
6690 | if (i >= 10000) { | 7132 | if (i >= 10000) { |
6691 | printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, " | 7133 | netdev_err(tp->dev, "%s timed out, %s CPU\n", |
6692 | "and %s CPU\n", | 7134 | __func__, offset == RX_CPU_BASE ? "RX" : "TX"); |
6693 | tp->dev->name, | ||
6694 | (offset == RX_CPU_BASE ? "RX" : "TX")); | ||
6695 | return -ENODEV; | 7135 | return -ENODEV; |
6696 | } | 7136 | } |
6697 | 7137 | ||
@@ -6716,9 +7156,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b | |||
6716 | 7156 | ||
6717 | if (cpu_base == TX_CPU_BASE && | 7157 | if (cpu_base == TX_CPU_BASE && |
6718 | (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 7158 | (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
6719 | printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load " | 7159 | netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n", |
6720 | "TX cpu firmware on %s which is 5705.\n", | 7160 | __func__); |
6721 | tp->dev->name); | ||
6722 | return -EINVAL; | 7161 | return -EINVAL; |
6723 | } | 7162 | } |
6724 | 7163 | ||
@@ -6797,10 +7236,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) | |||
6797 | udelay(1000); | 7236 | udelay(1000); |
6798 | } | 7237 | } |
6799 | if (i >= 5) { | 7238 | if (i >= 5) { |
6800 | printk(KERN_ERR PFX "tg3_load_firmware fails for %s " | 7239 | netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n", |
6801 | "to set RX CPU PC, is %08x should be %08x\n", | 7240 | tr32(RX_CPU_BASE + CPU_PC), info.fw_base); |
6802 | tp->dev->name, tr32(RX_CPU_BASE + CPU_PC), | ||
6803 | info.fw_base); | ||
6804 | return -ENODEV; | 7241 | return -ENODEV; |
6805 | } | 7242 | } |
6806 | tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); | 7243 | tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); |
@@ -6863,10 +7300,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp) | |||
6863 | udelay(1000); | 7300 | udelay(1000); |
6864 | } | 7301 | } |
6865 | if (i >= 5) { | 7302 | if (i >= 5) { |
6866 | printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s " | 7303 | netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n", |
6867 | "to set CPU PC, is %08x should be %08x\n", | 7304 | __func__, tr32(cpu_base + CPU_PC), info.fw_base); |
6868 | tp->dev->name, tr32(cpu_base + CPU_PC), | ||
6869 | info.fw_base); | ||
6870 | return -ENODEV; | 7305 | return -ENODEV; |
6871 | } | 7306 | } |
6872 | tw32(cpu_base + CPU_STATE, 0xffffffff); | 7307 | tw32(cpu_base + CPU_STATE, 0xffffffff); |
@@ -6935,19 +7370,21 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | |||
6935 | { | 7370 | { |
6936 | int i; | 7371 | int i; |
6937 | 7372 | ||
6938 | if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { | 7373 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) { |
6939 | tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); | 7374 | tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); |
6940 | tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); | 7375 | tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); |
6941 | tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); | 7376 | tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); |
6942 | |||
6943 | tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); | ||
6944 | tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); | ||
6945 | tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); | ||
6946 | } else { | 7377 | } else { |
6947 | tw32(HOSTCC_TXCOL_TICKS, 0); | 7378 | tw32(HOSTCC_TXCOL_TICKS, 0); |
6948 | tw32(HOSTCC_TXMAX_FRAMES, 0); | 7379 | tw32(HOSTCC_TXMAX_FRAMES, 0); |
6949 | tw32(HOSTCC_TXCOAL_MAXF_INT, 0); | 7380 | tw32(HOSTCC_TXCOAL_MAXF_INT, 0); |
7381 | } | ||
6950 | 7382 | ||
7383 | if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { | ||
7384 | tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); | ||
7385 | tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); | ||
7386 | tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); | ||
7387 | } else { | ||
6951 | tw32(HOSTCC_RXCOL_TICKS, 0); | 7388 | tw32(HOSTCC_RXCOL_TICKS, 0); |
6952 | tw32(HOSTCC_RXMAX_FRAMES, 0); | 7389 | tw32(HOSTCC_RXMAX_FRAMES, 0); |
6953 | tw32(HOSTCC_RXCOAL_MAXF_INT, 0); | 7390 | tw32(HOSTCC_RXCOAL_MAXF_INT, 0); |
@@ -6970,25 +7407,31 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | |||
6970 | 7407 | ||
6971 | reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; | 7408 | reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; |
6972 | tw32(reg, ec->rx_coalesce_usecs); | 7409 | tw32(reg, ec->rx_coalesce_usecs); |
6973 | reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; | ||
6974 | tw32(reg, ec->tx_coalesce_usecs); | ||
6975 | reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; | 7410 | reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; |
6976 | tw32(reg, ec->rx_max_coalesced_frames); | 7411 | tw32(reg, ec->rx_max_coalesced_frames); |
6977 | reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; | ||
6978 | tw32(reg, ec->tx_max_coalesced_frames); | ||
6979 | reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; | 7412 | reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; |
6980 | tw32(reg, ec->rx_max_coalesced_frames_irq); | 7413 | tw32(reg, ec->rx_max_coalesced_frames_irq); |
6981 | reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; | 7414 | |
6982 | tw32(reg, ec->tx_max_coalesced_frames_irq); | 7415 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { |
7416 | reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; | ||
7417 | tw32(reg, ec->tx_coalesce_usecs); | ||
7418 | reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; | ||
7419 | tw32(reg, ec->tx_max_coalesced_frames); | ||
7420 | reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; | ||
7421 | tw32(reg, ec->tx_max_coalesced_frames_irq); | ||
7422 | } | ||
6983 | } | 7423 | } |
6984 | 7424 | ||
6985 | for (; i < tp->irq_max - 1; i++) { | 7425 | for (; i < tp->irq_max - 1; i++) { |
6986 | tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); | 7426 | tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); |
6987 | tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); | ||
6988 | tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); | 7427 | tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); |
6989 | tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); | ||
6990 | tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); | 7428 | tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); |
6991 | tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); | 7429 | |
7430 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { | ||
7431 | tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); | ||
7432 | tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); | ||
7433 | tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); | ||
7434 | } | ||
6992 | } | 7435 | } |
6993 | } | 7436 | } |
6994 | 7437 | ||
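The coalescing rework programs the per-vector HOSTCC_*_VEC1 registers at a fixed 0x18-byte stride per additional vector, and writes the TX-side parameters per vector only when TSS is enabled (zeroing them otherwise). The offset computation is just base + vector * stride; a small sketch, where the base address is a made-up value for illustration and the real constants come from tg3.h:

#include <stdint.h>
#include <stdio.h>

#define HOSTCC_VEC_STRIDE      0x18
#define RXCOL_TICKS_VEC1_BASE  0x00003c80u   /* hypothetical address for the sketch */

/* Register address for vector (i + 1); vector 0 uses the legacy registers. */
static uint32_t vec_reg(uint32_t base, unsigned int i)
{
	return base + i * HOSTCC_VEC_STRIDE;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("vec %u rxcol_ticks reg = 0x%08x\n", i + 1,
		       vec_reg(RXCOL_TICKS_VEC1_BASE, i));
	return 0;
}
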
@@ -7002,6 +7445,8 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7002 | /* Disable all transmit rings but the first. */ | 7445 | /* Disable all transmit rings but the first. */ |
7003 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 7446 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
7004 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; | 7447 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; |
7448 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
7449 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; | ||
7005 | else | 7450 | else |
7006 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; | 7451 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; |
7007 | 7452 | ||
@@ -7016,7 +7461,8 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7016 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; | 7461 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; |
7017 | else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 7462 | else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
7018 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; | 7463 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; |
7019 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 7464 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
7465 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
7020 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; | 7466 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; |
7021 | else | 7467 | else |
7022 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; | 7468 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; |
@@ -7034,10 +7480,13 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7034 | for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { | 7480 | for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { |
7035 | tp->napi[i].tx_prod = 0; | 7481 | tp->napi[i].tx_prod = 0; |
7036 | tp->napi[i].tx_cons = 0; | 7482 | tp->napi[i].tx_cons = 0; |
7037 | tw32_mailbox(tp->napi[i].prodmbox, 0); | 7483 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) |
7484 | tw32_mailbox(tp->napi[i].prodmbox, 0); | ||
7038 | tw32_rx_mbox(tp->napi[i].consmbox, 0); | 7485 | tw32_rx_mbox(tp->napi[i].consmbox, 0); |
7039 | tw32_mailbox_f(tp->napi[i].int_mbox, 1); | 7486 | tw32_mailbox_f(tp->napi[i].int_mbox, 1); |
7040 | } | 7487 | } |
7488 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) | ||
7489 | tw32_mailbox(tp->napi[0].prodmbox, 0); | ||
7041 | } else { | 7490 | } else { |
7042 | tp->napi[0].tx_prod = 0; | 7491 | tp->napi[0].tx_prod = 0; |
7043 | tp->napi[0].tx_cons = 0; | 7492 | tp->napi[0].tx_cons = 0; |
@@ -7089,17 +7538,19 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7089 | /* Clear status block in ram. */ | 7538 | /* Clear status block in ram. */ |
7090 | memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); | 7539 | memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); |
7091 | 7540 | ||
7092 | tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, | 7541 | if (tnapi->tx_ring) { |
7093 | (TG3_TX_RING_SIZE << | 7542 | tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, |
7094 | BDINFO_FLAGS_MAXLEN_SHIFT), | 7543 | (TG3_TX_RING_SIZE << |
7095 | NIC_SRAM_TX_BUFFER_DESC); | 7544 | BDINFO_FLAGS_MAXLEN_SHIFT), |
7545 | NIC_SRAM_TX_BUFFER_DESC); | ||
7546 | txrcb += TG3_BDINFO_SIZE; | ||
7547 | } | ||
7096 | 7548 | ||
7097 | tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, | 7549 | tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, |
7098 | (TG3_RX_RCB_RING_SIZE(tp) << | 7550 | (TG3_RX_RCB_RING_SIZE(tp) << |
7099 | BDINFO_FLAGS_MAXLEN_SHIFT), 0); | 7551 | BDINFO_FLAGS_MAXLEN_SHIFT), 0); |
7100 | 7552 | ||
7101 | stblk += 8; | 7553 | stblk += 8; |
7102 | txrcb += TG3_BDINFO_SIZE; | ||
7103 | rxrcb += TG3_BDINFO_SIZE; | 7554 | rxrcb += TG3_BDINFO_SIZE; |
7104 | } | 7555 | } |
7105 | } | 7556 | } |
@@ -7121,8 +7572,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7121 | tg3_abort_hw(tp, 1); | 7572 | tg3_abort_hw(tp, 1); |
7122 | } | 7573 | } |
7123 | 7574 | ||
7124 | if (reset_phy && | 7575 | if (reset_phy) |
7125 | !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) | ||
7126 | tg3_phy_reset(tp); | 7576 | tg3_phy_reset(tp); |
7127 | 7577 | ||
7128 | err = tg3_chip_reset(tp); | 7578 | err = tg3_chip_reset(tp); |
@@ -7162,15 +7612,23 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7162 | tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); | 7612 | tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); |
7163 | 7613 | ||
7164 | tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); | 7614 | tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); |
7615 | |||
7616 | val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; | ||
7617 | tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); | ||
7165 | } | 7618 | } |
7166 | 7619 | ||
7167 | if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) { | 7620 | if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) { |
7168 | val = tr32(TG3_PCIE_LNKCTL); | 7621 | u32 grc_mode = tr32(GRC_MODE); |
7169 | if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) | 7622 | |
7170 | val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS; | 7623 | /* Access the lower 1K of PL PCIE block registers. */ |
7171 | else | 7624 | val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; |
7172 | val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS; | 7625 | tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); |
7173 | tw32(TG3_PCIE_LNKCTL, val); | 7626 | |
7627 | val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); | ||
7628 | tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, | ||
7629 | val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); | ||
7630 | |||
7631 | tw32(GRC_MODE, grc_mode); | ||
7174 | } | 7632 | } |
7175 | 7633 | ||
7176 | /* This works around an issue with Athlon chipsets on | 7634 | /* This works around an issue with Athlon chipsets on |
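The new L1PLLPD block above uses the GRC mode register to window the lower 1K of the PL PCIE block into TG3_PCIE_TLDLPL_PORT before touching PHYCTL1, then restores the original mapping. A minimal sketch of that indirect read-modify-write pattern (tg3_pl_rmw() is a hypothetical helper, not part of the driver):

static void tg3_pl_rmw(struct tg3 *tp, u32 off, u32 clr, u32 set)
{
	u32 grc_mode = tr32(GRC_MODE);
	u32 val;

	/* Map the lower 1K of the PL PCIE block into the register window. */
	tw32(GRC_MODE, (grc_mode & ~GRC_MODE_PCIE_PORT_MASK) |
		       GRC_MODE_PCIE_PL_SEL);

	val = tr32(TG3_PCIE_TLDLPL_PORT + off);
	tw32(TG3_PCIE_TLDLPL_PORT + off, (val & ~clr) | set);

	/* Restore the original window selection. */
	tw32(GRC_MODE, grc_mode);
}

With such a helper, the hunk above amounts to tg3_pl_rmw(tp, TG3_PCIE_PL_LO_PHYCTL1, 0, TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN).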
@@ -7217,9 +7675,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7217 | if (err) | 7675 | if (err) |
7218 | return err; | 7676 | return err; |
7219 | 7677 | ||
7220 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && | 7678 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
7221 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 && | 7679 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { |
7222 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { | 7680 | val = tr32(TG3PCI_DMA_RW_CTRL) & |
7681 | ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; | ||
7682 | tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); | ||
7683 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && | ||
7684 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { | ||
7223 | /* This value is determined during the probe time DMA | 7685 | /* This value is determined during the probe time DMA |
7224 | * engine test, tg3_test_dma. | 7686 | * engine test, tg3_test_dma. |
7225 | */ | 7687 | */ |
@@ -7300,8 +7762,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7300 | udelay(10); | 7762 | udelay(10); |
7301 | } | 7763 | } |
7302 | if (i >= 2000) { | 7764 | if (i >= 2000) { |
7303 | printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n", | 7765 | netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); |
7304 | tp->dev->name); | ||
7305 | return -ENODEV; | 7766 | return -ENODEV; |
7306 | } | 7767 | } |
7307 | 7768 | ||
@@ -7342,8 +7803,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7342 | ((u64) tpr->rx_std_mapping >> 32)); | 7803 | ((u64) tpr->rx_std_mapping >> 32)); |
7343 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, | 7804 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, |
7344 | ((u64) tpr->rx_std_mapping & 0xffffffff)); | 7805 | ((u64) tpr->rx_std_mapping & 0xffffffff)); |
7345 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, | 7806 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) |
7346 | NIC_SRAM_RX_BUFFER_DESC); | 7807 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, |
7808 | NIC_SRAM_RX_BUFFER_DESC); | ||
7347 | 7809 | ||
7348 | /* Disable the mini ring */ | 7810 | /* Disable the mini ring */ |
7349 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 7811 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
@@ -7366,14 +7828,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7366 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, | 7828 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, |
7367 | (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | | 7829 | (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | |
7368 | BDINFO_FLAGS_USE_EXT_RECV); | 7830 | BDINFO_FLAGS_USE_EXT_RECV); |
7369 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, | 7831 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) |
7370 | NIC_SRAM_RX_JUMBO_BUFFER_DESC); | 7832 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, |
7833 | NIC_SRAM_RX_JUMBO_BUFFER_DESC); | ||
7371 | } else { | 7834 | } else { |
7372 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, | 7835 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, |
7373 | BDINFO_FLAGS_DISABLED); | 7836 | BDINFO_FLAGS_DISABLED); |
7374 | } | 7837 | } |
7375 | 7838 | ||
7376 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 7839 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
7840 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
7377 | val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | | 7841 | val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | |
7378 | (RX_STD_MAX_SIZE << 2); | 7842 | (RX_STD_MAX_SIZE << 2); |
7379 | else | 7843 | else |
@@ -7383,16 +7847,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7383 | 7847 | ||
7384 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); | 7848 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); |
7385 | 7849 | ||
7386 | tpr->rx_std_ptr = tp->rx_pending; | 7850 | tpr->rx_std_prod_idx = tp->rx_pending; |
7387 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, | 7851 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); |
7388 | tpr->rx_std_ptr); | ||
7389 | 7852 | ||
7390 | tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? | 7853 | tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? |
7391 | tp->rx_jumbo_pending : 0; | 7854 | tp->rx_jumbo_pending : 0; |
7392 | tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, | 7855 | tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); |
7393 | tpr->rx_jmb_ptr); | ||
7394 | 7856 | ||
7395 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 7857 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
7858 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | ||
7396 | tw32(STD_REPLENISH_LWM, 32); | 7859 | tw32(STD_REPLENISH_LWM, 32); |
7397 | tw32(JMB_REPLENISH_LWM, 16); | 7860 | tw32(JMB_REPLENISH_LWM, 16); |
7398 | } | 7861 | } |
@@ -7427,6 +7890,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7427 | RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | | 7890 | RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | |
7428 | RDMAC_MODE_LNGREAD_ENAB); | 7891 | RDMAC_MODE_LNGREAD_ENAB); |
7429 | 7892 | ||
7893 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | ||
7894 | rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; | ||
7895 | |||
7430 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 7896 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || |
7431 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 7897 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
7432 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 7898 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
@@ -7453,7 +7919,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7453 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 7919 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
7454 | rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; | 7920 | rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; |
7455 | 7921 | ||
7456 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 7922 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || |
7923 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | ||
7457 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 7924 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
7458 | rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; | 7925 | rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; |
7459 | 7926 | ||
@@ -7602,6 +8069,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7602 | if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 8069 | if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) |
7603 | val |= WDMAC_MODE_STATUS_TAG_FIX; | 8070 | val |= WDMAC_MODE_STATUS_TAG_FIX; |
7604 | 8071 | ||
8072 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | ||
8073 | val |= WDMAC_MODE_BURST_ALL_DATA; | ||
8074 | |||
7605 | tw32_f(WDMAC_MODE, val); | 8075 | tw32_f(WDMAC_MODE, val); |
7606 | udelay(40); | 8076 | udelay(40); |
7607 | 8077 | ||
@@ -7641,7 +8111,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7641 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 8111 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
7642 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); | 8112 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); |
7643 | val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; | 8113 | val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; |
7644 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) | 8114 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) |
7645 | val |= SNDBDI_MODE_MULTI_TXQ_EN; | 8115 | val |= SNDBDI_MODE_MULTI_TXQ_EN; |
7646 | tw32(SNDBDI_MODE, val); | 8116 | tw32(SNDBDI_MODE, val); |
7647 | tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); | 8117 | tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); |
@@ -7732,7 +8202,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7732 | /* Prevent chip from dropping frames when flow control | 8202 | /* Prevent chip from dropping frames when flow control |
7733 | * is enabled. | 8203 | * is enabled. |
7734 | */ | 8204 | */ |
7735 | tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2); | 8205 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) |
8206 | val = 1; | ||
8207 | else | ||
8208 | val = 2; | ||
8209 | tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); | ||
7736 | 8210 | ||
7737 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && | 8211 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && |
7738 | (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { | 8212 | (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { |
@@ -8065,7 +8539,8 @@ static int tg3_test_interrupt(struct tg3 *tp) | |||
8065 | * Turn off MSI one shot mode. Otherwise this test has no | 8539 | * Turn off MSI one shot mode. Otherwise this test has no |
8066 | * observable way to know whether the interrupt was delivered. | 8540 | * observable way to know whether the interrupt was delivered. |
8067 | */ | 8541 | */ |
8068 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && | 8542 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
8543 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && | ||
8069 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { | 8544 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { |
8070 | val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; | 8545 | val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; |
8071 | tw32(MSGINT_MODE, val); | 8546 | tw32(MSGINT_MODE, val); |
@@ -8108,7 +8583,8 @@ static int tg3_test_interrupt(struct tg3 *tp) | |||
8108 | 8583 | ||
8109 | if (intr_ok) { | 8584 | if (intr_ok) { |
8110 | /* Reenable MSI one shot mode. */ | 8585 | /* Reenable MSI one shot mode. */ |
8111 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && | 8586 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
8587 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && | ||
8112 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { | 8588 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { |
8113 | val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; | 8589 | val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; |
8114 | tw32(MSGINT_MODE, val); | 8590 | tw32(MSGINT_MODE, val); |
@@ -8149,16 +8625,15 @@ static int tg3_test_msi(struct tg3 *tp) | |||
8149 | return err; | 8625 | return err; |
8150 | 8626 | ||
8151 | /* MSI test failed, go back to INTx mode */ | 8627 | /* MSI test failed, go back to INTx mode */ |
8152 | printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, " | 8628 | netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n" |
8153 | "switching to INTx mode. Please report this failure to " | 8629 | "Please report this failure to the PCI maintainer and include system chipset information\n"); |
8154 | "the PCI maintainer and include system chipset information.\n", | ||
8155 | tp->dev->name); | ||
8156 | 8630 | ||
8157 | free_irq(tp->napi[0].irq_vec, &tp->napi[0]); | 8631 | free_irq(tp->napi[0].irq_vec, &tp->napi[0]); |
8158 | 8632 | ||
8159 | pci_disable_msi(tp->pdev); | 8633 | pci_disable_msi(tp->pdev); |
8160 | 8634 | ||
8161 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 8635 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; |
8636 | tp->napi[0].irq_vec = tp->pdev->irq; | ||
8162 | 8637 | ||
8163 | err = tg3_request_irq(tp, 0); | 8638 | err = tg3_request_irq(tp, 0); |
8164 | if (err) | 8639 | if (err) |
@@ -8185,8 +8660,8 @@ static int tg3_request_firmware(struct tg3 *tp) | |||
8185 | const __be32 *fw_data; | 8660 | const __be32 *fw_data; |
8186 | 8661 | ||
8187 | if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { | 8662 | if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { |
8188 | printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n", | 8663 | netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", |
8189 | tp->dev->name, tp->fw_needed); | 8664 | tp->fw_needed); |
8190 | return -ENOENT; | 8665 | return -ENOENT; |
8191 | } | 8666 | } |
8192 | 8667 | ||
@@ -8199,8 +8674,8 @@ static int tg3_request_firmware(struct tg3 *tp) | |||
8199 | 8674 | ||
8200 | tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ | 8675 | tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ |
8201 | if (tp->fw_len < (tp->fw->size - 12)) { | 8676 | if (tp->fw_len < (tp->fw->size - 12)) { |
8202 | printk(KERN_ERR "%s: bogus length %d in \"%s\"\n", | 8677 | netdev_err(tp->dev, "bogus length %d in \"%s\"\n", |
8203 | tp->dev->name, tp->fw_len, tp->fw_needed); | 8678 | tp->fw_len, tp->fw_needed); |
8204 | release_firmware(tp->fw); | 8679 | release_firmware(tp->fw); |
8205 | tp->fw = NULL; | 8680 | tp->fw = NULL; |
8206 | return -EINVAL; | 8681 | return -EINVAL; |
@@ -8238,9 +8713,8 @@ static bool tg3_enable_msix(struct tg3 *tp) | |||
8238 | return false; | 8713 | return false; |
8239 | if (pci_enable_msix(tp->pdev, msix_ent, rc)) | 8714 | if (pci_enable_msix(tp->pdev, msix_ent, rc)) |
8240 | return false; | 8715 | return false; |
8241 | printk(KERN_NOTICE | 8716 | netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", |
8242 | "%s: Requested %d MSI-X vectors, received %d\n", | 8717 | tp->irq_cnt, rc); |
8243 | tp->dev->name, tp->irq_cnt, rc); | ||
8244 | tp->irq_cnt = rc; | 8718 | tp->irq_cnt = rc; |
8245 | } | 8719 | } |
8246 | 8720 | ||
@@ -8249,7 +8723,11 @@ static bool tg3_enable_msix(struct tg3 *tp) | |||
8249 | for (i = 0; i < tp->irq_max; i++) | 8723 | for (i = 0; i < tp->irq_max; i++) |
8250 | tp->napi[i].irq_vec = msix_ent[i].vector; | 8724 | tp->napi[i].irq_vec = msix_ent[i].vector; |
8251 | 8725 | ||
8252 | tp->dev->real_num_tx_queues = tp->irq_cnt - 1; | 8726 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { |
8727 | tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; | ||
8728 | tp->dev->real_num_tx_queues = tp->irq_cnt - 1; | ||
8729 | } else | ||
8730 | tp->dev->real_num_tx_queues = 1; | ||
8253 | 8731 | ||
8254 | return true; | 8732 | return true; |
8255 | } | 8733 | } |
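The surrounding function follows the 2.6.33-era pci_enable_msix() contract: zero means all requested vectors were granted, a positive return reports how many are actually available (the notice above logs that case), and only the 5717 family turns the extra vectors into multiple TX queues (TSS). A condensed sketch of that flow, with the partial-grant retry left out:

static bool msix_sketch(struct tg3 *tp)
{
	struct msix_entry ent[TG3_IRQ_MAX_VECS];
	int i;

	for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
		ent[i].entry = i;

	/* 0 = success, >0 = vectors actually available, <0 = hard error. */
	if (pci_enable_msix(tp->pdev, ent, tp->irq_cnt))
		return false;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = ent[i].vector;

	/* Only 5717-class parts get per-vector TX queues (TSS). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
	else
		tp->dev->real_num_tx_queues = 1;

	return true;
}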
@@ -8261,8 +8739,7 @@ static void tg3_ints_init(struct tg3 *tp) | |||
8261 | /* All MSI supporting chips should support tagged | 8739 | /* All MSI supporting chips should support tagged |
8262 | * status. Assert that this is the case. | 8740 | * status. Assert that this is the case. |
8263 | */ | 8741 | */ |
8264 | printk(KERN_WARNING PFX "%s: MSI without TAGGED? " | 8742 | netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n"); |
8265 | "Not using MSI.\n", tp->dev->name); | ||
8266 | goto defcfg; | 8743 | goto defcfg; |
8267 | } | 8744 | } |
8268 | 8745 | ||
@@ -8307,12 +8784,10 @@ static int tg3_open(struct net_device *dev) | |||
8307 | if (err) | 8784 | if (err) |
8308 | return err; | 8785 | return err; |
8309 | } else if (err) { | 8786 | } else if (err) { |
8310 | printk(KERN_WARNING "%s: TSO capability disabled.\n", | 8787 | netdev_warn(tp->dev, "TSO capability disabled\n"); |
8311 | tp->dev->name); | ||
8312 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; | 8788 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; |
8313 | } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { | 8789 | } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { |
8314 | printk(KERN_NOTICE "%s: TSO capability restored.\n", | 8790 | netdev_notice(tp->dev, "TSO capability restored\n"); |
8315 | tp->dev->name); | ||
8316 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 8791 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; |
8317 | } | 8792 | } |
8318 | } | 8793 | } |
@@ -8400,6 +8875,7 @@ static int tg3_open(struct net_device *dev) | |||
8400 | } | 8875 | } |
8401 | 8876 | ||
8402 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && | 8877 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && |
8878 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && | ||
8403 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI) && | 8879 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI) && |
8404 | (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) { | 8880 | (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) { |
8405 | u32 val = tr32(PCIE_TRANSACTION_CFG); | 8881 | u32 val = tr32(PCIE_TRANSACTION_CFG); |
@@ -8977,21 +9453,18 @@ static void __tg3_set_rx_mode(struct net_device *dev) | |||
8977 | } else if (dev->flags & IFF_ALLMULTI) { | 9453 | } else if (dev->flags & IFF_ALLMULTI) { |
8978 | /* Accept all multicast. */ | 9454 | /* Accept all multicast. */ |
8979 | tg3_set_multi (tp, 1); | 9455 | tg3_set_multi (tp, 1); |
8980 | } else if (dev->mc_count < 1) { | 9456 | } else if (netdev_mc_empty(dev)) { |
8981 | /* Reject all multicast. */ | 9457 | /* Reject all multicast. */ |
8982 | tg3_set_multi (tp, 0); | 9458 | tg3_set_multi (tp, 0); |
8983 | } else { | 9459 | } else { |
8984 | /* Accept one or more multicast(s). */ | 9460 | /* Accept one or more multicast(s). */ |
8985 | struct dev_mc_list *mclist; | 9461 | struct dev_mc_list *mclist; |
8986 | unsigned int i; | ||
8987 | u32 mc_filter[4] = { 0, }; | 9462 | u32 mc_filter[4] = { 0, }; |
8988 | u32 regidx; | 9463 | u32 regidx; |
8989 | u32 bit; | 9464 | u32 bit; |
8990 | u32 crc; | 9465 | u32 crc; |
8991 | 9466 | ||
8992 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; | 9467 | netdev_for_each_mc_addr(mclist, dev) { |
8993 | i++, mclist = mclist->next) { | ||
8994 | |||
8995 | crc = calc_crc (mclist->dmi_addr, ETH_ALEN); | 9468 | crc = calc_crc (mclist->dmi_addr, ETH_ALEN); |
8996 | bit = ~crc & 0x7f; | 9469 | bit = ~crc & 0x7f; |
8997 | regidx = (bit & 0x60) >> 5; | 9470 | regidx = (bit & 0x60) >> 5; |
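For reference, the hash computed in this loop maps every multicast address to one bit of a 128-bit filter: bits 6:5 of the inverted CRC pick one of the four hash registers and bits 4:0 pick the bit within it. A minimal sketch for a single 6-byte address addr, assuming calc_crc() is the driver's local CRC-32 helper:

	u32 mc_filter[4] = { 0 };
	u32 crc = calc_crc(addr, ETH_ALEN);	/* CRC-32 of the MAC address */
	u32 bit = ~crc & 0x7f;			/* filter bit index, 0..127 */
	u32 regidx = (bit & 0x60) >> 5;		/* which MAC_HASH_REG_x */

	mc_filter[regidx] |= 1 << (bit & 0x1f);
	/* mc_filter[0..3] are then written to MAC_HASH_REG_0..3. */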
@@ -9240,9 +9713,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
9240 | struct tg3 *tp = netdev_priv(dev); | 9713 | struct tg3 *tp = netdev_priv(dev); |
9241 | 9714 | ||
9242 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 9715 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
9716 | struct phy_device *phydev; | ||
9243 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 9717 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
9244 | return -EAGAIN; | 9718 | return -EAGAIN; |
9245 | return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); | 9719 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
9720 | return phy_ethtool_gset(phydev, cmd); | ||
9246 | } | 9721 | } |
9247 | 9722 | ||
9248 | cmd->supported = (SUPPORTED_Autoneg); | 9723 | cmd->supported = (SUPPORTED_Autoneg); |
@@ -9281,9 +9756,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
9281 | struct tg3 *tp = netdev_priv(dev); | 9756 | struct tg3 *tp = netdev_priv(dev); |
9282 | 9757 | ||
9283 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 9758 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
9759 | struct phy_device *phydev; | ||
9284 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 9760 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
9285 | return -EAGAIN; | 9761 | return -EAGAIN; |
9286 | return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); | 9762 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
9763 | return phy_ethtool_sset(phydev, cmd); | ||
9287 | } | 9764 | } |
9288 | 9765 | ||
9289 | if (cmd->autoneg != AUTONEG_ENABLE && | 9766 | if (cmd->autoneg != AUTONEG_ENABLE && |
@@ -9300,7 +9777,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
9300 | ADVERTISED_Pause | | 9777 | ADVERTISED_Pause | |
9301 | ADVERTISED_Asym_Pause; | 9778 | ADVERTISED_Asym_Pause; |
9302 | 9779 | ||
9303 | if (!(tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) | 9780 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) |
9304 | mask |= ADVERTISED_1000baseT_Half | | 9781 | mask |= ADVERTISED_1000baseT_Half | |
9305 | ADVERTISED_1000baseT_Full; | 9782 | ADVERTISED_1000baseT_Full; |
9306 | 9783 | ||
@@ -9436,15 +9913,16 @@ static int tg3_set_tso(struct net_device *dev, u32 value) | |||
9436 | return 0; | 9913 | return 0; |
9437 | } | 9914 | } |
9438 | if ((dev->features & NETIF_F_IPV6_CSUM) && | 9915 | if ((dev->features & NETIF_F_IPV6_CSUM) && |
9439 | (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) { | 9916 | ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || |
9917 | (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) { | ||
9440 | if (value) { | 9918 | if (value) { |
9441 | dev->features |= NETIF_F_TSO6; | 9919 | dev->features |= NETIF_F_TSO6; |
9442 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 9920 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || |
9921 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | ||
9443 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 9922 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && |
9444 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || | 9923 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || |
9445 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 9924 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
9446 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 9925 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
9447 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | ||
9448 | dev->features |= NETIF_F_TSO_ECN; | 9926 | dev->features |= NETIF_F_TSO_ECN; |
9449 | } else | 9927 | } else |
9450 | dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); | 9928 | dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); |
@@ -9466,7 +9944,7 @@ static int tg3_nway_reset(struct net_device *dev) | |||
9466 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 9944 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
9467 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 9945 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
9468 | return -EAGAIN; | 9946 | return -EAGAIN; |
9469 | r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]); | 9947 | r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); |
9470 | } else { | 9948 | } else { |
9471 | u32 bmcr; | 9949 | u32 bmcr; |
9472 | 9950 | ||
@@ -9578,56 +10056,66 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
9578 | int err = 0; | 10056 | int err = 0; |
9579 | 10057 | ||
9580 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 10058 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
9581 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 10059 | u32 newadv; |
9582 | return -EAGAIN; | 10060 | struct phy_device *phydev; |
9583 | 10061 | ||
9584 | if (epause->autoneg) { | 10062 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
9585 | u32 newadv; | ||
9586 | struct phy_device *phydev; | ||
9587 | 10063 | ||
9588 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 10064 | if (!(phydev->supported & SUPPORTED_Pause) || |
10065 | (!(phydev->supported & SUPPORTED_Asym_Pause) && | ||
10066 | ((epause->rx_pause && !epause->tx_pause) || | ||
10067 | (!epause->rx_pause && epause->tx_pause)))) | ||
10068 | return -EINVAL; | ||
9589 | 10069 | ||
9590 | if (epause->rx_pause) { | 10070 | tp->link_config.flowctrl = 0; |
9591 | if (epause->tx_pause) | 10071 | if (epause->rx_pause) { |
9592 | newadv = ADVERTISED_Pause; | 10072 | tp->link_config.flowctrl |= FLOW_CTRL_RX; |
9593 | else | 10073 | |
9594 | newadv = ADVERTISED_Pause | | 10074 | if (epause->tx_pause) { |
9595 | ADVERTISED_Asym_Pause; | 10075 | tp->link_config.flowctrl |= FLOW_CTRL_TX; |
9596 | } else if (epause->tx_pause) { | 10076 | newadv = ADVERTISED_Pause; |
9597 | newadv = ADVERTISED_Asym_Pause; | ||
9598 | } else | 10077 | } else |
9599 | newadv = 0; | 10078 | newadv = ADVERTISED_Pause | |
9600 | 10079 | ADVERTISED_Asym_Pause; | |
9601 | if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { | 10080 | } else if (epause->tx_pause) { |
9602 | u32 oldadv = phydev->advertising & | 10081 | tp->link_config.flowctrl |= FLOW_CTRL_TX; |
9603 | (ADVERTISED_Pause | | 10082 | newadv = ADVERTISED_Asym_Pause; |
9604 | ADVERTISED_Asym_Pause); | 10083 | } else |
9605 | if (oldadv != newadv) { | 10084 | newadv = 0; |
9606 | phydev->advertising &= | 10085 | |
9607 | ~(ADVERTISED_Pause | | 10086 | if (epause->autoneg) |
9608 | ADVERTISED_Asym_Pause); | 10087 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; |
9609 | phydev->advertising |= newadv; | 10088 | else |
9610 | err = phy_start_aneg(phydev); | 10089 | tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; |
10090 | |||
10091 | if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { | ||
10092 | u32 oldadv = phydev->advertising & | ||
10093 | (ADVERTISED_Pause | ADVERTISED_Asym_Pause); | ||
10094 | if (oldadv != newadv) { | ||
10095 | phydev->advertising &= | ||
10096 | ~(ADVERTISED_Pause | | ||
10097 | ADVERTISED_Asym_Pause); | ||
10098 | phydev->advertising |= newadv; | ||
10099 | if (phydev->autoneg) { | ||
10100 | /* | ||
10101 | * Always renegotiate the link to | ||
10102 | * inform our link partner of our | ||
10103 | * flow control settings, even if the | ||
10104 | * flow control is forced. Let | ||
10105 | * tg3_adjust_link() do the final | ||
10106 | * flow control setup. | ||
10107 | */ | ||
10108 | return phy_start_aneg(phydev); | ||
9611 | } | 10109 | } |
9612 | } else { | ||
9613 | tp->link_config.advertising &= | ||
9614 | ~(ADVERTISED_Pause | | ||
9615 | ADVERTISED_Asym_Pause); | ||
9616 | tp->link_config.advertising |= newadv; | ||
9617 | } | 10110 | } |
9618 | } else { | ||
9619 | if (epause->rx_pause) | ||
9620 | tp->link_config.flowctrl |= FLOW_CTRL_RX; | ||
9621 | else | ||
9622 | tp->link_config.flowctrl &= ~FLOW_CTRL_RX; | ||
9623 | 10111 | ||
9624 | if (epause->tx_pause) | 10112 | if (!epause->autoneg) |
9625 | tp->link_config.flowctrl |= FLOW_CTRL_TX; | ||
9626 | else | ||
9627 | tp->link_config.flowctrl &= ~FLOW_CTRL_TX; | ||
9628 | |||
9629 | if (netif_running(dev)) | ||
9630 | tg3_setup_flow_control(tp, 0, 0); | 10113 | tg3_setup_flow_control(tp, 0, 0); |
10114 | } else { | ||
10115 | tp->link_config.orig_advertising &= | ||
10116 | ~(ADVERTISED_Pause | | ||
10117 | ADVERTISED_Asym_Pause); | ||
10118 | tp->link_config.orig_advertising |= newadv; | ||
9631 | } | 10119 | } |
9632 | } else { | 10120 | } else { |
9633 | int irq_sync = 0; | 10121 | int irq_sync = 0; |
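The rewritten branch above encodes the usual mapping from rx/tx pause requests to the 802.3 pause advertisement bits. The same mapping as a compact sketch (the helper name is illustrative):

static u32 pause_flags_to_adv(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return ADVERTISED_Pause;
	if (rx_pause)			/* rx only */
		return ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (tx_pause)			/* tx only */
		return ADVERTISED_Asym_Pause;
	return 0;
}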
@@ -10161,8 +10649,7 @@ static int tg3_test_registers(struct tg3 *tp) | |||
10161 | 10649 | ||
10162 | out: | 10650 | out: |
10163 | if (netif_msg_hw(tp)) | 10651 | if (netif_msg_hw(tp)) |
10164 | printk(KERN_ERR PFX "Register test failed at offset %x\n", | 10652 | pr_err("Register test failed at offset %x\n", offset); |
10165 | offset); | ||
10166 | tw32(offset, save_val); | 10653 | tw32(offset, save_val); |
10167 | return -EIO; | 10654 | return -EIO; |
10168 | } | 10655 | } |
@@ -10217,12 +10704,27 @@ static int tg3_test_memory(struct tg3 *tp) | |||
10217 | { 0x00008000, 0x01000}, | 10704 | { 0x00008000, 0x01000}, |
10218 | { 0x00010000, 0x01000}, | 10705 | { 0x00010000, 0x01000}, |
10219 | { 0xffffffff, 0x00000} | 10706 | { 0xffffffff, 0x00000} |
10707 | }, mem_tbl_5717[] = { | ||
10708 | { 0x00000200, 0x00008}, | ||
10709 | { 0x00010000, 0x0a000}, | ||
10710 | { 0x00020000, 0x13c00}, | ||
10711 | { 0xffffffff, 0x00000} | ||
10712 | }, mem_tbl_57765[] = { | ||
10713 | { 0x00000200, 0x00008}, | ||
10714 | { 0x00004000, 0x00800}, | ||
10715 | { 0x00006000, 0x09800}, | ||
10716 | { 0x00010000, 0x0a000}, | ||
10717 | { 0xffffffff, 0x00000} | ||
10220 | }; | 10718 | }; |
10221 | struct mem_entry *mem_tbl; | 10719 | struct mem_entry *mem_tbl; |
10222 | int err = 0; | 10720 | int err = 0; |
10223 | int i; | 10721 | int i; |
10224 | 10722 | ||
10225 | if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 10723 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) |
10724 | mem_tbl = mem_tbl_5717; | ||
10725 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
10726 | mem_tbl = mem_tbl_57765; | ||
10727 | else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | ||
10226 | mem_tbl = mem_tbl_5755; | 10728 | mem_tbl = mem_tbl_5755; |
10227 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 10729 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
10228 | mem_tbl = mem_tbl_5906; | 10730 | mem_tbl = mem_tbl_5906; |
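Each table entry above is an { offset, len } window in NIC SRAM terminated by the 0xffffffff sentinel; the rest of tg3_test_memory() simply walks whichever table was selected. A minimal sketch of that consumption loop, assuming tg3_do_mem_test() runs one pattern test over a window:

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;		/* report the first failing window */
	}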
@@ -10255,12 +10757,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10255 | struct tg3_napi *tnapi, *rnapi; | 10757 | struct tg3_napi *tnapi, *rnapi; |
10256 | struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | 10758 | struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; |
10257 | 10759 | ||
10760 | tnapi = &tp->napi[0]; | ||
10761 | rnapi = &tp->napi[0]; | ||
10258 | if (tp->irq_cnt > 1) { | 10762 | if (tp->irq_cnt > 1) { |
10259 | tnapi = &tp->napi[1]; | ||
10260 | rnapi = &tp->napi[1]; | 10763 | rnapi = &tp->napi[1]; |
10261 | } else { | 10764 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) |
10262 | tnapi = &tp->napi[0]; | 10765 | tnapi = &tp->napi[1]; |
10263 | rnapi = &tp->napi[0]; | ||
10264 | } | 10766 | } |
10265 | coal_now = tnapi->coal_now | rnapi->coal_now; | 10767 | coal_now = tnapi->coal_now | rnapi->coal_now; |
10266 | 10768 | ||
@@ -10297,8 +10799,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10297 | 10799 | ||
10298 | mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; | 10800 | mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; |
10299 | if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { | 10801 | if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { |
10300 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 10802 | tg3_writephy(tp, MII_TG3_FET_PTEST, |
10301 | tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800); | 10803 | MII_TG3_FET_PTEST_FRC_TX_LINK | |
10804 | MII_TG3_FET_PTEST_FRC_TX_LOCK); | ||
10805 | /* The write needs to be flushed for the AC131 */ | ||
10806 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | ||
10807 | tg3_readphy(tp, MII_TG3_FET_PTEST, &val); | ||
10302 | mac_mode |= MAC_MODE_PORT_MODE_MII; | 10808 | mac_mode |= MAC_MODE_PORT_MODE_MII; |
10303 | } else | 10809 | } else |
10304 | mac_mode |= MAC_MODE_PORT_MODE_GMII; | 10810 | mac_mode |= MAC_MODE_PORT_MODE_GMII; |
@@ -10310,9 +10816,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10310 | tw32_f(MAC_RX_MODE, tp->rx_mode); | 10816 | tw32_f(MAC_RX_MODE, tp->rx_mode); |
10311 | } | 10817 | } |
10312 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { | 10818 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { |
10313 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) | 10819 | u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; |
10820 | if (masked_phy_id == TG3_PHY_ID_BCM5401) | ||
10314 | mac_mode &= ~MAC_MODE_LINK_POLARITY; | 10821 | mac_mode &= ~MAC_MODE_LINK_POLARITY; |
10315 | else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) | 10822 | else if (masked_phy_id == TG3_PHY_ID_BCM5411) |
10316 | mac_mode |= MAC_MODE_LINK_POLARITY; | 10823 | mac_mode |= MAC_MODE_LINK_POLARITY; |
10317 | tg3_writephy(tp, MII_TG3_EXT_CTRL, | 10824 | tg3_writephy(tp, MII_TG3_EXT_CTRL, |
10318 | MII_TG3_EXT_CTRL_LNK3_LED_MODE); | 10825 | MII_TG3_EXT_CTRL_LNK3_LED_MODE); |
@@ -10339,6 +10846,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10339 | tx_data[i] = (u8) (i & 0xff); | 10846 | tx_data[i] = (u8) (i & 0xff); |
10340 | 10847 | ||
10341 | map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); | 10848 | map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); |
10849 | if (pci_dma_mapping_error(tp->pdev, map)) { | ||
10850 | dev_kfree_skb(skb); | ||
10851 | return -EIO; | ||
10852 | } | ||
10342 | 10853 | ||
10343 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | | 10854 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | |
10344 | rnapi->coal_now); | 10855 | rnapi->coal_now); |
@@ -10359,8 +10870,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10359 | 10870 | ||
10360 | udelay(10); | 10871 | udelay(10); |
10361 | 10872 | ||
10362 | /* 250 usec to allow enough time on some 10/100 Mbps devices. */ | 10873 | /* 350 usec to allow enough time on some 10/100 Mbps devices. */ |
10363 | for (i = 0; i < 25; i++) { | 10874 | for (i = 0; i < 35; i++) { |
10364 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | | 10875 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | |
10365 | coal_now); | 10876 | coal_now); |
10366 | 10877 | ||
@@ -10565,9 +11076,11 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
10565 | int err; | 11076 | int err; |
10566 | 11077 | ||
10567 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 11078 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
11079 | struct phy_device *phydev; | ||
10568 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 11080 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
10569 | return -EAGAIN; | 11081 | return -EAGAIN; |
10570 | return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd); | 11082 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
11083 | return phy_mii_ioctl(phydev, data, cmd); | ||
10571 | } | 11084 | } |
10572 | 11085 | ||
10573 | switch(cmd) { | 11086 | switch(cmd) { |
@@ -10887,7 +11400,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) | |||
10887 | 11400 | ||
10888 | /* NVRAM protection for TPM */ | 11401 | /* NVRAM protection for TPM */ |
10889 | if (nvcfg1 & (1 << 27)) | 11402 | if (nvcfg1 & (1 << 27)) |
10890 | tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; | 11403 | tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; |
10891 | 11404 | ||
10892 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 11405 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { |
10893 | case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: | 11406 | case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: |
@@ -10928,7 +11441,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) | |||
10928 | 11441 | ||
10929 | /* NVRAM protection for TPM */ | 11442 | /* NVRAM protection for TPM */ |
10930 | if (nvcfg1 & (1 << 27)) { | 11443 | if (nvcfg1 & (1 << 27)) { |
10931 | tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; | 11444 | tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; |
10932 | protect = 1; | 11445 | protect = 1; |
10933 | } | 11446 | } |
10934 | 11447 | ||
@@ -11022,7 +11535,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) | |||
11022 | 11535 | ||
11023 | /* NVRAM protection for TPM */ | 11536 | /* NVRAM protection for TPM */ |
11024 | if (nvcfg1 & (1 << 27)) { | 11537 | if (nvcfg1 & (1 << 27)) { |
11025 | tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; | 11538 | tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; |
11026 | protect = 1; | 11539 | protect = 1; |
11027 | } | 11540 | } |
11028 | 11541 | ||
@@ -11263,8 +11776,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) | |||
11263 | tp->tg3_flags |= TG3_FLAG_NVRAM; | 11776 | tp->tg3_flags |= TG3_FLAG_NVRAM; |
11264 | 11777 | ||
11265 | if (tg3_nvram_lock(tp)) { | 11778 | if (tg3_nvram_lock(tp)) { |
11266 | printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, " | 11779 | netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n", |
11267 | "tg3_nvram_init failed.\n", tp->dev->name); | 11780 | __func__); |
11268 | return; | 11781 | return; |
11269 | } | 11782 | } |
11270 | tg3_enable_nvram_access(tp); | 11783 | tg3_enable_nvram_access(tp); |
@@ -11283,7 +11796,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) | |||
11283 | tg3_get_5761_nvram_info(tp); | 11796 | tg3_get_5761_nvram_info(tp); |
11284 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 11797 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
11285 | tg3_get_5906_nvram_info(tp); | 11798 | tg3_get_5906_nvram_info(tp); |
11286 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 11799 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || |
11800 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
11287 | tg3_get_57780_nvram_info(tp); | 11801 | tg3_get_57780_nvram_info(tp); |
11288 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 11802 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) |
11289 | tg3_get_5717_nvram_info(tp); | 11803 | tg3_get_5717_nvram_info(tp); |
@@ -11524,7 +12038,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) | |||
11524 | 12038 | ||
11525 | tg3_enable_nvram_access(tp); | 12039 | tg3_enable_nvram_access(tp); |
11526 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 12040 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
11527 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) | 12041 | !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) |
11528 | tw32(NVRAM_WRITE1, 0x406); | 12042 | tw32(NVRAM_WRITE1, 0x406); |
11529 | 12043 | ||
11530 | grc_mode = tr32(GRC_MODE); | 12044 | grc_mode = tr32(GRC_MODE); |
@@ -11561,45 +12075,71 @@ struct subsys_tbl_ent { | |||
11561 | u32 phy_id; | 12075 | u32 phy_id; |
11562 | }; | 12076 | }; |
11563 | 12077 | ||
11564 | static struct subsys_tbl_ent subsys_id_to_phy_id[] = { | 12078 | static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = { |
11565 | /* Broadcom boards. */ | 12079 | /* Broadcom boards. */ |
11566 | { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */ | 12080 | { TG3PCI_SUBVENDOR_ID_BROADCOM, |
11567 | { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */ | 12081 | TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, |
11568 | { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */ | 12082 | { TG3PCI_SUBVENDOR_ID_BROADCOM, |
11569 | { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */ | 12083 | TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, |
11570 | { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */ | 12084 | { TG3PCI_SUBVENDOR_ID_BROADCOM, |
11571 | { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */ | 12085 | TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, |
11572 | { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */ | 12086 | { TG3PCI_SUBVENDOR_ID_BROADCOM, |
11573 | { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */ | 12087 | TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, |
11574 | { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */ | 12088 | { TG3PCI_SUBVENDOR_ID_BROADCOM, |
11575 | { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */ | 12089 | TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, |
11576 | { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */ | 12090 | { TG3PCI_SUBVENDOR_ID_BROADCOM, |
12091 | TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, | ||
12092 | { TG3PCI_SUBVENDOR_ID_BROADCOM, | ||
12093 | TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, | ||
12094 | { TG3PCI_SUBVENDOR_ID_BROADCOM, | ||
12095 | TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, | ||
12096 | { TG3PCI_SUBVENDOR_ID_BROADCOM, | ||
12097 | TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, | ||
12098 | { TG3PCI_SUBVENDOR_ID_BROADCOM, | ||
12099 | TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, | ||
12100 | { TG3PCI_SUBVENDOR_ID_BROADCOM, | ||
12101 | TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, | ||
11577 | 12102 | ||
11578 | /* 3com boards. */ | 12103 | /* 3com boards. */ |
11579 | { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */ | 12104 | { TG3PCI_SUBVENDOR_ID_3COM, |
11580 | { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */ | 12105 | TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, |
11581 | { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */ | 12106 | { TG3PCI_SUBVENDOR_ID_3COM, |
11582 | { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */ | 12107 | TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, |
11583 | { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */ | 12108 | { TG3PCI_SUBVENDOR_ID_3COM, |
12109 | TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, | ||
12110 | { TG3PCI_SUBVENDOR_ID_3COM, | ||
12111 | TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, | ||
12112 | { TG3PCI_SUBVENDOR_ID_3COM, | ||
12113 | TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, | ||
11584 | 12114 | ||
11585 | /* DELL boards. */ | 12115 | /* DELL boards. */ |
11586 | { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */ | 12116 | { TG3PCI_SUBVENDOR_ID_DELL, |
11587 | { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */ | 12117 | TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, |
11588 | { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */ | 12118 | { TG3PCI_SUBVENDOR_ID_DELL, |
11589 | { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */ | 12119 | TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, |
12120 | { TG3PCI_SUBVENDOR_ID_DELL, | ||
12121 | TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, | ||
12122 | { TG3PCI_SUBVENDOR_ID_DELL, | ||
12123 | TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, | ||
11590 | 12124 | ||
11591 | /* Compaq boards. */ | 12125 | /* Compaq boards. */ |
11592 | { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */ | 12126 | { TG3PCI_SUBVENDOR_ID_COMPAQ, |
11593 | { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */ | 12127 | TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, |
11594 | { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */ | 12128 | { TG3PCI_SUBVENDOR_ID_COMPAQ, |
11595 | { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */ | 12129 | TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, |
11596 | { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */ | 12130 | { TG3PCI_SUBVENDOR_ID_COMPAQ, |
12131 | TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, | ||
12132 | { TG3PCI_SUBVENDOR_ID_COMPAQ, | ||
12133 | TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, | ||
12134 | { TG3PCI_SUBVENDOR_ID_COMPAQ, | ||
12135 | TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, | ||
11597 | 12136 | ||
11598 | /* IBM boards. */ | 12137 | /* IBM boards. */ |
11599 | { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */ | 12138 | { TG3PCI_SUBVENDOR_ID_IBM, |
12139 | TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } | ||
11600 | }; | 12140 | }; |
11601 | 12141 | ||
11602 | static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp) | 12142 | static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp) |
11603 | { | 12143 | { |
11604 | int i; | 12144 | int i; |
11605 | 12145 | ||
@@ -11640,7 +12180,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
11640 | val = tr32(MEMARB_MODE); | 12180 | val = tr32(MEMARB_MODE); |
11641 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); | 12181 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); |
11642 | 12182 | ||
11643 | tp->phy_id = PHY_ID_INVALID; | 12183 | tp->phy_id = TG3_PHY_ID_INVALID; |
11644 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 12184 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; |
11645 | 12185 | ||
11646 | /* Assume an onboard device and WOL capable by default. */ | 12186 | /* Assume an onboard device and WOL capable by default. */ |
@@ -11697,7 +12237,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
11697 | 12237 | ||
11698 | tp->phy_id = eeprom_phy_id; | 12238 | tp->phy_id = eeprom_phy_id; |
11699 | if (eeprom_phy_serdes) { | 12239 | if (eeprom_phy_serdes) { |
11700 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) | 12240 | if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || |
12241 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | ||
11701 | tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; | 12242 | tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; |
11702 | else | 12243 | else |
11703 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; | 12244 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; |
@@ -11813,8 +12354,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | |||
11813 | tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; | 12354 | tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; |
11814 | } | 12355 | } |
11815 | 12356 | ||
11816 | if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE) | 12357 | if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) |
11817 | tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE; | 12358 | tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE; |
11818 | if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) | 12359 | if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) |
11819 | tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN; | 12360 | tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN; |
11820 | if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) | 12361 | if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) |
@@ -11890,7 +12431,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) | |||
11890 | err = 0; | 12431 | err = 0; |
11891 | if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || | 12432 | if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || |
11892 | (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { | 12433 | (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { |
11893 | hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID; | 12434 | hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; |
11894 | } else { | 12435 | } else { |
11895 | /* Now read the physical PHY_ID from the chip and verify | 12436 | /* Now read the physical PHY_ID from the chip and verify |
11896 | * that it is sane. If it doesn't look good, we fall back | 12437 | * that it is sane. If it doesn't look good, we fall back |
@@ -11904,17 +12445,17 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) | |||
11904 | hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; | 12445 | hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; |
11905 | hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; | 12446 | hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; |
11906 | 12447 | ||
11907 | hw_phy_id_masked = hw_phy_id & PHY_ID_MASK; | 12448 | hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; |
11908 | } | 12449 | } |
11909 | 12450 | ||
11910 | if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) { | 12451 | if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { |
11911 | tp->phy_id = hw_phy_id; | 12452 | tp->phy_id = hw_phy_id; |
11912 | if (hw_phy_id_masked == PHY_ID_BCM8002) | 12453 | if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) |
11913 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; | 12454 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; |
11914 | else | 12455 | else |
11915 | tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; | 12456 | tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; |
11916 | } else { | 12457 | } else { |
11917 | if (tp->phy_id != PHY_ID_INVALID) { | 12458 | if (tp->phy_id != TG3_PHY_ID_INVALID) { |
11918 | /* Do nothing, phy ID already set up in | 12459 | /* Do nothing, phy ID already set up in |
11919 | * tg3_get_eeprom_hw_cfg(). | 12460 | * tg3_get_eeprom_hw_cfg(). |
11920 | */ | 12461 | */ |
@@ -11924,13 +12465,13 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) | |||
11924 | /* No eeprom signature? Try the hardcoded | 12465 | /* No eeprom signature? Try the hardcoded |
11925 | * subsys device table. | 12466 | * subsys device table. |
11926 | */ | 12467 | */ |
11927 | p = lookup_by_subsys(tp); | 12468 | p = tg3_lookup_by_subsys(tp); |
11928 | if (!p) | 12469 | if (!p) |
11929 | return -ENODEV; | 12470 | return -ENODEV; |
11930 | 12471 | ||
11931 | tp->phy_id = p->phy_id; | 12472 | tp->phy_id = p->phy_id; |
11932 | if (!tp->phy_id || | 12473 | if (!tp->phy_id || |
11933 | tp->phy_id == PHY_ID_BCM8002) | 12474 | tp->phy_id == TG3_PHY_ID_BCM8002) |
11934 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; | 12475 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; |
11935 | } | 12476 | } |
11936 | } | 12477 | } |
@@ -11982,13 +12523,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) | |||
11982 | } | 12523 | } |
11983 | 12524 | ||
11984 | skip_phy_reset: | 12525 | skip_phy_reset: |
11985 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { | 12526 | if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { |
11986 | err = tg3_init_5401phy_dsp(tp); | 12527 | err = tg3_init_5401phy_dsp(tp); |
11987 | if (err) | 12528 | if (err) |
11988 | return err; | 12529 | return err; |
11989 | } | ||
11990 | 12530 | ||
11991 | if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) { | ||
11992 | err = tg3_init_5401phy_dsp(tp); | 12531 | err = tg3_init_5401phy_dsp(tp); |
11993 | } | 12532 | } |
11994 | 12533 | ||
@@ -12008,8 +12547,9 @@ skip_phy_reset: | |||
12008 | 12547 | ||
12009 | static void __devinit tg3_read_partno(struct tg3 *tp) | 12548 | static void __devinit tg3_read_partno(struct tg3 *tp) |
12010 | { | 12549 | { |
12011 | unsigned char vpd_data[256]; /* in little-endian format */ | 12550 | unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */ |
12012 | unsigned int i; | 12551 | unsigned int block_end, rosize, len; |
12552 | int i = 0; | ||
12013 | u32 magic; | 12553 | u32 magic; |
12014 | 12554 | ||
12015 | if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || | 12555 | if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || |
@@ -12017,90 +12557,62 @@ static void __devinit tg3_read_partno(struct tg3 *tp) | |||
12017 | goto out_not_found; | 12557 | goto out_not_found; |
12018 | 12558 | ||
12019 | if (magic == TG3_EEPROM_MAGIC) { | 12559 | if (magic == TG3_EEPROM_MAGIC) { |
12020 | for (i = 0; i < 256; i += 4) { | 12560 | for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { |
12021 | u32 tmp; | 12561 | u32 tmp; |
12022 | 12562 | ||
12023 | /* The data is in little-endian format in NVRAM. | 12563 | /* The data is in little-endian format in NVRAM. |
12024 | * Use the big-endian read routines to preserve | 12564 | * Use the big-endian read routines to preserve |
12025 | * the byte order as it exists in NVRAM. | 12565 | * the byte order as it exists in NVRAM. |
12026 | */ | 12566 | */ |
12027 | if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp)) | 12567 | if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp)) |
12028 | goto out_not_found; | 12568 | goto out_not_found; |
12029 | 12569 | ||
12030 | memcpy(&vpd_data[i], &tmp, sizeof(tmp)); | 12570 | memcpy(&vpd_data[i], &tmp, sizeof(tmp)); |
12031 | } | 12571 | } |
12032 | } else { | 12572 | } else { |
12033 | int vpd_cap; | 12573 | ssize_t cnt; |
12034 | 12574 | unsigned int pos = 0; | |
12035 | vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD); | 12575 | |
12036 | for (i = 0; i < 256; i += 4) { | 12576 | for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) { |
12037 | u32 tmp, j = 0; | 12577 | cnt = pci_read_vpd(tp->pdev, pos, |
12038 | __le32 v; | 12578 | TG3_NVM_VPD_LEN - pos, |
12039 | u16 tmp16; | 12579 | &vpd_data[pos]); |
12040 | 12580 | if (cnt == -ETIMEDOUT || cnt == -EINTR) | |
12041 | pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR, | 12581 | cnt = 0; |
12042 | i); | 12582 | else if (cnt < 0) |
12043 | while (j++ < 100) { | ||
12044 | pci_read_config_word(tp->pdev, vpd_cap + | ||
12045 | PCI_VPD_ADDR, &tmp16); | ||
12046 | if (tmp16 & 0x8000) | ||
12047 | break; | ||
12048 | msleep(1); | ||
12049 | } | ||
12050 | if (!(tmp16 & 0x8000)) | ||
12051 | goto out_not_found; | 12583 | goto out_not_found; |
12052 | |||
12053 | pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, | ||
12054 | &tmp); | ||
12055 | v = cpu_to_le32(tmp); | ||
12056 | memcpy(&vpd_data[i], &v, sizeof(v)); | ||
12057 | } | 12584 | } |
12585 | if (pos != TG3_NVM_VPD_LEN) | ||
12586 | goto out_not_found; | ||
12058 | } | 12587 | } |
12059 | 12588 | ||
12060 | /* Now parse and find the part number. */ | 12589 | i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN, |
12061 | for (i = 0; i < 254; ) { | 12590 | PCI_VPD_LRDT_RO_DATA); |
12062 | unsigned char val = vpd_data[i]; | 12591 | if (i < 0) |
12063 | unsigned int block_end; | 12592 | goto out_not_found; |
12064 | |||
12065 | if (val == 0x82 || val == 0x91) { | ||
12066 | i = (i + 3 + | ||
12067 | (vpd_data[i + 1] + | ||
12068 | (vpd_data[i + 2] << 8))); | ||
12069 | continue; | ||
12070 | } | ||
12071 | |||
12072 | if (val != 0x90) | ||
12073 | goto out_not_found; | ||
12074 | 12593 | ||
12075 | block_end = (i + 3 + | 12594 | rosize = pci_vpd_lrdt_size(&vpd_data[i]); |
12076 | (vpd_data[i + 1] + | 12595 | block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; |
12077 | (vpd_data[i + 2] << 8))); | 12596 | i += PCI_VPD_LRDT_TAG_SIZE; |
12078 | i += 3; | ||
12079 | 12597 | ||
12080 | if (block_end > 256) | 12598 | if (block_end > TG3_NVM_VPD_LEN) |
12081 | goto out_not_found; | 12599 | goto out_not_found; |
12082 | 12600 | ||
12083 | while (i < (block_end - 2)) { | 12601 | i = pci_vpd_find_info_keyword(vpd_data, i, rosize, |
12084 | if (vpd_data[i + 0] == 'P' && | 12602 | PCI_VPD_RO_KEYWORD_PARTNO); |
12085 | vpd_data[i + 1] == 'N') { | 12603 | if (i < 0) |
12086 | int partno_len = vpd_data[i + 2]; | 12604 | goto out_not_found; |
12087 | 12605 | ||
12088 | i += 3; | 12606 | len = pci_vpd_info_field_size(&vpd_data[i]); |
12089 | if (partno_len > 24 || (partno_len + i) > 256) | ||
12090 | goto out_not_found; | ||
12091 | 12607 | ||
12092 | memcpy(tp->board_part_number, | 12608 | i += PCI_VPD_INFO_FLD_HDR_SIZE; |
12093 | &vpd_data[i], partno_len); | 12609 | if (len > TG3_BPN_SIZE || |
12610 | (len + i) > TG3_NVM_VPD_LEN) | ||
12611 | goto out_not_found; | ||
12094 | 12612 | ||
12095 | /* Success. */ | 12613 | memcpy(tp->board_part_number, &vpd_data[i], len); |
12096 | return; | ||
12097 | } | ||
12098 | i += 3 + vpd_data[i + 2]; | ||
12099 | } | ||
12100 | 12614 | ||
12101 | /* Part number not found. */ | 12615 | return; |
12102 | goto out_not_found; | ||
12103 | } | ||
12104 | 12616 | ||
12105 | out_not_found: | 12617 | out_not_found: |
12106 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 12618 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
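The rewritten parser above leans on the generic PCI VPD helpers instead of walking the resource data by hand. A self-contained sketch of the same part-number lookup, assuming vpd/vpd_len already hold the raw VPD image (the function name is illustrative):

static int vpd_part_number(const u8 *vpd, unsigned int vpd_len,
			   char *out, size_t out_sz)
{
	int ro, pn;
	unsigned int ro_size, len;

	/* Find the large-resource read-only section ("VPD-R"). */
	ro = pci_vpd_find_tag(vpd, 0, vpd_len, PCI_VPD_LRDT_RO_DATA);
	if (ro < 0)
		return -ENOENT;

	ro_size = pci_vpd_lrdt_size(&vpd[ro]);
	ro += PCI_VPD_LRDT_TAG_SIZE;

	/* Locate the "PN" keyword inside that section. */
	pn = pci_vpd_find_info_keyword(vpd, ro, ro_size,
				       PCI_VPD_RO_KEYWORD_PARTNO);
	if (pn < 0)
		return -ENOENT;

	len = pci_vpd_info_field_size(&vpd[pn]);
	pn += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len >= out_sz || pn + len > vpd_len)
		return -EINVAL;

	memcpy(out, &vpd[pn], len);
	out[len] = '\0';
	return 0;
}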
@@ -12117,6 +12629,24 @@ out_not_found: | |||
12117 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && | 12629 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && |
12118 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) | 12630 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) |
12119 | strcpy(tp->board_part_number, "BCM57788"); | 12631 | strcpy(tp->board_part_number, "BCM57788"); |
12632 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12633 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) | ||
12634 | strcpy(tp->board_part_number, "BCM57761"); | ||
12635 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12636 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) | ||
12637 | strcpy(tp->board_part_number, "BCM57765"); | ||
12638 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12639 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) | ||
12640 | strcpy(tp->board_part_number, "BCM57781"); | ||
12641 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12642 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) | ||
12643 | strcpy(tp->board_part_number, "BCM57785"); | ||
12644 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12645 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) | ||
12646 | strcpy(tp->board_part_number, "BCM57791"); | ||
12647 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && | ||
12648 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) | ||
12649 | strcpy(tp->board_part_number, "BCM57795"); | ||
12120 | else | 12650 | else |
12121 | strcpy(tp->board_part_number, "none"); | 12651 | strcpy(tp->board_part_number, "none"); |
12122 | } | 12652 | } |
@@ -12219,6 +12749,12 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) | |||
12219 | case TG3_EEPROM_SB_REVISION_3: | 12749 | case TG3_EEPROM_SB_REVISION_3: |
12220 | offset = TG3_EEPROM_SB_F1R3_EDH_OFF; | 12750 | offset = TG3_EEPROM_SB_F1R3_EDH_OFF; |
12221 | break; | 12751 | break; |
12752 | case TG3_EEPROM_SB_REVISION_4: | ||
12753 | offset = TG3_EEPROM_SB_F1R4_EDH_OFF; | ||
12754 | break; | ||
12755 | case TG3_EEPROM_SB_REVISION_5: | ||
12756 | offset = TG3_EEPROM_SB_F1R5_EDH_OFF; | ||
12757 | break; | ||
12222 | default: | 12758 | default: |
12223 | return; | 12759 | return; |
12224 | } | 12760 | } |
@@ -12400,13 +12936,21 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12400 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { | 12936 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { |
12401 | u32 prod_id_asic_rev; | 12937 | u32 prod_id_asic_rev; |
12402 | 12938 | ||
12403 | if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C || | 12939 | if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || |
12404 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S || | 12940 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || |
12405 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C || | 12941 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724) |
12406 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S) | ||
12407 | pci_read_config_dword(tp->pdev, | 12942 | pci_read_config_dword(tp->pdev, |
12408 | TG3PCI_GEN2_PRODID_ASICREV, | 12943 | TG3PCI_GEN2_PRODID_ASICREV, |
12409 | &prod_id_asic_rev); | 12944 | &prod_id_asic_rev); |
12945 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || | ||
12946 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || | ||
12947 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || | ||
12948 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || | ||
12949 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || | ||
12950 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) | ||
12951 | pci_read_config_dword(tp->pdev, | ||
12952 | TG3PCI_GEN15_PRODID_ASICREV, | ||
12953 | &prod_id_asic_rev); | ||
12410 | else | 12954 | else |
12411 | pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, | 12955 | pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, |
12412 | &prod_id_asic_rev); | 12956 | &prod_id_asic_rev); |
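The hunk above picks which PCI configuration register carries the product ID ASIC revision: the GEN2 register for the 5717/5718/5724, the new GEN15 register for the 57765 family, and the original register otherwise. A compact equivalent (a sketch reusing the same register names, not the code the patch adds) selects the offset first and issues a single read:

	u32 reg = TG3PCI_PRODID_ASICREV;

	if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
		reg = TG3PCI_GEN2_PRODID_ASICREV;
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
		reg = TG3PCI_GEN15_PRODID_ASICREV;

	/* prod_id_asic_rev then feeds the usual GET_ASIC_REV() checks. */
	pci_read_config_dword(tp->pdev, reg, &prod_id_asic_rev);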
@@ -12560,7 +13104,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12560 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 13104 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || |
12561 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 13105 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
12562 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 13106 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || |
12563 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 13107 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
13108 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
12564 | tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; | 13109 | tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; |
12565 | 13110 | ||
12566 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || | 13111 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || |
@@ -12586,6 +13131,30 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12586 | tp->dev->features |= NETIF_F_IPV6_CSUM; | 13131 | tp->dev->features |= NETIF_F_IPV6_CSUM; |
12587 | } | 13132 | } |
12588 | 13133 | ||
13134 | /* Determine TSO capabilities */ | ||
13135 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | ||
13136 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
13137 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; | ||
13138 | else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | ||
13139 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | ||
13140 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; | ||
13141 | else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | ||
13142 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; | ||
13143 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && | ||
13144 | tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) | ||
13145 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; | ||
13146 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | ||
13147 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && | ||
13148 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { | ||
13149 | tp->tg3_flags2 |= TG3_FLG2_TSO_BUG; | ||
13150 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) | ||
13151 | tp->fw_needed = FIRMWARE_TG3TSO5; | ||
13152 | else | ||
13153 | tp->fw_needed = FIRMWARE_TG3TSO; | ||
13154 | } | ||
13155 | |||
13156 | tp->irq_max = 1; | ||
13157 | |||
12589 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 13158 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { |
12590 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; | 13159 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; |
12591 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || | 13160 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || |
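The block added above pulls the TSO capability decision out of the MSI setup (see the removals in the next hunk) and arranges it as a ladder from newest to oldest silicon. Summarised as a hypothetical helper, using only the flags and revisions the patch itself names:

static u32 tg3_hw_tso_flag(struct tg3 *tp)
{
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return TG3_FLG2_HW_TSO_3;	/* newest engines */

	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		return TG3_FLG2_HW_TSO_2;

	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
		return TG3_FLG2_HW_TSO_1;	/* TSO_BUG also set, cleared on 5750 C2 and later */

	return 0;	/* older parts fall back to firmware TSO where supported */
}

Chips that land in the final branch get tp->fw_needed pointed at the appropriate TSO firmware image instead of a hardware flag.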
@@ -12597,29 +13166,31 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12597 | 13166 | ||
12598 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 13167 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || |
12599 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 13168 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
12600 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; | ||
12601 | tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; | 13169 | tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; |
12602 | } else { | ||
12603 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; | ||
12604 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == | ||
12605 | ASIC_REV_5750 && | ||
12606 | tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) | ||
12607 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; | ||
12608 | } | 13170 | } |
12609 | } | ||
12610 | 13171 | ||
12611 | tp->irq_max = 1; | 13172 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
13173 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | ||
13174 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; | ||
13175 | tp->irq_max = TG3_IRQ_MAX_VECS; | ||
13176 | } | ||
13177 | } | ||
12612 | 13178 | ||
12613 | #ifdef TG3_NAPI | 13179 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
12614 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 13180 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
12615 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; | 13181 | tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; |
12616 | tp->irq_max = TG3_IRQ_MAX_VECS; | 13182 | else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { |
13183 | tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; | ||
13184 | tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; | ||
12617 | } | 13185 | } |
12618 | #endif | 13186 | |
13187 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | ||
13188 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
13189 | tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; | ||
12619 | 13190 | ||
12620 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || | 13191 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || |
12621 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || | 13192 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || |
12622 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 13193 | (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) |
12623 | tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; | 13194 | tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; |
12624 | 13195 | ||
12625 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, | 13196 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, |
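With TG3_FLAG_SUPPORT_MSIX set and irq_max raised to TG3_IRQ_MAX_VECS on the 5717 and 57765, the driver can request multiple interrupt vectors; the actual request is made elsewhere in the driver, outside these hunks. As a generic illustration of the MSI-X API of this kernel era (a sketch, not tg3 code):

#include <linux/pci.h>

/* Sketch: populate the entry indices and ask for max_vecs MSI-X vectors. */
static int example_request_msix(struct pci_dev *pdev,
				struct msix_entry *entries, int max_vecs)
{
	int i;

	for (i = 0; i < max_vecs; i++)
		entries[i].entry = i;

	/* Returns 0 on success; a positive value is the number of vectors
	 * the device can actually provide, so a real driver retries with
	 * that smaller count or falls back to MSI/INTx. */
	return pci_enable_msix(pdev, entries, max_vecs);
}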
@@ -12644,6 +13215,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12644 | tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || | 13215 | tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || |
12645 | tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) | 13216 | tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) |
12646 | tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; | 13217 | tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; |
13218 | } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { | ||
13219 | tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN; | ||
12647 | } | 13220 | } |
12648 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { | 13221 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { |
12649 | tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; | 13222 | tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; |
@@ -12651,8 +13224,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12651 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 13224 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { |
12652 | tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); | 13225 | tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); |
12653 | if (!tp->pcix_cap) { | 13226 | if (!tp->pcix_cap) { |
12654 | printk(KERN_ERR PFX "Cannot find PCI-X " | 13227 | pr_err("Cannot find PCI-X capability, aborting\n"); |
12655 | "capability, aborting.\n"); | ||
12656 | return -EIO; | 13228 | return -EIO; |
12657 | } | 13229 | } |
12658 | 13230 | ||
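This is one of many hunks in the patch that retire the driver-local PFX prefix in favour of the kernel's generic logging helpers. The difference, shown on the same messages (illustrative only):

/* Old style: prefix pasted in by hand via the PFX macro. */
printk(KERN_ERR PFX "Cannot find PCI-X capability, aborting.\n");

/* New style: pr_err() applies whatever pr_fmt() prefix the file defines,
 * and netdev_err() (used later in the patch) also names the interface. */
pr_err("Cannot find PCI-X capability, aborting\n");
netdev_err(dev, "Cannot map device registers, aborting\n");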
@@ -12812,7 +13384,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12812 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 13384 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || |
12813 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 13385 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
12814 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 13386 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || |
12815 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 13387 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
13388 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
12816 | tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; | 13389 | tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; |
12817 | 13390 | ||
12818 | /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). | 13391 | /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). |
@@ -12831,7 +13404,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12831 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; | 13404 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; |
12832 | 13405 | ||
12833 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 13406 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
12834 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 13407 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || |
13408 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
12835 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; | 13409 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; |
12836 | 13410 | ||
12837 | if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || | 13411 | if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || |
@@ -12847,8 +13421,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12847 | /* Force the chip into D0. */ | 13421 | /* Force the chip into D0. */ |
12848 | err = tg3_set_power_state(tp, PCI_D0); | 13422 | err = tg3_set_power_state(tp, PCI_D0); |
12849 | if (err) { | 13423 | if (err) { |
12850 | printk(KERN_ERR PFX "(%s) transition to D0 failed\n", | 13424 | pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev)); |
12851 | pci_name(tp->pdev)); | ||
12852 | return err; | 13425 | return err; |
12853 | } | 13426 | } |
12854 | 13427 | ||
@@ -12891,7 +13464,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12891 | !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && | 13464 | !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && |
12892 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && | 13465 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && |
12893 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && | 13466 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && |
12894 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { | 13467 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && |
13468 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { | ||
12895 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 13469 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
12896 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || | 13470 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || |
12897 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 13471 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || |
@@ -12926,15 +13500,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12926 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 13500 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
12927 | tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; | 13501 | tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; |
12928 | 13502 | ||
12929 | if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 && | ||
12930 | tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) || | ||
12931 | tp->pci_chip_rev_id == CHIPREV_ID_57780_A0) | ||
12932 | tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD; | ||
12933 | |||
12934 | err = tg3_mdio_init(tp); | 13503 | err = tg3_mdio_init(tp); |
12935 | if (err) | 13504 | if (err) |
12936 | return err; | 13505 | return err; |
12937 | 13506 | ||
13507 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && | ||
13508 | (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 || | ||
13509 | (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) | ||
13510 | return -ENOTSUPP; | ||
13511 | |||
12938 | /* Initialize data/descriptor byte/word swapping. */ | 13512 | /* Initialize data/descriptor byte/word swapping. */ |
12939 | val = tr32(GRC_MODE); | 13513 | val = tr32(GRC_MODE); |
12940 | val &= GRC_MODE_HOST_STACKUP; | 13514 | val &= GRC_MODE_HOST_STACKUP; |
@@ -13014,12 +13588,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13014 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || | 13588 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || |
13015 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || | 13589 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || |
13016 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || | 13590 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || |
13591 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || | ||
13592 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || | ||
13017 | (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) | 13593 | (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) |
13018 | tp->tg3_flags |= TG3_FLAG_10_100_ONLY; | 13594 | tp->tg3_flags |= TG3_FLAG_10_100_ONLY; |
13019 | 13595 | ||
13020 | err = tg3_phy_probe(tp); | 13596 | err = tg3_phy_probe(tp); |
13021 | if (err) { | 13597 | if (err) { |
13022 | printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n", | 13598 | pr_err("(%s) phy probe failed, err %d\n", |
13023 | pci_name(tp->pdev), err); | 13599 | pci_name(tp->pdev), err); |
13024 | /* ... but do not return immediately ... */ | 13600 | /* ... but do not return immediately ... */ |
13025 | tg3_mdio_fini(tp); | 13601 | tg3_mdio_fini(tp); |
@@ -13220,6 +13796,12 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) | |||
13220 | #endif | 13796 | #endif |
13221 | #endif | 13797 | #endif |
13222 | 13798 | ||
13799 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | ||
13800 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | ||
13801 | val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; | ||
13802 | goto out; | ||
13803 | } | ||
13804 | |||
13223 | if (!goal) | 13805 | if (!goal) |
13224 | goto out; | 13806 | goto out; |
13225 | 13807 | ||
@@ -13414,7 +13996,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13414 | { | 13996 | { |
13415 | dma_addr_t buf_dma; | 13997 | dma_addr_t buf_dma; |
13416 | u32 *buf, saved_dma_rwctrl; | 13998 | u32 *buf, saved_dma_rwctrl; |
13417 | int ret; | 13999 | int ret = 0; |
13418 | 14000 | ||
13419 | buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); | 14001 | buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); |
13420 | if (!buf) { | 14002 | if (!buf) { |
@@ -13427,6 +14009,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13427 | 14009 | ||
13428 | tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); | 14010 | tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); |
13429 | 14011 | ||
14012 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | ||
14013 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
14014 | goto out; | ||
14015 | |||
13430 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 14016 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { |
13431 | /* DMA read watermark not used on PCIE */ | 14017 | /* DMA read watermark not used on PCIE */ |
13432 | tp->dma_rwctrl |= 0x00180000; | 14018 | tp->dma_rwctrl |= 0x00180000; |
@@ -13499,7 +14085,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13499 | tg3_switch_clocks(tp); | 14085 | tg3_switch_clocks(tp); |
13500 | #endif | 14086 | #endif |
13501 | 14087 | ||
13502 | ret = 0; | ||
13503 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 14088 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && |
13504 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) | 14089 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) |
13505 | goto out; | 14090 | goto out; |
@@ -13520,7 +14105,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13520 | /* Send the buffer to the chip. */ | 14105 | /* Send the buffer to the chip. */ |
13521 | ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); | 14106 | ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); |
13522 | if (ret) { | 14107 | if (ret) { |
13523 | printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret); | 14108 | pr_err("tg3_test_dma() Write the buffer failed %d\n", |
14109 | ret); | ||
13524 | break; | 14110 | break; |
13525 | } | 14111 | } |
13526 | 14112 | ||
@@ -13530,7 +14116,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13530 | u32 val; | 14116 | u32 val; |
13531 | tg3_read_mem(tp, 0x2100 + (i*4), &val); | 14117 | tg3_read_mem(tp, 0x2100 + (i*4), &val); |
13532 | if (le32_to_cpu(val) != p[i]) { | 14118 | if (le32_to_cpu(val) != p[i]) { |
13533 | printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i); | 14119 | pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", |
14120 | val, i); | ||
13534 | /* ret = -ENODEV here? */ | 14121 | /* ret = -ENODEV here? */ |
13535 | } | 14122 | } |
13536 | p[i] = 0; | 14123 | p[i] = 0; |
@@ -13539,7 +14126,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13539 | /* Now read it back. */ | 14126 | /* Now read it back. */ |
13540 | ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); | 14127 | ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); |
13541 | if (ret) { | 14128 | if (ret) { |
13542 | printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret); | 14129 | pr_err("tg3_test_dma() Read the buffer failed %d\n", |
14130 | ret); | ||
13543 | 14131 | ||
13544 | break; | 14132 | break; |
13545 | } | 14133 | } |
@@ -13556,7 +14144,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13556 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 14144 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); |
13557 | break; | 14145 | break; |
13558 | } else { | 14146 | } else { |
13559 | printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i); | 14147 | pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", |
14148 | p[i], i); | ||
13560 | ret = -ENODEV; | 14149 | ret = -ENODEV; |
13561 | goto out; | 14150 | goto out; |
13562 | } | 14151 | } |
@@ -13617,8 +14206,22 @@ static void __devinit tg3_init_link_config(struct tg3 *tp) | |||
13617 | 14206 | ||
13618 | static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) | 14207 | static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) |
13619 | { | 14208 | { |
13620 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS && | 14209 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
13621 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { | 14210 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { |
14211 | tp->bufmgr_config.mbuf_read_dma_low_water = | ||
14212 | DEFAULT_MB_RDMA_LOW_WATER_5705; | ||
14213 | tp->bufmgr_config.mbuf_mac_rx_low_water = | ||
14214 | DEFAULT_MB_MACRX_LOW_WATER_57765; | ||
14215 | tp->bufmgr_config.mbuf_high_water = | ||
14216 | DEFAULT_MB_HIGH_WATER_57765; | ||
14217 | |||
14218 | tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = | ||
14219 | DEFAULT_MB_RDMA_LOW_WATER_5705; | ||
14220 | tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = | ||
14221 | DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; | ||
14222 | tp->bufmgr_config.mbuf_high_water_jumbo = | ||
14223 | DEFAULT_MB_HIGH_WATER_JUMBO_57765; | ||
14224 | } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | ||
13622 | tp->bufmgr_config.mbuf_read_dma_low_water = | 14225 | tp->bufmgr_config.mbuf_read_dma_low_water = |
13623 | DEFAULT_MB_RDMA_LOW_WATER_5705; | 14226 | DEFAULT_MB_RDMA_LOW_WATER_5705; |
13624 | tp->bufmgr_config.mbuf_mac_rx_low_water = | 14227 | tp->bufmgr_config.mbuf_mac_rx_low_water = |
@@ -13660,25 +14263,28 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) | |||
13660 | 14263 | ||
13661 | static char * __devinit tg3_phy_string(struct tg3 *tp) | 14264 | static char * __devinit tg3_phy_string(struct tg3 *tp) |
13662 | { | 14265 | { |
13663 | switch (tp->phy_id & PHY_ID_MASK) { | 14266 | switch (tp->phy_id & TG3_PHY_ID_MASK) { |
13664 | case PHY_ID_BCM5400: return "5400"; | 14267 | case TG3_PHY_ID_BCM5400: return "5400"; |
13665 | case PHY_ID_BCM5401: return "5401"; | 14268 | case TG3_PHY_ID_BCM5401: return "5401"; |
13666 | case PHY_ID_BCM5411: return "5411"; | 14269 | case TG3_PHY_ID_BCM5411: return "5411"; |
13667 | case PHY_ID_BCM5701: return "5701"; | 14270 | case TG3_PHY_ID_BCM5701: return "5701"; |
13668 | case PHY_ID_BCM5703: return "5703"; | 14271 | case TG3_PHY_ID_BCM5703: return "5703"; |
13669 | case PHY_ID_BCM5704: return "5704"; | 14272 | case TG3_PHY_ID_BCM5704: return "5704"; |
13670 | case PHY_ID_BCM5705: return "5705"; | 14273 | case TG3_PHY_ID_BCM5705: return "5705"; |
13671 | case PHY_ID_BCM5750: return "5750"; | 14274 | case TG3_PHY_ID_BCM5750: return "5750"; |
13672 | case PHY_ID_BCM5752: return "5752"; | 14275 | case TG3_PHY_ID_BCM5752: return "5752"; |
13673 | case PHY_ID_BCM5714: return "5714"; | 14276 | case TG3_PHY_ID_BCM5714: return "5714"; |
13674 | case PHY_ID_BCM5780: return "5780"; | 14277 | case TG3_PHY_ID_BCM5780: return "5780"; |
13675 | case PHY_ID_BCM5755: return "5755"; | 14278 | case TG3_PHY_ID_BCM5755: return "5755"; |
13676 | case PHY_ID_BCM5787: return "5787"; | 14279 | case TG3_PHY_ID_BCM5787: return "5787"; |
13677 | case PHY_ID_BCM5784: return "5784"; | 14280 | case TG3_PHY_ID_BCM5784: return "5784"; |
13678 | case PHY_ID_BCM5756: return "5722/5756"; | 14281 | case TG3_PHY_ID_BCM5756: return "5722/5756"; |
13679 | case PHY_ID_BCM5906: return "5906"; | 14282 | case TG3_PHY_ID_BCM5906: return "5906"; |
13680 | case PHY_ID_BCM5761: return "5761"; | 14283 | case TG3_PHY_ID_BCM5761: return "5761"; |
13681 | case PHY_ID_BCM8002: return "8002/serdes"; | 14284 | case TG3_PHY_ID_BCM5718C: return "5718C"; |
14285 | case TG3_PHY_ID_BCM5718S: return "5718S"; | ||
14286 | case TG3_PHY_ID_BCM57765: return "57765"; | ||
14287 | case TG3_PHY_ID_BCM8002: return "8002/serdes"; | ||
13682 | case 0: return "serdes"; | 14288 | case 0: return "serdes"; |
13683 | default: return "unknown"; | 14289 | default: return "unknown"; |
13684 | } | 14290 | } |
@@ -13820,7 +14426,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = { | |||
13820 | static int __devinit tg3_init_one(struct pci_dev *pdev, | 14426 | static int __devinit tg3_init_one(struct pci_dev *pdev, |
13821 | const struct pci_device_id *ent) | 14427 | const struct pci_device_id *ent) |
13822 | { | 14428 | { |
13823 | static int tg3_version_printed = 0; | ||
13824 | struct net_device *dev; | 14429 | struct net_device *dev; |
13825 | struct tg3 *tp; | 14430 | struct tg3 *tp; |
13826 | int i, err, pm_cap; | 14431 | int i, err, pm_cap; |
@@ -13828,20 +14433,17 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
13828 | char str[40]; | 14433 | char str[40]; |
13829 | u64 dma_mask, persist_dma_mask; | 14434 | u64 dma_mask, persist_dma_mask; |
13830 | 14435 | ||
13831 | if (tg3_version_printed++ == 0) | 14436 | printk_once(KERN_INFO "%s\n", version); |
13832 | printk(KERN_INFO "%s", version); | ||
13833 | 14437 | ||
13834 | err = pci_enable_device(pdev); | 14438 | err = pci_enable_device(pdev); |
13835 | if (err) { | 14439 | if (err) { |
13836 | printk(KERN_ERR PFX "Cannot enable PCI device, " | 14440 | pr_err("Cannot enable PCI device, aborting\n"); |
13837 | "aborting.\n"); | ||
13838 | return err; | 14441 | return err; |
13839 | } | 14442 | } |
13840 | 14443 | ||
13841 | err = pci_request_regions(pdev, DRV_MODULE_NAME); | 14444 | err = pci_request_regions(pdev, DRV_MODULE_NAME); |
13842 | if (err) { | 14445 | if (err) { |
13843 | printk(KERN_ERR PFX "Cannot obtain PCI resources, " | 14446 | pr_err("Cannot obtain PCI resources, aborting\n"); |
13844 | "aborting.\n"); | ||
13845 | goto err_out_disable_pdev; | 14447 | goto err_out_disable_pdev; |
13846 | } | 14448 | } |
13847 | 14449 | ||
@@ -13850,15 +14452,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
13850 | /* Find power-management capability. */ | 14452 | /* Find power-management capability. */ |
13851 | pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); | 14453 | pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); |
13852 | if (pm_cap == 0) { | 14454 | if (pm_cap == 0) { |
13853 | printk(KERN_ERR PFX "Cannot find PowerManagement capability, " | 14455 | pr_err("Cannot find PowerManagement capability, aborting\n"); |
13854 | "aborting.\n"); | ||
13855 | err = -EIO; | 14456 | err = -EIO; |
13856 | goto err_out_free_res; | 14457 | goto err_out_free_res; |
13857 | } | 14458 | } |
13858 | 14459 | ||
13859 | dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); | 14460 | dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); |
13860 | if (!dev) { | 14461 | if (!dev) { |
13861 | printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); | 14462 | pr_err("Etherdev alloc failed, aborting\n"); |
13862 | err = -ENOMEM; | 14463 | err = -ENOMEM; |
13863 | goto err_out_free_res; | 14464 | goto err_out_free_res; |
13864 | } | 14465 | } |
@@ -13908,8 +14509,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
13908 | 14509 | ||
13909 | tp->regs = pci_ioremap_bar(pdev, BAR_0); | 14510 | tp->regs = pci_ioremap_bar(pdev, BAR_0); |
13910 | if (!tp->regs) { | 14511 | if (!tp->regs) { |
13911 | printk(KERN_ERR PFX "Cannot map device registers, " | 14512 | netdev_err(dev, "Cannot map device registers, aborting\n"); |
13912 | "aborting.\n"); | ||
13913 | err = -ENOMEM; | 14513 | err = -ENOMEM; |
13914 | goto err_out_free_dev; | 14514 | goto err_out_free_dev; |
13915 | } | 14515 | } |
@@ -13919,64 +14519,18 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
13919 | tp->rx_pending = TG3_DEF_RX_RING_PENDING; | 14519 | tp->rx_pending = TG3_DEF_RX_RING_PENDING; |
13920 | tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; | 14520 | tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; |
13921 | 14521 | ||
13922 | intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; | ||
13923 | rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; | ||
13924 | sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; | ||
13925 | for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { | ||
13926 | struct tg3_napi *tnapi = &tp->napi[i]; | ||
13927 | |||
13928 | tnapi->tp = tp; | ||
13929 | tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; | ||
13930 | |||
13931 | tnapi->int_mbox = intmbx; | ||
13932 | if (i < 4) | ||
13933 | intmbx += 0x8; | ||
13934 | else | ||
13935 | intmbx += 0x4; | ||
13936 | |||
13937 | tnapi->consmbox = rcvmbx; | ||
13938 | tnapi->prodmbox = sndmbx; | ||
13939 | |||
13940 | if (i) | ||
13941 | tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); | ||
13942 | else | ||
13943 | tnapi->coal_now = HOSTCC_MODE_NOW; | ||
13944 | |||
13945 | if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) | ||
13946 | break; | ||
13947 | |||
13948 | /* | ||
13949 | * If we support MSIX, we'll be using RSS. If we're using | ||
13950 | * RSS, the first vector only handles link interrupts and the | ||
13951 | * remaining vectors handle rx and tx interrupts. Reuse the | ||
13952 | * mailbox values for the next iteration. The values we setup | ||
13953 | * above are still useful for the single vectored mode. | ||
13954 | */ | ||
13955 | if (!i) | ||
13956 | continue; | ||
13957 | |||
13958 | rcvmbx += 0x8; | ||
13959 | |||
13960 | if (sndmbx & 0x4) | ||
13961 | sndmbx -= 0x4; | ||
13962 | else | ||
13963 | sndmbx += 0xc; | ||
13964 | } | ||
13965 | |||
13966 | netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64); | ||
13967 | dev->ethtool_ops = &tg3_ethtool_ops; | 14522 | dev->ethtool_ops = &tg3_ethtool_ops; |
13968 | dev->watchdog_timeo = TG3_TX_TIMEOUT; | 14523 | dev->watchdog_timeo = TG3_TX_TIMEOUT; |
13969 | dev->irq = pdev->irq; | 14524 | dev->irq = pdev->irq; |
13970 | 14525 | ||
13971 | err = tg3_get_invariants(tp); | 14526 | err = tg3_get_invariants(tp); |
13972 | if (err) { | 14527 | if (err) { |
13973 | printk(KERN_ERR PFX "Problem fetching invariants of chip, " | 14528 | netdev_err(dev, "Problem fetching invariants of chip, aborting\n"); |
13974 | "aborting.\n"); | ||
13975 | goto err_out_iounmap; | 14529 | goto err_out_iounmap; |
13976 | } | 14530 | } |
13977 | 14531 | ||
13978 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 14532 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && |
13979 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 14533 | tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) |
13980 | dev->netdev_ops = &tg3_netdev_ops; | 14534 | dev->netdev_ops = &tg3_netdev_ops; |
13981 | else | 14535 | else |
13982 | dev->netdev_ops = &tg3_netdev_ops_dma_bug; | 14536 | dev->netdev_ops = &tg3_netdev_ops_dma_bug; |
@@ -14006,8 +14560,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14006 | err = pci_set_consistent_dma_mask(pdev, | 14560 | err = pci_set_consistent_dma_mask(pdev, |
14007 | persist_dma_mask); | 14561 | persist_dma_mask); |
14008 | if (err < 0) { | 14562 | if (err < 0) { |
14009 | printk(KERN_ERR PFX "Unable to obtain 64 bit " | 14563 | netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n"); |
14010 | "DMA for consistent allocations\n"); | ||
14011 | goto err_out_iounmap; | 14564 | goto err_out_iounmap; |
14012 | } | 14565 | } |
14013 | } | 14566 | } |
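The messages rewritten here sit inside tg3's DMA-mask negotiation: the probe tries a 64-bit (or 40-bit) mask first and, as the next hunk shows, drops to 32-bit when that fails. Stripped of tg3's specifics, the era's standard pattern looks roughly like this (a sketch, not the driver's exact code):

static int example_set_dma_masks(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;	/* 64-bit streaming and coherent DMA */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;	/* 32-bit fallback */

	dev_err(&pdev->dev, "No usable DMA configuration\n");
	return -EIO;
}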
@@ -14015,54 +14568,46 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14015 | if (err || dma_mask == DMA_BIT_MASK(32)) { | 14568 | if (err || dma_mask == DMA_BIT_MASK(32)) { |
14016 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 14569 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
14017 | if (err) { | 14570 | if (err) { |
14018 | printk(KERN_ERR PFX "No usable DMA configuration, " | 14571 | netdev_err(dev, "No usable DMA configuration, aborting\n"); |
14019 | "aborting.\n"); | ||
14020 | goto err_out_iounmap; | 14572 | goto err_out_iounmap; |
14021 | } | 14573 | } |
14022 | } | 14574 | } |
14023 | 14575 | ||
14024 | tg3_init_bufmgr_config(tp); | 14576 | tg3_init_bufmgr_config(tp); |
14025 | 14577 | ||
14026 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) | 14578 | /* Selectively allow TSO based on operating conditions */ |
14027 | tp->fw_needed = FIRMWARE_TG3; | 14579 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || |
14028 | 14580 | (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) | |
14029 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | ||
14030 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 14581 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; |
14582 | else { | ||
14583 | tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG); | ||
14584 | tp->fw_needed = NULL; | ||
14031 | } | 14585 | } |
14032 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 14586 | |
14033 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || | 14587 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) |
14034 | tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 || | 14588 | tp->fw_needed = FIRMWARE_TG3; |
14035 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || | ||
14036 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { | ||
14037 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; | ||
14038 | } else { | ||
14039 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG; | ||
14040 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) | ||
14041 | tp->fw_needed = FIRMWARE_TG3TSO5; | ||
14042 | else | ||
14043 | tp->fw_needed = FIRMWARE_TG3TSO; | ||
14044 | } | ||
14045 | 14589 | ||
14046 | /* TSO is on by default on chips that support hardware TSO. | 14590 | /* TSO is on by default on chips that support hardware TSO. |
14047 | * Firmware TSO on older chips gives lower performance, so it | 14591 | * Firmware TSO on older chips gives lower performance, so it |
14048 | * is off by default, but can be enabled using ethtool. | 14592 | * is off by default, but can be enabled using ethtool. |
14049 | */ | 14593 | */ |
14050 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | 14594 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && |
14051 | if (dev->features & NETIF_F_IP_CSUM) | 14595 | (dev->features & NETIF_F_IP_CSUM)) |
14052 | dev->features |= NETIF_F_TSO; | 14596 | dev->features |= NETIF_F_TSO; |
14053 | if ((dev->features & NETIF_F_IPV6_CSUM) && | 14597 | |
14054 | (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) | 14598 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || |
14599 | (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { | ||
14600 | if (dev->features & NETIF_F_IPV6_CSUM) | ||
14055 | dev->features |= NETIF_F_TSO6; | 14601 | dev->features |= NETIF_F_TSO6; |
14056 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 14602 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || |
14603 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | ||
14057 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 14604 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && |
14058 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || | 14605 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || |
14059 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 14606 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
14060 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 14607 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
14061 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | ||
14062 | dev->features |= NETIF_F_TSO_ECN; | 14608 | dev->features |= NETIF_F_TSO_ECN; |
14063 | } | 14609 | } |
14064 | 14610 | ||
14065 | |||
14066 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && | 14611 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && |
14067 | !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && | 14612 | !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && |
14068 | !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { | 14613 | !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { |
@@ -14072,18 +14617,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14072 | 14617 | ||
14073 | err = tg3_get_device_address(tp); | 14618 | err = tg3_get_device_address(tp); |
14074 | if (err) { | 14619 | if (err) { |
14075 | printk(KERN_ERR PFX "Could not obtain valid ethernet address, " | 14620 | netdev_err(dev, "Could not obtain valid ethernet address, aborting\n"); |
14076 | "aborting.\n"); | 14621 | goto err_out_iounmap; |
14077 | goto err_out_fw; | ||
14078 | } | 14622 | } |
14079 | 14623 | ||
14080 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 14624 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { |
14081 | tp->aperegs = pci_ioremap_bar(pdev, BAR_2); | 14625 | tp->aperegs = pci_ioremap_bar(pdev, BAR_2); |
14082 | if (!tp->aperegs) { | 14626 | if (!tp->aperegs) { |
14083 | printk(KERN_ERR PFX "Cannot map APE registers, " | 14627 | netdev_err(dev, "Cannot map APE registers, aborting\n"); |
14084 | "aborting.\n"); | ||
14085 | err = -ENOMEM; | 14628 | err = -ENOMEM; |
14086 | goto err_out_fw; | 14629 | goto err_out_iounmap; |
14087 | } | 14630 | } |
14088 | 14631 | ||
14089 | tg3_ape_lock_init(tp); | 14632 | tg3_ape_lock_init(tp); |
@@ -14105,7 +14648,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14105 | 14648 | ||
14106 | err = tg3_test_dma(tp); | 14649 | err = tg3_test_dma(tp); |
14107 | if (err) { | 14650 | if (err) { |
14108 | printk(KERN_ERR PFX "DMA engine test failed, aborting.\n"); | 14651 | netdev_err(dev, "DMA engine test failed, aborting\n"); |
14109 | goto err_out_apeunmap; | 14652 | goto err_out_apeunmap; |
14110 | } | 14653 | } |
14111 | 14654 | ||
@@ -14113,50 +14656,92 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14113 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; | 14656 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; |
14114 | tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; | 14657 | tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; |
14115 | 14658 | ||
14659 | intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; | ||
14660 | rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; | ||
14661 | sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; | ||
14662 | for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { | ||
14663 | struct tg3_napi *tnapi = &tp->napi[i]; | ||
14664 | |||
14665 | tnapi->tp = tp; | ||
14666 | tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; | ||
14667 | |||
14668 | tnapi->int_mbox = intmbx; | ||
14669 | if (i < 4) | ||
14670 | intmbx += 0x8; | ||
14671 | else | ||
14672 | intmbx += 0x4; | ||
14673 | |||
14674 | tnapi->consmbox = rcvmbx; | ||
14675 | tnapi->prodmbox = sndmbx; | ||
14676 | |||
14677 | if (i) { | ||
14678 | tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); | ||
14679 | netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64); | ||
14680 | } else { | ||
14681 | tnapi->coal_now = HOSTCC_MODE_NOW; | ||
14682 | netif_napi_add(dev, &tnapi->napi, tg3_poll, 64); | ||
14683 | } | ||
14684 | |||
14685 | if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) | ||
14686 | break; | ||
14687 | |||
14688 | /* | ||
14689 | * If we support MSIX, we'll be using RSS. If we're using | ||
14690 | * RSS, the first vector only handles link interrupts and the | ||
14691 | * remaining vectors handle rx and tx interrupts. Reuse the | ||
14692 | * mailbox values for the next iteration. The values we setup | ||
14693 | * above are still useful for the single vectored mode. | ||
14694 | */ | ||
14695 | if (!i) | ||
14696 | continue; | ||
14697 | |||
14698 | rcvmbx += 0x8; | ||
14699 | |||
14700 | if (sndmbx & 0x4) | ||
14701 | sndmbx -= 0x4; | ||
14702 | else | ||
14703 | sndmbx += 0xc; | ||
14704 | } | ||
14705 | |||
14116 | tg3_init_coal(tp); | 14706 | tg3_init_coal(tp); |
14117 | 14707 | ||
14118 | pci_set_drvdata(pdev, dev); | 14708 | pci_set_drvdata(pdev, dev); |
14119 | 14709 | ||
14120 | err = register_netdev(dev); | 14710 | err = register_netdev(dev); |
14121 | if (err) { | 14711 | if (err) { |
14122 | printk(KERN_ERR PFX "Cannot register net device, " | 14712 | netdev_err(dev, "Cannot register net device, aborting\n"); |
14123 | "aborting.\n"); | ||
14124 | goto err_out_apeunmap; | 14713 | goto err_out_apeunmap; |
14125 | } | 14714 | } |
14126 | 14715 | ||
14127 | printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", | 14716 | netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", |
14128 | dev->name, | 14717 | tp->board_part_number, |
14129 | tp->board_part_number, | 14718 | tp->pci_chip_rev_id, |
14130 | tp->pci_chip_rev_id, | 14719 | tg3_bus_string(tp, str), |
14131 | tg3_bus_string(tp, str), | 14720 | dev->dev_addr); |
14132 | dev->dev_addr); | ||
14133 | 14721 | ||
14134 | if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) | 14722 | if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { |
14135 | printk(KERN_INFO | 14723 | struct phy_device *phydev; |
14136 | "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", | 14724 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
14137 | tp->dev->name, | 14725 | netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", |
14138 | tp->mdio_bus->phy_map[PHY_ADDR]->drv->name, | 14726 | phydev->drv->name, dev_name(&phydev->dev)); |
14139 | dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev)); | 14727 | } else |
14140 | else | 14728 | netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", |
14141 | printk(KERN_INFO | 14729 | tg3_phy_string(tp), |
14142 | "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", | 14730 | ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : |
14143 | tp->dev->name, tg3_phy_string(tp), | 14731 | ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : |
14144 | ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : | 14732 | "10/100/1000Base-T")), |
14145 | ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : | 14733 | (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0); |
14146 | "10/100/1000Base-T")), | 14734 | |
14147 | (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0); | 14735 | netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", |
14148 | 14736 | (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, | |
14149 | printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", | 14737 | (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, |
14150 | dev->name, | 14738 | (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, |
14151 | (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, | 14739 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, |
14152 | (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, | 14740 | (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); |
14153 | (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, | 14741 | netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", |
14154 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, | 14742 | tp->dma_rwctrl, |
14155 | (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); | 14743 | pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : |
14156 | printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n", | 14744 | ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64); |
14157 | dev->name, tp->dma_rwctrl, | ||
14158 | (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 : | ||
14159 | (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64)); | ||
14160 | 14745 | ||
14161 | return 0; | 14746 | return 0; |
14162 | 14747 | ||
@@ -14166,10 +14751,6 @@ err_out_apeunmap: | |||
14166 | tp->aperegs = NULL; | 14751 | tp->aperegs = NULL; |
14167 | } | 14752 | } |
14168 | 14753 | ||
14169 | err_out_fw: | ||
14170 | if (tp->fw) | ||
14171 | release_firmware(tp->fw); | ||
14172 | |||
14173 | err_out_iounmap: | 14754 | err_out_iounmap: |
14174 | if (tp->regs) { | 14755 | if (tp->regs) { |
14175 | iounmap(tp->regs); | 14756 | iounmap(tp->regs); |