Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--	drivers/net/tg3.c	1497
1 file changed, 836 insertions(+), 661 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 46a3f86125be..9d5c1786c664 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.99"
-#define DRV_MODULE_RELDATE	"April 20, 2009"
+#define DRV_MODULE_VERSION	"3.101"
+#define DRV_MODULE_RELDATE	"August 28, 2009"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -92,7 +92,7 @@
 /* hardware minimum and maximum for a single frame's data payload */
 #define TG3_MIN_MTU		60
 #define TG3_MAX_MTU(tp)	\
-	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
+	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
 
 /* These numbers seem to be hard coded in the NIC firmware somehow.
  * You can't change the ring sizes, but you can change where you place
@@ -117,19 +117,26 @@
 
 #define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
-#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
+#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
-#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
-#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)
+#define TG3_DMA_BYTE_ENAB	64
+
+#define TG3_RX_STD_DMA_SZ	1536
+#define TG3_RX_JMB_DMA_SZ	9046
+
+#define TG3_RX_DMA_TO_MAP_SZ(x)	((x) + TG3_DMA_BYTE_ENAB)
+
+#define TG3_RX_STD_MAP_SZ	TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
+#define TG3_RX_JMB_MAP_SZ	TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
 /* minimum number of free TX descriptors required to wake up TX process */
-#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)
+#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
 
 #define TG3_RAW_IP_ALIGN 2
 
@@ -219,11 +226,12 @@ static struct pci_device_id tg3_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57720)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -607,13 +615,13 @@ static void tg3_disable_ints(struct tg3 *tp)
 {
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+	tw32_mailbox_f(tp->napi[0].int_mbox, 0x00000001);
 }
 
 static inline void tg3_cond_int(struct tg3 *tp)
 {
 	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
-	    (tp->hw_status->status & SD_STATUS_UPDATED))
+	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
 	else
 		tw32(HOSTCC_MODE, tp->coalesce_mode |
@@ -622,22 +630,22 @@ static inline void tg3_cond_int(struct tg3 *tp)
 
 static void tg3_enable_ints(struct tg3 *tp)
 {
+	struct tg3_napi *tnapi = &tp->napi[0];
 	tp->irq_sync = 0;
 	wmb();
 
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-		       (tp->last_tag << 24));
+	tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
 	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
-		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-			       (tp->last_tag << 24));
+		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
 	tg3_cond_int(tp);
 }
 
-static inline unsigned int tg3_has_work(struct tg3 *tp)
+static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
 {
-	struct tg3_hw_status *sblk = tp->hw_status;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
 	unsigned int work_exists = 0;
 
 	/* check for phy events */
@@ -648,22 +656,23 @@ static inline unsigned int tg3_has_work(struct tg3 *tp)
 		work_exists = 1;
 	}
 	/* check for RX/TX work to do */
-	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
-	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
+	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
+	    sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr)
 		work_exists = 1;
 
 	return work_exists;
 }
 
-/* tg3_restart_ints
+/* tg3_int_reenable
  * similar to tg3_enable_ints, but it accurately determines whether there
  * is new work pending and can return without flushing the PIO write
  * which reenables interrupts
  */
-static void tg3_restart_ints(struct tg3 *tp)
+static void tg3_int_reenable(struct tg3_napi *tnapi)
 {
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-		     tp->last_tag << 24);
+	struct tg3 *tp = tnapi->tp;
+
+	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 	mmiowb();
 
 	/* When doing tagged status, this work check is unnecessary.
@@ -671,7 +680,7 @@ static void tg3_restart_ints(struct tg3 *tp)
 	 * work we've completed.
 	 */
 	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
-	    tg3_has_work(tp))
+	    tg3_has_work(tnapi))
 		tw32(HOSTCC_MODE, tp->coalesce_mode |
 		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
@@ -679,19 +688,20 @@ static void tg3_restart_ints(struct tg3 *tp)
 static inline void tg3_netif_stop(struct tg3 *tp)
 {
 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	napi_disable(&tp->napi);
+	napi_disable(&tp->napi[0].napi);
 	netif_tx_disable(tp->dev);
 }
 
 static inline void tg3_netif_start(struct tg3 *tp)
 {
+	struct tg3_napi *tnapi = &tp->napi[0];
 	netif_wake_queue(tp->dev);
 	/* NOTE: unconditional netif_wake_queue is only appropriate
 	 * so long as all callers are assured to have free tx slots
 	 * (such as after tg3_init_hw)
 	 */
-	napi_enable(&tp->napi);
-	tp->hw_status->status |= SD_STATUS_UPDATED;
+	napi_enable(&tnapi->napi);
+	tnapi->hw_status->status |= SD_STATUS_UPDATED;
 	tg3_enable_ints(tp);
 }
 
@@ -784,7 +794,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
 	unsigned int loops;
 	int ret;
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
 	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
 		return 0;
 
@@ -917,7 +927,9 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
 		tw32(MAC_PHYCFG2, val);
 
 		val = tr32(MAC_PHYCFG1);
-		val &= ~MAC_PHYCFG1_RGMII_INT;
+		val &= ~(MAC_PHYCFG1_RGMII_INT |
+			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
+		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
 		tw32(MAC_PHYCFG1, val);
 
 		return;
@@ -933,15 +945,18 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
 
 	tw32(MAC_PHYCFG2, val);
 
-	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
-				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
-	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
+	val = tr32(MAC_PHYCFG1);
+	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
+		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
+	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
 	}
-	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
+	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
+	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
+	tw32(MAC_PHYCFG1, val);
 
 	val = tr32(MAC_EXT_RGMII_MODE);
 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
@@ -1064,6 +1079,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 	case TG3_PHY_ID_RTL8201E:
 	case TG3_PHY_ID_BCMAC131:
 		phydev->interface = PHY_INTERFACE_MODE_MII;
+		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
 		break;
 	}
 
@@ -1469,14 +1485,38 @@ static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
 	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
 }
 
+static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
+{
+	u32 phytest;
+
+	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+		u32 phy;
+
+		tg3_writephy(tp, MII_TG3_FET_TEST,
+			     phytest | MII_TG3_FET_SHADOW_EN);
+		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
+			if (enable)
+				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
+			else
+				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
+			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
+		}
+		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+	}
+}
+
 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
 {
 	u32 reg;
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 		return;
 
+	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+		tg3_phy_fet_toggle_apd(tp, enable);
+		return;
+	}
+
 	reg = MII_TG3_MISC_SHDW_WREN |
 	      MII_TG3_MISC_SHDW_SCR5_SEL |
 	      MII_TG3_MISC_SHDW_SCR5_LPED |
@@ -1506,20 +1546,22 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
 	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
 		return;
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
 		u32 ephy;
 
-		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
-			tg3_writephy(tp, MII_TG3_EPHY_TEST,
-				     ephy | MII_TG3_EPHY_SHADOW_EN);
-			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
+		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
+			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
+
+			tg3_writephy(tp, MII_TG3_FET_TEST,
+				     ephy | MII_TG3_FET_SHADOW_EN);
+			if (!tg3_readphy(tp, reg, &phy)) {
 				if (enable)
-					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
+					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
 				else
-					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
-				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
+					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
+				tg3_writephy(tp, reg, phy);
 			}
-			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
+			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
 		}
 	} else {
 		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
@@ -1888,7 +1930,7 @@ out:
 	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
 		/* Cannot do read-modify-write on 5401 */
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
-	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
+	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
 		u32 phy_reg;
 
 		/* Set bit 14 with read-modify-write to preserve other bits */
@@ -1900,7 +1942,7 @@ out:
 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
 	 * jumbo frames transmission.
 	 */
-	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
+	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
 		u32 phy_reg;
 
 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
@@ -1910,7 +1952,7 @@ out:
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 		/* adjust output voltage */
-		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
+		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
 	}
 
 	tg3_phy_toggle_automdix(tp, 1);
@@ -2655,7 +2697,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8
 		break;
 
 	default:
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
 				 SPEED_10;
 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
@@ -2990,7 +3032,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 
 	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
-	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
+	else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -3100,7 +3142,9 @@ relink:
 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 		else
 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
-	} else
+	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
+		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+	else
 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 
 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
@@ -3167,6 +3211,15 @@ relink:
 			pci_write_config_word(tp->pdev,
 					      tp->pcie_cap + PCI_EXP_LNKCTL,
 					      newlnkctl);
+	} else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
+		u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
+		if (tp->link_config.active_speed == SPEED_100 ||
+		    tp->link_config.active_speed == SPEED_10)
+			newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+		else
+			newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+		if (newreg != oldreg)
+			tw32(TG3_PCIE_LNKCTL, newreg);
 	}
 
 	if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -3848,9 +3901,9 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
 	else
 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
 
-	tp->hw_status->status =
+	tp->napi[0].hw_status->status =
 		(SD_STATUS_UPDATED |
-		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
+		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
 
 	for (i = 0; i < 100; i++) {
 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
@@ -4216,24 +4269,25 @@ static void tg3_tx_recover(struct tg3 *tp)
 	spin_unlock(&tp->lock);
 }
 
-static inline u32 tg3_tx_avail(struct tg3 *tp)
+static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
 {
 	smp_mb();
-	return (tp->tx_pending -
-		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+	return tnapi->tx_pending -
+	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
 }
 
 /* Tigon3 never reports partial packet sends.  So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
  */
-static void tg3_tx(struct tg3 *tp)
+static void tg3_tx(struct tg3_napi *tnapi)
 {
-	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
-	u32 sw_idx = tp->tx_cons;
+	struct tg3 *tp = tnapi->tp;
+	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
+	u32 sw_idx = tnapi->tx_cons;
 
 	while (sw_idx != hw_idx) {
-		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
+		struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
 		struct sk_buff *skb = ri->skb;
 		int i, tx_bug = 0;
 
@@ -4249,7 +4303,7 @@ static void tg3_tx(struct tg3 *tp)
 		sw_idx = NEXT_TX(sw_idx);
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			ri = &tp->tx_buffers[sw_idx];
+			ri = &tnapi->tx_buffers[sw_idx];
 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
 				tx_bug = 1;
 			sw_idx = NEXT_TX(sw_idx);
@@ -4263,7 +4317,7 @@ static void tg3_tx(struct tg3 *tp)
 		}
 	}
 
-	tp->tx_cons = sw_idx;
+	tnapi->tx_cons = sw_idx;
 
 	/* Need to make the tx_cons update visible to tg3_start_xmit()
 	 * before checking for netif_queue_stopped().  Without the
@@ -4273,10 +4327,10 @@ static void tg3_tx(struct tg3 *tp)
 	smp_mb();
 
 	if (unlikely(netif_queue_stopped(tp->dev) &&
-		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
+		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
 		netif_tx_lock(tp->dev);
 		if (netif_queue_stopped(tp->dev) &&
-		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
+		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
 			netif_wake_queue(tp->dev);
 		netif_tx_unlock(tp->dev);
 	}
@@ -4293,33 +4347,35 @@ static void tg3_tx(struct tg3 *tp)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
+static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
 			    int src_idx, u32 dest_idx_unmasked)
 {
+	struct tg3 *tp = tnapi->tp;
 	struct tg3_rx_buffer_desc *desc;
 	struct ring_info *map, *src_map;
 	struct sk_buff *skb;
 	dma_addr_t mapping;
 	int skb_size, dest_idx;
+	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
 	src_map = NULL;
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
-		desc = &tp->rx_std[dest_idx];
-		map = &tp->rx_std_buffers[dest_idx];
+		desc = &tpr->rx_std[dest_idx];
+		map = &tpr->rx_std_buffers[dest_idx];
 		if (src_idx >= 0)
-			src_map = &tp->rx_std_buffers[src_idx];
-		skb_size = tp->rx_pkt_buf_sz;
+			src_map = &tpr->rx_std_buffers[src_idx];
+		skb_size = tp->rx_pkt_map_sz;
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
-		desc = &tp->rx_jumbo[dest_idx];
-		map = &tp->rx_jumbo_buffers[dest_idx];
+		desc = &tpr->rx_jmb[dest_idx].std;
+		map = &tpr->rx_jmb_buffers[dest_idx];
 		if (src_idx >= 0)
-			src_map = &tp->rx_jumbo_buffers[src_idx];
-		skb_size = RX_JUMBO_PKT_BUF_SZ;
+			src_map = &tpr->rx_jmb_buffers[src_idx];
+		skb_size = TG3_RX_JMB_MAP_SZ;
 		break;
 
 	default:
@@ -4332,14 +4388,13 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 	 * Callers depend upon this behavior and assume that
 	 * we leave everything unchanged if we fail.
 	 */
-	skb = netdev_alloc_skb(tp->dev, skb_size);
+	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
 	if (skb == NULL)
 		return -ENOMEM;
 
 	skb_reserve(skb, tp->rx_offset);
 
-	mapping = pci_map_single(tp->pdev, skb->data,
-				 skb_size - tp->rx_offset,
+	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
 				 PCI_DMA_FROMDEVICE);
 
 	map->skb = skb;
@@ -4358,28 +4413,30 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
  * members of the RX descriptor are invariant.  See notes above
  * tg3_alloc_rx_skb for full details.
  */
-static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
+static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
 			   int src_idx, u32 dest_idx_unmasked)
 {
+	struct tg3 *tp = tnapi->tp;
 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
 	struct ring_info *src_map, *dest_map;
 	int dest_idx;
+	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
-		dest_desc = &tp->rx_std[dest_idx];
-		dest_map = &tp->rx_std_buffers[dest_idx];
-		src_desc = &tp->rx_std[src_idx];
-		src_map = &tp->rx_std_buffers[src_idx];
+		dest_desc = &tpr->rx_std[dest_idx];
+		dest_map = &tpr->rx_std_buffers[dest_idx];
+		src_desc = &tpr->rx_std[src_idx];
+		src_map = &tpr->rx_std_buffers[src_idx];
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
-		dest_desc = &tp->rx_jumbo[dest_idx];
-		dest_map = &tp->rx_jumbo_buffers[dest_idx];
-		src_desc = &tp->rx_jumbo[src_idx];
-		src_map = &tp->rx_jumbo_buffers[src_idx];
+		dest_desc = &tpr->rx_jmb[dest_idx].std;
+		dest_map = &tpr->rx_jmb_buffers[dest_idx];
+		src_desc = &tpr->rx_jmb[src_idx].std;
+		src_map = &tpr->rx_jmb_buffers[src_idx];
 		break;
 
 	default:
@@ -4395,13 +4452,6 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
 	src_map->skb = NULL;
 }
 
-#if TG3_VLAN_TAG_USED
-static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
-{
-	return vlan_gro_receive(&tp->napi, tp->vlgrp, vlan_tag, skb);
-}
-#endif
-
 /* The RX ring scheme is composed of multiple rings which post fresh
  * buffers to the chip, and one special ring the chip uses to report
  * status back to the host.
@@ -4426,14 +4476,16 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
  * If both the host and chip were to write into the same ring, cache line
  * eviction could occur since both entities want it in an exclusive state.
  */
-static int tg3_rx(struct tg3 *tp, int budget)
+static int tg3_rx(struct tg3_napi *tnapi, int budget)
 {
+	struct tg3 *tp = tnapi->tp;
 	u32 work_mask, rx_std_posted = 0;
-	u32 sw_idx = tp->rx_rcb_ptr;
+	u32 sw_idx = tnapi->rx_rcb_ptr;
 	u16 hw_idx;
 	int received;
+	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
-	hw_idx = tp->hw_status->idx[0].rx_producer;
+	hw_idx = tnapi->hw_status->idx[0].rx_producer;
 	/*
 	 * We need to order the read of hw_idx and the read of
 	 * the opaque cookie.
@@ -4442,7 +4494,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
 	work_mask = 0;
 	received = 0;
 	while (sw_idx != hw_idx && budget > 0) {
-		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
+		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
 		unsigned int len;
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
@@ -4451,27 +4503,25 @@ static int tg3_rx(struct tg3 *tp, int budget)
 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
-			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
-						  mapping);
-			skb = tp->rx_std_buffers[desc_idx].skb;
-			post_ptr = &tp->rx_std_ptr;
+			struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
+			dma_addr = pci_unmap_addr(ri, mapping);
+			skb = ri->skb;
+			post_ptr = &tpr->rx_std_ptr;
 			rx_std_posted++;
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
-						  mapping);
-			skb = tp->rx_jumbo_buffers[desc_idx].skb;
-			post_ptr = &tp->rx_jumbo_ptr;
-		}
-		else {
+			struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
+			dma_addr = pci_unmap_addr(ri, mapping);
+			skb = ri->skb;
+			post_ptr = &tpr->rx_jmb_ptr;
+		} else
 			goto next_pkt_nopost;
-		}
 
 		work_mask |= opaque_key;
 
 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
 		drop_it:
-			tg3_recycle_rx(tp, opaque_key,
+			tg3_recycle_rx(tnapi, opaque_key,
 				       desc_idx, *post_ptr);
 		drop_it_no_recycle:
 			/* Other statistics kept track of by card. */
@@ -4491,20 +4541,19 @@ static int tg3_rx(struct tg3 *tp, int budget)
 		    ) {
 			int skb_size;
 
-			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
+			skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
 						    desc_idx, *post_ptr);
 			if (skb_size < 0)
 				goto drop_it;
 
-			pci_unmap_single(tp->pdev, dma_addr,
-					 skb_size - tp->rx_offset,
+			pci_unmap_single(tp->pdev, dma_addr, skb_size,
 					 PCI_DMA_FROMDEVICE);
 
 			skb_put(skb, len);
 		} else {
 			struct sk_buff *copy_skb;
 
-			tg3_recycle_rx(tp, opaque_key,
+			tg3_recycle_rx(tnapi, opaque_key,
 				       desc_idx, *post_ptr);
 
 			copy_skb = netdev_alloc_skb(tp->dev,
@@ -4541,11 +4590,11 @@ static int tg3_rx(struct tg3 *tp, int budget)
 #if TG3_VLAN_TAG_USED
 		if (tp->vlgrp != NULL &&
 		    desc->type_flags & RXD_FLAG_VLAN) {
-			tg3_vlan_rx(tp, skb,
-				    desc->err_vlan & RXD_VLAN_MASK);
+			vlan_gro_receive(&tnapi->napi, tp->vlgrp,
+					 desc->err_vlan & RXD_VLAN_MASK, skb);
 		} else
 #endif
-			napi_gro_receive(&tp->napi, skb);
+			napi_gro_receive(&tnapi->napi, skb);
 
 		received++;
 		budget--;
@@ -4567,23 +4616,23 @@ next_pkt_nopost:
 
 		/* Refresh hw_idx to see if there is new work */
 		if (sw_idx == hw_idx) {
-			hw_idx = tp->hw_status->idx[0].rx_producer;
+			hw_idx = tnapi->hw_status->idx[0].rx_producer;
 			rmb();
 		}
 	}
 
 	/* ACK the status ring. */
-	tp->rx_rcb_ptr = sw_idx;
-	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
+	tnapi->rx_rcb_ptr = sw_idx;
+	tw32_rx_mbox(tnapi->consmbox, sw_idx);
 
 	/* Refill RX ring(s). */
 	if (work_mask & RXD_OPAQUE_RING_STD) {
-		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
+		sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
 		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
 			     sw_idx);
 	}
 	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
-		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
+		sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
 		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
 			     sw_idx);
 	}
@@ -4592,9 +4641,10 @@ next_pkt_nopost:
 	return received;
 }
 
-static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 {
-	struct tg3_hw_status *sblk = tp->hw_status;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
 
 	/* handle link change and other phy events */
 	if (!(tp->tg3_flags &
@@ -4618,8 +4668,8 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
 	}
 
 	/* run TX completion thread */
-	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
-		tg3_tx(tp);
+	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
+		tg3_tx(tnapi);
 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
 			return work_done;
 	}
@@ -4628,20 +4678,21 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
 	 * All RX "locking" is done by ensuring outside
 	 * code synchronizes with tg3->napi.poll()
 	 */
-	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
-		work_done += tg3_rx(tp, budget - work_done);
+	if (sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr)
+		work_done += tg3_rx(tnapi, budget - work_done);
 
 	return work_done;
 }
 
 static int tg3_poll(struct napi_struct *napi, int budget)
 {
-	struct tg3 *tp = container_of(napi, struct tg3, napi);
+	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+	struct tg3 *tp = tnapi->tp;
 	int work_done = 0;
-	struct tg3_hw_status *sblk = tp->hw_status;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
 
 	while (1) {
-		work_done = tg3_poll_work(tp, work_done, budget);
+		work_done = tg3_poll_work(tnapi, work_done, budget);
 
 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
 			goto tx_recovery;
@@ -4650,19 +4701,19 @@ static int tg3_poll(struct napi_struct *napi, int budget)
 			break;
 
 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
-			/* tp->last_tag is used in tg3_restart_ints() below
+			/* tp->last_tag is used in tg3_int_reenable() below
 			 * to tell the hw how much work has been processed,
 			 * so we must read it before checking for more work.
 			 */
-			tp->last_tag = sblk->status_tag;
-			tp->last_irq_tag = tp->last_tag;
+			tnapi->last_tag = sblk->status_tag;
+			tnapi->last_irq_tag = tnapi->last_tag;
 			rmb();
 		} else
 			sblk->status &= ~SD_STATUS_UPDATED;
 
-		if (likely(!tg3_has_work(tp))) {
+		if (likely(!tg3_has_work(tnapi))) {
 			napi_complete(napi);
-			tg3_restart_ints(tp);
+			tg3_int_reenable(tnapi);
 			break;
 		}
 	}
@@ -4713,14 +4764,14 @@ static inline void tg3_full_unlock(struct tg3 *tp)
  */
 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
 {
-	struct net_device *dev = dev_id;
-	struct tg3 *tp = netdev_priv(dev);
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
 
-	prefetch(tp->hw_status);
-	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+	prefetch(tnapi->hw_status);
+	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 
 	if (likely(!tg3_irq_sync(tp)))
-		napi_schedule(&tp->napi);
+		napi_schedule(&tnapi->napi);
 
 	return IRQ_HANDLED;
 }
@@ -4731,11 +4782,11 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
  */
 static irqreturn_t tg3_msi(int irq, void *dev_id)
 {
-	struct net_device *dev = dev_id;
-	struct tg3 *tp = netdev_priv(dev);
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
 
-	prefetch(tp->hw_status);
-	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+	prefetch(tnapi->hw_status);
+	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 	/*
 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 	 * chip-internal interrupt pending events.
@@ -4745,16 +4796,16 @@ static irqreturn_t tg3_msi(int irq, void *dev_id)
 	 */
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 	if (likely(!tg3_irq_sync(tp)))
-		napi_schedule(&tp->napi);
+		napi_schedule(&tnapi->napi);
 
 	return IRQ_RETVAL(1);
 }
 
 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
 {
-	struct net_device *dev = dev_id;
-	struct tg3 *tp = netdev_priv(dev);
-	struct tg3_hw_status *sblk = tp->hw_status;
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
 	unsigned int handled = 1;
 
 	/* In INTx mode, it is possible for the interrupt to arrive at
@@ -4785,9 +4836,9 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
 	if (tg3_irq_sync(tp))
 		goto out;
 	sblk->status &= ~SD_STATUS_UPDATED;
-	if (likely(tg3_has_work(tp))) {
-		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
-		napi_schedule(&tp->napi);
+	if (likely(tg3_has_work(tnapi))) {
+		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+		napi_schedule(&tnapi->napi);
 	} else {
 		/* No work, shared interrupt perhaps?  re-enable
 		 * interrupts, and flush that PCI write
@@ -4801,9 +4852,9 @@ out:
 
 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
 {
-	struct net_device *dev = dev_id;
-	struct tg3 *tp = netdev_priv(dev);
-	struct tg3_hw_status *sblk = tp->hw_status;
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
 	unsigned int handled = 1;
 
 	/* In INTx mode, it is possible for the interrupt to arrive at
@@ -4811,7 +4862,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
 	 * Reading the PCI State register will confirm whether the
 	 * interrupt is ours and will flush the status block.
 	 */
-	if (unlikely(sblk->status_tag == tp->last_irq_tag)) {
+	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
 		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 			handled = 0;
@@ -4838,14 +4889,14 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
 	 * so that the above check can report that the screaming interrupts
 	 * are unhandled.  Eventually they will be silenced.
 	 */
-	tp->last_irq_tag = sblk->status_tag;
+	tnapi->last_irq_tag = sblk->status_tag;
 
 	if (tg3_irq_sync(tp))
 		goto out;
 
-	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 
-	napi_schedule(&tp->napi);
+	napi_schedule(&tnapi->napi);
 
 out:
 	return IRQ_RETVAL(handled);
@@ -4854,9 +4905,9 @@ out:
 /* ISR for interrupt test */
 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
 {
-	struct net_device *dev = dev_id;
-	struct tg3 *tp = netdev_priv(dev);
-	struct tg3_hw_status *sblk = tp->hw_status;
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
 
 	if ((sblk->status & SD_STATUS_UPDATED) ||
 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
@@ -4886,7 +4937,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
 		tg3_full_unlock(tp);
 		del_timer_sync(&tp->timer);
 		tp->irq_sync = 0;
-		napi_enable(&tp->napi);
+		napi_enable(&tp->napi[0].napi);
 		dev_close(tp->dev);
 		tg3_full_lock(tp, 0);
 	}
@@ -4993,13 +5044,14 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 #endif
 }
 
-static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
+static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
 
 /* Workaround 4GB and 40-bit hardware DMA bugs. */
 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 				       u32 last_plus_one, u32 *start,
 				       u32 base_flags, u32 mss)
 {
+	struct tg3_napi *tnapi = &tp->napi[0];
 	struct sk_buff *new_skb;
 	dma_addr_t new_addr = 0;
 	u32 entry = *start;
@@ -5034,7 +5086,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 		dev_kfree_skb(new_skb);
 		new_skb = NULL;
 	} else {
-		tg3_set_txd(tp, entry, new_addr, new_skb->len,
+		tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
 			    base_flags, 1 | (mss << 1));
 		*start = NEXT_TX(entry);
 	}
@@ -5043,11 +5095,10 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 	/* Now clean up the sw ring entries. */
 	i = 0;
 	while (entry != last_plus_one) {
-		if (i == 0) {
-			tp->tx_buffers[entry].skb = new_skb;
-		} else {
-			tp->tx_buffers[entry].skb = NULL;
-		}
+		if (i == 0)
+			tnapi->tx_buffers[entry].skb = new_skb;
+		else
+			tnapi->tx_buffers[entry].skb = NULL;
 		entry = NEXT_TX(entry);
 		i++;
 	}
@@ -5058,11 +5109,11 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 	return ret;
 }
 
-static void tg3_set_txd(struct tg3 *tp, int entry,
+static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
 			dma_addr_t mapping, int len, u32 flags,
 			u32 mss_and_is_end)
 {
-	struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
+	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
 	int is_end = (mss_and_is_end & 0x1);
 	u32 mss = (mss_and_is_end >> 1);
 	u32 vlan_tag = 0;
@@ -5084,12 +5135,14 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
 /* hard_start_xmit for devices that don't have any bugs and
  * support TG3_FLG2_HW_TSO_2 only.
  */
-static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
+				  struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	u32 len, entry, base_flags, mss;
 	struct skb_shared_info *sp;
 	dma_addr_t mapping;
+	struct tg3_napi *tnapi = &tp->napi[0];
 
 	len = skb_headlen(skb);
 
@@ -5098,7 +5151,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -5109,7 +5162,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	entry = tp->tx_prod;
+	entry = tnapi->tx_prod;
 	base_flags = 0;
 	mss = 0;
 	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -5157,9 +5210,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	mapping = sp->dma_head;
 
-	tp->tx_buffers[entry].skb = skb;
+	tnapi->tx_buffers[entry].skb = skb;
 
-	tg3_set_txd(tp, entry, mapping, len, base_flags,
+	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
 
 	entry = NEXT_TX(entry);
@@ -5174,9 +5227,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 			len = frag->size;
 			mapping = sp->dma_maps[i];
-			tp->tx_buffers[entry].skb = NULL;
+			tnapi->tx_buffers[entry].skb = NULL;
 
-			tg3_set_txd(tp, entry, mapping, len,
+			tg3_set_txd(tnapi, entry, mapping, len,
 				    base_flags, (i == last) | (mss << 1));
 
 			entry = NEXT_TX(entry);
@@ -5184,12 +5237,12 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Packets are ready, update Tx producer idx local and on card. */
-	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+	tw32_tx_mbox(tnapi->prodmbox, entry);
 
-	tp->tx_prod = entry;
-	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
+	tnapi->tx_prod = entry;
+	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
+		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 			netif_wake_queue(tp->dev);
 	}
 
@@ -5199,7 +5252,8 @@ out_unlock:
 	return NETDEV_TX_OK;
 }
 
-static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
+static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
+					  struct net_device *);
 
 /* Use GSO to workaround a rare TSO bug that may be triggered when the
  * TSO header is greater than 80 bytes.
@@ -5207,11 +5261,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 {
 	struct sk_buff *segs, *nskb;
+	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
 		netif_stop_queue(tp->dev);
-		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
+		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
 			return NETDEV_TX_BUSY;
 
 		netif_wake_queue(tp->dev);
@@ -5237,13 +5292,15 @@ tg3_tso_bug_end:
5237/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and 5292/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5238 * support TG3_FLG2_HW_TSO_1 or firmware TSO only. 5293 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5239 */ 5294 */
5240static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) 5295static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5296 struct net_device *dev)
5241{ 5297{
5242 struct tg3 *tp = netdev_priv(dev); 5298 struct tg3 *tp = netdev_priv(dev);
5243 u32 len, entry, base_flags, mss; 5299 u32 len, entry, base_flags, mss;
5244 struct skb_shared_info *sp; 5300 struct skb_shared_info *sp;
5245 int would_hit_hwbug; 5301 int would_hit_hwbug;
5246 dma_addr_t mapping; 5302 dma_addr_t mapping;
5303 struct tg3_napi *tnapi = &tp->napi[0];
5247 5304
5248 len = skb_headlen(skb); 5305 len = skb_headlen(skb);
5249 5306
@@ -5252,7 +5309,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5252 * interrupt. Furthermore, IRQ processing runs lockless so we have 5309 * interrupt. Furthermore, IRQ processing runs lockless so we have
5253 * no IRQ context deadlocks to worry about either. Rejoice! 5310 * no IRQ context deadlocks to worry about either. Rejoice!
5254 */ 5311 */
5255 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { 5312 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5256 if (!netif_queue_stopped(dev)) { 5313 if (!netif_queue_stopped(dev)) {
5257 netif_stop_queue(dev); 5314 netif_stop_queue(dev);
5258 5315
@@ -5263,7 +5320,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5263 return NETDEV_TX_BUSY; 5320 return NETDEV_TX_BUSY;
5264 } 5321 }
5265 5322
5266 entry = tp->tx_prod; 5323 entry = tnapi->tx_prod;
5267 base_flags = 0; 5324 base_flags = 0;
5268 if (skb->ip_summed == CHECKSUM_PARTIAL) 5325 if (skb->ip_summed == CHECKSUM_PARTIAL)
5269 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5326 base_flags |= TXD_FLAG_TCPUDP_CSUM;
@@ -5333,7 +5390,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5333 5390
5334 mapping = sp->dma_head; 5391 mapping = sp->dma_head;
5335 5392
5336 tp->tx_buffers[entry].skb = skb; 5393 tnapi->tx_buffers[entry].skb = skb;
5337 5394
5338 would_hit_hwbug = 0; 5395 would_hit_hwbug = 0;
5339 5396
@@ -5342,7 +5399,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5342 else if (tg3_4g_overflow_test(mapping, len)) 5399 else if (tg3_4g_overflow_test(mapping, len))
5343 would_hit_hwbug = 1; 5400 would_hit_hwbug = 1;
5344 5401
5345 tg3_set_txd(tp, entry, mapping, len, base_flags, 5402 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5346 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); 5403 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5347 5404
5348 entry = NEXT_TX(entry); 5405 entry = NEXT_TX(entry);
@@ -5358,7 +5415,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5358 len = frag->size; 5415 len = frag->size;
5359 mapping = sp->dma_maps[i]; 5416 mapping = sp->dma_maps[i];
5360 5417
5361 tp->tx_buffers[entry].skb = NULL; 5418 tnapi->tx_buffers[entry].skb = NULL;
5362 5419
5363 if (tg3_4g_overflow_test(mapping, len)) 5420 if (tg3_4g_overflow_test(mapping, len))
5364 would_hit_hwbug = 1; 5421 would_hit_hwbug = 1;
@@ -5367,10 +5424,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5367 would_hit_hwbug = 1; 5424 would_hit_hwbug = 1;
5368 5425
5369 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 5426 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5370 tg3_set_txd(tp, entry, mapping, len, 5427 tg3_set_txd(tnapi, entry, mapping, len,
5371 base_flags, (i == last)|(mss << 1)); 5428 base_flags, (i == last)|(mss << 1));
5372 else 5429 else
5373 tg3_set_txd(tp, entry, mapping, len, 5430 tg3_set_txd(tnapi, entry, mapping, len,
5374 base_flags, (i == last)); 5431 base_flags, (i == last));
5375 5432
5376 entry = NEXT_TX(entry); 5433 entry = NEXT_TX(entry);
@@ -5395,12 +5452,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5395 } 5452 }
5396 5453
5397 /* Packets are ready, update Tx producer idx local and on card. */ 5454 /* Packets are ready, update Tx producer idx local and on card. */
5398 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); 5455 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
5399 5456
5400 tp->tx_prod = entry; 5457 tnapi->tx_prod = entry;
5401 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { 5458 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5402 netif_stop_queue(dev); 5459 netif_stop_queue(dev);
5403 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)) 5460 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5404 netif_wake_queue(tp->dev); 5461 netif_wake_queue(tp->dev);
5405 } 5462 }
5406 5463
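
Once the descriptors are posted, the producer index is published through the mailbox, and the queue stops when fewer than MAX_SKB_FRAGS + 1 slots remain, waking again only when a quarter of the ring is free (TG3_TX_WAKEUP_THRESH(tnapi), now computed from the per-NAPI tx_pending). A compilable model of the index arithmetic, assuming a power-of-two ring so indices wrap with a mask the way NEXT_TX() does (constants and sample values are ours):

#include <stdio.h>

#define TX_RING_SIZE 512u               /* power of two, as TG3_TX_RING_SIZE is */
#define NEXT_TX(n)   (((n) + 1) & (TX_RING_SIZE - 1))

static unsigned int tx_avail(unsigned int pending,
                             unsigned int prod, unsigned int cons)
{
        /* mirrors tg3_tx_avail(): outstanding = (prod - cons) mod ring size */
        return pending - ((prod - cons) & (TX_RING_SIZE - 1));
}

int main(void)
{
        unsigned int pending = 511;     /* user-configured TX depth */
        unsigned int prod = NEXT_TX(87), cons = 100;

        printf("avail = %u, wake threshold = %u\n",
               tx_avail(pending, prod, cons), pending / 4);
        return 0;
}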
@@ -5468,63 +5525,40 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5468 return err; 5525 return err;
5469} 5526}
5470 5527
5471/* Free up pending packets in all rx/tx rings. 5528static void tg3_rx_prodring_free(struct tg3 *tp,
5472 * 5529 struct tg3_rx_prodring_set *tpr)
5473 * The chip has been shut down and the driver detached from
5474 * the networking, so no interrupts or new tx packets will
5475 * end up in the driver. tp->{tx,}lock is not held and we are not
5476 * in an interrupt context and thus may sleep.
5477 */
5478static void tg3_free_rings(struct tg3 *tp)
5479{ 5530{
5480 struct ring_info *rxp;
5481 int i; 5531 int i;
5532 struct ring_info *rxp;
5482 5533
5483 for (i = 0; i < TG3_RX_RING_SIZE; i++) { 5534 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5484 rxp = &tp->rx_std_buffers[i]; 5535 rxp = &tpr->rx_std_buffers[i];
5485 5536
5486 if (rxp->skb == NULL) 5537 if (rxp->skb == NULL)
5487 continue; 5538 continue;
5488 pci_unmap_single(tp->pdev,
5489 pci_unmap_addr(rxp, mapping),
5490 tp->rx_pkt_buf_sz - tp->rx_offset,
5491 PCI_DMA_FROMDEVICE);
5492 dev_kfree_skb_any(rxp->skb);
5493 rxp->skb = NULL;
5494 }
5495 5539
5496 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5497 rxp = &tp->rx_jumbo_buffers[i];
5498
5499 if (rxp->skb == NULL)
5500 continue;
5501 pci_unmap_single(tp->pdev, 5540 pci_unmap_single(tp->pdev,
5502 pci_unmap_addr(rxp, mapping), 5541 pci_unmap_addr(rxp, mapping),
5503 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset, 5542 tp->rx_pkt_map_sz,
5504 PCI_DMA_FROMDEVICE); 5543 PCI_DMA_FROMDEVICE);
5505 dev_kfree_skb_any(rxp->skb); 5544 dev_kfree_skb_any(rxp->skb);
5506 rxp->skb = NULL; 5545 rxp->skb = NULL;
5507 } 5546 }
5508 5547
5509 for (i = 0; i < TG3_TX_RING_SIZE; ) { 5548 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5510 struct tx_ring_info *txp; 5549 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5511 struct sk_buff *skb; 5550 rxp = &tpr->rx_jmb_buffers[i];
5512 5551
5513 txp = &tp->tx_buffers[i]; 5552 if (rxp->skb == NULL)
5514 skb = txp->skb; 5553 continue;
5515 5554
5516 if (skb == NULL) { 5555 pci_unmap_single(tp->pdev,
5517 i++; 5556 pci_unmap_addr(rxp, mapping),
5518 continue; 5557 TG3_RX_JMB_MAP_SZ,
5558 PCI_DMA_FROMDEVICE);
5559 dev_kfree_skb_any(rxp->skb);
5560 rxp->skb = NULL;
5519 } 5561 }
5520
5521 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5522
5523 txp->skb = NULL;
5524
5525 i += skb_shinfo(skb)->nr_frags + 1;
5526
5527 dev_kfree_skb_any(skb);
5528 } 5562 }
5529} 5563}
5530 5564
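
tg3_rx_prodring_free() now unmaps with a precomputed length, tp->rx_pkt_map_sz for the standard ring and TG3_RX_JMB_MAP_SZ for the jumbo ring, rather than re-deriving "buffer size - rx_offset" at free time, so the unmap length always matches what the allocation path computed through TG3_RX_DMA_TO_MAP_SZ(). A sketch of that derivation, assuming the driver's DMA payload sizes and its 64-byte pad (the program itself is ours):

#include <stdio.h>

#define DMA_BYTE_ENAB    64u            /* pad added on top of the payload */
#define RX_STD_DMA_SZ    1536u          /* standard-ring DMA payload */
#define RX_JMB_DMA_SZ    9046u          /* jumbo-ring DMA payload */
#define DMA_TO_MAP_SZ(x) ((x) + DMA_BYTE_ENAB)

int main(void)
{
        printf("std map size = %u, jumbo map size = %u\n",
               DMA_TO_MAP_SZ(RX_STD_DMA_SZ), DMA_TO_MAP_SZ(RX_JMB_DMA_SZ));
        return 0;
}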
@@ -5535,23 +5569,20 @@ static void tg3_free_rings(struct tg3 *tp)
5535 * end up in the driver. tp->{tx,}lock are held and thus 5569 * end up in the driver. tp->{tx,}lock are held and thus
5536 * we may not sleep. 5570 * we may not sleep.
5537 */ 5571 */
5538static int tg3_init_rings(struct tg3 *tp) 5572static int tg3_rx_prodring_alloc(struct tg3 *tp,
5573 struct tg3_rx_prodring_set *tpr)
5539{ 5574{
5540 u32 i; 5575 u32 i, rx_pkt_dma_sz;
5541 5576 struct tg3_napi *tnapi = &tp->napi[0];
5542 /* Free up all the SKBs. */
5543 tg3_free_rings(tp);
5544 5577
5545 /* Zero out all descriptors. */ 5578 /* Zero out all descriptors. */
5546 memset(tp->rx_std, 0, TG3_RX_RING_BYTES); 5579 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5547 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5548 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5549 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5550 5580
5551 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ; 5581 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5552 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && 5582 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5553 (tp->dev->mtu > ETH_DATA_LEN)) 5583 tp->dev->mtu > ETH_DATA_LEN)
5554 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ; 5584 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
5585 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
5555 5586
5556 /* Initialize invariants of the rings, we only set this 5587 /* Initialize invariants of the rings, we only set this
5557 * stuff once. This works because the card does not 5588 * stuff once. This works because the card does not
@@ -5560,62 +5591,179 @@ static int tg3_init_rings(struct tg3 *tp)
5560 for (i = 0; i < TG3_RX_RING_SIZE; i++) { 5591 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5561 struct tg3_rx_buffer_desc *rxd; 5592 struct tg3_rx_buffer_desc *rxd;
5562 5593
5563 rxd = &tp->rx_std[i]; 5594 rxd = &tpr->rx_std[i];
5564 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64) 5595 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
5565 << RXD_LEN_SHIFT;
5566 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 5596 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5567 rxd->opaque = (RXD_OPAQUE_RING_STD | 5597 rxd->opaque = (RXD_OPAQUE_RING_STD |
5568 (i << RXD_OPAQUE_INDEX_SHIFT)); 5598 (i << RXD_OPAQUE_INDEX_SHIFT));
5569 } 5599 }
5570 5600
5571 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5572 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5573 struct tg3_rx_buffer_desc *rxd;
5574
5575 rxd = &tp->rx_jumbo[i];
5576 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5577 << RXD_LEN_SHIFT;
5578 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5579 RXD_FLAG_JUMBO;
5580 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5581 (i << RXD_OPAQUE_INDEX_SHIFT));
5582 }
5583 }
5584
5585 /* Now allocate fresh SKBs for each rx ring. */ 5601 /* Now allocate fresh SKBs for each rx ring. */
5586 for (i = 0; i < tp->rx_pending; i++) { 5602 for (i = 0; i < tp->rx_pending; i++) {
5587 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { 5603 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5588 printk(KERN_WARNING PFX 5604 printk(KERN_WARNING PFX
5589 "%s: Using a smaller RX standard ring, " 5605 "%s: Using a smaller RX standard ring, "
5590 "only %d out of %d buffers were allocated " 5606 "only %d out of %d buffers were allocated "
5591 "successfully.\n", 5607 "successfully.\n",
5592 tp->dev->name, i, tp->rx_pending); 5608 tp->dev->name, i, tp->rx_pending);
5593 if (i == 0) 5609 if (i == 0)
5594 return -ENOMEM; 5610 goto initfail;
5595 tp->rx_pending = i; 5611 tp->rx_pending = i;
5596 break; 5612 break;
5597 } 5613 }
5598 } 5614 }
5599 5615
5616 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
5617 goto done;
5618
5619 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
5620
5600 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 5621 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5622 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5623 struct tg3_rx_buffer_desc *rxd;
5624
5625 rxd = &tpr->rx_jmb[i].std;
5626 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
5627 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5628 RXD_FLAG_JUMBO;
5629 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5630 (i << RXD_OPAQUE_INDEX_SHIFT));
5631 }
5632
5601 for (i = 0; i < tp->rx_jumbo_pending; i++) { 5633 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5602 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, 5634 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
5603 -1, i) < 0) { 5635 -1, i) < 0) {
5604 printk(KERN_WARNING PFX 5636 printk(KERN_WARNING PFX
5605 "%s: Using a smaller RX jumbo ring, " 5637 "%s: Using a smaller RX jumbo ring, "
5606 "only %d out of %d buffers were " 5638 "only %d out of %d buffers were "
5607 "allocated successfully.\n", 5639 "allocated successfully.\n",
5608 tp->dev->name, i, tp->rx_jumbo_pending); 5640 tp->dev->name, i, tp->rx_jumbo_pending);
5609 if (i == 0) { 5641 if (i == 0)
5610 tg3_free_rings(tp); 5642 goto initfail;
5611 return -ENOMEM;
5612 }
5613 tp->rx_jumbo_pending = i; 5643 tp->rx_jumbo_pending = i;
5614 break; 5644 break;
5615 } 5645 }
5616 } 5646 }
5617 } 5647 }
5648
5649done:
5650 return 0;
5651
5652initfail:
5653 tg3_rx_prodring_free(tp, tpr);
5654 return -ENOMEM;
5655}
5656
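
tg3_rx_prodring_alloc() replaces the inline "tg3_free_rings(); return -ENOMEM;" failure handling with a single initfail: label that funnels every error through tg3_rx_prodring_free(). A userspace sketch of the same single-funnel unwind, assuming (as the driver does via kzalloc) that the tracking structure starts from a known state so freeing a half-built set is safe (all names are ours):

#include <stdio.h>
#include <stdlib.h>

struct ring {
        void *std;
        void *jmb;
};

static void ring_free(struct ring *r)
{
        free(r->std);                   /* free(NULL) is a no-op */
        free(r->jmb);
        r->std = r->jmb = NULL;
}

static int ring_alloc(struct ring *r, int want_jumbo)
{
        r->std = r->jmb = NULL;         /* known state for the unwind */

        r->std = malloc(1536);
        if (!r->std)
                goto initfail;

        if (want_jumbo) {
                r->jmb = malloc(9046);
                if (!r->jmb)
                        goto initfail;
        }
        return 0;

initfail:
        ring_free(r);                   /* frees whatever was set up */
        return -1;
}

int main(void)
{
        struct ring r;

        if (ring_alloc(&r, 1) == 0)
                puts("rings allocated");
        ring_free(&r);
        return 0;
}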
5657static void tg3_rx_prodring_fini(struct tg3 *tp,
5658 struct tg3_rx_prodring_set *tpr)
5659{
5660 kfree(tpr->rx_std_buffers);
5661 tpr->rx_std_buffers = NULL;
5662 kfree(tpr->rx_jmb_buffers);
5663 tpr->rx_jmb_buffers = NULL;
5664 if (tpr->rx_std) {
5665 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5666 tpr->rx_std, tpr->rx_std_mapping);
5667 tpr->rx_std = NULL;
5668 }
5669 if (tpr->rx_jmb) {
5670 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5671 tpr->rx_jmb, tpr->rx_jmb_mapping);
5672 tpr->rx_jmb = NULL;
5673 }
5674}
5675
5676static int tg3_rx_prodring_init(struct tg3 *tp,
5677 struct tg3_rx_prodring_set *tpr)
5678{
5679 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
5680 TG3_RX_RING_SIZE, GFP_KERNEL);
5681 if (!tpr->rx_std_buffers)
5682 return -ENOMEM;
5683
5684 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5685 &tpr->rx_std_mapping);
5686 if (!tpr->rx_std)
5687 goto err_out;
5688
5689 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5690 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
5691 TG3_RX_JUMBO_RING_SIZE,
5692 GFP_KERNEL);
5693 if (!tpr->rx_jmb_buffers)
5694 goto err_out;
5695
5696 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
5697 TG3_RX_JUMBO_RING_BYTES,
5698 &tpr->rx_jmb_mapping);
5699 if (!tpr->rx_jmb)
5700 goto err_out;
5701 }
5702
5618 return 0; 5703 return 0;
5704
5705err_out:
5706 tg3_rx_prodring_fini(tp, tpr);
5707 return -ENOMEM;
5708}
5709
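
The new tg3_rx_prodring_init()/tg3_rx_prodring_fini() pair is deliberately symmetric, and fini NULLs every pointer after releasing it, so it can be called on a partially initialized set (the err_out path above) and again later without double-freeing. A small model of that idempotent teardown (names are ours):

#include <stdlib.h>

struct res {
        void *buf;
};

static void res_fini(struct res *r)
{
        free(r->buf);           /* free(NULL) is defined as a no-op */
        r->buf = NULL;          /* a second fini call is then harmless */
}

int main(void)
{
        struct res r = { malloc(64) };

        res_fini(&r);
        res_fini(&r);           /* safe: pointer was cleared above */
        return 0;
}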
5710/* Free up pending packets in all rx/tx rings.
5711 *
5712 * The chip has been shut down and the driver detached from
5713 * the networking, so no interrupts or new tx packets will
5714 * end up in the driver. tp->{tx,}lock is not held and we are not
5715 * in an interrupt context and thus may sleep.
5716 */
5717static void tg3_free_rings(struct tg3 *tp)
5718{
5719 struct tg3_napi *tnapi = &tp->napi[0];
5720 int i;
5721
5722 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5723 struct tx_ring_info *txp;
5724 struct sk_buff *skb;
5725
5726 txp = &tnapi->tx_buffers[i];
5727 skb = txp->skb;
5728
5729 if (skb == NULL) {
5730 i++;
5731 continue;
5732 }
5733
5734 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5735
5736 txp->skb = NULL;
5737
5738 i += skb_shinfo(skb)->nr_frags + 1;
5739
5740 dev_kfree_skb_any(skb);
5741 }
5742
5743 tg3_rx_prodring_free(tp, &tp->prodring[0]);
5744}
5745
5746/* Initialize tx/rx rings for packet processing.
5747 *
5748 * The chip has been shut down and the driver detached from
5749 * the networking, so no interrupts or new tx packets will
5750 * end up in the driver. tp->{tx,}lock are held and thus
5751 * we may not sleep.
5752 */
5753static int tg3_init_rings(struct tg3 *tp)
5754{
5755 struct tg3_napi *tnapi = &tp->napi[0];
5756
5757 /* Free up all the SKBs. */
5758 tg3_free_rings(tp);
5759
5760 /* Zero out all descriptors. */
5761 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
5762
5763 tnapi->rx_rcb_ptr = 0;
5764 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5765
5766 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
5619} 5767}
5620 5768
5621/* 5769/*
@@ -5624,38 +5772,32 @@ static int tg3_init_rings(struct tg3 *tp)
5624 */ 5772 */
5625static void tg3_free_consistent(struct tg3 *tp) 5773static void tg3_free_consistent(struct tg3 *tp)
5626{ 5774{
5627 kfree(tp->rx_std_buffers); 5775 struct tg3_napi *tnapi = &tp->napi[0];
5628 tp->rx_std_buffers = NULL; 5776
5629 if (tp->rx_std) { 5777 kfree(tnapi->tx_buffers);
5630 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, 5778 tnapi->tx_buffers = NULL;
5631 tp->rx_std, tp->rx_std_mapping); 5779 if (tnapi->tx_ring) {
5632 tp->rx_std = NULL; 5780 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5633 } 5781 tnapi->tx_ring, tnapi->tx_desc_mapping);
5634 if (tp->rx_jumbo) { 5782 tnapi->tx_ring = NULL;
5635 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5636 tp->rx_jumbo, tp->rx_jumbo_mapping);
5637 tp->rx_jumbo = NULL;
5638 } 5783 }
5639 if (tp->rx_rcb) { 5784 if (tnapi->rx_rcb) {
5640 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), 5785 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5641 tp->rx_rcb, tp->rx_rcb_mapping); 5786 tnapi->rx_rcb, tnapi->rx_rcb_mapping);
5642 tp->rx_rcb = NULL; 5787 tnapi->rx_rcb = NULL;
5643 }
5644 if (tp->tx_ring) {
5645 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5646 tp->tx_ring, tp->tx_desc_mapping);
5647 tp->tx_ring = NULL;
5648 } 5788 }
5649 if (tp->hw_status) { 5789 if (tnapi->hw_status) {
5650 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, 5790 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5651 tp->hw_status, tp->status_mapping); 5791 tnapi->hw_status,
5652 tp->hw_status = NULL; 5792 tnapi->status_mapping);
5793 tnapi->hw_status = NULL;
5653 } 5794 }
5654 if (tp->hw_stats) { 5795 if (tp->hw_stats) {
5655 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), 5796 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5656 tp->hw_stats, tp->stats_mapping); 5797 tp->hw_stats, tp->stats_mapping);
5657 tp->hw_stats = NULL; 5798 tp->hw_stats = NULL;
5658 } 5799 }
5800 tg3_rx_prodring_fini(tp, &tp->prodring[0]);
5659} 5801}
5660 5802
5661/* 5803/*
@@ -5664,53 +5806,43 @@ static void tg3_free_consistent(struct tg3 *tp)
5664 */ 5806 */
5665static int tg3_alloc_consistent(struct tg3 *tp) 5807static int tg3_alloc_consistent(struct tg3 *tp)
5666{ 5808{
5667 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) * 5809 struct tg3_napi *tnapi = &tp->napi[0];
5668 (TG3_RX_RING_SIZE +
5669 TG3_RX_JUMBO_RING_SIZE)) +
5670 (sizeof(struct tx_ring_info) *
5671 TG3_TX_RING_SIZE),
5672 GFP_KERNEL);
5673 if (!tp->rx_std_buffers)
5674 return -ENOMEM;
5675 5810
5676 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE]; 5811 if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
5677 tp->tx_buffers = (struct tx_ring_info *) 5812 return -ENOMEM;
5678 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5679 5813
5680 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, 5814 tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
5681 &tp->rx_std_mapping); 5815 TG3_TX_RING_SIZE, GFP_KERNEL);
5682 if (!tp->rx_std) 5816 if (!tnapi->tx_buffers)
5683 goto err_out; 5817 goto err_out;
5684 5818
5685 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, 5819 tnapi->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5686 &tp->rx_jumbo_mapping); 5820 &tnapi->tx_desc_mapping);
5687 5821 if (!tnapi->tx_ring)
5688 if (!tp->rx_jumbo)
5689 goto err_out; 5822 goto err_out;
5690 5823
5691 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), 5824 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
5692 &tp->rx_rcb_mapping); 5825 TG3_HW_STATUS_SIZE,
5693 if (!tp->rx_rcb) 5826 &tnapi->status_mapping);
5827 if (!tnapi->hw_status)
5694 goto err_out; 5828 goto err_out;
5695 5829
5696 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES, 5830 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5697 &tp->tx_desc_mapping);
5698 if (!tp->tx_ring)
5699 goto err_out;
5700 5831
5701 tp->hw_status = pci_alloc_consistent(tp->pdev, 5832 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
5702 TG3_HW_STATUS_SIZE, 5833 TG3_RX_RCB_RING_BYTES(tp),
5703 &tp->status_mapping); 5834 &tnapi->rx_rcb_mapping);
5704 if (!tp->hw_status) 5835 if (!tnapi->rx_rcb)
5705 goto err_out; 5836 goto err_out;
5706 5837
5838 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5839
5707 tp->hw_stats = pci_alloc_consistent(tp->pdev, 5840 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5708 sizeof(struct tg3_hw_stats), 5841 sizeof(struct tg3_hw_stats),
5709 &tp->stats_mapping); 5842 &tp->stats_mapping);
5710 if (!tp->hw_stats) 5843 if (!tp->hw_stats)
5711 goto err_out; 5844 goto err_out;
5712 5845
5713 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5714 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 5846 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5715 5847
5716 return 0; 5848 return 0;
@@ -5772,6 +5904,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
5772static int tg3_abort_hw(struct tg3 *tp, int silent) 5904static int tg3_abort_hw(struct tg3 *tp, int silent)
5773{ 5905{
5774 int i, err; 5906 int i, err;
5907 struct tg3_napi *tnapi = &tp->napi[0];
5775 5908
5776 tg3_disable_ints(tp); 5909 tg3_disable_ints(tp);
5777 5910
@@ -5823,8 +5956,8 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
5823 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 5956 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5824 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 5957 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5825 5958
5826 if (tp->hw_status) 5959 if (tnapi->hw_status)
5827 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); 5960 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5828 if (tp->hw_stats) 5961 if (tp->hw_stats)
5829 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 5962 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5830 5963
@@ -6151,15 +6284,20 @@ static int tg3_chip_reset(struct tg3 *tp)
6151 * sharing or irqpoll. 6284 * sharing or irqpoll.
6152 */ 6285 */
6153 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; 6286 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6154 if (tp->hw_status) { 6287 if (tp->napi[0].hw_status) {
6155 tp->hw_status->status = 0; 6288 tp->napi[0].hw_status->status = 0;
6156 tp->hw_status->status_tag = 0; 6289 tp->napi[0].hw_status->status_tag = 0;
6157 } 6290 }
6158 tp->last_tag = 0; 6291 tp->napi[0].last_tag = 0;
6159 tp->last_irq_tag = 0; 6292 tp->napi[0].last_irq_tag = 0;
6160 smp_mb(); 6293 smp_mb();
6161 synchronize_irq(tp->pdev->irq); 6294 synchronize_irq(tp->pdev->irq);
6162 6295
6296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6297 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6298 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6299 }
6300
6163 /* do the reset */ 6301 /* do the reset */
6164 val = GRC_MISC_CFG_CORECLK_RESET; 6302 val = GRC_MISC_CFG_CORECLK_RESET;
6165 6303
@@ -6212,6 +6350,8 @@ static int tg3_chip_reset(struct tg3 *tp)
6212 udelay(120); 6350 udelay(120);
6213 6351
6214 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) { 6352 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6353 u16 val16;
6354
6215 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { 6355 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6216 int i; 6356 int i;
6217 u32 cfg_val; 6357 u32 cfg_val;
@@ -6225,12 +6365,22 @@ static int tg3_chip_reset(struct tg3 *tp)
6225 cfg_val | (1 << 15)); 6365 cfg_val | (1 << 15));
6226 } 6366 }
6227 6367
6228 /* Set PCIE max payload size to 128 bytes and 6368 /* Clear the "no snoop" and "relaxed ordering" bits. */
6229 * clear the "no snoop" and "relaxed ordering" bits. 6369 pci_read_config_word(tp->pdev,
6370 tp->pcie_cap + PCI_EXP_DEVCTL,
6371 &val16);
6372 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6373 PCI_EXP_DEVCTL_NOSNOOP_EN);
6374 /*
6375 * Older PCIe devices only support the 128 byte
6376 * MPS setting. Enforce the restriction.
6230 */ 6377 */
6378 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6379 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6380 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6231 pci_write_config_word(tp->pdev, 6381 pci_write_config_word(tp->pdev,
6232 tp->pcie_cap + PCI_EXP_DEVCTL, 6382 tp->pcie_cap + PCI_EXP_DEVCTL,
6233 0); 6383 val16);
6234 6384
6235 pcie_set_readrq(tp->pdev, 4096); 6385 pcie_set_readrq(tp->pdev, 4096);
6236 6386
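
Instead of blindly writing 0 to the PCIe Device Control register (which also forced the max payload size field to 128 bytes on every chip), the reset path now does a read-modify-write: it always clears the relaxed-ordering and no-snoop enables and clamps the MPS field only on parts limited to 128 bytes. A standalone model of that masking, using the PCI_EXP_DEVCTL bit definitions from include/linux/pci_regs.h (the sample register value and the mps_128_only flag are ours):

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_DEVCTL_PAYLOAD     0x00e0   /* max payload size field */
#define PCI_EXP_DEVCTL_RELAX_EN    0x0010   /* relaxed ordering enable */
#define PCI_EXP_DEVCTL_NOSNOOP_EN  0x0800   /* no snoop enable */

int main(void)
{
        uint16_t val16 = 0x2830;        /* example pci_read_config_word() result */
        int mps_128_only = 1;           /* no CPMU, or 5784: 128-byte MPS only */

        val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
        if (mps_128_only)
                val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;   /* MPS field 0 = 128 bytes */

        printf("DEVCTL -> %#06x\n", val16);
        return 0;
}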
@@ -6288,14 +6438,14 @@ static int tg3_chip_reset(struct tg3 *tp)
6288 tw32_f(MAC_MODE, 0); 6438 tw32_f(MAC_MODE, 0);
6289 udelay(40); 6439 udelay(40);
6290 6440
6291 tg3_mdio_start(tp);
6292
6293 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 6441 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6294 6442
6295 err = tg3_poll_fw(tp); 6443 err = tg3_poll_fw(tp);
6296 if (err) 6444 if (err)
6297 return err; 6445 return err;
6298 6446
6447 tg3_mdio_start(tp);
6448
6299 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 6449 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6300 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { 6450 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6301 val = tr32(0x7c00); 6451 val = tr32(0x7c00);
@@ -6672,6 +6822,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6672{ 6822{
6673 u32 val, rdmac_mode; 6823 u32 val, rdmac_mode;
6674 int i, err, limit; 6824 int i, err, limit;
6825 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
6675 6826
6676 tg3_disable_ints(tp); 6827 tg3_disable_ints(tp);
6677 6828
@@ -6719,6 +6870,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6719 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | 6870 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
6720 PCIE_PWR_MGMT_L1_THRESH_4MS; 6871 PCIE_PWR_MGMT_L1_THRESH_4MS;
6721 tw32(PCIE_PWR_MGMT_THRESH, val); 6872 tw32(PCIE_PWR_MGMT_THRESH, val);
6873
6874 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
6875 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
6876
6877 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
6878 }
6879
6880 if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
6881 val = tr32(TG3_PCIE_LNKCTL);
6882 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
6883 val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
6884 else
6885 val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
6886 tw32(TG3_PCIE_LNKCTL, val);
6722 } 6887 }
6723 6888
6724 /* This works around an issue with Athlon chipsets on 6889 /* This works around an issue with Athlon chipsets on
@@ -6886,35 +7051,33 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6886 * configurable. 7051 * configurable.
6887 */ 7052 */
6888 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 7053 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6889 ((u64) tp->rx_std_mapping >> 32)); 7054 ((u64) tpr->rx_std_mapping >> 32));
6890 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7055 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6891 ((u64) tp->rx_std_mapping & 0xffffffff)); 7056 ((u64) tpr->rx_std_mapping & 0xffffffff));
6892 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 7057 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6893 NIC_SRAM_RX_BUFFER_DESC); 7058 NIC_SRAM_RX_BUFFER_DESC);
6894 7059
6895 /* Don't even try to program the JUMBO/MINI buffer descriptor 7060 /* Disable the mini ring */
6896 * configs on 5705. 7061 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6897 */
6898 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6899 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6900 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6901 } else {
6902 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6903 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6904
6905 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 7062 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6906 BDINFO_FLAGS_DISABLED); 7063 BDINFO_FLAGS_DISABLED);
6907 7064
7065 /* Program the jumbo buffer descriptor ring control
7066 * blocks on those devices that have them.
7067 */
7068 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7069 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6908 /* Setup replenish threshold. */ 7070 /* Setup replenish threshold. */
6909 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); 7071 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6910 7072
6911 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 7073 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6912 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 7074 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6913 ((u64) tp->rx_jumbo_mapping >> 32)); 7075 ((u64) tpr->rx_jmb_mapping >> 32));
6914 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7076 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6915 ((u64) tp->rx_jumbo_mapping & 0xffffffff)); 7077 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
6916 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7078 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6917 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); 7079 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7080 BDINFO_FLAGS_USE_EXT_RECV);
6918 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7081 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6919 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7082 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6920 } else { 7083 } else {
@@ -6922,7 +7085,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6922 BDINFO_FLAGS_DISABLED); 7085 BDINFO_FLAGS_DISABLED);
6923 } 7086 }
6924 7087
6925 } 7088 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
7089 } else
7090 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7091
7092 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
6926 7093
6927 /* There is only one send ring on 5705/5750, no need to explicitly 7094 /* There is only one send ring on 5705/5750, no need to explicitly
6928 * disable the others. 7095 * disable the others.
@@ -6934,13 +7101,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6934 BDINFO_FLAGS_DISABLED); 7101 BDINFO_FLAGS_DISABLED);
6935 } 7102 }
6936 7103
6937 tp->tx_prod = 0; 7104 tp->napi[0].tx_prod = 0;
6938 tp->tx_cons = 0; 7105 tp->napi[0].tx_cons = 0;
6939 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6940 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); 7106 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6941 7107
7108 val = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7109 tw32_mailbox(val, 0);
7110
6942 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, 7111 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6943 tp->tx_desc_mapping, 7112 tp->napi[0].tx_desc_mapping,
6944 (TG3_TX_RING_SIZE << 7113 (TG3_TX_RING_SIZE <<
6945 BDINFO_FLAGS_MAXLEN_SHIFT), 7114 BDINFO_FLAGS_MAXLEN_SHIFT),
6946 NIC_SRAM_TX_BUFFER_DESC); 7115 NIC_SRAM_TX_BUFFER_DESC);
@@ -6956,23 +7125,22 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6956 } 7125 }
6957 } 7126 }
6958 7127
6959 tp->rx_rcb_ptr = 0; 7128 tw32_rx_mbox(tp->napi[0].consmbox, 0);
6960 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6961 7129
6962 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB, 7130 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6963 tp->rx_rcb_mapping, 7131 tp->napi[0].rx_rcb_mapping,
6964 (TG3_RX_RCB_RING_SIZE(tp) << 7132 (TG3_RX_RCB_RING_SIZE(tp) <<
6965 BDINFO_FLAGS_MAXLEN_SHIFT), 7133 BDINFO_FLAGS_MAXLEN_SHIFT),
6966 0); 7134 0);
6967 7135
6968 tp->rx_std_ptr = tp->rx_pending; 7136 tpr->rx_std_ptr = tp->rx_pending;
6969 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 7137 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6970 tp->rx_std_ptr); 7138 tpr->rx_std_ptr);
6971 7139
6972 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? 7140 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6973 tp->rx_jumbo_pending : 0; 7141 tp->rx_jumbo_pending : 0;
6974 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 7142 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6975 tp->rx_jumbo_ptr); 7143 tpr->rx_jmb_ptr);
6976 7144
6977 /* Initialize MAC address and backoff seed. */ 7145 /* Initialize MAC address and backoff seed. */
6978 __tg3_set_mac_addr(tp, 0); 7146 __tg3_set_mac_addr(tp, 0);
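
Just above, the producer mailboxes are seeded with the number of buffers actually posted: tpr->rx_std_ptr starts at tp->rx_pending, and the jumbo producer starts at tp->rx_jumbo_pending only when the jumbo ring is enabled, otherwise 0. A sketch of that seeding rule (sample counts are ours):

#include <stdio.h>

int main(void)
{
        unsigned int rx_pending = 200, rx_jumbo_pending = 100;
        int jumbo_ring_enabled = 1;     /* TG3_FLAG_JUMBO_RING_ENABLE stand-in */

        unsigned int rx_std_ptr = rx_pending;
        unsigned int rx_jmb_ptr = jumbo_ring_enabled ? rx_jumbo_pending : 0;

        printf("std producer = %u, jumbo producer = %u\n",
               rx_std_ptr, rx_jmb_ptr);
        return 0;
}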
@@ -7063,9 +7231,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7063 7231
7064 /* set status block DMA address */ 7232 /* set status block DMA address */
7065 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 7233 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7066 ((u64) tp->status_mapping >> 32)); 7234 ((u64) tp->napi[0].status_mapping >> 32));
7067 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 7235 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7068 ((u64) tp->status_mapping & 0xffffffff)); 7236 ((u64) tp->napi[0].status_mapping & 0xffffffff));
7069 7237
7070 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7238 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7071 /* Status/statistics block address. See tg3_timer, 7239 /* Status/statistics block address. See tg3_timer,
@@ -7094,7 +7262,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7094 tg3_write_mem(tp, i, 0); 7262 tg3_write_mem(tp, i, 0);
7095 udelay(40); 7263 udelay(40);
7096 } 7264 }
7097 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); 7265 memset(tp->napi[0].hw_status, 0, TG3_HW_STATUS_SIZE);
7098 7266
7099 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 7267 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7100 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 7268 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
@@ -7147,7 +7315,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7147 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 7315 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7148 udelay(100); 7316 udelay(100);
7149 7317
7150 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); 7318 tw32_mailbox_f(tp->napi[0].int_mbox, 0);
7151 7319
7152 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7320 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7153 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 7321 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -7164,7 +7332,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7164 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 7332 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7165 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || 7333 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { 7334 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7167 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) && 7335 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7168 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || 7336 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7169 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { 7337 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7170 /* nothing */ 7338 /* nothing */
@@ -7302,7 +7470,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7302 return err; 7470 return err;
7303 7471
7304 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 7472 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7305 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) { 7473 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
7306 u32 tmp; 7474 u32 tmp;
7307 7475
7308 /* Clear CRC stats. */ 7476 /* Clear CRC stats. */
@@ -7449,7 +7617,7 @@ static void tg3_timer(unsigned long __opaque)
7449 * IRQ status the mailbox/status_block protocol the chip 7617 * IRQ status the mailbox/status_block protocol the chip
7450 * uses with the cpu is race prone. 7618 * uses with the cpu is race prone.
7451 */ 7619 */
7452 if (tp->hw_status->status & SD_STATUS_UPDATED) { 7620 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
7453 tw32(GRC_LOCAL_CTRL, 7621 tw32(GRC_LOCAL_CTRL,
7454 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 7622 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7455 } else { 7623 } else {
@@ -7559,7 +7727,7 @@ static int tg3_request_irq(struct tg3 *tp)
7559{ 7727{
7560 irq_handler_t fn; 7728 irq_handler_t fn;
7561 unsigned long flags; 7729 unsigned long flags;
7562 struct net_device *dev = tp->dev; 7730 char *name = tp->dev->name;
7563 7731
7564 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 7732 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7565 fn = tg3_msi; 7733 fn = tg3_msi;
@@ -7572,11 +7740,12 @@ static int tg3_request_irq(struct tg3 *tp)
7572 fn = tg3_interrupt_tagged; 7740 fn = tg3_interrupt_tagged;
7573 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; 7741 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7574 } 7742 }
7575 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev)); 7743 return request_irq(tp->pdev->irq, fn, flags, name, &tp->napi[0]);
7576} 7744}
7577 7745
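
tg3_request_irq() now registers &tp->napi[0] rather than the net_device as the dev_id cookie, and every free_irq() call in this patch is updated to match, because the kernel locates the handler to remove by the (irq, dev_id) pair. A toy registry showing why the cookies must pair up (the registry itself is ours, not a kernel API):

#include <stdio.h>

struct handler {
        void (*fn)(void *);
        void *cookie;
};

static struct handler slot;

static int request(void (*fn)(void *), void *cookie)
{
        slot.fn = fn;
        slot.cookie = cookie;
        return 0;
}

static int release(void *cookie)
{
        if (slot.cookie != cookie)
                return -1;              /* wrong cookie: nothing removed */
        slot.fn = NULL;
        slot.cookie = NULL;
        return 0;
}

static void isr(void *cookie)
{
        (void)cookie;                   /* would service the interrupt */
}

int main(void)
{
        int napi0, dev;                 /* stand-ins for &tp->napi[0] and dev */

        request(isr, &napi0);
        printf("free with dev cookie:   %d\n", release(&dev));
        printf("free with napi0 cookie: %d\n", release(&napi0));
        return 0;
}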
7578static int tg3_test_interrupt(struct tg3 *tp) 7746static int tg3_test_interrupt(struct tg3 *tp)
7579{ 7747{
7748 struct tg3_napi *tnapi = &tp->napi[0];
7580 struct net_device *dev = tp->dev; 7749 struct net_device *dev = tp->dev;
7581 int err, i, intr_ok = 0; 7750 int err, i, intr_ok = 0;
7582 7751
@@ -7585,14 +7754,14 @@ static int tg3_test_interrupt(struct tg3 *tp)
7585 7754
7586 tg3_disable_ints(tp); 7755 tg3_disable_ints(tp);
7587 7756
7588 free_irq(tp->pdev->irq, dev); 7757 free_irq(tp->pdev->irq, tnapi);
7589 7758
7590 err = request_irq(tp->pdev->irq, tg3_test_isr, 7759 err = request_irq(tp->pdev->irq, tg3_test_isr,
7591 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); 7760 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
7592 if (err) 7761 if (err)
7593 return err; 7762 return err;
7594 7763
7595 tp->hw_status->status &= ~SD_STATUS_UPDATED; 7764 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
7596 tg3_enable_ints(tp); 7765 tg3_enable_ints(tp);
7597 7766
7598 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 7767 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
@@ -7601,8 +7770,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
7601 for (i = 0; i < 5; i++) { 7770 for (i = 0; i < 5; i++) {
7602 u32 int_mbox, misc_host_ctrl; 7771 u32 int_mbox, misc_host_ctrl;
7603 7772
7604 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 + 7773 int_mbox = tr32_mailbox(tnapi->int_mbox);
7605 TG3_64BIT_REG_LOW);
7606 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 7774 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7607 7775
7608 if ((int_mbox != 0) || 7776 if ((int_mbox != 0) ||
@@ -7616,7 +7784,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
7616 7784
7617 tg3_disable_ints(tp); 7785 tg3_disable_ints(tp);
7618 7786
7619 free_irq(tp->pdev->irq, dev); 7787 free_irq(tp->pdev->irq, tnapi);
7620 7788
7621 err = tg3_request_irq(tp); 7789 err = tg3_request_irq(tp);
7622 7790
@@ -7634,7 +7802,6 @@ static int tg3_test_interrupt(struct tg3 *tp)
7634 */ 7802 */
7635static int tg3_test_msi(struct tg3 *tp) 7803static int tg3_test_msi(struct tg3 *tp)
7636{ 7804{
7637 struct net_device *dev = tp->dev;
7638 int err; 7805 int err;
7639 u16 pci_cmd; 7806 u16 pci_cmd;
7640 7807
@@ -7665,7 +7832,8 @@ static int tg3_test_msi(struct tg3 *tp)
7665 "the PCI maintainer and include system chipset information.\n", 7832 "the PCI maintainer and include system chipset information.\n",
7666 tp->dev->name); 7833 tp->dev->name);
7667 7834
7668 free_irq(tp->pdev->irq, dev); 7835 free_irq(tp->pdev->irq, &tp->napi[0]);
7836
7669 pci_disable_msi(tp->pdev); 7837 pci_disable_msi(tp->pdev);
7670 7838
7671 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 7839 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
@@ -7685,7 +7853,7 @@ static int tg3_test_msi(struct tg3 *tp)
7685 tg3_full_unlock(tp); 7853 tg3_full_unlock(tp);
7686 7854
7687 if (err) 7855 if (err)
7688 free_irq(tp->pdev->irq, dev); 7856 free_irq(tp->pdev->irq, &tp->napi[0]);
7689 7857
7690 return err; 7858 return err;
7691} 7859}
@@ -7721,6 +7889,33 @@ static int tg3_request_firmware(struct tg3 *tp)
7721 return 0; 7889 return 0;
7722} 7890}
7723 7891
7892static void tg3_ints_init(struct tg3 *tp)
7893{
7894 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7895 /* All MSI supporting chips should support tagged
7896 * status. Assert that this is the case.
7897 */
7898 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7899 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7900 "Not using MSI.\n", tp->dev->name);
7901 } else if (pci_enable_msi(tp->pdev) == 0) {
7902 u32 msi_mode;
7903
7904 msi_mode = tr32(MSGINT_MODE);
7905 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7906 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7907 }
7908 }
7909}
7910
7911static void tg3_ints_fini(struct tg3 *tp)
7912{
7913 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7914 pci_disable_msi(tp->pdev);
7915 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7916 }
7917}
7918
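
The MSI setup that used to live inline in tg3_open() is factored into tg3_ints_init()/tg3_ints_fini(). Setup only sets TG3_FLG2_USING_MSI after pci_enable_msi() succeeds (and only on chips with tagged status), and teardown checks that flag first, so disabling is a no-op when MSI was never enabled. A compact model of that guarded pairing (flag name aside, everything here is ours):

#include <stdio.h>

#define USING_MSI 0x1

static unsigned int flags;

static int enable_msi(int hw_supports_tagged)
{
        if (!hw_supports_tagged)
                return -1;              /* fall back to INTx, flag stays clear */
        flags |= USING_MSI;             /* set only once enabling succeeded */
        return 0;
}

static void disable_msi(void)
{
        if (flags & USING_MSI) {        /* only undo what setup actually did */
                flags &= ~USING_MSI;
                puts("MSI disabled");
        }
}

int main(void)
{
        enable_msi(0);
        disable_msi();                  /* no-op: MSI was never enabled */
        enable_msi(1);
        disable_msi();                  /* prints once */
        return 0;
}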
7724static int tg3_open(struct net_device *dev) 7919static int tg3_open(struct net_device *dev)
7725{ 7920{
7726 struct tg3 *tp = netdev_priv(dev); 7921 struct tg3 *tp = netdev_priv(dev);
@@ -7762,33 +7957,14 @@ static int tg3_open(struct net_device *dev)
7762 if (err) 7957 if (err)
7763 return err; 7958 return err;
7764 7959
7765 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) { 7960 tg3_ints_init(tp);
7766 /* All MSI supporting chips should support tagged
7767 * status. Assert that this is the case.
7768 */
7769 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7770 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7771 "Not using MSI.\n", tp->dev->name);
7772 } else if (pci_enable_msi(tp->pdev) == 0) {
7773 u32 msi_mode;
7774 7961
7775 msi_mode = tr32(MSGINT_MODE); 7962 napi_enable(&tp->napi[0].napi);
7776 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7777 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7778 }
7779 }
7780 err = tg3_request_irq(tp);
7781 7963
7782 if (err) { 7964 err = tg3_request_irq(tp);
7783 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7784 pci_disable_msi(tp->pdev);
7785 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7786 }
7787 tg3_free_consistent(tp);
7788 return err;
7789 }
7790 7965
7791 napi_enable(&tp->napi); 7966 if (err)
7967 goto err_out1;
7792 7968
7793 tg3_full_lock(tp, 0); 7969 tg3_full_lock(tp, 0);
7794 7970
@@ -7816,36 +7992,19 @@ static int tg3_open(struct net_device *dev)
7816 7992
7817 tg3_full_unlock(tp); 7993 tg3_full_unlock(tp);
7818 7994
7819 if (err) { 7995 if (err)
7820 napi_disable(&tp->napi); 7996 goto err_out2;
7821 free_irq(tp->pdev->irq, dev);
7822 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7823 pci_disable_msi(tp->pdev);
7824 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7825 }
7826 tg3_free_consistent(tp);
7827 return err;
7828 }
7829 7997
7830 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 7998 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7831 err = tg3_test_msi(tp); 7999 err = tg3_test_msi(tp);
7832 8000
7833 if (err) { 8001 if (err) {
7834 tg3_full_lock(tp, 0); 8002 tg3_full_lock(tp, 0);
7835
7836 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7837 pci_disable_msi(tp->pdev);
7838 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7839 }
7840 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8003 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7841 tg3_free_rings(tp); 8004 tg3_free_rings(tp);
7842 tg3_free_consistent(tp);
7843
7844 tg3_full_unlock(tp); 8005 tg3_full_unlock(tp);
7845 8006
7846 napi_disable(&tp->napi); 8007 goto err_out1;
7847
7848 return err;
7849 } 8008 }
7850 8009
7851 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 8010 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -7871,6 +8030,15 @@ static int tg3_open(struct net_device *dev)
7871 netif_start_queue(dev); 8030 netif_start_queue(dev);
7872 8031
7873 return 0; 8032 return 0;
8033
8034err_out2:
8035 free_irq(tp->pdev->irq, &tp->napi[0]);
8036
8037err_out1:
8038 napi_disable(&tp->napi[0].napi);
8039 tg3_ints_fini(tp);
8040 tg3_free_consistent(tp);
8041 return err;
7874} 8042}
7875 8043
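
tg3_open()'s three duplicated error blocks collapse into stacked labels: err_out2 releases the newest resource (the IRQ) and falls through into err_out1, which disables NAPI, undoes the interrupt setup, and frees the DMA memory. A sketch of the fall-through ordering, with toy stand-ins for each resource:

#include <stdio.h>

static int open_dev(int fail_early, int fail_late)
{
        puts("tg3_alloc_consistent");   /* DMA memory first */
        puts("napi_enable");

        if (fail_early)
                goto err_out1;          /* IRQ was never requested */

        puts("tg3_request_irq");
        if (fail_late)
                goto err_out2;          /* everything is set up */

        return 0;

err_out2:
        puts("free_irq");               /* newest resource first */
        /* fall through */
err_out1:
        puts("napi_disable");
        puts("tg3_ints_fini");
        puts("tg3_free_consistent");
        return -1;
}

int main(void)
{
        open_dev(0, 1);                 /* full unwind */
        open_dev(1, 0);                 /* partial unwind, skips free_irq */
        return 0;
}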
7876#if 0 8044#if 0
@@ -7879,6 +8047,7 @@ static int tg3_open(struct net_device *dev)
7879 u32 val32, val32_2, val32_3, val32_4, val32_5; 8047 u32 val32, val32_2, val32_3, val32_4, val32_5;
7880 u16 val16; 8048 u16 val16;
7881 int i; 8049 int i;
 8050 struct tg3_hw_status *sblk = tp->napi[0].hw_status;

7882 8051
7883 pci_read_config_word(tp->pdev, PCI_STATUS, &val16); 8052 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7884 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32); 8053 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
@@ -8031,14 +8200,15 @@ static int tg3_open(struct net_device *dev)
8031 val32, val32_2, val32_3, val32_4, val32_5); 8200 val32, val32_2, val32_3, val32_4, val32_5);
8032 8201
8033 /* SW status block */ 8202 /* SW status block */
8034 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 8203 printk(KERN_DEBUG
8035 tp->hw_status->status, 8204 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8036 tp->hw_status->status_tag, 8205 sblk->status,
8037 tp->hw_status->rx_jumbo_consumer, 8206 sblk->status_tag,
8038 tp->hw_status->rx_consumer, 8207 sblk->rx_jumbo_consumer,
8039 tp->hw_status->rx_mini_consumer, 8208 sblk->rx_consumer,
8040 tp->hw_status->idx[0].rx_producer, 8209 sblk->rx_mini_consumer,
8041 tp->hw_status->idx[0].tx_consumer); 8210 sblk->idx[0].rx_producer,
8211 sblk->idx[0].tx_consumer);
8042 8212
8043 /* SW statistics block */ 8213 /* SW statistics block */
8044 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n", 8214 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
@@ -8108,7 +8278,7 @@ static int tg3_close(struct net_device *dev)
8108{ 8278{
8109 struct tg3 *tp = netdev_priv(dev); 8279 struct tg3 *tp = netdev_priv(dev);
8110 8280
8111 napi_disable(&tp->napi); 8281 napi_disable(&tp->napi[0].napi);
8112 cancel_work_sync(&tp->reset_task); 8282 cancel_work_sync(&tp->reset_task);
8113 8283
8114 netif_stop_queue(dev); 8284 netif_stop_queue(dev);
@@ -8128,11 +8298,9 @@ static int tg3_close(struct net_device *dev)
8128 8298
8129 tg3_full_unlock(tp); 8299 tg3_full_unlock(tp);
8130 8300
8131 free_irq(tp->pdev->irq, dev); 8301 free_irq(tp->pdev->irq, &tp->napi[0]);
8132 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 8302
8133 pci_disable_msi(tp->pdev); 8303 tg3_ints_fini(tp);
8134 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8135 }
8136 8304
8137 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev), 8305 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8138 sizeof(tp->net_stats_prev)); 8306 sizeof(tp->net_stats_prev));
@@ -8934,7 +9102,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
8934 else 9102 else
8935 ering->rx_jumbo_pending = 0; 9103 ering->rx_jumbo_pending = 0;
8936 9104
8937 ering->tx_pending = tp->tx_pending; 9105 ering->tx_pending = tp->napi[0].tx_pending;
8938} 9106}
8939 9107
8940static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 9108static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
@@ -8964,7 +9132,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
8964 tp->rx_pending > 63) 9132 tp->rx_pending > 63)
8965 tp->rx_pending = 63; 9133 tp->rx_pending = 63;
8966 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 9134 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8967 tp->tx_pending = ering->tx_pending; 9135 tp->napi[0].tx_pending = ering->tx_pending;
8968 9136
8969 if (netif_running(dev)) { 9137 if (netif_running(dev)) {
8970 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9138 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -9678,6 +9846,11 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9678 dma_addr_t map; 9846 dma_addr_t map;
9679 int num_pkts, tx_len, rx_len, i, err; 9847 int num_pkts, tx_len, rx_len, i, err;
9680 struct tg3_rx_buffer_desc *desc; 9848 struct tg3_rx_buffer_desc *desc;
9849 struct tg3_napi *tnapi, *rnapi;
9850 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
9851
9852 tnapi = &tp->napi[0];
9853 rnapi = &tp->napi[0];
9681 9854
9682 if (loopback_mode == TG3_MAC_LOOPBACK) { 9855 if (loopback_mode == TG3_MAC_LOOPBACK) {
9683 /* HW errata - mac loopback fails in some cases on 5780. 9856 /* HW errata - mac loopback fails in some cases on 5780.
@@ -9699,18 +9872,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9699 } else if (loopback_mode == TG3_PHY_LOOPBACK) { 9872 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9700 u32 val; 9873 u32 val;
9701 9874
9702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 9875 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
9703 u32 phytest; 9876 tg3_phy_fet_toggle_apd(tp, false);
9704
9705 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9706 u32 phy;
9707
9708 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9709 phytest | MII_TG3_EPHY_SHADOW_EN);
9710 if (!tg3_readphy(tp, 0x1b, &phy))
9711 tg3_writephy(tp, 0x1b, phy & ~0x20);
9712 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9713 }
9714 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; 9877 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9715 } else 9878 } else
9716 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; 9879 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
@@ -9721,8 +9884,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9721 udelay(40); 9884 udelay(40);
9722 9885
9723 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 9886 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 9887 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
9725 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800); 9888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9889 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
9726 mac_mode |= MAC_MODE_PORT_MODE_MII; 9890 mac_mode |= MAC_MODE_PORT_MODE_MII;
9727 } else 9891 } else
9728 mac_mode |= MAC_MODE_PORT_MODE_GMII; 9892 mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -9769,18 +9933,17 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9769 9933
9770 udelay(10); 9934 udelay(10);
9771 9935
9772 rx_start_idx = tp->hw_status->idx[0].rx_producer; 9936 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
9773 9937
9774 num_pkts = 0; 9938 num_pkts = 0;
9775 9939
9776 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1); 9940 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
9777 9941
9778 tp->tx_prod++; 9942 tnapi->tx_prod++;
9779 num_pkts++; 9943 num_pkts++;
9780 9944
9781 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 9945 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
9782 tp->tx_prod); 9946 tr32_mailbox(tnapi->prodmbox);
9783 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9784 9947
9785 udelay(10); 9948 udelay(10);
9786 9949
@@ -9791,9 +9954,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9791 9954
9792 udelay(10); 9955 udelay(10);
9793 9956
9794 tx_idx = tp->hw_status->idx[0].tx_consumer; 9957 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
9795 rx_idx = tp->hw_status->idx[0].rx_producer; 9958 rx_idx = rnapi->hw_status->idx[0].rx_producer;
9796 if ((tx_idx == tp->tx_prod) && 9959 if ((tx_idx == tnapi->tx_prod) &&
9797 (rx_idx == (rx_start_idx + num_pkts))) 9960 (rx_idx == (rx_start_idx + num_pkts)))
9798 break; 9961 break;
9799 } 9962 }
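
The loopback test queues a single packet through the per-NAPI producer mailbox and then polls the status-block indices until the TX consumer catches tnapi->tx_prod and the RX producer advances by num_pkts, giving up after a bounded number of retries. A userspace skeleton of that poll-until-indices-match check (the simulated hardware tick and retry count are ours):

#include <stdio.h>

static unsigned int tx_consumer, rx_producer;   /* "hardware" indices */

static void hw_tick(void)
{
        tx_consumer++;                  /* chip consumed our descriptor */
        rx_producer++;                  /* looped-back frame arrived */
}

int main(void)
{
        unsigned int tx_prod = 1, rx_start = rx_producer, num_pkts = 1;
        unsigned int i;
        int ok = 0;

        for (i = 0; i < 25; i++) {      /* bounded poll */
                hw_tick();              /* stand-in for udelay() + chip progress */
                if (tx_consumer == tx_prod &&
                    rx_producer == rx_start + num_pkts) {
                        ok = 1;
                        break;
                }
        }
        printf("loopback %s\n", ok ? "passed" : "failed");
        return !ok;
}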
@@ -9801,13 +9964,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9801 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); 9964 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9802 dev_kfree_skb(skb); 9965 dev_kfree_skb(skb);
9803 9966
9804 if (tx_idx != tp->tx_prod) 9967 if (tx_idx != tnapi->tx_prod)
9805 goto out; 9968 goto out;
9806 9969
9807 if (rx_idx != rx_start_idx + num_pkts) 9970 if (rx_idx != rx_start_idx + num_pkts)
9808 goto out; 9971 goto out;
9809 9972
9810 desc = &tp->rx_rcb[rx_start_idx]; 9973 desc = &rnapi->rx_rcb[rx_start_idx];
9811 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 9974 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9812 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 9975 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9813 if (opaque_key != RXD_OPAQUE_RING_STD) 9976 if (opaque_key != RXD_OPAQUE_RING_STD)
@@ -9821,9 +9984,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9821 if (rx_len != tx_len) 9984 if (rx_len != tx_len)
9822 goto out; 9985 goto out;
9823 9986
9824 rx_skb = tp->rx_std_buffers[desc_idx].skb; 9987 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
9825 9988
9826 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping); 9989 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
9827 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); 9990 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9828 9991
9829 for (i = 14; i < tx_len; i++) { 9992 for (i = 14; i < tx_len; i++) {
@@ -10236,8 +10399,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10236 nvcfg1 = tr32(NVRAM_CFG1); 10399 nvcfg1 = tr32(NVRAM_CFG1);
10237 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { 10400 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10238 tp->tg3_flags2 |= TG3_FLG2_FLASH; 10401 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10239 } 10402 } else {
10240 else {
10241 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 10403 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10242 tw32(NVRAM_CFG1, nvcfg1); 10404 tw32(NVRAM_CFG1, nvcfg1);
10243 } 10405 }
@@ -10245,37 +10407,36 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10245 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || 10407 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10246 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 10408 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10247 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 10409 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10248 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 10410 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10249 tp->nvram_jedecnum = JEDEC_ATMEL; 10411 tp->nvram_jedecnum = JEDEC_ATMEL;
10250 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 10412 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10251 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10413 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10252 break; 10414 break;
10253 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: 10415 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10254 tp->nvram_jedecnum = JEDEC_ATMEL; 10416 tp->nvram_jedecnum = JEDEC_ATMEL;
10255 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; 10417 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10256 break; 10418 break;
10257 case FLASH_VENDOR_ATMEL_EEPROM: 10419 case FLASH_VENDOR_ATMEL_EEPROM:
10258 tp->nvram_jedecnum = JEDEC_ATMEL; 10420 tp->nvram_jedecnum = JEDEC_ATMEL;
10259 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 10421 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10260 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10422 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10261 break; 10423 break;
10262 case FLASH_VENDOR_ST: 10424 case FLASH_VENDOR_ST:
10263 tp->nvram_jedecnum = JEDEC_ST; 10425 tp->nvram_jedecnum = JEDEC_ST;
10264 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; 10426 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10265 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10427 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10266 break; 10428 break;
10267 case FLASH_VENDOR_SAIFUN: 10429 case FLASH_VENDOR_SAIFUN:
10268 tp->nvram_jedecnum = JEDEC_SAIFUN; 10430 tp->nvram_jedecnum = JEDEC_SAIFUN;
10269 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; 10431 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10270 break; 10432 break;
10271 case FLASH_VENDOR_SST_SMALL: 10433 case FLASH_VENDOR_SST_SMALL:
10272 case FLASH_VENDOR_SST_LARGE: 10434 case FLASH_VENDOR_SST_LARGE:
10273 tp->nvram_jedecnum = JEDEC_SST; 10435 tp->nvram_jedecnum = JEDEC_SST;
10274 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; 10436 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10275 break; 10437 break;
10276 } 10438 }
10277 } 10439 } else {
10278 else {
10279 tp->nvram_jedecnum = JEDEC_ATMEL; 10440 tp->nvram_jedecnum = JEDEC_ATMEL;
10280 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 10441 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10281 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10442 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
@@ -10293,48 +10454,47 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10293 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 10454 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10294 10455
10295 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 10456 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10296 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 10457 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10297 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: 10458 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10298 tp->nvram_jedecnum = JEDEC_ATMEL; 10459 tp->nvram_jedecnum = JEDEC_ATMEL;
10299 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10460 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10300 break; 10461 break;
10301 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 10462 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10302 tp->nvram_jedecnum = JEDEC_ATMEL; 10463 tp->nvram_jedecnum = JEDEC_ATMEL;
10303 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10464 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10304 tp->tg3_flags2 |= TG3_FLG2_FLASH; 10465 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10305 break; 10466 break;
10306 case FLASH_5752VENDOR_ST_M45PE10: 10467 case FLASH_5752VENDOR_ST_M45PE10:
10307 case FLASH_5752VENDOR_ST_M45PE20: 10468 case FLASH_5752VENDOR_ST_M45PE20:
10308 case FLASH_5752VENDOR_ST_M45PE40: 10469 case FLASH_5752VENDOR_ST_M45PE40:
10309 tp->nvram_jedecnum = JEDEC_ST; 10470 tp->nvram_jedecnum = JEDEC_ST;
10310 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10471 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10311 tp->tg3_flags2 |= TG3_FLG2_FLASH; 10472 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10312 break; 10473 break;
10313 } 10474 }
10314 10475
10315 if (tp->tg3_flags2 & TG3_FLG2_FLASH) { 10476 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10316 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { 10477 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10317 case FLASH_5752PAGE_SIZE_256: 10478 case FLASH_5752PAGE_SIZE_256:
10318 tp->nvram_pagesize = 256; 10479 tp->nvram_pagesize = 256;
10319 break; 10480 break;
10320 case FLASH_5752PAGE_SIZE_512: 10481 case FLASH_5752PAGE_SIZE_512:
10321 tp->nvram_pagesize = 512; 10482 tp->nvram_pagesize = 512;
10322 break; 10483 break;
10323 case FLASH_5752PAGE_SIZE_1K: 10484 case FLASH_5752PAGE_SIZE_1K:
10324 tp->nvram_pagesize = 1024; 10485 tp->nvram_pagesize = 1024;
10325 break; 10486 break;
10326 case FLASH_5752PAGE_SIZE_2K: 10487 case FLASH_5752PAGE_SIZE_2K:
10327 tp->nvram_pagesize = 2048; 10488 tp->nvram_pagesize = 2048;
10328 break; 10489 break;
10329 case FLASH_5752PAGE_SIZE_4K: 10490 case FLASH_5752PAGE_SIZE_4K:
10330 tp->nvram_pagesize = 4096; 10491 tp->nvram_pagesize = 4096;
10331 break; 10492 break;
10332 case FLASH_5752PAGE_SIZE_264: 10493 case FLASH_5752PAGE_SIZE_264:
10333 tp->nvram_pagesize = 264; 10494 tp->nvram_pagesize = 264;
10334 break; 10495 break;
10335 } 10496 }
10336 } 10497 } else {
10337 else {
10338 /* For eeprom, set pagesize to maximum eeprom size */ 10498 /* For eeprom, set pagesize to maximum eeprom size */
10339 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 10499 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10340 10500
@@ -10357,45 +10517,45 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10357 10517
10358 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 10518 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10359 switch (nvcfg1) { 10519 switch (nvcfg1) {
10360 case FLASH_5755VENDOR_ATMEL_FLASH_1: 10520 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10361 case FLASH_5755VENDOR_ATMEL_FLASH_2: 10521 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10362 case FLASH_5755VENDOR_ATMEL_FLASH_3: 10522 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10363 case FLASH_5755VENDOR_ATMEL_FLASH_5: 10523 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10364 tp->nvram_jedecnum = JEDEC_ATMEL; 10524 tp->nvram_jedecnum = JEDEC_ATMEL;
10365 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10525 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10366 tp->tg3_flags2 |= TG3_FLG2_FLASH; 10526 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10367 tp->nvram_pagesize = 264; 10527 tp->nvram_pagesize = 264;
10368 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || 10528 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10369 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) 10529 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10370 tp->nvram_size = (protect ? 0x3e200 : 10530 tp->nvram_size = (protect ? 0x3e200 :
10371 TG3_NVRAM_SIZE_512KB); 10531 TG3_NVRAM_SIZE_512KB);
10372 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 10532 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10373 tp->nvram_size = (protect ? 0x1f200 : 10533 tp->nvram_size = (protect ? 0x1f200 :
10374 TG3_NVRAM_SIZE_256KB); 10534 TG3_NVRAM_SIZE_256KB);
10375 else 10535 else
10376 tp->nvram_size = (protect ? 0x1f200 : 10536 tp->nvram_size = (protect ? 0x1f200 :
10377 TG3_NVRAM_SIZE_128KB); 10537 TG3_NVRAM_SIZE_128KB);
10378 break; 10538 break;
10379 case FLASH_5752VENDOR_ST_M45PE10: 10539 case FLASH_5752VENDOR_ST_M45PE10:
10380 case FLASH_5752VENDOR_ST_M45PE20: 10540 case FLASH_5752VENDOR_ST_M45PE20:
10381 case FLASH_5752VENDOR_ST_M45PE40: 10541 case FLASH_5752VENDOR_ST_M45PE40:
10382 tp->nvram_jedecnum = JEDEC_ST; 10542 tp->nvram_jedecnum = JEDEC_ST;
10383 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10543 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10384 tp->tg3_flags2 |= TG3_FLG2_FLASH; 10544 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10385 tp->nvram_pagesize = 256; 10545 tp->nvram_pagesize = 256;
10386 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) 10546 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10387 tp->nvram_size = (protect ? 10547 tp->nvram_size = (protect ?
10388 TG3_NVRAM_SIZE_64KB : 10548 TG3_NVRAM_SIZE_64KB :
10389 TG3_NVRAM_SIZE_128KB); 10549 TG3_NVRAM_SIZE_128KB);
10390 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) 10550 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10391 tp->nvram_size = (protect ? 10551 tp->nvram_size = (protect ?
10392 TG3_NVRAM_SIZE_64KB : 10552 TG3_NVRAM_SIZE_64KB :
10393 TG3_NVRAM_SIZE_256KB); 10553 TG3_NVRAM_SIZE_256KB);
10394 else 10554 else
10395 tp->nvram_size = (protect ? 10555 tp->nvram_size = (protect ?
10396 TG3_NVRAM_SIZE_128KB : 10556 TG3_NVRAM_SIZE_128KB :
10397 TG3_NVRAM_SIZE_512KB); 10557 TG3_NVRAM_SIZE_512KB);
10398 break; 10558 break;
10399 } 10559 }
10400} 10560}
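In tg3_get_5755_nvram_info above, protect caps the reported size below the raw part capacity (0x3e200 instead of the full 512 KB, 0x1f200 instead of 256 KB), presumably keeping accesses clear of the protected range. Condensed sketch of the Atmel branch, with the driver's size constants and variant encoding stubbed:

	/* Size selection for the Atmel 5755 parts; values copied from the
	 * switch above, macro names and the variant codes are stand-ins. */
	#define NVRAM_SIZE_512KB (512 * 1024)
	#define NVRAM_SIZE_256KB (256 * 1024)
	#define NVRAM_SIZE_128KB (128 * 1024)

	static unsigned int atmel_5755_size(int variant, int protect)
	{
		if (variant == 1 || variant == 5)	/* FLASH_1 / FLASH_5 */
			return protect ? 0x3e200 : NVRAM_SIZE_512KB;
		if (variant == 2)			/* FLASH_2 */
			return protect ? 0x1f200 : NVRAM_SIZE_256KB;
		return protect ? 0x1f200 : NVRAM_SIZE_128KB;	/* FLASH_3 */
	}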
10401 10561
@@ -10406,34 +10566,34 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10406 nvcfg1 = tr32(NVRAM_CFG1); 10566 nvcfg1 = tr32(NVRAM_CFG1);
10407 10567
10408 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 10568 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10409 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: 10569 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10410 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 10570 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10411 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: 10571 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10412 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 10572 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10413 tp->nvram_jedecnum = JEDEC_ATMEL; 10573 tp->nvram_jedecnum = JEDEC_ATMEL;
10414 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10574 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10415 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 10575 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10416 10576
10417 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 10577 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10418 tw32(NVRAM_CFG1, nvcfg1); 10578 tw32(NVRAM_CFG1, nvcfg1);
10419 break; 10579 break;
10420 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 10580 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10421 case FLASH_5755VENDOR_ATMEL_FLASH_1: 10581 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10422 case FLASH_5755VENDOR_ATMEL_FLASH_2: 10582 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10423 case FLASH_5755VENDOR_ATMEL_FLASH_3: 10583 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10424 tp->nvram_jedecnum = JEDEC_ATMEL; 10584 tp->nvram_jedecnum = JEDEC_ATMEL;
10425 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10585 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10426 tp->tg3_flags2 |= TG3_FLG2_FLASH; 10586 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10427 tp->nvram_pagesize = 264; 10587 tp->nvram_pagesize = 264;
10428 break; 10588 break;
10429 case FLASH_5752VENDOR_ST_M45PE10: 10589 case FLASH_5752VENDOR_ST_M45PE10:
10430 case FLASH_5752VENDOR_ST_M45PE20: 10590 case FLASH_5752VENDOR_ST_M45PE20:
10431 case FLASH_5752VENDOR_ST_M45PE40: 10591 case FLASH_5752VENDOR_ST_M45PE40:
10432 tp->nvram_jedecnum = JEDEC_ST; 10592 tp->nvram_jedecnum = JEDEC_ST;
10433 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10593 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10434 tp->tg3_flags2 |= TG3_FLG2_FLASH; 10594 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10435 tp->nvram_pagesize = 256; 10595 tp->nvram_pagesize = 256;
10436 break; 10596 break;
10437 } 10597 }
10438} 10598}
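On the 5787, the FLASH_5787VENDOR_*_EEPROM variants share one path: TG3_FLG2_FLASH stays clear, the pagesize is pinned to the chip size, and NVRAM_CFG1 gets a read-modify-write to clear the compat-bypass bit. A sketch of that read-modify-write, with tr32/tw32 modeled as accesses through a volatile pointer and a placeholder bit value:

	/* Clear NVRAM_CFG1_COMPAT_BYPASS, sketch form; the bit value is a
	 * placeholder, not the driver's real definition. */
	#define CFG1_COMPAT_BYPASS 0x00000004u

	static void clear_compat_bypass(volatile unsigned int *nvram_cfg1)
	{
		unsigned int v = *nvram_cfg1;	/* tr32(NVRAM_CFG1)    */
		v &= ~CFG1_COMPAT_BYPASS;
		*nvram_cfg1 = v;		/* tw32(NVRAM_CFG1, v) */
	}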
10439 10599
@@ -10451,63 +10611,63 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10451 10611
10452 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 10612 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10453 switch (nvcfg1) { 10613 switch (nvcfg1) {
10454 case FLASH_5761VENDOR_ATMEL_ADB021D: 10614 case FLASH_5761VENDOR_ATMEL_ADB021D:
10455 case FLASH_5761VENDOR_ATMEL_ADB041D: 10615 case FLASH_5761VENDOR_ATMEL_ADB041D:
10456 case FLASH_5761VENDOR_ATMEL_ADB081D: 10616 case FLASH_5761VENDOR_ATMEL_ADB081D:
10457 case FLASH_5761VENDOR_ATMEL_ADB161D: 10617 case FLASH_5761VENDOR_ATMEL_ADB161D:
10458 case FLASH_5761VENDOR_ATMEL_MDB021D: 10618 case FLASH_5761VENDOR_ATMEL_MDB021D:
10459 case FLASH_5761VENDOR_ATMEL_MDB041D: 10619 case FLASH_5761VENDOR_ATMEL_MDB041D:
10460 case FLASH_5761VENDOR_ATMEL_MDB081D: 10620 case FLASH_5761VENDOR_ATMEL_MDB081D:
10461 case FLASH_5761VENDOR_ATMEL_MDB161D: 10621 case FLASH_5761VENDOR_ATMEL_MDB161D:
10462 tp->nvram_jedecnum = JEDEC_ATMEL; 10622 tp->nvram_jedecnum = JEDEC_ATMEL;
10463 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10623 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10464 tp->tg3_flags2 |= TG3_FLG2_FLASH; 10624 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10465 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; 10625 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10466 tp->nvram_pagesize = 256; 10626 tp->nvram_pagesize = 256;
10467 break; 10627 break;
10468 case FLASH_5761VENDOR_ST_A_M45PE20: 10628 case FLASH_5761VENDOR_ST_A_M45PE20:
10469 case FLASH_5761VENDOR_ST_A_M45PE40: 10629 case FLASH_5761VENDOR_ST_A_M45PE40:
10470 case FLASH_5761VENDOR_ST_A_M45PE80: 10630 case FLASH_5761VENDOR_ST_A_M45PE80:
10471 case FLASH_5761VENDOR_ST_A_M45PE16: 10631 case FLASH_5761VENDOR_ST_A_M45PE16:
10472 case FLASH_5761VENDOR_ST_M_M45PE20: 10632 case FLASH_5761VENDOR_ST_M_M45PE20:
10473 case FLASH_5761VENDOR_ST_M_M45PE40: 10633 case FLASH_5761VENDOR_ST_M_M45PE40:
10474 case FLASH_5761VENDOR_ST_M_M45PE80: 10634 case FLASH_5761VENDOR_ST_M_M45PE80:
10475 case FLASH_5761VENDOR_ST_M_M45PE16: 10635 case FLASH_5761VENDOR_ST_M_M45PE16:
10476 tp->nvram_jedecnum = JEDEC_ST; 10636 tp->nvram_jedecnum = JEDEC_ST;
10477 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 10637 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10478 tp->tg3_flags2 |= TG3_FLG2_FLASH; 10638 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10479 tp->nvram_pagesize = 256; 10639 tp->nvram_pagesize = 256;
10480 break; 10640 break;
10481 } 10641 }
10482 10642
10483 if (protect) { 10643 if (protect) {
10484 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); 10644 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10485 } else { 10645 } else {
10486 switch (nvcfg1) { 10646 switch (nvcfg1) {
10487 case FLASH_5761VENDOR_ATMEL_ADB161D: 10647 case FLASH_5761VENDOR_ATMEL_ADB161D:
10488 case FLASH_5761VENDOR_ATMEL_MDB161D: 10648 case FLASH_5761VENDOR_ATMEL_MDB161D:
10489 case FLASH_5761VENDOR_ST_A_M45PE16: 10649 case FLASH_5761VENDOR_ST_A_M45PE16:
10490 case FLASH_5761VENDOR_ST_M_M45PE16: 10650 case FLASH_5761VENDOR_ST_M_M45PE16:
10491 tp->nvram_size = TG3_NVRAM_SIZE_2MB; 10651 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10492 break; 10652 break;
10493 case FLASH_5761VENDOR_ATMEL_ADB081D: 10653 case FLASH_5761VENDOR_ATMEL_ADB081D:
10494 case FLASH_5761VENDOR_ATMEL_MDB081D: 10654 case FLASH_5761VENDOR_ATMEL_MDB081D:
10495 case FLASH_5761VENDOR_ST_A_M45PE80: 10655 case FLASH_5761VENDOR_ST_A_M45PE80:
10496 case FLASH_5761VENDOR_ST_M_M45PE80: 10656 case FLASH_5761VENDOR_ST_M_M45PE80:
10497 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 10657 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10498 break; 10658 break;
10499 case FLASH_5761VENDOR_ATMEL_ADB041D: 10659 case FLASH_5761VENDOR_ATMEL_ADB041D:
10500 case FLASH_5761VENDOR_ATMEL_MDB041D: 10660 case FLASH_5761VENDOR_ATMEL_MDB041D:
10501 case FLASH_5761VENDOR_ST_A_M45PE40: 10661 case FLASH_5761VENDOR_ST_A_M45PE40:
10502 case FLASH_5761VENDOR_ST_M_M45PE40: 10662 case FLASH_5761VENDOR_ST_M_M45PE40:
10503 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 10663 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10504 break; 10664 break;
10505 case FLASH_5761VENDOR_ATMEL_ADB021D: 10665 case FLASH_5761VENDOR_ATMEL_ADB021D:
10506 case FLASH_5761VENDOR_ATMEL_MDB021D: 10666 case FLASH_5761VENDOR_ATMEL_MDB021D:
10507 case FLASH_5761VENDOR_ST_A_M45PE20: 10667 case FLASH_5761VENDOR_ST_A_M45PE20:
10508 case FLASH_5761VENDOR_ST_M_M45PE20: 10668 case FLASH_5761VENDOR_ST_M_M45PE20:
10509 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 10669 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10510 break; 10670 break;
10511 } 10671 }
10512 } 10672 }
10513} 10673}
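tg3_get_5761_nvram_info sizes the part two ways: with protect set it trusts the NVRAM_ADDR_LOCKOUT register, otherwise it decodes the density from the part number, whose digits give the size in Mbit (ADB021D/M45PE20 = 2 Mbit = 256 KB, up through ADB161D/M45PE16 = 16 Mbit = 2 MB). The conversion behind that table, as a one-liner:

	/* Mbit density -> byte size, matching the unprotected decode above
	 * (2 -> 256 KB, 4 -> 512 KB, 8 -> 1 MB, 16 -> 2 MB). */
	static unsigned int mbit_to_bytes(unsigned int mbit)
	{
		return (mbit * 1024u * 1024u) / 8;
	}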
@@ -11485,6 +11645,9 @@ out_not_found:
11485 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && 11645 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11486 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) 11646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
11487 strcpy(tp->board_part_number, "BCM57790"); 11647 strcpy(tp->board_part_number, "BCM57790");
11648 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
11650 strcpy(tp->board_part_number, "BCM57788");
11488 else 11651 else
11489 strcpy(tp->board_part_number, "none"); 11652 strcpy(tp->board_part_number, "none");
11490} 11653}
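The new BCM57788 branch extends an else-if chain that maps PCI device ids to board names on the 57780 ASIC; the same mapping as a lookup table would read as below (the numeric ids are assumptions, only the names come from the strcpy calls above):

	/* Table form of the 57780-family id -> name chain; device id
	 * values are assumptions, not taken from the driver headers. */
	struct board_name { unsigned short device; const char *name; };

	static const struct board_name tg3_57780_boards[] = {
		{ 0x1691, "BCM57788" },
		{ 0x1694, "BCM57790" },
	};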
@@ -11967,7 +12130,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11967 12130
11968 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 12131 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11969 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 12132 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11970 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; 12133 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
11971 12134
11972 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 12135 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11973 &pci_state_reg); 12136 &pci_state_reg);
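This hunk moves the jumbo-capability bit from tg3_flags2 (TG3_FLG2_JUMBO_CAPABLE) into tg3_flags (TG3_FLAG_JUMBO_CAPABLE); the rule itself is unchanged and reduces to the predicate below (bit values are placeholders):

	/* Jumbo capability as set above: everything before the 5705
	 * generation, plus the whole 5780 class, is jumbo capable. */
	#define FLG2_5705_PLUS	0x1u
	#define FLG2_5780_CLASS	0x2u

	static int jumbo_capable(unsigned int flags2)
	{
		return !(flags2 & FLG2_5705_PLUS) ||
		       (flags2 & FLG2_5780_CLASS);
	}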
@@ -12216,12 +12379,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12216 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; 12379 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12217 } 12380 }
12218 12381
12382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12383 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
12384
12219 /* A few boards don't want Ethernet@WireSpeed phy feature */ 12385 /* A few boards don't want Ethernet@WireSpeed phy feature */
12220 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || 12386 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12221 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && 12387 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12222 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && 12388 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12223 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || 12389 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12224 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) || 12390 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
12225 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 12391 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12226 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED; 12392 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12227 12393
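TG3_FLG3_PHY_IS_FET, set once for ASIC_REV_5906 a few lines up, replaces the repeated ASIC-rev comparison here and in the two later hunks; a future chip with the same FET-style 10/100 PHY then needs only one new line at probe time. Condensed sketch, with the struct and constants standing in for the driver's own:

	#define FLG3_PHY_IS_FET 0x1u

	struct chip { unsigned int asic_rev, flags3; };

	/* Classify once at probe time ... */
	static void classify_phy(struct chip *tp)
	{
		if (tp->asic_rev == 5906)  /* extend here for new FET chips */
			tp->flags3 |= FLG3_PHY_IS_FET;
	}

	/* ... then every later check tests the cached bit. */
	static int phy_is_fet(const struct chip *tp)
	{
		return (tp->flags3 & FLG3_PHY_IS_FET) != 0;
	}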
@@ -12232,7 +12398,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12232 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; 12398 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12233 12399
12234 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 12400 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12235 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 && 12401 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
12236 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 12402 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12237 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) { 12403 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12238 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 12404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
@@ -12269,6 +12435,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12269 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 12435 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12270 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; 12436 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12271 12437
12438 if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12439 tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12440 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12441 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12442
12272 err = tg3_mdio_init(tp); 12443 err = tg3_mdio_init(tp);
12273 if (err) 12444 if (err)
12274 return err; 12445 return err;
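The new TG3_FLG3_TOGGLE_10_100_L1PLLPD condition reads: 57780 A0 always gets the workaround, 57780 A1 only when the RCVLPC_STATSENAB_ASF_FIX bit is set (& binds tighter than &&, so the tr32 result is masked before the comparison chain). Isolated as a predicate, with placeholder values:

	#define CHIPREV_57780_A0  0x57780000u	/* placeholder */
	#define CHIPREV_57780_A1  0x57780001u	/* placeholder */
	#define STATSENAB_ASF_FIX 0x1u		/* placeholder */

	static int needs_l1pllpd_toggle(unsigned int chiprev,
					unsigned int stats_enable)
	{
		return chiprev == CHIPREV_57780_A0 ||
		       (chiprev == CHIPREV_57780_A1 &&
			(stats_enable & STATSENAB_ASF_FIX));
	}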
@@ -12352,7 +12523,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12352 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || 12523 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12353 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || 12524 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12354 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || 12525 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12526 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
12356 tp->tg3_flags |= TG3_FLAG_10_100_ONLY; 12527 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12357 12528
12358 err = tg3_phy_probe(tp); 12529 err = tg3_phy_probe(tp);
@@ -13252,9 +13423,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13252 13423
13253 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 13424 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13254 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 13425 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13255 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13256 13426
13257 netif_napi_add(dev, &tp->napi, tg3_poll, 64); 13427 tp->napi[0].tp = tp;
13428 tp->napi[0].int_mbox = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
13429 tp->napi[0].consmbox = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
13430 tp->napi[0].prodmbox = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
13431 tp->napi[0].tx_pending = TG3_DEF_TX_RING_PENDING;
13432 netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
13258 dev->ethtool_ops = &tg3_ethtool_ops; 13433 dev->ethtool_ops = &tg3_ethtool_ops;
13259 dev->watchdog_timeo = TG3_TX_TIMEOUT; 13434 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13260 dev->irq = pdev->irq; 13435 dev->irq = pdev->irq;
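tg3_init_one now seeds a per-vector NAPI context instead of registering a single tp->napi: instance 0 gets the backpointer, the first interrupt, rx-return-consumer and tx host-producer mailbox offsets, and its own tx_pending, and netif_napi_add registers &tp->napi[0].napi. A struct-level sketch of that context; field names follow the diff, types and layout are assumptions:

	struct tg3_napi_sketch {
		void *tp;		 /* backpointer to device state     */
		unsigned int int_mbox;	 /* interrupt mailbox offset        */
		unsigned int consmbox;	 /* rx return-ring consumer mailbox */
		unsigned int prodmbox;	 /* tx host producer mailbox        */
		unsigned int tx_pending; /* per-ring tx ring depth          */
	};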