aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/realtek/8139cp.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/realtek/8139cp.c')
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c52
1 files changed, 40 insertions, 12 deletions
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index b01f83a044c4..6cb96b4afdf5 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -648,6 +648,7 @@ static void cp_tx (struct cp_private *cp)
648{ 648{
649 unsigned tx_head = cp->tx_head; 649 unsigned tx_head = cp->tx_head;
650 unsigned tx_tail = cp->tx_tail; 650 unsigned tx_tail = cp->tx_tail;
651 unsigned bytes_compl = 0, pkts_compl = 0;
651 652
652 while (tx_tail != tx_head) { 653 while (tx_tail != tx_head) {
653 struct cp_desc *txd = cp->tx_ring + tx_tail; 654 struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -666,6 +667,9 @@ static void cp_tx (struct cp_private *cp)
666 le32_to_cpu(txd->opts1) & 0xffff, 667 le32_to_cpu(txd->opts1) & 0xffff,
667 PCI_DMA_TODEVICE); 668 PCI_DMA_TODEVICE);
668 669
670 bytes_compl += skb->len;
671 pkts_compl++;
672
669 if (status & LastFrag) { 673 if (status & LastFrag) {
670 if (status & (TxError | TxFIFOUnder)) { 674 if (status & (TxError | TxFIFOUnder)) {
671 netif_dbg(cp, tx_err, cp->dev, 675 netif_dbg(cp, tx_err, cp->dev,
@@ -697,6 +701,7 @@ static void cp_tx (struct cp_private *cp)
697 701
698 cp->tx_tail = tx_tail; 702 cp->tx_tail = tx_tail;
699 703
704 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
700 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) 705 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
701 netif_wake_queue(cp->dev); 706 netif_wake_queue(cp->dev);
702} 707}
@@ -843,6 +848,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
843 wmb(); 848 wmb();
844 } 849 }
845 cp->tx_head = entry; 850 cp->tx_head = entry;
851
852 netdev_sent_queue(dev, skb->len);
846 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", 853 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
847 entry, skb->len); 854 entry, skb->len);
848 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) 855 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -937,6 +944,8 @@ static void cp_stop_hw (struct cp_private *cp)
937 944
938 cp->rx_tail = 0; 945 cp->rx_tail = 0;
939 cp->tx_head = cp->tx_tail = 0; 946 cp->tx_head = cp->tx_tail = 0;
947
948 netdev_reset_queue(cp->dev);
940} 949}
941 950
942static void cp_reset_hw (struct cp_private *cp) 951static void cp_reset_hw (struct cp_private *cp)
@@ -957,8 +966,38 @@ static void cp_reset_hw (struct cp_private *cp)
957 966
958static inline void cp_start_hw (struct cp_private *cp) 967static inline void cp_start_hw (struct cp_private *cp)
959{ 968{
969 dma_addr_t ring_dma;
970
960 cpw16(CpCmd, cp->cpcmd); 971 cpw16(CpCmd, cp->cpcmd);
972
973 /*
974 * These (at least TxRingAddr) need to be configured after the
975	 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
976 * (C+ Command Register) recommends that these and more be configured
977 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
978 * it's been observed that the TxRingAddr is actually reset to garbage
979 * when C+ mode Tx is enabled in CpCmd.
980 */
981 cpw32_f(HiTxRingAddr, 0);
982 cpw32_f(HiTxRingAddr + 4, 0);
983
984 ring_dma = cp->ring_dma;
985 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
986 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
987
988 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
989 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
990 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
991
992 /*
993 * Strictly speaking, the datasheet says this should be enabled
994 * *before* setting the descriptor addresses. But what, then, would
995 * prevent it from doing DMA to random unconfigured addresses?
996 * This variant appears to work fine.
997 */
961 cpw8(Cmd, RxOn | TxOn); 998 cpw8(Cmd, RxOn | TxOn);
999
1000 netdev_reset_queue(cp->dev);
962} 1001}
963 1002
964static void cp_enable_irq(struct cp_private *cp) 1003static void cp_enable_irq(struct cp_private *cp)
@@ -969,7 +1008,6 @@ static void cp_enable_irq(struct cp_private *cp)
969static void cp_init_hw (struct cp_private *cp) 1008static void cp_init_hw (struct cp_private *cp)
970{ 1009{
971 struct net_device *dev = cp->dev; 1010 struct net_device *dev = cp->dev;
972 dma_addr_t ring_dma;
973 1011
974 cp_reset_hw(cp); 1012 cp_reset_hw(cp);
975 1013
@@ -992,17 +1030,6 @@ static void cp_init_hw (struct cp_private *cp)
992 1030
993 cpw8(Config5, cpr8(Config5) & PMEStatus); 1031 cpw8(Config5, cpr8(Config5) & PMEStatus);
994 1032
995 cpw32_f(HiTxRingAddr, 0);
996 cpw32_f(HiTxRingAddr + 4, 0);
997
998 ring_dma = cp->ring_dma;
999 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1000 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1001
1002 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1003 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1004 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1005
1006 cpw16(MultiIntr, 0); 1033 cpw16(MultiIntr, 0);
1007 1034
1008 cpw8_f(Cfg9346, Cfg9346_Lock); 1035 cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1192,6 +1219,7 @@ static void cp_tx_timeout(struct net_device *dev)
1192 cp_clean_rings(cp); 1219 cp_clean_rings(cp);
1193 rc = cp_init_rings(cp); 1220 rc = cp_init_rings(cp);
1194 cp_start_hw(cp); 1221 cp_start_hw(cp);
1222 cp_enable_irq(cp);
1195 1223
1196 netif_wake_queue(dev); 1224 netif_wake_queue(dev);
1197 1225