author     David Woodhouse <dwmw2@infradead.org>    2012-11-21 22:16:58 -0500
committer  David S. Miller <davem@davemloft.net>    2012-11-25 15:54:23 -0500
commit     871f0d4c153e1258d4becf306eca6761bf38b629 (patch)
tree       4160014e5bc0c793b3825b53bf1f6787e897f90a
parent     a9dbe40fc10cea2efe6e1ff9e03c62dd7579c5ba (diff)
8139cp: enable bql
This adds support for byte queue limits on RTL8139C+

Tested on real hardware.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-By: Dave Täht <dave.taht@bufferbloat.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c  11
1 file changed, 11 insertions, 0 deletions
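For context, the patch follows the standard byte queue limits (BQL) driver pattern: report bytes to the stack with netdev_sent_queue() when a packet is queued to the hardware, credit them back with netdev_completed_queue() from the TX completion path, and clear the accounting with netdev_reset_queue() whenever the TX ring is reset. A minimal sketch of that pattern in a hypothetical driver is shown below; the foo_* names and helpers are illustrative placeholders, not code from 8139cp.c, and only the netdev_*_queue() calls are the real interface from <linux/netdevice.h>.

    /* Sketch of the BQL accounting pattern applied by this patch.
     * foo_priv, foo_hw_queue() and foo_hw_reclaim() are hypothetical
     * placeholders for the driver's own ring handling.
     */
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct foo_priv {
            struct net_device *dev;
    };

    /* Hypothetical hardware helpers, assumed to exist elsewhere in the driver. */
    void foo_hw_queue(struct foo_priv *priv, struct sk_buff *skb);
    struct sk_buff *foo_hw_reclaim(struct foo_priv *priv);

    /* TX path: account the bytes handed to the hardware. */
    static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            foo_hw_queue(netdev_priv(dev), skb);    /* post skb to the TX ring */
            netdev_sent_queue(dev, skb->len);       /* BQL: bytes now in flight */
            return NETDEV_TX_OK;
    }

    /* TX completion: credit back everything the hardware has finished with. */
    static void foo_tx_complete(struct foo_priv *priv)
    {
            unsigned int pkts = 0, bytes = 0;
            struct sk_buff *skb;

            while ((skb = foo_hw_reclaim(priv)) != NULL) {
                    bytes += skb->len;
                    pkts++;
                    dev_kfree_skb(skb);
            }
            netdev_completed_queue(priv->dev, pkts, bytes); /* BQL: bytes completed */
    }

    /* Ring reset: discard the in-flight accounting along with the descriptors. */
    static void foo_reset_ring(struct foo_priv *priv)
    {
            netdev_reset_queue(priv->dev);
    }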
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5166d94a2240..3de318d557f0 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -648,6 +648,7 @@ static void cp_tx (struct cp_private *cp)
 {
        unsigned tx_head = cp->tx_head;
        unsigned tx_tail = cp->tx_tail;
+       unsigned bytes_compl = 0, pkts_compl = 0;
 
        while (tx_tail != tx_head) {
                struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -666,6 +667,9 @@ static void cp_tx (struct cp_private *cp)
                                 le32_to_cpu(txd->opts1) & 0xffff,
                                 PCI_DMA_TODEVICE);
 
+               bytes_compl += skb->len;
+               pkts_compl++;
+
                if (status & LastFrag) {
                        if (status & (TxError | TxFIFOUnder)) {
                                netif_dbg(cp, tx_err, cp->dev,
@@ -697,6 +701,7 @@ static void cp_tx (struct cp_private *cp)
 
        cp->tx_tail = tx_tail;
 
+       netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
        if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
                netif_wake_queue(cp->dev);
 }
@@ -843,6 +848,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                wmb();
        }
        cp->tx_head = entry;
+
+       netdev_sent_queue(dev, skb->len);
        netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
                  entry, skb->len);
        if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -937,6 +944,8 @@ static void cp_stop_hw (struct cp_private *cp)
 
        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
+
+       netdev_reset_queue(cp->dev);
 }
 
 static void cp_reset_hw (struct cp_private *cp)
@@ -987,6 +996,8 @@ static inline void cp_start_hw (struct cp_private *cp)
         * This variant appears to work fine.
         */
        cpw8(Cmd, RxOn | TxOn);
+
+       netdev_reset_queue(cp->dev);
 }
 
 static void cp_enable_irq(struct cp_private *cp)