author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-12 21:07:07 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-12 21:07:07 -0500
commit    6be35c700f742e911ecedd07fcc43d4439922334 (patch)
tree      ca9f37214d204465fcc2d79c82efd291e357c53c /drivers/net/ethernet/realtek/8139cp.c
parent    e37aa63e87bd581f9be5555ed0ba83f5295c92fc (diff)
parent    520dfe3a3645257bf83660f672c47f8558f3d4c4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:

 1) Allow dumping, monitoring, and changing the bridge multicast database using netlink. From Cong Wang.

 2) RFC 5961 TCP blind data injection attack mitigation, from Eric Dumazet.

 3) Networking user namespace support from Eric W. Biederman.

 4) tuntap/virtio-net multiqueue support by Jason Wang.

 5) Support for checksum offload of encapsulated packets (basically, tunneled traffic can still be checksummed by HW). From Joseph Gasparakis.

 6) Allow BPF filter access to VLAN tags, from Eric Dumazet and Daniel Borkmann.

 7) Bridge port parameters over netlink and BPDU blocking support from Stephen Hemminger.

 8) Improve data access patterns during inet socket demux by rearranging socket layout, from Eric Dumazet.

 9) TIPC protocol updates and cleanups from Ying Xue, Paul Gortmaker, and Jon Maloy.

10) Update TCP socket hash sizing to be more in line with current day realities. The existing heuristics were chosen a decade ago. From Eric Dumazet.

11) Fix races, queue bloat, and excessive wakeups in ATM and associated drivers, from Krzysztof Mazur and David Woodhouse.

12) Support DOVE (Distributed Overlay Virtual Ethernet) extensions in the VXLAN driver, from David Stevens.

13) Add "oops_only" mode to netconsole, from Amerigo Wang.

14) Support set and query of VEB/VEPA bridge mode via PF_BRIDGE, and also allow DCB netlink to work on namespaces other than the initial namespace. From John Fastabend.

15) Support PTP in the Tigon3 driver, from Matt Carlson.

16) tun/vhost zero copy fixes and improvements, plus turn it on by default, from Michael S. Tsirkin.

17) Support per-association statistics in SCTP, from Michele Baldessari.

And many, many driver updates, cleanups, and improvements, too numerous to mention individually.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1722 commits)
  net/mlx4_en: Add support for destination MAC in steering rules
  net/mlx4_en: Use generic etherdevice.h functions.
  net: ethtool: Add destination MAC address to flow steering API
  bridge: add support of adding and deleting mdb entries
  bridge: notify mdb changes via netlink
  ndisc: Unexport ndisc_{build,send}_skb().
  uapi: add missing netconf.h to export list
  pkt_sched: avoid requeues if possible
  solos-pci: fix double-free of TX skb in DMA mode
  bnx2: Fix accidental reversions.
  bna: Driver Version Updated to 3.1.2.1
  bna: Firmware update
  bna: Add RX State
  bna: Rx Page Based Allocation
  bna: TX Intr Coalescing Fix
  bna: Tx and Rx Optimizations
  bna: Code Cleanup and Enhancements
  ath9k: check pdata variable before dereferencing it
  ath5k: RX timestamp is reported at end of frame
  ath9k_htc: RX timestamp is reported at end of frame
  ...
Diffstat (limited to 'drivers/net/ethernet/realtek/8139cp.c')
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 75
1 file changed, 44 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 609125a249d9..cb6fc5a743ca 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -648,6 +648,7 @@ static void cp_tx (struct cp_private *cp)
 {
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -666,6 +667,9 @@ static void cp_tx (struct cp_private *cp)
				 le32_to_cpu(txd->opts1) & 0xffff,
				 PCI_DMA_TODEVICE);
 
+		bytes_compl += skb->len;
+		pkts_compl++;
+
		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				netif_dbg(cp, tx_err, cp->dev,
@@ -697,6 +701,7 @@ static void cp_tx (struct cp_private *cp)
 
	cp->tx_tail = tx_tail;
 
+	netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
 }
@@ -843,6 +848,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
		wmb();
	}
	cp->tx_head = entry;
+
+	netdev_sent_queue(dev, skb->len);
	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
		  entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -937,6 +944,8 @@ static void cp_stop_hw (struct cp_private *cp)
 
	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
+
+	netdev_reset_queue(cp->dev);
 }
 
 static void cp_reset_hw (struct cp_private *cp)
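
The three calls introduced above — netdev_sent_queue() in cp_start_xmit(), netdev_completed_queue() in cp_tx(), and netdev_reset_queue() when the rings are torn down — hook the driver into byte queue limits (BQL). A minimal sketch of how that pairing is normally used in a driver follows; the my_* names and the elided ring handling are illustrative placeholders, not code from 8139cp:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the skb and post it to the hardware TX ring ... */

	/* tell BQL how many bytes were handed to the hardware */
	netdev_sent_queue(dev, skb->len);
	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/*
	 * ... walk the completed descriptors, unmapping and freeing each
	 * skb while accumulating pkts_compl++ and bytes_compl += skb->len ...
	 */

	/* report completions so BQL can keep the queue shallow */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void my_ring_reset(struct net_device *dev)
{
	/* after a ring/hardware reset the two counters must match again */
	netdev_reset_queue(dev);
}

Every byte reported through netdev_sent_queue() must eventually be matched by netdev_completed_queue() or cleared by netdev_reset_queue(); otherwise the stack stops the queue and never wakes it, which is why the reset call is added to the ring-teardown and ring-start paths in this patch.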
@@ -957,8 +966,38 @@ static void cp_reset_hw (struct cp_private *cp)
 
 static inline void cp_start_hw (struct cp_private *cp)
 {
+	dma_addr_t ring_dma;
+
	cpw16(CpCmd, cp->cpcmd);
+
+	/*
+	 * These (at least TxRingAddr) need to be configured after the
+	 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
+	 * (C+ Command Register) recommends that these and more be configured
+	 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
+	 * it's been observed that the TxRingAddr is actually reset to garbage
+	 * when C+ mode Tx is enabled in CpCmd.
+	 */
+	cpw32_f(HiTxRingAddr, 0);
+	cpw32_f(HiTxRingAddr + 4, 0);
+
+	ring_dma = cp->ring_dma;
+	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	/*
+	 * Strictly speaking, the datasheet says this should be enabled
+	 * *before* setting the descriptor addresses. But what, then, would
+	 * prevent it from doing DMA to random unconfigured addresses?
+	 * This variant appears to work fine.
+	 */
	cpw8(Cmd, RxOn | TxOn);
+
+	netdev_reset_queue(cp->dev);
 }
 
 static void cp_enable_irq(struct cp_private *cp)
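
One detail of the hunk above is worth spelling out: the upper half of the 64-bit ring address is written as (ring_dma >> 16) >> 16 rather than ring_dma >> 32. A small illustrative sketch of the reason, using hypothetical helper names that do not appear in the driver:

#include <linux/types.h>

static inline u32 ring_addr_lo(dma_addr_t addr)
{
	return addr & 0xffffffff;
}

static inline u32 ring_addr_hi(dma_addr_t addr)
{
	/*
	 * dma_addr_t is only 32 bits wide on many configurations, and
	 * shifting a 32-bit value by 32 is undefined behaviour in C.
	 * Two 16-bit shifts are always well defined: they yield 0 when
	 * the type is 32 bits and the top 32 bits when it is 64 bits.
	 */
	return (addr >> 16) >> 16;
}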
@@ -969,7 +1008,6 @@ static void cp_enable_irq(struct cp_private *cp)
 static void cp_init_hw (struct cp_private *cp)
 {
	struct net_device *dev = cp->dev;
-	dma_addr_t ring_dma;
 
	cp_reset_hw(cp);
 
@@ -992,17 +1030,6 @@ static void cp_init_hw (struct cp_private *cp)
 
	cpw8(Config5, cpr8(Config5) & PMEStatus);
 
-	cpw32_f(HiTxRingAddr, 0);
-	cpw32_f(HiTxRingAddr + 4, 0);
-
-	ring_dma = cp->ring_dma;
-	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
	cpw16(MultiIntr, 0);
 
	cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1197,18 +1224,16 @@ static void cp_tx_timeout(struct net_device *dev)
	cp_clean_rings(cp);
	rc = cp_init_rings(cp);
	cp_start_hw(cp);
+	cp_enable_irq(cp);
 
	netif_wake_queue(dev);
 
	spin_unlock_irqrestore(&cp->lock, flags);
 }
 
-#ifdef BROKEN
 static int cp_change_mtu(struct net_device *dev, int new_mtu)
 {
	struct cp_private *cp = netdev_priv(dev);
-	int rc;
-	unsigned long flags;
 
	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
@@ -1221,22 +1246,12 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
		return 0;
	}
 
-	spin_lock_irqsave(&cp->lock, flags);
-
-	cp_stop_hw(cp);			/* stop h/w and free rings */
-	cp_clean_rings(cp);
-
+	/* network IS up, close it, reset MTU, and come up again. */
+	cp_close(dev);
	dev->mtu = new_mtu;
-	cp_set_rxbufsize(cp);		/* set new rx buf size */
-
-	rc = cp_init_rings(cp);		/* realloc and restart h/w */
-	cp_start_hw(cp);
-
-	spin_unlock_irqrestore(&cp->lock, flags);
-
-	return rc;
+	cp_set_rxbufsize(cp);
+	return cp_open(dev);
 }
-#endif /* BROKEN */
 
 static const char mii_2_8139_map[8] = {
	BasicModeCtrl,
@@ -1812,9 +1827,7 @@ static const struct net_device_ops cp_netdev_ops = {
	.ndo_start_xmit		= cp_start_xmit,
	.ndo_tx_timeout		= cp_tx_timeout,
	.ndo_set_features	= cp_set_features,
-#ifdef BROKEN
	.ndo_change_mtu		= cp_change_mtu,
-#endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cp_poll_controller,