author	Jeff Garzik <jeff@garzik.org>	2007-10-16 01:40:30 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-04-17 15:31:33 -0400
commit	36b30ea940bb88d88c90698e0e3d97a805ab5856 (patch)
tree	10d41b37a308c2136385ba2b7a63477774223290 /drivers/net/forcedeth.c
parent	cac1f3c8a80f3fc0b4489d1d3ba29214677ffab2 (diff)
[netdrvr] forcedeth: internal simplifications; changelog removal
* remove changelog from source; it's kept in the git repository
* consolidate descriptor version tests using nv_optimized()
* consolidate NIC DMA start, stop and drain into nv_start_rxtx(),
  nv_stop_rxtx(), nv_drain_rxtx()

Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--	drivers/net/forcedeth.c	234
1 files changed, 74 insertions, 160 deletions
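Both consolidations reduce to small helpers, reproduced here from the diff below. nv_optimized() answers "does this NIC use the extended (DESC_VER_3) descriptor format?", so the repeated DESC_VER_1/DESC_VER_2 tests collapse into one predicate; nv_start_rxtx() and its stop/drain counterparts pair the rx and tx engine calls that were previously always made back to back:

	static bool nv_optimized(struct fe_priv *np)
	{
		/* DESC_VER_1 and DESC_VER_2 take the original rx/tx paths;
		 * DESC_VER_3 takes the optimized ones. */
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			return false;
		return true;
	}

	static void nv_start_rxtx(struct net_device *dev)
	{
		nv_start_rx(dev);
		nv_start_tx(dev);
	}

A typical call site then changes from

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);

to

	if (!nv_optimized(np))
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);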
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3338b115fa66..8c4214b0ee1f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -29,90 +29,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
- * Changelog:
- * 0.01: 05 Oct 2003: First release that compiles without warnings.
- * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
- *                    Check all PCI BARs for the register window.
- *                    udelay added to mii_rw.
- * 0.03: 06 Oct 2003: Initialize dev->irq.
- * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
- * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
- * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
- *                    irq mask updated
- * 0.07: 14 Oct 2003: Further irq mask updates.
- * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
- *                    added into irq handler, NULL check for drain_ring.
- * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
- *                    requested interrupt sources.
- * 0.10: 20 Oct 2003: First cleanup for release.
- * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
- *                    MAC Address init fix, set_multicast cleanup.
- * 0.12: 23 Oct 2003: Cleanups for release.
- * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
- *                    Set link speed correctly. start rx before starting
- *                    tx (nv_start_rx sets the link speed).
- * 0.14: 25 Oct 2003: Nic dependant irq mask.
- * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
- *                    open.
- * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
- *                    increased to 1628 bytes.
- * 0.17: 16 Nov 2003: undo rx buffer size increase. Substract 1 from
- *                    the tx length.
- * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
- * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
- *                    addresses, really stop rx if already running
- *                    in nv_start_rx, clean up a bit.
- * 0.20: 07 Dec 2003: alloc fixes
- * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
- * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
- *                    on close.
- * 0.23: 26 Jan 2004: various small cleanups
- * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
- * 0.25: 09 Mar 2004: wol support
- * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
- * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
- *                    added CK804/MCP04 device IDs, code fixes
- *                    for registers, link status and other minor fixes.
- * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
- * 0.29: 31 Aug 2004: Add backup timer for link change notification.
- * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
- *                    into nv_close, otherwise reenabling for wol can
- *                    cause DMA to kfree'd memory.
- * 0.31: 14 Nov 2004: ethtool support for getting/setting link
- *                    capabilities.
- * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
- * 0.33: 16 May 2005: Support for MCP51 added.
- * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
- * 0.35: 26 Jun 2005: Support for MCP55 added.
- * 0.36: 28 Jun 2005: Add jumbo frame support.
- * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
- * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
- *                    per-packet flags.
- * 0.39: 18 Jul 2005: Add 64bit descriptor support.
- * 0.40: 19 Jul 2005: Add support for mac address change.
- * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
- *                    of nv_remove
- * 0.42: 06 Aug 2005: Fix lack of link speed initialization
- *                    in the second (and later) nv_open call
- * 0.43: 10 Aug 2005: Add support for tx checksum.
- * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
- * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
- * 0.46: 20 Oct 2005: Add irq optimization modes.
- * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
- * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
- * 0.49: 10 Dec 2005: Fix tso for large buffers.
- * 0.50: 20 Jan 2006: Add 8021pq tagging support.
- * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
- * 0.52: 20 Jan 2006: Add MSI/MSIX support.
- * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
- * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
- * 0.55: 22 Mar 2006: Add flow control (pause frame).
- * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
- * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
- * 0.58: 30 Oct 2006: Added support for sideband management unit.
- * 0.59: 30 Oct 2006: Added support for recoverable error.
- * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
- *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
  * This means recovery from netif_stop_queue only happens if the hw timer
@@ -123,11 +39,6 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#ifdef CONFIG_FORCEDETH_NAPI
-#define DRIVERNAPI "-NAPI"
-#else
-#define DRIVERNAPI
-#endif
 #define FORCEDETH_VERSION "0.61"
 #define DRV_NAME "forcedeth"
 
@@ -930,6 +841,13 @@ static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 }
 
+static bool nv_optimized(struct fe_priv *np)
+{
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		return false;
+	return true;
+}
+
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 		     int delay, int delaymax, const char *msg)
 {
@@ -966,7 +884,7 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	struct fe_priv *np = get_nvpriv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		if (rxtx_flags & NV_SETUP_RX_RING) {
 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
 		}
@@ -989,7 +907,7 @@ static void free_rings(struct net_device *dev)
 {
 	struct fe_priv *np = get_nvpriv(dev);
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		if (np->rx_ring.orig)
 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 					    np->rx_ring.orig, np->ring_addr);
@@ -1435,6 +1353,18 @@ static void nv_stop_tx(struct net_device *dev)
 	       base + NvRegTransmitPoll);
 }
 
+static void nv_start_rxtx(struct net_device *dev)
+{
+	nv_start_rx(dev);
+	nv_start_tx(dev);
+}
+
+static void nv_stop_rxtx(struct net_device *dev)
+{
+	nv_stop_rx(dev);
+	nv_stop_tx(dev);
+}
+
 static void nv_txrx_reset(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -1657,7 +1587,7 @@ static void nv_do_rx_refill(unsigned long data)
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+	if (!nv_optimized(np))
 		retcode = nv_alloc_rx(dev);
 	else
 		retcode = nv_alloc_rx_optimized(dev);
@@ -1682,8 +1612,10 @@ static void nv_init_rx(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
+
 	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+
+	if (!nv_optimized(np))
 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
 	else
 		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
@@ -1691,7 +1623,7 @@ static void nv_init_rx(struct net_device *dev)
 	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
 
 	for (i = 0; i < np->rx_ring_size; i++) {
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			np->rx_ring.orig[i].flaglen = 0;
 			np->rx_ring.orig[i].buf = 0;
 		} else {
@@ -1709,8 +1641,10 @@ static void nv_init_tx(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
+
 	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+
+	if (!nv_optimized(np))
 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
 	else
 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
@@ -1721,7 +1655,7 @@ static void nv_init_tx(struct net_device *dev)
 	np->tx_end_flip = NULL;
 
 	for (i = 0; i < np->tx_ring_size; i++) {
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			np->tx_ring.orig[i].flaglen = 0;
 			np->tx_ring.orig[i].buf = 0;
 		} else {
@@ -1744,7 +1678,8 @@ static int nv_init_ring(struct net_device *dev)
 
 	nv_init_tx(dev);
 	nv_init_rx(dev);
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+
+	if (!nv_optimized(np))
 		return nv_alloc_rx(dev);
 	else
 		return nv_alloc_rx_optimized(dev);
@@ -1775,7 +1710,7 @@ static void nv_drain_tx(struct net_device *dev)
 	unsigned int i;
 
 	for (i = 0; i < np->tx_ring_size; i++) {
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			np->tx_ring.orig[i].flaglen = 0;
 			np->tx_ring.orig[i].buf = 0;
 		} else {
@@ -1802,7 +1737,7 @@ static void nv_drain_rx(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < np->rx_ring_size; i++) {
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			np->rx_ring.orig[i].flaglen = 0;
 			np->rx_ring.orig[i].buf = 0;
 		} else {
@@ -1823,7 +1758,7 @@ static void nv_drain_rx(struct net_device *dev)
 	}
 }
 
-static void drain_ring(struct net_device *dev)
+static void nv_drain_rxtx(struct net_device *dev)
 {
 	nv_drain_tx(dev);
 	nv_drain_rx(dev);
@@ -2260,7 +2195,7 @@ static void nv_tx_timeout(struct net_device *dev)
 	}
 	printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
 	for (i=0;i<np->tx_ring_size;i+= 4) {
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 			       i,
 			       le32_to_cpu(np->tx_ring.orig[i].buf),
@@ -2296,7 +2231,7 @@ static void nv_tx_timeout(struct net_device *dev)
 	nv_stop_tx(dev);
 
 	/* 2) check that the packets were not sent already: */
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+	if (!nv_optimized(np))
 		nv_tx_done(dev);
 	else
 		nv_tx_done_optimized(dev, np->tx_ring_size);
@@ -2663,12 +2598,10 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
-		nv_stop_rx(dev);
-		nv_stop_tx(dev);
+		nv_stop_rxtx(dev);
 		nv_txrx_reset(dev);
 		/* drain rx queue */
-		nv_drain_rx(dev);
-		nv_drain_tx(dev);
+		nv_drain_rxtx(dev);
 		/* reinit driver view of the rx queue */
 		set_bufsize(dev);
 		if (nv_init_ring(dev)) {
@@ -2685,8 +2618,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		pci_push(base);
 
 		/* restart rx engine */
-		nv_start_rx(dev);
-		nv_start_tx(dev);
+		nv_start_rxtx(dev);
 		spin_unlock(&np->lock);
 		netif_tx_unlock_bh(dev);
 		nv_enable_irq(dev);
@@ -3393,7 +3325,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 	unsigned long flags;
 	int pkts, retcode;
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		pkts = nv_rx_process(dev, budget);
 		retcode = nv_alloc_rx(dev);
 	} else {
@@ -3634,7 +3566,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 	if (intr_test) {
 		handler = nv_nic_irq_test;
 	} else {
-		if (np->desc_ver == DESC_VER_3)
+		if (nv_optimized(np))
 			handler = nv_nic_irq_optimized;
 		else
 			handler = nv_nic_irq;
@@ -3787,12 +3719,10 @@ static void nv_do_nic_poll(unsigned long data)
 		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
-		nv_stop_rx(dev);
-		nv_stop_tx(dev);
+		nv_stop_rxtx(dev);
 		nv_txrx_reset(dev);
 		/* drain rx queue */
-		nv_drain_rx(dev);
-		nv_drain_tx(dev);
+		nv_drain_rxtx(dev);
 		/* reinit driver view of the rx queue */
 		set_bufsize(dev);
 		if (nv_init_ring(dev)) {
@@ -3809,8 +3739,7 @@ static void nv_do_nic_poll(unsigned long data)
 		pci_push(base);
 
 		/* restart rx engine */
-		nv_start_rx(dev);
-		nv_start_tx(dev);
+		nv_start_rxtx(dev);
 		spin_unlock(&np->lock);
 		netif_tx_unlock_bh(dev);
 	}
@@ -3821,7 +3750,7 @@ static void nv_do_nic_poll(unsigned long data)
 	pci_push(base);
 
 	if (!using_multi_irqs(dev)) {
-		if (np->desc_ver == DESC_VER_3)
+		if (nv_optimized(np))
 			nv_nic_irq_optimized(0, dev);
 		else
 			nv_nic_irq(0, dev);
@@ -4019,8 +3948,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
-		nv_stop_rx(dev);
-		nv_stop_tx(dev);
+		nv_stop_rxtx(dev);
 		spin_unlock(&np->lock);
 		netif_tx_unlock_bh(dev);
 	}
@@ -4126,8 +4054,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 	}
 
 	if (netif_running(dev)) {
-		nv_start_rx(dev);
-		nv_start_tx(dev);
+		nv_start_rxtx(dev);
 		nv_enable_irq(dev);
 	}
 
@@ -4170,8 +4097,7 @@ static int nv_nway_reset(struct net_device *dev)
 			netif_tx_lock_bh(dev);
 			spin_lock(&np->lock);
 			/* stop engines */
-			nv_stop_rx(dev);
-			nv_stop_tx(dev);
+			nv_stop_rxtx(dev);
 			spin_unlock(&np->lock);
 			netif_tx_unlock_bh(dev);
 			printk(KERN_INFO "%s: link down.\n", dev->name);
@@ -4191,8 +4117,7 @@ static int nv_nway_reset(struct net_device *dev)
 		}
 
 		if (netif_running(dev)) {
-			nv_start_rx(dev);
-			nv_start_tx(dev);
+			nv_start_rxtx(dev);
 			nv_enable_irq(dev);
 		}
 		ret = 0;
@@ -4249,7 +4174,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	}
 
 	/* allocate new rings */
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		rxtx_ring = pci_alloc_consistent(np->pci_dev,
 						 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
 						 &ring_addr);
@@ -4262,7 +4187,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
 	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
 		/* fall back to old rings */
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (!nv_optimized(np)) {
 			if (rxtx_ring)
 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
 						    rxtx_ring, ring_addr);
@@ -4283,12 +4208,10 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
-		nv_stop_rx(dev);
-		nv_stop_tx(dev);
+		nv_stop_rxtx(dev);
 		nv_txrx_reset(dev);
 		/* drain queues */
-		nv_drain_rx(dev);
-		nv_drain_tx(dev);
+		nv_drain_rxtx(dev);
 		/* delete queues */
 		free_rings(dev);
 	}
@@ -4296,7 +4219,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	/* set new values */
 	np->rx_ring_size = ring->rx_pending;
 	np->tx_ring_size = ring->tx_pending;
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+
+	if (!nv_optimized(np)) {
 		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
 	} else {
@@ -4328,8 +4252,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 		pci_push(base);
 
 		/* restart engines */
-		nv_start_rx(dev);
-		nv_start_tx(dev);
+		nv_start_rxtx(dev);
 		spin_unlock(&np->lock);
 		netif_tx_unlock_bh(dev);
 		nv_enable_irq(dev);
@@ -4370,8 +4293,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
 		netif_tx_lock_bh(dev);
 		spin_lock(&np->lock);
 		/* stop engines */
-		nv_stop_rx(dev);
-		nv_stop_tx(dev);
+		nv_stop_rxtx(dev);
 		spin_unlock(&np->lock);
 		netif_tx_unlock_bh(dev);
 	}
@@ -4412,8 +4334,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
 	}
 
 	if (netif_running(dev)) {
-		nv_start_rx(dev);
-		nv_start_tx(dev);
+		nv_start_rxtx(dev);
 		nv_enable_irq(dev);
 	}
 	return 0;
@@ -4649,8 +4570,7 @@ static int nv_loopback_test(struct net_device *dev)
 	pci_push(base);
 
 	/* restart rx engine */
-	nv_start_rx(dev);
-	nv_start_tx(dev);
+	nv_start_rxtx(dev);
 
 	/* setup packet for tx */
 	pkt_len = ETH_DATA_LEN;
@@ -4668,7 +4588,7 @@ static int nv_loopback_test(struct net_device *dev)
 	for (i = 0; i < pkt_len; i++)
 		pkt_data[i] = (u8)(i & 0xff);
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
 		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	} else {
@@ -4682,7 +4602,7 @@ static int nv_loopback_test(struct net_device *dev)
 	msleep(500);
 
 	/* check for rx of the packet */
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
 
@@ -4728,12 +4648,10 @@ static int nv_loopback_test(struct net_device *dev)
 	dev_kfree_skb_any(tx_skb);
 out:
 	/* stop engines */
-	nv_stop_rx(dev);
-	nv_stop_tx(dev);
+	nv_stop_rxtx(dev);
 	nv_txrx_reset(dev);
 	/* drain rx queue */
-	nv_drain_rx(dev);
-	nv_drain_tx(dev);
+	nv_drain_rxtx(dev);
 
 	if (netif_running(dev)) {
 		writel(misc1_flags, base + NvRegMisc1);
@@ -4771,12 +4689,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
 		}
 		/* stop engines */
-		nv_stop_rx(dev);
-		nv_stop_tx(dev);
+		nv_stop_rxtx(dev);
 		nv_txrx_reset(dev);
 		/* drain rx queue */
-		nv_drain_rx(dev);
-		nv_drain_tx(dev);
+		nv_drain_rxtx(dev);
 		spin_unlock_irq(&np->lock);
 		netif_tx_unlock_bh(dev);
 	}
@@ -4817,8 +4733,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 		pci_push(base);
 		/* restart rx engine */
-		nv_start_rx(dev);
-		nv_start_tx(dev);
+		nv_start_rxtx(dev);
 		netif_start_queue(dev);
 #ifdef CONFIG_FORCEDETH_NAPI
 		napi_enable(&np->napi);
@@ -5047,8 +4962,7 @@ static int nv_open(struct net_device *dev)
 	 * to init hw */
 	np->linkspeed = 0;
 	ret = nv_update_linkspeed(dev);
-	nv_start_rx(dev);
-	nv_start_tx(dev);
+	nv_start_rxtx(dev);
 	netif_start_queue(dev);
 #ifdef CONFIG_FORCEDETH_NAPI
 	napi_enable(&np->napi);
@@ -5072,7 +4986,7 @@ static int nv_open(struct net_device *dev)
 
 	return 0;
 out_drain:
-	drain_ring(dev);
+	nv_drain_rxtx(dev);
 	return ret;
 }
 
@@ -5095,8 +5009,7 @@ static int nv_close(struct net_device *dev)
 
 	netif_stop_queue(dev);
 	spin_lock_irq(&np->lock);
-	nv_stop_tx(dev);
-	nv_stop_rx(dev);
+	nv_stop_rxtx(dev);
 	nv_txrx_reset(dev);
 
 	/* disable interrupts on the nic or we will lock up */
@@ -5109,7 +5022,7 @@ static int nv_close(struct net_device *dev)
 
 	nv_free_irq(dev);
 
-	drain_ring(dev);
+	nv_drain_rxtx(dev);
 
 	if (np->wolenabled) {
 		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
@@ -5269,7 +5182,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	np->rx_ring_size = RX_RING_DEFAULT;
 	np->tx_ring_size = TX_RING_DEFAULT;
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+	if (!nv_optimized(np)) {
 		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
 							sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 							&np->ring_addr);
@@ -5291,7 +5204,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
 	dev->open = nv_open;
 	dev->stop = nv_close;
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+
+	if (!nv_optimized(np))
 		dev->hard_start_xmit = nv_start_xmit;
 	else
 		dev->hard_start_xmit = nv_start_xmit_optimized;