path: root/drivers/net/forcedeth.c
author     Ayaz Abdulla <aabdulla@nvidia.com>    2007-01-21 18:10:37 -0500
committer  Jeff Garzik <jeff@garzik.org>         2007-02-05 16:58:48 -0500
commit     86b22b0dfbf462e6ed75e54fc83575dae01e3c69 (patch)
tree       8a6efa296e3184acc0cdf8ce9bc72a9fac1e4913 /drivers/net/forcedeth.c
parent     658f648ad1c2876e0ce5401e087d2d21d0262441 (diff)
forcedeth: optimized routines
This patch breaks up the routines into two versions: one for the legacy descriptor versions (ver 1 and ver 2) and one for desc ver 3. This makes the new descriptor functions leaner; further reductions will be made in the next few patches.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
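The idea behind the split can be shown in a few lines. Below is an illustrative, self-contained C sketch (hypothetical names, not the driver's actual code): instead of testing np->desc_ver on every pass through a ring-processing loop, the descriptor-format-specific routine is picked once up front, the way the patch wires up dev->hard_start_xmit and the interrupt handler; the remaining call sites simply branch once per call.

/* Illustrative sketch only (hypothetical names, not driver code):
 * choose the descriptor-format-specific routine once, instead of
 * testing the descriptor version inside every hot loop iteration. */
#include <stdio.h>
#include <stdint.h>

enum { DESC_VER_1 = 1, DESC_VER_2 = 2, DESC_VER_3 = 3 };

struct ring_desc    { uint32_t buf; uint32_t flaglen; };            /* legacy layout */
struct ring_desc_ex { uint32_t bufhigh, buflow, txvlan, flaglen; }; /* desc ver 3    */

struct nic {
        int desc_ver;
        int (*alloc_rx)(struct nic *np);  /* chosen once at init time */
};

static int alloc_rx_legacy(struct nic *np)    { (void)np; puts("legacy rx refill");    return 0; }
static int alloc_rx_optimized(struct nic *np) { (void)np; puts("optimized rx refill"); return 0; }

static void nic_init(struct nic *np, int desc_ver)
{
        np->desc_ver = desc_ver;
        /* the one remaining version check; the hot loops stay free of it */
        if (desc_ver == DESC_VER_1 || desc_ver == DESC_VER_2)
                np->alloc_rx = alloc_rx_legacy;
        else
                np->alloc_rx = alloc_rx_optimized;
}

int main(void)
{
        struct nic np;
        nic_init(&np, DESC_VER_3);
        return np.alloc_rx(&np);
}

In the patch itself the same effect comes from duplicating each routine (nv_alloc_rx vs. nv_alloc_rx_optimized, nv_tx_done vs. nv_tx_done_optimized, and so on) and selecting between them at setup time or once per call.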
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c  634
1 file changed, 501 insertions(+), 133 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0fc078625090..f28ae12d8569 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1307,50 +1307,57 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
 static int nv_alloc_rx(struct net_device *dev)
 {
         struct fe_priv *np = netdev_priv(dev);
-        union ring_type less_rx;
+        struct ring_desc* less_rx;
 
-        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                less_rx.orig = np->get_rx.orig;
-                if (less_rx.orig-- == np->first_rx.orig)
-                        less_rx.orig = np->last_rx.orig;
-        } else {
-                less_rx.ex = np->get_rx.ex;
-                if (less_rx.ex-- == np->first_rx.ex)
-                        less_rx.ex = np->last_rx.ex;
-        }
+        less_rx = np->get_rx.orig;
+        if (less_rx-- == np->first_rx.orig)
+                less_rx = np->last_rx.orig;
 
-        while (1) {
-                struct sk_buff *skb;
-
-                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                        if (np->put_rx.orig == less_rx.orig)
-                                break;
+        while (np->put_rx.orig != less_rx) {
+                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+                if (skb) {
+                        skb->dev = dev;
+                        np->put_rx_ctx->skb = skb;
+                        np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
+                                skb->end-skb->data, PCI_DMA_FROMDEVICE);
+                        np->put_rx_ctx->dma_len = skb->end-skb->data;
+                        np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
+                        wmb();
+                        np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+                        if (np->put_rx.orig++ == np->last_rx.orig)
+                                np->put_rx.orig = np->first_rx.orig;
+                        if (np->put_rx_ctx++ == np->last_rx_ctx)
+                                np->put_rx_ctx = np->first_rx_ctx;
                 } else {
-                        if (np->put_rx.ex == less_rx.ex)
-                                break;
+                        return 1;
                 }
+        }
+        return 0;
+}
+
+static int nv_alloc_rx_optimized(struct net_device *dev)
+{
+        struct fe_priv *np = netdev_priv(dev);
+        struct ring_desc_ex* less_rx;
+
+        less_rx = np->get_rx.ex;
+        if (less_rx-- == np->first_rx.ex)
+                less_rx = np->last_rx.ex;
 
-                skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+        while (np->put_rx.ex != less_rx) {
+                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
                 if (skb) {
                         skb->dev = dev;
                         np->put_rx_ctx->skb = skb;
                         np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
                                 skb->end-skb->data, PCI_DMA_FROMDEVICE);
                         np->put_rx_ctx->dma_len = skb->end-skb->data;
-                        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                                np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
-                                wmb();
-                                np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
-                                if (np->put_rx.orig++ == np->last_rx.orig)
-                                        np->put_rx.orig = np->first_rx.orig;
-                        } else {
-                                np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
-                                np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
-                                wmb();
-                                np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
-                                if (np->put_rx.ex++ == np->last_rx.ex)
-                                        np->put_rx.ex = np->first_rx.ex;
-                        }
+                        np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
+                        np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
+                        wmb();
+                        np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+                        if (np->put_rx.ex++ == np->last_rx.ex)
+                                np->put_rx.ex = np->first_rx.ex;
                         if (np->put_rx_ctx++ == np->last_rx_ctx)
                                 np->put_rx_ctx = np->first_rx_ctx;
                 } else {
@@ -1374,6 +1381,7 @@ static void nv_do_rx_refill(unsigned long data)
 {
         struct net_device *dev = (struct net_device *) data;
         struct fe_priv *np = netdev_priv(dev);
+        int retcode;
 
         if (!using_multi_irqs(dev)) {
                 if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -1383,7 +1391,11 @@ static void nv_do_rx_refill(unsigned long data)
         } else {
                 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
         }
-        if (nv_alloc_rx(dev)) {
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+                retcode = nv_alloc_rx(dev);
+        else
+                retcode = nv_alloc_rx_optimized(dev);
+        if (retcode) {
                 spin_lock_irq(&np->lock);
                 if (!np->in_shutdown)
                         mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -1456,9 +1468,14 @@ static void nv_init_tx(struct net_device *dev)
 
 static int nv_init_ring(struct net_device *dev)
 {
+        struct fe_priv *np = netdev_priv(dev);
+
         nv_init_tx(dev);
         nv_init_rx(dev);
-        return nv_alloc_rx(dev);
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+                return nv_alloc_rx(dev);
+        else
+                return nv_alloc_rx_optimized(dev);
 }
 
 static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
@@ -1554,9 +1571,9 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
         u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
         u32 empty_slots;
         u32 tx_flags_vlan = 0;
-        union ring_type put_tx;
-        union ring_type start_tx;
-        union ring_type prev_tx;
+        struct ring_desc* put_tx;
+        struct ring_desc* start_tx;
+        struct ring_desc* prev_tx;
         struct nv_skb_map* prev_tx_ctx;
 
         /* add fragments to entries count */
@@ -1573,10 +1590,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 return NETDEV_TX_BUSY;
         }
 
-        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-                start_tx.orig = put_tx.orig = np->put_tx.orig;
-        else
-                start_tx.ex = put_tx.ex = np->put_tx.ex;
+        start_tx = put_tx = np->put_tx.orig;
 
         /* setup the header buffer */
         do {
@@ -1586,24 +1600,13 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
                                 PCI_DMA_TODEVICE);
                 np->put_tx_ctx->dma_len = bcnt;
-                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                        put_tx.orig->buf = cpu_to_le32(np->put_tx_ctx->dma);
-                        put_tx.orig->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
-                } else {
-                        put_tx.ex->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
-                        put_tx.ex->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
-                        put_tx.ex->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
-                }
+                put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
+                put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
                 tx_flags = np->tx_flags;
                 offset += bcnt;
                 size -= bcnt;
-                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                        if (put_tx.orig++ == np->last_tx.orig)
-                                put_tx.orig = np->first_tx.orig;
-                } else {
-                        if (put_tx.ex++ == np->last_tx.ex)
-                                put_tx.ex = np->first_tx.ex;
-                }
+                if (put_tx++ == np->last_tx.orig)
+                        put_tx = np->first_tx.orig;
                 if (np->put_tx_ctx++ == np->last_tx_ctx)
                         np->put_tx_ctx = np->first_tx_ctx;
         } while (size);
@@ -1622,33 +1625,19 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                         PCI_DMA_TODEVICE);
                         np->put_tx_ctx->dma_len = bcnt;
 
-                        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                                put_tx.orig->buf = cpu_to_le32(np->put_tx_ctx->dma);
-                                put_tx.orig->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
-                        } else {
-                                put_tx.ex->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
-                                put_tx.ex->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
-                                put_tx.ex->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
-                        }
+                        put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
+                        put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
                         offset += bcnt;
                         size -= bcnt;
-                        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                                if (put_tx.orig++ == np->last_tx.orig)
-                                        put_tx.orig = np->first_tx.orig;
-                        } else {
-                                if (put_tx.ex++ == np->last_tx.ex)
-                                        put_tx.ex = np->first_tx.ex;
-                        }
+                        if (put_tx++ == np->last_tx.orig)
+                                put_tx = np->first_tx.orig;
                         if (np->put_tx_ctx++ == np->last_tx_ctx)
                                 np->put_tx_ctx = np->first_tx_ctx;
                 } while (size);
         }
 
         /* set last fragment flag */
-        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-                prev_tx.orig->flaglen |= cpu_to_le32(tx_flags_extra);
-        else
-                prev_tx.ex->flaglen |= cpu_to_le32(tx_flags_extra);
+        prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
 
         /* save skb in this slot's context area */
         prev_tx_ctx->skb = skb;
@@ -1667,14 +1656,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
         spin_lock_irq(&np->lock);
 
         /* set tx flags */
-        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                start_tx.orig->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
-                np->put_tx.orig = put_tx.orig;
-        } else {
-                start_tx.ex->txvlan = cpu_to_le32(tx_flags_vlan);
-                start_tx.ex->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
-                np->put_tx.ex = put_tx.ex;
-        }
+        start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+        np->put_tx.orig = put_tx;
 
         spin_unlock_irq(&np->lock);
 
@@ -1696,6 +1679,130 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
         return NETDEV_TX_OK;
 }
 
+static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
+{
+        struct fe_priv *np = netdev_priv(dev);
+        u32 tx_flags = 0;
+        u32 tx_flags_extra = NV_TX2_LASTPACKET;
+        unsigned int fragments = skb_shinfo(skb)->nr_frags;
+        unsigned int i;
+        u32 offset = 0;
+        u32 bcnt;
+        u32 size = skb->len-skb->data_len;
+        u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+        u32 empty_slots;
+        u32 tx_flags_vlan = 0;
+        struct ring_desc_ex* put_tx;
+        struct ring_desc_ex* start_tx;
+        struct ring_desc_ex* prev_tx;
+        struct nv_skb_map* prev_tx_ctx;
+
+        /* add fragments to entries count */
+        for (i = 0; i < fragments; i++) {
+                entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
+                           ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+        }
+
+        empty_slots = nv_get_empty_tx_slots(np);
+        if ((empty_slots - np->tx_limit_stop) <= entries) {
+                spin_lock_irq(&np->lock);
+                netif_stop_queue(dev);
+                spin_unlock_irq(&np->lock);
+                return NETDEV_TX_BUSY;
+        }
+
+        start_tx = put_tx = np->put_tx.ex;
+
+        /* setup the header buffer */
+        do {
+                prev_tx = put_tx;
+                prev_tx_ctx = np->put_tx_ctx;
+                bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+                np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
+                                PCI_DMA_TODEVICE);
+                np->put_tx_ctx->dma_len = bcnt;
+                put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
+                put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
+                put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+                tx_flags = np->tx_flags;
+                offset += bcnt;
+                size -= bcnt;
+                if (put_tx++ == np->last_tx.ex)
+                        put_tx = np->first_tx.ex;
+                if (np->put_tx_ctx++ == np->last_tx_ctx)
+                        np->put_tx_ctx = np->first_tx_ctx;
+        } while (size);
+
+        /* setup the fragments */
+        for (i = 0; i < fragments; i++) {
+                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+                u32 size = frag->size;
+                offset = 0;
+
+                do {
+                        prev_tx = put_tx;
+                        prev_tx_ctx = np->put_tx_ctx;
+                        bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+                        np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
+                                        PCI_DMA_TODEVICE);
+                        np->put_tx_ctx->dma_len = bcnt;
+
+                        put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
+                        put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
+                        put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+                        offset += bcnt;
+                        size -= bcnt;
+                        if (put_tx++ == np->last_tx.ex)
+                                put_tx = np->first_tx.ex;
+                        if (np->put_tx_ctx++ == np->last_tx_ctx)
+                                np->put_tx_ctx = np->first_tx_ctx;
+                } while (size);
+        }
+
+        /* set last fragment flag */
+        prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
+
+        /* save skb in this slot's context area */
+        prev_tx_ctx->skb = skb;
+
+        if (skb_is_gso(skb))
+                tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
+        else
+                tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
+                        NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
+
+        /* vlan tag */
+        if (np->vlangrp && vlan_tx_tag_present(skb)) {
+                tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
+        }
+
+        spin_lock_irq(&np->lock);
+
+        /* set tx flags */
+        start_tx->txvlan = cpu_to_le32(tx_flags_vlan);
+        start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+        np->put_tx.ex = put_tx;
+
+        spin_unlock_irq(&np->lock);
+
+        dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
+                dev->name, entries, tx_flags_extra);
+        {
+                int j;
+                for (j=0; j<64; j++) {
+                        if ((j%16) == 0)
+                                dprintk("\n%03x:", j);
+                        dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+                }
+                dprintk("\n");
+        }
+
+        dev->trans_start = jiffies;
+        writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+        pci_push(get_hwbase(dev));
+        return NETDEV_TX_OK;
+}
+
 /*
  * nv_tx_done: check for completed packets, release the skbs.
  *
@@ -1707,16 +1814,8 @@ static void nv_tx_done(struct net_device *dev)
         u32 flags;
         struct sk_buff *skb;
 
-        while (1) {
-                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                        if (np->get_tx.orig == np->put_tx.orig)
-                                break;
-                        flags = le32_to_cpu(np->get_tx.orig->flaglen);
-                } else {
-                        if (np->get_tx.ex == np->put_tx.ex)
-                                break;
-                        flags = le32_to_cpu(np->get_tx.ex->flaglen);
-                }
+        while (np->get_tx.orig != np->put_tx.orig) {
+                flags = le32_to_cpu(np->get_tx.orig->flaglen);
 
                 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
                         dev->name, flags);
@@ -1754,13 +1853,45 @@ static void nv_tx_done(struct net_device *dev)
                         }
                 }
                 nv_release_txskb(dev, np->get_tx_ctx);
-                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                        if (np->get_tx.orig++ == np->last_tx.orig)
-                                np->get_tx.orig = np->first_tx.orig;
-                } else {
-                        if (np->get_tx.ex++ == np->last_tx.ex)
-                                np->get_tx.ex = np->first_tx.ex;
+                if (np->get_tx.orig++ == np->last_tx.orig)
+                        np->get_tx.orig = np->first_tx.orig;
+                if (np->get_tx_ctx++ == np->last_tx_ctx)
+                        np->get_tx_ctx = np->first_tx_ctx;
+        }
+        if (nv_get_empty_tx_slots(np) > np->tx_limit_start)
+                netif_wake_queue(dev);
+}
+
+static void nv_tx_done_optimized(struct net_device *dev)
+{
+        struct fe_priv *np = netdev_priv(dev);
+        u32 flags;
+        struct sk_buff *skb;
+
+        while (np->get_tx.ex == np->put_tx.ex) {
+                flags = le32_to_cpu(np->get_tx.ex->flaglen);
+
+                dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
+                        dev->name, flags);
+                if (flags & NV_TX_VALID)
+                        break;
+                if (flags & NV_TX2_LASTPACKET) {
+                        skb = np->get_tx_ctx->skb;
+                        if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+                                     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
+                                if (flags & NV_TX2_UNDERFLOW)
+                                        np->stats.tx_fifo_errors++;
+                                if (flags & NV_TX2_CARRIERLOST)
+                                        np->stats.tx_carrier_errors++;
+                                np->stats.tx_errors++;
+                        } else {
+                                np->stats.tx_packets++;
+                                np->stats.tx_bytes += skb->len;
+                        }
                 }
+                nv_release_txskb(dev, np->get_tx_ctx);
+                if (np->get_tx.ex++ == np->last_tx.ex)
+                        np->get_tx.ex = np->first_tx.ex;
                 if (np->get_tx_ctx++ == np->last_tx_ctx)
                         np->get_tx_ctx = np->first_tx_ctx;
         }
@@ -1837,7 +1968,10 @@ static void nv_tx_timeout(struct net_device *dev)
         nv_stop_tx(dev);
 
         /* 2) check that the packets were not sent already: */
-        nv_tx_done(dev);
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+                nv_tx_done(dev);
+        else
+                nv_tx_done_optimized(dev);
 
         /* 3) if there are dead entries: clear everything */
         if (np->get_tx_ctx != np->put_tx_ctx) {
@@ -1913,22 +2047,14 @@ static int nv_rx_process(struct net_device *dev, int limit)
         u32 vlanflags = 0;
         int count;
 
         for (count = 0; count < limit; ++count) {
                 struct sk_buff *skb;
                 int len;
 
-                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                        if (np->get_rx.orig == np->put_rx.orig)
-                                break;  /* we scanned the whole ring - do not continue */
-                        flags = le32_to_cpu(np->get_rx.orig->flaglen);
-                        len = nv_descr_getlength(np->get_rx.orig, np->desc_ver);
-                } else {
-                        if (np->get_rx.ex == np->put_rx.ex)
-                                break;  /* we scanned the whole ring - do not continue */
-                        flags = le32_to_cpu(np->get_rx.ex->flaglen);
-                        len = nv_descr_getlength_ex(np->get_rx.ex, np->desc_ver);
-                        vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
-                }
+                if (np->get_rx.orig == np->put_rx.orig)
+                        break;  /* we scanned the whole ring - do not continue */
+                flags = le32_to_cpu(np->get_rx.orig->flaglen);
+                len = nv_descr_getlength(np->get_rx.orig, np->desc_ver);
 
                 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
                         dev->name, flags);
@@ -2076,13 +2202,133 @@ static int nv_rx_process(struct net_device *dev, int limit)
                 np->stats.rx_packets++;
                 np->stats.rx_bytes += len;
 next_pkt:
-                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                        if (np->get_rx.orig++ == np->last_rx.orig)
-                                np->get_rx.orig = np->first_rx.orig;
-                } else {
-                        if (np->get_rx.ex++ == np->last_rx.ex)
-                                np->get_rx.ex = np->first_rx.ex;
+                if (np->get_rx.orig++ == np->last_rx.orig)
+                        np->get_rx.orig = np->first_rx.orig;
+                if (np->get_rx_ctx++ == np->last_rx_ctx)
+                        np->get_rx_ctx = np->first_rx_ctx;
+        }
+
+        return count;
+}
+
+static int nv_rx_process_optimized(struct net_device *dev, int limit)
+{
+        struct fe_priv *np = netdev_priv(dev);
+        u32 flags;
+        u32 vlanflags = 0;
+        int count;
+
+        for (count = 0; count < limit; ++count) {
+                struct sk_buff *skb;
+                int len;
+
+                if (np->get_rx.ex == np->put_rx.ex)
+                        break;  /* we scanned the whole ring - do not continue */
+                flags = le32_to_cpu(np->get_rx.ex->flaglen);
+                len = nv_descr_getlength_ex(np->get_rx.ex, np->desc_ver);
+                vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
+
+                dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
+                        dev->name, flags);
+
+                if (flags & NV_RX_AVAIL)
+                        break;  /* still owned by hardware, */
+
+                /*
+                 * the packet is for us - immediately tear down the pci mapping.
+                 * TODO: check if a prefetch of the first cacheline improves
+                 * the performance.
+                 */
+                pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
+                                np->get_rx_ctx->dma_len,
+                                PCI_DMA_FROMDEVICE);
+                skb = np->get_rx_ctx->skb;
+                np->get_rx_ctx->skb = NULL;
+
+                {
+                        int j;
+                        dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
+                        for (j=0; j<64; j++) {
+                                if ((j%16) == 0)
+                                        dprintk("\n%03x:", j);
+                                dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+                        }
+                        dprintk("\n");
                 }
+                /* look at what we actually got: */
+                if (!(flags & NV_RX2_DESCRIPTORVALID)) {
+                        dev_kfree_skb(skb);
+                        goto next_pkt;
+                }
+
+                if (flags & NV_RX2_ERROR) {
+                        if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
+                                np->stats.rx_errors++;
+                                dev_kfree_skb(skb);
+                                goto next_pkt;
+                        }
+                        if (flags & NV_RX2_CRCERR) {
+                                np->stats.rx_crc_errors++;
+                                np->stats.rx_errors++;
+                                dev_kfree_skb(skb);
+                                goto next_pkt;
+                        }
+                        if (flags & NV_RX2_OVERFLOW) {
+                                np->stats.rx_over_errors++;
+                                np->stats.rx_errors++;
+                                dev_kfree_skb(skb);
+                                goto next_pkt;
+                        }
+                        if (flags & NV_RX2_ERROR4) {
+                                len = nv_getlen(dev, skb->data, len);
+                                if (len < 0) {
+                                        np->stats.rx_errors++;
+                                        dev_kfree_skb(skb);
+                                        goto next_pkt;
+                                }
+                        }
+                        /* framing errors are soft errors */
+                        if (flags & NV_RX2_FRAMINGERR) {
+                                if (flags & NV_RX2_SUBSTRACT1) {
+                                        len--;
+                                }
+                        }
+                }
+                if (np->rx_csum) {
+                        flags &= NV_RX2_CHECKSUMMASK;
+                        if (flags == NV_RX2_CHECKSUMOK1 ||
+                            flags == NV_RX2_CHECKSUMOK2 ||
+                            flags == NV_RX2_CHECKSUMOK3) {
+                                dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
+                                skb->ip_summed = CHECKSUM_UNNECESSARY;
+                        } else {
+                                dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
+                        }
+                }
+                /* got a valid packet - forward it to the network core */
+                skb_put(skb, len);
+                skb->protocol = eth_type_trans(skb, dev);
+                dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
+                        dev->name, len, skb->protocol);
+#ifdef CONFIG_FORCEDETH_NAPI
+                if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+                        vlan_hwaccel_receive_skb(skb, np->vlangrp,
+                                                 vlanflags & NV_RX3_VLAN_TAG_MASK);
+                else
+                        netif_receive_skb(skb);
+#else
+                if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+                        vlan_hwaccel_rx(skb, np->vlangrp,
+                                        vlanflags & NV_RX3_VLAN_TAG_MASK);
+                else
+                        netif_rx(skb);
+#endif
+                dev->last_rx = jiffies;
+                np->stats.rx_packets++;
+                np->stats.rx_bytes += len;
+next_pkt:
+                if (np->get_rx.ex++ == np->last_rx.ex)
+                        np->get_rx.ex = np->first_rx.ex;
                 if (np->get_rx_ctx++ == np->last_rx_ctx)
                         np->get_rx_ctx = np->first_rx_ctx;
         }
@@ -2655,6 +2901,117 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
         return IRQ_RETVAL(i);
 }
 
+static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
+{
+        struct net_device *dev = (struct net_device *) data;
+        struct fe_priv *np = netdev_priv(dev);
+        u8 __iomem *base = get_hwbase(dev);
+        u32 events;
+        int i;
+
+        dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
+
+        for (i=0; ; i++) {
+                if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+                        events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+                        writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+                } else {
+                        events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+                        writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+                }
+                pci_push(base);
+                dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+                if (!(events & np->irqmask))
+                        break;
+
+                spin_lock(&np->lock);
+                nv_tx_done_optimized(dev);
+                spin_unlock(&np->lock);
+
+                if (events & NVREG_IRQ_LINK) {
+                        spin_lock(&np->lock);
+                        nv_link_irq(dev);
+                        spin_unlock(&np->lock);
+                }
+                if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+                        spin_lock(&np->lock);
+                        nv_linkchange(dev);
+                        spin_unlock(&np->lock);
+                        np->link_timeout = jiffies + LINK_TIMEOUT;
+                }
+                if (events & (NVREG_IRQ_TX_ERR)) {
+                        dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
+                                dev->name, events);
+                }
+                if (events & (NVREG_IRQ_UNKNOWN)) {
+                        printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
+                                dev->name, events);
+                }
+                if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
+                        spin_lock(&np->lock);
+                        /* disable interrupts on the nic */
+                        if (!(np->msi_flags & NV_MSI_X_ENABLED))
+                                writel(0, base + NvRegIrqMask);
+                        else
+                                writel(np->irqmask, base + NvRegIrqMask);
+                        pci_push(base);
+
+                        if (!np->in_shutdown) {
+                                np->nic_poll_irq = np->irqmask;
+                                np->recover_error = 1;
+                                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+                        }
+                        spin_unlock(&np->lock);
+                        break;
+                }
+
+#ifdef CONFIG_FORCEDETH_NAPI
+                if (events & NVREG_IRQ_RX_ALL) {
+                        netif_rx_schedule(dev);
+
+                        /* Disable furthur receive irq's */
+                        spin_lock(&np->lock);
+                        np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+                        if (np->msi_flags & NV_MSI_X_ENABLED)
+                                writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+                        else
+                                writel(np->irqmask, base + NvRegIrqMask);
+                        spin_unlock(&np->lock);
+                }
+#else
+                nv_rx_process_optimized(dev, dev->weight);
+                if (nv_alloc_rx_optimized(dev)) {
+                        spin_lock(&np->lock);
+                        if (!np->in_shutdown)
+                                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+                        spin_unlock(&np->lock);
+                }
+#endif
+                if (i > max_interrupt_work) {
+                        spin_lock(&np->lock);
+                        /* disable interrupts on the nic */
+                        if (!(np->msi_flags & NV_MSI_X_ENABLED))
+                                writel(0, base + NvRegIrqMask);
+                        else
+                                writel(np->irqmask, base + NvRegIrqMask);
+                        pci_push(base);
+
+                        if (!np->in_shutdown) {
+                                np->nic_poll_irq = np->irqmask;
+                                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+                        }
+                        printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
+                        spin_unlock(&np->lock);
+                        break;
+                }
+
+        }
+        dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
+
+        return IRQ_RETVAL(i);
+}
+
 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 {
         struct net_device *dev = (struct net_device *) data;
@@ -2675,7 +3032,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
                         break;
 
                 spin_lock_irqsave(&np->lock, flags);
-                nv_tx_done(dev);
+                nv_tx_done_optimized(dev);
                 spin_unlock_irqrestore(&np->lock, flags);
 
                 if (events & (NVREG_IRQ_TX_ERR)) {
@@ -2711,7 +3068,10 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
         u8 __iomem *base = get_hwbase(dev);
         unsigned long flags;
 
-        pkts = nv_rx_process(dev, limit);
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+                pkts = nv_rx_process(dev, limit);
+        else
+                pkts = nv_rx_process_optimized(dev, limit);
 
         if (nv_alloc_rx(dev)) {
                 spin_lock_irqsave(&np->lock, flags);
@@ -2782,8 +3142,8 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
                 if (!(events & np->irqmask))
                         break;
 
-                nv_rx_process(dev, dev->weight);
-                if (nv_alloc_rx(dev)) {
+                nv_rx_process_optimized(dev, dev->weight);
+                if (nv_alloc_rx_optimized(dev)) {
                         spin_lock_irqsave(&np->lock, flags);
                         if (!np->in_shutdown)
                                 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -2942,6 +3302,16 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
         u8 __iomem *base = get_hwbase(dev);
         int ret = 1;
         int i;
+        irqreturn_t (*handler)(int foo, void *data);
+
+        if (intr_test) {
+                handler = nv_nic_irq_test;
+        } else {
+                if (np->desc_ver == DESC_VER_3)
+                        handler = nv_nic_irq_optimized;
+                else
+                        handler = nv_nic_irq;
+        }
 
         if (np->msi_flags & NV_MSI_X_CAPABLE) {
                 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
@@ -2979,10 +3349,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                         set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
                 } else {
                         /* Request irq for all interrupts */
-                        if ((!intr_test &&
-                             request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
-                            (intr_test &&
-                             request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
+                        if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
                                 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
                                 pci_disable_msix(np->pci_dev);
                                 np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -2998,8 +3365,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
         if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
                 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
                         np->msi_flags |= NV_MSI_ENABLED;
-                        if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
-                            (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
+                        if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
                                 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
                                 pci_disable_msi(np->pci_dev);
                                 np->msi_flags &= ~NV_MSI_ENABLED;
@@ -3014,8 +3380,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                 }
         }
         if (ret != 0) {
-                if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
-                    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0))
+                if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
                         goto out_err;
 
         }
@@ -4629,7 +4994,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
         dev->open = nv_open;
         dev->stop = nv_close;
-        dev->hard_start_xmit = nv_start_xmit;
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+                dev->hard_start_xmit = nv_start_xmit;
+        else
+                dev->hard_start_xmit = nv_start_xmit_optimized;
         dev->get_stats = nv_get_stats;
         dev->change_mtu = nv_change_mtu;
         dev->set_mac_address = nv_set_mac_address;