about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorAyaz Abdulla <aabdulla@nvidia.com>2006-02-04 13:13:26 -0500
committerJeff Garzik <jgarzik@pobox.com>2006-02-20 05:59:18 -0500
commit0832b25a75d128e4f9724156380ba071c4f3f20d (patch)
treef24926c80383da38da6071dc97263c4089bc3f78 /drivers
parentee407b02f3f1992bc746876c26f8175c8783562b (diff)
[PATCH] forcedeth: Add support for 64bit rings
This forcedeth patch adds high-DMA (64-bit) support for the tx/rx rings.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/forcedeth.c58
1 file changed, 42 insertions(+), 16 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0fbe342c2ac9..870613bf3fd6 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -103,6 +103,7 @@
103 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single 103 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
104 * 0.49: 10 Dec 2005: Fix tso for large buffers. 104 * 0.49: 10 Dec 2005: Fix tso for large buffers.
105 * 0.50: 20 Jan 2006: Add 8021pq tagging support. 105 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
106 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
106 * 107 *
107 * Known bugs: 108 * Known bugs:
108 * We suspect that on some hardware no TX done interrupts are generated. 109 * We suspect that on some hardware no TX done interrupts are generated.
@@ -114,7 +115,7 @@
114 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 115 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
115 * superfluous timer interrupts from the nic. 116 * superfluous timer interrupts from the nic.
116 */ 117 */
117#define FORCEDETH_VERSION "0.50" 118#define FORCEDETH_VERSION "0.51"
118#define DRV_NAME "forcedeth" 119#define DRV_NAME "forcedeth"
119 120
120#include <linux/module.h> 121#include <linux/module.h>
@@ -258,6 +259,8 @@ enum {
258#define NVREG_TXRXCTL_DESC_3 0x02200 259#define NVREG_TXRXCTL_DESC_3 0x02200
259#define NVREG_TXRXCTL_VLANSTRIP 0x00040 260#define NVREG_TXRXCTL_VLANSTRIP 0x00040
260#define NVREG_TXRXCTL_VLANINS 0x00080 261#define NVREG_TXRXCTL_VLANINS 0x00080
262 NvRegTxRingPhysAddrHigh = 0x148,
263 NvRegRxRingPhysAddrHigh = 0x14C,
261 NvRegMIIStatus = 0x180, 264 NvRegMIIStatus = 0x180,
262#define NVREG_MIISTAT_ERROR 0x0001 265#define NVREG_MIISTAT_ERROR 0x0001
263#define NVREG_MIISTAT_LINKCHANGE 0x0008 266#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -627,6 +630,33 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
627 return 0; 630 return 0;
628} 631}
629 632
633#define NV_SETUP_RX_RING 0x01
634#define NV_SETUP_TX_RING 0x02
635
636static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
637{
638 struct fe_priv *np = get_nvpriv(dev);
639 u8 __iomem *base = get_hwbase(dev);
640
641 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
642 if (rxtx_flags & NV_SETUP_RX_RING) {
643 writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
644 }
645 if (rxtx_flags & NV_SETUP_TX_RING) {
646 writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
647 }
648 } else {
649 if (rxtx_flags & NV_SETUP_RX_RING) {
650 writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
651 writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
652 }
653 if (rxtx_flags & NV_SETUP_TX_RING) {
654 writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
655 writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
656 }
657 }
658}
659
630#define MII_READ (-1) 660#define MII_READ (-1)
631/* mii_rw: read/write a register on the PHY. 661/* mii_rw: read/write a register on the PHY.
632 * 662 *
@@ -1295,10 +1325,7 @@ static void nv_tx_timeout(struct net_device *dev)
1295 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 1325 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1296 nv_drain_tx(dev); 1326 nv_drain_tx(dev);
1297 np->next_tx = np->nic_tx = 0; 1327 np->next_tx = np->nic_tx = 0;
1298 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1328 setup_hw_rings(dev, NV_SETUP_TX_RING);
1299 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1300 else
1301 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1302 netif_wake_queue(dev); 1329 netif_wake_queue(dev);
1303 } 1330 }
1304 1331
@@ -1573,11 +1600,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1573 } 1600 }
1574 /* reinit nic view of the rx queue */ 1601 /* reinit nic view of the rx queue */
1575 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 1602 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1576 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); 1603 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
1577 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1578 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1579 else
1580 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1581 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), 1604 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1582 base + NvRegRingSizes); 1605 base + NvRegRingSizes);
1583 pci_push(base); 1606 pci_push(base);
@@ -2310,11 +2333,7 @@ static int nv_open(struct net_device *dev)
2310 nv_copy_mac_to_hw(dev); 2333 nv_copy_mac_to_hw(dev);
2311 2334
2312 /* 4) give hw rings */ 2335 /* 4) give hw rings */
2313 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); 2336 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2314 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2315 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
2316 else
2317 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
2318 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), 2337 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
2319 base + NvRegRingSizes); 2338 base + NvRegRingSizes);
2320 2339
@@ -2529,7 +2548,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2529 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 2548 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2530 pci_name(pci_dev)); 2549 pci_name(pci_dev));
2531 } else { 2550 } else {
2532 dev->features |= NETIF_F_HIGHDMA; 2551 if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
2552 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
2553 pci_name(pci_dev));
2554 goto out_relreg;
2555 } else {
2556 dev->features |= NETIF_F_HIGHDMA;
2557 printk(KERN_INFO "forcedeth: using HIGHDMA\n");
2558 }
2533 } 2559 }
2534 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 2560 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
2535 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 2561 } else if (id->driver_data & DEV_HAS_LARGEDESC) {