author    Manfred Spraul <manfred@colorfullife.com>  2005-07-31 12:32:26 -0400
committer Jeff Garzik <jgarzik@pobox.com>  2005-07-31 12:59:56 -0400
commit    ee73362cdd7d9b8166424f5f9e3176c629ac5cb2
tree      de286b3f681482846cbbc412b3251a7201cf6b36 /drivers/net/forcedeth.c
parent    c2dba06dae7d6c4d15b83ea12d8c601cffd0aee9
[PATCH] forcedeth: 64-bit DMA support
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/forcedeth.c')

 -rw-r--r--  drivers/net/forcedeth.c | 210
 1 file changed, 161 insertions(+), 49 deletions(-)
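Note for readers skimming the diff below: the heart of the patch is a second descriptor layout. The original struct ring_desc carries a single 32-bit buffer pointer, while the new struct ring_desc_ex splits the bus address across two 32-bit words (PacketBufferHigh/PacketBufferLow) so that packet format 3 hardware can address buffers above 4 GB. The following is a minimal standalone sketch of that split in plain C, with illustrative names only; the in-tree code additionally applies cpu_to_le64()/cpu_to_le32() conversions before storing the words.

/* Standalone sketch (not driver code): splitting a 64-bit bus address
 * into the two 32-bit words of the extended descriptor. Struct and
 * function names here are illustrative, not from the patch. */
#include <stdint.h>
#include <stdio.h>

struct desc_ex_sketch {
	uint32_t PacketBufferHigh;	/* bits 63..32 of the bus address */
	uint32_t PacketBufferLow;	/* bits 31..0 of the bus address */
	uint32_t Reserved;
	uint32_t FlagLen;		/* ownership flags | (length - 1) */
};

static void desc_set_addr(struct desc_ex_sketch *d, uint64_t dma)
{
	d->PacketBufferHigh = (uint32_t)(dma >> 32);
	d->PacketBufferLow  = (uint32_t)(dma & 0xffffffffULL);
}

int main(void)
{
	struct desc_ex_sketch d = { 0 };

	desc_set_addr(&d, 0x0000007f12345678ULL);	/* a 40-bit address */
	printf("high=0x%08x low=0x%08x\n",
	       d.PacketBufferHigh, d.PacketBufferLow);
	return 0;
}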
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 746ad0178f8..4d38acbac4e 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -89,6 +89,7 @@
  * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
  * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
  *                    per-packet flags.
+ * 0.39: 18 Jul 2005: Add 64bit descriptor support.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -100,7 +101,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION	"0.38"
+#define FORCEDETH_VERSION	"0.39"
 #define DRV_NAME	"forcedeth"
 
 #include <linux/module.h>
@@ -138,6 +139,7 @@
 #define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
 #define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
 #define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
+#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -291,6 +293,18 @@ struct ring_desc {
 	u32 FlagLen;
 };
 
+struct ring_desc_ex {
+	u32 PacketBufferHigh;
+	u32 PacketBufferLow;
+	u32 Reserved;
+	u32 FlagLen;
+};
+
+typedef union _ring_type {
+	struct ring_desc* orig;
+	struct ring_desc_ex* ex;
+} ring_type;
+
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
@@ -405,6 +419,7 @@ struct ring_desc {
  */
 #define DESC_VER_1	0x0
 #define DESC_VER_2	(0x02100|NVREG_TXRXCTL_RXCHECK)
+#define DESC_VER_3	(0x02200|NVREG_TXRXCTL_RXCHECK)
 
 /* PHY defines */
 #define PHY_OUI_MARVELL	0x5043
@@ -477,7 +492,7 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
-	struct ring_desc *rx_ring;
+	ring_type rx_ring;
 	unsigned int cur_rx, refill_rx;
 	struct sk_buff *rx_skbuff[RX_RING];
 	dma_addr_t rx_dma[RX_RING];
@@ -494,7 +509,7 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	struct ring_desc *tx_ring;
+	ring_type tx_ring;
 	unsigned int next_tx, nic_tx;
 	struct sk_buff *tx_skbuff[TX_RING];
 	dma_addr_t tx_dma[TX_RING];
@@ -529,6 +544,11 @@ static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 }
 
+static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
+{
+	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+}
+
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 			int delay, int delaymax, const char *msg)
 {
@@ -813,9 +833,16 @@ static int nv_alloc_rx(struct net_device *dev)
 		}
 		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
 					PCI_DMA_FROMDEVICE);
-		np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
-		wmb();
-		np->rx_ring[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+			wmb();
+			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+		} else {
+			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+			wmb();
+			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+		}
 		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
 			dev->name, refill_rx);
 		refill_rx++;
@@ -849,7 +876,10 @@ static void nv_init_rx(struct net_device *dev)
 	np->cur_rx = RX_RING;
 	np->refill_rx = 0;
 	for (i = 0; i < RX_RING; i++)
-		np->rx_ring[i].FlagLen = 0;
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->rx_ring.orig[i].FlagLen = 0;
+		else
+			np->rx_ring.ex[i].FlagLen = 0;
 }
 
 static void nv_init_tx(struct net_device *dev)
@@ -859,7 +889,10 @@ static void nv_init_tx(struct net_device *dev)
 
 	np->next_tx = np->nic_tx = 0;
 	for (i = 0; i < TX_RING; i++)
-		np->tx_ring[i].FlagLen = 0;
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->tx_ring.orig[i].FlagLen = 0;
+		else
+			np->tx_ring.ex[i].FlagLen = 0;
 }
 
 static int nv_init_ring(struct net_device *dev)
@@ -874,7 +907,10 @@ static void nv_drain_tx(struct net_device *dev)
 	struct fe_priv *np = get_nvpriv(dev);
 	int i;
 	for (i = 0; i < TX_RING; i++) {
-		np->tx_ring[i].FlagLen = 0;
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->tx_ring.orig[i].FlagLen = 0;
+		else
+			np->tx_ring.ex[i].FlagLen = 0;
 		if (np->tx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev, np->tx_dma[i],
 					np->tx_skbuff[i]->len,
@@ -891,7 +927,10 @@ static void nv_drain_rx(struct net_device *dev)
 	struct fe_priv *np = get_nvpriv(dev);
 	int i;
 	for (i = 0; i < RX_RING; i++) {
-		np->rx_ring[i].FlagLen = 0;
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->rx_ring.orig[i].FlagLen = 0;
+		else
+			np->rx_ring.ex[i].FlagLen = 0;
 		wmb();
 		if (np->rx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -922,11 +961,19 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
 					PCI_DMA_TODEVICE);
 
-	np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+	else {
+		np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+		np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+	}
 
 	spin_lock_irq(&np->lock);
 	wmb();
-	np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+	else
+		np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
 	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
 		dev->name, np->next_tx);
 	{
@@ -964,7 +1011,10 @@ static void nv_tx_done(struct net_device *dev)
 	while (np->nic_tx != np->next_tx) {
 		i = np->nic_tx % TX_RING;
 
-		Flags = le32_to_cpu(np->tx_ring[i].FlagLen);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+		else
+			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
 
 		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
 			dev->name, np->nic_tx, Flags);
@@ -1035,16 +1085,33 @@ static void nv_tx_timeout(struct net_device *dev)
 	}
 	printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
 	for (i=0;i<TX_RING;i+= 4) {
-		printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
-			i,
-			le32_to_cpu(np->tx_ring[i].PacketBuffer),
-			le32_to_cpu(np->tx_ring[i].FlagLen),
-			le32_to_cpu(np->tx_ring[i+1].PacketBuffer),
-			le32_to_cpu(np->tx_ring[i+1].FlagLen),
-			le32_to_cpu(np->tx_ring[i+2].PacketBuffer),
-			le32_to_cpu(np->tx_ring[i+2].FlagLen),
-			le32_to_cpu(np->tx_ring[i+3].PacketBuffer),
-			le32_to_cpu(np->tx_ring[i+3].FlagLen));
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+			printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
+				i,
+				le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
+				le32_to_cpu(np->tx_ring.orig[i].FlagLen),
+				le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
+				le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
+				le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
+				le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
+				le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
+				le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+		} else {
+			printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+				i,
+				le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
+				le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
+				le32_to_cpu(np->tx_ring.ex[i].FlagLen),
+				le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
+				le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
+				le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
+				le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
+				le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
+				le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
+				le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
+				le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
+				le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+		}
 	}
 }
 
@@ -1061,7 +1128,10 @@ static void nv_tx_timeout(struct net_device *dev)
 		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
 		nv_drain_tx(dev);
 		np->next_tx = np->nic_tx = 0;
-		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		else
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
 		netif_wake_queue(dev);
 	}
 
@@ -1136,8 +1206,13 @@ static void nv_rx_process(struct net_device *dev)
 			break;	/* we scanned the whole ring - do not continue */
 
 		i = np->cur_rx % RX_RING;
-		Flags = le32_to_cpu(np->rx_ring[i].FlagLen);
-		len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
+		} else {
+			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
+		}
 
 		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
 			dev->name, np->cur_rx, Flags);
@@ -1321,7 +1396,10 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		/* reinit nic view of the rx queue */
 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 		writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
-		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		else
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
 		writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
 			base + NvRegRingSizes);
 		pci_push(base);
@@ -1982,7 +2060,10 @@ static int nv_open(struct net_device *dev)
 
 	/* 4) give hw rings */
 	writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
-	writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+	else
+		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
 	writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 
@@ -2173,24 +2254,48 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	}
 
 	/* handle different descriptor versions */
-	np->desc_ver = DESC_VER_1;
-	np->pkt_limit = NV_PKTLIMIT_1;
-	if (id->driver_data & DEV_HAS_LARGEDESC) {
+	if (id->driver_data & DEV_HAS_HIGH_DMA) {
+		/* packet format 3: supports 40-bit addressing */
+		np->desc_ver = DESC_VER_3;
+		if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
+					pci_name(pci_dev));
+		}
+	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
+		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
-		np->pkt_limit = NV_PKTLIMIT_2;
+	} else {
+		/* original packet format */
+		np->desc_ver = DESC_VER_1;
 	}
 
+	np->pkt_limit = NV_PKTLIMIT_1;
+	if (id->driver_data & DEV_HAS_LARGEDESC)
+		np->pkt_limit = NV_PKTLIMIT_2;
+
 	err = -ENOMEM;
 	np->base = ioremap(addr, NV_PCI_REGSZ);
 	if (!np->base)
 		goto out_relreg;
 	dev->base_addr = (unsigned long)np->base;
+
 	dev->irq = pci_dev->irq;
-	np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
-						&np->ring_addr);
-	if (!np->rx_ring)
-		goto out_unmap;
-	np->tx_ring = &np->rx_ring[RX_RING];
+
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
+					sizeof(struct ring_desc) * (RX_RING + TX_RING),
+					&np->ring_addr);
+		if (!np->rx_ring.orig)
+			goto out_unmap;
+		np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
+	} else {
+		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
+					sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+					&np->ring_addr);
+		if (!np->rx_ring.ex)
+			goto out_unmap;
+		np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
+	}
 
 	dev->open = nv_open;
 	dev->stop = nv_close;
@@ -2313,8 +2418,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	return 0;
 
 out_freering:
-	pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
-				np->rx_ring, np->ring_addr);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
+					np->rx_ring.orig, np->ring_addr);
+	else
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+					np->rx_ring.ex, np->ring_addr);
 	pci_set_drvdata(pci_dev, NULL);
 out_unmap:
 	iounmap(get_hwbase(dev));
@@ -2343,7 +2452,10 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
 	writel(np->orig_mac[1], base + NvRegMacAddrB);
 
 	/* free all structures */
-	pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
+	else
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
 	iounmap(get_hwbase(dev));
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
@@ -2382,35 +2494,35 @@ static struct pci_device_id pci_tbl[] = {
 	},
 	{	/* CK804 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{	/* CK804 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP04 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP04 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP51 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP51 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{0,},
 };
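A closing note on the probe logic above: pci_set_dma_mask() returns 0 on success, so when the platform rejects the 39-bit mask 0x0000007fffffffffULL the patch simply prints an informational message and continues with the default 32-bit addressing. Below is a compact userspace sketch of that negotiate-then-fall-back pattern; the mock device and helper stand in for the PCI core and are illustrative only, not driver code.

/* Userspace sketch of the try-wide-then-fall-back DMA mask pattern.
 * set_dma_mask() mimics the 0-on-success convention of the kernel's
 * pci_set_dma_mask(); the mock device is purely illustrative. */
#include <stdint.h>
#include <stdio.h>

#define DMA_39BIT_MASK	0x0000007fffffffffULL	/* mask the patch requests */
#define DMA_32BIT_MASK	0x00000000ffffffffULL	/* default fallback */

struct mock_dev {
	uint64_t platform_limit;	/* widest mask the platform honors */
	uint64_t dma_mask;		/* currently active mask */
};

static int set_dma_mask(struct mock_dev *dev, uint64_t mask)
{
	if (mask > dev->platform_limit)
		return -1;		/* refused: previous mask stays active */
	dev->dma_mask = mask;
	return 0;			/* accepted */
}

int main(void)
{
	struct mock_dev dev = { DMA_32BIT_MASK, DMA_32BIT_MASK };

	if (set_dma_mask(&dev, DMA_39BIT_MASK))
		printf("64-bit DMA failed, using 32-bit addressing\n");
	else
		printf("using 39-bit DMA addressing\n");
	return 0;
}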