Diffstat (limited to 'drivers/net/forcedeth.c')
 drivers/net/forcedeth.c | 670 +++++++++++++++++++++++++++++---------------
 1 file changed, 432 insertions(+), 238 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 21be4fa071b..eea1d66c530 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -109,6 +109,7 @@
  * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  * 0.55: 22 Mar 2006: Add flow control (pause frame).
  * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
+ * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -120,7 +121,12 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION "0.56"
+#ifdef CONFIG_FORCEDETH_NAPI
+#define DRIVERNAPI "-NAPI"
+#else
+#define DRIVERNAPI
+#endif
+#define FORCEDETH_VERSION "0.57"
 #define DRV_NAME "forcedeth"
 
 #include <linux/module.h>
@@ -240,10 +246,12 @@ enum {
240#define NVREG_RNDSEED_FORCE2 0x2d00 246#define NVREG_RNDSEED_FORCE2 0x2d00
241#define NVREG_RNDSEED_FORCE3 0x7400 247#define NVREG_RNDSEED_FORCE3 0x7400
242 248
243 NvRegUnknownSetupReg1 = 0xA0, 249 NvRegTxDeferral = 0xA0,
244#define NVREG_UNKSETUP1_VAL 0x16070f 250#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
245 NvRegUnknownSetupReg2 = 0xA4, 251#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
246#define NVREG_UNKSETUP2_VAL 0x16 252#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
253 NvRegRxDeferral = 0xA4,
254#define NVREG_RX_DEFERRAL_DEFAULT 0x16
247 NvRegMacAddrA = 0xA8, 255 NvRegMacAddrA = 0xA8,
248 NvRegMacAddrB = 0xAC, 256 NvRegMacAddrB = 0xAC,
249 NvRegMulticastAddrA = 0xB0, 257 NvRegMulticastAddrA = 0xB0,
@@ -260,7 +268,8 @@ enum {
 	NvRegRingSizes = 0x108,
 #define NVREG_RINGSZ_TXSHIFT 0
 #define NVREG_RINGSZ_RXSHIFT 16
-	NvRegUnknownTransmitterReg = 0x10c,
+	NvRegTransmitPoll = 0x10c,
+#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
 	NvRegLinkSpeed = 0x110,
 #define NVREG_LINKSPEED_FORCE 0x10000
 #define NVREG_LINKSPEED_10 1000
@@ -269,8 +278,10 @@ enum {
 #define NVREG_LINKSPEED_MASK (0xFFF)
 	NvRegUnknownSetupReg5 = 0x130,
 #define NVREG_UNKSETUP5_BIT31 (1<<31)
-	NvRegUnknownSetupReg3 = 0x13c,
-#define NVREG_UNKSETUP3_VAL1 0x200010
+	NvRegTxWatermark = 0x13c,
+#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
+#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
+#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
 	NvRegTxRxControl = 0x144,
 #define NVREG_TXRXCTL_KICK 0x0001
 #define NVREG_TXRXCTL_BIT1 0x0002
@@ -377,21 +388,21 @@ enum {
 
 /* Big endian: should work, but is untested */
 struct ring_desc {
-	u32 PacketBuffer;
-	u32 FlagLen;
+	__le32 buf;
+	__le32 flaglen;
 };
 
 struct ring_desc_ex {
-	u32 PacketBufferHigh;
-	u32 PacketBufferLow;
-	u32 TxVlan;
-	u32 FlagLen;
+	__le32 bufhigh;
+	__le32 buflow;
+	__le32 txvlan;
+	__le32 flaglen;
 };
 
-typedef union _ring_type {
+union ring_type {
 	struct ring_desc* orig;
 	struct ring_desc_ex* ex;
-} ring_type;
+};
 
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
@@ -532,6 +543,9 @@ typedef union _ring_type {
 #define PHYID1_OUI_SHFT 6
 #define PHYID2_OUI_MASK 0xfc00
 #define PHYID2_OUI_SHFT 10
+#define PHYID2_MODEL_MASK 0x03f0
+#define PHY_MODEL_MARVELL_E3016 0x220
+#define PHY_MARVELL_E3016_INITMASK 0x0300
 #define PHY_INIT1 0x0f000
 #define PHY_INIT2 0x0e00
 #define PHY_INIT3 0x01000
@@ -649,8 +663,8 @@ static const struct nv_ethtool_str nv_etests_str[] = {
 };
 
 struct register_test {
-	u32 reg;
-	u32 mask;
+	__le32 reg;
+	__le32 mask;
 };
 
 static const struct register_test nv_registers_test[] = {
@@ -658,7 +672,7 @@ static const struct register_test nv_registers_test[] = {
 	{ NvRegMisc1, 0x03c },
 	{ NvRegOffloadConfig, 0x03ff },
 	{ NvRegMulticastAddrA, 0xffffffff },
-	{ NvRegUnknownSetupReg3, 0x0ff },
+	{ NvRegTxWatermark, 0x0ff },
 	{ NvRegWakeUpFlags, 0x07777 },
 	{ 0,0 }
 };
@@ -690,6 +704,7 @@ struct fe_priv {
 	int phyaddr;
 	int wolenabled;
 	unsigned int phy_oui;
+	unsigned int phy_model;
 	u16 gigabit;
 	int intr_test;
 
@@ -703,13 +718,14 @@ struct fe_priv {
 	u32 vlanctl_bits;
 	u32 driver_data;
 	u32 register_size;
+	int rx_csum;
 
 	void __iomem *base;
 
 	/* rx specific fields.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
-	ring_type rx_ring;
+	union ring_type rx_ring;
 	unsigned int cur_rx, refill_rx;
 	struct sk_buff **rx_skbuff;
 	dma_addr_t *rx_dma;
@@ -729,7 +745,7 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	ring_type tx_ring;
+	union ring_type tx_ring;
 	unsigned int next_tx, nic_tx;
 	struct sk_buff **tx_skbuff;
 	dma_addr_t *tx_dma;
@@ -822,13 +838,13 @@ static inline void pci_push(u8 __iomem *base)
 
 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 {
-	return le32_to_cpu(prd->FlagLen)
+	return le32_to_cpu(prd->flaglen)
 		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 }
 
 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 {
-	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 }
 
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
@@ -881,7 +897,7 @@ static void free_rings(struct net_device *dev)
 	struct fe_priv *np = get_nvpriv(dev);
 
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		if(np->rx_ring.orig)
+		if (np->rx_ring.orig)
 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 					    np->rx_ring.orig, np->ring_addr);
 	} else {
@@ -1016,14 +1032,13 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
 	return retval;
 }
 
-static int phy_reset(struct net_device *dev)
+static int phy_reset(struct net_device *dev, u32 bmcr_setup)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 miicontrol;
 	unsigned int tries = 0;
 
-	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-	miicontrol |= BMCR_RESET;
+	miicontrol = BMCR_RESET | bmcr_setup;
 	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
 		return -1;
 	}
@@ -1048,6 +1063,16 @@ static int phy_init(struct net_device *dev)
 	u8 __iomem *base = get_hwbase(dev);
 	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
 
+	/* phy errata for E3016 phy */
+	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+		reg &= ~PHY_MARVELL_E3016_INITMASK;
+		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
+			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
+			return PHY_ERROR;
+		}
+	}
+
 	/* set advertise register */
 	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
@@ -1078,8 +1103,13 @@ static int phy_init(struct net_device *dev)
 	else
 		np->gigabit = 0;
 
-	/* reset the phy */
-	if (phy_reset(dev)) {
+	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+	mii_control |= BMCR_ANENABLE;
+
+	/* reset the phy
+	 * (certain phys need bmcr to be setup with reset)
+	 */
+	if (phy_reset(dev, mii_control)) {
 		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
 		return PHY_ERROR;
 	}
@@ -1174,7 +1204,7 @@ static void nv_stop_tx(struct net_device *dev)
 			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
 
 	udelay(NV_TXSTOP_DELAY2);
-	writel(0, base + NvRegUnknownTransmitterReg);
+	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
 }
 
 static void nv_txrx_reset(struct net_device *dev)
@@ -1254,14 +1284,14 @@ static int nv_alloc_rx(struct net_device *dev)
 		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
 						skb->end-skb->data, PCI_DMA_FROMDEVICE);
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+			np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
 			wmb();
-			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+			np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
 		} else {
-			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
-			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+			np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+			np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
 			wmb();
-			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+			np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
 		}
 		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
 			dev->name, refill_rx);
@@ -1273,6 +1303,16 @@ static int nv_alloc_rx(struct net_device *dev)
 	return 0;
 }
 
+/* If rx bufs are exhausted called after 50ms to attempt to refresh */
+#ifdef CONFIG_FORCEDETH_NAPI
+static void nv_do_rx_refill(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *) data;
+
+	/* Just reschedule NAPI rx processing */
+	netif_rx_schedule(dev);
+}
+#else
 static void nv_do_rx_refill(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
@@ -1301,6 +1341,7 @@ static void nv_do_rx_refill(unsigned long data)
 			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 }
+#endif
 
 static void nv_init_rx(struct net_device *dev)
 {
@@ -1311,9 +1352,9 @@ static void nv_init_rx(struct net_device *dev)
 	np->refill_rx = 0;
 	for (i = 0; i < np->rx_ring_size; i++)
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->rx_ring.orig[i].FlagLen = 0;
+			np->rx_ring.orig[i].flaglen = 0;
 		else
-			np->rx_ring.ex[i].FlagLen = 0;
+			np->rx_ring.ex[i].flaglen = 0;
 }
 
 static void nv_init_tx(struct net_device *dev)
@@ -1324,9 +1365,9 @@ static void nv_init_tx(struct net_device *dev)
 	np->next_tx = np->nic_tx = 0;
 	for (i = 0; i < np->tx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->tx_ring.orig[i].FlagLen = 0;
+			np->tx_ring.orig[i].flaglen = 0;
 		else
-			np->tx_ring.ex[i].FlagLen = 0;
+			np->tx_ring.ex[i].flaglen = 0;
 		np->tx_skbuff[i] = NULL;
 		np->tx_dma[i] = 0;
 	}
@@ -1369,9 +1410,9 @@ static void nv_drain_tx(struct net_device *dev)
 
 	for (i = 0; i < np->tx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->tx_ring.orig[i].FlagLen = 0;
+			np->tx_ring.orig[i].flaglen = 0;
 		else
-			np->tx_ring.ex[i].FlagLen = 0;
+			np->tx_ring.ex[i].flaglen = 0;
 		if (nv_release_txskb(dev, i))
 			np->stats.tx_dropped++;
 	}
@@ -1383,9 +1424,9 @@ static void nv_drain_rx(struct net_device *dev)
 	int i;
 	for (i = 0; i < np->rx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->rx_ring.orig[i].FlagLen = 0;
+			np->rx_ring.orig[i].flaglen = 0;
 		else
-			np->rx_ring.ex[i].FlagLen = 0;
+			np->rx_ring.ex[i].flaglen = 0;
 		wmb();
 		if (np->rx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -1446,17 +1487,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		np->tx_dma_len[nr] = bcnt;
 
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+			np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+			np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 		} else {
-			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+			np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+			np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+			np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 		}
 		tx_flags = np->tx_flags;
 		offset += bcnt;
 		size -= bcnt;
-	} while(size);
+	} while (size);
 
 	/* setup the fragments */
 	for (i = 0; i < fragments; i++) {
@@ -1473,12 +1514,12 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			np->tx_dma_len[nr] = bcnt;
 
 			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-				np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+				np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+				np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 			} else {
-				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-				np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+				np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+				np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+				np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 			}
 			offset += bcnt;
 			size -= bcnt;
@@ -1487,19 +1528,20 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set last fragment flag */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+		np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
 	} else {
-		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+		np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
 	}
 
 	np->tx_skbuff[nr] = skb;
 
 #ifdef NETIF_F_TSO
-	if (skb_shinfo(skb)->gso_size)
+	if (skb_is_gso(skb))
 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
 	else
 #endif
-	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+	tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
+			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
 
 	/* vlan tag */
 	if (np->vlangrp && vlan_tx_tag_present(skb)) {
@@ -1508,10 +1550,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set tx flags */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+		np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	} else {
-		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
-		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+		np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
+		np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	}
 
 	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
@@ -1543,7 +1585,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void nv_tx_done(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	u32 Flags;
+	u32 flags;
 	unsigned int i;
 	struct sk_buff *skb;
 
@@ -1551,22 +1593,22 @@ static void nv_tx_done(struct net_device *dev)
 		i = np->nic_tx % np->tx_ring_size;
 
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+			flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
 		else
-			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
+			flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);
 
-		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
-					dev->name, np->nic_tx, Flags);
-		if (Flags & NV_TX_VALID)
+		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
+					dev->name, np->nic_tx, flags);
+		if (flags & NV_TX_VALID)
 			break;
 		if (np->desc_ver == DESC_VER_1) {
-			if (Flags & NV_TX_LASTPACKET) {
+			if (flags & NV_TX_LASTPACKET) {
 				skb = np->tx_skbuff[i];
-				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+				if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
 					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
-					if (Flags & NV_TX_UNDERFLOW)
+					if (flags & NV_TX_UNDERFLOW)
 						np->stats.tx_fifo_errors++;
-					if (Flags & NV_TX_CARRIERLOST)
+					if (flags & NV_TX_CARRIERLOST)
 						np->stats.tx_carrier_errors++;
 					np->stats.tx_errors++;
 				} else {
@@ -1575,13 +1617,13 @@ static void nv_tx_done(struct net_device *dev)
 				}
 			}
 		} else {
-			if (Flags & NV_TX2_LASTPACKET) {
+			if (flags & NV_TX2_LASTPACKET) {
 				skb = np->tx_skbuff[i];
-				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+				if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
 					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
-					if (Flags & NV_TX2_UNDERFLOW)
+					if (flags & NV_TX2_UNDERFLOW)
 						np->stats.tx_fifo_errors++;
-					if (Flags & NV_TX2_CARRIERLOST)
+					if (flags & NV_TX2_CARRIERLOST)
 						np->stats.tx_carrier_errors++;
 					np->stats.tx_errors++;
 				} else {
@@ -1634,29 +1676,29 @@ static void nv_tx_timeout(struct net_device *dev)
 			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 				       i,
-				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
-				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
-				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
-				       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
-				       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
-				       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
-				       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
-				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+				       le32_to_cpu(np->tx_ring.orig[i].buf),
+				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
+				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
+				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
+				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
+				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
 			} else {
 				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
 				       i,
-				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
-				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
-				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
-				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
-				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
-				       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
-				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
-				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
-				       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
-				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
-				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
-				       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+				       le32_to_cpu(np->tx_ring.ex[i].buflow),
+				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
+				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
 			}
 		}
 	}
@@ -1693,7 +1735,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	int protolen;	/* length as stored in the proto field */
 
 	/* 1) calculate len according to header */
-	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
 		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
 		hdrlen = VLAN_HLEN;
 	} else {
@@ -1736,13 +1778,14 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	}
 }
 
-static void nv_rx_process(struct net_device *dev)
+static int nv_rx_process(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	u32 Flags;
+	u32 flags;
 	u32 vlanflags = 0;
+	int count;
 
-	for (;;) {
+	for (count = 0; count < limit; ++count) {
 		struct sk_buff *skb;
 		int len;
 		int i;
@@ -1751,18 +1794,18 @@
 
 		i = np->cur_rx % np->rx_ring_size;
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+			flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
 			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
 		} else {
-			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+			flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
 			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
-			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
+			vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
 		}
 
-		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
-					dev->name, np->cur_rx, Flags);
+		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n",
+					dev->name, np->cur_rx, flags);
 
-		if (Flags & NV_RX_AVAIL)
+		if (flags & NV_RX_AVAIL)
 			break;	/* still owned by hardware, */
 
 		/*
@@ -1776,7 +1819,7 @@
 
 		{
 			int j;
-			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
+			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
 			for (j=0; j<64; j++) {
 				if ((j%16) == 0)
 					dprintk("\n%03x:", j);
@@ -1786,30 +1829,30 @@
 		}
 		/* look at what we actually got: */
 		if (np->desc_ver == DESC_VER_1) {
-			if (!(Flags & NV_RX_DESCRIPTORVALID))
+			if (!(flags & NV_RX_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & NV_RX_ERROR) {
-				if (Flags & NV_RX_MISSEDFRAME) {
+			if (flags & NV_RX_ERROR) {
+				if (flags & NV_RX_MISSEDFRAME) {
 					np->stats.rx_missed_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
+				if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_CRCERR) {
+				if (flags & NV_RX_CRCERR) {
 					np->stats.rx_crc_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_OVERFLOW) {
+				if (flags & NV_RX_OVERFLOW) {
 					np->stats.rx_over_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_ERROR4) {
+				if (flags & NV_RX_ERROR4) {
 					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
 					if (len < 0) {
 						np->stats.rx_errors++;
@@ -1817,32 +1860,32 @@
 					}
 				}
 				/* framing errors are soft errors. */
-				if (Flags & NV_RX_FRAMINGERR) {
-					if (Flags & NV_RX_SUBSTRACT1) {
+				if (flags & NV_RX_FRAMINGERR) {
+					if (flags & NV_RX_SUBSTRACT1) {
 						len--;
 					}
 				}
 			}
 		} else {
-			if (!(Flags & NV_RX2_DESCRIPTORVALID))
+			if (!(flags & NV_RX2_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & NV_RX2_ERROR) {
-				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
+			if (flags & NV_RX2_ERROR) {
+				if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_CRCERR) {
+				if (flags & NV_RX2_CRCERR) {
 					np->stats.rx_crc_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_OVERFLOW) {
+				if (flags & NV_RX2_OVERFLOW) {
 					np->stats.rx_over_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_ERROR4) {
+				if (flags & NV_RX2_ERROR4) {
 					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
 					if (len < 0) {
 						np->stats.rx_errors++;
@@ -1850,17 +1893,17 @@
 					}
 				}
 				/* framing errors are soft errors */
-				if (Flags & NV_RX2_FRAMINGERR) {
-					if (Flags & NV_RX2_SUBSTRACT1) {
+				if (flags & NV_RX2_FRAMINGERR) {
+					if (flags & NV_RX2_SUBSTRACT1) {
 						len--;
 					}
 				}
 			}
-			if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
-				Flags &= NV_RX2_CHECKSUMMASK;
-				if (Flags == NV_RX2_CHECKSUMOK1 ||
-				    Flags == NV_RX2_CHECKSUMOK2 ||
-				    Flags == NV_RX2_CHECKSUMOK3) {
+			if (np->rx_csum) {
+				flags &= NV_RX2_CHECKSUMMASK;
+				if (flags == NV_RX2_CHECKSUMOK1 ||
+				    flags == NV_RX2_CHECKSUMOK2 ||
+				    flags == NV_RX2_CHECKSUMOK3) {
 					dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
 					np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
 				} else {
@@ -1876,17 +1919,27 @@
 		skb->protocol = eth_type_trans(skb, dev);
 		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
 					dev->name, np->cur_rx, len, skb->protocol);
-		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
-			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
-		} else {
+#ifdef CONFIG_FORCEDETH_NAPI
+		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+			vlan_hwaccel_receive_skb(skb, np->vlangrp,
+						 vlanflags & NV_RX3_VLAN_TAG_MASK);
+		else
+			netif_receive_skb(skb);
+#else
+		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+			vlan_hwaccel_rx(skb, np->vlangrp,
+					vlanflags & NV_RX3_VLAN_TAG_MASK);
+		else
 			netif_rx(skb);
-		}
+#endif
 		dev->last_rx = jiffies;
 		np->stats.rx_packets++;
 		np->stats.rx_bytes += len;
next_pkt:
 		np->cur_rx++;
 	}
+
+	return count;
 }
 
 static void set_bufsize(struct net_device *dev)
@@ -1986,7 +2039,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 	struct fe_priv *np = netdev_priv(dev);
 	struct sockaddr *macaddr = (struct sockaddr*)addr;
 
-	if(!is_valid_ether_addr(macaddr->sa_data))
+	if (!is_valid_ether_addr(macaddr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	/* synchronized against open : rtnl_lock() held by caller */
@@ -2028,7 +2081,6 @@ static void nv_set_multicast(struct net_device *dev)
 	memset(mask, 0, sizeof(mask));
 
 	if (dev->flags & IFF_PROMISC) {
-		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
 		pff |= NVREG_PFF_PROMISC;
 	} else {
 		pff |= NVREG_PFF_MYADDR;
@@ -2127,7 +2179,7 @@ static int nv_update_linkspeed(struct net_device *dev)
 	int newdup = np->duplex;
 	int mii_status;
 	int retval = 0;
-	u32 control_1000, status_1000, phyreg, pause_flags;
+	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
 
 	/* BMSR_LSTATUS is latched, read it twice:
 	 * we want the current value.
@@ -2245,6 +2297,26 @@ set_speed:
 		phyreg |= PHY_1000;
 	writel(phyreg, base + NvRegPhyInterface);
 
+	if (phyreg & PHY_RGMII) {
+		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
+			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+		else
+			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+	} else {
+		txreg = NVREG_TX_DEFERRAL_DEFAULT;
+	}
+	writel(txreg, base + NvRegTxDeferral);
+
+	if (np->desc_ver == DESC_VER_1) {
+		txreg = NVREG_TX_WM_DESC1_DEFAULT;
+	} else {
+		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
+			txreg = NVREG_TX_WM_DESC2_3_1000;
+		else
+			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+	}
+	writel(txreg, base + NvRegTxWatermark);
+
 	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
 		base + NvRegMisc1);
 	pci_push(base);
@@ -2259,20 +2331,20 @@ set_speed:
 		lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
 
 		switch (adv_pause) {
-		case (ADVERTISE_PAUSE_CAP):
+		case ADVERTISE_PAUSE_CAP:
 			if (lpa_pause & LPA_PAUSE_CAP) {
 				pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 				if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 			}
 			break;
-		case (ADVERTISE_PAUSE_ASYM):
+		case ADVERTISE_PAUSE_ASYM:
 			if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
 			{
 				pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 			}
 			break;
-		case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+		case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
 			if (lpa_pause & LPA_PAUSE_CAP)
 			{
 				pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
@@ -2352,14 +2424,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 		nv_tx_done(dev);
 		spin_unlock(&np->lock);
 
-		nv_rx_process(dev);
-		if (nv_alloc_rx(dev)) {
-			spin_lock(&np->lock);
-			if (!np->in_shutdown)
-				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock(&np->lock);
-		}
-
 		if (events & NVREG_IRQ_LINK) {
 			spin_lock(&np->lock);
 			nv_link_irq(dev);
@@ -2379,6 +2443,29 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
 						dev->name, events);
 		}
+#ifdef CONFIG_FORCEDETH_NAPI
+		if (events & NVREG_IRQ_RX_ALL) {
+			netif_rx_schedule(dev);
+
+			/* Disable furthur receive irq's */
+			spin_lock(&np->lock);
+			np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+			if (np->msi_flags & NV_MSI_X_ENABLED)
+				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+			else
+				writel(np->irqmask, base + NvRegIrqMask);
+			spin_unlock(&np->lock);
+		}
+#else
+		nv_rx_process(dev, dev->weight);
+		if (nv_alloc_rx(dev)) {
+			spin_lock(&np->lock);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			spin_unlock(&np->lock);
+		}
+#endif
 		if (i > max_interrupt_work) {
 			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
@@ -2450,6 +2537,63 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 	return IRQ_RETVAL(i);
 }
 
+#ifdef CONFIG_FORCEDETH_NAPI
+static int nv_napi_poll(struct net_device *dev, int *budget)
+{
+	int pkts, limit = min(*budget, dev->quota);
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	pkts = nv_rx_process(dev, limit);
+
+	if (nv_alloc_rx(dev)) {
+		spin_lock_irq(&np->lock);
+		if (!np->in_shutdown)
+			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+		spin_unlock_irq(&np->lock);
+	}
+
+	if (pkts < limit) {
+		/* all done, no more packets present */
+		netif_rx_complete(dev);
+
+		/* re-enable receive interrupts */
+		spin_lock_irq(&np->lock);
+		np->irqmask |= NVREG_IRQ_RX_ALL;
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+		else
+			writel(np->irqmask, base + NvRegIrqMask);
+		spin_unlock_irq(&np->lock);
+		return 0;
+	} else {
+		/* used up our quantum, so reschedule */
+		dev->quota -= pkts;
+		*budget -= pkts;
+		return 1;
+	}
+}
+#endif
+
+#ifdef CONFIG_FORCEDETH_NAPI
+static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+
+	events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+	writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+
+	if (events) {
+		netif_rx_schedule(dev);
+		/* disable receive interrupts on the nic */
+		writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+		pci_push(base);
+	}
+	return IRQ_HANDLED;
+}
+#else
 static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) data;
@@ -2468,7 +2612,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		nv_rx_process(dev);
+		nv_rx_process(dev, dev->weight);
 		if (nv_alloc_rx(dev)) {
 			spin_lock_irq(&np->lock);
 			if (!np->in_shutdown)
@@ -2490,12 +2634,12 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 			spin_unlock_irq(&np->lock);
 			break;
 		}
-
 	}
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
 
 	return IRQ_RETVAL(i);
 }
+#endif
 
 static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 {
@@ -2622,21 +2766,21 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 		np->msi_flags |= NV_MSI_X_ENABLED;
 		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
 			/* Request irq for rx handling */
-			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
 				printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
 				pci_disable_msix(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_X_ENABLED;
 				goto out_err;
 			}
 			/* Request irq for tx handling */
-			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
 				printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
 				pci_disable_msix(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_X_ENABLED;
 				goto out_free_rx;
 			}
 			/* Request irq for link and timer handling */
-			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+			if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
 				printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
 				pci_disable_msix(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -2651,9 +2795,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 	} else {
 		/* Request irq for all interrupts */
 		if ((!intr_test &&
-		     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
+		     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
 		    (intr_test &&
-		     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
+		     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
 			printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
 			pci_disable_msix(np->pci_dev);
 			np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -2669,8 +2813,8 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
 		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
 			np->msi_flags |= NV_MSI_ENABLED;
-			if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
-			    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
+			if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
+			    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
 				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
 				pci_disable_msi(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_ENABLED;
@@ -2685,8 +2829,8 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 		}
 	}
 	if (ret != 0) {
-		if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
-		    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0))
+		if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
+		    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0))
 			goto out_err;
 
 	}
@@ -2735,21 +2879,21 @@ static void nv_do_nic_poll(unsigned long data)
 
 	if (!using_multi_irqs(dev)) {
 		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			disable_irq(dev->irq);
+			disable_irq_lockdep(dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 			mask |= NVREG_IRQ_RX_ALL;
 		}
 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
 			mask |= NVREG_IRQ_TX_ALL;
 		}
 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
 			mask |= NVREG_IRQ_OTHER;
 		}
 	}
@@ -2761,23 +2905,23 @@ static void nv_do_nic_poll(unsigned long data)
 	pci_push(base);
 
 	if (!using_multi_irqs(dev)) {
-		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
+		nv_nic_irq(0, dev, NULL);
 		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
 		else
-			enable_irq(dev->irq);
+			enable_irq_lockdep(dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
-			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			nv_nic_irq_rx(0, dev, NULL);
+			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 		}
 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
-			nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			nv_nic_irq_tx(0, dev, NULL);
+			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
 		}
 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
-			nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+			nv_nic_irq_other(0, dev, NULL);
+			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
 		}
 	}
 }
@@ -3033,9 +3177,18 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 		if (netif_running(dev))
 			printk(KERN_INFO "%s: link down.\n", dev->name);
 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
-
+		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+			bmcr |= BMCR_ANENABLE;
+			/* reset the phy in order for settings to stick,
+			 * and cause autoneg to start */
+			if (phy_reset(dev, bmcr)) {
+				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+				return -EINVAL;
+			}
+		} else {
+			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+		}
 	} else {
 		int adv, bmcr;
 
@@ -3075,17 +3228,19 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 			bmcr |= BMCR_FULLDPLX;
 		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
 			bmcr |= BMCR_SPEED100;
-		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
 		if (np->phy_oui == PHY_OUI_MARVELL) {
-			/* reset the phy */
-			if (phy_reset(dev)) {
+			/* reset the phy in order for forced mode settings to stick */
+			if (phy_reset(dev, bmcr)) {
 				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
 				return -EINVAL;
 			}
-		} else if (netif_running(dev)) {
-			/* Wait a bit and then reconfigure the nic. */
-			udelay(10);
-			nv_linkchange(dev);
+		} else {
+			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+			if (netif_running(dev)) {
+				/* Wait a bit and then reconfigure the nic. */
+				udelay(10);
+				nv_linkchange(dev);
+			}
 		}
 	}
 
@@ -3142,8 +3297,17 @@ static int nv_nway_reset(struct net_device *dev)
 	}
 
 	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-	mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+		bmcr |= BMCR_ANENABLE;
+		/* reset the phy in order for settings to stick*/
+		if (phy_reset(dev, bmcr)) {
+			printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+			return -EINVAL;
+		}
+	} else {
+		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+	}
 
 	if (netif_running(dev)) {
 		nv_start_rx(dev);
@@ -3221,7 +3385,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
 		/* fall back to old rings */
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			if(rxtx_ring)
+			if (rxtx_ring)
 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
 						    rxtx_ring, ring_addr);
 		} else {
@@ -3394,7 +3558,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
 static u32 nv_get_rx_csum(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
+	return (np->rx_csum) != 0;
 }
 
 static int nv_set_rx_csum(struct net_device *dev, u32 data)
@@ -3404,22 +3568,15 @@ static int nv_set_rx_csum(struct net_device *dev, u32 data)
 	int retcode = 0;
 
 	if (np->driver_data & DEV_HAS_CHECKSUM) {
-
-		if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
-		    (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
-			/* already set or unset */
-			return 0;
-		}
-
 		if (data) {
+			np->rx_csum = 1;
 			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
-		} else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
-			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
 		} else {
-			printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
-			return -EINVAL;
+			np->rx_csum = 0;
+			/* vlan is dependent on rx checksum offload */
+			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
+				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
 		}
-
 		if (netif_running(dev)) {
 			spin_lock_irq(&np->lock);
 			writel(np->txrxctl_bits, base + NvRegTxRxControl);
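The rework above drops the old "refuse to disable rx checksum while vlan is on" error path: np->rx_csum now remembers what the user asked for, while the hardware RXCHECK bit stays set as long as vlan stripping still depends on it. An equivalent way to state the invariant the new code maintains — an illustrative helper, not part of the patch:

/* Hypothetical helper: RXCHECK must be on iff the user wants rx
 * checksumming or hardware vlan stripping is enabled. */
static void nv_update_rxcheck_bit(struct fe_priv *np)
{
	if (np->rx_csum || (np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
	else
		np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
}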
@@ -3457,7 +3614,7 @@ static int nv_get_stats_count(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 
 	if (np->driver_data & DEV_HAS_STATISTICS)
-		return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+		return sizeof(struct nv_ethtool_stats)/sizeof(u64);
 	else
 		return 0;
 }
@@ -3595,7 +3752,7 @@ static int nv_loopback_test(struct net_device *dev)
 	struct sk_buff *tx_skb, *rx_skb;
 	dma_addr_t test_dma_addr;
 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
-	u32 Flags;
+	u32 flags;
 	int len, i, pkt_len;
 	u8 *pkt_data;
 	u32 filter_flags = 0;
@@ -3632,6 +3789,12 @@ static int nv_loopback_test(struct net_device *dev)
 	/* setup packet for tx */
 	pkt_len = ETH_DATA_LEN;
 	tx_skb = dev_alloc_skb(pkt_len);
+	if (!tx_skb) {
+		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
+		       " of %s\n", dev->name);
+		ret = 0;
+		goto out;
+	}
 	pkt_data = skb_put(tx_skb, pkt_len);
 	for (i = 0; i < pkt_len; i++)
 		pkt_data[i] = (u8)(i & 0xff);
@@ -3639,12 +3802,12 @@ static int nv_loopback_test(struct net_device *dev)
 				       tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
 
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
-		np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
+		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	} else {
-		np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
-		np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
-		np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+		np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
+		np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	}
 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 	pci_push(get_hwbase(dev));
@@ -3653,21 +3816,21 @@ static int nv_loopback_test(struct net_device *dev)
 
 	/* check for rx of the packet */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
 
 	} else {
-		Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
 		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
 	}
 
-	if (Flags & NV_RX_AVAIL) {
+	if (flags & NV_RX_AVAIL) {
 		ret = 0;
 	} else if (np->desc_ver == DESC_VER_1) {
-		if (Flags & NV_RX_ERROR)
+		if (flags & NV_RX_ERROR)
 			ret = 0;
 	} else {
-		if (Flags & NV_RX2_ERROR) {
+		if (flags & NV_RX2_ERROR) {
 			ret = 0;
 		}
 	}
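The renames in the two hunks above are part of a tree-wide switch of the ring descriptor fields from StudlyCaps to lower case (PacketBuffer becomes buf, FlagLen becomes flaglen, and so on); only the users change here, behaviour is identical. The struct definitions themselves live in an earlier part of the diff; roughly this shape is implied — a sketch, with the vlan word in the extended descriptor being an assumption:

struct ring_desc {		/* DESC_VER_1 / DESC_VER_2 */
	u32 buf;		/* was PacketBuffer */
	u32 flaglen;		/* was FlagLen */
};

struct ring_desc_ex {		/* 64-bit capable descriptor format */
	u32 bufhigh;		/* was PacketBufferHigh */
	u32 buflow;		/* was PacketBufferLow */
	u32 txvlan;		/* assumed name of the vlan tag word */
	u32 flaglen;		/* was FlagLen */
};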
@@ -3696,7 +3859,7 @@ static int nv_loopback_test(struct net_device *dev)
 			tx_skb->end-tx_skb->data,
 			PCI_DMA_TODEVICE);
 	dev_kfree_skb_any(tx_skb);
-
+ out:
 	/* stop engines */
 	nv_stop_rx(dev);
 	nv_stop_tx(dev);
@@ -3729,6 +3892,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 	if (test->flags & ETH_TEST_FL_OFFLINE) {
 		if (netif_running(dev)) {
 			netif_stop_queue(dev);
+			netif_poll_disable(dev);
 			netif_tx_lock_bh(dev);
 			spin_lock_irq(&np->lock);
 			nv_disable_hw_interrupts(dev, np->irqmask);
@@ -3787,6 +3951,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 			nv_start_rx(dev);
 			nv_start_tx(dev);
 			netif_start_queue(dev);
+			netif_poll_enable(dev);
 			nv_enable_hw_interrupts(dev, np->irqmask);
 		}
 	}
@@ -3804,7 +3969,7 @@ static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
 	}
 }
 
-static struct ethtool_ops ops = {
+static const struct ethtool_ops ops = {
 	.get_drvinfo = nv_get_drvinfo,
 	.get_link = ethtool_op_get_link,
 	.get_wol = nv_get_wol,
@@ -3871,10 +4036,9 @@ static int nv_open(struct net_device *dev)
 
 	dprintk(KERN_DEBUG "nv_open: begin\n");
 
-	/* 1) erase previous misconfiguration */
+	/* erase previous misconfiguration */
 	if (np->driver_data & DEV_HAS_POWER_CNTRL)
 		nv_mac_reset(dev);
-	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
 	writel(0, base + NvRegMulticastAddrB);
 	writel(0, base + NvRegMulticastMaskA);
@@ -3889,28 +4053,27 @@ static int nv_open(struct net_device *dev)
 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
 		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
 
-	/* 2) initialize descriptor rings */
+	/* initialize descriptor rings */
 	set_bufsize(dev);
 	oom = nv_init_ring(dev);
 
 	writel(0, base + NvRegLinkSpeed);
-	writel(0, base + NvRegUnknownTransmitterReg);
+	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
 	nv_txrx_reset(dev);
 	writel(0, base + NvRegUnknownSetupReg6);
 
 	np->in_shutdown = 0;
 
-	/* 3) set mac address */
-	nv_copy_mac_to_hw(dev);
-
-	/* 4) give hw rings */
+	/* give hw rings */
 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 
-	/* 5) continue setup */
 	writel(np->linkspeed, base + NvRegLinkSpeed);
-	writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
+	if (np->desc_ver == DESC_VER_1)
+		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
+	else
+		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
 	writel(np->txrxctl_bits, base + NvRegTxRxControl);
 	writel(np->vlanctl_bits, base + NvRegVlanControl);
 	pci_push(base);
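Two formerly unknown registers get real names in the hunk above: the zero write to the old NvRegUnknownTransmitterReg is replaced by a masked rewrite of NvRegTransmitPoll that deliberately preserves the MAC-address-order workaround bit, and NvRegUnknownSetupReg3 turns out to be the tx watermark, chosen per descriptor format. The constants also include a separate gigabit value (NVREG_TX_WM_DESC2_3_1000), which the link-speed code elsewhere in this diff presumably switches to; a sketch of that selection, not shown in this section:

/* assumed shape of the watermark update in nv_update_linkspeed() */
if (np->desc_ver == DESC_VER_1) {
	writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
} else {
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		writel(NVREG_TX_WM_DESC2_3_1000, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
}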
@@ -3923,7 +4086,6 @@ static int nv_open(struct net_device *dev)
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 
-	/* 6) continue setup */
 	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
 	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
 	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
@@ -3932,8 +4094,8 @@ static int nv_open(struct net_device *dev)
 	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
 	get_random_bytes(&i, sizeof(i));
 	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
-	writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
-	writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
+	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
+	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
 	if (poll_interval == -1) {
 		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
 			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
@@ -3993,6 +4155,8 @@ static int nv_open(struct net_device *dev)
 		nv_start_rx(dev);
 		nv_start_tx(dev);
 		netif_start_queue(dev);
+		netif_poll_enable(dev);
+
 		if (ret) {
 			netif_carrier_on(dev);
 		} else {
@@ -4022,6 +4186,7 @@ static int nv_close(struct net_device *dev)
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
 	spin_unlock_irq(&np->lock);
+	netif_poll_disable(dev);
 	synchronize_irq(dev->irq);
 
 	del_timer_sync(&np->oom_kick);
@@ -4049,12 +4214,6 @@ static int nv_close(struct net_device *dev)
 	if (np->wolenabled)
 		nv_start_rx(dev);
 
-	/* special op: write back the misordered MAC address - otherwise
-	 * the next nv_probe would see a wrong address.
-	 */
-	writel(np->orig_mac[0], base + NvRegMacAddrA);
-	writel(np->orig_mac[1], base + NvRegMacAddrB);
-
 	/* FIXME: power down nic */
 
 	return 0;
@@ -4067,7 +4226,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	unsigned long addr;
 	u8 __iomem *base;
 	int err, i;
-	u32 powerstate;
+	u32 powerstate, txreg;
 
 	dev = alloc_etherdev(sizeof(struct fe_priv));
 	err = -ENOMEM;
@@ -4163,6 +4322,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	np->pkt_limit = NV_PKTLIMIT_2;
 
 	if (id->driver_data & DEV_HAS_CHECKSUM) {
+		np->rx_csum = 1;
 		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
 		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 #ifdef NETIF_F_TSO
@@ -4243,6 +4403,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = nv_poll_controller;
 #endif
+	dev->weight = 64;
+#ifdef CONFIG_FORCEDETH_NAPI
+	dev->poll = nv_napi_poll;
+#endif
 	SET_ETHTOOL_OPS(dev, &ops);
 	dev->tx_timeout = nv_tx_timeout;
 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
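dev->weight and, under CONFIG_FORCEDETH_NAPI, dev->poll wire the device into the 2.6-era NAPI scheduler. nv_napi_poll itself is defined in an earlier part of the diff and not repeated here; as a sketch of the contract such a handler follows (nv_rx_process is the assumed name of the rx worker, and the register re-arm is illustrative):

static int nv_napi_poll(struct net_device *dev, int *budget)
{
	int pkts, limit = min(*budget, dev->quota);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	pkts = nv_rx_process(dev, limit);	/* assumed rx worker */

	if (pkts < limit) {
		/* all done: leave the poll list and re-arm rx interrupts */
		netif_rx_complete(dev);
		writel(np->irqmask, base + NvRegIrqMask);
		return 0;
	}
	/* more work pending: account the quota and stay on the poll list */
	*budget -= pkts;
	dev->quota -= pkts;
	return 1;
}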
@@ -4254,12 +4418,30 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	np->orig_mac[0] = readl(base + NvRegMacAddrA);
 	np->orig_mac[1] = readl(base + NvRegMacAddrB);
 
-	dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
-	dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
-	dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
-	dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
-	dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
-	dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+	/* check the workaround bit for correct mac address order */
+	txreg = readl(base + NvRegTransmitPoll);
+	if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
+		/* mac address is already in correct order */
+		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
+		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
+		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
+		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
+		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
+		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+	} else {
+		/* need to reverse mac address to correct order */
+		dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
+		dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
+		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
+		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+		/* set permanent address to be correct aswell */
+		np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+		np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+	}
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
 	if (!is_valid_ether_addr(dev->perm_addr)) {
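The MAC registers on these nics historically held the address byte-reversed, and the old driver flipped it on every probe while nv_close wrote the reversed value back. The hunk above corrects it once, then latches NVREG_TRANSMITPOLL_MAC_ADDR_REV as an "already corrected" marker so a later probe sees the registers in proper order; the restore moves to nv_remove (below). A self-contained illustration of the two layouts, as a hypothetical userspace helper derived directly from the shifts above:

#include <stdint.h>

/* Hypothetical helper, not part of the patch.  For the MAC
 * 00:0a:1b:2c:3d:4e: corrected layout has mac_a=0x2c1b0a00,
 * mac_b=0x00004e3d; the legacy misordered layout has
 * mac_a=0x1b2c3d4e, mac_b=0x0000000a. */
static void decode_mac(uint32_t mac_a, uint32_t mac_b, int rev, uint8_t addr[6])
{
	if (rev) {	/* workaround bit set: bytes little-endian in register order */
		addr[0] = (mac_a >> 0) & 0xff;
		addr[1] = (mac_a >> 8) & 0xff;
		addr[2] = (mac_a >> 16) & 0xff;
		addr[3] = (mac_a >> 24) & 0xff;
		addr[4] = (mac_b >> 0) & 0xff;
		addr[5] = (mac_b >> 8) & 0xff;
	} else {	/* legacy layout: reverse to correct order */
		addr[0] = (mac_b >> 8) & 0xff;
		addr[1] = (mac_b >> 0) & 0xff;
		addr[2] = (mac_a >> 24) & 0xff;
		addr[3] = (mac_a >> 16) & 0xff;
		addr[4] = (mac_a >> 8) & 0xff;
		addr[5] = (mac_a >> 0) & 0xff;
	}
}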
@@ -4282,6 +4464,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
 	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
 
+	/* set mac address */
+	nv_copy_mac_to_hw(dev);
+
 	/* disable WOL */
 	writel(0, base + NvRegWakeUpFlags);
 	np->wolenabled = 0;
@@ -4342,6 +4527,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		if (id2 < 0 || id2 == 0xffff)
 			continue;
 
+		np->phy_model = id2 & PHYID2_MODEL_MASK;
 		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
 		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
 		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
@@ -4394,9 +4580,17 @@ out:
 static void __devexit nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
 
 	unregister_netdev(dev);
 
+	/* special op: write back the misordered MAC address - otherwise
+	 * the next nv_probe would see a wrong address.
+	 */
+	writel(np->orig_mac[0], base + NvRegMacAddrA);
+	writel(np->orig_mac[1], base + NvRegMacAddrB);
+
 	/* free all structures */
 	free_rings(dev);
 	iounmap(get_hwbase(dev));
@@ -4513,7 +4707,7 @@ static struct pci_driver driver = {
 static int __init init_nic(void)
 {
 	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
-	return pci_module_init(&driver);
+	return pci_register_driver(&driver);
 }
 
 static void __exit exit_nic(void)