author    Linus Torvalds <torvalds@g5.osdl.org>  2006-09-24 13:15:13 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-09-24 13:15:13 -0400
commit    a319a2773a13bab56a0d0b3744ba8703324313b5 (patch)
tree      f02c86acabd1031439fd422a167784007e84ebb1 /drivers/net/forcedeth.c
parent    e18fa700c9a31360bc8f193aa543b7ef7b39a06b (diff)
parent    183798799216fad36c7219fe8d4d6dee6b8fa755 (diff)

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (217 commits)
  net/ieee80211: fix more crypto-related build breakage
  [PATCH] Spidernet: add ethtool -S (show statistics)
  [NET] GT96100: Delete bitrotting ethernet driver
  [PATCH] mv643xx_eth: restrict to 32-bit PPC_MULTIPLATFORM
  [PATCH] Cirrus Logic ep93xx ethernet driver
  r8169: the MMIO region of the 8167 stands behind BAR#1
  e1000, ixgb: Remove pointless wrappers
  [PATCH] Remove powerpc specific parts of 3c509 driver
  [PATCH] s2io: Switch to pci_get_device
  [PATCH] gt96100: move to pci_get_device API
  [PATCH] ehea: bugfix for register access functions
  [PATCH] e1000 disable device on PCI error
  drivers/net/phy/fixed: #if 0 some incomplete code
  drivers/net: const-ify ethtool_ops declarations
  [PATCH] ethtool: allow const ethtool_ops
  [PATCH] sky2: big endian
  [PATCH] sky2: fiber support
  [PATCH] sky2: tx pause bug fix
  drivers/net: Trim trailing whitespace
  [PATCH] ehea: IBM eHEA Ethernet Device Driver
  ...

Manually resolved conflicts in drivers/net/ixgb/ixgb_main.c and drivers/net/sky2.c related to CHECKSUM_HW/CHECKSUM_PARTIAL changes by commit 84fa7933a33f806bbbaae6775e87459b1ec584c0 that just happened to be next to unrelated changes in this update.
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c  562
1 file changed, 361 insertions(+), 201 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 32cacf115f75..97db910fbc8c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -109,6 +109,7 @@
  * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  * 0.55: 22 Mar 2006: Add flow control (pause frame).
  * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
+ * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -120,7 +121,12 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION "0.56"
+#ifdef CONFIG_FORCEDETH_NAPI
+#define DRIVERNAPI "-NAPI"
+#else
+#define DRIVERNAPI
+#endif
+#define FORCEDETH_VERSION "0.57"
 #define DRV_NAME "forcedeth"
 
 #include <linux/module.h>
@@ -262,7 +268,8 @@ enum {
     NvRegRingSizes = 0x108,
 #define NVREG_RINGSZ_TXSHIFT 0
 #define NVREG_RINGSZ_RXSHIFT 16
-    NvRegUnknownTransmitterReg = 0x10c,
+    NvRegTransmitPoll = 0x10c,
+#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
     NvRegLinkSpeed = 0x110,
 #define NVREG_LINKSPEED_FORCE 0x10000
 #define NVREG_LINKSPEED_10 1000
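The renamed NvRegTransmitPoll register carries the NVREG_TRANSMITPOLL_MAC_ADDR_REV flag that nv_probe() checks later in this diff to pick the MAC byte order. A standalone sketch of the decode logic — stand-in userspace code, not driver code, and it assumes the remaining bytes of the truncated probe hunk at the end of this page follow the same per-byte pattern as the visible lines:

#include <stdint.h>

#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000u

/* Hypothetical helper: derive the station address from the two MAC
 * registers, honoring the address-order workaround bit. */
static void mac_from_regs(uint32_t mac_a, uint32_t mac_b,
                          uint32_t txpoll, uint8_t addr[6])
{
    if (txpoll & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
        /* flag set: hardware already stores the address in order */
        addr[0] = (mac_a >> 0) & 0xff;
        addr[1] = (mac_a >> 8) & 0xff;
        addr[2] = (mac_a >> 16) & 0xff;
        addr[3] = (mac_a >> 24) & 0xff;
        addr[4] = (mac_b >> 0) & 0xff;
        addr[5] = (mac_b >> 8) & 0xff;
    } else {
        /* legacy reversed layout, as the removed probe lines read it */
        addr[0] = (mac_b >> 8) & 0xff;
        addr[1] = (mac_b >> 0) & 0xff;
        addr[2] = (mac_a >> 24) & 0xff;
        addr[3] = (mac_a >> 16) & 0xff;
        addr[4] = (mac_a >> 8) & 0xff;
        addr[5] = (mac_a >> 0) & 0xff;
    }
}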
@@ -381,21 +388,21 @@ enum {
 
 /* Big endian: should work, but is untested */
 struct ring_desc {
-    u32 PacketBuffer;
-    u32 FlagLen;
+    __le32 buf;
+    __le32 flaglen;
 };
 
 struct ring_desc_ex {
-    u32 PacketBufferHigh;
-    u32 PacketBufferLow;
-    u32 TxVlan;
-    u32 FlagLen;
+    __le32 bufhigh;
+    __le32 buflow;
+    __le32 txvlan;
+    __le32 flaglen;
 };
 
-typedef union _ring_type {
+union ring_type {
     struct ring_desc* orig;
     struct ring_desc_ex* ex;
-} ring_type;
+};
 
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
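The anonymous typedef becomes a plain named union; both members alias the same descriptor memory and callers select the view by np->desc_ver. A compilable stand-in sketch of that access pattern (stand-in types for illustration; the kernel code uses __le32 and the real descriptors above):

#include <stdint.h>
#include <stddef.h>

struct ring_desc    { uint32_t buf; uint32_t flaglen; };
struct ring_desc_ex { uint32_t bufhigh, buflow, txvlan, flaglen; };

union ring_type {
    struct ring_desc *orig;        /* 8-byte descriptors, DESC_VER_1/2 */
    struct ring_desc_ex *ex;       /* 16-byte descriptors, DESC_VER_3  */
};

/* Mirrors the nv_init_rx()/nv_init_tx() loops: clear the ownership
 * word of every descriptor through whichever view matches the layout. */
static void clear_ring(union ring_type ring, size_t n, int extended)
{
    for (size_t i = 0; i < n; i++) {
        if (extended)
            ring.ex[i].flaglen = 0;
        else
            ring.orig[i].flaglen = 0;
    }
}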
@@ -536,6 +543,9 @@ typedef union _ring_type {
 #define PHYID1_OUI_SHFT 6
 #define PHYID2_OUI_MASK 0xfc00
 #define PHYID2_OUI_SHFT 10
+#define PHYID2_MODEL_MASK 0x03f0
+#define PHY_MODEL_MARVELL_E3016 0x220
+#define PHY_MARVELL_E3016_INITMASK 0x0300
 #define PHY_INIT1 0x0f000
 #define PHY_INIT2 0x0e00
 #define PHY_INIT3 0x01000
@@ -653,8 +663,8 @@ static const struct nv_ethtool_str nv_etests_str[] = {
 };
 
 struct register_test {
-    u32 reg;
-    u32 mask;
+    __le32 reg;
+    __le32 mask;
 };
 
 static const struct register_test nv_registers_test[] = {
@@ -694,6 +704,7 @@ struct fe_priv {
     int phyaddr;
     int wolenabled;
     unsigned int phy_oui;
+    unsigned int phy_model;
     u16 gigabit;
     int intr_test;
 
@@ -707,13 +718,14 @@ struct fe_priv {
     u32 vlanctl_bits;
     u32 driver_data;
     u32 register_size;
+    int rx_csum;
 
     void __iomem *base;
 
     /* rx specific fields.
      * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
      */
-    ring_type rx_ring;
+    union ring_type rx_ring;
     unsigned int cur_rx, refill_rx;
     struct sk_buff **rx_skbuff;
     dma_addr_t *rx_dma;
@@ -733,7 +745,7 @@ struct fe_priv {
     /*
      * tx specific fields.
      */
-    ring_type tx_ring;
+    union ring_type tx_ring;
     unsigned int next_tx, nic_tx;
     struct sk_buff **tx_skbuff;
     dma_addr_t *tx_dma;
@@ -826,13 +838,13 @@ static inline void pci_push(u8 __iomem *base)
 
 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 {
-    return le32_to_cpu(prd->FlagLen)
+    return le32_to_cpu(prd->flaglen)
         & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 }
 
 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 {
-    return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+    return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 }
 
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
@@ -885,7 +897,7 @@ static void free_rings(struct net_device *dev)
     struct fe_priv *np = get_nvpriv(dev);
 
     if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-        if(np->rx_ring.orig)
+        if (np->rx_ring.orig)
             pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
                                 np->rx_ring.orig, np->ring_addr);
     } else {
@@ -1020,14 +1032,13 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
     return retval;
 }
 
-static int phy_reset(struct net_device *dev)
+static int phy_reset(struct net_device *dev, u32 bmcr_setup)
 {
     struct fe_priv *np = netdev_priv(dev);
     u32 miicontrol;
     unsigned int tries = 0;
 
-    miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-    miicontrol |= BMCR_RESET;
+    miicontrol = BMCR_RESET | bmcr_setup;
     if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
         return -1;
     }
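phy_reset() now takes the BMCR bits to latch together with BMCR_RESET, so a caller that needs, say, autoneg enabled across the reset composes them first. The caller-side pattern, condensed from the phy_init() hunk further down this diff (a fragment for illustration, not additional driver code):

u32 mii_control;

mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
mii_control |= BMCR_ANENABLE;        /* bits that must survive the reset */
if (phy_reset(dev, mii_control))     /* writes BMCR_RESET | mii_control  */
    return PHY_ERROR;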
@@ -1052,6 +1063,16 @@ static int phy_init(struct net_device *dev)
     u8 __iomem *base = get_hwbase(dev);
     u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
 
+    /* phy errata for E3016 phy */
+    if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+        reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+        reg &= ~PHY_MARVELL_E3016_INITMASK;
+        if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
+            printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
+            return PHY_ERROR;
+        }
+    }
+
     /* set advertise register */
     reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
     reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
@@ -1082,8 +1103,13 @@ static int phy_init(struct net_device *dev)
     else
         np->gigabit = 0;
 
-    /* reset the phy */
-    if (phy_reset(dev)) {
+    mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+    mii_control |= BMCR_ANENABLE;
+
+    /* reset the phy
+     * (certain phys need bmcr to be setup with reset)
+     */
+    if (phy_reset(dev, mii_control)) {
         printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
         return PHY_ERROR;
     }
@@ -1178,7 +1204,7 @@ static void nv_stop_tx(struct net_device *dev)
            KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
 
     udelay(NV_TXSTOP_DELAY2);
-    writel(0, base + NvRegUnknownTransmitterReg);
+    writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
 }
 
 static void nv_txrx_reset(struct net_device *dev)
@@ -1258,14 +1284,14 @@ static int nv_alloc_rx(struct net_device *dev)
         np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
                                         skb->end-skb->data, PCI_DMA_FROMDEVICE);
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-            np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+            np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
             wmb();
-            np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+            np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
         } else {
-            np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
-            np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+            np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+            np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
             wmb();
-            np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+            np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
         }
         dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
                 dev->name, refill_rx);
@@ -1277,6 +1303,16 @@ static int nv_alloc_rx(struct net_device *dev)
     return 0;
 }
 
+/* If rx bufs are exhausted called after 50ms to attempt to refresh */
+#ifdef CONFIG_FORCEDETH_NAPI
+static void nv_do_rx_refill(unsigned long data)
+{
+    struct net_device *dev = (struct net_device *) data;
+
+    /* Just reschedule NAPI rx processing */
+    netif_rx_schedule(dev);
+}
+#else
 static void nv_do_rx_refill(unsigned long data)
 {
     struct net_device *dev = (struct net_device *) data;
@@ -1305,6 +1341,7 @@ static void nv_do_rx_refill(unsigned long data)
         enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
     }
 }
+#endif
 
 static void nv_init_rx(struct net_device *dev)
 {
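Both variants of nv_do_rx_refill() are driven by the driver's oom_kick timer; under NAPI the handler simply rearms the poll instead of refilling inline. A sketch of the 2.6-era timer wiring (field names follow the driver, but this is illustrative, not the driver's actual init code):

init_timer(&np->oom_kick);
np->oom_kick.data = (unsigned long) dev;
np->oom_kick.function = nv_do_rx_refill;   /* timer callback above */

/* later, when nv_alloc_rx() cannot refill the ring: */
if (!np->in_shutdown)
    mod_timer(&np->oom_kick, jiffies + OOM_REFILL);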
@@ -1315,9 +1352,9 @@ static void nv_init_rx(struct net_device *dev)
     np->refill_rx = 0;
     for (i = 0; i < np->rx_ring_size; i++)
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-            np->rx_ring.orig[i].FlagLen = 0;
+            np->rx_ring.orig[i].flaglen = 0;
         else
-            np->rx_ring.ex[i].FlagLen = 0;
+            np->rx_ring.ex[i].flaglen = 0;
 }
 
 static void nv_init_tx(struct net_device *dev)
@@ -1328,9 +1365,9 @@ static void nv_init_tx(struct net_device *dev)
     np->next_tx = np->nic_tx = 0;
     for (i = 0; i < np->tx_ring_size; i++) {
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-            np->tx_ring.orig[i].FlagLen = 0;
+            np->tx_ring.orig[i].flaglen = 0;
         else
-            np->tx_ring.ex[i].FlagLen = 0;
+            np->tx_ring.ex[i].flaglen = 0;
         np->tx_skbuff[i] = NULL;
         np->tx_dma[i] = 0;
     }
@@ -1373,9 +1410,9 @@ static void nv_drain_tx(struct net_device *dev)
 
     for (i = 0; i < np->tx_ring_size; i++) {
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-            np->tx_ring.orig[i].FlagLen = 0;
+            np->tx_ring.orig[i].flaglen = 0;
         else
-            np->tx_ring.ex[i].FlagLen = 0;
+            np->tx_ring.ex[i].flaglen = 0;
         if (nv_release_txskb(dev, i))
             np->stats.tx_dropped++;
     }
@@ -1387,9 +1424,9 @@ static void nv_drain_rx(struct net_device *dev)
     int i;
     for (i = 0; i < np->rx_ring_size; i++) {
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-            np->rx_ring.orig[i].FlagLen = 0;
+            np->rx_ring.orig[i].flaglen = 0;
         else
-            np->rx_ring.ex[i].FlagLen = 0;
+            np->rx_ring.ex[i].flaglen = 0;
         wmb();
         if (np->rx_skbuff[i]) {
             pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -1450,17 +1487,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
         np->tx_dma_len[nr] = bcnt;
 
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-            np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-            np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+            np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+            np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
         } else {
-            np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-            np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-            np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+            np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+            np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+            np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
         }
         tx_flags = np->tx_flags;
         offset += bcnt;
         size -= bcnt;
-    } while(size);
+    } while (size);
 
     /* setup the fragments */
     for (i = 0; i < fragments; i++) {
@@ -1477,12 +1514,12 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
             np->tx_dma_len[nr] = bcnt;
 
             if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-                np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+                np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+                np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
             } else {
-                np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-                np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-                np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+                np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+                np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+                np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
             }
             offset += bcnt;
             size -= bcnt;
@@ -1491,9 +1528,9 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
     /* set last fragment flag */
     if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-        np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+        np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
     } else {
-        np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+        np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
     }
 
     np->tx_skbuff[nr] = skb;
@@ -1513,10 +1550,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
     /* set tx flags */
     if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-        np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+        np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
     } else {
-        np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
-        np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+        np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
+        np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
     }
 
     dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
@@ -1548,7 +1585,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void nv_tx_done(struct net_device *dev)
 {
     struct fe_priv *np = netdev_priv(dev);
-    u32 Flags;
+    u32 flags;
     unsigned int i;
     struct sk_buff *skb;
 
@@ -1556,22 +1593,22 @@ static void nv_tx_done(struct net_device *dev)
         i = np->nic_tx % np->tx_ring_size;
 
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-            Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+            flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
         else
-            Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
+            flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);
 
-        dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
-            dev->name, np->nic_tx, Flags);
-        if (Flags & NV_TX_VALID)
+        dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
+            dev->name, np->nic_tx, flags);
+        if (flags & NV_TX_VALID)
             break;
         if (np->desc_ver == DESC_VER_1) {
-            if (Flags & NV_TX_LASTPACKET) {
+            if (flags & NV_TX_LASTPACKET) {
                 skb = np->tx_skbuff[i];
-                if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+                if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
                              NV_TX_UNDERFLOW|NV_TX_ERROR)) {
-                    if (Flags & NV_TX_UNDERFLOW)
+                    if (flags & NV_TX_UNDERFLOW)
                         np->stats.tx_fifo_errors++;
-                    if (Flags & NV_TX_CARRIERLOST)
+                    if (flags & NV_TX_CARRIERLOST)
                         np->stats.tx_carrier_errors++;
                     np->stats.tx_errors++;
                 } else {
@@ -1580,13 +1617,13 @@ static void nv_tx_done(struct net_device *dev)
                 }
             }
         } else {
-            if (Flags & NV_TX2_LASTPACKET) {
+            if (flags & NV_TX2_LASTPACKET) {
                 skb = np->tx_skbuff[i];
-                if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+                if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
                              NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
-                    if (Flags & NV_TX2_UNDERFLOW)
+                    if (flags & NV_TX2_UNDERFLOW)
                         np->stats.tx_fifo_errors++;
-                    if (Flags & NV_TX2_CARRIERLOST)
+                    if (flags & NV_TX2_CARRIERLOST)
                         np->stats.tx_carrier_errors++;
                     np->stats.tx_errors++;
                 } else {
@@ -1639,29 +1676,29 @@ static void nv_tx_timeout(struct net_device *dev)
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
             printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
                    i,
-                   le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
-                   le32_to_cpu(np->tx_ring.orig[i].FlagLen),
-                   le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
-                   le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
-                   le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
-                   le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
-                   le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
-                   le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+                   le32_to_cpu(np->tx_ring.orig[i].buf),
+                   le32_to_cpu(np->tx_ring.orig[i].flaglen),
+                   le32_to_cpu(np->tx_ring.orig[i+1].buf),
+                   le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+                   le32_to_cpu(np->tx_ring.orig[i+2].buf),
+                   le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+                   le32_to_cpu(np->tx_ring.orig[i+3].buf),
+                   le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
         } else {
             printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
                    i,
-                   le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
-                   le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
-                   le32_to_cpu(np->tx_ring.ex[i].FlagLen),
-                   le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
-                   le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
-                   le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
-                   le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
-                   le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
-                   le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
-                   le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
-                   le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
-                   le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+                   le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+                   le32_to_cpu(np->tx_ring.ex[i].buflow),
+                   le32_to_cpu(np->tx_ring.ex[i].flaglen),
+                   le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+                   le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+                   le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+                   le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+                   le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+                   le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+                   le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+                   le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+                   le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
         }
     }
 }
@@ -1698,7 +1735,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
     int protolen;    /* length as stored in the proto field */
 
     /* 1) calculate len according to header */
-    if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+    if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
         protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
         hdrlen = VLAN_HLEN;
     } else {
@@ -1741,13 +1778,14 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
     }
 }
 
-static void nv_rx_process(struct net_device *dev)
+static int nv_rx_process(struct net_device *dev, int limit)
 {
     struct fe_priv *np = netdev_priv(dev);
-    u32 Flags;
+    u32 flags;
     u32 vlanflags = 0;
+    int count;
 
-    for (;;) {
+    for (count = 0; count < limit; ++count) {
         struct sk_buff *skb;
         int len;
         int i;
@@ -1756,18 +1794,18 @@ static void nv_rx_process(struct net_device *dev)
 
         i = np->cur_rx % np->rx_ring_size;
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-            Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+            flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
             len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
         } else {
-            Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+            flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
             len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
-            vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
+            vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
         }
 
-        dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
-            dev->name, np->cur_rx, Flags);
+        dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n",
+            dev->name, np->cur_rx, flags);
 
-        if (Flags & NV_RX_AVAIL)
+        if (flags & NV_RX_AVAIL)
             break;    /* still owned by hardware, */
 
         /*
@@ -1781,7 +1819,7 @@ static void nv_rx_process(struct net_device *dev)
 
         {
             int j;
-            dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
+            dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
             for (j=0; j<64; j++) {
                 if ((j%16) == 0)
                     dprintk("\n%03x:", j);
@@ -1791,30 +1829,30 @@ static void nv_rx_process(struct net_device *dev)
         }
         /* look at what we actually got: */
         if (np->desc_ver == DESC_VER_1) {
-            if (!(Flags & NV_RX_DESCRIPTORVALID))
+            if (!(flags & NV_RX_DESCRIPTORVALID))
                 goto next_pkt;
 
-            if (Flags & NV_RX_ERROR) {
-                if (Flags & NV_RX_MISSEDFRAME) {
+            if (flags & NV_RX_ERROR) {
+                if (flags & NV_RX_MISSEDFRAME) {
                     np->stats.rx_missed_errors++;
                     np->stats.rx_errors++;
                     goto next_pkt;
                 }
-                if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
+                if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
                     np->stats.rx_errors++;
                     goto next_pkt;
                 }
-                if (Flags & NV_RX_CRCERR) {
+                if (flags & NV_RX_CRCERR) {
                     np->stats.rx_crc_errors++;
                     np->stats.rx_errors++;
                     goto next_pkt;
                 }
-                if (Flags & NV_RX_OVERFLOW) {
+                if (flags & NV_RX_OVERFLOW) {
                     np->stats.rx_over_errors++;
                     np->stats.rx_errors++;
                     goto next_pkt;
                 }
-                if (Flags & NV_RX_ERROR4) {
+                if (flags & NV_RX_ERROR4) {
                     len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
                     if (len < 0) {
                         np->stats.rx_errors++;
@@ -1822,32 +1860,32 @@ static void nv_rx_process(struct net_device *dev)
                     }
                 }
                 /* framing errors are soft errors. */
-                if (Flags & NV_RX_FRAMINGERR) {
-                    if (Flags & NV_RX_SUBSTRACT1) {
+                if (flags & NV_RX_FRAMINGERR) {
+                    if (flags & NV_RX_SUBSTRACT1) {
                         len--;
                     }
                 }
             }
         } else {
-            if (!(Flags & NV_RX2_DESCRIPTORVALID))
+            if (!(flags & NV_RX2_DESCRIPTORVALID))
                 goto next_pkt;
 
-            if (Flags & NV_RX2_ERROR) {
-                if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
+            if (flags & NV_RX2_ERROR) {
+                if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
                     np->stats.rx_errors++;
                     goto next_pkt;
                 }
-                if (Flags & NV_RX2_CRCERR) {
+                if (flags & NV_RX2_CRCERR) {
                     np->stats.rx_crc_errors++;
                     np->stats.rx_errors++;
                     goto next_pkt;
                 }
-                if (Flags & NV_RX2_OVERFLOW) {
+                if (flags & NV_RX2_OVERFLOW) {
                     np->stats.rx_over_errors++;
                     np->stats.rx_errors++;
                     goto next_pkt;
                 }
-                if (Flags & NV_RX2_ERROR4) {
+                if (flags & NV_RX2_ERROR4) {
                     len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
                     if (len < 0) {
                         np->stats.rx_errors++;
@@ -1855,17 +1893,17 @@ static void nv_rx_process(struct net_device *dev)
                     }
                 }
                 /* framing errors are soft errors */
-                if (Flags & NV_RX2_FRAMINGERR) {
-                    if (Flags & NV_RX2_SUBSTRACT1) {
+                if (flags & NV_RX2_FRAMINGERR) {
+                    if (flags & NV_RX2_SUBSTRACT1) {
                         len--;
                     }
                 }
             }
-            if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
-                Flags &= NV_RX2_CHECKSUMMASK;
-                if (Flags == NV_RX2_CHECKSUMOK1 ||
-                    Flags == NV_RX2_CHECKSUMOK2 ||
-                    Flags == NV_RX2_CHECKSUMOK3) {
+            if (np->rx_csum) {
+                flags &= NV_RX2_CHECKSUMMASK;
+                if (flags == NV_RX2_CHECKSUMOK1 ||
+                    flags == NV_RX2_CHECKSUMOK2 ||
+                    flags == NV_RX2_CHECKSUMOK3) {
                     dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
                     np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
                 } else {
@@ -1881,17 +1919,27 @@ static void nv_rx_process(struct net_device *dev)
         skb->protocol = eth_type_trans(skb, dev);
         dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
             dev->name, np->cur_rx, len, skb->protocol);
-        if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
-            vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
-        } else {
+#ifdef CONFIG_FORCEDETH_NAPI
+        if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+            vlan_hwaccel_receive_skb(skb, np->vlangrp,
+                                     vlanflags & NV_RX3_VLAN_TAG_MASK);
+        else
+            netif_receive_skb(skb);
+#else
+        if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+            vlan_hwaccel_rx(skb, np->vlangrp,
+                            vlanflags & NV_RX3_VLAN_TAG_MASK);
+        else
             netif_rx(skb);
-        }
+#endif
         dev->last_rx = jiffies;
         np->stats.rx_packets++;
         np->stats.rx_bytes += len;
 next_pkt:
         np->cur_rx++;
     }
+
+    return count;
 }
 
 static void set_bufsize(struct net_device *dev)
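nv_rx_process() now consumes at most 'limit' packets and returns the count it handled, which is exactly what the pre-2.6.24 NAPI quota scheme needs. A condensed sketch of that generic contract (the driver's own nv_napi_poll(), shown later in this diff, only charges quota and budget in the branch where it exhausts the limit):

int limit = min(*budget, dev->quota);  /* ceiling for this poll call     */
int pkts  = nv_rx_process(dev, limit); /* consumes at most limit packets */

dev->quota -= pkts;                    /* charge the per-device quota    */
*budget    -= pkts;                    /* and the global softirq budget  */
return pkts == limit;                  /* nonzero: more work, stay polled */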
@@ -1991,7 +2039,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
     struct fe_priv *np = netdev_priv(dev);
     struct sockaddr *macaddr = (struct sockaddr*)addr;
 
-    if(!is_valid_ether_addr(macaddr->sa_data))
+    if (!is_valid_ether_addr(macaddr->sa_data))
         return -EADDRNOTAVAIL;
 
     /* synchronized against open : rtnl_lock() held by caller */
@@ -2033,7 +2081,6 @@ static void nv_set_multicast(struct net_device *dev)
     memset(mask, 0, sizeof(mask));
 
     if (dev->flags & IFF_PROMISC) {
-        printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
         pff |= NVREG_PFF_PROMISC;
     } else {
         pff |= NVREG_PFF_MYADDR;
@@ -2284,20 +2331,20 @@ set_speed:
         lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
 
         switch (adv_pause) {
-        case (ADVERTISE_PAUSE_CAP):
+        case ADVERTISE_PAUSE_CAP:
             if (lpa_pause & LPA_PAUSE_CAP) {
                 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
                 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
                     pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
             }
             break;
-        case (ADVERTISE_PAUSE_ASYM):
+        case ADVERTISE_PAUSE_ASYM:
             if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
             {
                 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
             }
             break;
-        case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+        case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
             if (lpa_pause & LPA_PAUSE_CAP)
             {
                 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
@@ -2377,14 +2424,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
         nv_tx_done(dev);
         spin_unlock(&np->lock);
 
-        nv_rx_process(dev);
-        if (nv_alloc_rx(dev)) {
-            spin_lock(&np->lock);
-            if (!np->in_shutdown)
-                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-            spin_unlock(&np->lock);
-        }
-
         if (events & NVREG_IRQ_LINK) {
             spin_lock(&np->lock);
             nv_link_irq(dev);
@@ -2404,6 +2443,29 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
             printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
                 dev->name, events);
         }
+#ifdef CONFIG_FORCEDETH_NAPI
+        if (events & NVREG_IRQ_RX_ALL) {
+            netif_rx_schedule(dev);
+
+            /* Disable furthur receive irq's */
+            spin_lock(&np->lock);
+            np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+            if (np->msi_flags & NV_MSI_X_ENABLED)
+                writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+            else
+                writel(np->irqmask, base + NvRegIrqMask);
+            spin_unlock(&np->lock);
+        }
+#else
+        nv_rx_process(dev, dev->weight);
+        if (nv_alloc_rx(dev)) {
+            spin_lock(&np->lock);
+            if (!np->in_shutdown)
+                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+            spin_unlock(&np->lock);
+        }
+#endif
         if (i > max_interrupt_work) {
             spin_lock(&np->lock);
             /* disable interrupts on the nic */
@@ -2475,6 +2537,63 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
     return IRQ_RETVAL(i);
 }
 
+#ifdef CONFIG_FORCEDETH_NAPI
+static int nv_napi_poll(struct net_device *dev, int *budget)
+{
+    int pkts, limit = min(*budget, dev->quota);
+    struct fe_priv *np = netdev_priv(dev);
+    u8 __iomem *base = get_hwbase(dev);
+
+    pkts = nv_rx_process(dev, limit);
+
+    if (nv_alloc_rx(dev)) {
+        spin_lock_irq(&np->lock);
+        if (!np->in_shutdown)
+            mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+        spin_unlock_irq(&np->lock);
+    }
+
+    if (pkts < limit) {
+        /* all done, no more packets present */
+        netif_rx_complete(dev);
+
+        /* re-enable receive interrupts */
+        spin_lock_irq(&np->lock);
+        np->irqmask |= NVREG_IRQ_RX_ALL;
+        if (np->msi_flags & NV_MSI_X_ENABLED)
+            writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+        else
+            writel(np->irqmask, base + NvRegIrqMask);
+        spin_unlock_irq(&np->lock);
+        return 0;
+    } else {
+        /* used up our quantum, so reschedule */
+        dev->quota -= pkts;
+        *budget -= pkts;
+        return 1;
+    }
+}
+#endif
+
+#ifdef CONFIG_FORCEDETH_NAPI
+static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+{
+    struct net_device *dev = (struct net_device *) data;
+    u8 __iomem *base = get_hwbase(dev);
+    u32 events;
+
+    events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+    writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+
+    if (events) {
+        netif_rx_schedule(dev);
+        /* disable receive interrupts on the nic */
+        writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+        pci_push(base);
+    }
+    return IRQ_HANDLED;
+}
+#else
 static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 {
     struct net_device *dev = (struct net_device *) data;
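Taken together with the nv_nic_irq() hunk above, the NAPI path follows the usual schedule/mask then complete/unmask handshake, so exactly one of the irq handler and the poll loop owns rx processing at any time. Condensed from the code above as an annotation (not additional driver code; the re-enable shown is the non-MSI-X variant):

/* irq side: hand rx work to the softirq and mask rx interrupts */
netif_rx_schedule(dev);
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);   /* mask the rx vector  */
pci_push(base);

/* poll side, once nv_rx_process() drains the ring (pkts < limit): */
netif_rx_complete(dev);
np->irqmask |= NVREG_IRQ_RX_ALL;
writel(np->irqmask, base + NvRegIrqMask);        /* unmask, irqs resume */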
@@ -2493,7 +2612,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
         if (!(events & np->irqmask))
             break;
 
-        nv_rx_process(dev);
+        nv_rx_process(dev, dev->weight);
         if (nv_alloc_rx(dev)) {
             spin_lock_irq(&np->lock);
             if (!np->in_shutdown)
@@ -2515,12 +2634,12 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
             spin_unlock_irq(&np->lock);
             break;
         }
-
     }
     dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
 
     return IRQ_RETVAL(i);
 }
+#endif
 
 static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 {
@@ -3058,9 +3177,18 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
         if (netif_running(dev))
             printk(KERN_INFO "%s: link down.\n", dev->name);
         bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-        mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
-
+        if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+            bmcr |= BMCR_ANENABLE;
+            /* reset the phy in order for settings to stick,
+             * and cause autoneg to start */
+            if (phy_reset(dev, bmcr)) {
+                printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+                return -EINVAL;
+            }
+        } else {
+            bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+            mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+        }
     } else {
         int adv, bmcr;
 
@@ -3100,17 +3228,19 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
             bmcr |= BMCR_FULLDPLX;
         if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
             bmcr |= BMCR_SPEED100;
-        mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
         if (np->phy_oui == PHY_OUI_MARVELL) {
-            /* reset the phy */
-            if (phy_reset(dev)) {
+            /* reset the phy in order for forced mode settings to stick */
+            if (phy_reset(dev, bmcr)) {
                 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
                 return -EINVAL;
             }
-        } else if (netif_running(dev)) {
-            /* Wait a bit and then reconfigure the nic. */
-            udelay(10);
-            nv_linkchange(dev);
+        } else {
+            mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+            if (netif_running(dev)) {
+                /* Wait a bit and then reconfigure the nic. */
+                udelay(10);
+                nv_linkchange(dev);
+            }
         }
     }
 
@@ -3167,8 +3297,17 @@ static int nv_nway_reset(struct net_device *dev)
     }
 
     bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-    bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-    mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+    if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+        bmcr |= BMCR_ANENABLE;
+        /* reset the phy in order for settings to stick*/
+        if (phy_reset(dev, bmcr)) {
+            printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+            return -EINVAL;
+        }
+    } else {
+        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+        mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+    }
 
     if (netif_running(dev)) {
         nv_start_rx(dev);
@@ -3246,7 +3385,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
     if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
         /* fall back to old rings */
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-            if(rxtx_ring)
+            if (rxtx_ring)
                 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
                                     rxtx_ring, ring_addr);
         } else {
@@ -3419,7 +3558,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
 static u32 nv_get_rx_csum(struct net_device *dev)
 {
     struct fe_priv *np = netdev_priv(dev);
-    return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
+    return (np->rx_csum) != 0;
 }
 
 static int nv_set_rx_csum(struct net_device *dev, u32 data)
@@ -3429,22 +3568,15 @@ static int nv_set_rx_csum(struct net_device *dev, u32 data)
     int retcode = 0;
 
     if (np->driver_data & DEV_HAS_CHECKSUM) {
-
-        if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
-            (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
-            /* already set or unset */
-            return 0;
-        }
-
         if (data) {
+            np->rx_csum = 1;
             np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
-        } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
-            np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
         } else {
-            printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
-            return -EINVAL;
+            np->rx_csum = 0;
+            /* vlan is dependent on rx checksum offload */
+            if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
+                np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
         }
-
         if (netif_running(dev)) {
             spin_lock_irq(&np->lock);
             writel(np->txrxctl_bits, base + NvRegTxRxControl);
@@ -3482,7 +3614,7 @@ static int nv_get_stats_count(struct net_device *dev)
     struct fe_priv *np = netdev_priv(dev);
 
     if (np->driver_data & DEV_HAS_STATISTICS)
-        return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+        return sizeof(struct nv_ethtool_stats)/sizeof(u64);
     else
         return 0;
 }
@@ -3620,7 +3752,7 @@ static int nv_loopback_test(struct net_device *dev)
     struct sk_buff *tx_skb, *rx_skb;
     dma_addr_t test_dma_addr;
     u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
-    u32 Flags;
+    u32 flags;
     int len, i, pkt_len;
     u8 *pkt_data;
     u32 filter_flags = 0;
@@ -3664,12 +3796,12 @@ static int nv_loopback_test(struct net_device *dev)
                                    tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
 
     if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-        np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
-        np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+        np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
+        np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
     } else {
-        np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
-        np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
-        np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+        np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
+        np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+        np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
     }
     writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
     pci_push(get_hwbase(dev));
@@ -3678,21 +3810,21 @@ static int nv_loopback_test(struct net_device *dev)
 
     /* check for rx of the packet */
     if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-        Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+        flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
         len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
 
     } else {
-        Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+        flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
         len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
     }
 
-    if (Flags & NV_RX_AVAIL) {
+    if (flags & NV_RX_AVAIL) {
         ret = 0;
     } else if (np->desc_ver == DESC_VER_1) {
-        if (Flags & NV_RX_ERROR)
+        if (flags & NV_RX_ERROR)
             ret = 0;
     } else {
-        if (Flags & NV_RX2_ERROR) {
+        if (flags & NV_RX2_ERROR) {
             ret = 0;
         }
     }
@@ -3754,6 +3886,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
     if (test->flags & ETH_TEST_FL_OFFLINE) {
         if (netif_running(dev)) {
             netif_stop_queue(dev);
+            netif_poll_disable(dev);
             netif_tx_lock_bh(dev);
             spin_lock_irq(&np->lock);
             nv_disable_hw_interrupts(dev, np->irqmask);
@@ -3812,6 +3945,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
             nv_start_rx(dev);
             nv_start_tx(dev);
             netif_start_queue(dev);
+            netif_poll_enable(dev);
             nv_enable_hw_interrupts(dev, np->irqmask);
         }
     }
@@ -3829,7 +3963,7 @@ static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
     }
 }
 
-static struct ethtool_ops ops = {
+static const struct ethtool_ops ops = {
     .get_drvinfo = nv_get_drvinfo,
     .get_link = ethtool_op_get_link,
     .get_wol = nv_get_wol,
@@ -3896,10 +4030,9 @@ static int nv_open(struct net_device *dev)
 
     dprintk(KERN_DEBUG "nv_open: begin\n");
 
-    /* 1) erase previous misconfiguration */
+    /* erase previous misconfiguration */
     if (np->driver_data & DEV_HAS_POWER_CNTRL)
         nv_mac_reset(dev);
-    /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
     writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
     writel(0, base + NvRegMulticastAddrB);
     writel(0, base + NvRegMulticastMaskA);
@@ -3914,26 +4047,22 @@ static int nv_open(struct net_device *dev)
     if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
         writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
 
-    /* 2) initialize descriptor rings */
+    /* initialize descriptor rings */
     set_bufsize(dev);
     oom = nv_init_ring(dev);
 
     writel(0, base + NvRegLinkSpeed);
-    writel(0, base + NvRegUnknownTransmitterReg);
+    writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
     nv_txrx_reset(dev);
     writel(0, base + NvRegUnknownSetupReg6);
 
     np->in_shutdown = 0;
 
-    /* 3) set mac address */
-    nv_copy_mac_to_hw(dev);
-
-    /* 4) give hw rings */
+    /* give hw rings */
     setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
     writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
         base + NvRegRingSizes);
 
-    /* 5) continue setup */
     writel(np->linkspeed, base + NvRegLinkSpeed);
     if (np->desc_ver == DESC_VER_1)
         writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
@@ -3951,7 +4080,6 @@ static int nv_open(struct net_device *dev)
     writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
     writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 
-    /* 6) continue setup */
     writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
     writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
     writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
@@ -4021,6 +4149,8 @@ static int nv_open(struct net_device *dev)
     nv_start_rx(dev);
     nv_start_tx(dev);
     netif_start_queue(dev);
+    netif_poll_enable(dev);
+
     if (ret) {
         netif_carrier_on(dev);
     } else {
@@ -4050,6 +4180,7 @@ static int nv_close(struct net_device *dev)
4050 spin_lock_irq(&np->lock); 4180 spin_lock_irq(&np->lock);
4051 np->in_shutdown = 1; 4181 np->in_shutdown = 1;
4052 spin_unlock_irq(&np->lock); 4182 spin_unlock_irq(&np->lock);
4183 netif_poll_disable(dev);
4053 synchronize_irq(dev->irq); 4184 synchronize_irq(dev->irq);
4054 4185
4055 del_timer_sync(&np->oom_kick); 4186 del_timer_sync(&np->oom_kick);
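Together with the nv_open hunk above, these two additions establish the usual NAPI bracket: the poll handler only becomes runnable once the queues are live, and it is drained before the interrupt is synchronized on the way down. A condensed sketch of that ordering, using the netdev-level NAPI calls of this kernel generation; everything except the netif_* API is illustrative:

	/* Sketch of the open/close NAPI bracket introduced by this patch
	 * (pre-napi_struct API: poll state lives in struct net_device). */
	static int example_open(struct net_device *dev)
	{
		/* ... rings, irqs and hardware brought up first ... */
		netif_start_queue(dev);
		netif_poll_enable(dev);	/* poll may run from here on */
		return 0;
	}

	static int example_close(struct net_device *dev)
	{
		netif_poll_disable(dev);	/* waits for a running poll to exit */
		synchronize_irq(dev->irq);
		/* ... stop tx/rx, free rings ... */
		return 0;
	}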
@@ -4077,12 +4208,6 @@ static int nv_close(struct net_device *dev)
4077 if (np->wolenabled) 4208 if (np->wolenabled)
4078 nv_start_rx(dev); 4209 nv_start_rx(dev);
4079 4210
4080 /* special op: write back the misordered MAC address - otherwise
4081 * the next nv_probe would see a wrong address.
4082 */
4083 writel(np->orig_mac[0], base + NvRegMacAddrA);
4084 writel(np->orig_mac[1], base + NvRegMacAddrB);
4085
4086 /* FIXME: power down nic */ 4211 /* FIXME: power down nic */
4087 4212
4088 return 0; 4213 return 0;
@@ -4095,7 +4220,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4095 unsigned long addr; 4220 unsigned long addr;
4096 u8 __iomem *base; 4221 u8 __iomem *base;
4097 int err, i; 4222 int err, i;
4098 u32 powerstate; 4223 u32 powerstate, txreg;
4099 4224
4100 dev = alloc_etherdev(sizeof(struct fe_priv)); 4225 dev = alloc_etherdev(sizeof(struct fe_priv));
4101 err = -ENOMEM; 4226 err = -ENOMEM;
@@ -4191,6 +4316,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4191 np->pkt_limit = NV_PKTLIMIT_2; 4316 np->pkt_limit = NV_PKTLIMIT_2;
4192 4317
4193 if (id->driver_data & DEV_HAS_CHECKSUM) { 4318 if (id->driver_data & DEV_HAS_CHECKSUM) {
4319 np->rx_csum = 1;
4194 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4320 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4195 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 4321 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4196#ifdef NETIF_F_TSO 4322#ifdef NETIF_F_TSO
@@ -4271,6 +4397,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4271#ifdef CONFIG_NET_POLL_CONTROLLER 4397#ifdef CONFIG_NET_POLL_CONTROLLER
4272 dev->poll_controller = nv_poll_controller; 4398 dev->poll_controller = nv_poll_controller;
4273#endif 4399#endif
4400 dev->weight = 64;
4401#ifdef CONFIG_FORCEDETH_NAPI
4402 dev->poll = nv_napi_poll;
4403#endif
4274 SET_ETHTOOL_OPS(dev, &ops); 4404 SET_ETHTOOL_OPS(dev, &ops);
4275 dev->tx_timeout = nv_tx_timeout; 4405 dev->tx_timeout = nv_tx_timeout;
4276 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 4406 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
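dev->weight and dev->poll are the pre-2.6.24 NAPI hookup: the poll handler hangs directly off struct net_device and must honour both the global *budget and the per-device quota. A skeleton of that contract; nv_rx_process() is assumed to be the patch's bounded receive loop, and the interrupt re-enable is elided:

	/* Sketch of the old NAPI poll contract this patch codes against.
	 * Returns 0 when all work is done (and polling is left),
	 * 1 to stay on the poll list. */
	static int example_napi_poll(struct net_device *dev, int *budget)
	{
		int limit = min(*budget, dev->quota);
		int done = nv_rx_process(dev, limit);	/* packets handled */

		*budget -= done;
		dev->quota -= done;

		if (done < limit) {
			netif_rx_complete(dev);	/* leave polling state */
			/* re-enable the rx interrupt here */
			return 0;
		}
		return 1;
	}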
@@ -4282,12 +4412,30 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4282 np->orig_mac[0] = readl(base + NvRegMacAddrA); 4412 np->orig_mac[0] = readl(base + NvRegMacAddrA);
4283 np->orig_mac[1] = readl(base + NvRegMacAddrB); 4413 np->orig_mac[1] = readl(base + NvRegMacAddrB);
4284 4414
4285 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; 4415 /* check the workaround bit for correct mac address order */
4286 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; 4416 txreg = readl(base + NvRegTransmitPoll);
4287 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; 4417 if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
4288 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 4418 /* mac address is already in correct order */
4289 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 4419 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
4290 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 4420 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
4421 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
4422 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
4423 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
4424 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
4425 } else {
 4426 /* need to reverse the mac address into the correct order */
4427 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
4428 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
4429 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
4430 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
4431 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
4432 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
 4433 /* set permanent address to be correct as well */
4434 np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
4435 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
4436 np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
4437 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
4438 }
4291 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 4439 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
4292 4440
4293 if (!is_valid_ether_addr(dev->perm_addr)) { 4441 if (!is_valid_ether_addr(dev->perm_addr)) {
@@ -4310,6 +4458,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4310 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 4458 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
4311 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 4459 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
4312 4460
4461 /* set mac address */
4462 nv_copy_mac_to_hw(dev);
4463
4313 /* disable WOL */ 4464 /* disable WOL */
4314 writel(0, base + NvRegWakeUpFlags); 4465 writel(0, base + NvRegWakeUpFlags);
4315 np->wolenabled = 0; 4466 np->wolenabled = 0;
@@ -4370,6 +4521,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4370 if (id2 < 0 || id2 == 0xffff) 4521 if (id2 < 0 || id2 == 0xffff)
4371 continue; 4522 continue;
4372 4523
4524 np->phy_model = id2 & PHYID2_MODEL_MASK;
4373 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 4525 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
4374 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 4526 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
4375 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", 4527 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
@@ -4422,9 +4574,17 @@ out:
4422static void __devexit nv_remove(struct pci_dev *pci_dev) 4574static void __devexit nv_remove(struct pci_dev *pci_dev)
4423{ 4575{
4424 struct net_device *dev = pci_get_drvdata(pci_dev); 4576 struct net_device *dev = pci_get_drvdata(pci_dev);
4577 struct fe_priv *np = netdev_priv(dev);
4578 u8 __iomem *base = get_hwbase(dev);
4425 4579
4426 unregister_netdev(dev); 4580 unregister_netdev(dev);
4427 4581
4582 /* special op: write back the misordered MAC address - otherwise
4583 * the next nv_probe would see a wrong address.
4584 */
4585 writel(np->orig_mac[0], base + NvRegMacAddrA);
4586 writel(np->orig_mac[1], base + NvRegMacAddrB);
4587
4428 /* free all structures */ 4588 /* free all structures */
4429 free_rings(dev); 4589 free_rings(dev);
4430 iounmap(get_hwbase(dev)); 4590 iounmap(get_hwbase(dev));
@@ -4541,7 +4701,7 @@ static struct pci_driver driver = {
4541static int __init init_nic(void) 4701static int __init init_nic(void)
4542{ 4702{
4543 printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); 4703 printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
4544 return pci_module_init(&driver); 4704 return pci_register_driver(&driver);
4545} 4705}
4546 4706
4547static void __exit exit_nic(void) 4707static void __exit exit_nic(void)
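By this point in 2.6, pci_module_init() had become a thin, deprecated wrapper around pci_register_driver(), which is all this hunk converts: same semantics, with the call returning 0 on success or a negative errno. The resulting module boilerplate, sketched with illustrative names:

	/* Sketch of the registration pattern the hunk converts to. */
	static int __init example_init(void)
	{
		return pci_register_driver(&driver);	/* 0 or -errno */
	}

	static void __exit example_exit(void)
	{
		pci_unregister_driver(&driver);
	}

	module_init(example_init);
	module_exit(example_exit);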