Diffstat (limited to 'drivers/net/forcedeth.c')
 drivers/net/forcedeth.c | 461 ++++++++++++++++++++++++++++++---------------
 1 file changed, 294 insertions(+), 167 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11b8f1b43dd5..a2aca92e8b2a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -109,6 +109,7 @@
  * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  * 0.55: 22 Mar 2006: Add flow control (pause frame).
  * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
+ * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -120,7 +121,12 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION "0.56"
+#ifdef CONFIG_FORCEDETH_NAPI
+#define DRIVERNAPI "-NAPI"
+#else
+#define DRIVERNAPI
+#endif
+#define FORCEDETH_VERSION "0.57"
 #define DRV_NAME "forcedeth"
 
 #include <linux/module.h>
@@ -262,7 +268,8 @@ enum {
 	NvRegRingSizes = 0x108,
 #define NVREG_RINGSZ_TXSHIFT 0
 #define NVREG_RINGSZ_RXSHIFT 16
-	NvRegUnknownTransmitterReg = 0x10c,
+	NvRegTransmitPoll = 0x10c,
+#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
 	NvRegLinkSpeed = 0x110,
 #define NVREG_LINKSPEED_FORCE 0x10000
 #define NVREG_LINKSPEED_10	1000
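The register at offset 0x10c, previously carried as NvRegUnknownTransmitterReg, is now identified as the transmit-poll register, and bit 15 (NVREG_TRANSMITPOLL_MAC_ADDR_REV) is used as a sticky flag recording that the MAC address registers already hold the address in corrected order. Later hunks therefore clear this register with a read-modify-write that keeps only that bit. A standalone sketch of the pattern, with the register modeled as a plain variable and a made-up starting value:

```c
/* Sketch of writel(readl(reg) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, reg):
 * clear the register but preserve the one sticky workaround bit.
 * Not driver code; the mmio register is just a variable here. */
#include <stdint.h>
#include <stdio.h>

#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000u

int main(void)
{
	uint32_t transmit_poll = 0x0000800fu;	/* made-up: flag + junk bits */

	transmit_poll &= NVREG_TRANSMITPOLL_MAC_ADDR_REV;
	printf("after clear: 0x%08x\n", transmit_poll);	/* 0x00008000 */
	return 0;
}
```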
@@ -381,21 +388,21 @@ enum {
 
 /* Big endian: should work, but is untested */
 struct ring_desc {
-	u32 PacketBuffer;
-	u32 FlagLen;
+	__le32 buf;
+	__le32 flaglen;
 };
 
 struct ring_desc_ex {
-	u32 PacketBufferHigh;
-	u32 PacketBufferLow;
-	u32 TxVlan;
-	u32 FlagLen;
+	__le32 bufhigh;
+	__le32 buflow;
+	__le32 txvlan;
+	__le32 flaglen;
 };
 
-typedef union _ring_type {
+union ring_type {
 	struct ring_desc* orig;
 	struct ring_desc_ex* ex;
-} ring_type;
+};
 
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
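Alongside the lower-case renames, the descriptor fields change type from u32 to __le32: the NIC interprets ring descriptors in little-endian byte order on any host, and the annotation lets sparse endianness checking (make C=1 CF=-D__CHECK_ENDIAN__) flag any access that skips cpu_to_le32()/le32_to_cpu(). A standalone sketch, using only standard C, of what that conversion has to do:

```c
/* Sketch of cpu_to_le32(): identity on a little-endian host, a byte swap
 * on a big-endian one. Not the kernel's implementation. */
#include <stdint.h>
#include <stdio.h>

static uint32_t my_cpu_to_le32(uint32_t v)
{
	const union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };

	if (probe.u8[0] == 1)	/* little-endian host: bytes already match */
		return v;
	return ((v & 0x000000ffu) << 24) |	/* big-endian: swap */
	       ((v & 0x0000ff00u) << 8)  |
	       ((v & 0x00ff0000u) >> 8)  |
	       ((v & 0xff000000u) >> 24);
}

int main(void)
{
	printf("0x11223344 -> 0x%08x on this host\n",
	       my_cpu_to_le32(0x11223344u));
	return 0;
}
```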
@@ -653,8 +660,8 @@ static const struct nv_ethtool_str nv_etests_str[] = {
 };
 
 struct register_test {
-	u32 reg;
-	u32 mask;
+	__le32 reg;
+	__le32 mask;
 };
 
 static const struct register_test nv_registers_test[] = {
@@ -713,7 +720,7 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
-	ring_type rx_ring;
+	union ring_type rx_ring;
 	unsigned int cur_rx, refill_rx;
 	struct sk_buff **rx_skbuff;
 	dma_addr_t *rx_dma;
@@ -733,7 +740,7 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	ring_type tx_ring;
+	union ring_type tx_ring;
 	unsigned int next_tx, nic_tx;
 	struct sk_buff **tx_skbuff;
 	dma_addr_t *tx_dma;
@@ -826,13 +833,13 @@ static inline void pci_push(u8 __iomem *base)
 
 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 {
-	return le32_to_cpu(prd->FlagLen)
+	return le32_to_cpu(prd->flaglen)
 		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 }
 
 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 {
-	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 }
 
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
@@ -885,7 +892,7 @@ static void free_rings(struct net_device *dev)
 	struct fe_priv *np = get_nvpriv(dev);
 
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		if(np->rx_ring.orig)
+		if (np->rx_ring.orig)
 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 					    np->rx_ring.orig, np->ring_addr);
 	} else {
@@ -1178,7 +1185,7 @@ static void nv_stop_tx(struct net_device *dev)
 			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
 
 	udelay(NV_TXSTOP_DELAY2);
-	writel(0, base + NvRegUnknownTransmitterReg);
+	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
 }
 
 static void nv_txrx_reset(struct net_device *dev)
@@ -1258,14 +1265,14 @@ static int nv_alloc_rx(struct net_device *dev)
 		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
 					skb->end-skb->data, PCI_DMA_FROMDEVICE);
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+			np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
 			wmb();
-			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+			np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
 		} else {
-			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
-			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+			np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+			np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
 			wmb();
-			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+			np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
 		}
 		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
 			dev->name, refill_rx);
@@ -1277,6 +1284,16 @@ static int nv_alloc_rx(struct net_device *dev)
 	return 0;
 }
 
+/* If rx bufs are exhausted called after 50ms to attempt to refresh */
+#ifdef CONFIG_FORCEDETH_NAPI
+static void nv_do_rx_refill(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *) data;
+
+	/* Just reschedule NAPI rx processing */
+	netif_rx_schedule(dev);
+}
+#else
 static void nv_do_rx_refill(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
@@ -1305,6 +1322,7 @@ static void nv_do_rx_refill(unsigned long data)
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 }
+#endif
 
 static void nv_init_rx(struct net_device *dev)
 {
@@ -1315,9 +1333,9 @@ static void nv_init_rx(struct net_device *dev)
 	np->refill_rx = 0;
 	for (i = 0; i < np->rx_ring_size; i++)
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->rx_ring.orig[i].FlagLen = 0;
+			np->rx_ring.orig[i].flaglen = 0;
 		else
-			np->rx_ring.ex[i].FlagLen = 0;
+			np->rx_ring.ex[i].flaglen = 0;
 }
 
 static void nv_init_tx(struct net_device *dev)
@@ -1328,9 +1346,9 @@ static void nv_init_tx(struct net_device *dev)
 	np->next_tx = np->nic_tx = 0;
 	for (i = 0; i < np->tx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->tx_ring.orig[i].FlagLen = 0;
+			np->tx_ring.orig[i].flaglen = 0;
 		else
-			np->tx_ring.ex[i].FlagLen = 0;
+			np->tx_ring.ex[i].flaglen = 0;
 		np->tx_skbuff[i] = NULL;
 		np->tx_dma[i] = 0;
 	}
@@ -1373,9 +1391,9 @@ static void nv_drain_tx(struct net_device *dev)
 
 	for (i = 0; i < np->tx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->tx_ring.orig[i].FlagLen = 0;
+			np->tx_ring.orig[i].flaglen = 0;
 		else
-			np->tx_ring.ex[i].FlagLen = 0;
+			np->tx_ring.ex[i].flaglen = 0;
 		if (nv_release_txskb(dev, i))
 			np->stats.tx_dropped++;
 	}
@@ -1387,9 +1405,9 @@ static void nv_drain_rx(struct net_device *dev)
 	int i;
 	for (i = 0; i < np->rx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->rx_ring.orig[i].FlagLen = 0;
+			np->rx_ring.orig[i].flaglen = 0;
 		else
-			np->rx_ring.ex[i].FlagLen = 0;
+			np->rx_ring.ex[i].flaglen = 0;
 		wmb();
 		if (np->rx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -1450,17 +1468,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		np->tx_dma_len[nr] = bcnt;
 
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+			np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+			np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 		} else {
-			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+			np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+			np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+			np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 		}
 		tx_flags = np->tx_flags;
 		offset += bcnt;
 		size -= bcnt;
-	} while(size);
+	} while (size);
 
 	/* setup the fragments */
 	for (i = 0; i < fragments; i++) {
@@ -1477,12 +1495,12 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			np->tx_dma_len[nr] = bcnt;
 
 			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-				np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+				np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+				np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 			} else {
-				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-				np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+				np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+				np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+				np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 			}
 			offset += bcnt;
 			size -= bcnt;
@@ -1491,9 +1509,9 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set last fragment flag */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+		np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
 	} else {
-		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+		np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
 	}
 
 	np->tx_skbuff[nr] = skb;
@@ -1512,10 +1530,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set tx flags */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+		np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	} else {
-		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
-		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+		np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
+		np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	}
 
 	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
@@ -1547,7 +1565,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void nv_tx_done(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	u32 Flags;
+	u32 flags;
 	unsigned int i;
 	struct sk_buff *skb;
 
@@ -1555,22 +1573,22 @@ static void nv_tx_done(struct net_device *dev)
 		i = np->nic_tx % np->tx_ring_size;
 
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+			flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
 		else
-			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
+			flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);
 
-		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
-					dev->name, np->nic_tx, Flags);
-		if (Flags & NV_TX_VALID)
+		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
+					dev->name, np->nic_tx, flags);
+		if (flags & NV_TX_VALID)
 			break;
 		if (np->desc_ver == DESC_VER_1) {
-			if (Flags & NV_TX_LASTPACKET) {
+			if (flags & NV_TX_LASTPACKET) {
 				skb = np->tx_skbuff[i];
-				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+				if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
 					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
-					if (Flags & NV_TX_UNDERFLOW)
+					if (flags & NV_TX_UNDERFLOW)
 						np->stats.tx_fifo_errors++;
-					if (Flags & NV_TX_CARRIERLOST)
+					if (flags & NV_TX_CARRIERLOST)
 						np->stats.tx_carrier_errors++;
 					np->stats.tx_errors++;
 				} else {
@@ -1579,13 +1597,13 @@ static void nv_tx_done(struct net_device *dev)
 				}
 			}
 		} else {
-			if (Flags & NV_TX2_LASTPACKET) {
+			if (flags & NV_TX2_LASTPACKET) {
 				skb = np->tx_skbuff[i];
-				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+				if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
 					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
-					if (Flags & NV_TX2_UNDERFLOW)
+					if (flags & NV_TX2_UNDERFLOW)
 						np->stats.tx_fifo_errors++;
-					if (Flags & NV_TX2_CARRIERLOST)
+					if (flags & NV_TX2_CARRIERLOST)
 						np->stats.tx_carrier_errors++;
 					np->stats.tx_errors++;
 				} else {
@@ -1638,29 +1656,29 @@ static void nv_tx_timeout(struct net_device *dev)
 			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 				       i,
-				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
-				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
-				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
-				       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
-				       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
-				       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
-				       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
-				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+				       le32_to_cpu(np->tx_ring.orig[i].buf),
+				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
+				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
+				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
+				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
+				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
 			} else {
 				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
 				       i,
-				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
-				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
-				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
-				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
-				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
-				       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
-				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
-				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
-				       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
-				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
-				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
-				       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+				       le32_to_cpu(np->tx_ring.ex[i].buflow),
+				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
+				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
 			}
 		}
 	}
@@ -1697,7 +1715,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	int protolen;	/* length as stored in the proto field */
 
 	/* 1) calculate len according to header */
-	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
 		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
 		hdrlen = VLAN_HLEN;
 	} else {
@@ -1740,13 +1758,14 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	}
 }
 
-static void nv_rx_process(struct net_device *dev)
+static int nv_rx_process(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	u32 Flags;
+	u32 flags;
 	u32 vlanflags = 0;
+	int count;
 
-	for (;;) {
+	for (count = 0; count < limit; ++count) {
 		struct sk_buff *skb;
 		int len;
 		int i;
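The unbounded for (;;) drain loop becomes a loop bounded by `limit`, and the function now reports how many packets it handled; the NAPI poll path and the interrupt path (which passes dev->weight) both rely on this to cap the work done per call. A standalone sketch of the same contract, with all names and numbers made up:

```c
/* Bounded drain: process at most 'limit' items and report how many were
 * done; the caller infers "ring not empty yet" from done == limit. */
#include <stdio.h>

static int pending = 10;	/* made-up backlog */

static int process_ring(int limit)
{
	int done;

	for (done = 0; done < limit; ++done) {
		if (pending == 0)
			break;		/* ring empty: stop early */
		--pending;
	}
	return done;
}

int main(void)
{
	int done;

	while ((done = process_ring(4)) == 4)
		printf("batch full (%d), poll again\n", done);
	printf("last batch: %d, ring drained\n", done);
	return 0;
}
```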
@@ -1755,18 +1774,18 @@ static void nv_rx_process(struct net_device *dev)
 
 		i = np->cur_rx % np->rx_ring_size;
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+			flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
 			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
 		} else {
-			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+			flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
 			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
-			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
+			vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
 		}
 
-		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
-					dev->name, np->cur_rx, Flags);
+		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n",
+					dev->name, np->cur_rx, flags);
 
-		if (Flags & NV_RX_AVAIL)
+		if (flags & NV_RX_AVAIL)
 			break;	/* still owned by hardware, */
 
1772 /* 1791 /*
@@ -1780,7 +1799,7 @@ static void nv_rx_process(struct net_device *dev)
1780 1799
1781 { 1800 {
1782 int j; 1801 int j;
1783 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags); 1802 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
1784 for (j=0; j<64; j++) { 1803 for (j=0; j<64; j++) {
1785 if ((j%16) == 0) 1804 if ((j%16) == 0)
1786 dprintk("\n%03x:", j); 1805 dprintk("\n%03x:", j);
@@ -1790,30 +1809,30 @@ static void nv_rx_process(struct net_device *dev)
 		}
 		/* look at what we actually got: */
 		if (np->desc_ver == DESC_VER_1) {
-			if (!(Flags & NV_RX_DESCRIPTORVALID))
+			if (!(flags & NV_RX_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & NV_RX_ERROR) {
-				if (Flags & NV_RX_MISSEDFRAME) {
+			if (flags & NV_RX_ERROR) {
+				if (flags & NV_RX_MISSEDFRAME) {
 					np->stats.rx_missed_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
+				if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_CRCERR) {
+				if (flags & NV_RX_CRCERR) {
 					np->stats.rx_crc_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_OVERFLOW) {
+				if (flags & NV_RX_OVERFLOW) {
 					np->stats.rx_over_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_ERROR4) {
+				if (flags & NV_RX_ERROR4) {
 					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
 					if (len < 0) {
 						np->stats.rx_errors++;
@@ -1821,32 +1840,32 @@ static void nv_rx_process(struct net_device *dev)
 					}
 				}
 				/* framing errors are soft errors. */
-				if (Flags & NV_RX_FRAMINGERR) {
-					if (Flags & NV_RX_SUBSTRACT1) {
+				if (flags & NV_RX_FRAMINGERR) {
+					if (flags & NV_RX_SUBSTRACT1) {
 						len--;
 					}
 				}
 			}
 		} else {
-			if (!(Flags & NV_RX2_DESCRIPTORVALID))
+			if (!(flags & NV_RX2_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & NV_RX2_ERROR) {
-				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
+			if (flags & NV_RX2_ERROR) {
+				if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_CRCERR) {
+				if (flags & NV_RX2_CRCERR) {
 					np->stats.rx_crc_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_OVERFLOW) {
+				if (flags & NV_RX2_OVERFLOW) {
 					np->stats.rx_over_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_ERROR4) {
+				if (flags & NV_RX2_ERROR4) {
 					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
 					if (len < 0) {
 						np->stats.rx_errors++;
@@ -1854,17 +1873,17 @@ static void nv_rx_process(struct net_device *dev)
 					}
 				}
 				/* framing errors are soft errors */
-				if (Flags & NV_RX2_FRAMINGERR) {
-					if (Flags & NV_RX2_SUBSTRACT1) {
+				if (flags & NV_RX2_FRAMINGERR) {
+					if (flags & NV_RX2_SUBSTRACT1) {
 						len--;
 					}
 				}
 			}
 			if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
-				Flags &= NV_RX2_CHECKSUMMASK;
-				if (Flags == NV_RX2_CHECKSUMOK1 ||
-				    Flags == NV_RX2_CHECKSUMOK2 ||
-				    Flags == NV_RX2_CHECKSUMOK3) {
+				flags &= NV_RX2_CHECKSUMMASK;
+				if (flags == NV_RX2_CHECKSUMOK1 ||
+				    flags == NV_RX2_CHECKSUMOK2 ||
+				    flags == NV_RX2_CHECKSUMOK3) {
 					dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
 					np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
 				} else {
@@ -1880,17 +1899,27 @@ static void nv_rx_process(struct net_device *dev)
 		skb->protocol = eth_type_trans(skb, dev);
 		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
 					dev->name, np->cur_rx, len, skb->protocol);
-		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
-			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
-		} else {
+#ifdef CONFIG_FORCEDETH_NAPI
+		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+			vlan_hwaccel_receive_skb(skb, np->vlangrp,
+						 vlanflags & NV_RX3_VLAN_TAG_MASK);
+		else
+			netif_receive_skb(skb);
+#else
+		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+			vlan_hwaccel_rx(skb, np->vlangrp,
+					vlanflags & NV_RX3_VLAN_TAG_MASK);
+		else
 			netif_rx(skb);
-		}
+#endif
 		dev->last_rx = jiffies;
 		np->stats.rx_packets++;
 		np->stats.rx_bytes += len;
 next_pkt:
 		np->cur_rx++;
 	}
+
+	return count;
 }
 
 static void set_bufsize(struct net_device *dev)
@@ -1990,7 +2019,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 	struct fe_priv *np = netdev_priv(dev);
 	struct sockaddr *macaddr = (struct sockaddr*)addr;
 
-	if(!is_valid_ether_addr(macaddr->sa_data))
+	if (!is_valid_ether_addr(macaddr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	/* synchronized against open : rtnl_lock() held by caller */
@@ -2283,20 +2312,20 @@ set_speed:
 		lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
 
 		switch (adv_pause) {
-		case (ADVERTISE_PAUSE_CAP):
+		case ADVERTISE_PAUSE_CAP:
 			if (lpa_pause & LPA_PAUSE_CAP) {
 				pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 				if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 			}
 			break;
-		case (ADVERTISE_PAUSE_ASYM):
+		case ADVERTISE_PAUSE_ASYM:
 			if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
 			{
 				pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 			}
 			break;
-		case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+		case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
 			if (lpa_pause & LPA_PAUSE_CAP)
 			{
 				pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
@@ -2376,14 +2405,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 		nv_tx_done(dev);
 		spin_unlock(&np->lock);
 
-		nv_rx_process(dev);
-		if (nv_alloc_rx(dev)) {
-			spin_lock(&np->lock);
-			if (!np->in_shutdown)
-				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock(&np->lock);
-		}
-
 		if (events & NVREG_IRQ_LINK) {
 			spin_lock(&np->lock);
 			nv_link_irq(dev);
@@ -2403,6 +2424,29 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
 						dev->name, events);
 		}
+#ifdef CONFIG_FORCEDETH_NAPI
+		if (events & NVREG_IRQ_RX_ALL) {
+			netif_rx_schedule(dev);
+
+			/* Disable furthur receive irq's */
+			spin_lock(&np->lock);
+			np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+			if (np->msi_flags & NV_MSI_X_ENABLED)
+				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+			else
+				writel(np->irqmask, base + NvRegIrqMask);
+			spin_unlock(&np->lock);
+		}
+#else
+		nv_rx_process(dev, dev->weight);
+		if (nv_alloc_rx(dev)) {
+			spin_lock(&np->lock);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			spin_unlock(&np->lock);
+		}
+#endif
 		if (i > max_interrupt_work) {
 			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
@@ -2474,6 +2518,63 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 	return IRQ_RETVAL(i);
 }
 
+#ifdef CONFIG_FORCEDETH_NAPI
+static int nv_napi_poll(struct net_device *dev, int *budget)
+{
+	int pkts, limit = min(*budget, dev->quota);
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	pkts = nv_rx_process(dev, limit);
+
+	if (nv_alloc_rx(dev)) {
+		spin_lock_irq(&np->lock);
+		if (!np->in_shutdown)
+			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+		spin_unlock_irq(&np->lock);
+	}
+
+	if (pkts < limit) {
+		/* all done, no more packets present */
+		netif_rx_complete(dev);
+
+		/* re-enable receive interrupts */
+		spin_lock_irq(&np->lock);
+		np->irqmask |= NVREG_IRQ_RX_ALL;
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+		else
+			writel(np->irqmask, base + NvRegIrqMask);
+		spin_unlock_irq(&np->lock);
+		return 0;
+	} else {
+		/* used up our quantum, so reschedule */
+		dev->quota -= pkts;
+		*budget -= pkts;
+		return 1;
+	}
+}
+#endif
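nv_napi_poll() follows the ->poll() contract of this kernel generation: consume at most min(*budget, dev->quota) packets, charge both counters, and either return 0 after netif_rx_complete() and re-enabling receive interrupts, or return 1 to stay on the poll list. A standalone sketch of that accounting, with the softirq's quota replenishment folded into the caller loop and every name and number made up for illustration:

```c
/* Sketch of the pre-2.6.24 ->poll() budget/quota accounting. */
#include <stdio.h>

static int rx_pending = 150;	/* made-up rx backlog */
static int quota = 64;		/* models dev->quota (weight 64) */

static int fake_rx_process(int limit)
{
	int done = rx_pending < limit ? rx_pending : limit;

	rx_pending -= done;
	return done;
}

static int fake_poll(int *budget)
{
	int limit = *budget < quota ? *budget : quota;
	int pkts = fake_rx_process(limit);

	quota -= pkts;
	*budget -= pkts;
	/* 0: ring drained, rx irqs would be unmasked; 1: stay scheduled */
	return pkts == limit;
}

int main(void)
{
	int budget = 300;	/* models the softirq's global budget */

	while (fake_poll(&budget)) {
		printf("rescheduled: budget=%d pending=%d\n",
		       budget, rx_pending);
		quota = 64;	/* softirq refills quota from dev->weight */
	}
	printf("done: budget=%d pending=%d\n", budget, rx_pending);
	return 0;
}
```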
+
+#ifdef CONFIG_FORCEDETH_NAPI
+static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+
+	events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+	writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+
+	if (events) {
+		netif_rx_schedule(dev);
+		/* disable receive interrupts on the nic */
+		writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+		pci_push(base);
+	}
+	return IRQ_HANDLED;
+}
+#else
 static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) data;
@@ -2492,7 +2593,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		nv_rx_process(dev);
+		nv_rx_process(dev, dev->weight);
 		if (nv_alloc_rx(dev)) {
 			spin_lock_irq(&np->lock);
 			if (!np->in_shutdown)
@@ -2514,12 +2615,12 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 			spin_unlock_irq(&np->lock);
 			break;
 		}
-
 	}
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
 
 	return IRQ_RETVAL(i);
 }
+#endif
 
 static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 {
@@ -3245,7 +3346,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
 		/* fall back to old rings */
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			if(rxtx_ring)
+			if (rxtx_ring)
 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
 						    rxtx_ring, ring_addr);
 		} else {
@@ -3481,7 +3582,7 @@ static int nv_get_stats_count(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 
 	if (np->driver_data & DEV_HAS_STATISTICS)
-		return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+		return sizeof(struct nv_ethtool_stats)/sizeof(u64);
 	else
 		return 0;
 }
@@ -3619,7 +3720,7 @@ static int nv_loopback_test(struct net_device *dev)
 	struct sk_buff *tx_skb, *rx_skb;
 	dma_addr_t test_dma_addr;
 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
-	u32 Flags;
+	u32 flags;
 	int len, i, pkt_len;
 	u8 *pkt_data;
 	u32 filter_flags = 0;
@@ -3663,12 +3764,12 @@ static int nv_loopback_test(struct net_device *dev)
 				       tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
 
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
-		np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
+		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	} else {
-		np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
-		np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
-		np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+		np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
+		np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	}
 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 	pci_push(get_hwbase(dev));
@@ -3677,21 +3778,21 @@ static int nv_loopback_test(struct net_device *dev)
 
 	/* check for rx of the packet */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
 
 	} else {
-		Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
 		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
 	}
 
-	if (Flags & NV_RX_AVAIL) {
+	if (flags & NV_RX_AVAIL) {
 		ret = 0;
 	} else if (np->desc_ver == DESC_VER_1) {
-		if (Flags & NV_RX_ERROR)
+		if (flags & NV_RX_ERROR)
 			ret = 0;
 	} else {
-		if (Flags & NV_RX2_ERROR) {
+		if (flags & NV_RX2_ERROR) {
 			ret = 0;
 		}
 	}
@@ -3753,6 +3854,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 	if (test->flags & ETH_TEST_FL_OFFLINE) {
 		if (netif_running(dev)) {
 			netif_stop_queue(dev);
+			netif_poll_disable(dev);
 			netif_tx_lock_bh(dev);
 			spin_lock_irq(&np->lock);
 			nv_disable_hw_interrupts(dev, np->irqmask);
@@ -3811,6 +3913,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 			nv_start_rx(dev);
 			nv_start_tx(dev);
 			netif_start_queue(dev);
+			netif_poll_enable(dev);
 			nv_enable_hw_interrupts(dev, np->irqmask);
 		}
 	}
@@ -3895,10 +3998,9 @@ static int nv_open(struct net_device *dev)
 
 	dprintk(KERN_DEBUG "nv_open: begin\n");
 
-	/* 1) erase previous misconfiguration */
+	/* erase previous misconfiguration */
 	if (np->driver_data & DEV_HAS_POWER_CNTRL)
 		nv_mac_reset(dev);
-	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
 	writel(0, base + NvRegMulticastAddrB);
 	writel(0, base + NvRegMulticastMaskA);
@@ -3913,26 +4015,22 @@ static int nv_open(struct net_device *dev)
 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
 		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
 
-	/* 2) initialize descriptor rings */
+	/* initialize descriptor rings */
 	set_bufsize(dev);
 	oom = nv_init_ring(dev);
 
 	writel(0, base + NvRegLinkSpeed);
-	writel(0, base + NvRegUnknownTransmitterReg);
+	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
 	nv_txrx_reset(dev);
 	writel(0, base + NvRegUnknownSetupReg6);
 
 	np->in_shutdown = 0;
 
-	/* 3) set mac address */
-	nv_copy_mac_to_hw(dev);
-
-	/* 4) give hw rings */
+	/* give hw rings */
 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 
-	/* 5) continue setup */
 	writel(np->linkspeed, base + NvRegLinkSpeed);
 	if (np->desc_ver == DESC_VER_1)
 		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
@@ -3950,7 +4048,6 @@ static int nv_open(struct net_device *dev)
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 
-	/* 6) continue setup */
 	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
 	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
 	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
@@ -4020,6 +4117,8 @@ static int nv_open(struct net_device *dev)
 		nv_start_rx(dev);
 		nv_start_tx(dev);
 		netif_start_queue(dev);
+		netif_poll_enable(dev);
+
 		if (ret) {
 			netif_carrier_on(dev);
 		} else {
@@ -4049,6 +4148,7 @@ static int nv_close(struct net_device *dev)
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
 	spin_unlock_irq(&np->lock);
+	netif_poll_disable(dev);
 	synchronize_irq(dev->irq);
 
 	del_timer_sync(&np->oom_kick);
@@ -4076,12 +4176,6 @@ static int nv_close(struct net_device *dev)
 	if (np->wolenabled)
 		nv_start_rx(dev);
 
-	/* special op: write back the misordered MAC address - otherwise
-	 * the next nv_probe would see a wrong address.
-	 */
-	writel(np->orig_mac[0], base + NvRegMacAddrA);
-	writel(np->orig_mac[1], base + NvRegMacAddrB);
-
 	/* FIXME: power down nic */
 
 	return 0;
@@ -4094,7 +4188,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	unsigned long addr;
 	u8 __iomem *base;
 	int err, i;
-	u32 powerstate;
+	u32 powerstate, txreg;
 
 	dev = alloc_etherdev(sizeof(struct fe_priv));
 	err = -ENOMEM;
@@ -4270,6 +4364,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = nv_poll_controller;
 #endif
+	dev->weight = 64;
+#ifdef CONFIG_FORCEDETH_NAPI
+	dev->poll = nv_napi_poll;
+#endif
 	SET_ETHTOOL_OPS(dev, &ops);
 	dev->tx_timeout = nv_tx_timeout;
 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
@@ -4281,12 +4379,30 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	np->orig_mac[0] = readl(base + NvRegMacAddrA);
 	np->orig_mac[1] = readl(base + NvRegMacAddrB);
 
-	dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
-	dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
-	dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
-	dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
-	dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
-	dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+	/* check the workaround bit for correct mac address order */
+	txreg = readl(base + NvRegTransmitPoll);
+	if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
+		/* mac address is already in correct order */
+		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
+		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
+		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
+		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
+		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
+		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+	} else {
+		/* need to reverse mac address to correct order */
+		dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
+		dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
+		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
+		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+		/* set permanent address to be correct aswell */
+		np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+		np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+	}
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
 	if (!is_valid_ether_addr(dev->perm_addr)) {
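Probe now decodes the station address according to the workaround flag: with the flag set the bytes sit in natural order across the two MAC address registers, otherwise they are stored reversed and are swapped while reading, after which the flag is set so the next probe (and the write-back in nv_remove()) sees a consistent layout. A standalone sketch of both decodings, with made-up register values:

```c
/* Sketch (not driver code): recover the station address from the two MAC
 * address registers in either layout handled by the hunk above. */
#include <stdint.h>
#include <stdio.h>

/* a = MacAddrA register, b = MacAddrB register */
static void mac_from_regs(uint32_t a, uint32_t b, int rev_flag_set,
			  uint8_t mac[6])
{
	if (rev_flag_set) {		/* flag set: already in order */
		mac[0] = a & 0xff;         mac[1] = (a >> 8) & 0xff;
		mac[2] = (a >> 16) & 0xff; mac[3] = (a >> 24) & 0xff;
		mac[4] = b & 0xff;         mac[5] = (b >> 8) & 0xff;
	} else {			/* legacy layout: reverse on read */
		mac[0] = (b >> 8) & 0xff;  mac[1] = b & 0xff;
		mac[2] = (a >> 24) & 0xff; mac[3] = (a >> 16) & 0xff;
		mac[4] = (a >> 8) & 0xff;  mac[5] = a & 0xff;
	}
}

int main(void)
{
	uint8_t mac[6];

	mac_from_regs(0x44332211u, 0x00006655u, 0, mac);  /* legacy order */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",	  /* 66:55:44:33:22:11 */
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
```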
@@ -4309,6 +4425,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
 	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
 
+	/* set mac address */
+	nv_copy_mac_to_hw(dev);
+
 	/* disable WOL */
 	writel(0, base + NvRegWakeUpFlags);
 	np->wolenabled = 0;
@@ -4421,9 +4540,17 @@ out:
 static void __devexit nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
 
 	unregister_netdev(dev);
 
+	/* special op: write back the misordered MAC address - otherwise
+	 * the next nv_probe would see a wrong address.
+	 */
+	writel(np->orig_mac[0], base + NvRegMacAddrA);
+	writel(np->orig_mac[1], base + NvRegMacAddrB);
+
 	/* free all structures */
 	free_rings(dev);
 	iounmap(get_hwbase(dev));