 drivers/net/forcedeth.c | 246 ++++++++++++++++----------------
 1 file changed, 123 insertions(+), 123 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11b8f1b43dd5..4c5fe5ae405c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -381,21 +381,21 @@ enum {
 
 /* Big endian: should work, but is untested */
 struct ring_desc {
-	u32 PacketBuffer;
-	u32 FlagLen;
+	u32 buf;
+	u32 flaglen;
 };
 
 struct ring_desc_ex {
-	u32 PacketBufferHigh;
-	u32 PacketBufferLow;
-	u32 TxVlan;
-	u32 FlagLen;
+	u32 bufhigh;
+	u32 buflow;
+	u32 txvlan;
+	u32 flaglen;
 };
 
-typedef union _ring_type {
+union ring_type {
 	struct ring_desc* orig;
 	struct ring_desc_ex* ex;
-} ring_type;
+};
 
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
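
The first hunk is pure style cleanup: the StudlyCaps descriptor fields (PacketBuffer, FlagLen, ...) become lowercase, and the typedef'd union _ring_type becomes a plain union ring_type, matching the kernel's preference for explicit struct/union tags over typedefs. The same union-of-ring-views pattern recurs through the rest of the diff; here is a minimal userspace sketch of it (stand-in types and an illustrative ring size; the real driver allocates the ring as DMA-coherent memory and accesses fields through le32 conversions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the driver's descriptor layouts after the rename. */
struct ring_desc    { uint32_t buf; uint32_t flaglen; };
struct ring_desc_ex { uint32_t bufhigh, buflow, txvlan, flaglen; };

/* One pointer slot, two possible element layouts. */
union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

enum { DESC_VER_1 = 1, DESC_VER_2, DESC_VER_3 };

int main(void)
{
	union ring_type rx_ring;
	int desc_ver = DESC_VER_3;	/* pretend: 64-bit capable NIC */
	int ring_size = 16;		/* illustrative */

	/* Allocate whichever element type this descriptor version uses. */
	if (desc_ver == DESC_VER_1 || desc_ver == DESC_VER_2)
		rx_ring.orig = calloc(ring_size, sizeof(struct ring_desc));
	else
		rx_ring.ex = calloc(ring_size, sizeof(struct ring_desc_ex));
	if (!rx_ring.orig)	/* both members alias the same storage */
		return 1;

	/* Every access site repeats the version check, as in the driver. */
	if (desc_ver == DESC_VER_1 || desc_ver == DESC_VER_2)
		rx_ring.orig[0].flaglen = 0;
	else
		rx_ring.ex[0].flaglen = 0;

	printf("element size: %zu bytes\n",
	       desc_ver <= DESC_VER_2 ? sizeof(struct ring_desc)
				      : sizeof(struct ring_desc_ex));
	free(rx_ring.orig);
	return 0;
}

Because both members alias one pointer, a single allocation serves either layout; the cost is that every access site must re-check desc_ver, which is exactly what the repeated version tests in the hunks below do.
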
@@ -713,7 +713,7 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
-	ring_type rx_ring;
+	union ring_type rx_ring;
 	unsigned int cur_rx, refill_rx;
 	struct sk_buff **rx_skbuff;
 	dma_addr_t *rx_dma;
@@ -733,7 +733,7 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	ring_type tx_ring;
+	union ring_type tx_ring;
 	unsigned int next_tx, nic_tx;
 	struct sk_buff **tx_skbuff;
 	dma_addr_t *tx_dma;
@@ -826,13 +826,13 @@ static inline void pci_push(u8 __iomem *base)
 
 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 {
-	return le32_to_cpu(prd->FlagLen)
+	return le32_to_cpu(prd->flaglen)
 		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 }
 
 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 {
-	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 }
 
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
@@ -885,7 +885,7 @@ static void free_rings(struct net_device *dev)
 	struct fe_priv *np = get_nvpriv(dev);
 
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		if(np->rx_ring.orig)
+		if (np->rx_ring.orig)
 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 					    np->rx_ring.orig, np->ring_addr);
 	} else {
@@ -1258,14 +1258,14 @@ static int nv_alloc_rx(struct net_device *dev)
 		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
 					skb->end-skb->data, PCI_DMA_FROMDEVICE);
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+			np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
 			wmb();
-			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+			np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
 		} else {
-			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
-			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+			np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+			np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
 			wmb();
-			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+			np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
 		}
 		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
 			dev->name, refill_rx);
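
nv_alloc_rx() shows the ownership handoff protocol the rename leaves untouched: the buffer address (split into bufhigh/buflow in the 64-bit descriptor layout) is written first, wmb() orders those stores, and only then is flaglen written with the AVAIL bit, so the NIC can never observe an available descriptor whose pointer words are stale. A hedged userspace sketch of the same publish pattern, with an invented AVAIL bit value and a C11 release fence standing in for wmb():

#include <stdint.h>
#include <stdatomic.h>

#define NV_RX_AVAIL_SKETCH (1u << 31)	/* illustrative ownership bit */

struct ring_desc_ex_sketch {
	uint32_t bufhigh, buflow, txvlan, flaglen;
};

/* Publish a 64-bit DMA address to a descriptor the device polls.
 * Mirrors nv_alloc_rx(): address halves first, barrier, then AVAIL. */
static void publish_rx_desc(volatile struct ring_desc_ex_sketch *d,
			    uint64_t dma, uint32_t buf_sz)
{
	d->bufhigh = (uint32_t)(dma >> 32);          /* upper 32 bits */
	d->buflow  = (uint32_t)(dma & 0xffffffffu);  /* lower 32 bits */

	/* Order the address stores before the ownership store,
	 * as wmb() does in the driver. */
	atomic_thread_fence(memory_order_release);

	d->flaglen = buf_sz | NV_RX_AVAIL_SKETCH;    /* hand to "NIC" */
}
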
@@ -1315,9 +1315,9 @@ static void nv_init_rx(struct net_device *dev)
 	np->refill_rx = 0;
 	for (i = 0; i < np->rx_ring_size; i++)
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->rx_ring.orig[i].FlagLen = 0;
+			np->rx_ring.orig[i].flaglen = 0;
 		else
-			np->rx_ring.ex[i].FlagLen = 0;
+			np->rx_ring.ex[i].flaglen = 0;
 }
 
 static void nv_init_tx(struct net_device *dev)
@@ -1328,9 +1328,9 @@ static void nv_init_tx(struct net_device *dev)
 	np->next_tx = np->nic_tx = 0;
 	for (i = 0; i < np->tx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->tx_ring.orig[i].FlagLen = 0;
+			np->tx_ring.orig[i].flaglen = 0;
 		else
-			np->tx_ring.ex[i].FlagLen = 0;
+			np->tx_ring.ex[i].flaglen = 0;
 		np->tx_skbuff[i] = NULL;
 		np->tx_dma[i] = 0;
 	}
@@ -1373,9 +1373,9 @@ static void nv_drain_tx(struct net_device *dev)
 
 	for (i = 0; i < np->tx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->tx_ring.orig[i].FlagLen = 0;
+			np->tx_ring.orig[i].flaglen = 0;
 		else
-			np->tx_ring.ex[i].FlagLen = 0;
+			np->tx_ring.ex[i].flaglen = 0;
 		if (nv_release_txskb(dev, i))
 			np->stats.tx_dropped++;
 	}
@@ -1387,9 +1387,9 @@ static void nv_drain_rx(struct net_device *dev)
 	int i;
 	for (i = 0; i < np->rx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->rx_ring.orig[i].FlagLen = 0;
+			np->rx_ring.orig[i].flaglen = 0;
 		else
-			np->rx_ring.ex[i].FlagLen = 0;
+			np->rx_ring.ex[i].flaglen = 0;
 		wmb();
 		if (np->rx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -1450,17 +1450,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		np->tx_dma_len[nr] = bcnt;
 
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+			np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+			np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 		} else {
-			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+			np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+			np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+			np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 		}
 		tx_flags = np->tx_flags;
 		offset += bcnt;
 		size -= bcnt;
-	} while(size);
+	} while (size);
 
 	/* setup the fragments */
 	for (i = 0; i < fragments; i++) {
@@ -1477,12 +1477,12 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			np->tx_dma_len[nr] = bcnt;
 
 			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-				np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+				np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+				np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 			} else {
-				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-				np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+				np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+				np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+				np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 			}
 			offset += bcnt;
 			size -= bcnt;
@@ -1491,9 +1491,9 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set last fragment flag */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+		np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
 	} else {
-		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+		np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
 	}
 
 	np->tx_skbuff[nr] = skb;
@@ -1512,10 +1512,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set tx flags */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+		np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	} else {
-		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
-		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+		np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
+		np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	}
 
 	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
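
The nv_start_xmit() hunks keep the driver's two-phase descriptor fill: data is chopped into per-descriptor chunks in a do/while, the flags for the first and last descriptor of the packet are patched in afterwards, and (extended layout only) the VLAN word goes into the packet's first descriptor, now as txvlan. A small sketch of just the chunking arithmetic, with an invented per-descriptor limit (the full source derives it from constants such as NV_TX2_TSO_MAX_SIZE and from fragment sizes):

#include <stdint.h>
#include <stdio.h>

#define TX_CHUNK_MAX_SKETCH 4096	/* illustrative per-descriptor limit */

/* Mirrors the do { ... } while (size); loop in nv_start_xmit(): a
 * linear buffer larger than one descriptor can carry is split across
 * consecutive tx descriptors, each holding at most the chunk limit. */
static int count_tx_descriptors(uint32_t size)
{
	int descs = 0;

	do {
		uint32_t bcnt = (size > TX_CHUNK_MAX_SKETCH) ?
				TX_CHUNK_MAX_SKETCH : size;
		size -= bcnt;
		descs++;
	} while (size);

	return descs;
}

int main(void)
{
	/* A 9000-byte jumbo frame needs three 4096-byte descriptors. */
	printf("%d\n", count_tx_descriptors(9000));
	return 0;
}
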
@@ -1547,7 +1547,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void nv_tx_done(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	u32 Flags;
+	u32 flags;
 	unsigned int i;
 	struct sk_buff *skb;
 
@@ -1555,22 +1555,22 @@ static void nv_tx_done(struct net_device *dev)
 		i = np->nic_tx % np->tx_ring_size;
 
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+			flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
 		else
-			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
+			flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);
 
-		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
-			dev->name, np->nic_tx, Flags);
-		if (Flags & NV_TX_VALID)
+		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
+			dev->name, np->nic_tx, flags);
+		if (flags & NV_TX_VALID)
 			break;
 		if (np->desc_ver == DESC_VER_1) {
-			if (Flags & NV_TX_LASTPACKET) {
+			if (flags & NV_TX_LASTPACKET) {
 				skb = np->tx_skbuff[i];
-				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+				if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
 					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
-					if (Flags & NV_TX_UNDERFLOW)
+					if (flags & NV_TX_UNDERFLOW)
 						np->stats.tx_fifo_errors++;
-					if (Flags & NV_TX_CARRIERLOST)
+					if (flags & NV_TX_CARRIERLOST)
 						np->stats.tx_carrier_errors++;
 					np->stats.tx_errors++;
 				} else {
@@ -1579,13 +1579,13 @@ static void nv_tx_done(struct net_device *dev)
 				}
 			}
 		} else {
-			if (Flags & NV_TX2_LASTPACKET) {
+			if (flags & NV_TX2_LASTPACKET) {
 				skb = np->tx_skbuff[i];
-				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+				if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
 					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
-					if (Flags & NV_TX2_UNDERFLOW)
+					if (flags & NV_TX2_UNDERFLOW)
 						np->stats.tx_fifo_errors++;
-					if (Flags & NV_TX2_CARRIERLOST)
+					if (flags & NV_TX2_CARRIERLOST)
 						np->stats.tx_carrier_errors++;
 					np->stats.tx_errors++;
 				} else {
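
Beyond the Flags -> flags rename (local variables are lowercase by kernel convention), these two hunks are the same completion-scan protocol twice, once per descriptor generation: read flaglen of the oldest outstanding descriptor, stop if the VALID ownership bit says the NIC still holds it, otherwise fold any error bits into the interface statistics and advance. A condensed sketch with invented bit values:

#include <stdint.h>

#define NV_TX_VALID_SKETCH (1u << 31)	/* illustrative ownership bit */
#define NV_TX_ERROR_SKETCH (1u << 30)	/* illustrative error bit */

struct tx_stats { unsigned long tx_packets, tx_errors; };

/* Mirrors the nv_tx_done() walk: consume completed descriptors in
 * order until one is still owned by the hardware. Returns the new
 * consumer index. */
static unsigned scan_tx_completions(const uint32_t *flaglen_ring,
				    unsigned nic_tx, unsigned next_tx,
				    unsigned ring_size,
				    struct tx_stats *stats)
{
	while (nic_tx != next_tx) {
		uint32_t flags = flaglen_ring[nic_tx % ring_size];

		if (flags & NV_TX_VALID_SKETCH)
			break;			/* still owned by the NIC */
		if (flags & NV_TX_ERROR_SKETCH)
			stats->tx_errors++;
		else
			stats->tx_packets++;
		nic_tx++;
	}
	return nic_tx;
}
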
@@ -1638,29 +1638,29 @@ static void nv_tx_timeout(struct net_device *dev)
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 			printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 			       i,
-			       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
-			       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
-			       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
-			       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
-			       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
-			       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
-			       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
-			       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+			       le32_to_cpu(np->tx_ring.orig[i].buf),
+			       le32_to_cpu(np->tx_ring.orig[i].flaglen),
+			       le32_to_cpu(np->tx_ring.orig[i+1].buf),
+			       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+			       le32_to_cpu(np->tx_ring.orig[i+2].buf),
+			       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+			       le32_to_cpu(np->tx_ring.orig[i+3].buf),
+			       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
 		} else {
 			printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
 			       i,
-			       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
-			       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
-			       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
-			       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
-			       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
-			       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
-			       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
-			       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
-			       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
-			       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
-			       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
-			       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+			       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+			       le32_to_cpu(np->tx_ring.ex[i].buflow),
+			       le32_to_cpu(np->tx_ring.ex[i].flaglen),
+			       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+			       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+			       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+			       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+			       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+			       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+			       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+			       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+			       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
 		}
 	}
 }
@@ -1697,7 +1697,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	int protolen;	/* length as stored in the proto field */
 
 	/* 1) calculate len according to header */
-	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
 		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
 		hdrlen = VLAN_HLEN;
 	} else {
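
__constant_htons() is only needed where the expression must be a compile-time constant (static initializers, case labels); in an ordinary comparison like this one, htons() applied to a constant folds to the same code, so the plain form is preferred. A trivial userspace illustration (ETH_P_8021Q_SKETCH stands in for the kernel's ETH_P_8021Q):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q_SKETCH 0x8100	/* 802.1Q VLAN ethertype */

int main(void)
{
	/* htons(constant) is evaluated at compile time by any modern
	 * compiler, so there is no runtime cost vs. a special
	 * constant-only variant. */
	uint16_t proto = htons(ETH_P_8021Q_SKETCH);

	printf("0x%04x\n", ntohs(proto));
	return 0;
}
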
@@ -1743,7 +1743,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 static void nv_rx_process(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	u32 Flags;
+	u32 flags;
 	u32 vlanflags = 0;
 
 	for (;;) {
@@ -1755,18 +1755,18 @@ static void nv_rx_process(struct net_device *dev)
 
 		i = np->cur_rx % np->rx_ring_size;
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+			flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
 			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
 		} else {
-			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+			flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
 			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
-			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
+			vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
 		}
 
-		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
-			dev->name, np->cur_rx, Flags);
+		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n",
+			dev->name, np->cur_rx, flags);
 
-		if (Flags & NV_RX_AVAIL)
+		if (flags & NV_RX_AVAIL)
 			break;	/* still owned by hardware, */
 
 		/*
@@ -1780,7 +1780,7 @@ static void nv_rx_process(struct net_device *dev)
 
 		{
 			int j;
-			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
+			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
 			for (j=0; j<64; j++) {
 				if ((j%16) == 0)
 					dprintk("\n%03x:", j);
@@ -1790,30 +1790,30 @@ static void nv_rx_process(struct net_device *dev)
 		}
 		/* look at what we actually got: */
 		if (np->desc_ver == DESC_VER_1) {
-			if (!(Flags & NV_RX_DESCRIPTORVALID))
+			if (!(flags & NV_RX_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & NV_RX_ERROR) {
-				if (Flags & NV_RX_MISSEDFRAME) {
+			if (flags & NV_RX_ERROR) {
+				if (flags & NV_RX_MISSEDFRAME) {
 					np->stats.rx_missed_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
+				if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_CRCERR) {
+				if (flags & NV_RX_CRCERR) {
 					np->stats.rx_crc_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_OVERFLOW) {
+				if (flags & NV_RX_OVERFLOW) {
 					np->stats.rx_over_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_ERROR4) {
+				if (flags & NV_RX_ERROR4) {
 					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
 					if (len < 0) {
 						np->stats.rx_errors++;
@@ -1821,32 +1821,32 @@ static void nv_rx_process(struct net_device *dev)
 					}
 				}
 				/* framing errors are soft errors. */
-				if (Flags & NV_RX_FRAMINGERR) {
-					if (Flags & NV_RX_SUBSTRACT1) {
+				if (flags & NV_RX_FRAMINGERR) {
+					if (flags & NV_RX_SUBSTRACT1) {
 						len--;
 					}
 				}
 			}
 		} else {
-			if (!(Flags & NV_RX2_DESCRIPTORVALID))
+			if (!(flags & NV_RX2_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & NV_RX2_ERROR) {
-				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
+			if (flags & NV_RX2_ERROR) {
+				if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_CRCERR) {
+				if (flags & NV_RX2_CRCERR) {
 					np->stats.rx_crc_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_OVERFLOW) {
+				if (flags & NV_RX2_OVERFLOW) {
 					np->stats.rx_over_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_ERROR4) {
+				if (flags & NV_RX2_ERROR4) {
 					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
 					if (len < 0) {
 						np->stats.rx_errors++;
@@ -1854,17 +1854,17 @@ static void nv_rx_process(struct net_device *dev)
 					}
 				}
 				/* framing errors are soft errors */
-				if (Flags & NV_RX2_FRAMINGERR) {
-					if (Flags & NV_RX2_SUBSTRACT1) {
+				if (flags & NV_RX2_FRAMINGERR) {
+					if (flags & NV_RX2_SUBSTRACT1) {
 						len--;
 					}
 				}
 			}
 			if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
-				Flags &= NV_RX2_CHECKSUMMASK;
-				if (Flags == NV_RX2_CHECKSUMOK1 ||
-				    Flags == NV_RX2_CHECKSUMOK2 ||
-				    Flags == NV_RX2_CHECKSUMOK3) {
+				flags &= NV_RX2_CHECKSUMMASK;
+				if (flags == NV_RX2_CHECKSUMOK1 ||
+				    flags == NV_RX2_CHECKSUMOK2 ||
+				    flags == NV_RX2_CHECKSUMOK3) {
 					dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
 					np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
 				} else {
@@ -1990,7 +1990,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 	struct fe_priv *np = netdev_priv(dev);
 	struct sockaddr *macaddr = (struct sockaddr*)addr;
 
-	if(!is_valid_ether_addr(macaddr->sa_data))
+	if (!is_valid_ether_addr(macaddr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	/* synchronized against open : rtnl_lock() held by caller */
@@ -2283,20 +2283,20 @@ set_speed:
 	lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
 
 	switch (adv_pause) {
-	case (ADVERTISE_PAUSE_CAP):
+	case ADVERTISE_PAUSE_CAP:
 		if (lpa_pause & LPA_PAUSE_CAP) {
 			pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 			if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 				pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 		}
 		break;
-	case (ADVERTISE_PAUSE_ASYM):
+	case ADVERTISE_PAUSE_ASYM:
 		if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
 		{
 			pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 		}
 		break;
-	case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+	case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
 		if (lpa_pause & LPA_PAUSE_CAP)
 		{
 			pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
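
Dropping the parentheses around the case labels is again pure style. The switch itself is essentially the 802.3 flow-control resolution table: what we advertised (adv_pause) crossed with what the link partner advertised (lpa_pause) decides whether pause frames are honored (RX) and/or sent (TX). A sketch as a pure function, with invented bit values and covering only what this (truncated) hunk shows:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the ADVERTISE_PAUSE_* / LPA_PAUSE_*
 * constants from <linux/mii.h>; not the real MII register bits. */
#define PAUSE_CAP_SKETCH  0x1
#define PAUSE_ASYM_SKETCH 0x2

struct pause_result { bool rx_en, tx_en; };

/* Mirrors the switch in the set_speed: hunk above: resolve pause
 * frame usage from our advertisement and the partner's abilities. */
static struct pause_result resolve_pause(uint32_t adv_pause,
					 uint32_t lpa_pause,
					 bool tx_requested)
{
	struct pause_result r = { false, false };

	switch (adv_pause & (PAUSE_CAP_SKETCH | PAUSE_ASYM_SKETCH)) {
	case PAUSE_CAP_SKETCH:
		if (lpa_pause & PAUSE_CAP_SKETCH) {
			r.rx_en = true;
			r.tx_en = tx_requested;
		}
		break;
	case PAUSE_ASYM_SKETCH:
		if (lpa_pause == (PAUSE_CAP_SKETCH | PAUSE_ASYM_SKETCH))
			r.tx_en = true;
		break;
	case PAUSE_CAP_SKETCH | PAUSE_ASYM_SKETCH:
		if (lpa_pause & PAUSE_CAP_SKETCH)
			r.rx_en = true;
		/* the hunk ends here; the full driver continues this
		 * branch beyond what the diff shows */
		break;
	}
	return r;
}
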
@@ -3245,7 +3245,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
 		/* fall back to old rings */
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			if(rxtx_ring)
+			if (rxtx_ring)
 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
 						    rxtx_ring, ring_addr);
 		} else {
@@ -3481,7 +3481,7 @@ static int nv_get_stats_count(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 
 	if (np->driver_data & DEV_HAS_STATISTICS)
-		return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+		return sizeof(struct nv_ethtool_stats)/sizeof(u64);
 	else
 		return 0;
 }
@@ -3619,7 +3619,7 @@ static int nv_loopback_test(struct net_device *dev)
 	struct sk_buff *tx_skb, *rx_skb;
 	dma_addr_t test_dma_addr;
 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
-	u32 Flags;
+	u32 flags;
 	int len, i, pkt_len;
 	u8 *pkt_data;
 	u32 filter_flags = 0;
@@ -3663,12 +3663,12 @@ static int nv_loopback_test(struct net_device *dev)
 				       tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
 
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
-		np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
+		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	} else {
-		np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
-		np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
-		np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+		np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
+		np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	}
 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 	pci_push(get_hwbase(dev));
@@ -3677,21 +3677,21 @@ static int nv_loopback_test(struct net_device *dev)
 
 	/* check for rx of the packet */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
 
 	} else {
-		Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
 		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
 	}
 
-	if (Flags & NV_RX_AVAIL) {
+	if (flags & NV_RX_AVAIL) {
 		ret = 0;
 	} else if (np->desc_ver == DESC_VER_1) {
-		if (Flags & NV_RX_ERROR)
+		if (flags & NV_RX_ERROR)
 			ret = 0;
 	} else {
-		if (Flags & NV_RX2_ERROR) {
+		if (flags & NV_RX2_ERROR) {
 			ret = 0;
 		}
 	}
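
The loopback test ends by reading back the first rx descriptor through the same renamed accessors: AVAIL still set means nothing was looped back, and a set error bit also fails the test (in the surrounding function, ret = 0 marks failure). A sketch of that final check, with invented bit values:

#include <stdbool.h>
#include <stdint.h>

#define NV_RX_AVAIL_SKETCH (1u << 31)	/* illustrative: still NIC-owned */
#define NV_RX_ERROR_SKETCH (1u << 30)	/* illustrative error summary bit */

/* Mirrors the end of nv_loopback_test(): the test passes only if the
 * rx descriptor was consumed by the NIC and carries no error bits. */
static bool loopback_rx_ok(uint32_t flags)
{
	if (flags & NV_RX_AVAIL_SKETCH)
		return false;		/* nothing received */
	if (flags & NV_RX_ERROR_SKETCH)
		return false;		/* received, but with errors */
	return true;
}
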