author    Ayaz Abdulla <aabdulla@nvidia.com>    2007-01-09 13:30:07 -0500
committer Jeff Garzik <jeff@garzik.org>         2007-02-05 16:58:46 -0500
commit    761fcd9e3e0aa2a02231d1631f31409be5e890d2 (patch)
tree      5a045c004734479ace8ae9c7533abed801ad291c /drivers/net/forcedeth.c
parent    d2f7841277d8613a780ab28d04d8f31a31816153 (diff)
forcedeth: ring access
This patch modifies ring access to use pointers. This avoids computing the current index and avoids accessing the base address of the rings.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c | 451
1 file changed, 265 insertions(+), 186 deletions(-)
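For context, the sketch below illustrates the ring-access pattern this patch introduces; it is not driver code. The structure and variable names (demo_desc, demo_ring, demo_ctx, and so on) are invented for the example, and all hardware details are omitted. The old code located a slot with index arithmetic (nr = refill_rx % rx_ring_size, then rx_ring.orig[nr]); the new code keeps running get/put pointers into the descriptor ring and into a parallel nv_skb_map context array, advances them in place, and wraps them at the last element. Free slots are then computed from the pointer difference, as in the new nv_get_empty_tx_slots() helper.

/* Standalone illustration (not forcedeth code) of pointer-based ring
 * access; all demo_* names are invented for this sketch. */
#include <stdio.h>

struct demo_desc {                 /* stands in for struct ring_desc */
	unsigned int buf;
	unsigned int flaglen;
};

struct demo_skb_map {              /* mirrors the new struct nv_skb_map */
	void *skb;
	unsigned long dma;
	unsigned int dma_len;
};

#define DEMO_RING_SIZE 8

static struct demo_desc demo_ring[DEMO_RING_SIZE];
static struct demo_skb_map demo_ctx[DEMO_RING_SIZE];

/* get/put pointers replace the old cur_rx/refill_rx and next_tx/nic_tx
 * counters; first/last mark the ring ends for wrap-around. */
static struct demo_desc *first_desc = &demo_ring[0];
static struct demo_desc *last_desc  = &demo_ring[DEMO_RING_SIZE - 1];
static struct demo_desc *put_desc   = &demo_ring[0];

static struct demo_skb_map *first_ctx = &demo_ctx[0];
static struct demo_skb_map *last_ctx  = &demo_ctx[DEMO_RING_SIZE - 1];
static struct demo_skb_map *put_ctx   = &demo_ctx[0];
static struct demo_skb_map *get_ctx   = &demo_ctx[0];

/* Fill one slot: the descriptor and its context are reached directly
 * through the put pointers (no "slot = counter % ring_size", no indexing
 * off the ring base), then the pointers are advanced and wrapped. */
static void demo_fill_slot(unsigned long dma, unsigned int len)
{
	put_ctx->dma = dma;
	put_ctx->dma_len = len;
	put_desc->buf = (unsigned int)dma;
	put_desc->flaglen = len;

	if (put_desc++ == last_desc)   /* used the last slot: wrap to the start */
		put_desc = first_desc;
	if (put_ctx++ == last_ctx)
		put_ctx = first_ctx;
}

/* Free slots follow from the distance between the put and get context
 * pointers, as in the patch's nv_get_empty_tx_slots(). */
static unsigned int demo_empty_slots(void)
{
	return (unsigned int)(DEMO_RING_SIZE -
		((DEMO_RING_SIZE + (put_ctx - get_ctx)) % DEMO_RING_SIZE));
}

int main(void)
{
	demo_fill_slot(0x1000, 64);
	demo_fill_slot(0x2000, 64);
	printf("empty slots: %u\n", demo_empty_slots());   /* prints 6 */
	return 0;
}

The same advance-and-wrap idiom (if (ptr++ == last) ptr = first) recurs throughout the patch for get_rx/put_rx, get_tx/put_tx and their *_ctx counterparts.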
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3ef1aec0acf3..bea826bd3168 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -691,6 +691,12 @@ static const struct register_test nv_registers_test[] = {
691 { 0,0 } 691 { 0,0 }
692}; 692};
693 693
694struct nv_skb_map {
695 struct sk_buff *skb;
696 dma_addr_t dma;
697 unsigned int dma_len;
698};
699
694/* 700/*
695 * SMP locking: 701 * SMP locking:
696 * All hardware access under dev->priv->lock, except the performance 702 * All hardware access under dev->priv->lock, except the performance
@@ -741,10 +747,12 @@ struct fe_priv {
741 /* rx specific fields. 747 /* rx specific fields.
742 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 748 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
743 */ 749 */
750 union ring_type get_rx, put_rx, first_rx, last_rx;
751 struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
752 struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
753 struct nv_skb_map *rx_skb;
754
744 union ring_type rx_ring; 755 union ring_type rx_ring;
745 unsigned int cur_rx, refill_rx;
746 struct sk_buff **rx_skbuff;
747 dma_addr_t *rx_dma;
748 unsigned int rx_buf_sz; 756 unsigned int rx_buf_sz;
749 unsigned int pkt_limit; 757 unsigned int pkt_limit;
750 struct timer_list oom_kick; 758 struct timer_list oom_kick;
@@ -761,11 +769,12 @@ struct fe_priv {
761 /* 769 /*
762 * tx specific fields. 770 * tx specific fields.
763 */ 771 */
772 union ring_type get_tx, put_tx, first_tx, last_tx;
773 struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
774 struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
775 struct nv_skb_map *tx_skb;
776
764 union ring_type tx_ring; 777 union ring_type tx_ring;
765 unsigned int next_tx, nic_tx;
766 struct sk_buff **tx_skbuff;
767 dma_addr_t *tx_dma;
768 unsigned int *tx_dma_len;
769 u32 tx_flags; 778 u32 tx_flags;
770 int tx_ring_size; 779 int tx_ring_size;
771 int tx_limit_start; 780 int tx_limit_start;
@@ -921,16 +930,10 @@ static void free_rings(struct net_device *dev)
921 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 930 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
922 np->rx_ring.ex, np->ring_addr); 931 np->rx_ring.ex, np->ring_addr);
923 } 932 }
924 if (np->rx_skbuff) 933 if (np->rx_skb)
925 kfree(np->rx_skbuff); 934 kfree(np->rx_skb);
926 if (np->rx_dma) 935 if (np->tx_skb)
927 kfree(np->rx_dma); 936 kfree(np->tx_skb);
928 if (np->tx_skbuff)
929 kfree(np->tx_skbuff);
930 if (np->tx_dma)
931 kfree(np->tx_dma);
932 if (np->tx_dma_len)
933 kfree(np->tx_dma_len);
934} 937}
935 938
936static int using_multi_irqs(struct net_device *dev) 939static int using_multi_irqs(struct net_device *dev)
@@ -1304,43 +1307,60 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1304static int nv_alloc_rx(struct net_device *dev) 1307static int nv_alloc_rx(struct net_device *dev)
1305{ 1308{
1306 struct fe_priv *np = netdev_priv(dev); 1309 struct fe_priv *np = netdev_priv(dev);
1307 unsigned int refill_rx = np->refill_rx; 1310 union ring_type less_rx;
1308 int nr;
1309 1311
1310 while (np->cur_rx != refill_rx) { 1312 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1313 less_rx.orig = np->get_rx.orig;
1314 if (less_rx.orig-- == np->first_rx.orig)
1315 less_rx.orig = np->last_rx.orig;
1316 } else {
1317 less_rx.ex = np->get_rx.ex;
1318 if (less_rx.ex-- == np->first_rx.ex)
1319 less_rx.ex = np->last_rx.ex;
1320 }
1321
1322 while (1) {
1311 struct sk_buff *skb; 1323 struct sk_buff *skb;
1312 1324
1313 nr = refill_rx % np->rx_ring_size; 1325 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1314 if (np->rx_skbuff[nr] == NULL) { 1326 if (np->put_rx.orig == less_rx.orig)
1327 break;
1328 } else {
1329 if (np->put_rx.ex == less_rx.ex)
1330 break;
1331 }
1332
1333 if (np->put_rx_ctx->skb == NULL) {
1315 1334
1316 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1335 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1317 if (!skb) 1336 if (!skb)
1318 break; 1337 return 1;
1319 1338
1320 skb->dev = dev; 1339 skb->dev = dev;
1321 np->rx_skbuff[nr] = skb; 1340 np->put_rx_ctx->skb = skb;
1322 } else { 1341 } else {
1323 skb = np->rx_skbuff[nr]; 1342 skb = np->put_rx_ctx->skb;
1324 } 1343 }
1325 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, 1344 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1326 skb->end-skb->data, PCI_DMA_FROMDEVICE); 1345 skb->end-skb->data, PCI_DMA_FROMDEVICE);
1346 np->put_rx_ctx->dma_len = skb->end-skb->data;
1327 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1347 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1328 np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]); 1348 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1329 wmb(); 1349 wmb();
1330 np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1350 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1351 if (np->put_rx.orig++ == np->last_rx.orig)
1352 np->put_rx.orig = np->first_rx.orig;
1331 } else { 1353 } else {
1332 np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32; 1354 np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
1333 np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF; 1355 np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
1334 wmb(); 1356 wmb();
1335 np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); 1357 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1358 if (np->put_rx.ex++ == np->last_rx.ex)
1359 np->put_rx.ex = np->first_rx.ex;
1336 } 1360 }
1337 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", 1361 if (np->put_rx_ctx++ == np->last_rx_ctx)
1338 dev->name, refill_rx); 1362 np->put_rx_ctx = np->first_rx_ctx;
1339 refill_rx++;
1340 } 1363 }
1341 np->refill_rx = refill_rx;
1342 if (np->cur_rx - refill_rx == np->rx_ring_size)
1343 return 1;
1344 return 0; 1364 return 0;
1345} 1365}
1346 1366
@@ -1388,29 +1408,53 @@ static void nv_init_rx(struct net_device *dev)
1388{ 1408{
1389 struct fe_priv *np = netdev_priv(dev); 1409 struct fe_priv *np = netdev_priv(dev);
1390 int i; 1410 int i;
1411 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1412 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1413 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1414 else
1415 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1416 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1417 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1391 1418
1392 np->cur_rx = np->rx_ring_size; 1419 for (i = 0; i < np->rx_ring_size; i++) {
1393 np->refill_rx = 0; 1420 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1394 for (i = 0; i < np->rx_ring_size; i++)
1395 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1396 np->rx_ring.orig[i].flaglen = 0; 1421 np->rx_ring.orig[i].flaglen = 0;
1397 else 1422 np->rx_ring.orig[i].buf = 0;
1423 } else {
1398 np->rx_ring.ex[i].flaglen = 0; 1424 np->rx_ring.ex[i].flaglen = 0;
1425 np->rx_ring.ex[i].txvlan = 0;
1426 np->rx_ring.ex[i].bufhigh = 0;
1427 np->rx_ring.ex[i].buflow = 0;
1428 }
1429 np->rx_skb[i].skb = NULL;
1430 np->rx_skb[i].dma = 0;
1431 }
1399} 1432}
1400 1433
1401static void nv_init_tx(struct net_device *dev) 1434static void nv_init_tx(struct net_device *dev)
1402{ 1435{
1403 struct fe_priv *np = netdev_priv(dev); 1436 struct fe_priv *np = netdev_priv(dev);
1404 int i; 1437 int i;
1438 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1439 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1440 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1441 else
1442 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1443 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1444 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1405 1445
1406 np->next_tx = np->nic_tx = 0;
1407 for (i = 0; i < np->tx_ring_size; i++) { 1446 for (i = 0; i < np->tx_ring_size; i++) {
1408 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1447 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1409 np->tx_ring.orig[i].flaglen = 0; 1448 np->tx_ring.orig[i].flaglen = 0;
1410 else 1449 np->tx_ring.orig[i].buf = 0;
1450 } else {
1411 np->tx_ring.ex[i].flaglen = 0; 1451 np->tx_ring.ex[i].flaglen = 0;
1412 np->tx_skbuff[i] = NULL; 1452 np->tx_ring.ex[i].txvlan = 0;
1413 np->tx_dma[i] = 0; 1453 np->tx_ring.ex[i].bufhigh = 0;
1454 np->tx_ring.ex[i].buflow = 0;
1455 }
1456 np->tx_skb[i].skb = NULL;
1457 np->tx_skb[i].dma = 0;
1414 } 1458 }
1415} 1459}
1416 1460
@@ -1421,23 +1465,19 @@ static int nv_init_ring(struct net_device *dev)
1421 return nv_alloc_rx(dev); 1465 return nv_alloc_rx(dev);
1422} 1466}
1423 1467
1424static int nv_release_txskb(struct net_device *dev, unsigned int skbnr) 1468static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
1425{ 1469{
1426 struct fe_priv *np = netdev_priv(dev); 1470 struct fe_priv *np = netdev_priv(dev);
1427 1471
1428 dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n", 1472 if (tx_skb->dma) {
1429 dev->name, skbnr); 1473 pci_unmap_page(np->pci_dev, tx_skb->dma,
1430 1474 tx_skb->dma_len,
1431 if (np->tx_dma[skbnr]) {
1432 pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
1433 np->tx_dma_len[skbnr],
1434 PCI_DMA_TODEVICE); 1475 PCI_DMA_TODEVICE);
1435 np->tx_dma[skbnr] = 0; 1476 tx_skb->dma = 0;
1436 } 1477 }
1437 1478 if (tx_skb->skb) {
1438 if (np->tx_skbuff[skbnr]) { 1479 dev_kfree_skb_any(tx_skb->skb);
1439 dev_kfree_skb_any(np->tx_skbuff[skbnr]); 1480 tx_skb->skb = NULL;
1440 np->tx_skbuff[skbnr] = NULL;
1441 return 1; 1481 return 1;
1442 } else { 1482 } else {
1443 return 0; 1483 return 0;
@@ -1450,11 +1490,16 @@ static void nv_drain_tx(struct net_device *dev)
1450 unsigned int i; 1490 unsigned int i;
1451 1491
1452 for (i = 0; i < np->tx_ring_size; i++) { 1492 for (i = 0; i < np->tx_ring_size; i++) {
1453 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1493 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1454 np->tx_ring.orig[i].flaglen = 0; 1494 np->tx_ring.orig[i].flaglen = 0;
1455 else 1495 np->tx_ring.orig[i].buf = 0;
1496 } else {
1456 np->tx_ring.ex[i].flaglen = 0; 1497 np->tx_ring.ex[i].flaglen = 0;
1457 if (nv_release_txskb(dev, i)) 1498 np->tx_ring.ex[i].txvlan = 0;
1499 np->tx_ring.ex[i].bufhigh = 0;
1500 np->tx_ring.ex[i].buflow = 0;
1501 }
1502 if (nv_release_txskb(dev, &np->tx_skb[i]))
1458 np->stats.tx_dropped++; 1503 np->stats.tx_dropped++;
1459 } 1504 }
1460} 1505}
@@ -1463,18 +1508,24 @@ static void nv_drain_rx(struct net_device *dev)
1463{ 1508{
1464 struct fe_priv *np = netdev_priv(dev); 1509 struct fe_priv *np = netdev_priv(dev);
1465 int i; 1510 int i;
1511
1466 for (i = 0; i < np->rx_ring_size; i++) { 1512 for (i = 0; i < np->rx_ring_size; i++) {
1467 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1513 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1468 np->rx_ring.orig[i].flaglen = 0; 1514 np->rx_ring.orig[i].flaglen = 0;
1469 else 1515 np->rx_ring.orig[i].buf = 0;
1516 } else {
1470 np->rx_ring.ex[i].flaglen = 0; 1517 np->rx_ring.ex[i].flaglen = 0;
1518 np->rx_ring.ex[i].txvlan = 0;
1519 np->rx_ring.ex[i].bufhigh = 0;
1520 np->rx_ring.ex[i].buflow = 0;
1521 }
1471 wmb(); 1522 wmb();
1472 if (np->rx_skbuff[i]) { 1523 if (np->rx_skb[i].skb) {
1473 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1524 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1474 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 1525 np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
1475 PCI_DMA_FROMDEVICE); 1526 PCI_DMA_FROMDEVICE);
1476 dev_kfree_skb(np->rx_skbuff[i]); 1527 dev_kfree_skb(np->rx_skb[i].skb);
1477 np->rx_skbuff[i] = NULL; 1528 np->rx_skb[i].skb = NULL;
1478 } 1529 }
1479 } 1530 }
1480} 1531}
@@ -1485,6 +1536,11 @@ static void drain_ring(struct net_device *dev)
1485 nv_drain_rx(dev); 1536 nv_drain_rx(dev);
1486} 1537}
1487 1538
1539static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1540{
1541 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
1542}
1543
1488/* 1544/*
1489 * nv_start_xmit: dev->hard_start_xmit function 1545 * nv_start_xmit: dev->hard_start_xmit function
1490 * Called with netif_tx_lock held. 1546 * Called with netif_tx_lock held.
@@ -1495,14 +1551,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1495 u32 tx_flags = 0; 1551 u32 tx_flags = 0;
1496 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 1552 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1497 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1553 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1498 unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
1499 unsigned int start_nr = np->next_tx % np->tx_ring_size;
1500 unsigned int i; 1554 unsigned int i;
1501 u32 offset = 0; 1555 u32 offset = 0;
1502 u32 bcnt; 1556 u32 bcnt;
1503 u32 size = skb->len-skb->data_len; 1557 u32 size = skb->len-skb->data_len;
1504 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1558 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1559 u32 empty_slots;
1505 u32 tx_flags_vlan = 0; 1560 u32 tx_flags_vlan = 0;
1561 union ring_type put_tx;
1562 union ring_type start_tx;
1563 union ring_type prev_tx;
1564 struct nv_skb_map* prev_tx_ctx;
1506 1565
1507 /* add fragments to entries count */ 1566 /* add fragments to entries count */
1508 for (i = 0; i < fragments; i++) { 1567 for (i = 0; i < fragments; i++) {
@@ -1512,32 +1571,46 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1512 1571
1513 spin_lock_irq(&np->lock); 1572 spin_lock_irq(&np->lock);
1514 1573
1515 if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) { 1574 empty_slots = nv_get_empty_tx_slots(np);
1575 if ((empty_slots - np->tx_limit_stop) <= entries) {
1516 spin_unlock_irq(&np->lock); 1576 spin_unlock_irq(&np->lock);
1517 netif_stop_queue(dev); 1577 netif_stop_queue(dev);
1518 return NETDEV_TX_BUSY; 1578 return NETDEV_TX_BUSY;
1519 } 1579 }
1520 1580
1581 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1582 start_tx.orig = put_tx.orig = np->put_tx.orig;
1583 else
1584 start_tx.ex = put_tx.ex = np->put_tx.ex;
1585
1521 /* setup the header buffer */ 1586 /* setup the header buffer */
1522 do { 1587 do {
1588 prev_tx = put_tx;
1589 prev_tx_ctx = np->put_tx_ctx;
1523 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1590 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1524 nr = (nr + 1) % np->tx_ring_size; 1591 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1525
1526 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1527 PCI_DMA_TODEVICE); 1592 PCI_DMA_TODEVICE);
1528 np->tx_dma_len[nr] = bcnt; 1593 np->put_tx_ctx->dma_len = bcnt;
1529
1530 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1594 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1531 np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]); 1595 put_tx.orig->buf = cpu_to_le32(np->put_tx_ctx->dma);
1532 np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1596 put_tx.orig->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1533 } else { 1597 } else {
1534 np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1598 put_tx.ex->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1535 np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1599 put_tx.ex->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1536 np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1600 put_tx.ex->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1537 } 1601 }
1538 tx_flags = np->tx_flags; 1602 tx_flags = np->tx_flags;
1539 offset += bcnt; 1603 offset += bcnt;
1540 size -= bcnt; 1604 size -= bcnt;
1605 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1606 if (put_tx.orig++ == np->last_tx.orig)
1607 put_tx.orig = np->first_tx.orig;
1608 } else {
1609 if (put_tx.ex++ == np->last_tx.ex)
1610 put_tx.ex = np->first_tx.ex;
1611 }
1612 if (np->put_tx_ctx++ == np->last_tx_ctx)
1613 np->put_tx_ctx = np->first_tx_ctx;
1541 } while (size); 1614 } while (size);
1542 1615
1543 /* setup the fragments */ 1616 /* setup the fragments */
@@ -1547,34 +1620,43 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1547 offset = 0; 1620 offset = 0;
1548 1621
1549 do { 1622 do {
1623 prev_tx = put_tx;
1624 prev_tx_ctx = np->put_tx_ctx;
1550 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1625 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1551 nr = (nr + 1) % np->tx_ring_size; 1626 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1552 1627 PCI_DMA_TODEVICE);
1553 np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 1628 np->put_tx_ctx->dma_len = bcnt;
1554 PCI_DMA_TODEVICE);
1555 np->tx_dma_len[nr] = bcnt;
1556 1629
1557 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1630 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1558 np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]); 1631 put_tx.orig->buf = cpu_to_le32(np->put_tx_ctx->dma);
1559 np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1632 put_tx.orig->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1560 } else { 1633 } else {
1561 np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1634 put_tx.ex->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1562 np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1635 put_tx.ex->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1563 np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); 1636 put_tx.ex->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1564 } 1637 }
1565 offset += bcnt; 1638 offset += bcnt;
1566 size -= bcnt; 1639 size -= bcnt;
1640 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1641 if (put_tx.orig++ == np->last_tx.orig)
1642 put_tx.orig = np->first_tx.orig;
1643 } else {
1644 if (put_tx.ex++ == np->last_tx.ex)
1645 put_tx.ex = np->first_tx.ex;
1646 }
1647 if (np->put_tx_ctx++ == np->last_tx_ctx)
1648 np->put_tx_ctx = np->first_tx_ctx;
1567 } while (size); 1649 } while (size);
1568 } 1650 }
1569 1651
1570 /* set last fragment flag */ 1652 /* set last fragment flag */
1571 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1653 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1572 np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra); 1654 prev_tx.orig->flaglen |= cpu_to_le32(tx_flags_extra);
1573 } else { 1655 else
1574 np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra); 1656 prev_tx.ex->flaglen |= cpu_to_le32(tx_flags_extra);
1575 }
1576 1657
1577 np->tx_skbuff[nr] = skb; 1658 /* save skb in this slot's context area */
1659 prev_tx_ctx->skb = skb;
1578 1660
1579 if (skb_is_gso(skb)) 1661 if (skb_is_gso(skb))
1580 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1662 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
@@ -1589,14 +1671,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1589 1671
1590 /* set tx flags */ 1672 /* set tx flags */
1591 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1673 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1592 np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 1674 start_tx.orig->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1675 np->put_tx.orig = put_tx.orig;
1593 } else { 1676 } else {
1594 np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan); 1677 start_tx.ex->txvlan = cpu_to_le32(tx_flags_vlan);
1595 np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 1678 start_tx.ex->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1679 np->put_tx.ex = put_tx.ex;
1596 } 1680 }
1597 1681
1598 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", 1682
1599 dev->name, np->next_tx, entries, tx_flags_extra); 1683 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
1684 dev->name, entries, tx_flags_extra);
1600 { 1685 {
1601 int j; 1686 int j;
1602 for (j=0; j<64; j++) { 1687 for (j=0; j<64; j++) {
@@ -1607,8 +1692,6 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1607 dprintk("\n"); 1692 dprintk("\n");
1608 } 1693 }
1609 1694
1610 np->next_tx += entries;
1611
1612 dev->trans_start = jiffies; 1695 dev->trans_start = jiffies;
1613 spin_unlock_irq(&np->lock); 1696 spin_unlock_irq(&np->lock);
1614 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1697 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -1625,24 +1708,26 @@ static void nv_tx_done(struct net_device *dev)
1625{ 1708{
1626 struct fe_priv *np = netdev_priv(dev); 1709 struct fe_priv *np = netdev_priv(dev);
1627 u32 flags; 1710 u32 flags;
1628 unsigned int i;
1629 struct sk_buff *skb; 1711 struct sk_buff *skb;
1630 1712
1631 while (np->nic_tx != np->next_tx) { 1713 while (1) {
1632 i = np->nic_tx % np->tx_ring_size; 1714 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1633 1715 if (np->get_tx.orig == np->put_tx.orig)
1634 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1716 break;
1635 flags = le32_to_cpu(np->tx_ring.orig[i].flaglen); 1717 flags = le32_to_cpu(np->get_tx.orig->flaglen);
1636 else 1718 } else {
1637 flags = le32_to_cpu(np->tx_ring.ex[i].flaglen); 1719 if (np->get_tx.ex == np->put_tx.ex)
1720 break;
1721 flags = le32_to_cpu(np->get_tx.ex->flaglen);
1722 }
1638 1723
1639 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n", 1724 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
1640 dev->name, np->nic_tx, flags); 1725 dev->name, flags);
1641 if (flags & NV_TX_VALID) 1726 if (flags & NV_TX_VALID)
1642 break; 1727 break;
1643 if (np->desc_ver == DESC_VER_1) { 1728 if (np->desc_ver == DESC_VER_1) {
1644 if (flags & NV_TX_LASTPACKET) { 1729 if (flags & NV_TX_LASTPACKET) {
1645 skb = np->tx_skbuff[i]; 1730 skb = np->get_tx_ctx->skb;
1646 if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| 1731 if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
1647 NV_TX_UNDERFLOW|NV_TX_ERROR)) { 1732 NV_TX_UNDERFLOW|NV_TX_ERROR)) {
1648 if (flags & NV_TX_UNDERFLOW) 1733 if (flags & NV_TX_UNDERFLOW)
@@ -1657,7 +1742,7 @@ static void nv_tx_done(struct net_device *dev)
1657 } 1742 }
1658 } else { 1743 } else {
1659 if (flags & NV_TX2_LASTPACKET) { 1744 if (flags & NV_TX2_LASTPACKET) {
1660 skb = np->tx_skbuff[i]; 1745 skb = np->get_tx_ctx->skb;
1661 if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| 1746 if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
1662 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { 1747 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1663 if (flags & NV_TX2_UNDERFLOW) 1748 if (flags & NV_TX2_UNDERFLOW)
@@ -1671,10 +1756,18 @@ static void nv_tx_done(struct net_device *dev)
1671 } 1756 }
1672 } 1757 }
1673 } 1758 }
1674 nv_release_txskb(dev, i); 1759 nv_release_txskb(dev, np->get_tx_ctx);
1675 np->nic_tx++; 1760 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1761 if (np->get_tx.orig++ == np->last_tx.orig)
1762 np->get_tx.orig = np->first_tx.orig;
1763 } else {
1764 if (np->get_tx.ex++ == np->last_tx.ex)
1765 np->get_tx.ex = np->first_tx.ex;
1766 }
1767 if (np->get_tx_ctx++ == np->last_tx_ctx)
1768 np->get_tx_ctx = np->first_tx_ctx;
1676 } 1769 }
1677 if (np->next_tx - np->nic_tx < np->tx_limit_start) 1770 if (nv_get_empty_tx_slots(np) > np->tx_limit_start)
1678 netif_wake_queue(dev); 1771 netif_wake_queue(dev);
1679} 1772}
1680 1773
@@ -1698,9 +1791,8 @@ static void nv_tx_timeout(struct net_device *dev)
1698 { 1791 {
1699 int i; 1792 int i;
1700 1793
1701 printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n", 1794 printk(KERN_INFO "%s: Ring at %lx\n",
1702 dev->name, (unsigned long)np->ring_addr, 1795 dev->name, (unsigned long)np->ring_addr);
1703 np->next_tx, np->nic_tx);
1704 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 1796 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1705 for (i=0;i<=np->register_size;i+= 32) { 1797 for (i=0;i<=np->register_size;i+= 32) {
1706 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 1798 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
@@ -1751,10 +1843,10 @@ static void nv_tx_timeout(struct net_device *dev)
1751 nv_tx_done(dev); 1843 nv_tx_done(dev);
1752 1844
1753 /* 3) if there are dead entries: clear everything */ 1845 /* 3) if there are dead entries: clear everything */
1754 if (np->next_tx != np->nic_tx) { 1846 if (np->get_tx_ctx != np->put_tx_ctx) {
1755 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 1847 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1756 nv_drain_tx(dev); 1848 nv_drain_tx(dev);
1757 np->next_tx = np->nic_tx = 0; 1849 nv_init_tx(dev);
1758 setup_hw_rings(dev, NV_SETUP_TX_RING); 1850 setup_hw_rings(dev, NV_SETUP_TX_RING);
1759 netif_wake_queue(dev); 1851 netif_wake_queue(dev);
1760 } 1852 }
@@ -1827,22 +1919,22 @@ static int nv_rx_process(struct net_device *dev, int limit)
1827 for (count = 0; count < limit; ++count) { 1919 for (count = 0; count < limit; ++count) {
1828 struct sk_buff *skb; 1920 struct sk_buff *skb;
1829 int len; 1921 int len;
1830 int i;
1831 if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
1832 break; /* we scanned the whole ring - do not continue */
1833 1922
1834 i = np->cur_rx % np->rx_ring_size;
1835 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1923 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1836 flags = le32_to_cpu(np->rx_ring.orig[i].flaglen); 1924 if (np->get_rx.orig == np->put_rx.orig)
1837 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); 1925 break; /* we scanned the whole ring - do not continue */
1926 flags = le32_to_cpu(np->get_rx.orig->flaglen);
1927 len = nv_descr_getlength(np->get_rx.orig, np->desc_ver);
1838 } else { 1928 } else {
1839 flags = le32_to_cpu(np->rx_ring.ex[i].flaglen); 1929 if (np->get_rx.ex == np->put_rx.ex)
1840 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); 1930 break; /* we scanned the whole ring - do not continue */
1841 vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow); 1931 flags = le32_to_cpu(np->get_rx.ex->flaglen);
1932 len = nv_descr_getlength_ex(np->get_rx.ex, np->desc_ver);
1933 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
1842 } 1934 }
1843 1935
1844 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n", 1936 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
1845 dev->name, np->cur_rx, flags); 1937 dev->name, flags);
1846 1938
1847 if (flags & NV_RX_AVAIL) 1939 if (flags & NV_RX_AVAIL)
1848 break; /* still owned by hardware, */ 1940 break; /* still owned by hardware, */
@@ -1852,8 +1944,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
1852 * TODO: check if a prefetch of the first cacheline improves 1944 * TODO: check if a prefetch of the first cacheline improves
1853 * the performance. 1945 * the performance.
1854 */ 1946 */
1855 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1947 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
1856 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 1948 np->get_rx_ctx->dma_len,
1857 PCI_DMA_FROMDEVICE); 1949 PCI_DMA_FROMDEVICE);
1858 1950
1859 { 1951 {
@@ -1862,7 +1954,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
1862 for (j=0; j<64; j++) { 1954 for (j=0; j<64; j++) {
1863 if ((j%16) == 0) 1955 if ((j%16) == 0)
1864 dprintk("\n%03x:", j); 1956 dprintk("\n%03x:", j);
1865 dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]); 1957 dprintk(" %02x", ((unsigned char*)np->get_rx_ctx->skb->data)[j]);
1866 } 1958 }
1867 dprintk("\n"); 1959 dprintk("\n");
1868 } 1960 }
@@ -1892,7 +1984,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
1892 goto next_pkt; 1984 goto next_pkt;
1893 } 1985 }
1894 if (flags & NV_RX_ERROR4) { 1986 if (flags & NV_RX_ERROR4) {
1895 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 1987 len = nv_getlen(dev, np->get_rx_ctx->skb->data, len);
1896 if (len < 0) { 1988 if (len < 0) {
1897 np->stats.rx_errors++; 1989 np->stats.rx_errors++;
1898 goto next_pkt; 1990 goto next_pkt;
@@ -1925,7 +2017,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
1925 goto next_pkt; 2017 goto next_pkt;
1926 } 2018 }
1927 if (flags & NV_RX2_ERROR4) { 2019 if (flags & NV_RX2_ERROR4) {
1928 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 2020 len = nv_getlen(dev, np->get_rx_ctx->skb->data, len);
1929 if (len < 0) { 2021 if (len < 0) {
1930 np->stats.rx_errors++; 2022 np->stats.rx_errors++;
1931 goto next_pkt; 2023 goto next_pkt;
@@ -1944,20 +2036,20 @@ static int nv_rx_process(struct net_device *dev, int limit)
1944 flags == NV_RX2_CHECKSUMOK2 || 2036 flags == NV_RX2_CHECKSUMOK2 ||
1945 flags == NV_RX2_CHECKSUMOK3) { 2037 flags == NV_RX2_CHECKSUMOK3) {
1946 dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); 2038 dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
1947 np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; 2039 np->get_rx_ctx->skb->ip_summed = CHECKSUM_UNNECESSARY;
1948 } else { 2040 } else {
1949 dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name); 2041 dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
1950 } 2042 }
1951 } 2043 }
1952 } 2044 }
1953 /* got a valid packet - forward it to the network core */ 2045 /* got a valid packet - forward it to the network core */
1954 skb = np->rx_skbuff[i]; 2046 skb = np->get_rx_ctx->skb;
1955 np->rx_skbuff[i] = NULL; 2047 np->get_rx_ctx->skb = NULL;
1956 2048
1957 skb_put(skb, len); 2049 skb_put(skb, len);
1958 skb->protocol = eth_type_trans(skb, dev); 2050 skb->protocol = eth_type_trans(skb, dev);
1959 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", 2051 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
1960 dev->name, np->cur_rx, len, skb->protocol); 2052 dev->name, len, skb->protocol);
1961#ifdef CONFIG_FORCEDETH_NAPI 2053#ifdef CONFIG_FORCEDETH_NAPI
1962 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) 2054 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
1963 vlan_hwaccel_receive_skb(skb, np->vlangrp, 2055 vlan_hwaccel_receive_skb(skb, np->vlangrp,
@@ -1975,7 +2067,15 @@ static int nv_rx_process(struct net_device *dev, int limit)
1975 np->stats.rx_packets++; 2067 np->stats.rx_packets++;
1976 np->stats.rx_bytes += len; 2068 np->stats.rx_bytes += len;
1977next_pkt: 2069next_pkt:
1978 np->cur_rx++; 2070 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2071 if (np->get_rx.orig++ == np->last_rx.orig)
2072 np->get_rx.orig = np->first_rx.orig;
2073 } else {
2074 if (np->get_rx.ex++ == np->last_rx.ex)
2075 np->get_rx.ex = np->first_rx.ex;
2076 }
2077 if (np->get_rx_ctx++ == np->last_rx_ctx)
2078 np->get_rx_ctx = np->first_rx_ctx;
1979 } 2079 }
1980 2080
1981 return count; 2081 return count;
@@ -3463,7 +3563,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3463{ 3563{
3464 struct fe_priv *np = netdev_priv(dev); 3564 struct fe_priv *np = netdev_priv(dev);
3465 u8 __iomem *base = get_hwbase(dev); 3565 u8 __iomem *base = get_hwbase(dev);
3466 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len; 3566 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
3467 dma_addr_t ring_addr; 3567 dma_addr_t ring_addr;
3468 3568
3469 if (ring->rx_pending < RX_RING_MIN || 3569 if (ring->rx_pending < RX_RING_MIN ||
@@ -3489,12 +3589,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3489 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 3589 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
3490 &ring_addr); 3590 &ring_addr);
3491 } 3591 }
3492 rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL); 3592 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
3493 rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL); 3593 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
3494 tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL); 3594 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
3495 tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
3496 tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
3497 if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
3498 /* fall back to old rings */ 3595 /* fall back to old rings */
3499 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3596 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3500 if (rxtx_ring) 3597 if (rxtx_ring)
@@ -3507,14 +3604,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3507 } 3604 }
3508 if (rx_skbuff) 3605 if (rx_skbuff)
3509 kfree(rx_skbuff); 3606 kfree(rx_skbuff);
3510 if (rx_dma)
3511 kfree(rx_dma);
3512 if (tx_skbuff) 3607 if (tx_skbuff)
3513 kfree(tx_skbuff); 3608 kfree(tx_skbuff);
3514 if (tx_dma)
3515 kfree(tx_dma);
3516 if (tx_dma_len)
3517 kfree(tx_dma_len);
3518 goto exit; 3609 goto exit;
3519 } 3610 }
3520 3611
@@ -3536,8 +3627,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3536 /* set new values */ 3627 /* set new values */
3537 np->rx_ring_size = ring->rx_pending; 3628 np->rx_ring_size = ring->rx_pending;
3538 np->tx_ring_size = ring->tx_pending; 3629 np->tx_ring_size = ring->tx_pending;
3539 np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE; 3630 np->tx_limit_stop = TX_LIMIT_DIFFERENCE;
3540 np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1; 3631 np->tx_limit_start = TX_LIMIT_DIFFERENCE;
3541 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3632 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3542 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 3633 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
3543 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 3634 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
@@ -3545,18 +3636,12 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3545 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 3636 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
3546 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 3637 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
3547 } 3638 }
3548 np->rx_skbuff = (struct sk_buff**)rx_skbuff; 3639 np->rx_skb = (struct nv_skb_map*)rx_skbuff;
3549 np->rx_dma = (dma_addr_t*)rx_dma; 3640 np->tx_skb = (struct nv_skb_map*)tx_skbuff;
3550 np->tx_skbuff = (struct sk_buff**)tx_skbuff;
3551 np->tx_dma = (dma_addr_t*)tx_dma;
3552 np->tx_dma_len = (unsigned int*)tx_dma_len;
3553 np->ring_addr = ring_addr; 3641 np->ring_addr = ring_addr;
3554 3642
3555 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); 3643 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
3556 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); 3644 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
3557 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
3558 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
3559 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
3560 3645
3561 if (netif_running(dev)) { 3646 if (netif_running(dev)) {
3562 /* reinit driver view of the queues */ 3647 /* reinit driver view of the queues */
@@ -3953,7 +4038,7 @@ static int nv_loopback_test(struct net_device *dev)
3953 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 4038 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
3954 dev->name, len, pkt_len); 4039 dev->name, len, pkt_len);
3955 } else { 4040 } else {
3956 rx_skb = np->rx_skbuff[0]; 4041 rx_skb = np->rx_skb[0].skb;
3957 for (i = 0; i < pkt_len; i++) { 4042 for (i = 0; i < pkt_len; i++) {
3958 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4043 if (rx_skb->data[i] != (u8)(i & 0xff)) {
3959 ret = 0; 4044 ret = 0;
@@ -4508,8 +4593,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4508 4593
4509 np->rx_ring_size = RX_RING_DEFAULT; 4594 np->rx_ring_size = RX_RING_DEFAULT;
4510 np->tx_ring_size = TX_RING_DEFAULT; 4595 np->tx_ring_size = TX_RING_DEFAULT;
4511 np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE; 4596 np->tx_limit_stop = TX_LIMIT_DIFFERENCE;
4512 np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1; 4597 np->tx_limit_start = TX_LIMIT_DIFFERENCE;
4513 4598
4514 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4599 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4515 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 4600 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
@@ -4526,18 +4611,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4526 goto out_unmap; 4611 goto out_unmap;
4527 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4612 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4528 } 4613 }
4529 np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL); 4614 np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
4530 np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL); 4615 np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
4531 np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL); 4616 if (!np->rx_skb || !np->tx_skb)
4532 np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
4533 np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
4534 if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
4535 goto out_freering; 4617 goto out_freering;
4536 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); 4618 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4537 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); 4619 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4538 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
4539 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
4540 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
4541 4620
4542 dev->open = nv_open; 4621 dev->open = nv_open;
4543 dev->stop = nv_close; 4622 dev->stop = nv_close;