about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2010-08-19 09:40:54 -0400
committerDavid S. Miller <davem@davemloft.net>2010-08-19 19:45:28 -0400
commit84418e3b10b5ba43eb5b85f725e75fd9c9730670 (patch)
tree49c34a87c4c98ae21818141ebfbf12cf9744a4e4 /drivers
parent31f05a2d875327ef133ac4b62261c4b875d1d10c (diff)
ixgbe: rewrite ethtool test to use standard config functions
This change makes it so that the ethtool loopback test uses the standard ring configuration and allocation functions. As a result the loopback test will be much more effective at testing core driver functionality. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ixgbe/ixgbe.h11
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c362
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c49
3 files changed, 169 insertions(+), 253 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ac4b90edb58e..5cebc3755b64 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -453,9 +453,20 @@ extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
453extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 453extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
454extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 454extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
455extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 455extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
456extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
457extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
456extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 458extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
457extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 459extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
458extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 460extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
461extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
462 struct net_device *,
463 struct ixgbe_adapter *,
464 struct ixgbe_ring *);
465extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
466 struct ixgbe_tx_buffer *);
467extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
468 struct ixgbe_ring *rx_ring,
469 int cleaned_count);
459extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 470extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
460extern int ethtool_ioctl(struct ifreq *ifr); 471extern int ethtool_ioctl(struct ifreq *ifr);
461extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 472extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 4d74f4bc7a01..25ef8b197373 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1438,9 +1438,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1438 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1438 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1439 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1439 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1440 struct ixgbe_hw *hw = &adapter->hw; 1440 struct ixgbe_hw *hw = &adapter->hw;
1441 struct pci_dev *pdev = adapter->pdev;
1442 u32 reg_ctl; 1441 u32 reg_ctl;
1443 int i;
1444 1442
1445 /* shut down the DMA engines now so they can be reinitialized later */ 1443 /* shut down the DMA engines now so they can be reinitialized later */
1446 1444
@@ -1448,14 +1446,15 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1448 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1446 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1449 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1447 reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1450 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1448 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1451 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0)); 1449 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
1452 reg_ctl &= ~IXGBE_RXDCTL_ENABLE; 1450 reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
1453 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl); 1451 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
1454 1452
1455 /* now Tx */ 1453 /* now Tx */
1456 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0)); 1454 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1457 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1455 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1458 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl); 1456 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1457
1459 if (hw->mac.type == ixgbe_mac_82599EB) { 1458 if (hw->mac.type == ixgbe_mac_82599EB) {
1460 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1459 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1461 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1460 reg_ctl &= ~IXGBE_DMATXCTL_TE;
@@ -1464,221 +1463,57 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1464 1463
1465 ixgbe_reset(adapter); 1464 ixgbe_reset(adapter);
1466 1465
1467 if (tx_ring->desc && tx_ring->tx_buffer_info) { 1466 ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
1468 for (i = 0; i < tx_ring->count; i++) { 1467 ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
1469 struct ixgbe_tx_buffer *buf =
1470 &(tx_ring->tx_buffer_info[i]);
1471 if (buf->dma)
1472 dma_unmap_single(&pdev->dev, buf->dma,
1473 buf->length, DMA_TO_DEVICE);
1474 if (buf->skb)
1475 dev_kfree_skb(buf->skb);
1476 }
1477 }
1478
1479 if (rx_ring->desc && rx_ring->rx_buffer_info) {
1480 for (i = 0; i < rx_ring->count; i++) {
1481 struct ixgbe_rx_buffer *buf =
1482 &(rx_ring->rx_buffer_info[i]);
1483 if (buf->dma)
1484 dma_unmap_single(&pdev->dev, buf->dma,
1485 IXGBE_RXBUFFER_2048,
1486 DMA_FROM_DEVICE);
1487 if (buf->skb)
1488 dev_kfree_skb(buf->skb);
1489 }
1490 }
1491
1492 if (tx_ring->desc) {
1493 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1494 tx_ring->dma);
1495 tx_ring->desc = NULL;
1496 }
1497 if (rx_ring->desc) {
1498 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1499 rx_ring->dma);
1500 rx_ring->desc = NULL;
1501 }
1502
1503 kfree(tx_ring->tx_buffer_info);
1504 tx_ring->tx_buffer_info = NULL;
1505 kfree(rx_ring->rx_buffer_info);
1506 rx_ring->rx_buffer_info = NULL;
1507} 1468}
1508 1469
1509static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1470static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1510{ 1471{
1511 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1472 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1512 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1473 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1513 struct pci_dev *pdev = adapter->pdev;
1514 u32 rctl, reg_data; 1474 u32 rctl, reg_data;
1515 int i, ret_val; 1475 int ret_val;
1476 int err;
1516 1477
1517 /* Setup Tx descriptor ring and Tx buffers */ 1478 /* Setup Tx descriptor ring and Tx buffers */
1479 tx_ring->count = IXGBE_DEFAULT_TXD;
1480 tx_ring->queue_index = 0;
1481 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1482 tx_ring->numa_node = adapter->node;
1518 1483
1519 if (!tx_ring->count) 1484 err = ixgbe_setup_tx_resources(adapter, tx_ring);
1520 tx_ring->count = IXGBE_DEFAULT_TXD; 1485 if (err)
1521 1486 return 1;
1522 tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
1523 sizeof(struct ixgbe_tx_buffer),
1524 GFP_KERNEL);
1525 if (!(tx_ring->tx_buffer_info)) {
1526 ret_val = 1;
1527 goto err_nomem;
1528 }
1529
1530 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
1531 tx_ring->size = ALIGN(tx_ring->size, 4096);
1532 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1533 &tx_ring->dma, GFP_KERNEL);
1534 if (!(tx_ring->desc)) {
1535 ret_val = 2;
1536 goto err_nomem;
1537 }
1538 tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1539
1540 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
1541 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1542 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
1543 ((u64) tx_ring->dma >> 32));
1544 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
1545 tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
1546 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
1547 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
1548
1549 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1550 reg_data |= IXGBE_HLREG0_TXPADEN;
1551 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1552 1487
1553 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1488 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1554 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1489 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1555 reg_data |= IXGBE_DMATXCTL_TE; 1490 reg_data |= IXGBE_DMATXCTL_TE;
1556 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1491 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1557 } 1492 }
1558 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
1559 reg_data |= IXGBE_TXDCTL_ENABLE;
1560 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
1561
1562 for (i = 0; i < tx_ring->count; i++) {
1563 union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(tx_ring, i);
1564 struct sk_buff *skb;
1565 unsigned int size = 1024;
1566
1567 skb = alloc_skb(size, GFP_KERNEL);
1568 if (!skb) {
1569 ret_val = 3;
1570 goto err_nomem;
1571 }
1572 skb_put(skb, size);
1573 tx_ring->tx_buffer_info[i].skb = skb;
1574 tx_ring->tx_buffer_info[i].length = skb->len;
1575 tx_ring->tx_buffer_info[i].dma =
1576 dma_map_single(&pdev->dev, skb->data, skb->len,
1577 DMA_TO_DEVICE);
1578 desc->read.buffer_addr =
1579 cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
1580 desc->read.cmd_type_len = cpu_to_le32(skb->len);
1581 desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
1582 IXGBE_TXD_CMD_IFCS |
1583 IXGBE_TXD_CMD_RS);
1584 desc->read.olinfo_status = 0;
1585 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1586 desc->read.olinfo_status |=
1587 (skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
1588 1493
1589 } 1494 ixgbe_configure_tx_ring(adapter, tx_ring);
1590 1495
1591 /* Setup Rx Descriptor ring and Rx buffers */ 1496 /* Setup Rx Descriptor ring and Rx buffers */
1592 1497 rx_ring->count = IXGBE_DEFAULT_RXD;
1593 if (!rx_ring->count) 1498 rx_ring->queue_index = 0;
1594 rx_ring->count = IXGBE_DEFAULT_RXD; 1499 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1595 1500 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
1596 rx_ring->rx_buffer_info = kcalloc(rx_ring->count, 1501 rx_ring->numa_node = adapter->node;
1597 sizeof(struct ixgbe_rx_buffer), 1502
1598 GFP_KERNEL); 1503 err = ixgbe_setup_rx_resources(adapter, rx_ring);
1599 if (!(rx_ring->rx_buffer_info)) { 1504 if (err) {
1600 ret_val = 4; 1505 ret_val = 4;
1601 goto err_nomem; 1506 goto err_nomem;
1602 } 1507 }
1603 1508
1604 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
1605 rx_ring->size = ALIGN(rx_ring->size, 4096);
1606 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1607 &rx_ring->dma, GFP_KERNEL);
1608 if (!(rx_ring->desc)) {
1609 ret_val = 5;
1610 goto err_nomem;
1611 }
1612 rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1613
1614 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1509 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1615 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); 1510 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
1616 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
1617 ((u64)rx_ring->dma & 0xFFFFFFFF));
1618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
1619 ((u64) rx_ring->dma >> 32));
1620 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
1621 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
1622 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
1623 1511
1624 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 1512 ixgbe_configure_rx_ring(adapter, rx_ring);
1625 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1626 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
1627
1628 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1629 reg_data &= ~IXGBE_HLREG0_LPBK;
1630 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1631
1632 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
1633#define IXGBE_RDRXCTL_RDMTS_MASK 0x00000003 /* Receive Descriptor Minimum
1634 Threshold Size mask */
1635 reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
1636 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
1637
1638 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
1639#define IXGBE_MCSTCTRL_MO_MASK 0x00000003 /* Multicast Offset mask */
1640 reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
1641 reg_data |= adapter->hw.mac.mc_filter_type;
1642 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
1643
1644 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
1645 reg_data |= IXGBE_RXDCTL_ENABLE;
1646 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
1647 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1648 int j = adapter->rx_ring[0]->reg_idx;
1649 u32 k;
1650 for (k = 0; k < 10; k++) {
1651 if (IXGBE_READ_REG(&adapter->hw,
1652 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1653 break;
1654 else
1655 msleep(1);
1656 }
1657 }
1658 1513
1659 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; 1514 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
1660 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); 1515 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1661 1516
1662 for (i = 0; i < rx_ring->count; i++) {
1663 union ixgbe_adv_rx_desc *rx_desc =
1664 IXGBE_RX_DESC_ADV(rx_ring, i);
1665 struct sk_buff *skb;
1666
1667 skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
1668 if (!skb) {
1669 ret_val = 6;
1670 goto err_nomem;
1671 }
1672 skb_reserve(skb, NET_IP_ALIGN);
1673 rx_ring->rx_buffer_info[i].skb = skb;
1674 rx_ring->rx_buffer_info[i].dma =
1675 dma_map_single(&pdev->dev, skb->data,
1676 IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
1677 rx_desc->read.pkt_addr =
1678 cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
1679 memset(skb->data, 0x00, skb->len);
1680 }
1681
1682 return 0; 1517 return 0;
1683 1518
1684err_nomem: 1519err_nomem:
@@ -1692,16 +1527,21 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1692 u32 reg_data; 1527 u32 reg_data;
1693 1528
1694 /* right now we only support MAC loopback in the driver */ 1529 /* right now we only support MAC loopback in the driver */
1695
1696 /* Setup MAC loopback */
1697 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1530 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1531 /* Setup MAC loopback */
1698 reg_data |= IXGBE_HLREG0_LPBK; 1532 reg_data |= IXGBE_HLREG0_LPBK;
1699 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); 1533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1700 1534
1535 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1536 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1537 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
1538
1701 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC); 1539 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
1702 reg_data &= ~IXGBE_AUTOC_LMS_MASK; 1540 reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1703 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; 1541 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1704 IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); 1542 IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
1543 IXGBE_WRITE_FLUSH(&adapter->hw);
1544 msleep(10);
1705 1545
1706 /* Disable Atlas Tx lanes; re-enabled in reset path */ 1546 /* Disable Atlas Tx lanes; re-enabled in reset path */
1707 if (hw->mac.type == ixgbe_mac_82598EB) { 1547 if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -1759,15 +1599,81 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
1759 return 13; 1599 return 13;
1760} 1600}
1761 1601
1602static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1603 struct ixgbe_ring *rx_ring,
1604 struct ixgbe_ring *tx_ring,
1605 unsigned int size)
1606{
1607 union ixgbe_adv_rx_desc *rx_desc;
1608 struct ixgbe_rx_buffer *rx_buffer_info;
1609 struct ixgbe_tx_buffer *tx_buffer_info;
1610 const int bufsz = rx_ring->rx_buf_len;
1611 u32 staterr;
1612 u16 rx_ntc, tx_ntc, count = 0;
1613
1614 /* initialize next to clean and descriptor values */
1615 rx_ntc = rx_ring->next_to_clean;
1616 tx_ntc = tx_ring->next_to_clean;
1617 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
1618 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1619
1620 while (staterr & IXGBE_RXD_STAT_DD) {
1621 /* check Rx buffer */
1622 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1623
1624 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
1625 dma_unmap_single(&adapter->pdev->dev,
1626 rx_buffer_info->dma,
1627 bufsz,
1628 DMA_FROM_DEVICE);
1629 rx_buffer_info->dma = 0;
1630
1631 /* verify contents of skb */
1632 if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
1633 count++;
1634
1635 /* unmap buffer on Tx side */
1636 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1637 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1638
1639 /* increment Rx/Tx next to clean counters */
1640 rx_ntc++;
1641 if (rx_ntc == rx_ring->count)
1642 rx_ntc = 0;
1643 tx_ntc++;
1644 if (tx_ntc == tx_ring->count)
1645 tx_ntc = 0;
1646
1647 /* fetch next descriptor */
1648 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
1649 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1650 }
1651
1652 /* re-map buffers to ring, store next to clean values */
1653 ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
1654 rx_ring->next_to_clean = rx_ntc;
1655 tx_ring->next_to_clean = tx_ntc;
1656
1657 return count;
1658}
1659
1762static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) 1660static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1763{ 1661{
1764 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1662 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1765 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1663 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1766 struct pci_dev *pdev = adapter->pdev; 1664 int i, j, lc, good_cnt, ret_val = 0;
1767 int i, j, k, l, lc, good_cnt, ret_val = 0; 1665 unsigned int size = 1024;
1768 unsigned long time; 1666 netdev_tx_t tx_ret_val;
1667 struct sk_buff *skb;
1668
1669 /* allocate test skb */
1670 skb = alloc_skb(size, GFP_KERNEL);
1671 if (!skb)
1672 return 11;
1769 1673
1770 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1); 1674 /* place data into test skb */
1675 ixgbe_create_lbtest_frame(skb, size);
1676 skb_put(skb, size);
1771 1677
1772 /* 1678 /*
1773 * Calculate the loop count based on the largest descriptor ring 1679 * Calculate the loop count based on the largest descriptor ring
@@ -1780,54 +1686,40 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1780 else 1686 else
1781 lc = ((rx_ring->count / 64) * 2) + 1; 1687 lc = ((rx_ring->count / 64) * 2) + 1;
1782 1688
1783 k = l = 0;
1784 for (j = 0; j <= lc; j++) { 1689 for (j = 0; j <= lc; j++) {
1785 for (i = 0; i < 64; i++) { 1690 /* reset count of good packets */
1786 ixgbe_create_lbtest_frame(
1787 tx_ring->tx_buffer_info[k].skb,
1788 1024);
1789 dma_sync_single_for_device(&pdev->dev,
1790 tx_ring->tx_buffer_info[k].dma,
1791 tx_ring->tx_buffer_info[k].length,
1792 DMA_TO_DEVICE);
1793 if (unlikely(++k == tx_ring->count))
1794 k = 0;
1795 }
1796 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
1797 msleep(200);
1798 /* set the start time for the receive */
1799 time = jiffies;
1800 good_cnt = 0; 1691 good_cnt = 0;
1801 do { 1692
1802 /* receive the sent packets */ 1693 /* place 64 packets on the transmit queue*/
1803 dma_sync_single_for_cpu(&pdev->dev, 1694 for (i = 0; i < 64; i++) {
1804 rx_ring->rx_buffer_info[l].dma, 1695 skb_get(skb);
1805 IXGBE_RXBUFFER_2048, 1696 tx_ret_val = ixgbe_xmit_frame_ring(skb,
1806 DMA_FROM_DEVICE); 1697 adapter->netdev,
1807 ret_val = ixgbe_check_lbtest_frame( 1698 adapter,
1808 rx_ring->rx_buffer_info[l].skb, 1024); 1699 tx_ring);
1809 if (!ret_val) 1700 if (tx_ret_val == NETDEV_TX_OK)
1810 good_cnt++; 1701 good_cnt++;
1811 if (++l == rx_ring->count) 1702 }
1812 l = 0; 1703
1813 /*
1814 * time + 20 msecs (200 msecs on 2.4) is more than
1815 * enough time to complete the receives, if it's
1816 * exceeded, break and error off
1817 */
1818 } while (good_cnt < 64 && jiffies < (time + 20));
1819 if (good_cnt != 64) { 1704 if (good_cnt != 64) {
1820 /* ret_val is the same as mis-compare */ 1705 ret_val = 12;
1821 ret_val = 13;
1822 break; 1706 break;
1823 } 1707 }
1824 if (jiffies >= (time + 20)) { 1708
1825 /* Error code for time out error */ 1709 /* allow 200 milliseconds for packets to go from Tx to Rx */
1826 ret_val = 14; 1710 msleep(200);
1711
1712 good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
1713 tx_ring, size);
1714 if (good_cnt != 64) {
1715 ret_val = 13;
1827 break; 1716 break;
1828 } 1717 }
1829 } 1718 }
1830 1719
1720 /* free the original skb */
1721 kfree_skb(skb);
1722
1831 return ret_val; 1723 return ret_val;
1832} 1724}
1833 1725
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 85ecf0e39b99..18163e312a39 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -601,9 +601,9 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
601 } 601 }
602} 602}
603 603
604static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 604void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
605 struct ixgbe_tx_buffer 605 struct ixgbe_tx_buffer
606 *tx_buffer_info) 606 *tx_buffer_info)
607{ 607{
608 if (tx_buffer_info->dma) { 608 if (tx_buffer_info->dma) {
609 if (tx_buffer_info->mapped_as_page) 609 if (tx_buffer_info->mapped_as_page)
@@ -1032,9 +1032,9 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
1032 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split 1032 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
1033 * @adapter: address of board private structure 1033 * @adapter: address of board private structure
1034 **/ 1034 **/
1035static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 1035void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
1036 struct ixgbe_ring *rx_ring, 1036 struct ixgbe_ring *rx_ring,
1037 int cleaned_count) 1037 int cleaned_count)
1038{ 1038{
1039 struct net_device *netdev = adapter->netdev; 1039 struct net_device *netdev = adapter->netdev;
1040 struct pci_dev *pdev = adapter->pdev; 1040 struct pci_dev *pdev = adapter->pdev;
@@ -1095,6 +1095,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
1095 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); 1095 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1096 } else { 1096 } else {
1097 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 1097 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1098 rx_desc->read.hdr_addr = 0;
1098 } 1099 }
1099 1100
1100 i++; 1101 i++;
@@ -2431,8 +2432,8 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2431 * 2432 *
2432 * Configure the Tx descriptor ring after a reset. 2433 * Configure the Tx descriptor ring after a reset.
2433 **/ 2434 **/
2434 static void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, 2435void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2435 struct ixgbe_ring *ring) 2436 struct ixgbe_ring *ring)
2436{ 2437{
2437 struct ixgbe_hw *hw = &adapter->hw; 2438 struct ixgbe_hw *hw = &adapter->hw;
2438 u64 tdba = ring->dma; 2439 u64 tdba = ring->dma;
@@ -2759,8 +2760,8 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2759 } 2760 }
2760} 2761}
2761 2762
2762static void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, 2763void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2763 struct ixgbe_ring *ring) 2764 struct ixgbe_ring *ring)
2764{ 2765{
2765 struct ixgbe_hw *hw = &adapter->hw; 2766 struct ixgbe_hw *hw = &adapter->hw;
2766 u64 rdba = ring->dma; 2767 u64 rdba = ring->dma;
@@ -3671,8 +3672,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3671 unsigned long size; 3672 unsigned long size;
3672 unsigned int i; 3673 unsigned int i;
3673 3674
3674 /* Free all the Rx ring sk_buffs */ 3675 /* ring already cleared, nothing to do */
3676 if (!rx_ring->rx_buffer_info)
3677 return;
3675 3678
3679 /* Free all the Rx ring sk_buffs */
3676 for (i = 0; i < rx_ring->count; i++) { 3680 for (i = 0; i < rx_ring->count; i++) {
3677 struct ixgbe_rx_buffer *rx_buffer_info; 3681 struct ixgbe_rx_buffer *rx_buffer_info;
3678 3682
@@ -3739,8 +3743,11 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3739 unsigned long size; 3743 unsigned long size;
3740 unsigned int i; 3744 unsigned int i;
3741 3745
3742 /* Free all the Tx ring sk_buffs */ 3746 /* ring already cleared, nothing to do */
3747 if (!tx_ring->tx_buffer_info)
3748 return;
3743 3749
3750 /* Free all the Tx ring sk_buffs */
3744 for (i = 0; i < tx_ring->count; i++) { 3751 for (i = 0; i < tx_ring->count; i++) {
3745 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3752 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3746 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 3753 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
@@ -6239,11 +6246,10 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6239 return skb_tx_hash(dev, skb); 6246 return skb_tx_hash(dev, skb);
6240} 6247}
6241 6248
6242static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, 6249netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
6243 struct net_device *netdev) 6250 struct ixgbe_adapter *adapter,
6251 struct ixgbe_ring *tx_ring)
6244{ 6252{
6245 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6246 struct ixgbe_ring *tx_ring;
6247 struct netdev_queue *txq; 6253 struct netdev_queue *txq;
6248 unsigned int first; 6254 unsigned int first;
6249 unsigned int tx_flags = 0; 6255 unsigned int tx_flags = 0;
@@ -6267,8 +6273,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6267 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6273 tx_flags |= IXGBE_TX_FLAGS_VLAN;
6268 } 6274 }
6269 6275
6270 tx_ring = adapter->tx_ring[skb->queue_mapping];
6271
6272#ifdef IXGBE_FCOE 6276#ifdef IXGBE_FCOE
6273 /* for FCoE with DCB, we force the priority to what 6277 /* for FCoE with DCB, we force the priority to what
6274 * was specified by the switch */ 6278 * was specified by the switch */
@@ -6362,6 +6366,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6362 return NETDEV_TX_OK; 6366 return NETDEV_TX_OK;
6363} 6367}
6364 6368
6369static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
6370{
6371 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6372 struct ixgbe_ring *tx_ring;
6373
6374 tx_ring = adapter->tx_ring[skb->queue_mapping];
6375 return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
6376}
6377
6365/** 6378/**
6366 * ixgbe_set_mac - Change the Ethernet Address of the NIC 6379 * ixgbe_set_mac - Change the Ethernet Address of the NIC
6367 * @netdev: network interface device structure 6380 * @netdev: network interface device structure