author	Alexander Duyck <alexander.h.duyck@intel.com>	2009-10-27 11:55:02 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-28 04:20:41 -0400
commit	ad93d17efe063b6e95f3177fa01706f3b3b15dde (patch)
tree	49b8e6043aba4c499faeb43be01d046ed3ec039f /drivers/net/igb/igb_ethtool.c
parent	b1a436c34c44c6e3fb03c12545d87b4c2818f31d (diff)
igb: make ethtool use core xmit map and free functionality
This change adds a clean_rx/tx_irq-style function call to the ethtool
loopback test, allowing us to exercise the core transmit and receive
functionality in the driver.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/igb/igb_ethtool.c')
-rw-r--r--	drivers/net/igb/igb_ethtool.c	156
1 file changed, 89 insertions, 67 deletions
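
In short, the patch drops the hand-rolled descriptor setup and polling from
the loopback test and drives it through the same xmit and clean helpers the
driver uses at runtime. The following condensed sketch is a paraphrase of the
new flow assembled from the hunks below, not a verbatim copy; error paths are
simplified and the real code picks the larger of the two rings when computing
the loop count.

/* Condensed paraphrase of the reworked igb_run_loopback_test() flow.
 * One skb is built once and resubmitted through the real xmit path;
 * skb_get() takes a reference per send because the tx-clean path frees
 * one reference per completed frame.
 */
struct igb_ring *tx_ring = &adapter->test_tx_ring;
struct igb_ring *rx_ring = &adapter->test_rx_ring;
unsigned int size = 1024;
int i, j, lc, good_cnt, ret_val = 0;
struct sk_buff *skb;

skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
	return 11;				/* allocation failure */
igb_create_lbtest_frame(skb, size);		/* recognizable payload */
skb_put(skb, size);

/* wrap the ring in bursts of 64 frames (sketch assumes tx ring is the
 * larger ring; see the hunk below for the actual selection) */
lc = ((tx_ring->count / 64) * 2) + 1;

for (j = 0; j <= lc; j++) {
	good_cnt = 0;
	for (i = 0; i < 64; i++) {		/* queue a burst of 64 */
		skb_get(skb);			/* ref consumed by tx clean */
		if (igb_xmit_frame_ring_adv(skb, tx_ring) == NETDEV_TX_OK)
			good_cnt++;
	}
	if (good_cnt != 64) {
		ret_val = 12;			/* transmit failure */
		break;
	}
	msleep(200);				/* let frames loop back */
	if (igb_clean_test_rings(rx_ring, tx_ring, size) != 64) {
		ret_val = 13;			/* receive/verify failure */
		break;
	}
}
kfree_skb(skb);					/* drop the test's own ref */

The reference counting is the key design point: the tx-clean path
(igb_unmap_and_free_tx_resource() in the diff) drops one reference per
completed frame, so the extra skb_get() per send keeps the shared skb alive
across all 64 submissions while the test holds its own reference until the
final kfree_skb().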
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 80afd8a0b123..aa05f00966e2 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1254,7 +1254,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
 	struct e1000_hw *hw = &adapter->hw;
-	int i, ret_val;
+	int ret_val;
 
 	/* Setup Tx descriptor ring and Tx buffers */
 	tx_ring->count = IGB_DEFAULT_TXD;
@@ -1270,34 +1270,6 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
 	igb_setup_tctl(adapter);
 	igb_configure_tx_ring(adapter, tx_ring);
 
-	for (i = 0; i < tx_ring->count; i++) {
-		union e1000_adv_tx_desc *tx_desc;
-		unsigned int size = 1024;
-		struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
-
-		if (!skb) {
-			ret_val = 2;
-			goto err_nomem;
-		}
-		skb_put(skb, size);
-		tx_ring->buffer_info[i].skb = skb;
-		tx_ring->buffer_info[i].length = skb->len;
-		tx_ring->buffer_info[i].dma =
-			pci_map_single(tx_ring->pdev, skb->data, skb->len,
-				       PCI_DMA_TODEVICE);
-		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
-		tx_desc->read.buffer_addr =
-			cpu_to_le64(tx_ring->buffer_info[i].dma);
-		tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
-					      E1000_ADVTXD_PAYLEN_SHIFT;
-		tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
-		tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
-							  E1000_TXD_CMD_IFCS |
-							  E1000_TXD_CMD_RS |
-							  E1000_ADVTXD_DTYP_DATA |
-							  E1000_ADVTXD_DCMD_DEXT);
-	}
-
 	/* Setup Rx descriptor ring and Rx buffers */
 	rx_ring->count = IGB_DEFAULT_RXD;
 	rx_ring->pdev = adapter->pdev;
@@ -1470,14 +1442,78 @@ static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 	return 13;
 }
 
+static int igb_clean_test_rings(struct igb_ring *rx_ring,
+				struct igb_ring *tx_ring,
+				unsigned int size)
+{
+	union e1000_adv_rx_desc *rx_desc;
+	struct igb_buffer *buffer_info;
+	int rx_ntc, tx_ntc, count = 0;
+	u32 staterr;
+
+	/* initialize next to clean and descriptor values */
+	rx_ntc = rx_ring->next_to_clean;
+	tx_ntc = tx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		/* check rx buffer */
+		buffer_info = &rx_ring->buffer_info[rx_ntc];
+
+		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
+		pci_unmap_single(rx_ring->pdev,
+				 buffer_info->dma,
+				 rx_ring->rx_buffer_len,
+				 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		/* verify contents of skb */
+		if (!igb_check_lbtest_frame(buffer_info->skb, size))
+			count++;
+
+		/* unmap buffer on tx side */
+		buffer_info = &tx_ring->buffer_info[tx_ntc];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+
+		/* increment rx/tx next to clean counters */
+		rx_ntc++;
+		if (rx_ntc == rx_ring->count)
+			rx_ntc = 0;
+		tx_ntc++;
+		if (tx_ntc == tx_ring->count)
+			tx_ntc = 0;
+
+		/* fetch next descriptor */
+		rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+
+	/* re-map buffers to ring, store next to clean values */
+	igb_alloc_rx_buffers_adv(rx_ring, count);
+	rx_ring->next_to_clean = rx_ntc;
+	tx_ring->next_to_clean = tx_ntc;
+
+	return count;
+}
+
 static int igb_run_loopback_test(struct igb_adapter *adapter)
 {
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	int i, j, k, l, lc, good_cnt, ret_val = 0;
-	unsigned long time;
+	int i, j, lc, good_cnt, ret_val = 0;
+	unsigned int size = 1024;
+	netdev_tx_t tx_ret_val;
+	struct sk_buff *skb;
+
+	/* allocate test skb */
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return 11;
 
-	writel(rx_ring->count - 1, rx_ring->tail);
+	/* place data into test skb */
+	igb_create_lbtest_frame(skb, size);
+	skb_put(skb, size);
 
 	/* Calculate the loop count based on the largest descriptor ring
 	 * The idea is to wrap the largest ring a number of times using 64
@@ -1489,50 +1525,36 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 	else
 		lc = ((rx_ring->count / 64) * 2) + 1;
 
-	k = l = 0;
 	for (j = 0; j <= lc; j++) { /* loop count loop */
-		for (i = 0; i < 64; i++) { /* send the packets */
-			igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
-						1024);
-			pci_dma_sync_single_for_device(tx_ring->pdev,
-				tx_ring->buffer_info[k].dma,
-				tx_ring->buffer_info[k].length,
-				PCI_DMA_TODEVICE);
-			k++;
-			if (k == tx_ring->count)
-				k = 0;
-		}
-		writel(k, tx_ring->tail);
-		msleep(200);
-		time = jiffies; /* set the start time for the receive */
+		/* reset count of good packets */
 		good_cnt = 0;
-		do { /* receive the sent packets */
-			pci_dma_sync_single_for_cpu(rx_ring->pdev,
-				rx_ring->buffer_info[l].dma,
-				IGB_RXBUFFER_2048,
-				PCI_DMA_FROMDEVICE);
-
-			ret_val = igb_check_lbtest_frame(
-				rx_ring->buffer_info[l].skb, 1024);
-			if (!ret_val)
+
+		/* place 64 packets on the transmit queue*/
+		for (i = 0; i < 64; i++) {
+			skb_get(skb);
+			tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
+			if (tx_ret_val == NETDEV_TX_OK)
 				good_cnt++;
-			l++;
-			if (l == rx_ring->count)
-				l = 0;
-			/* time + 20 msecs (200 msecs on 2.4) is more than
-			 * enough time to complete the receives, if it's
-			 * exceeded, break and error off
-			 */
-		} while (good_cnt < 64 && jiffies < (time + 20));
+		}
+
 		if (good_cnt != 64) {
-			ret_val = 13; /* ret_val is the same as mis-compare */
+			ret_val = 12;
 			break;
 		}
-		if (jiffies >= (time + 20)) {
-			ret_val = 14; /* error code for time out error */
+
+		/* allow 200 milliseconds for packets to go from tx to rx */
+		msleep(200);
+
+		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
+		if (good_cnt != 64) {
+			ret_val = 13;
			break;
 		}
 	} /* end loop count loop */
+
+	/* free the original skb */
+	kfree_skb(skb);
+
 	return ret_val;
 }
 
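For context, this loopback path runs as part of the adapter's offline
self-test, normally invoked from userspace with "ethtool -t ethX offline".
Below is a minimal, hedged sketch of the underlying ETHTOOL_TEST ioctl. The
8-slot result array is an assumption (a robust tool would size it by querying
the ETH_SS_TEST string set via ETHTOOL_GSSET_INFO), and nonzero data[] entries
should surface per-test failure codes, with the loopback slot carrying values
like the 11/12/13 returns above.

/* Minimal ETHTOOL_TEST invoker -- a sketch, not a replacement for ethtool.
 * Assumes at most 8 result slots; requires CAP_NET_ADMIN, and the offline
 * test briefly disrupts the link.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_test *test;
	struct ifreq ifr;
	unsigned int i;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* 8 result slots assumed; see note above */
	test = calloc(1, sizeof(*test) + 8 * sizeof(test->data[0]));
	if (!test)
		return 1;
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* offline includes loopback */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)test;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}

	/* the kernel rewrites test->len with the driver's test count */
	printf("self-test %s\n",
	       (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "passed");
	for (i = 0; i < test->len && i < 8; i++)
		printf("  result[%u] = %llu\n",
		       i, (unsigned long long)test->data[i]);

	free(test);
	close(fd);
	return 0;
}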