author    Alexander Duyck <alexander.h.duyck@intel.com>  2012-09-24 20:31:02 -0400
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>     2012-10-19 07:30:25 -0400
commit    cbc8e55f6fdae27b667051b36040f66768ef79f2 (patch)
tree      b8bc5e6391da51786627aaa55d79fa384bb8b47a /drivers/net/ethernet/intel/igb/igb_ethtool.c
parent    db2ee5bdf5c83320fa19f73a38204585f1518798 (diff)
igb: Map entire page and sync half instead of mapping and unmapping half pages
This change makes it so that we map the entire page and just sync half of
it for the device at a time. The advantage to this approach is that we can
avoid the locking on map/unmap seen in many IOMMU implementations.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
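For readers unfamiliar with the DMA API calls this patch switches to, below is a
minimal sketch of the map-once/sync-per-half-page pattern the message describes.
This is illustrative only, not igb code: struct rx_half_page and the rx_half_*
helper names are hypothetical, while dma_map_page(), dma_mapping_error(),
dma_sync_single_for_cpu() and dma_sync_single_for_device() are the generic
kernel DMA API the driver builds on.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct rx_half_page {			/* hypothetical, for illustration */
	struct page *page;
	dma_addr_t dma;			/* mapping covers the entire page */
	unsigned int offset;		/* 0 or PAGE_SIZE / 2 */
};

static int rx_page_setup(struct device *dev, struct rx_half_page *buf)
{
	buf->page = alloc_page(GFP_ATOMIC);
	if (!buf->page)
		return -ENOMEM;

	/* map the whole page once, up front */
	buf->dma = dma_map_page(dev, buf->page, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, buf->dma)) {
		__free_page(buf->page);
		return -ENOMEM;
	}
	buf->offset = 0;
	return 0;
}

/* hand a received half page to the CPU: sync only, no unmap */
static void rx_half_for_cpu(struct device *dev, struct rx_half_page *buf)
{
	dma_sync_single_for_cpu(dev, buf->dma + buf->offset,
				PAGE_SIZE / 2, DMA_FROM_DEVICE);
}

/* give the half page back to the device: sync only, no remap */
static void rx_half_for_device(struct device *dev, struct rx_half_page *buf)
{
	dma_sync_single_for_device(dev, buf->dma + buf->offset,
				   PAGE_SIZE / 2, DMA_FROM_DEVICE);
}

At teardown the whole page would be released in one step with
dma_unmap_page(dev, buf->dma, PAGE_SIZE, DMA_FROM_DEVICE) and
__free_page(buf->page); the per-packet fast path then only ever pays for a
cache sync, never for a map or unmap.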
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_ethtool.c')
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 0faac423bd5b..96c6df65726f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1694,7 +1694,7 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
 
 	frame_size >>= 1;
 
-	data = kmap(rx_buffer->page) + rx_buffer->page_offset;
+	data = kmap(rx_buffer->page);
 
 	if (data[3] != 0xFF ||
 	    data[frame_size + 10] != 0xBE ||
@@ -1713,9 +1713,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_rx_buffer *rx_buffer_info;
 	struct igb_tx_buffer *tx_buffer_info;
-	struct netdev_queue *txq;
 	u16 rx_ntc, tx_ntc, count = 0;
-	unsigned int total_bytes = 0, total_packets = 0;
 
 	/* initialize next to clean and descriptor values */
 	rx_ntc = rx_ring->next_to_clean;
@@ -1726,21 +1724,24 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		/* check rx buffer */
 		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
-		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
-		dma_unmap_single(rx_ring->dev,
-				 rx_buffer_info->dma,
-				 PAGE_SIZE / 2,
-				 DMA_FROM_DEVICE);
-		rx_buffer_info->dma = 0;
+		/* sync Rx buffer for CPU read */
+		dma_sync_single_for_cpu(rx_ring->dev,
+					rx_buffer_info->dma,
+					PAGE_SIZE / 2,
+					DMA_FROM_DEVICE);
 
 		/* verify contents of skb */
 		if (igb_check_lbtest_frame(rx_buffer_info, size))
 			count++;
 
+		/* sync Rx buffer for device write */
+		dma_sync_single_for_device(rx_ring->dev,
+					   rx_buffer_info->dma,
+					   PAGE_SIZE / 2,
+					   DMA_FROM_DEVICE);
+
 		/* unmap buffer on tx side */
 		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
-		total_bytes += tx_buffer_info->bytecount;
-		total_packets += tx_buffer_info->gso_segs;
 		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 
 		/* increment rx/tx next to clean counters */
@@ -1755,8 +1756,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
 	}
 
-	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
-	netdev_tx_completed_queue(txq, total_packets, total_bytes);
+	netdev_tx_reset_queue(txring_txq(tx_ring));
 
 	/* re-map buffers to ring, store next to clean values */
 	igb_alloc_rx_buffers(rx_ring, count);