author		Sathya Perla <sathya.perla@emulex.com>	2014-03-04 01:44:38 -0500
committer	David S. Miller <davem@davemloft.net>	2014-03-04 16:17:53 -0500
commit		e50287be7c007a10e6e2e3332e52466faf4b6a04 (patch)
tree		7ecf4f04fe32210aba3e7c4a3f9fd0c369c619b5
parent		9e82e7f4af0ad90ab41b4ae80ce42b20df20c725 (diff)
be2net: dma_sync each RX frag before passing it to the stack
The driver currently maps a page for DMA, divides the page into multiple
frags and posts them to the HW. It un-maps the page after data is
received on all the frags of the page. This scheme doesn't work when
bounce buffers are used for DMA (swiotlb=force kernel param).

This patch fixes the problem by calling dma_sync_single_for_cpu() for
each frag (except the last one) so that the data is copied from the
bounce buffers. The page is un-mapped only when DMA finishes on the
last frag of the page. (Thanks to Ben H. for suggesting the dma_sync
API!)

This patch also renames the "last_page_user" field of the
be_rx_page_info{} struct to "last_frag" to improve the readability of
the fixed code.

Reported-by: Li Fengmao <li.fengmao@zte.com.cn>
Signed-off-by: Sathya Perla <sathya.perla@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/emulex/benet/be.h		3
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_main.c	32
2 files changed, 24 insertions(+), 11 deletions(-)
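For readers new to the streaming DMA API, here is a minimal sketch of
the scheme the patch adopts. The helper below is hypothetical, not from
the be2net sources: one big page is mapped once with dma_map_page(),
carved into frag_size chunks, and each chunk is made CPU-coherent on
completion. With swiotlb bounce buffers, dma_sync_single_for_cpu() is
what copies the device's data back for every frag except the last; the
single dma_unmap_page() on the last frag performs the same copy-back
before tearing the mapping down.

#include <linux/dma-mapping.h>

/* Hypothetical helper illustrating the patch's scheme: page_dma is the
 * DMA address returned by dma_map_page() for the whole page. Every frag
 * but the last is only synced; the mapping is torn down exactly once,
 * on the last frag.
 */
static void demo_frag_complete(struct device *dev, dma_addr_t page_dma,
			       size_t page_size, size_t frag_offset,
			       size_t frag_size, bool last_frag)
{
	if (last_frag)
		/* unmap syncs the buffer for the CPU before releasing it */
		dma_unmap_page(dev, page_dma, page_size, DMA_FROM_DEVICE);
	else
		/* copy this frag back from any bounce buffer */
		dma_sync_single_for_cpu(dev, page_dma + frag_offset,
					frag_size, DMA_FROM_DEVICE);

	/* the frag's payload is now coherent and can go to the stack */
}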
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index a150401a6cb3..bf5ca71df77f 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -261,9 +261,10 @@ struct be_tx_obj {
 /* Struct to remember the pages posted for rx frags */
 struct be_rx_page_info {
 	struct page *page;
+	/* set to page-addr for last frag of the page & frag-addr otherwise */
 	DEFINE_DMA_UNMAP_ADDR(bus);
 	u16 page_offset;
-	bool last_page_user;
+	bool last_frag;		/* last frag of the page */
 };
 
 struct be_rx_stats {
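The new comment above encodes a convention the rest of the patch relies
on: the bus field feeds dma_unmap_page() (which needs the page's DMA
base address) when last_frag is set, and dma_sync_single_for_cpu()
(which needs the frag's own DMA address) otherwise. A condensed sketch
of how the two sides pair up, with the surrounding driver context
elided:

/* posting side: store whichever address the completion path will need */
if (page_info->last_frag)
	dma_unmap_addr_set(page_info, bus, page_dmaaddr);	/* page base */
else
	dma_unmap_addr_set(page_info, bus, frag_dmaaddr);	/* this frag */

/* completion side: the stored address matches the call consuming it */
if (page_info->last_frag)
	dma_unmap_page(dev, dma_unmap_addr(page_info, bus),
		       big_page_size, DMA_FROM_DEVICE);
else
	dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, bus),
				rx_frag_size, DMA_FROM_DEVICE);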
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4f87f5c0b03c..34644969a4be 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1448,11 +1448,15 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
 	rx_page_info = &rxo->page_info_tbl[frag_idx];
 	BUG_ON(!rx_page_info->page);
 
-	if (rx_page_info->last_page_user) {
+	if (rx_page_info->last_frag) {
 		dma_unmap_page(&adapter->pdev->dev,
 			       dma_unmap_addr(rx_page_info, bus),
 			       adapter->big_page_size, DMA_FROM_DEVICE);
-		rx_page_info->last_page_user = false;
+		rx_page_info->last_frag = false;
+	} else {
+		dma_sync_single_for_cpu(&adapter->pdev->dev,
+					dma_unmap_addr(rx_page_info, bus),
+					rx_frag_size, DMA_FROM_DEVICE);
 	}
 
 	queue_tail_inc(rxq);
@@ -1786,17 +1790,16 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 				rx_stats(rxo)->rx_post_fail++;
 				break;
 			}
-			page_info->page_offset = 0;
+			page_offset = 0;
 		} else {
 			get_page(pagep);
-			page_info->page_offset = page_offset + rx_frag_size;
+			page_offset += rx_frag_size;
 		}
-		page_offset = page_info->page_offset;
+		page_info->page_offset = page_offset;
 		page_info->page = pagep;
-		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
-		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
 
 		rxd = queue_head_node(rxq);
+		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
 		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
 		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
 
@@ -1804,15 +1807,24 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 		if ((page_offset + rx_frag_size + rx_frag_size) >
 					adapter->big_page_size) {
 			pagep = NULL;
-			page_info->last_page_user = true;
+			page_info->last_frag = true;
+			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
+		} else {
+			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
 		}
 
 		prev_page_info = page_info;
 		queue_head_inc(rxq);
 		page_info = &rxo->page_info_tbl[rxq->head];
 	}
-	if (pagep)
-		prev_page_info->last_page_user = true;
+
+	/* Mark the last frag of a page when we break out of the above loop
+	 * with no more slots available in the RXQ
+	 */
+	if (pagep) {
+		prev_page_info->last_frag = true;
+		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
+	}
 
 	if (posted) {
 		atomic_add(posted, &rxq->used);
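One subtlety in the final hunk: when the loop exits because the RXQ ran
out of slots rather than because the page was fully carved up, the most
recently posted frag becomes the page's effective last frag. Its bus
field was set to the frag address on the posting path, so the cleanup
rewrites it to the page base (page_dmaaddr) while setting last_frag;
the eventual dma_unmap_page() then covers the entire mapping.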