author    Guy Shapiro <guysh@mellanox.com>    2015-04-15 11:17:57 -0400
committer Doug Ledford <dledford@redhat.com>  2015-05-05 09:18:02 -0400
commit    325ad0617adaf163e32dd2d857b90baf65a25b5b
tree      13383cd4f5442a70bff01f0f55d8007fcb03002a
parent    c1d383b5785b1e0fb5fb862864712a7208219e6a
IB/core: dma unmap optimizations
While unmapping an ODP writable page, the dirty bit of the page is set. In
order to do so, the head of the compound page is found. Currently, the
compound head is found even on non-writable pages, where it is never used,
leading to an unnecessary CPU barrier that impacts performance.

This patch moves the search for the compound head so that it is done only
when needed.

Signed-off-by: Guy Shapiro <guysh@mellanox.com>
Acked-by: Shachar Raindel <raindel@mellanox.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
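The change is a small instance of a general micro-optimization: defer an
expensive lookup into the branch that actually consumes its result, so the
common path pays nothing for it. Below is a minimal user-space C sketch of
that pattern, not the kernel code: struct page, lookup_head() and
unmap_one() here are hypothetical stand-ins for the kernel's struct page,
compound_head() and the unmap loop in ib_umem_odp_unmap_dma_pages().

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy page descriptor: a tail page points at its compound head. */
    struct page {
            struct page *head;      /* NULL for a head page */
            bool dirty;
    };

    /* Stand-in for compound_head(); imagine this costs an extra load
     * and a barrier, so it is worth skipping when the result is unused. */
    static struct page *lookup_head(struct page *p)
    {
            return p->head ? p->head : p;
    }

    static void unmap_one(struct page *p, bool writable)
    {
            /* Before the patch the head was computed unconditionally here.
             * After the patch it is computed only on the writable path. */
            if (writable) {
                    struct page *head = lookup_head(p);
                    head->dirty = true;     /* stand-in for set_page_dirty() */
            }
            /* ... the DMA unmap and put_page() equivalents would follow ... */
    }

    int main(void)
    {
            struct page head = { NULL, false };
            struct page tail = { &head, false };

            unmap_one(&tail, true);                 /* dirties the head page */
            printf("head dirty: %d\n", head.dirty); /* prints 1 */
            unmap_one(&tail, false);                /* head lookup skipped */
            return 0;
    }

Keeping the lookup inside the writable branch is what lets read-only unmaps
skip it entirely, which is exactly the saving the commit message describes.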
 drivers/infiniband/core/umem_odp.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index aba47398880d..40becdb3196e 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -637,7 +637,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
 		if (umem->odp_data->page_list[idx]) {
 			struct page *page = umem->odp_data->page_list[idx];
-			struct page *head_page = compound_head(page);
 			dma_addr_t dma = umem->odp_data->dma_list[idx];
 			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
 
@@ -645,7 +644,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 
 			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
 					  DMA_BIDIRECTIONAL);
-			if (dma & ODP_WRITE_ALLOWED_BIT)
+			if (dma & ODP_WRITE_ALLOWED_BIT) {
+				struct page *head_page = compound_head(page);
 				/*
 				 * set_page_dirty prefers being called with
 				 * the page lock. However, MMU notifiers are
@@ -656,6 +656,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 				 * be removed.
 				 */
 				set_page_dirty(head_page);
+			}
 			/* on demand pinning support */
 			if (!umem->context->invalidate_range)
 				put_page(page);