about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2017-09-20 08:07:02 -0400
committerAlex Deucher <alexander.deucher@amd.com>2017-10-09 16:30:17 -0400
commit6056a1a565547743c5a87dc3d9c51d086acf9c27 (patch)
tree838eb114d2cd6f8954ed1b03ffa9e2b53263e512
parentc6e839a3e299bbff991a3b4136f96ccaca4b276f (diff)
drm/ttm: DMA map/unmap consecutive pages as a whole v2
Instead of mapping them bit by bit map/unmap all consecutive pages as in one call. v2: test for consecutive pages instead of using compound page order. Signed-off-by: Christian König <christian.koenig@amd.com> Acked-by: Felix Kuehling <Felix.Kuehling@amd.com> Acked-by: Alex Deucher <alexander.deucher@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc.c | 48
1 file changed, 39 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 482dd9aa2c84..6c852e81660b 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -922,16 +922,26 @@ EXPORT_SYMBOL(ttm_pool_unpopulate);
922#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) 922#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
923int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) 923int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
924{ 924{
925 unsigned i; 925 unsigned i, j;
926 int r; 926 int r;
927 927
928 r = ttm_pool_populate(&tt->ttm); 928 r = ttm_pool_populate(&tt->ttm);
929 if (r) 929 if (r)
930 return r; 930 return r;
931 931
932 for (i = 0; i < tt->ttm.num_pages; i++) { 932 for (i = 0; i < tt->ttm.num_pages; ++i) {
933 struct page *p = tt->ttm.pages[i];
934 size_t num_pages = 1;
935
936 for (j = i + 1; j < tt->ttm.num_pages; ++j) {
937 if (++p != tt->ttm.pages[j])
938 break;
939
940 ++num_pages;
941 }
942
933 tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], 943 tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
934 0, PAGE_SIZE, 944 0, num_pages * PAGE_SIZE,
935 DMA_BIDIRECTIONAL); 945 DMA_BIDIRECTIONAL);
936 if (dma_mapping_error(dev, tt->dma_address[i])) { 946 if (dma_mapping_error(dev, tt->dma_address[i])) {
937 while (i--) { 947 while (i--) {
@@ -942,6 +952,11 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
942 ttm_pool_unpopulate(&tt->ttm); 952 ttm_pool_unpopulate(&tt->ttm);
943 return -EFAULT; 953 return -EFAULT;
944 } 954 }
955
956 for (j = 1; j < num_pages; ++j) {
957 tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
958 ++i;
959 }
945 } 960 }
946 return 0; 961 return 0;
947} 962}
@@ -949,13 +964,28 @@ EXPORT_SYMBOL(ttm_populate_and_map_pages);
949 964
950void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) 965void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
951{ 966{
952 unsigned i; 967 unsigned i, j;
953 968
954 for (i = 0; i < tt->ttm.num_pages; i++) { 969 for (i = 0; i < tt->ttm.num_pages;) {
955 if (tt->dma_address[i]) { 970 struct page *p = tt->ttm.pages[i];
956 dma_unmap_page(dev, tt->dma_address[i], 971 size_t num_pages = 1;
957 PAGE_SIZE, DMA_BIDIRECTIONAL); 972
973 if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
974 ++i;
975 continue;
958 } 976 }
977
978 for (j = i + 1; j < tt->ttm.num_pages; ++j) {
979 if (++p != tt->ttm.pages[j])
980 break;
981
982 ++num_pages;
983 }
984
985 dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
986 DMA_BIDIRECTIONAL);
987
988 i += num_pages;
959 } 989 }
960 ttm_pool_unpopulate(&tt->ttm); 990 ttm_pool_unpopulate(&tt->ttm);
961} 991}