aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMarek Szyprowski <m.szyprowski@samsung.com>2015-10-13 07:47:20 -0400
committerInki Dae <inki.dae@samsung.com>2015-11-02 21:46:39 -0500
commitdf547bf7735a623500eedff9cc6716ac1d82b95d (patch)
treeb9bad57ddb558c935c9c761eabd1551f1461d2cc
parent01351315465fe58b9ab990554467773367f74cab (diff)
drm/exynos/gem: remove DMA-mapping hacks used for constructing page array
Exynos GEM objects contain an array of pointers to the pages, which the allocated buffer consists of. Till now the code used some hacks (like relying on DMA-mapping internal structures or using ARM-specific dma_to_pfn helper) to build this array. This patch fixes this by adding proper call to dma_get_sgtable_attrs() and using the acquired scatter-list to construct needed array. This approach is more portable (works also for ARM64) and finally fixes the layering violation that was present in this code. Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> Signed-off-by: Inki Dae <inki.dae@samsung.com>
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c58
1 file changed, 33 insertions, 25 deletions
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f1dcdd086886..252eb301470c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -25,6 +25,8 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
25 struct drm_device *dev = exynos_gem->base.dev; 25 struct drm_device *dev = exynos_gem->base.dev;
26 enum dma_attr attr; 26 enum dma_attr attr;
27 unsigned int nr_pages; 27 unsigned int nr_pages;
28 struct sg_table sgt;
29 int ret = -ENOMEM;
28 30
29 if (exynos_gem->dma_addr) { 31 if (exynos_gem->dma_addr) {
30 DRM_DEBUG_KMS("already allocated.\n"); 32 DRM_DEBUG_KMS("already allocated.\n");
@@ -56,13 +58,10 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
56 58
57 nr_pages = exynos_gem->size >> PAGE_SHIFT; 59 nr_pages = exynos_gem->size >> PAGE_SHIFT;
58 60
59 if (!is_drm_iommu_supported(dev)) { 61 exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
60 exynos_gem->pages = drm_calloc_large(nr_pages, 62 if (!exynos_gem->pages) {
61 sizeof(struct page *)); 63 DRM_ERROR("failed to allocate pages.\n");
62 if (!exynos_gem->pages) { 64 return -ENOMEM;
63 DRM_ERROR("failed to allocate pages.\n");
64 return -ENOMEM;
65 }
66 } 65 }
67 66
68 exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size, 67 exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
@@ -70,30 +69,40 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
70 &exynos_gem->dma_attrs); 69 &exynos_gem->dma_attrs);
71 if (!exynos_gem->cookie) { 70 if (!exynos_gem->cookie) {
72 DRM_ERROR("failed to allocate buffer.\n"); 71 DRM_ERROR("failed to allocate buffer.\n");
73 if (exynos_gem->pages) 72 goto err_free;
74 drm_free_large(exynos_gem->pages);
75 return -ENOMEM;
76 } 73 }
77 74
78 if (exynos_gem->pages) { 75 ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie,
79 dma_addr_t start_addr; 76 exynos_gem->dma_addr, exynos_gem->size,
80 unsigned int i = 0; 77 &exynos_gem->dma_attrs);
81 78 if (ret < 0) {
82 start_addr = exynos_gem->dma_addr; 79 DRM_ERROR("failed to get sgtable.\n");
83 while (i < nr_pages) { 80 goto err_dma_free;
84 exynos_gem->pages[i] = 81 }
85 pfn_to_page(dma_to_pfn(dev->dev, start_addr)); 82
86 start_addr += PAGE_SIZE; 83 if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
87 i++; 84 nr_pages)) {
88 } 85 DRM_ERROR("invalid sgtable.\n");
89 } else { 86 ret = -EINVAL;
90 exynos_gem->pages = exynos_gem->cookie; 87 goto err_sgt_free;
91 } 88 }
92 89
90 sg_free_table(&sgt);
91
93 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 92 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
94 (unsigned long)exynos_gem->dma_addr, exynos_gem->size); 93 (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
95 94
96 return 0; 95 return 0;
96
97err_sgt_free:
98 sg_free_table(&sgt);
99err_dma_free:
100 dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
101 exynos_gem->dma_addr, &exynos_gem->dma_attrs);
102err_free:
103 drm_free_large(exynos_gem->pages);
104
105 return ret;
97} 106}
98 107
99static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem) 108static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
@@ -112,8 +121,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
112 (dma_addr_t)exynos_gem->dma_addr, 121 (dma_addr_t)exynos_gem->dma_addr,
113 &exynos_gem->dma_attrs); 122 &exynos_gem->dma_attrs);
114 123
115 if (!is_drm_iommu_supported(dev)) 124 drm_free_large(exynos_gem->pages);
116 drm_free_large(exynos_gem->pages);
117} 125}
118 126
119static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 127static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,