 drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 101
 drivers/gpu/drm/rockchip/rockchip_drm_drv.h |   6
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 244
 drivers/gpu/drm/rockchip/rockchip_drm_gem.h |   8
 4 files changed, 298 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index c30d649cb147..b360e6251836 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -14,19 +14,19 @@
  * GNU General Public License for more details.
  */
 
-#include <asm/dma-iommu.h>
-
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-iommu.h>
 #include <linux/pm_runtime.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
 #include <linux/component.h>
 #include <linux/console.h>
+#include <linux/iommu.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_fb.h"
@@ -50,28 +50,31 @@ static struct drm_driver rockchip_drm_driver;
 int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
                                    struct device *dev)
 {
-        struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
+        struct rockchip_drm_private *private = drm_dev->dev_private;
         int ret;
 
         if (!is_support_iommu)
                 return 0;
 
-        ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
-        if (ret)
+        ret = iommu_attach_device(private->domain, dev);
+        if (ret) {
+                dev_err(dev, "Failed to attach iommu device\n");
                 return ret;
+        }
 
-        dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-
-        return arm_iommu_attach_device(dev, mapping);
+        return 0;
 }
 
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
                                     struct device *dev)
 {
+        struct rockchip_drm_private *private = drm_dev->dev_private;
+        struct iommu_domain *domain = private->domain;
+
         if (!is_support_iommu)
                 return;
 
-        arm_iommu_detach_device(dev);
+        iommu_detach_device(domain, dev);
 }
 
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
@@ -123,11 +126,46 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
         priv->crtc_funcs[pipe]->disable_vblank(crtc);
 }
 
+static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
+{
+        struct rockchip_drm_private *private = drm_dev->dev_private;
+        struct iommu_domain_geometry *geometry;
+        u64 start, end;
+
+        if (!is_support_iommu)
+                return 0;
+
+        private->domain = iommu_domain_alloc(&platform_bus_type);
+        if (!private->domain)
+                return -ENOMEM;
+
+        geometry = &private->domain->geometry;
+        start = geometry->aperture_start;
+        end = geometry->aperture_end;
+
+        DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n",
+                  start, end);
+        drm_mm_init(&private->mm, start, end - start + 1);
+        mutex_init(&private->mm_lock);
+
+        return 0;
+}
+
+static void rockchip_iommu_cleanup(struct drm_device *drm_dev)
+{
+        struct rockchip_drm_private *private = drm_dev->dev_private;
+
+        if (!is_support_iommu)
+                return;
+
+        drm_mm_takedown(&private->mm);
+        iommu_domain_free(private->domain);
+}
+
 static int rockchip_drm_bind(struct device *dev)
 {
         struct drm_device *drm_dev;
         struct rockchip_drm_private *private;
-        struct dma_iommu_mapping *mapping = NULL;
         int ret;
 
         drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
@@ -151,38 +189,14 @@ static int rockchip_drm_bind(struct device *dev)
 
         rockchip_drm_mode_config_init(drm_dev);
 
-        dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
-                                      GFP_KERNEL);
-        if (!dev->dma_parms) {
-                ret = -ENOMEM;
+        ret = rockchip_drm_init_iommu(drm_dev);
+        if (ret)
                 goto err_config_cleanup;
-        }
-
-        if (is_support_iommu) {
-                /* TODO(djkurtz): fetch the mapping start/size from somewhere */
-                mapping = arm_iommu_create_mapping(&platform_bus_type,
-                                                   0x00000000,
-                                                   SZ_2G);
-                if (IS_ERR(mapping)) {
-                        ret = PTR_ERR(mapping);
-                        goto err_config_cleanup;
-                }
-
-                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-                if (ret)
-                        goto err_release_mapping;
-
-                dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-
-                ret = arm_iommu_attach_device(dev, mapping);
-                if (ret)
-                        goto err_release_mapping;
-        }
 
         /* Try to bind all sub drivers. */
         ret = component_bind_all(dev, drm_dev);
         if (ret)
-                goto err_detach_device;
+                goto err_iommu_cleanup;
 
         /* init kms poll for handling hpd */
         drm_kms_helper_poll_init(drm_dev);
@@ -207,8 +221,6 @@ static int rockchip_drm_bind(struct device *dev)
         if (ret)
                 goto err_fbdev_fini;
 
-        if (is_support_iommu)
-                arm_iommu_release_mapping(mapping);
         return 0;
 err_fbdev_fini:
         rockchip_drm_fbdev_fini(drm_dev);
@@ -217,12 +229,8 @@ err_vblank_cleanup:
 err_kms_helper_poll_fini:
         drm_kms_helper_poll_fini(drm_dev);
         component_unbind_all(dev, drm_dev);
-err_detach_device:
-        if (is_support_iommu)
-                arm_iommu_detach_device(dev);
-err_release_mapping:
-        if (is_support_iommu)
-                arm_iommu_release_mapping(mapping);
+err_iommu_cleanup:
+        rockchip_iommu_cleanup(drm_dev);
 err_config_cleanup:
         drm_mode_config_cleanup(drm_dev);
         drm_dev->dev_private = NULL;
@@ -239,8 +247,7 @@ static void rockchip_drm_unbind(struct device *dev)
         drm_vblank_cleanup(drm_dev);
         drm_kms_helper_poll_fini(drm_dev);
         component_unbind_all(dev, drm_dev);
-        if (is_support_iommu)
-                arm_iommu_detach_device(dev);
+        rockchip_iommu_cleanup(drm_dev);
         drm_mode_config_cleanup(drm_dev);
         drm_dev->dev_private = NULL;
         drm_dev_unregister(drm_dev);
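
[Editor's note] The rockchip_drm_drv.c hunks above drop the ARM-specific arm_iommu_* mapping in favour of a driver-owned IOMMU domain: the domain is allocated once at bind time, its reported aperture seeds a drm_mm allocator for I/O virtual addresses, and each display device is attached with iommu_attach_device(). A minimal, self-contained sketch of that pattern follows; the example_* names and the trimmed-down private struct are illustrative only, not part of the patch.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <drm/drm_mm.h>

struct example_private {                        /* hypothetical, for illustration */
        struct iommu_domain *domain;            /* one domain per DRM device */
        struct drm_mm mm;                       /* IOVA allocator over the aperture */
        struct mutex mm_lock;
};

static int example_iommu_init(struct example_private *priv)
{
        struct iommu_domain_geometry *geometry;

        priv->domain = iommu_domain_alloc(&platform_bus_type);
        if (!priv->domain)
                return -ENOMEM;

        /* The IOMMU reports its usable IOVA range; drm_mm hands out chunks of it. */
        geometry = &priv->domain->geometry;
        drm_mm_init(&priv->mm, geometry->aperture_start,
                    geometry->aperture_end - geometry->aperture_start + 1);
        mutex_init(&priv->mm_lock);

        return 0;
}

static int example_attach(struct example_private *priv, struct device *dev)
{
        /* Replaces arm_iommu_attach_device(); not tied to 32-bit ARM. */
        return iommu_attach_device(priv->domain, dev);
}
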
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index fb6226cf84b7..adc39302bec5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -30,6 +30,7 @@
 
 struct drm_device;
 struct drm_connector;
+struct iommu_domain;
 
 /*
  * Rockchip drm private crtc funcs.
@@ -60,7 +61,10 @@ struct rockchip_drm_private {
         struct drm_gem_object *fbdev_bo;
         const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
         struct drm_atomic_state *state;
-
+        struct iommu_domain *domain;
+        /* protect drm_mm on multi-threads */
+        struct mutex mm_lock;
+        struct drm_mm mm;
         struct list_head psr_list;
         spinlock_t psr_list_lock;
 };
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index b70f9423379c..df9e57064f19 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -16,11 +16,146 @@
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_vma_manager.h>
+#include <linux/iommu.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
 
-static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
+static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
+{
+        struct drm_device *drm = rk_obj->base.dev;
+        struct rockchip_drm_private *private = drm->dev_private;
+        int prot = IOMMU_READ | IOMMU_WRITE;
+        ssize_t ret;
+
+        mutex_lock(&private->mm_lock);
+
+        ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
+                                         rk_obj->base.size, PAGE_SIZE,
+                                         0, 0);
+
+        mutex_unlock(&private->mm_lock);
+        if (ret < 0) {
+                DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
+                return ret;
+        }
+
+        rk_obj->dma_addr = rk_obj->mm.start;
+
+        ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
+                           rk_obj->sgt->nents, prot);
+        if (ret < rk_obj->base.size) {
+                DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
+                          ret, rk_obj->base.size);
+                ret = -ENOMEM;
+                goto err_remove_node;
+        }
+
+        rk_obj->size = ret;
+
+        return 0;
+
+err_remove_node:
+        drm_mm_remove_node(&rk_obj->mm);
+
+        return ret;
+}
+
+static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
+{
+        struct drm_device *drm = rk_obj->base.dev;
+        struct rockchip_drm_private *private = drm->dev_private;
+
+        iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);
+
+        mutex_lock(&private->mm_lock);
+
+        drm_mm_remove_node(&rk_obj->mm);
+
+        mutex_unlock(&private->mm_lock);
+
+        return 0;
+}
+
+static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
+{
+        struct drm_device *drm = rk_obj->base.dev;
+        int ret, i;
+        struct scatterlist *s;
+
+        rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
+        if (IS_ERR(rk_obj->pages))
+                return PTR_ERR(rk_obj->pages);
+
+        rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
+
+        rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+        if (IS_ERR(rk_obj->sgt)) {
+                ret = PTR_ERR(rk_obj->sgt);
+                goto err_put_pages;
+        }
+
+        /*
+         * Fake up the SG table so that dma_sync_sg_for_device() can be used
+         * to flush the pages associated with it.
+         *
+         * TODO: Replace this by drm_clflush_sg() once it can be implemented
+         * without relying on symbols that are not exported.
+         */
+        for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
+                sg_dma_address(s) = sg_phys(s);
+
+        dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
+                               DMA_TO_DEVICE);
+
+        return 0;
+
+err_put_pages:
+        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
+        return ret;
+}
+
+static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
+{
+        sg_free_table(rk_obj->sgt);
+        kfree(rk_obj->sgt);
+        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
+}
+
+static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
+                                    bool alloc_kmap)
+{
+        int ret;
+
+        ret = rockchip_gem_get_pages(rk_obj);
+        if (ret < 0)
+                return ret;
+
+        ret = rockchip_gem_iommu_map(rk_obj);
+        if (ret < 0)
+                goto err_free;
+
+        if (alloc_kmap) {
+                rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
+                                      pgprot_writecombine(PAGE_KERNEL));
+                if (!rk_obj->kvaddr) {
+                        DRM_ERROR("failed to vmap() buffer\n");
+                        ret = -ENOMEM;
+                        goto err_unmap;
+                }
+        }
+
+        return 0;
+
+err_unmap:
+        rockchip_gem_iommu_unmap(rk_obj);
+err_free:
+        rockchip_gem_put_pages(rk_obj);
+
+        return ret;
+}
+
+static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
                                   bool alloc_kmap)
 {
         struct drm_gem_object *obj = &rk_obj->base;
@@ -42,7 +177,27 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
         return 0;
 }
 
-static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
+static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
+                                  bool alloc_kmap)
+{
+        struct drm_gem_object *obj = &rk_obj->base;
+        struct drm_device *drm = obj->dev;
+        struct rockchip_drm_private *private = drm->dev_private;
+
+        if (private->domain)
+                return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
+        else
+                return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
+}
+
+static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
+{
+        vunmap(rk_obj->kvaddr);
+        rockchip_gem_iommu_unmap(rk_obj);
+        rockchip_gem_put_pages(rk_obj);
+}
+
+static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
 {
         struct drm_gem_object *obj = &rk_obj->base;
         struct drm_device *drm = obj->dev;
@@ -51,23 +206,68 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
                        rk_obj->dma_attrs);
 }
 
-static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
-                                        struct vm_area_struct *vma)
+static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
+{
+        if (rk_obj->pages)
+                rockchip_gem_free_iommu(rk_obj);
+        else
+                rockchip_gem_free_dma(rk_obj);
+}
 
+static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
+                                              struct vm_area_struct *vma)
 {
+        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+        unsigned int i, count = obj->size >> PAGE_SHIFT;
+        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+        unsigned long uaddr = vma->vm_start;
+        unsigned long offset = vma->vm_pgoff;
+        unsigned long end = user_count + offset;
         int ret;
+
+        if (user_count == 0)
+                return -ENXIO;
+        if (end > count)
+                return -ENXIO;
+
+        for (i = offset; i < end; i++) {
+                ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
+                if (ret)
+                        return ret;
+                uaddr += PAGE_SIZE;
+        }
+
+        return 0;
+}
+
+static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
+                                            struct vm_area_struct *vma)
+{
         struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
         struct drm_device *drm = obj->dev;
 
+        return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+                              obj->size, rk_obj->dma_attrs);
+}
+
+static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+                                        struct vm_area_struct *vma)
+{
+        int ret;
+        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+
         /*
-         * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
+         * We allocated a struct page table for rk_obj, so clear
          * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
          */
         vma->vm_flags &= ~VM_PFNMAP;
         vma->vm_pgoff = 0;
 
-        ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
-                             obj->size, rk_obj->dma_attrs);
+        if (rk_obj->pages)
+                ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
+        else
+                ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
+
         if (ret)
                 drm_gem_vm_close(vma);
 
@@ -101,6 +301,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
         return rockchip_drm_gem_object_mmap(obj, vma);
 }
 
+static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
+{
+        drm_gem_object_release(&rk_obj->base);
+        kfree(rk_obj);
+}
+
 struct rockchip_gem_object *
         rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
                                    bool alloc_kmap)
@@ -117,7 +323,7 @@ struct rockchip_gem_object *
 
         obj = &rk_obj->base;
 
-        drm_gem_private_object_init(drm, obj, size);
+        drm_gem_object_init(drm, obj, size);
 
         ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
         if (ret)
@@ -126,7 +332,7 @@ struct rockchip_gem_object *
         return rk_obj;
 
 err_free_rk_obj:
-        kfree(rk_obj);
+        rockchip_gem_release_object(rk_obj);
         return ERR_PTR(ret);
 }
 
@@ -138,13 +344,11 @@ void rockchip_gem_free_object(struct drm_gem_object *obj)
 {
         struct rockchip_gem_object *rk_obj;
 
-        drm_gem_free_mmap_offset(obj);
-
         rk_obj = to_rockchip_obj(obj);
 
         rockchip_gem_free_buf(rk_obj);
 
-        kfree(rk_obj);
+        rockchip_gem_release_object(rk_obj);
 }
 
 /*
@@ -253,6 +457,9 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
         struct sg_table *sgt;
         int ret;
 
+        if (rk_obj->pages)
+                return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+
         sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
         if (!sgt)
                 return ERR_PTR(-ENOMEM);
@@ -273,6 +480,10 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
 {
         struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
 
+        if (rk_obj->pages)
+                return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
+                            pgprot_writecombine(PAGE_KERNEL));
+
         if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                 return NULL;
 
@@ -281,5 +492,12 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
 
 void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
-        /* Nothing to do */
+        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+
+        if (rk_obj->pages) {
+                vunmap(vaddr);
+                return;
+        }
+
+        /* Nothing to do if allocated by DMA mapping API. */
 }
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index 18b3488db4ec..3f6ea4d18a5c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -23,7 +23,15 @@ struct rockchip_gem_object {
 
         void *kvaddr;
         dma_addr_t dma_addr;
+        /* Used when IOMMU is disabled */
         unsigned long dma_attrs;
+
+        /* Used when IOMMU is enabled */
+        struct drm_mm_node mm;
+        unsigned long num_pages;
+        struct page **pages;
+        struct sg_table *sgt;
+        size_t size;
 };
 
 struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
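
[Editor's note] Taken together, the rockchip_drm_gem.c hunks give buffer allocation two back ends: the existing CMA/DMA-API path and a new shmem-backed path used whenever private->domain exists. A condensed, illustrative sketch of the new path is below: pages are pinned, wrapped in a scatter/gather table, an IOVA range is reserved from the shared drm_mm pool, and the whole table is mapped in one iommu_map_sg() call. The example_* names come from the note after the rockchip_drm_drv.c diff, and error unwinding is trimmed; see rockchip_gem_alloc_iommu()/rockchip_gem_iommu_map() above for the real flow.

static int example_alloc_iommu(struct rockchip_gem_object *rk_obj,
                               struct example_private *priv)
{
        int prot = IOMMU_READ | IOMMU_WRITE;
        ssize_t mapped;
        int ret;

        /* 1. Back the object with shmem pages and build an SG table for them. */
        rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
        if (IS_ERR(rk_obj->pages))
                return PTR_ERR(rk_obj->pages);
        rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
        rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

        /* 2. Reserve a contiguous IOVA range from the drm_mm pool set up at bind time. */
        mutex_lock(&priv->mm_lock);
        ret = drm_mm_insert_node_generic(&priv->mm, &rk_obj->mm,
                                         rk_obj->base.size, PAGE_SIZE, 0, 0);
        mutex_unlock(&priv->mm_lock);
        if (ret < 0)
                return ret;
        rk_obj->dma_addr = rk_obj->mm.start;

        /*
         * 3. Map the scattered pages at that IOVA so the display controller
         *    sees one contiguous buffer even without CMA.
         */
        mapped = iommu_map_sg(priv->domain, rk_obj->dma_addr,
                              rk_obj->sgt->sgl, rk_obj->sgt->nents, prot);
        return mapped < rk_obj->base.size ? -ENOMEM : 0;
}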