path: root/drivers/gpu/drm/udl/udl_gem.c
Diffstat (limited to 'drivers/gpu/drm/udl/udl_gem.c')
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c  97
1 file changed, 23 insertions(+), 74 deletions(-)
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8044f5fb7c49..2a0a784ab6ee 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -25,6 +25,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
                 return NULL;
         }
 
+        obj->flags = UDL_BO_CACHEABLE;
         return obj;
 }
 
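Freshly allocated objects now default to a cacheable CPU mapping; other code paths can overwrite obj->flags before the buffer is mmapped. The flag bits themselves are not part of this diff and presumably live in udl_drv.h; a minimal sketch of what the definitions would look like (the exact bit values are an assumption):

    #define UDL_BO_CACHEABLE (1 << 0)  /* normal cached CPU mapping (assumed value) */
    #define UDL_BO_WC        (1 << 1)  /* write-combined CPU mapping (assumed value) */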
@@ -56,6 +57,23 @@ udl_gem_create(struct drm_file *file,
         return 0;
 }
 
+static void update_vm_cache_attr(struct udl_gem_object *obj,
+                                 struct vm_area_struct *vma)
+{
+        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+        /* non-cacheable as default. */
+        if (obj->flags & UDL_BO_CACHEABLE) {
+                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+        } else if (obj->flags & UDL_BO_WC) {
+                vma->vm_page_prot =
+                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+        } else {
+                vma->vm_page_prot =
+                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+        }
+}
+
 int udl_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
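update_vm_cache_attr() translates the object's flags into the vma's page protection at mmap time: UDL_BO_CACHEABLE keeps the default protection from vm_get_page_prot(), UDL_BO_WC layers pgprot_writecombine() on top of it, and anything else falls back to a fully uncached mapping via pgprot_noncached(). A hedged sketch of a caller requesting write-combining; the call site is hypothetical and not part of this diff:

    /* Hypothetical caller: scanout memory is written sequentially by the CPU,
     * so a write-combined mapping is usually preferable to uncached. */
    struct udl_gem_object *bo = udl_gem_alloc_object(dev, size);
    if (bo)
            bo->flags = UDL_BO_WC;  /* overrides the UDL_BO_CACHEABLE default */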
@@ -77,6 +95,8 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
         vma->vm_flags &= ~VM_PFNMAP;
         vma->vm_flags |= VM_MIXEDMAP;
 
+        update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
+
         return ret;
 }
 
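This runs after the generic GEM mmap path has set up the vma; drm_gem_mmap() stores the GEM object in vma->vm_private_data, which is how the driver object is recovered here. to_udl_bo() is not shown in this diff; it is presumably the usual container_of() wrapper in udl_drv.h:

    /* Presumed definition (udl_drv.h): recover the udl object from the
     * embedded struct drm_gem_object. */
    #define to_udl_bo(x) container_of(x, struct udl_gem_object, base)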
@@ -107,7 +127,7 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         }
 }
 
-static int udl_gem_get_pages(struct udl_gem_object *obj)
+int udl_gem_get_pages(struct udl_gem_object *obj)
 {
         struct page **pages;
 
@@ -123,7 +143,7 @@ static int udl_gem_get_pages(struct udl_gem_object *obj)
         return 0;
 }
 
-static void udl_gem_put_pages(struct udl_gem_object *obj)
+void udl_gem_put_pages(struct udl_gem_object *obj)
 {
         if (obj->base.import_attach) {
                 drm_free_large(obj->pages);
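Both page helpers lose their static qualifier so they can be called from other files in the driver. The matching declarations are not in this diff; presumably the same commit adds them to udl_drv.h along these lines:

    /* Presumed companion declarations (udl_drv.h), not part of this file's diff: */
    int udl_gem_get_pages(struct udl_gem_object *obj);
    void udl_gem_put_pages(struct udl_gem_object *obj);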
@@ -164,8 +184,7 @@ void udl_gem_vunmap(struct udl_gem_object *obj)
                 return;
         }
 
-        if (obj->vmapping)
-                vunmap(obj->vmapping);
+        vunmap(obj->vmapping);
 
         udl_gem_put_pages(obj);
 }
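The dropped NULL check was redundant: like kfree(), vunmap() simply returns early when passed a NULL address, so the unconditional call behaves identically when obj->vmapping was never set.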
@@ -220,73 +239,3 @@ unlock:
         mutex_unlock(&dev->struct_mutex);
         return ret;
 }
-
-static int udl_prime_create(struct drm_device *dev,
-                            size_t size,
-                            struct sg_table *sg,
-                            struct udl_gem_object **obj_p)
-{
-        struct udl_gem_object *obj;
-        int npages;
-
-        npages = size / PAGE_SIZE;
-
-        *obj_p = NULL;
-        obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
-        if (!obj)
-                return -ENOMEM;
-
-        obj->sg = sg;
-        obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
-        if (obj->pages == NULL) {
-                DRM_ERROR("obj pages is NULL %d\n", npages);
-                return -ENOMEM;
-        }
-
-        drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-
-        *obj_p = obj;
-        return 0;
-}
-
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
-                                            struct dma_buf *dma_buf)
-{
-        struct dma_buf_attachment *attach;
-        struct sg_table *sg;
-        struct udl_gem_object *uobj;
-        int ret;
-
-        /* need to attach */
-        get_device(dev->dev);
-        attach = dma_buf_attach(dma_buf, dev->dev);
-        if (IS_ERR(attach)) {
-                put_device(dev->dev);
-                return ERR_CAST(attach);
-        }
-
-        get_dma_buf(dma_buf);
-
-        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-        if (IS_ERR(sg)) {
-                ret = PTR_ERR(sg);
-                goto fail_detach;
-        }
-
-        ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
-        if (ret) {
-                goto fail_unmap;
-        }
-
-        uobj->base.import_attach = attach;
-
-        return &uobj->base;
-
-fail_unmap:
-        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
-        dma_buf_detach(dma_buf, attach);
-        dma_buf_put(dma_buf);
-        put_device(dev->dev);
-        return ERR_PTR(ret);
-}
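The prime import path is removed from this file wholesale. Since the diffstat above is limited to udl_gem.c, the import support is presumably relocated by the same commit rather than dropped outright. (Incidentally, the removed udl_prime_create() leaked obj when drm_malloc_ab() failed.) For reference, the removed import sequence acquired resources in the order attach, dma_buf reference, sg mapping, GEM object, and its error labels unwound them in reverse; a release path for an imported object has to mirror that same ordering. A hedged sketch, with a hypothetical function name and assuming obj->sg still holds the mapped table:

    static void udl_gem_release_import(struct udl_gem_object *obj)
    {
            struct dma_buf_attachment *attach = obj->base.import_attach;
            struct dma_buf *dmabuf = attach->dmabuf; /* read before detach frees attach */

            dma_buf_unmap_attachment(attach, obj->sg, DMA_BIDIRECTIONAL);
            dma_buf_detach(dmabuf, attach);
            dma_buf_put(dmabuf);
            put_device(obj->base.dev->dev);
    }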