Diffstat (limited to 'drivers/gpu/drm/exynos/exynos_drm_gem.c')
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c   89
 1 file changed, 77 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 1dffa8359f88..fc91293c4560 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -66,6 +66,22 @@ static int check_gem_flags(unsigned int flags)
 	return 0;
 }
 
+static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
+					struct vm_area_struct *vma)
+{
+	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+	/* non-cachable as default. */
+	if (obj->flags & EXYNOS_BO_CACHABLE)
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	else if (obj->flags & EXYNOS_BO_WC)
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	else
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+}
+
 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
 {
 	if (!IS_NONCONTIG_BUFFER(flags)) {
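
[Editor's note, not part of the patch: update_vm_cache_attr() derives the page protection from the buffer's allocation flags, so a buffer created as cacheable or write-combined keeps that attribute when it is later mmap'ed, instead of always being mapped non-cached. A minimal userspace allocation sketch follows; DRM_IOCTL_EXYNOS_GEM_CREATE and struct drm_exynos_gem_create are assumed from the exynos UAPI header and are not shown in this diff, only the EXYNOS_BO_* flags are.]

	/*
	 * Allocation sketch (assumption, not part of this patch): request a
	 * write-combined, non-contiguous GEM buffer. On a later mmap the new
	 * update_vm_cache_attr() maps it with pgprot_writecombine() instead
	 * of the previous unconditional pgprot_noncached().
	 */
	#include <string.h>
	#include <xf86drm.h>        /* drmIoctl() from libdrm */
	#include <exynos_drm.h>     /* assumed UAPI header with struct drm_exynos_gem_create */

	static int create_wc_buffer(int fd, unsigned long size, unsigned int *handle)
	{
		struct drm_exynos_gem_create req;

		memset(&req, 0, sizeof(req));
		req.size = size;
		req.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;

		if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req))
			return -1;

		*handle = req.handle;
		return 0;
	}
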
@@ -80,7 +96,7 @@ out:
 	return roundup(size, PAGE_SIZE);
 }
 
-static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
+struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
 						gfp_t gfpmask)
 {
 	struct inode *inode;
@@ -180,6 +196,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
 	}
 
 	npages = obj->size >> PAGE_SHIFT;
+	buf->page_size = PAGE_SIZE;
 
 	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!buf->sgt) {
@@ -262,24 +279,24 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 {
 	struct drm_gem_object *obj;
+	struct exynos_drm_gem_buf *buf;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (!exynos_gem_obj)
-		return;
-
 	obj = &exynos_gem_obj->base;
+	buf = exynos_gem_obj->buffer;
 
 	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
 
-	if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
-			exynos_gem_obj->buffer->pages)
+	if (!buf->pages)
+		return;
+
+	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
 		exynos_drm_gem_put_pages(obj);
 	else
-		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
-				exynos_gem_obj->buffer);
+		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
 
-	exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
+	exynos_drm_fini_buf(obj->dev, buf);
 	exynos_gem_obj->buffer = NULL;
 
 	if (obj->map_list.map)
@@ -292,7 +309,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 	exynos_gem_obj = NULL;
 }
 
-static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
 						      unsigned long size)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -493,8 +510,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 
 	vma->vm_flags |= (VM_IO | VM_RESERVED);
 
-	/* in case of direct mapping, always having non-cachable attribute */
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	update_vm_cache_attr(exynos_gem_obj, vma);
 
 	vm_size = usize = vma->vm_end - vma->vm_start;
 
@@ -588,6 +604,32 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
+{	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_exynos_gem_info *args = data;
+	struct drm_gem_object *obj;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	args->flags = exynos_gem_obj->flags;
+	args->size = exynos_gem_obj->size;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 int exynos_drm_gem_init_object(struct drm_gem_object *obj)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -597,8 +639,17 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
 
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem_buf *buf;
+
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+	buf = exynos_gem_obj->buffer;
+
+	if (obj->import_attach)
+		drm_prime_gem_destroy(obj, buf->sgt);
+
 	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
@@ -724,6 +775,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_gem_object *obj;
 	int ret;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -735,8 +788,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		return ret;
 	}
 
+	obj = vma->vm_private_data;
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	ret = check_gem_flags(exynos_gem_obj->flags);
+	if (ret) {
+		drm_gem_vm_close(vma);
+		drm_gem_free_mmap_offset(obj);
+		return ret;
+	}
+
 	vma->vm_flags &= ~VM_PFNMAP;
 	vma->vm_flags |= VM_MIXEDMAP;
 
+	update_vm_cache_attr(exynos_gem_obj, vma);
+
 	return ret;
 }
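
[Editor's note, not part of the patch: the new exynos_drm_gem_get_ioctl() lets userspace read back the flags and size recorded for an existing GEM handle. A minimal query sketch follows, assuming the DRM_IOCTL_EXYNOS_GEM_GET request name and libdrm's drmIoctl(); only the handle, flags and size fields of struct drm_exynos_gem_info are implied by the diff above.]

	/*
	 * Query sketch (assumption, not part of this patch): read back the
	 * allocation flags and size that exynos_drm_gem_get_ioctl() fills in
	 * for an existing GEM handle.
	 */
	#include <stdio.h>
	#include <string.h>
	#include <xf86drm.h>        /* drmIoctl() from libdrm */
	#include <exynos_drm.h>     /* assumed UAPI header with struct drm_exynos_gem_info */

	static int print_gem_info(int fd, unsigned int handle)
	{
		struct drm_exynos_gem_info info;

		memset(&info, 0, sizeof(info));
		info.handle = handle;

		/* the kernel sets info.flags and info.size, see exynos_drm_gem_get_ioctl() */
		if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_GET, &info))
			return -1;

		printf("handle %u: flags 0x%x, size %llu\n",
		       info.handle, info.flags, (unsigned long long)info.size);
		return 0;
	}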