aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/exynos
diff options
context:
space:
mode:
authorInki Dae <inki.dae@samsung.com>2012-04-23 06:26:34 -0400
committerInki Dae <inki.dae@samsung.com>2012-05-08 05:46:32 -0400
commitc01d73faad2f775036f0b37c753254479b79cbe6 (patch)
tree87926e114722abe8b63164447b0ef7cd418bb3ef /drivers/gpu/drm/exynos
parent922f6e99368a6e513a6433bb9a21be274c029c51 (diff)
drm/exynos: added cache attribute support for gem.
with this patch, user application can set the cache attribute (such as cachable, writecombine or non-cachable) of the memory region allocated by the gem framework. Signed-off-by: Inki Dae <inki.dae@samsung.com> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Diffstat (limited to 'drivers/gpu/drm/exynos')
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c49
1 file changed, 39 insertions, 10 deletions
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 1dffa8359f88..e8ab3beb4510 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -66,6 +66,22 @@ static int check_gem_flags(unsigned int flags)
66 return 0; 66 return 0;
67} 67}
68 68
69static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
70 struct vm_area_struct *vma)
71{
72 DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
73
74 /* non-cachable as default. */
75 if (obj->flags & EXYNOS_BO_CACHABLE)
76 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
77 else if (obj->flags & EXYNOS_BO_WC)
78 vma->vm_page_prot =
79 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
80 else
81 vma->vm_page_prot =
82 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
83}
84
69static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) 85static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
70{ 86{
71 if (!IS_NONCONTIG_BUFFER(flags)) { 87 if (!IS_NONCONTIG_BUFFER(flags)) {
@@ -262,24 +278,24 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
262void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) 278void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
263{ 279{
264 struct drm_gem_object *obj; 280 struct drm_gem_object *obj;
281 struct exynos_drm_gem_buf *buf;
265 282
266 DRM_DEBUG_KMS("%s\n", __FILE__); 283 DRM_DEBUG_KMS("%s\n", __FILE__);
267 284
268 if (!exynos_gem_obj)
269 return;
270
271 obj = &exynos_gem_obj->base; 285 obj = &exynos_gem_obj->base;
286 buf = exynos_gem_obj->buffer;
272 287
273 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); 288 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
274 289
275 if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) && 290 if (!buf->pages)
276 exynos_gem_obj->buffer->pages) 291 return;
292
293 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
277 exynos_drm_gem_put_pages(obj); 294 exynos_drm_gem_put_pages(obj);
278 else 295 else
279 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, 296 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
280 exynos_gem_obj->buffer);
281 297
282 exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer); 298 exynos_drm_fini_buf(obj->dev, buf);
283 exynos_gem_obj->buffer = NULL; 299 exynos_gem_obj->buffer = NULL;
284 300
285 if (obj->map_list.map) 301 if (obj->map_list.map)
@@ -493,8 +509,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
493 509
494 vma->vm_flags |= (VM_IO | VM_RESERVED); 510 vma->vm_flags |= (VM_IO | VM_RESERVED);
495 511
496 /* in case of direct mapping, always having non-cachable attribute */ 512 update_vm_cache_attr(exynos_gem_obj, vma);
497 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
498 513
499 vm_size = usize = vma->vm_end - vma->vm_start; 514 vm_size = usize = vma->vm_end - vma->vm_start;
500 515
@@ -724,6 +739,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
724 739
725int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 740int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
726{ 741{
742 struct exynos_drm_gem_obj *exynos_gem_obj;
743 struct drm_gem_object *obj;
727 int ret; 744 int ret;
728 745
729 DRM_DEBUG_KMS("%s\n", __FILE__); 746 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -735,8 +752,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
735 return ret; 752 return ret;
736 } 753 }
737 754
755 obj = vma->vm_private_data;
756 exynos_gem_obj = to_exynos_gem_obj(obj);
757
758 ret = check_gem_flags(exynos_gem_obj->flags);
759 if (ret) {
760 drm_gem_vm_close(vma);
761 drm_gem_free_mmap_offset(obj);
762 return ret;
763 }
764
738 vma->vm_flags &= ~VM_PFNMAP; 765 vma->vm_flags &= ~VM_PFNMAP;
739 vma->vm_flags |= VM_MIXEDMAP; 766 vma->vm_flags |= VM_MIXEDMAP;
740 767
768 update_vm_cache_attr(exynos_gem_obj, vma);
769
741 return ret; 770 return ret;
742} 771}