diff options
Diffstat (limited to 'drivers/gpu/drm/omapdrm/omap_gem.c')
-rw-r--r-- | drivers/gpu/drm/omapdrm/omap_gem.c | 48 |
1 file changed, 26 insertions, 22 deletions
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 6030de7ec2ba..7a4ee4edab5b 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c | |||
@@ -156,7 +156,7 @@ static u64 mmap_offset(struct drm_gem_object *obj) | |||
156 | return drm_vma_node_offset_addr(&obj->vma_node); | 156 | return drm_vma_node_offset_addr(&obj->vma_node); |
157 | } | 157 | } |
158 | 158 | ||
159 | static bool is_contiguous(struct omap_gem_object *omap_obj) | 159 | static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj) |
160 | { | 160 | { |
161 | if (omap_obj->flags & OMAP_BO_MEM_DMA_API) | 161 | if (omap_obj->flags & OMAP_BO_MEM_DMA_API) |
162 | return true; | 162 | return true; |
@@ -171,7 +171,7 @@ static bool is_contiguous(struct omap_gem_object *omap_obj) | |||
171 | * Eviction | 171 | * Eviction |
172 | */ | 172 | */ |
173 | 173 | ||
174 | static void evict_entry(struct drm_gem_object *obj, | 174 | static void omap_gem_evict_entry(struct drm_gem_object *obj, |
175 | enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry) | 175 | enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry) |
176 | { | 176 | { |
177 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 177 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
@@ -199,7 +199,7 @@ static void evict_entry(struct drm_gem_object *obj, | |||
199 | } | 199 | } |
200 | 200 | ||
201 | /* Evict a buffer from usergart, if it is mapped there */ | 201 | /* Evict a buffer from usergart, if it is mapped there */ |
202 | static void evict(struct drm_gem_object *obj) | 202 | static void omap_gem_evict(struct drm_gem_object *obj) |
203 | { | 203 | { |
204 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 204 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
205 | struct omap_drm_private *priv = obj->dev->dev_private; | 205 | struct omap_drm_private *priv = obj->dev->dev_private; |
@@ -213,7 +213,7 @@ static void evict(struct drm_gem_object *obj) | |||
213 | &priv->usergart[fmt].entry[i]; | 213 | &priv->usergart[fmt].entry[i]; |
214 | 214 | ||
215 | if (entry->obj == obj) | 215 | if (entry->obj == obj) |
216 | evict_entry(obj, fmt, entry); | 216 | omap_gem_evict_entry(obj, fmt, entry); |
217 | } | 217 | } |
218 | } | 218 | } |
219 | } | 219 | } |
@@ -291,7 +291,8 @@ free_pages: | |||
291 | /* acquire pages when needed (for example, for DMA where physically | 291 | /* acquire pages when needed (for example, for DMA where physically |
292 | * contiguous buffer is not required | 292 | * contiguous buffer is not required |
293 | */ | 293 | */ |
294 | static int get_pages(struct drm_gem_object *obj, struct page ***pages) | 294 | static int __omap_gem_get_pages(struct drm_gem_object *obj, |
295 | struct page ***pages) | ||
295 | { | 296 | { |
296 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 297 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
297 | int ret = 0; | 298 | int ret = 0; |
@@ -371,7 +372,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj) | |||
371 | */ | 372 | */ |
372 | 373 | ||
373 | /* Normal handling for the case of faulting in non-tiled buffers */ | 374 | /* Normal handling for the case of faulting in non-tiled buffers */ |
374 | static vm_fault_t fault_1d(struct drm_gem_object *obj, | 375 | static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj, |
375 | struct vm_area_struct *vma, struct vm_fault *vmf) | 376 | struct vm_area_struct *vma, struct vm_fault *vmf) |
376 | { | 377 | { |
377 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 378 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
@@ -385,7 +386,7 @@ static vm_fault_t fault_1d(struct drm_gem_object *obj, | |||
385 | omap_gem_cpu_sync_page(obj, pgoff); | 386 | omap_gem_cpu_sync_page(obj, pgoff); |
386 | pfn = page_to_pfn(omap_obj->pages[pgoff]); | 387 | pfn = page_to_pfn(omap_obj->pages[pgoff]); |
387 | } else { | 388 | } else { |
388 | BUG_ON(!is_contiguous(omap_obj)); | 389 | BUG_ON(!omap_gem_is_contiguous(omap_obj)); |
389 | pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff; | 390 | pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff; |
390 | } | 391 | } |
391 | 392 | ||
@@ -397,7 +398,7 @@ static vm_fault_t fault_1d(struct drm_gem_object *obj, | |||
397 | } | 398 | } |
398 | 399 | ||
399 | /* Special handling for the case of faulting in 2d tiled buffers */ | 400 | /* Special handling for the case of faulting in 2d tiled buffers */ |
400 | static vm_fault_t fault_2d(struct drm_gem_object *obj, | 401 | static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj, |
401 | struct vm_area_struct *vma, struct vm_fault *vmf) | 402 | struct vm_area_struct *vma, struct vm_fault *vmf) |
402 | { | 403 | { |
403 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 404 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
@@ -445,7 +446,7 @@ static vm_fault_t fault_2d(struct drm_gem_object *obj, | |||
445 | 446 | ||
446 | /* evict previous buffer using this usergart entry, if any: */ | 447 | /* evict previous buffer using this usergart entry, if any: */ |
447 | if (entry->obj) | 448 | if (entry->obj) |
448 | evict_entry(entry->obj, fmt, entry); | 449 | omap_gem_evict_entry(entry->obj, fmt, entry); |
449 | 450 | ||
450 | entry->obj = obj; | 451 | entry->obj = obj; |
451 | entry->obj_pgoff = base_pgoff; | 452 | entry->obj_pgoff = base_pgoff; |
@@ -531,7 +532,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf) | |||
531 | mutex_lock(&dev->struct_mutex); | 532 | mutex_lock(&dev->struct_mutex); |
532 | 533 | ||
533 | /* if a shmem backed object, make sure we have pages attached now */ | 534 | /* if a shmem backed object, make sure we have pages attached now */ |
534 | err = get_pages(obj, &pages); | 535 | err = __omap_gem_get_pages(obj, &pages); |
535 | if (err) { | 536 | if (err) { |
536 | ret = vmf_error(err); | 537 | ret = vmf_error(err); |
537 | goto fail; | 538 | goto fail; |
@@ -544,9 +545,9 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf) | |||
544 | */ | 545 | */ |
545 | 546 | ||
546 | if (omap_obj->flags & OMAP_BO_TILED) | 547 | if (omap_obj->flags & OMAP_BO_TILED) |
547 | ret = fault_2d(obj, vma, vmf); | 548 | ret = omap_gem_fault_2d(obj, vma, vmf); |
548 | else | 549 | else |
549 | ret = fault_1d(obj, vma, vmf); | 550 | ret = omap_gem_fault_1d(obj, vma, vmf); |
550 | 551 | ||
551 | 552 | ||
552 | fail: | 553 | fail: |
@@ -689,7 +690,8 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll) | |||
689 | /* if we aren't mapped yet, we don't need to do anything */ | 690 | /* if we aren't mapped yet, we don't need to do anything */ |
690 | if (omap_obj->block) { | 691 | if (omap_obj->block) { |
691 | struct page **pages; | 692 | struct page **pages; |
692 | ret = get_pages(obj, &pages); | 693 | |
694 | ret = __omap_gem_get_pages(obj, &pages); | ||
693 | if (ret) | 695 | if (ret) |
694 | goto fail; | 696 | goto fail; |
695 | ret = tiler_pin(omap_obj->block, pages, npages, roll, true); | 697 | ret = tiler_pin(omap_obj->block, pages, npages, roll, true); |
@@ -717,7 +719,7 @@ fail: | |||
717 | * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is | 719 | * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is |
718 | * unmapped from the CPU. | 720 | * unmapped from the CPU. |
719 | */ | 721 | */ |
720 | static inline bool is_cached_coherent(struct drm_gem_object *obj) | 722 | static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj) |
721 | { | 723 | { |
722 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 724 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
723 | 725 | ||
@@ -733,7 +735,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff) | |||
733 | struct drm_device *dev = obj->dev; | 735 | struct drm_device *dev = obj->dev; |
734 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 736 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
735 | 737 | ||
736 | if (is_cached_coherent(obj)) | 738 | if (omap_gem_is_cached_coherent(obj)) |
737 | return; | 739 | return; |
738 | 740 | ||
739 | if (omap_obj->dma_addrs[pgoff]) { | 741 | if (omap_obj->dma_addrs[pgoff]) { |
@@ -753,7 +755,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj, | |||
753 | struct page **pages = omap_obj->pages; | 755 | struct page **pages = omap_obj->pages; |
754 | bool dirty = false; | 756 | bool dirty = false; |
755 | 757 | ||
756 | if (is_cached_coherent(obj)) | 758 | if (omap_gem_is_cached_coherent(obj)) |
757 | return; | 759 | return; |
758 | 760 | ||
759 | for (i = 0; i < npages; i++) { | 761 | for (i = 0; i < npages; i++) { |
@@ -801,7 +803,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) | |||
801 | 803 | ||
802 | mutex_lock(&obj->dev->struct_mutex); | 804 | mutex_lock(&obj->dev->struct_mutex); |
803 | 805 | ||
804 | if (!is_contiguous(omap_obj) && priv->has_dmm) { | 806 | if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) { |
805 | if (omap_obj->dma_addr_cnt == 0) { | 807 | if (omap_obj->dma_addr_cnt == 0) { |
806 | struct page **pages; | 808 | struct page **pages; |
807 | u32 npages = obj->size >> PAGE_SHIFT; | 809 | u32 npages = obj->size >> PAGE_SHIFT; |
@@ -810,7 +812,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) | |||
810 | 812 | ||
811 | BUG_ON(omap_obj->block); | 813 | BUG_ON(omap_obj->block); |
812 | 814 | ||
813 | ret = get_pages(obj, &pages); | 815 | ret = __omap_gem_get_pages(obj, &pages); |
814 | if (ret) | 816 | if (ret) |
815 | goto fail; | 817 | goto fail; |
816 | 818 | ||
@@ -848,7 +850,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) | |||
848 | omap_obj->dma_addr_cnt++; | 850 | omap_obj->dma_addr_cnt++; |
849 | 851 | ||
850 | *dma_addr = omap_obj->dma_addr; | 852 | *dma_addr = omap_obj->dma_addr; |
851 | } else if (is_contiguous(omap_obj)) { | 853 | } else if (omap_gem_is_contiguous(omap_obj)) { |
852 | *dma_addr = omap_obj->dma_addr; | 854 | *dma_addr = omap_obj->dma_addr; |
853 | } else { | 855 | } else { |
854 | ret = -EINVAL; | 856 | ret = -EINVAL; |
@@ -948,7 +950,7 @@ int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages, | |||
948 | return 0; | 950 | return 0; |
949 | } | 951 | } |
950 | mutex_lock(&obj->dev->struct_mutex); | 952 | mutex_lock(&obj->dev->struct_mutex); |
951 | ret = get_pages(obj, pages); | 953 | ret = __omap_gem_get_pages(obj, pages); |
952 | mutex_unlock(&obj->dev->struct_mutex); | 954 | mutex_unlock(&obj->dev->struct_mutex); |
953 | return ret; | 955 | return ret; |
954 | } | 956 | } |
@@ -974,7 +976,9 @@ void *omap_gem_vaddr(struct drm_gem_object *obj) | |||
974 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | 976 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); |
975 | if (!omap_obj->vaddr) { | 977 | if (!omap_obj->vaddr) { |
976 | struct page **pages; | 978 | struct page **pages; |
977 | int ret = get_pages(obj, &pages); | 979 | int ret; |
980 | |||
981 | ret = __omap_gem_get_pages(obj, &pages); | ||
978 | if (ret) | 982 | if (ret) |
979 | return ERR_PTR(ret); | 983 | return ERR_PTR(ret); |
980 | omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, | 984 | omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, |
@@ -1076,7 +1080,7 @@ void omap_gem_free_object(struct drm_gem_object *obj) | |||
1076 | struct omap_drm_private *priv = dev->dev_private; | 1080 | struct omap_drm_private *priv = dev->dev_private; |
1077 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 1081 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
1078 | 1082 | ||
1079 | evict(obj); | 1083 | omap_gem_evict(obj); |
1080 | 1084 | ||
1081 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 1085 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
1082 | 1086 | ||