about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu.h
diff options
context:
space:
mode:
author: Chris Wilson <chris@chris-wilson.co.uk> 2016-10-25 08:00:45 -0400
committer: Daniel Vetter <daniel.vetter@ffwll.ch> 2016-10-25 08:40:39 -0400
commit f54d1867005c3323f5d8ad83eed823e84226c429 (patch)
tree 026c3f57bc546d3a0205389d0f8e0d02ce8a76ac /drivers/gpu/drm/amd/amdgpu/amdgpu.h
parent 0fc4f78f44e6c6148cee32456f0d0023ec1c1fd8 (diff)
dma-buf: Rename struct fence to dma_fence
I plan to usurp the short name of struct fence for a core kernel struct, and so I need to rename the specialised fence/timeline for DMA operations to make room. A consensus was reached in https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html that making clear this fence applies to DMA operations was a good thing. Since then the patch has grown a bit as usage increases, so hopefully it remains a good thing! (v2...: rebase, rerun spatch) v3: Compile on msm, spotted a manual fixup that I broke. v4: Try again for msm, sorry Daniel coccinelle script: @@ @@ - struct fence + struct dma_fence @@ @@ - struct fence_ops + struct dma_fence_ops @@ @@ - struct fence_cb + struct dma_fence_cb @@ @@ - struct fence_array + struct dma_fence_array @@ @@ - enum fence_flag_bits + enum dma_fence_flag_bits @@ @@ ( - fence_init + dma_fence_init | - fence_release + dma_fence_release | - fence_free + dma_fence_free | - fence_get + dma_fence_get | - fence_get_rcu + dma_fence_get_rcu | - fence_put + dma_fence_put | - fence_signal + dma_fence_signal | - fence_signal_locked + dma_fence_signal_locked | - fence_default_wait + dma_fence_default_wait | - fence_add_callback + dma_fence_add_callback | - fence_remove_callback + dma_fence_remove_callback | - fence_enable_sw_signaling + dma_fence_enable_sw_signaling | - fence_is_signaled_locked + dma_fence_is_signaled_locked | - fence_is_signaled + dma_fence_is_signaled | - fence_is_later + dma_fence_is_later | - fence_later + dma_fence_later | - fence_wait_timeout + dma_fence_wait_timeout | - fence_wait_any_timeout + dma_fence_wait_any_timeout | - fence_wait + dma_fence_wait | - fence_context_alloc + dma_fence_context_alloc | - fence_array_create + dma_fence_array_create | - to_fence_array + to_dma_fence_array | - fence_is_array + dma_fence_is_array | - trace_fence_emit + trace_dma_fence_emit | - FENCE_TRACE + DMA_FENCE_TRACE | - FENCE_WARN + DMA_FENCE_WARN | - FENCE_ERR + DMA_FENCE_ERR ) ( ... 
) Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk> Acked-by: Sumit Semwal <sumit.semwal@linaro.org> Acked-by: Christian König <christian.koenig@amd.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  54
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e4644c..283d05927d15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -34,7 +34,7 @@
34#include <linux/kref.h> 34#include <linux/kref.h>
35#include <linux/interval_tree.h> 35#include <linux/interval_tree.h>
36#include <linux/hashtable.h> 36#include <linux/hashtable.h>
37#include <linux/fence.h> 37#include <linux/dma-fence.h>
38 38
39#include <ttm/ttm_bo_api.h> 39#include <ttm/ttm_bo_api.h>
40#include <ttm/ttm_bo_driver.h> 40#include <ttm/ttm_bo_driver.h>
@@ -378,7 +378,7 @@ struct amdgpu_fence_driver {
378 struct timer_list fallback_timer; 378 struct timer_list fallback_timer;
379 unsigned num_fences_mask; 379 unsigned num_fences_mask;
380 spinlock_t lock; 380 spinlock_t lock;
381 struct fence **fences; 381 struct dma_fence **fences;
382}; 382};
383 383
384/* some special values for the owner field */ 384/* some special values for the owner field */
@@ -399,7 +399,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
399 unsigned irq_type); 399 unsigned irq_type);
400void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); 400void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
401void amdgpu_fence_driver_resume(struct amdgpu_device *adev); 401void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
402int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence); 402int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
403void amdgpu_fence_process(struct amdgpu_ring *ring); 403void amdgpu_fence_process(struct amdgpu_ring *ring);
404int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); 404int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
405unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); 405unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
@@ -427,7 +427,7 @@ struct amdgpu_bo_va_mapping {
427struct amdgpu_bo_va { 427struct amdgpu_bo_va {
428 /* protected by bo being reserved */ 428 /* protected by bo being reserved */
429 struct list_head bo_list; 429 struct list_head bo_list;
430 struct fence *last_pt_update; 430 struct dma_fence *last_pt_update;
431 unsigned ref_count; 431 unsigned ref_count;
432 432
433 /* protected by vm mutex and spinlock */ 433 /* protected by vm mutex and spinlock */
@@ -543,7 +543,7 @@ struct amdgpu_sa_bo {
543 struct amdgpu_sa_manager *manager; 543 struct amdgpu_sa_manager *manager;
544 unsigned soffset; 544 unsigned soffset;
545 unsigned eoffset; 545 unsigned eoffset;
546 struct fence *fence; 546 struct dma_fence *fence;
547}; 547};
548 548
549/* 549/*
@@ -566,19 +566,19 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
566 */ 566 */
567struct amdgpu_sync { 567struct amdgpu_sync {
568 DECLARE_HASHTABLE(fences, 4); 568 DECLARE_HASHTABLE(fences, 4);
569 struct fence *last_vm_update; 569 struct dma_fence *last_vm_update;
570}; 570};
571 571
572void amdgpu_sync_create(struct amdgpu_sync *sync); 572void amdgpu_sync_create(struct amdgpu_sync *sync);
573int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, 573int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
574 struct fence *f); 574 struct dma_fence *f);
575int amdgpu_sync_resv(struct amdgpu_device *adev, 575int amdgpu_sync_resv(struct amdgpu_device *adev,
576 struct amdgpu_sync *sync, 576 struct amdgpu_sync *sync,
577 struct reservation_object *resv, 577 struct reservation_object *resv,
578 void *owner); 578 void *owner);
579struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, 579struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
580 struct amdgpu_ring *ring); 580 struct amdgpu_ring *ring);
581struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); 581struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
582void amdgpu_sync_free(struct amdgpu_sync *sync); 582void amdgpu_sync_free(struct amdgpu_sync *sync);
583int amdgpu_sync_init(void); 583int amdgpu_sync_init(void);
584void amdgpu_sync_fini(void); 584void amdgpu_sync_fini(void);
@@ -703,10 +703,10 @@ struct amdgpu_flip_work {
703 uint64_t base; 703 uint64_t base;
704 struct drm_pending_vblank_event *event; 704 struct drm_pending_vblank_event *event;
705 struct amdgpu_bo *old_abo; 705 struct amdgpu_bo *old_abo;
706 struct fence *excl; 706 struct dma_fence *excl;
707 unsigned shared_count; 707 unsigned shared_count;
708 struct fence **shared; 708 struct dma_fence **shared;
709 struct fence_cb cb; 709 struct dma_fence_cb cb;
710 bool async; 710 bool async;
711}; 711};
712 712
@@ -742,7 +742,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
742void amdgpu_job_free(struct amdgpu_job *job); 742void amdgpu_job_free(struct amdgpu_job *job);
743int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, 743int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
744 struct amd_sched_entity *entity, void *owner, 744 struct amd_sched_entity *entity, void *owner,
745 struct fence **f); 745 struct dma_fence **f);
746 746
747struct amdgpu_ring { 747struct amdgpu_ring {
748 struct amdgpu_device *adev; 748 struct amdgpu_device *adev;
@@ -844,7 +844,7 @@ struct amdgpu_vm {
844 /* contains the page directory */ 844 /* contains the page directory */
845 struct amdgpu_bo *page_directory; 845 struct amdgpu_bo *page_directory;
846 unsigned max_pde_used; 846 unsigned max_pde_used;
847 struct fence *page_directory_fence; 847 struct dma_fence *page_directory_fence;
848 uint64_t last_eviction_counter; 848 uint64_t last_eviction_counter;
849 849
850 /* array of page tables, one for each page directory entry */ 850 /* array of page tables, one for each page directory entry */
@@ -865,14 +865,14 @@ struct amdgpu_vm {
865 865
866struct amdgpu_vm_id { 866struct amdgpu_vm_id {
867 struct list_head list; 867 struct list_head list;
868 struct fence *first; 868 struct dma_fence *first;
869 struct amdgpu_sync active; 869 struct amdgpu_sync active;
870 struct fence *last_flush; 870 struct dma_fence *last_flush;
871 atomic64_t owner; 871 atomic64_t owner;
872 872
873 uint64_t pd_gpu_addr; 873 uint64_t pd_gpu_addr;
874 /* last flushed PD/PT update */ 874 /* last flushed PD/PT update */
875 struct fence *flushed_updates; 875 struct dma_fence *flushed_updates;
876 876
877 uint32_t current_gpu_reset_count; 877 uint32_t current_gpu_reset_count;
878 878
@@ -921,7 +921,7 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
921void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, 921void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
922 struct amdgpu_vm *vm); 922 struct amdgpu_vm *vm);
923int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 923int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
924 struct amdgpu_sync *sync, struct fence *fence, 924 struct amdgpu_sync *sync, struct dma_fence *fence,
925 struct amdgpu_job *job); 925 struct amdgpu_job *job);
926int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); 926int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
927void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); 927void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
@@ -957,7 +957,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
957 957
958struct amdgpu_ctx_ring { 958struct amdgpu_ctx_ring {
959 uint64_t sequence; 959 uint64_t sequence;
960 struct fence **fences; 960 struct dma_fence **fences;
961 struct amd_sched_entity entity; 961 struct amd_sched_entity entity;
962}; 962};
963 963
@@ -966,7 +966,7 @@ struct amdgpu_ctx {
966 struct amdgpu_device *adev; 966 struct amdgpu_device *adev;
967 unsigned reset_counter; 967 unsigned reset_counter;
968 spinlock_t ring_lock; 968 spinlock_t ring_lock;
969 struct fence **fences; 969 struct dma_fence **fences;
970 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; 970 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
971 bool preamble_presented; 971 bool preamble_presented;
972}; 972};
@@ -982,8 +982,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
982int amdgpu_ctx_put(struct amdgpu_ctx *ctx); 982int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
983 983
984uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, 984uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
985 struct fence *fence); 985 struct dma_fence *fence);
986struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, 986struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
987 struct amdgpu_ring *ring, uint64_t seq); 987 struct amdgpu_ring *ring, uint64_t seq);
988 988
989int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, 989int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
@@ -1181,10 +1181,10 @@ struct amdgpu_gfx {
1181int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, 1181int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1182 unsigned size, struct amdgpu_ib *ib); 1182 unsigned size, struct amdgpu_ib *ib);
1183void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, 1183void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
1184 struct fence *f); 1184 struct dma_fence *f);
1185int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, 1185int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
1186 struct amdgpu_ib *ib, struct fence *last_vm_update, 1186 struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
1187 struct amdgpu_job *job, struct fence **f); 1187 struct amdgpu_job *job, struct dma_fence **f);
1188int amdgpu_ib_pool_init(struct amdgpu_device *adev); 1188int amdgpu_ib_pool_init(struct amdgpu_device *adev);
1189void amdgpu_ib_pool_fini(struct amdgpu_device *adev); 1189void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
1190int amdgpu_ib_ring_tests(struct amdgpu_device *adev); 1190int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@@ -1225,7 +1225,7 @@ struct amdgpu_cs_parser {
1225 struct amdgpu_bo_list *bo_list; 1225 struct amdgpu_bo_list *bo_list;
1226 struct amdgpu_bo_list_entry vm_pd; 1226 struct amdgpu_bo_list_entry vm_pd;
1227 struct list_head validated; 1227 struct list_head validated;
1228 struct fence *fence; 1228 struct dma_fence *fence;
1229 uint64_t bytes_moved_threshold; 1229 uint64_t bytes_moved_threshold;
1230 uint64_t bytes_moved; 1230 uint64_t bytes_moved;
1231 struct amdgpu_bo_list_entry *evictable; 1231 struct amdgpu_bo_list_entry *evictable;
@@ -1245,7 +1245,7 @@ struct amdgpu_job {
1245 struct amdgpu_ring *ring; 1245 struct amdgpu_ring *ring;
1246 struct amdgpu_sync sync; 1246 struct amdgpu_sync sync;
1247 struct amdgpu_ib *ibs; 1247 struct amdgpu_ib *ibs;
1248 struct fence *fence; /* the hw fence */ 1248 struct dma_fence *fence; /* the hw fence */
1249 uint32_t preamble_status; 1249 uint32_t preamble_status;
1250 uint32_t num_ibs; 1250 uint32_t num_ibs;
1251 void *owner; 1251 void *owner;