author	Chris Wilson <chris@chris-wilson.co.uk>	2016-10-25 08:00:45 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2016-10-25 08:40:39 -0400
commit	f54d1867005c3323f5d8ad83eed823e84226c429 (patch)
tree	026c3f57bc546d3a0205389d0f8e0d02ce8a76ac /drivers/gpu
parent	0fc4f78f44e6c6148cee32456f0d0023ec1c1fd8 (diff)
dma-buf: Rename struct fence to dma_fence
I plan to usurp the short name of struct fence for a core kernel struct,
and so I need to rename the specialised fence/timeline for DMA
operations to make room.

A consensus was reached in
https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html
that making clear this fence applies to DMA operations was a good thing.
Since then the patch has grown a bit as usage increases, so hopefully it
remains a good thing!

v2: rebase, rerun spatch
v3: Compile on msm, spotted a manual fixup that I broke.
v4: Try again for msm, sorry Daniel

coccinelle script:
@@
@@
- struct fence
+ struct dma_fence
@@
@@
- struct fence_ops
+ struct dma_fence_ops
@@
@@
- struct fence_cb
+ struct dma_fence_cb
@@
@@
- struct fence_array
+ struct dma_fence_array
@@
@@
- enum fence_flag_bits
+ enum dma_fence_flag_bits
@@
@@
(
- fence_init
+ dma_fence_init
|
- fence_release
+ dma_fence_release
|
- fence_free
+ dma_fence_free
|
- fence_get
+ dma_fence_get
|
- fence_get_rcu
+ dma_fence_get_rcu
|
- fence_put
+ dma_fence_put
|
- fence_signal
+ dma_fence_signal
|
- fence_signal_locked
+ dma_fence_signal_locked
|
- fence_default_wait
+ dma_fence_default_wait
|
- fence_add_callback
+ dma_fence_add_callback
|
- fence_remove_callback
+ dma_fence_remove_callback
|
- fence_enable_sw_signaling
+ dma_fence_enable_sw_signaling
|
- fence_is_signaled_locked
+ dma_fence_is_signaled_locked
|
- fence_is_signaled
+ dma_fence_is_signaled
|
- fence_is_later
+ dma_fence_is_later
|
- fence_later
+ dma_fence_later
|
- fence_wait_timeout
+ dma_fence_wait_timeout
|
- fence_wait_any_timeout
+ dma_fence_wait_any_timeout
|
- fence_wait
+ dma_fence_wait
|
- fence_context_alloc
+ dma_fence_context_alloc
|
- fence_array_create
+ dma_fence_array_create
|
- to_fence_array
+ to_dma_fence_array
|
- fence_is_array
+ dma_fence_is_array
|
- trace_fence_emit
+ trace_dma_fence_emit
|
- FENCE_TRACE
+ DMA_FENCE_TRACE
|
- FENCE_WARN
+ DMA_FENCE_WARN
|
- FENCE_ERR
+ DMA_FENCE_ERR
)
(
...
)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
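The change is purely mechanical; a script like the one above would be
applied tree-wide with Coccinelle along the lines of (the .cocci file
name here is just a placeholder):

spatch --sp-file dma-fence.cocci --in-place --dir drivers/gpu

After the rename, a minimal driver-side fence provider looks roughly
like the sketch below (hypothetical example_* names, not taken from
this patch); compare amdgpu_fence_ops in the diff:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_fence_lock);

static const char *example_get_driver_name(struct dma_fence *f)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *f)
{
	return "example-timeline";
}

static bool example_enable_signaling(struct dma_fence *f)
{
	/* nothing to arm here; the fence completes via dma_fence_signal() */
	return true;
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name	= example_get_driver_name,
	.get_timeline_name	= example_get_timeline_name,
	.enable_signaling	= example_enable_signaling,
	.wait			= dma_fence_default_wait,
	/* no .release: the core falls back to dma_fence_free() */
};

static struct dma_fence *example_fence_create(u64 context, unsigned seqno)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	/* the reference taken here is returned to the caller */
	dma_fence_init(f, &example_fence_ops, &example_fence_lock,
		       context, seqno);
	return f;
}

A producer would allocate a context once with dma_fence_context_alloc(1),
hand out fences from example_fence_create(), and complete each one with
dma_fence_signal() before dropping its reference with dma_fence_put().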
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	54
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c	8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c	16
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c	22
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_device.c	14
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_display.c	16
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c	58
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c	6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_job.c	22
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_object.c	14
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_object.h	8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c	24
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c	48
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_test.c	12
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h	4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c	10
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h	4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c	26
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h	4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c	26
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h	4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	79
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/cik_sdma.c	6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c	6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c	6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c	12
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c	6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c	6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/si_dma.c	6
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h	4
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.c	67
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.h	26
-rw-r--r--	drivers/gpu/drm/amd/scheduler/sched_fence.c	48
-rw-r--r--	drivers/gpu/drm/drm_atomic.c	2
-rw-r--r--	drivers/gpu/drm/drm_atomic_helper.c	8
-rw-r--r--	drivers/gpu/drm/drm_fops.c	6
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gem.c	6
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gpu.c	46
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gpu.h	4
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_request.c	32
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_request.h	18
-rw-r--r--	drivers/gpu/drm/i915/i915_sw_fence.c	41
-rw-r--r--	drivers/gpu/drm/i915/i915_sw_fence.h	8
-rw-r--r--	drivers/gpu/drm/i915/i915_trace.h	2
-rw-r--r--	drivers/gpu/drm/i915/intel_breadcrumbs.c	4
-rw-r--r--	drivers/gpu/drm/i915/intel_engine_cs.c	2
-rw-r--r--	drivers/gpu/drm/msm/msm_drv.h	2
-rw-r--r--	drivers/gpu/drm/msm/msm_fence.c	28
-rw-r--r--	drivers/gpu/drm/msm/msm_fence.h	2
-rw-r--r--	drivers/gpu/drm/msm/msm_gem.c	14
-rw-r--r--	drivers/gpu/drm/msm/msm_gem.h	2
-rw-r--r--	drivers/gpu/drm/msm/msm_gem_submit.c	8
-rw-r--r--	drivers/gpu/drm/msm/msm_gpu.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c	6
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fence.c	80
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fence.h	6
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_gem.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nv04_fence.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nv10_fence.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nv17_fence.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_fence.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nv84_fence.c	2
-rw-r--r--	drivers/gpu/drm/qxl/qxl_drv.h	4
-rw-r--r--	drivers/gpu/drm/qxl/qxl_release.c	35
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h	10
-rw-r--r--	drivers/gpu/drm/radeon/radeon_device.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_display.c	8
-rw-r--r--	drivers/gpu/drm/radeon/radeon_fence.c	56
-rw-r--r--	drivers/gpu/drm/radeon/radeon_sync.c	6
-rw-r--r--	drivers/gpu/drm/radeon/radeon_uvd.c	2
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	24
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c	22
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_vm.c	8
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	3
-rw-r--r--	drivers/gpu/drm/vgem/vgem_fence.c	53
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_drv.h	2
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_fence.c	26
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_ioctl.c	12
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_kms.c	2
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_plane.c	2
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fence.c	44
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fence.h	8
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_resource.c	2
83 files changed, 679 insertions, 665 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e4644c..283d05927d15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -34,7 +34,7 @@
 #include <linux/kref.h>
 #include <linux/interval_tree.h>
 #include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include <ttm/ttm_bo_api.h>
 #include <ttm/ttm_bo_driver.h>
@@ -378,7 +378,7 @@ struct amdgpu_fence_driver {
 	struct timer_list	fallback_timer;
 	unsigned		num_fences_mask;
 	spinlock_t		lock;
-	struct fence		**fences;
+	struct dma_fence	**fences;
 };
 
 /* some special values for the owner field */
@@ -399,7 +399,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   unsigned irq_type);
 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
@@ -427,7 +427,7 @@ struct amdgpu_bo_va_mapping {
 struct amdgpu_bo_va {
 	/* protected by bo being reserved */
 	struct list_head	bo_list;
-	struct fence		*last_pt_update;
+	struct dma_fence	*last_pt_update;
 	unsigned		ref_count;
 
 	/* protected by vm mutex and spinlock */
@@ -543,7 +543,7 @@ struct amdgpu_sa_bo {
 	struct amdgpu_sa_manager	*manager;
 	unsigned			soffset;
 	unsigned			eoffset;
-	struct fence			*fence;
+	struct dma_fence		*fence;
 };
 
 /*
@@ -566,19 +566,19 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
  */
 struct amdgpu_sync {
 	DECLARE_HASHTABLE(fences, 4);
-	struct fence		*last_vm_update;
+	struct dma_fence	*last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *f);
+		      struct dma_fence *f);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 				     struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);
@@ -703,10 +703,10 @@ struct amdgpu_flip_work {
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_abo;
-	struct fence			*excl;
+	struct dma_fence		*excl;
 	unsigned			shared_count;
-	struct fence			**shared;
-	struct fence_cb			cb;
+	struct dma_fence		**shared;
+	struct dma_fence_cb		cb;
 	bool				async;
 };
 
@@ -742,7 +742,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
-		      struct fence **f);
+		      struct dma_fence **f);
 
 struct amdgpu_ring {
 	struct amdgpu_device		*adev;
@@ -844,7 +844,7 @@ struct amdgpu_vm {
 	/* contains the page directory */
 	struct amdgpu_bo	*page_directory;
 	unsigned		max_pde_used;
-	struct fence		*page_directory_fence;
+	struct dma_fence	*page_directory_fence;
 	uint64_t		last_eviction_counter;
 
 	/* array of page tables, one for each page directory entry */
@@ -865,14 +865,14 @@ struct amdgpu_vm {
 
 struct amdgpu_vm_id {
 	struct list_head	list;
-	struct fence		*first;
+	struct dma_fence	*first;
 	struct amdgpu_sync	active;
-	struct fence		*last_flush;
+	struct dma_fence	*last_flush;
 	atomic64_t		owner;
 
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct fence		*flushed_updates;
+	struct dma_fence	*flushed_updates;
 
 	uint32_t		current_gpu_reset_count;
 
@@ -921,7 +921,7 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence,
+		      struct amdgpu_sync *sync, struct dma_fence *fence,
 		      struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
@@ -957,7 +957,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 struct amdgpu_ctx_ring {
 	uint64_t		sequence;
-	struct fence		**fences;
+	struct dma_fence	**fences;
 	struct amd_sched_entity	entity;
 };
 
@@ -966,7 +966,7 @@ struct amdgpu_ctx {
 	struct amdgpu_device	*adev;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
-	struct fence		**fences;
+	struct dma_fence	**fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 	bool			preamble_presented;
 };
@@ -982,8 +982,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct fence *fence);
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+			      struct dma_fence *fence);
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				   struct amdgpu_ring *ring, uint64_t seq);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
@@ -1181,10 +1181,10 @@ struct amdgpu_gfx {
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  unsigned size, struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
-		    struct fence *f);
+		    struct dma_fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ib, struct fence *last_vm_update,
-		       struct amdgpu_job *job, struct fence **f);
+		       struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
+		       struct amdgpu_job *job, struct dma_fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@@ -1225,7 +1225,7 @@ struct amdgpu_cs_parser {
 	struct amdgpu_bo_list		*bo_list;
 	struct amdgpu_bo_list_entry	vm_pd;
 	struct list_head		validated;
-	struct fence			*fence;
+	struct dma_fence		*fence;
 	uint64_t			bytes_moved_threshold;
 	uint64_t			bytes_moved;
 	struct amdgpu_bo_list_entry	*evictable;
@@ -1245,7 +1245,7 @@ struct amdgpu_job {
 	struct amdgpu_ring	*ring;
 	struct amdgpu_sync	sync;
 	struct amdgpu_ib	*ibs;
-	struct fence		*fence; /* the hw fence */
+	struct dma_fence	*fence; /* the hw fence */
 	uint32_t		preamble_status;
 	uint32_t		num_ibs;
 	void			*owner;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 345305235349..cc97eee93226 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	int i, r;
 
 	start_jiffies = jiffies;
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 				       false);
 		if (r)
 			goto exit_do_move;
-		r = fence_wait(fence, false);
+		r = dma_fence_wait(fence, false);
 		if (r)
 			goto exit_do_move;
-		fence_put(fence);
+		dma_fence_put(fence);
 	}
 	end_jiffies = jiffies;
 	r = jiffies_to_msecs(end_jiffies - start_jiffies);
 
exit_do_move:
 	if (fence)
-		fence_put(fence);
+		dma_fence_put(fence);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b0f6e6957536..5d582265e929 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -719,7 +719,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
-	fence_put(parser->fence);
+	dma_fence_put(parser->fence);
 
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
@@ -756,7 +756,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
-			struct fence *f;
+			struct dma_fence *f;
 
 			/* ignore duplicates */
 			bo = p->bo_list->array[i].robj;
@@ -956,7 +956,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 		for (j = 0; j < num_deps; ++j) {
 			struct amdgpu_ring *ring;
 			struct amdgpu_ctx *ctx;
-			struct fence *fence;
+			struct dma_fence *fence;
 
 			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 					       deps[j].ip_instance,
@@ -978,7 +978,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			} else if (fence) {
 				r = amdgpu_sync_fence(adev, &p->job->sync,
 						      fence);
-				fence_put(fence);
+				dma_fence_put(fence);
 				amdgpu_ctx_put(ctx);
 				if (r)
 					return r;
@@ -1008,7 +1008,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
 	job->owner = p->filp;
 	job->fence_ctx = entity->fence_context;
-	p->fence = fence_get(&job->base.s_fence->finished);
+	p->fence = dma_fence_get(&job->base.s_fence->finished);
 	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
 	job->uf_sequence = cs->out.handle;
 	amdgpu_job_free_resources(job);
@@ -1091,7 +1091,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
 	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
-	struct fence *fence;
+	struct dma_fence *fence;
 	long r;
 
 	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
@@ -1107,8 +1107,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(fence))
 		r = PTR_ERR(fence);
 	else if (fence) {
-		r = fence_wait_timeout(fence, true, timeout);
-		fence_put(fence);
+		r = dma_fence_wait_timeout(fence, true, timeout);
+		dma_fence_put(fence);
 	} else
 		r = 1;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a5e2fcbef0f0..99bbc860322f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
 	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
-			      sizeof(struct fence*), GFP_KERNEL);
+			      sizeof(struct dma_fence*), GFP_KERNEL);
 	if (!ctx->fences)
 		return -ENOMEM;
 
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		for (j = 0; j < amdgpu_sched_jobs; ++j)
-			fence_put(ctx->rings[i].fences[j]);
+			dma_fence_put(ctx->rings[i].fences[j]);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 }
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct fence *fence)
+			      struct dma_fence *fence)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	uint64_t seq = cring->sequence;
 	unsigned idx = 0;
-	struct fence *other = NULL;
+	struct dma_fence *other = NULL;
 
 	idx = seq & (amdgpu_sched_jobs - 1);
 	other = cring->fences[idx];
 	if (other) {
 		signed long r;
-		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
 		if (r < 0)
 			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
 	}
 
-	fence_get(fence);
+	dma_fence_get(fence);
 
 	spin_lock(&ctx->ring_lock);
 	cring->fences[idx] = fence;
 	cring->sequence++;
 	spin_unlock(&ctx->ring_lock);
 
-	fence_put(other);
+	dma_fence_put(other);
 
 	return seq;
 }
 
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				   struct amdgpu_ring *ring, uint64_t seq)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-	struct fence *fence;
+	struct dma_fence *fence;
 
 	spin_lock(&ctx->ring_lock);
 
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 		return NULL;
 	}
 
-	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
 	spin_unlock(&ctx->ring_lock);
 
 	return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4f4a9239069..0262b43c8f0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1599,7 +1599,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->vm_manager.vm_pte_funcs = NULL;
 	adev->vm_manager.vm_pte_num_rings = 0;
 	adev->gart.gart_funcs = NULL;
-	adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 
 	adev->smc_rreg = &amdgpu_invalid_rreg;
 	adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -2193,7 +2193,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
 					   struct amdgpu_ring *ring,
 					   struct amdgpu_bo *bo,
-					   struct fence **fence)
+					   struct dma_fence **fence)
 {
 	uint32_t domain;
 	int r;
@@ -2312,30 +2312,30 @@ retry:
 	if (need_full_reset && amdgpu_need_backup(adev)) {
 		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 		struct amdgpu_bo *bo, *tmp;
-		struct fence *fence = NULL, *next = NULL;
+		struct dma_fence *fence = NULL, *next = NULL;
 
 		DRM_INFO("recover vram bo from shadow\n");
 		mutex_lock(&adev->shadow_list_lock);
 		list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
 			amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
 			if (fence) {
-				r = fence_wait(fence, false);
+				r = dma_fence_wait(fence, false);
 				if (r) {
 					WARN(r, "recovery from shadow isn't comleted\n");
 					break;
 				}
 			}
 
-			fence_put(fence);
+			dma_fence_put(fence);
 			fence = next;
 		}
 		mutex_unlock(&adev->shadow_list_lock);
 		if (fence) {
-			r = fence_wait(fence, false);
+			r = dma_fence_wait(fence, false);
 			if (r)
 				WARN(r, "recovery from shadow isn't comleted\n");
 		}
-		fence_put(fence);
+		dma_fence_put(fence);
 	}
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 083e2b429872..075c0d7db205 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,29 +35,29 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
-static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
+static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amdgpu_flip_work *work =
 		container_of(cb, struct amdgpu_flip_work, cb);
 
-	fence_put(f);
+	dma_fence_put(f);
 	schedule_work(&work->flip_work.work);
 }
 
 static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
-				     struct fence **f)
+				     struct dma_fence **f)
 {
-	struct fence *fence= *f;
+	struct dma_fence *fence= *f;
 
 	if (fence == NULL)
 		return false;
 
 	*f = NULL;
 
-	if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+	if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
 		return true;
 
-	fence_put(fence);
+	dma_fence_put(fence);
 	return false;
 }
 
@@ -244,9 +244,9 @@ unreserve:
 
cleanup:
 	amdgpu_bo_unref(&work->old_abo);
-	fence_put(work->excl);
+	dma_fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
-		fence_put(work->shared[i]);
+		dma_fence_put(work->shared[i]);
 	kfree(work->shared);
 	kfree(work);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f4b897..57552c79ec58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
  */
 
 struct amdgpu_fence {
-	struct fence base;
+	struct dma_fence base;
 
 	/* RB, DMA, etc. */
 	struct amdgpu_ring		*ring;
@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void)
 /*
  * Cast helper
  */
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 
@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct fence *old, **ptr;
+	struct dma_fence *old, **ptr;
 	uint32_t seq;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 
 	seq = ++ring->fence_drv.sync_seq;
 	fence->ring = ring;
-	fence_init(&fence->base, &amdgpu_fence_ops,
+	dma_fence_init(&fence->base, &amdgpu_fence_ops,
 		   &ring->fence_drv.lock,
 		   adev->fence_context + ring->idx,
 		   seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       seq, AMDGPU_FENCE_FLAG_INT);
 
@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
 	old = rcu_dereference_protected(*ptr, 1);
-	if (old && !fence_is_signaled(old)) {
+	if (old && !dma_fence_is_signaled(old)) {
 		DRM_INFO("rcu slot is busy\n");
-		fence_wait(old, false);
+		dma_fence_wait(old, false);
 	}
 
-	rcu_assign_pointer(*ptr, fence_get(&fence->base));
+	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
 
 	*f = &fence->base;
 
@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 	seq &= drv->num_fences_mask;
 
 	do {
-		struct fence *fence, **ptr;
+		struct dma_fence *fence, **ptr;
 
 		++last_seq;
 		last_seq &= drv->num_fences_mask;
@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 		if (!fence)
 			continue;
 
-		r = fence_signal(fence);
+		r = dma_fence_signal(fence);
 		if (!r)
-			FENCE_TRACE(fence, "signaled from irq context\n");
+			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
 		else
 			BUG();
 
-		fence_put(fence);
+		dma_fence_put(fence);
 	} while (last_seq != seq);
 }
 
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
 	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
-	struct fence *fence, **ptr;
+	struct dma_fence *fence, **ptr;
 	int r;
 
 	if (!seq)
@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 	rcu_read_lock();
 	fence = rcu_dereference(*ptr);
-	if (!fence || !fence_get_rcu(fence)) {
+	if (!fence || !dma_fence_get_rcu(fence)) {
 		rcu_read_unlock();
 		return 0;
 	}
 	rcu_read_unlock();
 
-	r = fence_wait(fence, false);
-	fence_put(fence);
+	r = dma_fence_wait(fence, false);
+	dma_fence_put(fence);
 	return r;
 }
 
@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amd_sched_fini(&ring->sched);
 		del_timer_sync(&ring->fence_drv.fallback_timer);
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
-			fence_put(ring->fence_drv.fences[j]);
+			dma_fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
 		ring->fence_drv.fences = NULL;
 		ring->fence_drv.initialized = false;
@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
  * Common fence implementation
  */
 
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "amdgpu";
 }
 
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	return (const char *)fence->ring->name;
@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
  * to fence_queue that checks if this fence is signaled, and if so it
  * signals the fence and removes itself.
  */
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	if (!timer_pending(&ring->fence_drv.fallback_timer))
 		amdgpu_fence_schedule_fallback(ring);
 
-	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 
 	return true;
 }
@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
  */
 static void amdgpu_fence_free(struct rcu_head *rcu)
 {
-	struct fence *f = container_of(rcu, struct fence, rcu);
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	kmem_cache_free(amdgpu_fence_slab, fence);
 }
@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
  * This function is called when the reference count becomes zero.
  * It just RCU schedules freeing up the fence.
  */
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
 {
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
 	.enable_signaling = amdgpu_fence_enable_signaling,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amdgpu_fence_release,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6a6c86c9c169..c3672dfcfd6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * Free an IB (all asics).
  */
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
-		    struct fence *f)
+		    struct dma_fence *f)
 {
 	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
 }
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
  * to SI there was just a DE IB.
  */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ibs, struct fence *last_vm_update,
-		       struct amdgpu_job *job, struct fence **f)
+		       struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
+		       struct amdgpu_job *job, struct dma_fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib *ib = &ibs[0];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 8c5807994073..a0de6286c453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
-	struct fence *f;
+	struct dma_fence *f;
 	unsigned i;
 
 	/* use sched fence if available */
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
 	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-	fence_put(job->fence);
+	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	kfree(job);
 }
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
 {
 	amdgpu_job_free_resources(job);
 
-	fence_put(job->fence);
+	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	kfree(job);
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
-		      struct fence **f)
+		      struct dma_fence **f)
 {
 	int r;
 	job->ring = ring;
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 
 	job->owner = owner;
 	job->fence_ctx = entity->fence_context;
-	*f = fence_get(&job->base.s_fence->finished);
+	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
 	amd_sched_entity_push_job(&job->base);
 
 	return 0;
 }
 
-static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 {
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 
-	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+	struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
 
 	if (fence == NULL && vm && !job->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 	return fence;
 }
 
-static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 {
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	struct amdgpu_job *job;
 	int r;
 
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 
 	/* if gpu reset, hw fence will be replaced here */
-	fence_put(job->fence);
-	job->fence = fence_get(fence);
+	dma_fence_put(job->fence);
+	job->fence = dma_fence_get(fence);
 	amdgpu_job_free_resources(job);
 	return fence;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index aa074fac0c7f..55e142a5ff5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -383,7 +383,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 
 	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		if (adev->mman.buffer_funcs_ring == NULL ||
 		    !adev->mman.buffer_funcs_ring->ready) {
@@ -403,9 +403,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
 		amdgpu_bo_fence(bo, fence, false);
 		amdgpu_bo_unreserve(bo);
-		fence_put(bo->tbo.moving);
-		bo->tbo.moving = fence_get(fence);
-		fence_put(fence);
+		dma_fence_put(bo->tbo.moving);
+		bo->tbo.moving = dma_fence_get(fence);
+		dma_fence_put(fence);
 	}
 	*bo_ptr = bo;
 
@@ -491,7 +491,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
 			       struct reservation_object *resv,
-			       struct fence **fence,
+			       struct dma_fence **fence,
 			       bool direct)
 
 {
@@ -523,7 +523,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 				  struct amdgpu_ring *ring,
 				  struct amdgpu_bo *bo,
 				  struct reservation_object *resv,
-				  struct fence **fence,
+				  struct dma_fence **fence,
 				  bool direct)
 
 {
@@ -926,7 +926,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
  * @shared: true if fence should be added shared
  *
  */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared)
 {
 	struct reservation_object *resv = bo->tbo.resv;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 8255034d73eb..3e785ed3cb4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -156,19 +156,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *new_mem);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
 			       struct reservation_object *resv,
-			       struct fence **fence, bool direct);
+			       struct dma_fence **fence, bool direct);
 int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 				  struct amdgpu_ring *ring,
 				  struct amdgpu_bo *bo,
 				  struct reservation_object *resv,
-				  struct fence **fence,
+				  struct dma_fence **fence,
 				  bool direct);
 
 
@@ -200,7 +200,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
 		       struct amdgpu_sa_bo **sa_bo,
-		       struct fence *fence);
+		       struct dma_fence *fence);
 #if defined(CONFIG_DEBUG_FS)
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 				  struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index d8af37a845f4..fd26c4b8d793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 	}
 	list_del_init(&sa_bo->olist);
 	list_del_init(&sa_bo->flist);
-	fence_put(sa_bo->fence);
+	dma_fence_put(sa_bo->fence);
 	kfree(sa_bo);
 }
 
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
 	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
 	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
 		if (sa_bo->fence == NULL ||
-		    !fence_is_signaled(sa_bo->fence)) {
+		    !dma_fence_is_signaled(sa_bo->fence)) {
 			return;
 		}
 		amdgpu_sa_bo_remove_locked(sa_bo);
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-				   struct fence **fences,
+				   struct dma_fence **fences,
 				   unsigned *tries)
 {
 	struct amdgpu_sa_bo *best_bo = NULL;
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					 struct amdgpu_sa_bo, flist);
 
-		if (!fence_is_signaled(sa_bo->fence)) {
+		if (!dma_fence_is_signaled(sa_bo->fence)) {
 			fences[i] = sa_bo->fence;
 			continue;
 		}
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+	struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
 	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
 	unsigned count;
 	int i, r;
@@ -356,14 +356,14 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 
 	for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
 		if (fences[i])
-			fences[count++] = fence_get(fences[i]);
+			fences[count++] = dma_fence_get(fences[i]);
 
 	if (count) {
 		spin_unlock(&sa_manager->wq.lock);
-		t = fence_wait_any_timeout(fences, count, false,
+		t = dma_fence_wait_any_timeout(fences, count, false,
 					   MAX_SCHEDULE_TIMEOUT);
 		for (i = 0; i < count; ++i)
-			fence_put(fences[i]);
+			dma_fence_put(fences[i]);
 
 		r = (t > 0) ? 0 : t;
 		spin_lock(&sa_manager->wq.lock);
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-		       struct fence *fence)
+		       struct dma_fence *fence)
 {
 	struct amdgpu_sa_manager *sa_manager;
 
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
 	sa_manager = (*sa_bo)->manager;
 	spin_lock(&sa_manager->wq.lock);
-	if (fence && !fence_is_signaled(fence)) {
+	if (fence && !dma_fence_is_signaled(fence)) {
 		uint32_t idx;
 
-		(*sa_bo)->fence = fence_get(fence);
+		(*sa_bo)->fence = dma_fence_get(fence);
 		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
 		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5c8d3022fb87..ed814e6d0207 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -34,7 +34,7 @@
 
 struct amdgpu_sync_entry {
 	struct hlist_node	node;
-	struct fence		*fence;
+	struct dma_fence	*fence;
 };
 
 static struct kmem_cache *amdgpu_sync_slab;
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
  *
  * Test if the fence was issued by us.
  */
-static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+				 struct dma_fence *f)
 {
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
  *
 * Extract who originally created the fence.
 */
-static void *amdgpu_sync_get_owner(struct fence *f)
+static void *amdgpu_sync_get_owner(struct dma_fence *f)
 {
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
  *
 * Either keep the existing fence or the new one, depending which one is later.
 */
-static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+static void amdgpu_sync_keep_later(struct dma_fence **keep,
+				   struct dma_fence *fence)
 {
-	if (*keep && fence_is_later(*keep, fence))
+	if (*keep && dma_fence_is_later(*keep, fence))
 		return;
 
-	fence_put(*keep);
-	*keep = fence_get(fence);
+	dma_fence_put(*keep);
+	*keep = dma_fence_get(fence);
 }
 
 /**
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
 *
 */
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *f)
+		      struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return -ENOMEM;
 
 	hash_add(sync->fences, &e->node, f->context);
-	e->fence = fence_get(f);
+	e->fence = dma_fence_get(f);
 	return 0;
 }
 
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     void *owner)
 {
 	struct reservation_object_list *flist;
-	struct fence *f;
+	struct dma_fence *f;
 	void *fence_owner;
 	unsigned i;
 	int r = 0;
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
235 struct amdgpu_ring *ring) 237 struct amdgpu_ring *ring)
236{ 238{
237 struct amdgpu_sync_entry *e; 239 struct amdgpu_sync_entry *e;
238 struct hlist_node *tmp; 240 struct hlist_node *tmp;
239 int i; 241 int i;
240 242
241 hash_for_each_safe(sync->fences, i, tmp, e, node) { 243 hash_for_each_safe(sync->fences, i, tmp, e, node) {
242 struct fence *f = e->fence; 244 struct dma_fence *f = e->fence;
243 struct amd_sched_fence *s_fence = to_amd_sched_fence(f); 245 struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
244 246
245 if (ring && s_fence) { 247 if (ring && s_fence) {
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
247 * when they are scheduled. 249 * when they are scheduled.
248 */ 250 */
249 if (s_fence->sched == &ring->sched) { 251 if (s_fence->sched == &ring->sched) {
250 if (fence_is_signaled(&s_fence->scheduled)) 252 if (dma_fence_is_signaled(&s_fence->scheduled))
251 continue; 253 continue;
252 254
253 return &s_fence->scheduled; 255 return &s_fence->scheduled;
254 } 256 }
255 } 257 }
256 258
257 if (fence_is_signaled(f)) { 259 if (dma_fence_is_signaled(f)) {
258 hash_del(&e->node); 260 hash_del(&e->node);
259 fence_put(f); 261 dma_fence_put(f);
260 kmem_cache_free(amdgpu_sync_slab, e); 262 kmem_cache_free(amdgpu_sync_slab, e);
261 continue; 263 continue;
262 } 264 }
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
274 * 276 *
275 * Get and removes the next fence from the sync object not signaled yet. 277 * Get and removes the next fence from the sync object not signaled yet.
276 */ 278 */
277struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) 279struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
278{ 280{
279 struct amdgpu_sync_entry *e; 281 struct amdgpu_sync_entry *e;
280 struct hlist_node *tmp; 282 struct hlist_node *tmp;
281 struct fence *f; 283 struct dma_fence *f;
282 int i; 284 int i;
283 285
284 hash_for_each_safe(sync->fences, i, tmp, e, node) { 286 hash_for_each_safe(sync->fences, i, tmp, e, node) {
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
288 hash_del(&e->node); 290 hash_del(&e->node);
289 kmem_cache_free(amdgpu_sync_slab, e); 291 kmem_cache_free(amdgpu_sync_slab, e);
290 292
291 if (!fence_is_signaled(f)) 293 if (!dma_fence_is_signaled(f))
292 return f; 294 return f;
293 295
294 fence_put(f); 296 dma_fence_put(f);
295 } 297 }
296 return NULL; 298 return NULL;
297} 299}
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
311 313
312 hash_for_each_safe(sync->fences, i, tmp, e, node) { 314 hash_for_each_safe(sync->fences, i, tmp, e, node) {
313 hash_del(&e->node); 315 hash_del(&e->node);
314 fence_put(e->fence); 316 dma_fence_put(e->fence);
315 kmem_cache_free(amdgpu_sync_slab, e); 317 kmem_cache_free(amdgpu_sync_slab, e);
316 } 318 }
317 319
318 fence_put(sync->last_vm_update); 320 dma_fence_put(sync->last_vm_update);
319} 321}
320 322
321/** 323/**
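
amdgpu_sync_keep_later() above is the canonical form of a small idiom: drop
the reference on the fence being replaced, take one on the fence kept. Note
that dma_fence_is_later() only compares sequence numbers, so it is only
meaningful for two fences on the same context; that holds here because
last_vm_update is a single timeline. A standalone sketch of the idiom (not
driver code):

    #include <linux/dma-fence.h>

    /* Keep whichever of the two fences signals later (same context). */
    static void keep_later(struct dma_fence **keep, struct dma_fence *fence)
    {
            if (*keep && dma_fence_is_later(*keep, fence))
                    return;                 /* current fence already covers it */

            dma_fence_put(*keep);           /* NULL-safe */
            *keep = dma_fence_get(fence);   /* own reference on the survivor */
    }
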
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b827c75e95de..e05a24325eeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
78 void *gtt_map, *vram_map; 78 void *gtt_map, *vram_map;
79 void **gtt_start, **gtt_end; 79 void **gtt_start, **gtt_end;
80 void **vram_start, **vram_end; 80 void **vram_start, **vram_end;
81 struct fence *fence = NULL; 81 struct dma_fence *fence = NULL;
82 82
83 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 83 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
84 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 84 AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
118 goto out_lclean_unpin; 118 goto out_lclean_unpin;
119 } 119 }
120 120
121 r = fence_wait(fence, false); 121 r = dma_fence_wait(fence, false);
122 if (r) { 122 if (r) {
123 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); 123 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
124 goto out_lclean_unpin; 124 goto out_lclean_unpin;
125 } 125 }
126 126
127 fence_put(fence); 127 dma_fence_put(fence);
128 128
129 r = amdgpu_bo_kmap(vram_obj, &vram_map); 129 r = amdgpu_bo_kmap(vram_obj, &vram_map);
130 if (r) { 130 if (r) {
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
163 goto out_lclean_unpin; 163 goto out_lclean_unpin;
164 } 164 }
165 165
166 r = fence_wait(fence, false); 166 r = dma_fence_wait(fence, false);
167 if (r) { 167 if (r) {
168 DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); 168 DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
169 goto out_lclean_unpin; 169 goto out_lclean_unpin;
170 } 170 }
171 171
172 fence_put(fence); 172 dma_fence_put(fence);
173 173
174 r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); 174 r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
175 if (r) { 175 if (r) {
@@ -216,7 +216,7 @@ out_lclean:
216 amdgpu_bo_unref(&gtt_obj[i]); 216 amdgpu_bo_unref(&gtt_obj[i]);
217 } 217 }
218 if (fence) 218 if (fence)
219 fence_put(fence); 219 dma_fence_put(fence);
220 break; 220 break;
221 } 221 }
222 222
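
The GTT/VRAM move test above uses the simplest wait form:
dma_fence_wait(fence, intr) blocks until the fence signals and returns 0, or
a negative error (with intr set, -ERESTARTSYS when a signal interrupts the
wait). The caller still owns the reference it waited on and must drop it; a
small sketch of the pairing:

    #include <linux/dma-fence.h>

    /* Consume one reference: wait for the fence, then release it. */
    static long wait_and_release(struct dma_fence *fence)
    {
            long r = dma_fence_wait(fence, false);  /* uninterruptible */

            dma_fence_put(fence);   /* drop our reference either way */
            return r;               /* 0 once signaled, else an error */
    }
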
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 067e5e683bb3..bb964a8ff938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
104 __field(struct amdgpu_device *, adev) 104 __field(struct amdgpu_device *, adev)
105 __field(struct amd_sched_job *, sched_job) 105 __field(struct amd_sched_job *, sched_job)
106 __field(struct amdgpu_ib *, ib) 106 __field(struct amdgpu_ib *, ib)
107 __field(struct fence *, fence) 107 __field(struct dma_fence *, fence)
108 __field(char *, ring_name) 108 __field(char *, ring_name)
109 __field(u32, num_ibs) 109 __field(u32, num_ibs)
110 ), 110 ),
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
129 __field(struct amdgpu_device *, adev) 129 __field(struct amdgpu_device *, adev)
130 __field(struct amd_sched_job *, sched_job) 130 __field(struct amd_sched_job *, sched_job)
131 __field(struct amdgpu_ib *, ib) 131 __field(struct amdgpu_ib *, ib)
132 __field(struct fence *, fence) 132 __field(struct dma_fence *, fence)
133 __field(char *, ring_name) 133 __field(char *, ring_name)
134 __field(u32, num_ibs) 134 __field(u32, num_ibs)
135 ), 135 ),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691f56b5..a743aeabc767 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -268,7 +268,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
268 struct amdgpu_device *adev; 268 struct amdgpu_device *adev;
269 struct amdgpu_ring *ring; 269 struct amdgpu_ring *ring;
270 uint64_t old_start, new_start; 270 uint64_t old_start, new_start;
271 struct fence *fence; 271 struct dma_fence *fence;
272 int r; 272 int r;
273 273
274 adev = amdgpu_get_adev(bo->bdev); 274 adev = amdgpu_get_adev(bo->bdev);
@@ -316,7 +316,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
316 return r; 316 return r;
317 317
318 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); 318 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
319 fence_put(fence); 319 dma_fence_put(fence);
320 return r; 320 return r;
321} 321}
322 322
@@ -1247,7 +1247,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
1247 uint64_t dst_offset, 1247 uint64_t dst_offset,
1248 uint32_t byte_count, 1248 uint32_t byte_count,
1249 struct reservation_object *resv, 1249 struct reservation_object *resv,
1250 struct fence **fence, bool direct_submit) 1250 struct dma_fence **fence, bool direct_submit)
1251{ 1251{
1252 struct amdgpu_device *adev = ring->adev; 1252 struct amdgpu_device *adev = ring->adev;
1253 struct amdgpu_job *job; 1253 struct amdgpu_job *job;
@@ -1294,7 +1294,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
1294 if (direct_submit) { 1294 if (direct_submit) {
1295 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, 1295 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
1296 NULL, NULL, fence); 1296 NULL, NULL, fence);
1297 job->fence = fence_get(*fence); 1297 job->fence = dma_fence_get(*fence);
1298 if (r) 1298 if (r)
1299 DRM_ERROR("Error scheduling IBs (%d)\n", r); 1299 DRM_ERROR("Error scheduling IBs (%d)\n", r);
1300 amdgpu_job_free(job); 1300 amdgpu_job_free(job);
@@ -1315,7 +1315,7 @@ error_free:
1315int amdgpu_fill_buffer(struct amdgpu_bo *bo, 1315int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1316 uint32_t src_data, 1316 uint32_t src_data,
1317 struct reservation_object *resv, 1317 struct reservation_object *resv,
1318 struct fence **fence) 1318 struct dma_fence **fence)
1319{ 1319{
1320 struct amdgpu_device *adev = bo->adev; 1320 struct amdgpu_device *adev = bo->adev;
1321 struct amdgpu_job *job; 1321 struct amdgpu_job *job;
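
amdgpu_copy_buffer() and amdgpu_fill_buffer() hand their result back through
a struct dma_fence ** out-parameter; the caller receives its own reference
and owes the final dma_fence_put(), which is exactly what amdgpu_move_blit()
does after ttm_bo_pipeline_move(). A reduced sketch of that calling
convention, using the parameter names from the header below (declarations
and setup elided, not complete driver code):

    struct dma_fence *fence = NULL;
    int r;

    r = amdgpu_copy_buffer(ring, src_offset, dst_offset, byte_count,
                           resv, &fence, false);
    if (r)
            return r;

    r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
    dma_fence_put(fence);           /* our reference, our put */
    return r;
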
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9812c805326c..3f293e189378 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -77,11 +77,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
77 uint64_t dst_offset, 77 uint64_t dst_offset,
78 uint32_t byte_count, 78 uint32_t byte_count,
79 struct reservation_object *resv, 79 struct reservation_object *resv,
80 struct fence **fence, bool direct_submit); 80 struct dma_fence **fence, bool direct_submit);
81int amdgpu_fill_buffer(struct amdgpu_bo *bo, 81int amdgpu_fill_buffer(struct amdgpu_bo *bo,
82 uint32_t src_data, 82 uint32_t src_data,
83 struct reservation_object *resv, 83 struct reservation_object *resv,
84 struct fence **fence); 84 struct dma_fence **fence);
85 85
86int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); 86int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
87bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); 87bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e3281cacc586..0f6575e7ef8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
333 for (i = 0; i < adev->uvd.max_handles; ++i) { 333 for (i = 0; i < adev->uvd.max_handles; ++i) {
334 uint32_t handle = atomic_read(&adev->uvd.handles[i]); 334 uint32_t handle = atomic_read(&adev->uvd.handles[i]);
335 if (handle != 0 && adev->uvd.filp[i] == filp) { 335 if (handle != 0 && adev->uvd.filp[i] == filp) {
336 struct fence *fence; 336 struct dma_fence *fence;
337 337
338 r = amdgpu_uvd_get_destroy_msg(ring, handle, 338 r = amdgpu_uvd_get_destroy_msg(ring, handle,
339 false, &fence); 339 false, &fence);
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
342 continue; 342 continue;
343 } 343 }
344 344
345 fence_wait(fence, false); 345 dma_fence_wait(fence, false);
346 fence_put(fence); 346 dma_fence_put(fence);
347 347
348 adev->uvd.filp[i] = NULL; 348 adev->uvd.filp[i] = NULL;
349 atomic_set(&adev->uvd.handles[i], 0); 349 atomic_set(&adev->uvd.handles[i], 0);
@@ -909,14 +909,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
909} 909}
910 910
911static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, 911static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
912 bool direct, struct fence **fence) 912 bool direct, struct dma_fence **fence)
913{ 913{
914 struct ttm_validate_buffer tv; 914 struct ttm_validate_buffer tv;
915 struct ww_acquire_ctx ticket; 915 struct ww_acquire_ctx ticket;
916 struct list_head head; 916 struct list_head head;
917 struct amdgpu_job *job; 917 struct amdgpu_job *job;
918 struct amdgpu_ib *ib; 918 struct amdgpu_ib *ib;
919 struct fence *f = NULL; 919 struct dma_fence *f = NULL;
920 struct amdgpu_device *adev = ring->adev; 920 struct amdgpu_device *adev = ring->adev;
921 uint64_t addr; 921 uint64_t addr;
922 int i, r; 922 int i, r;
@@ -960,7 +960,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
960 960
961 if (direct) { 961 if (direct) {
962 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); 962 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
963 job->fence = fence_get(f); 963 job->fence = dma_fence_get(f);
964 if (r) 964 if (r)
965 goto err_free; 965 goto err_free;
966 966
@@ -975,9 +975,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
975 ttm_eu_fence_buffer_objects(&ticket, &head, f); 975 ttm_eu_fence_buffer_objects(&ticket, &head, f);
976 976
977 if (fence) 977 if (fence)
978 *fence = fence_get(f); 978 *fence = dma_fence_get(f);
979 amdgpu_bo_unref(&bo); 979 amdgpu_bo_unref(&bo);
980 fence_put(f); 980 dma_fence_put(f);
981 981
982 return 0; 982 return 0;
983 983
@@ -993,7 +993,7 @@ err:
993 crash the vcpu so just try to emit a dummy create/destroy msg to 993 crash the vcpu so just try to emit a dummy create/destroy msg to
994 avoid this */ 994 avoid this */
995int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 995int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
996 struct fence **fence) 996 struct dma_fence **fence)
997{ 997{
998 struct amdgpu_device *adev = ring->adev; 998 struct amdgpu_device *adev = ring->adev;
999 struct amdgpu_bo *bo; 999 struct amdgpu_bo *bo;
@@ -1042,7 +1042,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1042} 1042}
1043 1043
1044int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 1044int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
1045 bool direct, struct fence **fence) 1045 bool direct, struct dma_fence **fence)
1046{ 1046{
1047 struct amdgpu_device *adev = ring->adev; 1047 struct amdgpu_device *adev = ring->adev;
1048 struct amdgpu_bo *bo; 1048 struct amdgpu_bo *bo;
@@ -1128,7 +1128,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
1128 */ 1128 */
1129int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) 1129int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1130{ 1130{
1131 struct fence *fence; 1131 struct dma_fence *fence;
1132 long r; 1132 long r;
1133 1133
1134 r = amdgpu_uvd_get_create_msg(ring, 1, NULL); 1134 r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -1143,7 +1143,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1143 goto error; 1143 goto error;
1144 } 1144 }
1145 1145
1146 r = fence_wait_timeout(fence, false, timeout); 1146 r = dma_fence_wait_timeout(fence, false, timeout);
1147 if (r == 0) { 1147 if (r == 0) {
1148 DRM_ERROR("amdgpu: IB test timed out.\n"); 1148 DRM_ERROR("amdgpu: IB test timed out.\n");
1149 r = -ETIMEDOUT; 1149 r = -ETIMEDOUT;
@@ -1154,7 +1154,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1154 r = 0; 1154 r = 0;
1155 } 1155 }
1156 1156
1157 fence_put(fence); 1157 dma_fence_put(fence);
1158 1158
1159error: 1159error:
1160 return r; 1160 return r;
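
The IB test above decodes the three-way return convention of
dma_fence_wait_timeout(): a negative value is an error from the wait itself,
0 means the timeout expired before the fence signaled, and a positive value
is the timeout left over (in jiffies) once it did. Reduced to the
convention, as it recurs in every ring test below:

    long r = dma_fence_wait_timeout(fence, false, timeout);

    if (r == 0)
            r = -ETIMEDOUT;         /* never signaled within the timeout */
    else if (r > 0)
            r = 0;                  /* signaled; discard leftover jiffies */
    /* r < 0: propagate the wait error as-is */
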
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index c850009602d1..6249ba1bde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
29int amdgpu_uvd_suspend(struct amdgpu_device *adev); 29int amdgpu_uvd_suspend(struct amdgpu_device *adev);
30int amdgpu_uvd_resume(struct amdgpu_device *adev); 30int amdgpu_uvd_resume(struct amdgpu_device *adev);
31int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 31int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
32 struct fence **fence); 32 struct dma_fence **fence);
33int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 33int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
34 bool direct, struct fence **fence); 34 bool direct, struct dma_fence **fence);
35void amdgpu_uvd_free_handles(struct amdgpu_device *adev, 35void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
36 struct drm_file *filp); 36 struct drm_file *filp);
37int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); 37int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 7fe8fd884f06..f0f8afb85585 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -395,12 +395,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
395 * Open up a stream for HW test 395 * Open up a stream for HW test
396 */ 396 */
397int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 397int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
398 struct fence **fence) 398 struct dma_fence **fence)
399{ 399{
400 const unsigned ib_size_dw = 1024; 400 const unsigned ib_size_dw = 1024;
401 struct amdgpu_job *job; 401 struct amdgpu_job *job;
402 struct amdgpu_ib *ib; 402 struct amdgpu_ib *ib;
403 struct fence *f = NULL; 403 struct dma_fence *f = NULL;
404 uint64_t dummy; 404 uint64_t dummy;
405 int i, r; 405 int i, r;
406 406
@@ -450,14 +450,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
450 ib->ptr[i] = 0x0; 450 ib->ptr[i] = 0x0;
451 451
452 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); 452 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
453 job->fence = fence_get(f); 453 job->fence = dma_fence_get(f);
454 if (r) 454 if (r)
455 goto err; 455 goto err;
456 456
457 amdgpu_job_free(job); 457 amdgpu_job_free(job);
458 if (fence) 458 if (fence)
459 *fence = fence_get(f); 459 *fence = dma_fence_get(f);
460 fence_put(f); 460 dma_fence_put(f);
461 return 0; 461 return 0;
462 462
463err: 463err:
@@ -476,12 +476,12 @@ err:
476 * Close up a stream for HW test or if userspace failed to do so 476 * Close up a stream for HW test or if userspace failed to do so
477 */ 477 */
478int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 478int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
479 bool direct, struct fence **fence) 479 bool direct, struct dma_fence **fence)
480{ 480{
481 const unsigned ib_size_dw = 1024; 481 const unsigned ib_size_dw = 1024;
482 struct amdgpu_job *job; 482 struct amdgpu_job *job;
483 struct amdgpu_ib *ib; 483 struct amdgpu_ib *ib;
484 struct fence *f = NULL; 484 struct dma_fence *f = NULL;
485 int i, r; 485 int i, r;
486 486
487 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); 487 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -513,7 +513,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
513 513
514 if (direct) { 514 if (direct) {
515 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); 515 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
516 job->fence = fence_get(f); 516 job->fence = dma_fence_get(f);
517 if (r) 517 if (r)
518 goto err; 518 goto err;
519 519
@@ -526,8 +526,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
526 } 526 }
527 527
528 if (fence) 528 if (fence)
529 *fence = fence_get(f); 529 *fence = dma_fence_get(f);
530 fence_put(f); 530 dma_fence_put(f);
531 return 0; 531 return 0;
532 532
533err: 533err:
@@ -883,7 +883,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
883 */ 883 */
884int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) 884int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
885{ 885{
886 struct fence *fence = NULL; 886 struct dma_fence *fence = NULL;
887 long r; 887 long r;
888 888
889 /* skip vce ring1/2 ib test for now, since it's not reliable */ 889 /* skip vce ring1/2 ib test for now, since it's not reliable */
@@ -902,7 +902,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
902 goto error; 902 goto error;
903 } 903 }
904 904
905 r = fence_wait_timeout(fence, false, timeout); 905 r = dma_fence_wait_timeout(fence, false, timeout);
906 if (r == 0) { 906 if (r == 0) {
907 DRM_ERROR("amdgpu: IB test timed out.\n"); 907 DRM_ERROR("amdgpu: IB test timed out.\n");
908 r = -ETIMEDOUT; 908 r = -ETIMEDOUT;
@@ -913,6 +913,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
913 r = 0; 913 r = 0;
914 } 914 }
915error: 915error:
916 fence_put(fence); 916 dma_fence_put(fence);
917 return r; 917 return r;
918} 918}
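
One subtlety in the UVD/VCE paths above: job->fence = dma_fence_get(f) runs
before the error check on amdgpu_ib_schedule(), and the error labels call
dma_fence_put() unconditionally. Both are safe because dma_fence_get() and
dma_fence_put() accept NULL, and the scheduling call leaves f NULL on
failure. A sketch of that error-path shape, with my_submit() a hypothetical
submitter (not a real API):

    #include <linux/dma-fence.h>

    int my_submit(struct dma_fence **out);  /* hypothetical; *out stays NULL on error */

    static int submit_and_track(struct dma_fence **keep)
    {
            struct dma_fence *f = NULL;
            int r;

            r = my_submit(&f);          /* on success, f carries one reference */
            *keep = dma_fence_get(f);   /* NULL in, NULL out: safe before the check */
            if (r)
                    goto err;

            dma_fence_put(f);           /* keep's own reference survives */
            return 0;

    err:
            dma_fence_put(f);           /* both puts no-op when f stayed NULL */
            dma_fence_put(*keep);
            *keep = NULL;
            return r;
    }
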
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 12729d2852df..566c29ddeeb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,9 +29,9 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
29int amdgpu_vce_suspend(struct amdgpu_device *adev); 29int amdgpu_vce_suspend(struct amdgpu_device *adev);
30int amdgpu_vce_resume(struct amdgpu_device *adev); 30int amdgpu_vce_resume(struct amdgpu_device *adev);
31int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 31int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
32 struct fence **fence); 32 struct dma_fence **fence);
33int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 33int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
34 bool direct, struct fence **fence); 34 bool direct, struct dma_fence **fence);
35void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); 35void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
36int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); 36int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
37void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, 37void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 06f24322e7c3..22cabb5456e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,7 +25,7 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/fence-array.h> 28#include <linux/dma-fence-array.h>
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include <drm/amdgpu_drm.h> 30#include <drm/amdgpu_drm.h>
31#include "amdgpu.h" 31#include "amdgpu.h"
@@ -194,14 +194,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
194 * Allocate an id for the vm, adding fences to the sync obj as necessary. 194 * Allocate an id for the vm, adding fences to the sync obj as necessary.
195 */ 195 */
196int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 196int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
197 struct amdgpu_sync *sync, struct fence *fence, 197 struct amdgpu_sync *sync, struct dma_fence *fence,
198 struct amdgpu_job *job) 198 struct amdgpu_job *job)
199{ 199{
200 struct amdgpu_device *adev = ring->adev; 200 struct amdgpu_device *adev = ring->adev;
201 uint64_t fence_context = adev->fence_context + ring->idx; 201 uint64_t fence_context = adev->fence_context + ring->idx;
202 struct fence *updates = sync->last_vm_update; 202 struct dma_fence *updates = sync->last_vm_update;
203 struct amdgpu_vm_id *id, *idle; 203 struct amdgpu_vm_id *id, *idle;
204 struct fence **fences; 204 struct dma_fence **fences;
205 unsigned i; 205 unsigned i;
206 int r = 0; 206 int r = 0;
207 207
@@ -225,17 +225,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
225 if (&idle->list == &adev->vm_manager.ids_lru) { 225 if (&idle->list == &adev->vm_manager.ids_lru) {
226 u64 fence_context = adev->vm_manager.fence_context + ring->idx; 226 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
227 unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; 227 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
228 struct fence_array *array; 228 struct dma_fence_array *array;
229 unsigned j; 229 unsigned j;
230 230
231 for (j = 0; j < i; ++j) 231 for (j = 0; j < i; ++j)
232 fence_get(fences[j]); 232 dma_fence_get(fences[j]);
233 233
234 array = fence_array_create(i, fences, fence_context, 234 array = dma_fence_array_create(i, fences, fence_context,
235 seqno, true); 235 seqno, true);
236 if (!array) { 236 if (!array) {
237 for (j = 0; j < i; ++j) 237 for (j = 0; j < i; ++j)
238 fence_put(fences[j]); 238 dma_fence_put(fences[j]);
239 kfree(fences); 239 kfree(fences);
240 r = -ENOMEM; 240 r = -ENOMEM;
241 goto error; 241 goto error;
@@ -243,7 +243,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
243 243
244 244
245 r = amdgpu_sync_fence(ring->adev, sync, &array->base); 245 r = amdgpu_sync_fence(ring->adev, sync, &array->base);
246 fence_put(&array->base); 246 dma_fence_put(&array->base);
247 if (r) 247 if (r)
248 goto error; 248 goto error;
249 249
@@ -257,7 +257,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
257 /* Check if we can use a VMID already assigned to this VM */ 257 /* Check if we can use a VMID already assigned to this VM */
258 i = ring->idx; 258 i = ring->idx;
259 do { 259 do {
260 struct fence *flushed; 260 struct dma_fence *flushed;
261 261
262 id = vm->ids[i++]; 262 id = vm->ids[i++];
263 if (i == AMDGPU_MAX_RINGS) 263 if (i == AMDGPU_MAX_RINGS)
@@ -279,12 +279,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
279 continue; 279 continue;
280 280
281 if (id->last_flush->context != fence_context && 281 if (id->last_flush->context != fence_context &&
282 !fence_is_signaled(id->last_flush)) 282 !dma_fence_is_signaled(id->last_flush))
283 continue; 283 continue;
284 284
285 flushed = id->flushed_updates; 285 flushed = id->flushed_updates;
286 if (updates && 286 if (updates &&
287 (!flushed || fence_is_later(updates, flushed))) 287 (!flushed || dma_fence_is_later(updates, flushed)))
288 continue; 288 continue;
289 289
290 /* Good we can use this VMID. Remember this submission as 290 /* Good we can use this VMID. Remember this submission as
@@ -315,14 +315,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
315 if (r) 315 if (r)
316 goto error; 316 goto error;
317 317
318 fence_put(id->first); 318 dma_fence_put(id->first);
319 id->first = fence_get(fence); 319 id->first = dma_fence_get(fence);
320 320
321 fence_put(id->last_flush); 321 dma_fence_put(id->last_flush);
322 id->last_flush = NULL; 322 id->last_flush = NULL;
323 323
324 fence_put(id->flushed_updates); 324 dma_fence_put(id->flushed_updates);
325 id->flushed_updates = fence_get(updates); 325 id->flushed_updates = dma_fence_get(updates);
326 326
327 id->pd_gpu_addr = job->vm_pd_addr; 327 id->pd_gpu_addr = job->vm_pd_addr;
328 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); 328 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
@@ -393,7 +393,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
393 393
394 if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || 394 if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
395 amdgpu_vm_is_gpu_reset(adev, id))) { 395 amdgpu_vm_is_gpu_reset(adev, id))) {
396 struct fence *fence; 396 struct dma_fence *fence;
397 397
398 trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); 398 trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
399 amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); 399 amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
@@ -403,7 +403,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
403 return r; 403 return r;
404 404
405 mutex_lock(&adev->vm_manager.lock); 405 mutex_lock(&adev->vm_manager.lock);
406 fence_put(id->last_flush); 406 dma_fence_put(id->last_flush);
407 id->last_flush = fence; 407 id->last_flush = fence;
408 mutex_unlock(&adev->vm_manager.lock); 408 mutex_unlock(&adev->vm_manager.lock);
409 } 409 }
@@ -537,7 +537,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
537 struct amdgpu_bo *bo) 537 struct amdgpu_bo *bo)
538{ 538{
539 struct amdgpu_ring *ring; 539 struct amdgpu_ring *ring;
540 struct fence *fence = NULL; 540 struct dma_fence *fence = NULL;
541 struct amdgpu_job *job; 541 struct amdgpu_job *job;
542 struct amdgpu_pte_update_params params; 542 struct amdgpu_pte_update_params params;
543 unsigned entries; 543 unsigned entries;
@@ -578,7 +578,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
578 goto error_free; 578 goto error_free;
579 579
580 amdgpu_bo_fence(bo, fence, true); 580 amdgpu_bo_fence(bo, fence, true);
581 fence_put(fence); 581 dma_fence_put(fence);
582 return 0; 582 return 0;
583 583
584error_free: 584error_free:
@@ -625,7 +625,7 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
625 unsigned count = 0, pt_idx, ndw; 625 unsigned count = 0, pt_idx, ndw;
626 struct amdgpu_job *job; 626 struct amdgpu_job *job;
627 struct amdgpu_pte_update_params params; 627 struct amdgpu_pte_update_params params;
628 struct fence *fence = NULL; 628 struct dma_fence *fence = NULL;
629 629
630 int r; 630 int r;
631 631
@@ -714,9 +714,9 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
714 goto error_free; 714 goto error_free;
715 715
716 amdgpu_bo_fence(pd, fence, true); 716 amdgpu_bo_fence(pd, fence, true);
717 fence_put(vm->page_directory_fence); 717 dma_fence_put(vm->page_directory_fence);
718 vm->page_directory_fence = fence_get(fence); 718 vm->page_directory_fence = dma_fence_get(fence);
719 fence_put(fence); 719 dma_fence_put(fence);
720 720
721 } else { 721 } else {
722 amdgpu_job_free(job); 722 amdgpu_job_free(job);
@@ -929,20 +929,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
929 * Returns 0 for success, -EINVAL for failure. 929 * Returns 0 for success, -EINVAL for failure.
930 */ 930 */
931static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, 931static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
932 struct fence *exclusive, 932 struct dma_fence *exclusive,
933 uint64_t src, 933 uint64_t src,
934 dma_addr_t *pages_addr, 934 dma_addr_t *pages_addr,
935 struct amdgpu_vm *vm, 935 struct amdgpu_vm *vm,
936 uint64_t start, uint64_t last, 936 uint64_t start, uint64_t last,
937 uint32_t flags, uint64_t addr, 937 uint32_t flags, uint64_t addr,
938 struct fence **fence) 938 struct dma_fence **fence)
939{ 939{
940 struct amdgpu_ring *ring; 940 struct amdgpu_ring *ring;
941 void *owner = AMDGPU_FENCE_OWNER_VM; 941 void *owner = AMDGPU_FENCE_OWNER_VM;
942 unsigned nptes, ncmds, ndw; 942 unsigned nptes, ncmds, ndw;
943 struct amdgpu_job *job; 943 struct amdgpu_job *job;
944 struct amdgpu_pte_update_params params; 944 struct amdgpu_pte_update_params params;
945 struct fence *f = NULL; 945 struct dma_fence *f = NULL;
946 int r; 946 int r;
947 947
948 memset(&params, 0, sizeof(params)); 948 memset(&params, 0, sizeof(params));
@@ -1045,10 +1045,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1045 1045
1046 amdgpu_bo_fence(vm->page_directory, f, true); 1046 amdgpu_bo_fence(vm->page_directory, f, true);
1047 if (fence) { 1047 if (fence) {
1048 fence_put(*fence); 1048 dma_fence_put(*fence);
1049 *fence = fence_get(f); 1049 *fence = dma_fence_get(f);
1050 } 1050 }
1051 fence_put(f); 1051 dma_fence_put(f);
1052 return 0; 1052 return 0;
1053 1053
1054error_free: 1054error_free:
@@ -1074,13 +1074,13 @@ error_free:
1074 * Returns 0 for success, -EINVAL for failure. 1074 * Returns 0 for success, -EINVAL for failure.
1075 */ 1075 */
1076static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, 1076static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1077 struct fence *exclusive, 1077 struct dma_fence *exclusive,
1078 uint32_t gtt_flags, 1078 uint32_t gtt_flags,
1079 dma_addr_t *pages_addr, 1079 dma_addr_t *pages_addr,
1080 struct amdgpu_vm *vm, 1080 struct amdgpu_vm *vm,
1081 struct amdgpu_bo_va_mapping *mapping, 1081 struct amdgpu_bo_va_mapping *mapping,
1082 uint32_t flags, uint64_t addr, 1082 uint32_t flags, uint64_t addr,
1083 struct fence **fence) 1083 struct dma_fence **fence)
1084{ 1084{
1085 const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE; 1085 const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
1086 1086
@@ -1147,7 +1147,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1147 dma_addr_t *pages_addr = NULL; 1147 dma_addr_t *pages_addr = NULL;
1148 uint32_t gtt_flags, flags; 1148 uint32_t gtt_flags, flags;
1149 struct ttm_mem_reg *mem; 1149 struct ttm_mem_reg *mem;
1150 struct fence *exclusive; 1150 struct dma_fence *exclusive;
1151 uint64_t addr; 1151 uint64_t addr;
1152 int r; 1152 int r;
1153 1153
@@ -1547,7 +1547,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1547 kfree(mapping); 1547 kfree(mapping);
1548 } 1548 }
1549 1549
1550 fence_put(bo_va->last_pt_update); 1550 dma_fence_put(bo_va->last_pt_update);
1551 kfree(bo_va); 1551 kfree(bo_va);
1552} 1552}
1553 1553
@@ -1709,7 +1709,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1709 1709
1710 amdgpu_bo_unref(&vm->page_directory->shadow); 1710 amdgpu_bo_unref(&vm->page_directory->shadow);
1711 amdgpu_bo_unref(&vm->page_directory); 1711 amdgpu_bo_unref(&vm->page_directory);
1712 fence_put(vm->page_directory_fence); 1712 dma_fence_put(vm->page_directory_fence);
1713} 1713}
1714 1714
1715/** 1715/**
@@ -1733,7 +1733,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1733 &adev->vm_manager.ids_lru); 1733 &adev->vm_manager.ids_lru);
1734 } 1734 }
1735 1735
1736 adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); 1736 adev->vm_manager.fence_context =
1737 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1737 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 1738 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1738 adev->vm_manager.seqno[i] = 0; 1739 adev->vm_manager.seqno[i] = 0;
1739 1740
@@ -1755,8 +1756,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1755 for (i = 0; i < AMDGPU_NUM_VM; ++i) { 1756 for (i = 0; i < AMDGPU_NUM_VM; ++i) {
1756 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; 1757 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
1757 1758
1758 fence_put(adev->vm_manager.ids[i].first); 1759 dma_fence_put(adev->vm_manager.ids[i].first);
1759 amdgpu_sync_free(&adev->vm_manager.ids[i].active); 1760 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1760 fence_put(id->flushed_updates); 1761 dma_fence_put(id->flushed_updates);
1761 } 1762 }
1762} 1763}
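
The amdgpu_vm.c hunks swap <linux/fence-array.h> for
<linux/dma-fence-array.h> and with it the one non-trivial constructor in
this patch: dma_fence_array_create(num, fences, context, seqno,
signal_on_any) wraps a kmalloc'd array of fences in a single fence and, on
success, takes ownership of both the array and the references in it, which
is why amdgpu_vm_grab_id() above only cleans up by hand on the failure path
(it passes true so the result signals as soon as any VMID goes idle). A
sketch of the signal-on-all variant:

    #include <linux/dma-fence-array.h>
    #include <linux/slab.h>

    /*
     * Wrap n borrowed fences (in a kmalloc'd array) into one fence that
     * signals once all of them have.
     */
    static struct dma_fence *combine_all(struct dma_fence **fences, int n,
                                         u64 context, unsigned int seqno)
    {
            struct dma_fence_array *array;
            int i;

            for (i = 0; i < n; ++i)
                    dma_fence_get(fences[i]);       /* references for the array */

            array = dma_fence_array_create(n, fences, context, seqno, false);
            if (!array) {
                    for (i = 0; i < n; ++i)
                            dma_fence_put(fences[i]);
                    kfree(fences);
                    return NULL;
            }
            return &array->base;    /* usable wherever a dma_fence is expected */
    }
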
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cb952acc7133..321b9d5a4e6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
622{ 622{
623 struct amdgpu_device *adev = ring->adev; 623 struct amdgpu_device *adev = ring->adev;
624 struct amdgpu_ib ib; 624 struct amdgpu_ib ib;
625 struct fence *f = NULL; 625 struct dma_fence *f = NULL;
626 unsigned index; 626 unsigned index;
627 u32 tmp = 0; 627 u32 tmp = 0;
628 u64 gpu_addr; 628 u64 gpu_addr;
@@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
655 if (r) 655 if (r)
656 goto err1; 656 goto err1;
657 657
658 r = fence_wait_timeout(f, false, timeout); 658 r = dma_fence_wait_timeout(f, false, timeout);
659 if (r == 0) { 659 if (r == 0) {
660 DRM_ERROR("amdgpu: IB test timed out\n"); 660 DRM_ERROR("amdgpu: IB test timed out\n");
661 r = -ETIMEDOUT; 661 r = -ETIMEDOUT;
@@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
675 675
676err1: 676err1:
677 amdgpu_ib_free(adev, &ib, NULL); 677 amdgpu_ib_free(adev, &ib, NULL);
678 fence_put(f); 678 dma_fence_put(f);
679err0: 679err0:
680 amdgpu_wb_free(adev, index); 680 amdgpu_wb_free(adev, index);
681 return r; 681 return r;
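
cik_sdma_ring_test_ib() above is the first of six near-identical
conversions; the gfx_v6_0, gfx_v7_0, gfx_v8_0, sdma_v2_4, sdma_v3_0 and
si_dma hunks below repeat the same skeleton with only the hardware pokes
differing. Reduced to its fence handling (IB contents and result checks
elided, so this is a fragment rather than a complete function):

    struct dma_fence *f = NULL;
    long r;

    r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
    if (r)
            goto err1;

    r = dma_fence_wait_timeout(f, false, timeout);
    if (r == 0)
            r = -ETIMEDOUT;         /* same decode as in the UVD test */
    else if (r > 0)
            r = 0;

    err1:
            amdgpu_ib_free(adev, &ib, NULL);
            dma_fence_put(f);       /* NULL-safe when scheduling failed */
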
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 40abb6b81c09..7dc11a19e49d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1522,7 +1522,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1522{ 1522{
1523 struct amdgpu_device *adev = ring->adev; 1523 struct amdgpu_device *adev = ring->adev;
1524 struct amdgpu_ib ib; 1524 struct amdgpu_ib ib;
1525 struct fence *f = NULL; 1525 struct dma_fence *f = NULL;
1526 uint32_t scratch; 1526 uint32_t scratch;
1527 uint32_t tmp = 0; 1527 uint32_t tmp = 0;
1528 long r; 1528 long r;
@@ -1548,7 +1548,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1548 if (r) 1548 if (r)
1549 goto err2; 1549 goto err2;
1550 1550
1551 r = fence_wait_timeout(f, false, timeout); 1551 r = dma_fence_wait_timeout(f, false, timeout);
1552 if (r == 0) { 1552 if (r == 0) {
1553 DRM_ERROR("amdgpu: IB test timed out\n"); 1553 DRM_ERROR("amdgpu: IB test timed out\n");
1554 r = -ETIMEDOUT; 1554 r = -ETIMEDOUT;
@@ -1569,7 +1569,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1569 1569
1570err2: 1570err2:
1571 amdgpu_ib_free(adev, &ib, NULL); 1571 amdgpu_ib_free(adev, &ib, NULL);
1572 fence_put(f); 1572 dma_fence_put(f);
1573err1: 1573err1:
1574 amdgpu_gfx_scratch_free(adev, scratch); 1574 amdgpu_gfx_scratch_free(adev, scratch);
1575 return r; 1575 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 71116da9e782..3865ffe7de55 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2286,7 +2286,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2286{ 2286{
2287 struct amdgpu_device *adev = ring->adev; 2287 struct amdgpu_device *adev = ring->adev;
2288 struct amdgpu_ib ib; 2288 struct amdgpu_ib ib;
2289 struct fence *f = NULL; 2289 struct dma_fence *f = NULL;
2290 uint32_t scratch; 2290 uint32_t scratch;
2291 uint32_t tmp = 0; 2291 uint32_t tmp = 0;
2292 long r; 2292 long r;
@@ -2312,7 +2312,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2312 if (r) 2312 if (r)
2313 goto err2; 2313 goto err2;
2314 2314
2315 r = fence_wait_timeout(f, false, timeout); 2315 r = dma_fence_wait_timeout(f, false, timeout);
2316 if (r == 0) { 2316 if (r == 0) {
2317 DRM_ERROR("amdgpu: IB test timed out\n"); 2317 DRM_ERROR("amdgpu: IB test timed out\n");
2318 r = -ETIMEDOUT; 2318 r = -ETIMEDOUT;
@@ -2333,7 +2333,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2333 2333
2334err2: 2334err2:
2335 amdgpu_ib_free(adev, &ib, NULL); 2335 amdgpu_ib_free(adev, &ib, NULL);
2336 fence_put(f); 2336 dma_fence_put(f);
2337err1: 2337err1:
2338 amdgpu_gfx_scratch_free(adev, scratch); 2338 amdgpu_gfx_scratch_free(adev, scratch);
2339 return r; 2339 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ee6a48a09214..a9dd18847c40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -798,7 +798,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
798{ 798{
799 struct amdgpu_device *adev = ring->adev; 799 struct amdgpu_device *adev = ring->adev;
800 struct amdgpu_ib ib; 800 struct amdgpu_ib ib;
801 struct fence *f = NULL; 801 struct dma_fence *f = NULL;
802 uint32_t scratch; 802 uint32_t scratch;
803 uint32_t tmp = 0; 803 uint32_t tmp = 0;
804 long r; 804 long r;
@@ -824,7 +824,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
824 if (r) 824 if (r)
825 goto err2; 825 goto err2;
826 826
827 r = fence_wait_timeout(f, false, timeout); 827 r = dma_fence_wait_timeout(f, false, timeout);
828 if (r == 0) { 828 if (r == 0) {
829 DRM_ERROR("amdgpu: IB test timed out.\n"); 829 DRM_ERROR("amdgpu: IB test timed out.\n");
830 r = -ETIMEDOUT; 830 r = -ETIMEDOUT;
@@ -844,7 +844,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
844 } 844 }
845err2: 845err2:
846 amdgpu_ib_free(adev, &ib, NULL); 846 amdgpu_ib_free(adev, &ib, NULL);
847 fence_put(f); 847 dma_fence_put(f);
848err1: 848err1:
849 amdgpu_gfx_scratch_free(adev, scratch); 849 amdgpu_gfx_scratch_free(adev, scratch);
850 return r; 850 return r;
@@ -1575,7 +1575,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1575{ 1575{
1576 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; 1576 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
1577 struct amdgpu_ib ib; 1577 struct amdgpu_ib ib;
1578 struct fence *f = NULL; 1578 struct dma_fence *f = NULL;
1579 int r, i; 1579 int r, i;
1580 u32 tmp; 1580 u32 tmp;
1581 unsigned total_size, vgpr_offset, sgpr_offset; 1581 unsigned total_size, vgpr_offset, sgpr_offset;
@@ -1708,7 +1708,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1708 } 1708 }
1709 1709
1710 /* wait for the GPU to finish processing the IB */ 1710 /* wait for the GPU to finish processing the IB */
1711 r = fence_wait(f, false); 1711 r = dma_fence_wait(f, false);
1712 if (r) { 1712 if (r) {
1713 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 1713 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
1714 goto fail; 1714 goto fail;
@@ -1729,7 +1729,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1729 1729
1730fail: 1730fail:
1731 amdgpu_ib_free(adev, &ib, NULL); 1731 amdgpu_ib_free(adev, &ib, NULL);
1732 fence_put(f); 1732 dma_fence_put(f);
1733 1733
1734 return r; 1734 return r;
1735} 1735}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 565dab3c7218..7edf6e8c63dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
668{ 668{
669 struct amdgpu_device *adev = ring->adev; 669 struct amdgpu_device *adev = ring->adev;
670 struct amdgpu_ib ib; 670 struct amdgpu_ib ib;
671 struct fence *f = NULL; 671 struct dma_fence *f = NULL;
672 unsigned index; 672 unsigned index;
673 u32 tmp = 0; 673 u32 tmp = 0;
674 u64 gpu_addr; 674 u64 gpu_addr;
@@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
705 if (r) 705 if (r)
706 goto err1; 706 goto err1;
707 707
708 r = fence_wait_timeout(f, false, timeout); 708 r = dma_fence_wait_timeout(f, false, timeout);
709 if (r == 0) { 709 if (r == 0) {
710 DRM_ERROR("amdgpu: IB test timed out\n"); 710 DRM_ERROR("amdgpu: IB test timed out\n");
711 r = -ETIMEDOUT; 711 r = -ETIMEDOUT;
@@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
725 725
726err1: 726err1:
727 amdgpu_ib_free(adev, &ib, NULL); 727 amdgpu_ib_free(adev, &ib, NULL);
728 fence_put(f); 728 dma_fence_put(f);
729err0: 729err0:
730 amdgpu_wb_free(adev, index); 730 amdgpu_wb_free(adev, index);
731 return r; 731 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index a9d10941fb53..1932a67c62ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
871{ 871{
872 struct amdgpu_device *adev = ring->adev; 872 struct amdgpu_device *adev = ring->adev;
873 struct amdgpu_ib ib; 873 struct amdgpu_ib ib;
874 struct fence *f = NULL; 874 struct dma_fence *f = NULL;
875 unsigned index; 875 unsigned index;
876 u32 tmp = 0; 876 u32 tmp = 0;
877 u64 gpu_addr; 877 u64 gpu_addr;
@@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
908 if (r) 908 if (r)
909 goto err1; 909 goto err1;
910 910
911 r = fence_wait_timeout(f, false, timeout); 911 r = dma_fence_wait_timeout(f, false, timeout);
912 if (r == 0) { 912 if (r == 0) {
913 DRM_ERROR("amdgpu: IB test timed out\n"); 913 DRM_ERROR("amdgpu: IB test timed out\n");
914 r = -ETIMEDOUT; 914 r = -ETIMEDOUT;
@@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
927 } 927 }
928err1: 928err1:
929 amdgpu_ib_free(adev, &ib, NULL); 929 amdgpu_ib_free(adev, &ib, NULL);
930 fence_put(f); 930 dma_fence_put(f);
931err0: 931err0:
932 amdgpu_wb_free(adev, index); 932 amdgpu_wb_free(adev, index);
933 return r; 933 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index de358193a8f9..b4cf4e25bf91 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
274{ 274{
275 struct amdgpu_device *adev = ring->adev; 275 struct amdgpu_device *adev = ring->adev;
276 struct amdgpu_ib ib; 276 struct amdgpu_ib ib;
277 struct fence *f = NULL; 277 struct dma_fence *f = NULL;
278 unsigned index; 278 unsigned index;
279 u32 tmp = 0; 279 u32 tmp = 0;
280 u64 gpu_addr; 280 u64 gpu_addr;
@@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
305 if (r) 305 if (r)
306 goto err1; 306 goto err1;
307 307
308 r = fence_wait_timeout(f, false, timeout); 308 r = dma_fence_wait_timeout(f, false, timeout);
309 if (r == 0) { 309 if (r == 0) {
310 DRM_ERROR("amdgpu: IB test timed out\n"); 310 DRM_ERROR("amdgpu: IB test timed out\n");
311 r = -ETIMEDOUT; 311 r = -ETIMEDOUT;
@@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
325 325
326err1: 326err1:
327 amdgpu_ib_free(adev, &ib, NULL); 327 amdgpu_ib_free(adev, &ib, NULL);
328 fence_put(f); 328 dma_fence_put(f);
329err0: 329err0:
330 amdgpu_wb_free(adev, index); 330 amdgpu_wb_free(adev, index);
331 return r; 331 return r;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index b961a1c6caf3..dbd4fd3a810b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job,
17 TP_STRUCT__entry( 17 TP_STRUCT__entry(
18 __field(struct amd_sched_entity *, entity) 18 __field(struct amd_sched_entity *, entity)
19 __field(struct amd_sched_job *, sched_job) 19 __field(struct amd_sched_job *, sched_job)
20 __field(struct fence *, fence) 20 __field(struct dma_fence *, fence)
21 __field(const char *, name) 21 __field(const char *, name)
22 __field(u32, job_count) 22 __field(u32, job_count)
23 __field(int, hw_job_count) 23 __field(int, hw_job_count)
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job,
42 TP_PROTO(struct amd_sched_fence *fence), 42 TP_PROTO(struct amd_sched_fence *fence),
43 TP_ARGS(fence), 43 TP_ARGS(fence),
44 TP_STRUCT__entry( 44 TP_STRUCT__entry(
45 __field(struct fence *, fence) 45 __field(struct dma_fence *, fence)
46 ), 46 ),
47 47
48 TP_fast_assign( 48 TP_fast_assign(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 963a24d46a93..5364e6a7ec8f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,7 +32,7 @@
32 32
33static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); 33static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
34static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 34static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
35static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); 35static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
36 36
37struct kmem_cache *sched_fence_slab; 37struct kmem_cache *sched_fence_slab;
38atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); 38atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -141,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
141 return r; 141 return r;
142 142
143 atomic_set(&entity->fence_seq, 0); 143 atomic_set(&entity->fence_seq, 0);
144 entity->fence_context = fence_context_alloc(2); 144 entity->fence_context = dma_fence_context_alloc(2);
145 145
146 return 0; 146 return 0;
147} 147}
@@ -221,32 +221,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
221 kfifo_free(&entity->job_queue); 221 kfifo_free(&entity->job_queue);
222} 222}
223 223
224static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) 224static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
225{ 225{
226 struct amd_sched_entity *entity = 226 struct amd_sched_entity *entity =
227 container_of(cb, struct amd_sched_entity, cb); 227 container_of(cb, struct amd_sched_entity, cb);
228 entity->dependency = NULL; 228 entity->dependency = NULL;
229 fence_put(f); 229 dma_fence_put(f);
230 amd_sched_wakeup(entity->sched); 230 amd_sched_wakeup(entity->sched);
231} 231}
232 232
233static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb) 233static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
234{ 234{
235 struct amd_sched_entity *entity = 235 struct amd_sched_entity *entity =
236 container_of(cb, struct amd_sched_entity, cb); 236 container_of(cb, struct amd_sched_entity, cb);
237 entity->dependency = NULL; 237 entity->dependency = NULL;
238 fence_put(f); 238 dma_fence_put(f);
239} 239}
240 240
 static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->sched;
-	struct fence * fence = entity->dependency;
+	struct dma_fence * fence = entity->dependency;
 	struct amd_sched_fence *s_fence;
 
 	if (fence->context == entity->fence_context) {
 		/* We can ignore fences from ourself */
-		fence_put(entity->dependency);
+		dma_fence_put(entity->dependency);
 		return false;
 	}
 
@@ -257,23 +257,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 		 * Fence is from the same scheduler, only need to wait for
 		 * it to be scheduled
 		 */
-		fence = fence_get(&s_fence->scheduled);
-		fence_put(entity->dependency);
+		fence = dma_fence_get(&s_fence->scheduled);
+		dma_fence_put(entity->dependency);
 		entity->dependency = fence;
-		if (!fence_add_callback(fence, &entity->cb,
+		if (!dma_fence_add_callback(fence, &entity->cb,
 					amd_sched_entity_clear_dep))
 			return true;
 
 		/* Ignore it when it is already scheduled */
-		fence_put(fence);
+		dma_fence_put(fence);
 		return false;
 	}
 
-	if (!fence_add_callback(entity->dependency, &entity->cb,
+	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
 				amd_sched_entity_wakeup))
 		return true;
 
-	fence_put(entity->dependency);
+	dma_fence_put(entity->dependency);
 	return false;
 }
 
@@ -354,7 +354,8 @@ static void amd_sched_job_finish(struct work_struct *work)
 	sched->ops->free_job(s_job);
 }
 
-static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+static void amd_sched_job_finish_cb(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
 {
 	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
 						 finish_cb);
@@ -388,8 +389,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 
 	spin_lock(&sched->job_list_lock);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
-		if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
-			fence_put(s_job->s_fence->parent);
+		if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+			dma_fence_put(s_job->s_fence->parent);
 			s_job->s_fence->parent = NULL;
 		}
 	}
@@ -410,21 +411,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct amd_sched_fence *s_fence = s_job->s_fence;
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		spin_unlock(&sched->job_list_lock);
 		fence = sched->ops->run_job(s_job);
 		atomic_inc(&sched->hw_rq_count);
 		if (fence) {
-			s_fence->parent = fence_get(fence);
-			r = fence_add_callback(fence, &s_fence->cb,
+			s_fence->parent = dma_fence_get(fence);
+			r = dma_fence_add_callback(fence, &s_fence->cb,
 					       amd_sched_process_job);
 			if (r == -ENOENT)
 				amd_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
-			fence_put(fence);
+			dma_fence_put(fence);
 		} else {
 			DRM_ERROR("Failed to run job!\n");
 			amd_sched_process_job(NULL, &s_fence->cb);
@@ -446,8 +447,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 	struct amd_sched_entity *entity = sched_job->s_entity;
 
 	trace_amd_sched_job(sched_job);
-	fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+	dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
 			   amd_sched_job_finish_cb);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 }
@@ -511,7 +512,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 	return entity;
 }
 
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amd_sched_fence *s_fence =
 		container_of(cb, struct amd_sched_fence, cb);
@@ -521,7 +522,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	amd_sched_fence_finished(s_fence);
 
 	trace_amd_sched_process_job(s_fence);
-	fence_put(&s_fence->finished);
+	dma_fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
@@ -547,7 +548,7 @@ static int amd_sched_main(void *param)
 		struct amd_sched_entity *entity = NULL;
 		struct amd_sched_fence *s_fence;
 		struct amd_sched_job *sched_job;
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		wait_event_interruptible(sched->wake_up_worker,
 					 (!amd_sched_blocked(sched) &&
@@ -569,15 +570,15 @@ static int amd_sched_main(void *param)
 		fence = sched->ops->run_job(sched_job);
 		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
-			s_fence->parent = fence_get(fence);
-			r = fence_add_callback(fence, &s_fence->cb,
+			s_fence->parent = dma_fence_get(fence);
+			r = dma_fence_add_callback(fence, &s_fence->cb,
 					       amd_sched_process_job);
 			if (r == -ENOENT)
 				amd_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
-			fence_put(fence);
+			dma_fence_put(fence);
 		} else {
 			DRM_ERROR("Failed to run job!\n");
 			amd_sched_process_job(NULL, &s_fence->cb);
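
A pattern worth pausing on before the next file: in both run-job paths above, dma_fence_add_callback() returning -ENOENT means the fence had already signaled and the callback was not queued, so the caller must invoke it by hand, exactly as the scheduler does. A minimal sketch of that contract, not part of the patch (my_done and my_attach are invented names):

	#include <linux/dma-fence.h>

	static void my_done(struct dma_fence *f, struct dma_fence_cb *cb)
	{
		/* runs exactly once, from whichever context signals f */
	}

	static int my_attach(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		int r = dma_fence_add_callback(fence, cb, my_done);

		if (r == -ENOENT) {	/* already signaled: call it ourselves */
			my_done(fence, cb);
			r = 0;
		}
		return r;		/* any other nonzero r is a real error */
	}
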
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb502ef..876aa43b57df 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -25,7 +25,7 @@
 #define _GPU_SCHEDULER_H_
 
 #include <linux/kfifo.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
@@ -50,8 +50,8 @@ struct amd_sched_entity {
 	atomic_t fence_seq;
 	uint64_t fence_context;
 
-	struct fence *dependency;
-	struct fence_cb cb;
+	struct dma_fence *dependency;
+	struct dma_fence_cb cb;
 };
 
 /**
@@ -66,10 +66,10 @@ struct amd_sched_rq {
 };
 
 struct amd_sched_fence {
-	struct fence scheduled;
-	struct fence finished;
-	struct fence_cb cb;
-	struct fence *parent;
+	struct dma_fence scheduled;
+	struct dma_fence finished;
+	struct dma_fence_cb cb;
+	struct dma_fence *parent;
 	struct amd_gpu_scheduler *sched;
 	spinlock_t lock;
 	void *owner;
@@ -79,15 +79,15 @@ struct amd_sched_job {
 	struct amd_gpu_scheduler *sched;
 	struct amd_sched_entity *s_entity;
 	struct amd_sched_fence *s_fence;
-	struct fence_cb finish_cb;
+	struct dma_fence_cb finish_cb;
 	struct work_struct finish_work;
 	struct list_head node;
 	struct delayed_work work_tdr;
 };
 
-extern const struct fence_ops amd_sched_fence_ops_scheduled;
-extern const struct fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
 {
 	if (f->ops == &amd_sched_fence_ops_scheduled)
 		return container_of(f, struct amd_sched_fence, scheduled);
@@ -103,8 +103,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
  * these functions should be implemented in driver side
 */
 struct amd_sched_backend_ops {
-	struct fence *(*dependency)(struct amd_sched_job *sched_job);
-	struct fence *(*run_job)(struct amd_sched_job *sched_job);
+	struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+	struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
 	void (*timedout_job)(struct amd_sched_job *sched_job);
 	void (*free_job)(struct amd_sched_job *sched_job);
 };
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 6b63beaf7574..c26fa298fe9e 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -42,46 +42,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&entity->fence_seq);
-	fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+	dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
 		   &fence->lock, entity->fence_context, seq);
-	fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+	dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
 		   &fence->lock, entity->fence_context + 1, seq);
 
 	return fence;
 }
 
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
 {
-	int ret = fence_signal(&fence->scheduled);
+	int ret = dma_fence_signal(&fence->scheduled);
 
 	if (!ret)
-		FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"signaled from irq context\n");
 	else
-		FENCE_TRACE(&fence->scheduled, "was already signaled\n");
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"was already signaled\n");
 }
 
 void amd_sched_fence_finished(struct amd_sched_fence *fence)
 {
-	int ret = fence_signal(&fence->finished);
+	int ret = dma_fence_signal(&fence->finished);
 
 	if (!ret)
-		FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+		DMA_FENCE_TRACE(&fence->finished,
+				"signaled from irq context\n");
 	else
-		FENCE_TRACE(&fence->finished, "was already signaled\n");
+		DMA_FENCE_TRACE(&fence->finished,
+				"was already signaled\n");
 }
 
-static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "amd_sched";
 }
 
-static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 	return (const char *)fence->sched->name;
 }
 
-static bool amd_sched_fence_enable_signaling(struct fence *f)
+static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
 {
 	return true;
 }
@@ -95,10 +99,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
  */
 static void amd_sched_fence_free(struct rcu_head *rcu)
 {
-	struct fence *f = container_of(rcu, struct fence, rcu);
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
-	fence_put(fence->parent);
+	dma_fence_put(fence->parent);
 	kmem_cache_free(sched_fence_slab, fence);
 }
 
@@ -110,7 +114,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
  * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
-static void amd_sched_fence_release_scheduled(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
@@ -124,27 +128,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
-static void amd_sched_fence_release_finished(struct fence *f)
+static void amd_sched_fence_release_finished(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
-	fence_put(&fence->scheduled);
+	dma_fence_put(&fence->scheduled);
 }
 
-const struct fence_ops amd_sched_fence_ops_scheduled = {
+const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amd_sched_fence_release_scheduled,
 };
 
-const struct fence_ops amd_sched_fence_ops_finished = {
+const struct dma_fence_ops amd_sched_fence_ops_finished = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amd_sched_fence_release_finished,
 };
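
The two signal helpers above also document the return convention of the renamed core call: dma_fence_signal() returns 0 only for the actual unsignaled-to-signaled transition and -EINVAL when the fence is NULL or was already signaled, which is what lets the trace macros tell the two cases apart without a separate flag test. When the fence lock is already held there is a _locked variant; a sketch, not part of the patch (fence is any struct dma_fence pointer):

	bool first;
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	/* same return convention as dma_fence_signal(), but assumes
	 * fence->lock is held by the caller */
	first = !dma_fence_signal_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);
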
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 1b5a32df9a9a..c32fb3c1d6f0 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1463,7 +1463,7 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
 
 static struct drm_pending_vblank_event *create_vblank_event(
 		struct drm_device *dev, struct drm_file *file_priv,
-		struct fence *fence, uint64_t user_data)
+		struct dma_fence *fence, uint64_t user_data)
 {
 	struct drm_pending_vblank_event *e = NULL;
 	int ret;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index f9362760bfb2..75ad01d595fd 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,7 +30,7 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include "drm_crtc_internal.h"
 
@@ -1017,7 +1017,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
  * drm_atomic_helper_swap_state() so it uses the current plane state (and
  * just uses the atomic state to find the changed planes)
  *
- * Returns zero if success or < 0 if fence_wait() fails.
+ * Returns zero if success or < 0 if dma_fence_wait() fails.
  */
 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 				      struct drm_atomic_state *state,
@@ -1041,11 +1041,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 		 * still interrupt the operation. Instead of blocking until the
 		 * timer expires, make the wait interruptible.
 		 */
-		ret = fence_wait(plane_state->fence, pre_swap);
+		ret = dma_fence_wait(plane_state->fence, pre_swap);
 		if (ret)
 			return ret;
 
-		fence_put(plane_state->fence);
+		dma_fence_put(plane_state->fence);
 		plane_state->fence = NULL;
 	}
 
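
For reading the helper above: dma_fence_wait(fence, intr) blocks without bound and returns 0 on success, or a negative error such as -ERESTARTSYS when intr is true and a signal arrives, hence the early return. When a bound is wanted, dma_fence_wait_timeout() has a different return convention; a sketch, not part of the patch, with a hypothetical timeout policy:

	signed long left;

	/* returns remaining jiffies (>= 1) on success, 0 on timeout,
	 * or a negative error code */
	left = dma_fence_wait_timeout(plane_state->fence, true,
				      msecs_to_jiffies(100));
	if (left < 0)
		return left;		/* interrupted or other error */
	if (left == 0)
		return -ETIMEDOUT;	/* hypothetical policy for a timeout */
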
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 8bed5f459182..cf993dbf602e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -665,7 +665,7 @@ void drm_event_cancel_free(struct drm_device *dev,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	if (p->fence)
-		fence_put(p->fence);
+		dma_fence_put(p->fence);
 
 	kfree(p);
 }
@@ -696,8 +696,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
 	}
 
 	if (e->fence) {
-		fence_signal(e->fence);
-		fence_put(e->fence);
+		dma_fence_signal(e->fence);
+		dma_fence_put(e->fence);
 	}
 
 	if (!e->file_priv) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 3755ef935af4..7d066a91d778 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -466,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
 }
 
 #ifdef CONFIG_DEBUG_FS
-static void etnaviv_gem_describe_fence(struct fence *fence,
+static void etnaviv_gem_describe_fence(struct dma_fence *fence,
 	const char *type, struct seq_file *m)
 {
-	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		seq_printf(m, "\t%9s: %s %s seq %u\n",
 			   type,
 			   fence->ops->get_driver_name(fence),
@@ -482,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct reservation_object *robj = etnaviv_obj->resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	unsigned long off = drm_vma_node_start(&obj->vma_node);
 
 	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b1254f885fed..d2211825e5c8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -15,7 +15,7 @@
  */
 
 #include <linux/component.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
 #include "etnaviv_dump.h"
@@ -882,7 +882,7 @@ static void recover_worker(struct work_struct *work)
 	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
 		if (!gpu->event[i].used)
 			continue;
-		fence_signal(gpu->event[i].fence);
+		dma_fence_signal(gpu->event[i].fence);
 		gpu->event[i].fence = NULL;
 		gpu->event[i].used = false;
 		complete(&gpu->event_free);
@@ -952,55 +952,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
 /* fence object management */
 struct etnaviv_fence {
 	struct etnaviv_gpu *gpu;
-	struct fence base;
+	struct dma_fence base;
 };
 
-static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
 {
 	return container_of(fence, struct etnaviv_fence, base);
 }
 
-static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "etnaviv";
 }
 
-static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
 	return dev_name(f->gpu->dev);
 }
 
-static bool etnaviv_fence_enable_signaling(struct fence *fence)
+static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
 {
 	return true;
 }
 
-static bool etnaviv_fence_signaled(struct fence *fence)
+static bool etnaviv_fence_signaled(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
 	return fence_completed(f->gpu, f->base.seqno);
 }
 
-static void etnaviv_fence_release(struct fence *fence)
+static void etnaviv_fence_release(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
 	kfree_rcu(f, base.rcu);
 }
 
-static const struct fence_ops etnaviv_fence_ops = {
+static const struct dma_fence_ops etnaviv_fence_ops = {
 	.get_driver_name = etnaviv_fence_get_driver_name,
 	.get_timeline_name = etnaviv_fence_get_timeline_name,
 	.enable_signaling = etnaviv_fence_enable_signaling,
 	.signaled = etnaviv_fence_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = etnaviv_fence_release,
 };
 
-static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_fence *f;
 
@@ -1010,8 +1010,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 
 	f->gpu = gpu;
 
-	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
 		   gpu->fence_context, ++gpu->next_fence);
 
 	return &f->base;
 }
@@ -1021,7 +1021,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 {
 	struct reservation_object *robj = etnaviv_obj->resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int i, ret;
 
 	if (!exclusive) {
@@ -1039,7 +1039,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 	/* Wait on any existing exclusive fence which isn't our own */
 	fence = reservation_object_get_excl(robj);
 	if (fence && fence->context != context) {
-		ret = fence_wait(fence, true);
+		ret = dma_fence_wait(fence, true);
 		if (ret)
 			return ret;
 	}
@@ -1052,7 +1052,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 		fence = rcu_dereference_protected(fobj->shared[i],
 						  reservation_object_held(robj));
 		if (fence->context != context) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -1158,11 +1158,11 @@ static void retire_worker(struct work_struct *work)
 
 	mutex_lock(&gpu->lock);
 	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
-		if (!fence_is_signaled(cmdbuf->fence))
+		if (!dma_fence_is_signaled(cmdbuf->fence))
 			break;
 
 		list_del(&cmdbuf->node);
-		fence_put(cmdbuf->fence);
+		dma_fence_put(cmdbuf->fence);
 
 		for (i = 0; i < cmdbuf->nr_bos; i++) {
 			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
@@ -1275,7 +1275,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	unsigned int event, i;
 	int ret;
 
@@ -1391,7 +1391,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 	}
 
 	while ((event = ffs(intr)) != 0) {
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		event -= 1;
 
@@ -1401,7 +1401,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 
 		fence = gpu->event[event].fence;
 		gpu->event[event].fence = NULL;
-		fence_signal(fence);
+		dma_fence_signal(fence);
 
 		/*
 		 * Events can be processed out of order. Eg,
@@ -1553,7 +1553,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 		return ret;
 
 	gpu->drm = drm;
-	gpu->fence_context = fence_context_alloc(1);
+	gpu->fence_context = dma_fence_context_alloc(1);
 	spin_lock_init(&gpu->fence_spinlock);
 
 	INIT_LIST_HEAD(&gpu->active_cmd_list);
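
The etnaviv hunks above happen to walk through the whole provider-side lifecycle of the renamed API: embed a struct dma_fence in a driver object, publish a dma_fence_ops table, initialise with a context from dma_fence_context_alloc() plus a monotonically increasing seqno, signal from the IRQ handler, and free via RCU. A compact sketch assembling those pieces, not part of the patch (all toy_* names are invented):

	#include <linux/dma-fence.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct toy_timeline {
		spinlock_t lock;		/* protects every fence on this timeline */
		u64 context;			/* from dma_fence_context_alloc(1) */
		unsigned int next_seqno;
	};

	struct toy_fence {
		struct dma_fence base;		/* must be embedded, not pointed to */
		struct toy_timeline *tl;
	};

	static const char *toy_driver_name(struct dma_fence *f) { return "toy"; }
	static const char *toy_timeline_name(struct dma_fence *f) { return "toy-ring"; }
	static bool toy_enable_signaling(struct dma_fence *f) { return true; }

	static void toy_release(struct dma_fence *f)
	{
		/* fences may still be dereferenced under RCU, so free via RCU */
		kfree_rcu(container_of(f, struct toy_fence, base), base.rcu);
	}

	static const struct dma_fence_ops toy_fence_ops = {
		.get_driver_name = toy_driver_name,
		.get_timeline_name = toy_timeline_name,
		.enable_signaling = toy_enable_signaling,
		.wait = dma_fence_default_wait,
		.release = toy_release,
	};

	static struct dma_fence *toy_fence_create(struct toy_timeline *tl)
	{
		struct toy_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		f->tl = tl;
		dma_fence_init(&f->base, &toy_fence_ops, &tl->lock,
			       tl->context, ++tl->next_seqno);
		return &f->base;	/* caller owns the initial reference */
	}
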
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 73c278dc3706..8c6b824e9d0a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -89,7 +89,7 @@ struct etnaviv_chip_identity {
 
 struct etnaviv_event {
 	bool used;
-	struct fence *fence;
+	struct dma_fence *fence;
 };
 
 struct etnaviv_cmdbuf;
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf {
 	/* vram node used if the cmdbuf is mapped through the MMUv2 */
 	struct drm_mm_node vram_node;
 	/* fence after which this buffer is to be disposed */
-	struct fence *fence;
+	struct dma_fence *fence;
 	/* target exec state */
 	u32 exec_state;
 	/* per GPU in-flight list */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 74ede1f53372..f9af2a00625e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -26,12 +26,12 @@
 
 #include "i915_drv.h"
 
-static const char *i915_fence_get_driver_name(struct fence *fence)
+static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "i915";
 }
 
-static const char *i915_fence_get_timeline_name(struct fence *fence)
+static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
 	/* Timelines are bound by eviction to a VM. However, since
 	 * we only have a global seqno at the moment, we only have
@@ -42,12 +42,12 @@ static const char *i915_fence_get_timeline_name(struct fence *fence)
 	return "global";
 }
 
-static bool i915_fence_signaled(struct fence *fence)
+static bool i915_fence_signaled(struct dma_fence *fence)
 {
 	return i915_gem_request_completed(to_request(fence));
 }
 
-static bool i915_fence_enable_signaling(struct fence *fence)
+static bool i915_fence_enable_signaling(struct dma_fence *fence)
 {
 	if (i915_fence_signaled(fence))
 		return false;
@@ -56,7 +56,7 @@ static bool i915_fence_enable_signaling(struct fence *fence)
 	return true;
 }
 
-static signed long i915_fence_wait(struct fence *fence,
+static signed long i915_fence_wait(struct dma_fence *fence,
 				   bool interruptible,
 				   signed long timeout_jiffies)
 {
@@ -85,26 +85,26 @@ static signed long i915_fence_wait(struct fence *fence,
 	return timeout_jiffies;
 }
 
-static void i915_fence_value_str(struct fence *fence, char *str, int size)
+static void i915_fence_value_str(struct dma_fence *fence, char *str, int size)
 {
 	snprintf(str, size, "%u", fence->seqno);
 }
 
-static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str,
 					  int size)
 {
 	snprintf(str, size, "%u",
 		 intel_engine_get_seqno(to_request(fence)->engine));
 }
 
-static void i915_fence_release(struct fence *fence)
+static void i915_fence_release(struct dma_fence *fence)
 {
 	struct drm_i915_gem_request *req = to_request(fence);
 
 	kmem_cache_free(req->i915->requests, req);
 }
 
-const struct fence_ops i915_fence_ops = {
+const struct dma_fence_ops i915_fence_ops = {
 	.get_driver_name = i915_fence_get_driver_name,
 	.get_timeline_name = i915_fence_get_timeline_name,
 	.enable_signaling = i915_fence_enable_signaling,
@@ -388,8 +388,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * The reference count is incremented atomically. If it is zero,
 	 * the lookup knows the request is unallocated and complete. Otherwise,
 	 * it is either still in use, or has been reallocated and reset
-	 * with fence_init(). This increment is safe for release as we check
-	 * that the request we have a reference to and matches the active
+	 * with dma_fence_init(). This increment is safe for release as we
+	 * check that the request we have a reference to and matches the active
 	 * request.
 	 *
 	 * Before we increment the refcount, we chase the request->engine
@@ -412,11 +412,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 		goto err;
 
 	spin_lock_init(&req->lock);
-	fence_init(&req->fence,
+	dma_fence_init(&req->fence,
 		   &i915_fence_ops,
 		   &req->lock,
 		   engine->fence_context,
 		   seqno);
 
 	i915_sw_fence_init(&req->submit, submit_notify);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 974bd7bcc801..bceeaa3a5193 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -25,7 +25,7 @@
 #ifndef I915_GEM_REQUEST_H
 #define I915_GEM_REQUEST_H
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include "i915_gem.h"
 #include "i915_sw_fence.h"
@@ -62,7 +62,7 @@ struct intel_signal_node {
  * The requests are reference counted.
  */
 struct drm_i915_gem_request {
-	struct fence fence;
+	struct dma_fence fence;
 	spinlock_t lock;
 
 	/** On Which ring this request was generated */
@@ -145,9 +145,9 @@ struct drm_i915_gem_request {
 	struct list_head execlist_link;
 };
 
-extern const struct fence_ops i915_fence_ops;
+extern const struct dma_fence_ops i915_fence_ops;
 
-static inline bool fence_is_i915(struct fence *fence)
+static inline bool fence_is_i915(struct dma_fence *fence)
 {
 	return fence->ops == &i915_fence_ops;
 }
@@ -172,7 +172,7 @@ i915_gem_request_get_engine(struct drm_i915_gem_request *req)
 }
 
 static inline struct drm_i915_gem_request *
-to_request(struct fence *fence)
+to_request(struct dma_fence *fence)
 {
 	/* We assume that NULL fence/request are interoperable */
 	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
@@ -183,19 +183,19 @@ to_request(struct fence *fence)
 static inline struct drm_i915_gem_request *
 i915_gem_request_get(struct drm_i915_gem_request *req)
 {
-	return to_request(fence_get(&req->fence));
+	return to_request(dma_fence_get(&req->fence));
 }
 
 static inline struct drm_i915_gem_request *
 i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
 {
-	return to_request(fence_get_rcu(&req->fence));
+	return to_request(dma_fence_get_rcu(&req->fence));
 }
 
 static inline void
 i915_gem_request_put(struct drm_i915_gem_request *req)
 {
-	fence_put(&req->fence);
+	dma_fence_put(&req->fence);
 }
 
 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
@@ -497,7 +497,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
 	 * compiler.
 	 *
 	 * The atomic operation at the heart of
-	 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+	 * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
 	 * atomic_inc_not_zero() which is only a full memory barrier
 	 * when successful. That is, if i915_gem_request_get_rcu()
 	 * returns the request (and so with the reference counted
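
The comment in that last hunk describes the classic RCU fence-lookup loop: dma_fence_get_rcu() is built on kref_get_unless_zero(), so it can fail when the last reference is dropped between loading the pointer and taking the reference, and it can succeed on a fence that has since been replaced. A sketch of the loop it implies, not part of the patch ("active" is an assumed RCU-protected fence pointer):

	static struct dma_fence *get_active(struct dma_fence __rcu **active)
	{
		struct dma_fence *fence;

		rcu_read_lock();
		for (;;) {
			fence = rcu_dereference(*active);
			if (!fence)
				break;			/* nothing pending */

			fence = dma_fence_get_rcu(fence);
			if (!fence)
				continue;		/* died under us, reload */

			if (fence == rcu_access_pointer(*active))
				break;			/* still current, keep it */

			dma_fence_put(fence);		/* replaced meanwhile, retry */
		}
		rcu_read_unlock();

		return fence;
	}
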
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1e5cbc585ca2..8185002d7ec8 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -8,7 +8,7 @@
  */
 
 #include <linux/slab.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/reservation.h>
 
 #include "i915_sw_fence.h"
@@ -226,49 +226,50 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 	return pending;
 }
 
-struct dma_fence_cb {
-	struct fence_cb base;
+struct i915_sw_dma_fence_cb {
+	struct dma_fence_cb base;
 	struct i915_sw_fence *fence;
-	struct fence *dma;
+	struct dma_fence *dma;
 	struct timer_list timer;
 };
 
 static void timer_i915_sw_fence_wake(unsigned long data)
 {
-	struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+	struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
 
 	printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
 	       cb->dma->ops->get_driver_name(cb->dma),
 	       cb->dma->ops->get_timeline_name(cb->dma),
 	       cb->dma->seqno);
-	fence_put(cb->dma);
+	dma_fence_put(cb->dma);
 	cb->dma = NULL;
 
 	i915_sw_fence_commit(cb->fence);
 	cb->timer.function = NULL;
 }
 
-static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+static void dma_i915_sw_fence_wake(struct dma_fence *dma,
+				   struct dma_fence_cb *data)
 {
-	struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
 
 	del_timer_sync(&cb->timer);
 	if (cb->timer.function)
 		i915_sw_fence_commit(cb->fence);
-	fence_put(cb->dma);
+	dma_fence_put(cb->dma);
 
 	kfree(cb);
 }
 
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
-				  struct fence *dma,
+				  struct dma_fence *dma,
 				  unsigned long timeout,
 				  gfp_t gfp)
 {
-	struct dma_fence_cb *cb;
+	struct i915_sw_dma_fence_cb *cb;
 	int ret;
 
-	if (fence_is_signaled(dma))
+	if (dma_fence_is_signaled(dma))
 		return 0;
 
 	cb = kmalloc(sizeof(*cb), gfp);
@@ -276,7 +277,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 		if (!gfpflags_allow_blocking(gfp))
 			return -ENOMEM;
 
-		return fence_wait(dma, false);
+		return dma_fence_wait(dma, false);
 	}
 
 	cb->fence = i915_sw_fence_get(fence);
@@ -287,11 +288,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 		       timer_i915_sw_fence_wake, (unsigned long)cb,
 		       TIMER_IRQSAFE);
 	if (timeout) {
-		cb->dma = fence_get(dma);
+		cb->dma = dma_fence_get(dma);
 		mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
 	}
 
-	ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+	ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
 	if (ret == 0) {
 		ret = 1;
 	} else {
@@ -305,16 +306,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    struct reservation_object *resv,
-				    const struct fence_ops *exclude,
+				    const struct dma_fence_ops *exclude,
 				    bool write,
 				    unsigned long timeout,
 				    gfp_t gfp)
 {
-	struct fence *excl;
+	struct dma_fence *excl;
 	int ret = 0, pending;
 
 	if (write) {
-		struct fence **shared;
+		struct dma_fence **shared;
 		unsigned int count, i;
 
 		ret = reservation_object_get_fences_rcu(resv,
@@ -339,7 +340,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		}
 
 		for (i = 0; i < count; i++)
-			fence_put(shared[i]);
+			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
 		excl = reservation_object_get_excl_rcu(resv);
@@ -356,7 +357,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		ret |= pending;
 	}
 
-	fence_put(excl);
+	dma_fence_put(excl);
 
 	return ret;
 }
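
One hunk above is more than a mechanical rename: i915 had a private struct that was already called dma_fence_cb, and with the core type now claiming that name it becomes i915_sw_dma_fence_cb. The underlying pattern is unchanged and worth spelling out, since every user of dma_fence_add_callback() relies on it: the core only knows about the embedded struct dma_fence_cb, and the driver recovers its wrapper with container_of(). A sketch, not part of the patch (my_* names are invented):

	struct my_cb {
		struct dma_fence_cb base;	/* must be embedded, not pointed to */
		void *payload;			/* whatever the driver needs later */
	};

	static void my_wake(struct dma_fence *dma, struct dma_fence_cb *data)
	{
		struct my_cb *cb = container_of(data, struct my_cb, base);

		/* cb->payload is valid here; free the wrapper once done */
		kfree(cb);
	}
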
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 373141602ca4..cd239e92f67f 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,8 +16,8 @@
 #include <linux/wait.h>
 
 struct completion;
-struct fence;
-struct fence_ops;
+struct dma_fence;
+struct dma_fence_ops;
 struct reservation_object;
 
 struct i915_sw_fence {
@@ -47,12 +47,12 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 				 struct i915_sw_fence *after,
 				 wait_queue_t *wq);
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
-				  struct fence *dma,
+				  struct dma_fence *dma,
 				  unsigned long timeout,
 				  gfp_t gfp);
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    struct reservation_object *resv,
-				    const struct fence_ops *exclude,
+				    const struct dma_fence_ops *exclude,
 				    bool write,
 				    unsigned long timeout,
 				    gfp_t gfp);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 178798002a73..5c912c25f7d3 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -491,7 +491,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 			   __entry->ring = req->engine->id;
 			   __entry->seqno = req->fence.seqno;
 			   __entry->flags = flags;
-			   fence_enable_sw_signaling(&req->fence);
+			   dma_fence_enable_sw_signaling(&req->fence);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 23fc1042fed4..56efcc507ea2 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -464,7 +464,7 @@ static int intel_breadcrumbs_signaler(void *arg)
 					      &request->signaling.wait);
 
 			local_bh_disable();
-			fence_signal(&request->fence);
+			dma_fence_signal(&request->fence);
 			local_bh_enable(); /* kick start the tasklets */
 
 			/* Find the next oldest signal. Note that as we have
@@ -502,7 +502,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 	struct rb_node *parent, **p;
 	bool first, wakeup;
 
-	/* locked by fence_enable_sw_signaling() */
+	/* locked by dma_fence_enable_sw_signaling() */
 	assert_spin_locked(&request->lock);
 
 	request->signaling.wait.tsk = b->signaler;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 2dc94812bea5..8cceb345aa0f 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -245,7 +245,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
 	INIT_LIST_HEAD(&engine->execlist_queue);
 	spin_lock_init(&engine->execlist_lock);
 
-	engine->fence_context = fence_context_alloc(1);
+	engine->fence_context = dma_fence_context_alloc(1);
 
 	intel_engine_init_requests(engine);
 	intel_engine_init_hangcheck(engine);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d0da52f2a806..940bf4992fe2 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -217,7 +217,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool exclusive, struct fence *fence);
+		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index a9b9b1c95a2e..3f299c537b77 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -15,7 +15,7 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include "msm_drv.h"
 #include "msm_fence.h"
@@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
 
 	fctx->dev = dev;
 	fctx->name = name;
-	fctx->context = fence_context_alloc(1);
+	fctx->context = dma_fence_context_alloc(1);
 	init_waitqueue_head(&fctx->event);
 	spin_lock_init(&fctx->spinlock);
 
@@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 
 struct msm_fence {
 	struct msm_fence_context *fctx;
-	struct fence base;
+	struct dma_fence base;
 };
 
-static inline struct msm_fence *to_msm_fence(struct fence *fence)
+static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
 {
 	return container_of(fence, struct msm_fence, base);
 }
 
-static const char *msm_fence_get_driver_name(struct fence *fence)
+static const char *msm_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "msm";
 }
 
-static const char *msm_fence_get_timeline_name(struct fence *fence)
+static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
 {
 	struct msm_fence *f = to_msm_fence(fence);
 	return f->fctx->name;
 }
 
-static bool msm_fence_enable_signaling(struct fence *fence)
+static bool msm_fence_enable_signaling(struct dma_fence *fence)
 {
 	return true;
 }
 
-static bool msm_fence_signaled(struct fence *fence)
+static bool msm_fence_signaled(struct dma_fence *fence)
 {
 	struct msm_fence *f = to_msm_fence(fence);
 	return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct fence *fence)
+static void msm_fence_release(struct dma_fence *fence)
 {
 	struct msm_fence *f = to_msm_fence(fence);
 	kfree_rcu(f, base.rcu);
 }
 
-static const struct fence_ops msm_fence_ops = {
+static const struct dma_fence_ops msm_fence_ops = {
 	.get_driver_name = msm_fence_get_driver_name,
 	.get_timeline_name = msm_fence_get_timeline_name,
 	.enable_signaling = msm_fence_enable_signaling,
 	.signaled = msm_fence_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = msm_fence_release,
 };
 
-struct fence *
+struct dma_fence *
 msm_fence_alloc(struct msm_fence_context *fctx)
 {
 	struct msm_fence *f;
@@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx)
 
 	f->fctx = fctx;
 
-	fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+	dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
 		   fctx->context, ++fctx->last_fence);
 
 	return &f->base;
 }
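
What makes the cheap fence_completed() seqno comparison above valid is the context machinery the rename keeps intact: dma_fence_context_alloc() hands out a unique context id, and all fences initialised with that context form one timeline on which seqnos increase monotonically. The core exposes the same ordering directly; a sketch, not part of the patch (submitted_after is an invented helper):

	/* only meaningful when both fences share a context */
	static bool submitted_after(struct dma_fence *a, struct dma_fence *b)
	{
		if (WARN_ON(a->context != b->context))
			return false;

		return dma_fence_is_later(a, b);	/* plain seqno comparison */
	}
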
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index ceb5b3d314b4..56061aa1959d 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx,
 		struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
 
-struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
+struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
 
 #endif
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b6ac27e31929..57db7dbbb618 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -521,7 +521,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int i, ret;
 
 	if (!exclusive) {
@@ -540,7 +540,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 	fence = reservation_object_get_excl(msm_obj->resv);
 	/* don't need to wait on our own fences, since ring is fifo */
 	if (fence && (fence->context != fctx->context)) {
-		ret = fence_wait(fence, true);
+		ret = dma_fence_wait(fence, true);
 		if (ret)
 			return ret;
 	}
@@ -553,7 +553,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 		fence = rcu_dereference_protected(fobj->shared[i],
 						  reservation_object_held(msm_obj->resv));
 		if (fence->context != fctx->context) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -563,7 +563,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
+		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
@@ -616,10 +616,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
 }
 
 #ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct fence *fence, const char *type,
+static void describe_fence(struct dma_fence *fence, const char *type,
 		struct seq_file *m)
 {
-	if (!fence_is_signaled(fence))
+	if (!dma_fence_is_signaled(fence))
 		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
 			   fence->ops->get_driver_name(fence),
 			   fence->ops->get_timeline_name(fence),
@@ -631,7 +631,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object *robj = msm_obj->resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index b2f13cfe945e..2cb8551fda70 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -104,7 +104,7 @@ struct msm_gem_submit {
 	struct list_head node;   /* node in gpu submit_list */
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
-	struct fence *fence;
+	struct dma_fence *fence;
 	struct pid *pid;    /* submitting process */
 	bool valid;         /* true if no cmdstream patching needed */
 	unsigned int nr_cmds;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37a65f3..25e8786fa4ca 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
 void msm_gem_submit_free(struct msm_gem_submit *submit)
 {
-	fence_put(submit->fence);
+	dma_fence_put(submit->fence);
 	list_del(&submit->node);
 	put_pid(submit->pid);
 	kfree(submit);
@@ -380,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct msm_file_private *ctx = file->driver_priv;
 	struct msm_gem_submit *submit;
 	struct msm_gpu *gpu = priv->gpu;
-	struct fence *in_fence = NULL;
+	struct dma_fence *in_fence = NULL;
 	struct sync_file *sync_file = NULL;
 	int out_fence_fd = -1;
 	unsigned i;
@@ -439,7 +439,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	 */
 
 	if (in_fence->context != gpu->fctx->context) {
-		ret = fence_wait(in_fence, true);
+		ret = dma_fence_wait(in_fence, true);
 		if (ret)
 			goto out;
 	}
@@ -542,7 +542,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
 out:
 	if (in_fence)
-		fence_put(in_fence);
+		dma_fence_put(in_fence);
 	submit_cleanup(submit);
 	if (ret)
 		msm_gem_submit_free(submit);
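
The msm submit hunks above are representative of the whole rename: every wait, get, and put now goes through the dma_fence_* entry points, and waits are skipped for fences from the submitter's own context because the ring retires them in order. A minimal sketch of that consume-an-in-fence pattern in isolation; my_submit_wait_in_fence() and own_context are hypothetical names, not msm code:

/* Hedged sketch: consume an in-fence before command submission. */
#include <linux/dma-fence.h>

static long my_submit_wait_in_fence(struct dma_fence *in_fence, u64 own_context)
{
	long ret = 0;

	if (!in_fence)
		return 0;

	/* Fences from our own context retire in ring order, so only
	 * foreign contexts need an explicit, interruptible wait. */
	if (in_fence->context != own_context)
		ret = dma_fence_wait(in_fence, true);

	dma_fence_put(in_fence);	/* drop the reference taken at lookup */
	return ret;
}
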
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5bb09838b5ae..3249707e6834 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -476,7 +476,7 @@ static void retire_submits(struct msm_gpu *gpu)
 		submit = list_first_entry(&gpu->submit_list,
 				struct msm_gem_submit, node);
 
-		if (fence_is_signaled(submit->fence)) {
+		if (dma_fence_is_signaled(submit->fence)) {
 			retire_submit(gpu, submit);
 		} else {
 			break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 343b8659472c..ec8ac756aab4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
 
 static void
 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
-			struct fence *fence)
+			struct dma_fence *fence)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (tile) {
 		spin_lock(&drm->tile.lock);
-		tile->fence = (struct nouveau_fence *)fence_get(fence);
+		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
 		tile->used = false;
 		spin_unlock(&drm->tile.lock);
 	}
@@ -1243,7 +1243,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct fence *fence = reservation_object_get_excl(bo->resv);
+	struct dma_fence *fence = reservation_object_get_excl(bo->resv);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4bb9ab892ae1..e9529ee6bc23 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,7 +28,7 @@
 
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
 
 #include <nvif/cl826e.h>
 #include <nvif/notify.h>
@@ -38,11 +38,11 @@
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
-static const struct fence_ops nouveau_fence_ops_uevent;
-static const struct fence_ops nouveau_fence_ops_legacy;
+static const struct dma_fence_ops nouveau_fence_ops_uevent;
+static const struct dma_fence_ops nouveau_fence_ops_legacy;
 
 static inline struct nouveau_fence *
-from_fence(struct fence *fence)
+from_fence(struct dma_fence *fence)
 {
 	return container_of(fence, struct nouveau_fence, base);
 }
@@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence)
 {
 	int drop = 0;
 
-	fence_signal_locked(&fence->base);
+	dma_fence_signal_locked(&fence->base);
 	list_del(&fence->head);
 	rcu_assign_pointer(fence->channel, NULL);
 
-	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
 		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
 		if (!--fctx->notify_ref)
 			drop = 1;
 	}
 
-	fence_put(&fence->base);
+	dma_fence_put(&fence->base);
 	return drop;
 }
 
 static struct nouveau_fence *
-nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
+nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) {
 	struct nouveau_fence_priv *priv = (void*)drm->fence;
 
 	if (fence->ops != &nouveau_fence_ops_legacy &&
@@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
 
 struct nouveau_fence_work {
 	struct work_struct work;
-	struct fence_cb cb;
+	struct dma_fence_cb cb;
 	void (*func)(void *);
 	void *data;
 };
@@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork)
 	kfree(work);
 }
 
-static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
+static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
 
@@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
 }
 
 void
-nouveau_fence_work(struct fence *fence,
+nouveau_fence_work(struct dma_fence *fence,
 		   void (*func)(void *), void *data)
 {
 	struct nouveau_fence_work *work;
 
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		goto err;
 
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence,
 	work->func = func;
 	work->data = data;
 
-	if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
+	if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
 		goto err_free;
 	return;
 
@@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 	fence->timeout = jiffies + (15 * HZ);
 
 	if (priv->uevent)
-		fence_init(&fence->base, &nouveau_fence_ops_uevent,
+		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
 			   &fctx->lock, fctx->context, ++fctx->sequence);
 	else
-		fence_init(&fence->base, &nouveau_fence_ops_legacy,
+		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
 			   &fctx->lock, fctx->context, ++fctx->sequence);
 	kref_get(&fctx->fence_ref);
 
-	trace_fence_emit(&fence->base);
+	trace_dma_fence_emit(&fence->base);
 	ret = fctx->emit(fence);
 	if (!ret) {
-		fence_get(&fence->base);
+		dma_fence_get(&fence->base);
 		spin_lock_irq(&fctx->lock);
 
 		if (nouveau_fence_update(chan, fctx))
@@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
 		struct nouveau_channel *chan;
 		unsigned long flags;
 
-		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 			return true;
 
 		spin_lock_irqsave(&fctx->lock, flags);
@@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
 			nvif_notify_put(&fctx->notify);
 		spin_unlock_irqrestore(&fctx->lock, flags);
 	}
-	return fence_is_signaled(&fence->base);
+	return dma_fence_is_signaled(&fence->base);
 }
 
 static long
-nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
+nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
@@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 	if (!lazy)
 		return nouveau_fence_wait_busy(fence, intr);
 
-	ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+	ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
 	if (ret < 0)
 		return ret;
 	else if (!ret)
@@ -391,7 +391,7 @@ int
 nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
-	struct fence *fence;
+	struct dma_fence *fence;
 	struct reservation_object *resv = nvbo->bo.resv;
 	struct reservation_object_list *fobj;
 	struct nouveau_fence *f;
@@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		}
 
 		if (must_wait)
-			ret = fence_wait(fence, intr);
+			ret = dma_fence_wait(fence, intr);
 
 		return ret;
 	}
@@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		}
 
 		if (must_wait)
-			ret = fence_wait(fence, intr);
+			ret = dma_fence_wait(fence, intr);
 	}
 
 	return ret;
@@ -456,7 +456,7 @@ void
 nouveau_fence_unref(struct nouveau_fence **pfence)
 {
 	if (*pfence)
-		fence_put(&(*pfence)->base);
+		dma_fence_put(&(*pfence)->base);
 	*pfence = NULL;
 }
 
@@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	return ret;
 }
 
-static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
+static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
 {
 	return "nouveau";
 }
 
-static const char *nouveau_fence_get_timeline_name(struct fence *f)
+static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
  * result. The drm node should still be there, so we can derive the index from
  * the fence context.
  */
-static bool nouveau_fence_is_signaled(struct fence *f)
+static bool nouveau_fence_is_signaled(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f)
 	return ret;
 }
 
-static bool nouveau_fence_no_signaling(struct fence *f)
+static bool nouveau_fence_no_signaling(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 
@@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f)
 	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
 
 	/*
-	 * This needs uevents to work correctly, but fence_add_callback relies on
+	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
 	 * being able to enable signaling. It will still get signaled eventually,
 	 * just not right away.
 	 */
 	if (nouveau_fence_is_signaled(f)) {
 		list_del(&fence->head);
 
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 		return false;
 	}
 
 	return true;
}
 
-static void nouveau_fence_release(struct fence *f)
+static void nouveau_fence_release(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
 	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
-	fence_free(&fence->base);
+	dma_fence_free(&fence->base);
 }
 
-static const struct fence_ops nouveau_fence_ops_legacy = {
+static const struct dma_fence_ops nouveau_fence_ops_legacy = {
 	.get_driver_name = nouveau_fence_get_get_driver_name,
 	.get_timeline_name = nouveau_fence_get_timeline_name,
 	.enable_signaling = nouveau_fence_no_signaling,
@@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = {
 	.release = nouveau_fence_release
 };
 
-static bool nouveau_fence_enable_signaling(struct fence *f)
+static bool nouveau_fence_enable_signaling(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f)
 
 	ret = nouveau_fence_no_signaling(f);
 	if (ret)
-		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
 	else if (!--fctx->notify_ref)
 		nvif_notify_put(&fctx->notify);
 
 	return ret;
 }
 
-static const struct fence_ops nouveau_fence_ops_uevent = {
+static const struct dma_fence_ops nouveau_fence_ops_uevent = {
 	.get_driver_name = nouveau_fence_get_get_driver_name,
 	.get_timeline_name = nouveau_fence_get_timeline_name,
 	.enable_signaling = nouveau_fence_enable_signaling,
 	.signaled = nouveau_fence_is_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = NULL
 };
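
The nouveau conversion above touches every part of the renamed API surface. For reference, the smallest complete backend under the new names needs the two name callbacks plus enable_signaling and wait, which are still mandatory in this version of the interface. A hedged, self-contained sketch; all toy_* names are hypothetical, not nouveau code:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(toy_fence_lock);

static const char *toy_get_driver_name(struct dma_fence *f)
{
	return "toy";
}

static const char *toy_get_timeline_name(struct dma_fence *f)
{
	return "toy.timeline";
}

static bool toy_enable_signaling(struct dma_fence *f)
{
	/* Nothing to arm here; the creator signals via dma_fence_signal(). */
	return true;
}

static const struct dma_fence_ops toy_fence_ops = {
	.get_driver_name = toy_get_driver_name,
	.get_timeline_name = toy_get_timeline_name,
	.enable_signaling = toy_enable_signaling,
	.wait = dma_fence_default_wait,
};

/* Allocate and initialize one fence on a fresh timeline. */
static struct dma_fence *toy_fence_create(void)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	dma_fence_init(f, &toy_fence_ops, &toy_fence_lock,
		       dma_fence_context_alloc(1), 1);
	return f;
}

Without a .release hook, dropping the last reference with dma_fence_put() frees the fence through dma_fence_free(), which matches the kzalloc() above.
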
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64c4ce7115ad..41f3c019e534 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,14 +1,14 @@
 #ifndef __NOUVEAU_FENCE_H__
 #define __NOUVEAU_FENCE_H__
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <nvif/notify.h>
 
 struct nouveau_drm;
 struct nouveau_bo;
 
 struct nouveau_fence {
-	struct fence base;
+	struct dma_fence base;
 
 	struct list_head head;
 
@@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
 
 int  nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
 int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int  nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0bd7164bc817..7f083c95f422 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
 	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
 	struct reservation_object *resv = nvbo->bo.resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 
 	fobj = reservation_object_get_list(resv);
 
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 1915b7b82a59..fa8f2375c398 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv04_fence_context_new;
 	priv->base.context_del = nv04_fence_context_del;
 	priv->base.contexts = 15;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 4e3de34ff6f4..f99fcf56928a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -107,7 +107,7 @@ nv10_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv10_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
 	priv->base.contexts = 31;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 7d5e562a55c5..79bc01111351 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv17_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
 	priv->base.contexts = 31;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 4d6f202b7770..8c5295414578 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -97,7 +97,7 @@ nv50_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv50_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
 	priv->base.contexts = 127;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 18bde9d8e6d6..23ef04b4e0b2 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -229,7 +229,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_del = nv84_fence_context_del;
 
 	priv->base.contexts = fifo->nr;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 5f3e5ad99de7..84995ebc6ffc 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,7 +31,7 @@
  * Definitions taken from spice-protocol, plus kernel driver specific bits.
  */
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/workqueue.h>
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
@@ -190,7 +190,7 @@ enum {
  * spice-protocol/qxl_dev.h */
 #define QXL_MAX_RES 96
 struct qxl_release {
-	struct fence base;
+	struct dma_fence base;
 
 	int id;
 	int type;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index cd83f050cf3e..50b4e522f05f 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -21,7 +21,7 @@
  */
 #include "qxl_drv.h"
 #include "qxl_object.h"
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
 
 /*
  * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -40,23 +40,24 @@
 static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
 static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
 
-static const char *qxl_get_driver_name(struct fence *fence)
+static const char *qxl_get_driver_name(struct dma_fence *fence)
 {
 	return "qxl";
 }
 
-static const char *qxl_get_timeline_name(struct fence *fence)
+static const char *qxl_get_timeline_name(struct dma_fence *fence)
 {
 	return "release";
 }
 
-static bool qxl_nop_signaling(struct fence *fence)
+static bool qxl_nop_signaling(struct dma_fence *fence)
 {
 	/* fences are always automatically signaled, so just pretend we did this.. */
 	return true;
 }
 
-static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
+static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+			   signed long timeout)
 {
 	struct qxl_device *qdev;
 	struct qxl_release *release;
@@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
retry:
 	sc++;
 
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		goto signaled;
 
 	qxl_io_notify_oom(qdev);
@@ -80,11 +81,11 @@ retry:
 		if (!qxl_queue_garbage_collect(qdev, true))
 			break;
 
-		if (fence_is_signaled(fence))
+		if (dma_fence_is_signaled(fence))
 			goto signaled;
 	}
 
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		goto signaled;
 
 	if (have_drawable_releases || sc < 4) {
@@ -96,9 +97,9 @@ retry:
 			return 0;
 
 		if (have_drawable_releases && sc > 300) {
-			FENCE_WARN(fence, "failed to wait on release %llu "
+			DMA_FENCE_WARN(fence, "failed to wait on release %llu "
 				   "after spincount %d\n",
 				   fence->context & ~0xf0000000, sc);
 			goto signaled;
 		}
 		goto retry;
@@ -115,7 +116,7 @@ signaled:
 	return end - cur;
 }
 
-static const struct fence_ops qxl_fence_ops = {
+static const struct dma_fence_ops qxl_fence_ops = {
 	.get_driver_name = qxl_get_driver_name,
 	.get_timeline_name = qxl_get_timeline_name,
 	.enable_signaling = qxl_nop_signaling,
@@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev,
 		WARN_ON(list_empty(&release->bos));
 		qxl_release_free_list(release);
 
-		fence_signal(&release->base);
-		fence_put(&release->base);
+		dma_fence_signal(&release->base);
+		dma_fence_put(&release->base);
 	} else {
 		qxl_release_free_list(release);
 		kfree(release);
@@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 	 * Since we never really allocated a context and we don't want to conflict,
 	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
 	 */
-	fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
+	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
 		   release->id | 0xf0000000, release->base.seqno);
-	trace_fence_emit(&release->base);
+	trace_dma_fence_emit(&release->base);
 
 	driver = bdev->driver;
 	glob = bo->glob;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1b0dcad916b0..44e0c5ed6418 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -66,7 +66,7 @@
 #include <linux/kref.h>
 #include <linux/interval_tree.h>
 #include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include <ttm/ttm_bo_api.h>
 #include <ttm/ttm_bo_driver.h>
@@ -367,7 +367,7 @@ struct radeon_fence_driver {
 };
 
 struct radeon_fence {
-	struct fence base;
+	struct dma_fence base;
 
 	struct radeon_device	*rdev;
 	uint64_t		seq;
@@ -746,7 +746,7 @@ struct radeon_flip_work {
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct radeon_bo		*old_rbo;
-	struct fence			*fence;
+	struct dma_fence		*fence;
 	bool				async;
 };
 
@@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
 /*
  * Cast helper
 */
-extern const struct fence_ops radeon_fence_ops;
+extern const struct dma_fence_ops radeon_fence_ops;
 
-static inline struct radeon_fence *to_radeon_fence(struct fence *f)
+static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
 {
 	struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index eb92aef46e3c..36b7ac7e57e5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1320,7 +1320,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
 		rdev->ring[i].idx = i;
 	}
-	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
+	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
 
 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
 		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index cdb8cb568c15..e7409e8a9f87 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
 				down_read(&rdev->exclusive_lock);
 			}
 		} else
-			r = fence_wait(work->fence, false);
+			r = dma_fence_wait(work->fence, false);
 
 		if (r)
 			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
@@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
 		 * confused about which BO the CRTC is scanning out
 		 */
 
-		fence_put(work->fence);
+		dma_fence_put(work->fence);
 		work->fence = NULL;
 	}
 
@@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
@@ -617,7 +617,7 @@ pflip_cleanup:
 
cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	fence_put(work->fence);
+	dma_fence_put(work->fence);
 	kfree(work);
 	return r;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ef075acde9c..ef09f0a63754 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev,
 	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
 	(*fence)->ring = ring;
 	(*fence)->is_vm_update = false;
-	fence_init(&(*fence)->base, &radeon_fence_ops,
-		   &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
+	dma_fence_init(&(*fence)->base, &radeon_fence_ops,
+		       &rdev->fence_queue.lock,
+		       rdev->fence_context + ring,
+		       seq);
 	radeon_fence_ring_emit(rdev, ring, *fence);
 	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
 	radeon_fence_schedule_check(rdev, ring);
@@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
 	 */
 	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
 	if (seq >= fence->seq) {
-		int ret = fence_signal_locked(&fence->base);
+		int ret = dma_fence_signal_locked(&fence->base);
 
 		if (!ret)
-			FENCE_TRACE(&fence->base, "signaled from irq context\n");
+			DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
 		else
-			FENCE_TRACE(&fence->base, "was already signaled\n");
+			DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
 
 		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
 		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 	} else
-		FENCE_TRACE(&fence->base, "pending\n");
+		DMA_FENCE_TRACE(&fence->base, "pending\n");
 	return 0;
 }
 
@@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
 	return false;
 }
 
-static bool radeon_fence_is_signaled(struct fence *f)
+static bool radeon_fence_is_signaled(struct dma_fence *f)
 {
 	struct radeon_fence *fence = to_radeon_fence(f);
 	struct radeon_device *rdev = fence->rdev;
@@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f)
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
-static bool radeon_fence_enable_signaling(struct fence *f)
+static bool radeon_fence_enable_signaling(struct dma_fence *f)
 {
 	struct radeon_fence *fence = to_radeon_fence(f);
 	struct radeon_device *rdev = fence->rdev;
@@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f)
 	fence->fence_wake.private = NULL;
 	fence->fence_wake.func = radeon_fence_check_signaled;
 	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
-	fence_get(f);
+	dma_fence_get(f);
 
-	FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
+	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
 	return true;
 }
 
@@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
 		int ret;
 
-		ret = fence_signal(&fence->base);
+		ret = dma_fence_signal(&fence->base);
 		if (!ret)
-			FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+			DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
 		return true;
 	}
 	return false;
@@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
 	 * exclusive_lock is not held in that case.
 	 */
 	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
-		return fence_wait(&fence->base, intr);
+		return dma_fence_wait(&fence->base, intr);
 
 	seq[fence->ring] = fence->seq;
 	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
@@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
 		return r;
 	}
 
-	r_sig = fence_signal(&fence->base);
+	r_sig = dma_fence_signal(&fence->base);
 	if (!r_sig)
-		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+		DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
 	return r;
 }
 
@@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
-	fence_get(&fence->base);
+	dma_fence_get(&fence->base);
 	return fence;
}
 
@@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
 
 	*fence = NULL;
 	if (tmp) {
-		fence_put(&tmp->base);
+		dma_fence_put(&tmp->base);
 	}
}
 
@@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev)
#endif
}
 
-static const char *radeon_fence_get_driver_name(struct fence *fence)
+static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
 	return "radeon";
}
 
-static const char *radeon_fence_get_timeline_name(struct fence *f)
+static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
 	struct radeon_fence *fence = to_radeon_fence(f);
 	switch (fence->ring) {
@@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f)
 
static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
-	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
 
struct radeon_wait_cb {
-	struct fence_cb base;
+	struct dma_fence_cb base;
 	struct task_struct *task;
};
 
static void
-radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
 	struct radeon_wait_cb *wait =
 		container_of(cb, struct radeon_wait_cb, base);
@@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
 	wake_up_process(wait->task);
}
 
-static signed long radeon_fence_default_wait(struct fence *f, bool intr,
+static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
 					     signed long t)
{
 	struct radeon_fence *fence = to_radeon_fence(f);
@@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
 
 	cb.task = current;
 
-	if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
 		return t;
 
 	while (t > 0) {
@@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
 	}
 
 	__set_current_state(TASK_RUNNING);
-	fence_remove_callback(f, &cb.base);
+	dma_fence_remove_callback(f, &cb.base);
 
 	return t;
}
 
-const struct fence_ops radeon_fence_ops = {
+const struct dma_fence_ops radeon_fence_ops = {
 	.get_driver_name = radeon_fence_get_driver_name,
 	.get_timeline_name = radeon_fence_get_timeline_name,
 	.enable_signaling = radeon_fence_enable_signaling,
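
radeon_fence_default_wait() above is the template for a driver-specific .wait under the new names: register a callback that wakes the sleeping task, sleep in a loop, and always remove the callback on the way out. A condensed sketch of the same pattern; the my_* names are hypothetical, not radeon code:

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/sched.h>

struct my_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void my_wait_wake(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct my_wait_cb *wait = container_of(cb, struct my_wait_cb, base);

	wake_up_process(wait->task);
}

static signed long my_fence_wait(struct dma_fence *f, bool intr, signed long t)
{
	struct my_wait_cb cb = { .task = current };

	/* A non-zero return means the fence has already signaled. */
	if (dma_fence_add_callback(f, &cb.base, my_wait_wake))
		return t;

	while (t > 0) {
		set_current_state(intr ? TASK_INTERRUPTIBLE
				       : TASK_UNINTERRUPTIBLE);
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;
		if (intr && signal_pending(current)) {
			t = -ERESTARTSYS;
			break;
		}
		t = schedule_timeout(t);
	}

	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(f, &cb.base);	/* safe even after signal */
	return t;
}
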
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 02ac8a1de4ff..be5d7a38d3aa 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
 		     bool shared)
{
 	struct reservation_object_list *flist;
-	struct fence *f;
+	struct dma_fence *f;
 	struct radeon_fence *fence;
 	unsigned i;
 	int r = 0;
@@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
 	if (fence && fence->rdev == rdev)
 		radeon_sync_fence(sync, fence);
 	else if (f)
-		r = fence_wait(f, true);
+		r = dma_fence_wait(f, true);
 
 	flist = reservation_object_get_list(resv);
 	if (shared || !flist || r)
@@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
 		if (fence && fence->rdev == rdev)
 			radeon_sync_fence(sync, fence);
 		else
-			r = fence_wait(f, true);
+			r = dma_fence_wait(f, true);
 
 		if (r)
 			break;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 0cd0e7bdee55..d34d1cf33895 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
 	int32_t *msg, msg_type, handle;
 	unsigned img_size = 0;
-	struct fence *f;
+	struct dma_fence *f;
 	void *ptr;
 
 	int i, r;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index fc6217dfe401..915e0d1c316a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	BUG_ON(!list_empty(&bo->ddestroy));
 	ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
-	fence_put(bo->moving);
+	dma_fence_put(bo->moving);
 	if (bo->resv == &bo->ttm_resv)
 		reservation_object_fini(&bo->ttm_resv);
 	mutex_destroy(&bo->wu_mutex);
@@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int i;
 
 	fobj = reservation_object_get_list(bo->resv);
 	fence = reservation_object_get_excl(bo->resv);
 	if (fence && !fence->ops->signaled)
-		fence_enable_sw_signaling(fence);
+		dma_fence_enable_sw_signaling(fence);
 
 	for (i = 0; fobj && i < fobj->shared_count; ++i) {
 		fence = rcu_dereference_protected(fobj->shared[i],
 					reservation_object_held(bo->resv));
 
 		if (!fence->ops->signaled)
-			fence_enable_sw_signaling(fence);
+			dma_fence_enable_sw_signaling(fence);
 	}
}
 
@@ -792,11 +792,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 				 struct ttm_mem_type_manager *man,
 				 struct ttm_mem_reg *mem)
{
-	struct fence *fence;
+	struct dma_fence *fence;
 	int ret;
 
 	spin_lock(&man->move_lock);
-	fence = fence_get(man->move);
+	fence = dma_fence_get(man->move);
 	spin_unlock(&man->move_lock);
 
 	if (fence) {
@@ -806,7 +806,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 		if (unlikely(ret))
 			return ret;
 
-		fence_put(bo->moving);
+		dma_fence_put(bo->moving);
 		bo->moving = fence;
 	}
 
@@ -1286,7 +1286,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
{
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int ret;
 
 	/*
@@ -1309,12 +1309,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	spin_unlock(&glob->lru_lock);
 
 	spin_lock(&man->move_lock);
-	fence = fence_get(man->move);
+	fence = dma_fence_get(man->move);
 	spin_unlock(&man->move_lock);
 
 	if (fence) {
-		ret = fence_wait(fence, false);
-		fence_put(fence);
+		ret = dma_fence_wait(fence, false);
+		dma_fence_put(fence);
 		if (ret) {
 			if (allow_errors) {
 				return ret;
@@ -1343,7 +1343,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		       mem_type);
 		return ret;
 	}
-	fence_put(man->move);
+	dma_fence_put(man->move);
 
 	man->use_type = false;
 	man->has_type = false;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bf6e21655c57..d0459b392e5e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
EXPORT_SYMBOL(ttm_bo_kunmap);
 
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-			      struct fence *fence,
+			      struct dma_fence *fence,
 			      bool evict,
 			      struct ttm_mem_reg *new_mem)
{
@@ -674,8 +674,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		 * operation has completed.
 		 */
 
-		fence_put(bo->moving);
-		bo->moving = fence_get(fence);
+		dma_fence_put(bo->moving);
+		bo->moving = dma_fence_get(fence);
 
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)
@@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct fence *fence, bool evict,
+			 struct dma_fence *fence, bool evict,
 			 struct ttm_mem_reg *new_mem)
{
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 		 * operation has completed.
 		 */
 
-		fence_put(bo->moving);
-		bo->moving = fence_get(fence);
+		dma_fence_put(bo->moving);
+		bo->moving = dma_fence_get(fence);
 
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)
@@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 		 */
 
 		spin_lock(&from->move_lock);
-		if (!from->move || fence_is_later(fence, from->move)) {
-			fence_put(from->move);
-			from->move = fence_get(fence);
+		if (!from->move || dma_fence_is_later(fence, from->move)) {
+			dma_fence_put(from->move);
+			from->move = dma_fence_get(fence);
 		}
 		spin_unlock(&from->move_lock);
 
 		ttm_bo_free_old_node(bo);
 
-		fence_put(bo->moving);
-		bo->moving = fence_get(fence);
+		dma_fence_put(bo->moving);
+		bo->moving = dma_fence_get(fence);
 
 	} else {
 		/**
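
The ttm_bo_pipeline_move() hunk above keeps only the newest fence per memory manager. dma_fence_is_later() compares seqnos and is only meaningful for fences on the same context, which holds here because each manager's moves come from a single timeline. The keep-the-latest idiom in isolation, as a sketch with illustrative names (this helper is not part of TTM):

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

/* Replace *slot with fence iff fence is newer on the same timeline.
 * 'lock' guards 'slot', mirroring man->move_lock in the hunk above. */
static void my_track_latest_fence(struct dma_fence **slot,
				  struct dma_fence *fence,
				  spinlock_t *lock)
{
	spin_lock(lock);
	if (!*slot || dma_fence_is_later(fence, *slot)) {
		dma_fence_put(*slot);		/* NULL-safe */
		*slot = dma_fence_get(fence);	/* take our own reference */
	}
	spin_unlock(lock);
}
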
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a6ed9d5e5167..4748aedc933a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 	/*
 	 * Quick non-stalling check for idle.
 	 */
-	if (fence_is_signaled(bo->moving))
+	if (dma_fence_is_signaled(bo->moving))
 		goto out_clear;
 
 	/*
@@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 			goto out_unlock;
 
 		up_read(&vma->vm_mm->mmap_sem);
-		(void) fence_wait(bo->moving, true);
+		(void) dma_fence_wait(bo->moving, true);
 		goto out_unlock;
 	}
 
 	/*
 	 * Ordinary wait.
 	 */
-	ret = fence_wait(bo->moving, true);
+	ret = dma_fence_wait(bo->moving, true);
 	if (unlikely(ret != 0)) {
 		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
 			VM_FAULT_NOPAGE;
@@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 	}
 
out_clear:
-	fence_put(bo->moving);
+	dma_fence_put(bo->moving);
 	bo->moving = NULL;
 
out_unlock:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index a80717b35dc6..d35bc491e8de 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
-				 struct list_head *list, struct fence *fence)
+				 struct list_head *list,
+				 struct dma_fence *fence)
{
 	struct ttm_validate_buffer *entry;
 	struct ttm_buffer_object *bo;
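
ttm_eu_fence_buffer_objects() now publishes a struct dma_fence to every reserved buffer. A hedged sketch of the reservation-object calls such a loop builds on, in isolation; my_publish_fence() is a hypothetical helper, while the reservation_object_* functions are the real API of this era:

#include <linux/reservation.h>

/* Attach a fence to a locked reservation object: exclusive for writes,
 * shared for reads. The caller must hold the reservation (ww_mutex). */
static int my_publish_fence(struct reservation_object *resv,
			    struct dma_fence *fence, bool write)
{
	int ret = 0;

	if (write) {
		reservation_object_add_excl_fence(resv, fence);
	} else {
		/* A shared slot must be reserved before adding. */
		ret = reservation_object_reserve_shared(resv);
		if (!ret)
			reservation_object_add_shared_fence(resv, fence);
	}
	return ret;
}
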
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 5c57c1ffa1f9..488909a21ed8 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -28,56 +28,57 @@
 #define VGEM_FENCE_TIMEOUT (10*HZ)
 
 struct vgem_fence {
-	struct fence base;
+	struct dma_fence base;
 	struct spinlock lock;
 	struct timer_list timer;
 };
 
-static const char *vgem_fence_get_driver_name(struct fence *fence)
+static const char *vgem_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "vgem";
 }
 
-static const char *vgem_fence_get_timeline_name(struct fence *fence)
+static const char *vgem_fence_get_timeline_name(struct dma_fence *fence)
 {
 	return "unbound";
 }
 
-static bool vgem_fence_signaled(struct fence *fence)
+static bool vgem_fence_signaled(struct dma_fence *fence)
 {
 	return false;
 }
 
-static bool vgem_fence_enable_signaling(struct fence *fence)
+static bool vgem_fence_enable_signaling(struct dma_fence *fence)
 {
 	return true;
 }
 
-static void vgem_fence_release(struct fence *base)
+static void vgem_fence_release(struct dma_fence *base)
 {
 	struct vgem_fence *fence = container_of(base, typeof(*fence), base);
 
 	del_timer_sync(&fence->timer);
-	fence_free(&fence->base);
+	dma_fence_free(&fence->base);
 }
 
-static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
 {
 	snprintf(str, size, "%u", fence->seqno);
 }
 
-static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
 					  int size)
 {
-	snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+	snprintf(str, size, "%u",
+		 dma_fence_is_signaled(fence) ? fence->seqno : 0);
 }
 
-static const struct fence_ops vgem_fence_ops = {
+static const struct dma_fence_ops vgem_fence_ops = {
 	.get_driver_name = vgem_fence_get_driver_name,
 	.get_timeline_name = vgem_fence_get_timeline_name,
 	.enable_signaling = vgem_fence_enable_signaling,
 	.signaled = vgem_fence_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = vgem_fence_release,
 
 	.fence_value_str = vgem_fence_value_str,
@@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data)
 {
 	struct vgem_fence *fence = (struct vgem_fence *)data;
 
-	fence_signal(&fence->base);
+	dma_fence_signal(&fence->base);
 }
 
-static struct fence *vgem_fence_create(struct vgem_file *vfile,
+static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
 				       unsigned int flags)
 {
 	struct vgem_fence *fence;
 
@@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile,
 		return NULL;
 
 	spin_lock_init(&fence->lock);
-	fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
-		   fence_context_alloc(1), 1);
+	dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+		       dma_fence_context_alloc(1), 1);
 
 	setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
 
@@ -157,7 +158,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 	struct vgem_file *vfile = file->driver_priv;
 	struct reservation_object *resv;
 	struct drm_gem_object *obj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int ret;
 
 	if (arg->flags & ~VGEM_FENCE_WRITE)
@@ -209,8 +210,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 	}
 err_fence:
 	if (ret) {
-		fence_signal(fence);
-		fence_put(fence);
+		dma_fence_signal(fence);
+		dma_fence_put(fence);
 	}
 err:
 	drm_gem_object_unreference_unlocked(obj);
@@ -239,7 +240,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
 {
 	struct vgem_file *vfile = file->driver_priv;
 	struct drm_vgem_fence_signal *arg = data;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int ret = 0;
 
 	if (arg->flags)
@@ -253,11 +254,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
 	if (IS_ERR(fence))
 		return PTR_ERR(fence);
 
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		ret = -ETIMEDOUT;
 
-	fence_signal(fence);
-	fence_put(fence);
+	dma_fence_signal(fence);
+	dma_fence_put(fence);
 	return ret;
 }
 
@@ -271,8 +272,8 @@ int vgem_fence_open(struct vgem_file *vfile)
 
 static int __vgem_fence_idr_fini(int id, void *p, void *data)
 {
-	fence_signal(p);
-	fence_put(p);
+	dma_fence_signal(p);
+	dma_fence_put(p);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index ae59080d63d1..ec1ebdcfe80b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -82,7 +82,7 @@ struct virtio_gpu_fence_driver {
 };
 
 struct virtio_gpu_fence {
-	struct fence f;
+	struct dma_fence f;
 	struct virtio_gpu_fence_driver *drv;
 	struct list_head node;
 	uint64_t seq;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f3f70fa8a4c7..23353521f903 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -26,22 +26,22 @@
 #include <drm/drmP.h>
 #include "virtgpu_drv.h"
 
-static const char *virtio_get_driver_name(struct fence *f)
+static const char *virtio_get_driver_name(struct dma_fence *f)
 {
 	return "virtio_gpu";
 }
 
-static const char *virtio_get_timeline_name(struct fence *f)
+static const char *virtio_get_timeline_name(struct dma_fence *f)
 {
 	return "controlq";
 }
 
-static bool virtio_enable_signaling(struct fence *f)
+static bool virtio_enable_signaling(struct dma_fence *f)
 {
 	return true;
 }
 
-static bool virtio_signaled(struct fence *f)
+static bool virtio_signaled(struct dma_fence *f)
 {
 	struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
@@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f)
 	return false;
 }
 
-static void virtio_fence_value_str(struct fence *f, char *str, int size)
+static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
 {
 	struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
 	snprintf(str, size, "%llu", fence->seq);
 }
 
-static void virtio_timeline_value_str(struct fence *f, char *str, int size)
+static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
 {
 	struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
 	snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
 }
 
-static const struct fence_ops virtio_fence_ops = {
+static const struct dma_fence_ops virtio_fence_ops = {
 	.get_driver_name = virtio_get_driver_name,
 	.get_timeline_name = virtio_get_timeline_name,
 	.enable_signaling = virtio_enable_signaling,
 	.signaled = virtio_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.fence_value_str = virtio_fence_value_str,
 	.timeline_value_str = virtio_timeline_value_str,
 };
@@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 	spin_lock_irqsave(&drv->lock, irq_flags);
 	(*fence)->drv = drv;
 	(*fence)->seq = ++drv->sync_seq;
-	fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
+	dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
 		   drv->context, (*fence)->seq);
-	fence_get(&(*fence)->f);
+	dma_fence_get(&(*fence)->f);
 	list_add_tail(&(*fence)->node, &drv->fences);
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
 
@@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
 	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
 		if (last_seq < fence->seq)
 			continue;
-		fence_signal_locked(&fence->f);
+		dma_fence_signal_locked(&fence->f);
 		list_del(&fence->node);
-		fence_put(&fence->f);
+		dma_fence_put(&fence->f);
 	}
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 818478b4c4f0..61f3a963af95 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	/* fence the command bo */
 	virtio_gpu_unref_list(&validate_list);
 	drm_free_large(buflist);
-	fence_put(&fence->f);
+	dma_fence_put(&fence->f);
 	return 0;
 
 out_unresv:
@@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		drm_gem_object_release(obj);
 		if (vgdev->has_virgl_3d) {
 			virtio_gpu_unref_list(&validate_list);
-			fence_put(&fence->f);
+			dma_fence_put(&fence->f);
 		}
 		return ret;
 	}
@@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 
 	if (vgdev->has_virgl_3d) {
 		virtio_gpu_unref_list(&validate_list);
-		fence_put(&fence->f);
+		dma_fence_put(&fence->f);
 	}
 	return 0;
 fail_unref:
 	if (vgdev->has_virgl_3d) {
 		virtio_gpu_unref_list(&validate_list);
-		fence_put(&fence->f);
+		dma_fence_put(&fence->f);
 	}
 //fail_obj:
 //	drm_gem_object_handle_unreference_unlocked(obj);
@@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 		reservation_object_add_excl_fence(qobj->tbo.resv,
 						  &fence->f);
 
-	fence_put(&fence->f);
+	dma_fence_put(&fence->f);
 out_unres:
 	virtio_gpu_object_unreserve(qobj);
 out:
@@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			args->level, &box, &fence);
 		reservation_object_add_excl_fence(qobj->tbo.resv,
 						  &fence->f);
-		fence_put(&fence->f);
+		dma_fence_put(&fence->f);
 	}
 
 out_unres:
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 036b0fbae0fb..1235519853f4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
 	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
 
-	vgdev->fence_drv.context = fence_context_alloc(1);
+	vgdev->fence_drv.context = dma_fence_context_alloc(1);
 	spin_lock_init(&vgdev->fence_drv.lock);
 	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
 	INIT_LIST_HEAD(&vgdev->cap_cache);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index ba28c0f6f28a..cb75f0663ba0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 		if (!ret) {
 			reservation_object_add_excl_fence(bo->tbo.resv,
 							  &fence->f);
-			fence_put(&fence->f);
+			dma_fence_put(&fence->f);
 			fence = NULL;
 			virtio_gpu_object_unreserve(bo);
 			virtio_gpu_object_wait(bo, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 26ac8e80a478..6541dd8b82dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence)
  * objects with actions attached to them.
  */
 
-static void vmw_fence_obj_destroy(struct fence *f)
+static void vmw_fence_obj_destroy(struct dma_fence *f)
 {
 	struct vmw_fence_obj *fence =
 		container_of(f, struct vmw_fence_obj, base);
@@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f)
 	fence->destroy(fence);
 }
 
-static const char *vmw_fence_get_driver_name(struct fence *f)
+static const char *vmw_fence_get_driver_name(struct dma_fence *f)
 {
 	return "vmwgfx";
 }
 
-static const char *vmw_fence_get_timeline_name(struct fence *f)
+static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
 {
 	return "svga";
 }
 
-static bool vmw_fence_enable_signaling(struct fence *f)
+static bool vmw_fence_enable_signaling(struct dma_fence *f)
 {
 	struct vmw_fence_obj *fence =
 		container_of(f, struct vmw_fence_obj, base);
@@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f)
 }
 
 struct vmwgfx_wait_cb {
-	struct fence_cb base;
+	struct dma_fence_cb base;
 	struct task_struct *task;
 };
 
 static void
-vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
+vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct vmwgfx_wait_cb *wait =
 		container_of(cb, struct vmwgfx_wait_cb, base);
@@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
 
 static void __vmw_fences_update(struct vmw_fence_manager *fman);
 
-static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
+static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
 {
 	struct vmw_fence_obj *fence =
 		container_of(f, struct vmw_fence_obj, base);
@@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
 
 	while (ret > 0) {
 		__vmw_fences_update(fman);
-		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
 			break;
 
 		if (intr)
@@ -225,7 +225,7 @@ out:
 	return ret;
 }
 
-static struct fence_ops vmw_fence_ops = {
+static struct dma_fence_ops vmw_fence_ops = {
 	.get_driver_name = vmw_fence_get_driver_name,
 	.get_timeline_name = vmw_fence_get_timeline_name,
 	.enable_signaling = vmw_fence_enable_signaling,
@@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 	fman->event_fence_action_size =
 		ttm_round_pot(sizeof(struct vmw_event_fence_action));
 	mutex_init(&fman->goal_irq_mutex);
-	fman->ctx = fence_context_alloc(1);
+	fman->ctx = dma_fence_context_alloc(1);
 
 	return fman;
 }
@@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
 	unsigned long irq_flags;
 	int ret = 0;
 
-	fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
 		   fman->ctx, seqno);
 	INIT_LIST_HEAD(&fence->seq_passed_actions);
 	fence->destroy = destroy;
 
@@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 	u32 goal_seqno;
 	u32 *fifo_mem;
 
-	if (fence_is_signaled_locked(&fence->base))
+	if (dma_fence_is_signaled_locked(&fence->base))
 		return false;
 
 	fifo_mem = fman->dev_priv->mmio_virt;
@@ -459,7 +459,7 @@ rerun:
 	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
 		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
 			list_del_init(&fence->head);
-			fence_signal_locked(&fence->base);
+			dma_fence_signal_locked(&fence->base);
 			INIT_LIST_HEAD(&action_list);
 			list_splice_init(&fence->seq_passed_actions,
 					 &action_list);
@@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
 {
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 		return 1;
 
 	vmw_fences_update(fman);
 
-	return fence_is_signaled(&fence->base);
+	return dma_fence_is_signaled(&fence->base);
 }
 
 int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
 		       bool interruptible, unsigned long timeout)
 {
-	long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
+	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
 
 	if (likely(ret > 0))
 		return 0;
@@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
 
 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 {
-	fence_free(&fence->base);
+	dma_fence_free(&fence->base);
 }
 
 int vmw_fence_create(struct vmw_fence_manager *fman,
@@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 		struct vmw_fence_obj *fence =
 			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
 				   head);
-		fence_get(&fence->base);
+		dma_fence_get(&fence->base);
 		spin_unlock_irq(&fman->lock);
 
 		ret = vmw_fence_obj_wait(fence, false, false,
@@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 
 		if (unlikely(ret != 0)) {
 			list_del_init(&fence->head);
-			fence_signal(&fence->base);
+			dma_fence_signal(&fence->base);
 			INIT_LIST_HEAD(&action_list);
 			list_splice_init(&fence->seq_passed_actions,
 					 &action_list);
@@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 		}
 
 		BUG_ON(!list_empty(&fence->head));
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 		spin_lock_irq(&fman->lock);
 	}
 	spin_unlock_irq(&fman->lock);
@@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
 	spin_lock_irqsave(&fman->lock, irq_flags);
 
 	fman->pending_actions[action->type]++;
-	if (fence_is_signaled_locked(&fence->base)) {
+	if (dma_fence_is_signaled_locked(&fence->base)) {
 		struct list_head action_list;
 
 		INIT_LIST_HEAD(&action_list);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 83ae301ee141..d9d85aa6ed20 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -27,7 +27,7 @@
 
 #ifndef _VMWGFX_FENCE_H_
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
 
@@ -52,7 +52,7 @@ struct vmw_fence_action {
 };
 
 struct vmw_fence_obj {
-	struct fence base;
+	struct dma_fence base;
 
 	struct list_head head;
 	struct list_head seq_passed_actions;
@@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
 
 	*fence_p = NULL;
 	if (fence)
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 }
 
 static inline struct vmw_fence_obj *
 vmw_fence_obj_reference(struct vmw_fence_obj *fence)
 {
 	if (fence)
-		fence_get(&fence->base);
+		dma_fence_get(&fence->base);
 	return fence;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 1a85fb2d4dc6..8e86d6d4141b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 	if (fence == NULL) {
 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 		reservation_object_add_excl_fence(bo->resv, &fence->base);
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 	} else
 		reservation_object_add_excl_fence(bo->resv, &fence->base);
 }