Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  3
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   | 14
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c   | 13
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  | 21
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  |  2
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  |  2
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  |  6
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   |  5
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/cik_sdma.c    |  3
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c    |  3
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c    |  6
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c   |  3
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c   |  3
13 files changed, 46 insertions, 38 deletions
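
Taken together, the hunks below move the amdgpu_sync object out of struct amdgpu_ib and into struct amdgpu_job, switch the CS, TTM and VM paths over to filling the job's sync object, move the amdgpu_sync_wait() call from amdgpu_ib_schedule() into amdgpu_job_run(), and make amdgpu_ib_schedule() take the last VM update fence as an explicit parameter (direct submissions such as the ring IB tests and the UVD/VCE direct paths pass NULL). A condensed view of the resulting interfaces, reconstructed from the amdgpu.h hunks below with unrelated members and parameters elided:

struct amdgpu_job {
        struct amd_sched_job    base;
        struct amdgpu_device    *adev;
        struct amdgpu_ring      *ring;
        struct amdgpu_sync      sync;   /* moved here from struct amdgpu_ib */
        struct amdgpu_ib        *ibs;
        uint32_t                num_ibs;
        void                    *owner;
        /* ... */
};

/* last_vm_update replaces the old ib->sync.last_vm_update lookup;
 * callers that do not run under a VM pass NULL. */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                       struct amdgpu_ib *ib, void *owner,
                       struct fence *last_vm_update,
                       struct fence **f);
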
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6cefde4aab49..99e660fec190 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -776,7 +776,6 @@ struct amdgpu_ib {
         bool                    grabbed_vmid;
         struct amdgpu_vm        *vm;
         struct amdgpu_ctx       *ctx;
-        struct amdgpu_sync      sync;
         uint32_t                gds_base, gds_size;
         uint32_t                gws_base, gws_size;
         uint32_t                oa_base, oa_size;
@@ -1178,6 +1177,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                        struct amdgpu_ib *ib, void *owner,
+                       struct fence *last_vm_update,
                        struct fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
@@ -1236,6 +1236,7 @@ struct amdgpu_job {
         struct amd_sched_job    base;
         struct amdgpu_device    *adev;
         struct amdgpu_ring      *ring;
+        struct amdgpu_sync      sync;
         struct amdgpu_ib        *ibs;
         uint32_t                num_ibs;
         void                    *owner;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a5311623a489..52c3eb96b199 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -411,7 +411,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 
         list_for_each_entry(e, &p->validated, tv.head) {
                 struct reservation_object *resv = e->robj->tbo.resv;
-                r = amdgpu_sync_resv(p->adev, &p->job->ibs[0].sync, resv, p->filp);
+                r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
 
                 if (r)
                         return r;
@@ -491,7 +491,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
         if (r)
                 return r;
 
-        r = amdgpu_sync_fence(adev, &p->job->ibs[0].sync, vm->page_directory_fence);
+        r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
         if (r)
                 return r;
 
@@ -517,14 +517,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
                                 return r;
 
                         f = bo_va->last_pt_update;
-                        r = amdgpu_sync_fence(adev, &p->job->ibs[0].sync, f);
+                        r = amdgpu_sync_fence(adev, &p->job->sync, f);
                         if (r)
                                 return r;
                 }
 
         }
 
-        r = amdgpu_vm_clear_invalids(adev, vm, &p->job->ibs[0].sync);
+        r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
 
         if (amdgpu_vm_debug && p->bo_list) {
                 /* Invalidate all BOs to test for userspace bugs */
@@ -698,11 +698,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                                   struct amdgpu_cs_parser *p)
 {
         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-        struct amdgpu_ib *ib;
         int i, j, r;
 
-        /* Add dependencies to first IB */
-        ib = &p->job->ibs[0];
         for (i = 0; i < p->nchunks; ++i) {
                 struct drm_amdgpu_cs_chunk_dep *deps;
                 struct amdgpu_cs_chunk *chunk;
@@ -740,7 +737,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                                 return r;
 
                         } else if (fence) {
-                                r = amdgpu_sync_fence(adev, &ib->sync, fence);
+                                r = amdgpu_sync_fence(adev, &p->job->sync,
+                                                      fence);
                                 fence_put(fence);
                                 amdgpu_ctx_put(ctx);
                                 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 4b2c176b83f2..b5bdd5d59b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -74,8 +74,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                 ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
         }
 
-        amdgpu_sync_create(&ib->sync);
-
         ib->vm = vm;
 
         return 0;
@@ -91,7 +89,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  */
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
 {
-        amdgpu_sync_free(&ib->sync);
         amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
         if (ib->fence)
                 fence_put(&ib->fence->base);
@@ -121,6 +118,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
  */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                        struct amdgpu_ib *ibs, void *owner,
+                       struct fence *last_vm_update,
                        struct fence **f)
 {
         struct amdgpu_device *adev = ring->adev;
@@ -152,16 +150,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                 return r;
         }
 
-        r = amdgpu_sync_wait(&ibs->sync);
-        if (r) {
-                amdgpu_ring_undo(ring);
-                dev_err(adev->dev, "failed to sync wait (%d)\n", r);
-                return r;
-        }
-
         if (vm) {
                 /* do context switch */
-                amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
+                amdgpu_vm_flush(ring, vm, last_vm_update);
 
                 if (ring->funcs->emit_gds_switch)
                         amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 6f3e757e056e..0f6719e0ace0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -46,6 +46,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
         (*job)->ibs = (void *)&(*job)[1];
         (*job)->num_ibs = num_ibs;
 
+        amdgpu_sync_create(&(*job)->sync);
+
         return 0;
 }
 
@@ -73,6 +75,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
                 amdgpu_ib_free(job->adev, &job->ibs[i]);
 
         amdgpu_bo_unref(&job->uf.bo);
+        amdgpu_sync_free(&job->sync);
         kfree(job);
 }
 
@@ -99,23 +102,22 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 {
         struct amdgpu_job *job = to_amdgpu_job(sched_job);
-        struct amdgpu_sync *sync = &job->ibs->sync;
         struct amdgpu_vm *vm = job->ibs->vm;
 
-        struct fence *fence = amdgpu_sync_get_fence(sync);
+        struct fence *fence = amdgpu_sync_get_fence(&job->sync);
 
         if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
                 struct amdgpu_ring *ring = job->ring;
                 int r;
 
-                r = amdgpu_vm_grab_id(vm, ring, sync,
+                r = amdgpu_vm_grab_id(vm, ring, &job->sync,
                                       &job->base.s_fence->base);
                 if (r)
                         DRM_ERROR("Error getting VM ID (%d)\n", r);
                 else
                         job->ibs->grabbed_vmid = true;
 
-                fence = amdgpu_sync_get_fence(sync);
+                fence = amdgpu_sync_get_fence(&job->sync);
         }
 
         return fence;
@@ -132,9 +134,16 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
                 return NULL;
         }
         job = to_amdgpu_job(sched_job);
+
+        r = amdgpu_sync_wait(&job->sync);
+        if (r) {
+                DRM_ERROR("failed to sync wait (%d)\n", r);
+                return NULL;
+        }
+
         trace_amdgpu_sched_run_job(job);
-        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
-                               job->owner, &fence);
+        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner,
+                               job->sync.last_vm_update, &fence);
         if (r) {
                 DRM_ERROR("Error scheduling IBs (%d)\n", r);
                 goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 21c1a18c6d48..e47d5188c886 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1032,7 +1032,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
                 return r;
 
         if (resv) {
-                r = amdgpu_sync_resv(adev, &job->ibs[0].sync, resv,
+                r = amdgpu_sync_resv(adev, &job->sync, resv,
                                      AMDGPU_FENCE_OWNER_UNDEFINED);
                 if (r) {
                         DRM_ERROR("sync failed (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 00b608b6c8c4..c536630580f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -874,7 +874,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
         if (direct) {
                 r = amdgpu_ib_schedule(ring, 1, ib,
-                                       AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+                                       AMDGPU_FENCE_OWNER_UNDEFINED, NULL, &f);
                 if (r)
                         goto err_free;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 97c22212d048..fb2ce3ed9aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -411,7 +411,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
         for (i = ib->length_dw; i < ib_size_dw; ++i)
                 ib->ptr[i] = 0x0;
 
-        r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+        r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+                               NULL, &f);
         if (r)
                 goto err;
 
@@ -473,7 +474,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
         if (direct) {
                 r = amdgpu_ib_schedule(ring, 1, ib,
-                                       AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+                                       AMDGPU_FENCE_OWNER_UNDEFINED,
+                                       NULL, &f);
                 if (r)
                         goto err;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index fb003089f73c..b291b1a4611a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -473,7 +473,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
         if (ib->length_dw != 0) {
                 amdgpu_ring_pad_ib(ring, ib);
-                amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+                amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+                                 AMDGPU_FENCE_OWNER_VM);
                 WARN_ON(ib->length_dw > ndw);
                 r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
                 if (r)
@@ -714,7 +715,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
         ib = &job->ibs[0];
 
-        r = amdgpu_sync_resv(adev, &ib->sync, vm->page_directory->tbo.resv,
+        r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
                              owner);
         if (r)
                 goto error_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 6004dce23dc1..47fba3142a5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -633,7 +633,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
         ib.ptr[3] = 1;
         ib.ptr[4] = 0xDEADBEEF;
         ib.length_dw = 5;
-        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+                               NULL, &f);
         if (r)
                 goto err1;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4dac79ac9d3d..fd6796657051 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2641,7 +2641,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
         ib.ptr[2] = 0xDEADBEEF;
         ib.length_dw = 3;
 
-        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+                               NULL, &f);
         if (r)
                 goto err2;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 4a7708541723..415da6e100cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -709,7 +709,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
         ib.ptr[2] = 0xDEADBEEF;
         ib.length_dw = 3;
 
-        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+                               NULL, &f);
         if (r)
                 goto err2;
 
@@ -1264,7 +1265,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 
         /* shedule the ib on the ring */
-        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+                               NULL, &f);
         if (r) {
                 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
                 goto fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6a048b0b5fa7..423be6e10d01 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -691,7 +691,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
         ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
         ib.length_dw = 8;
 
-        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+                               NULL, &f);
         if (r)
                 goto err1;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 446aa016e96e..960462a9e941 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -842,7 +842,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
         ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
         ib.length_dw = 8;
 
-        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+        r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
+                               NULL, &f);
         if (r)
                 goto err1;
 