-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 24
3 files changed, 17 insertions, 15 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4e8124749f9f..9f47b3e013c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2294,8 +2294,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 					  struct amdgpu_vm *vm,
 					  struct list_head *head);
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
 		     struct amdgpu_fence *updates);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 95d533422a5b..f3ac9d8a5691 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -165,9 +165,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 
 	if (vm) {
 		/* grab a vm id if necessary */
-		struct amdgpu_fence *vm_id_fence = NULL;
-		vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
-		r = amdgpu_sync_fence(adev, &ibs->sync, &vm_id_fence->base);
+		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
 		if (r) {
 			amdgpu_ring_unlock_undo(ring);
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1e895b979ec6..fd8395f25723 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -127,16 +127,16 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
- * @ring: ring we want to submit job to
  * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
  *
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
  *
- * Global and local mutex must be locked!
+ * Global mutex must be locked!
  */
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm)
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync)
 {
 	struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -148,7 +148,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
 	/* check if the id is still valid */
 	if (vm_id->id && vm_id->last_id_use &&
 	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
-		return NULL;
+		return 0;
 
 	/* we definately need to flush */
 	vm_id->pd_gpu_addr = ~0ll;
@@ -161,7 +161,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
 		/* found a free one */
 		vm_id->id = i;
 		trace_amdgpu_vm_grab_id(i, ring->idx);
-		return NULL;
+		return 0;
 	}
 
 	if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
@@ -172,15 +172,19 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
 
 	for (i = 0; i < 2; ++i) {
 		if (choices[i]) {
+			struct amdgpu_fence *fence;
+
+			fence = adev->vm_manager.active[choices[i]];
 			vm_id->id = choices[i];
+
 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-			return adev->vm_manager.active[choices[i]];
+			return amdgpu_sync_fence(ring->adev, sync, &fence->base);
 		}
 	}
 
 	/* should never happen */
 	BUG();
-	return NULL;
+	return -EINVAL;
 }
 
 /**
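
Taken together, the patch turns amdgpu_vm_grab_id() from a function that returned the fence the caller had to add to its sync object (or NULL when the current VMID was still valid) into one that adds any required fence to the supplied amdgpu_sync itself and returns 0 or a negative error code. Below is a minimal sketch of the new calling convention, modeled on the amdgpu_ib.c hunk above; the submit_prepare_vm() wrapper is hypothetical, while the fields and calls it uses (ib->vm, ib->ring, ib->sync, amdgpu_vm_grab_id()) are taken from the diff itself:

/* Hypothetical helper for illustration only -- not part of the patch. */
static int submit_prepare_vm(struct amdgpu_ib *ib)
{
	int r;

	if (!ib->vm)
		return 0;

	/*
	 * Reworked interface: grab_id adds any fence we must wait for to
	 * ib->sync internally and reports success or failure directly, so
	 * the caller no longer has to handle a returned fence pointer.
	 */
	r = amdgpu_vm_grab_id(ib->vm, ib->ring, &ib->sync);
	if (r)
		return r;

	return 0;
}

Folding the sync step into amdgpu_vm_grab_id() also lets it propagate a real error code from amdgpu_sync_fence(), which is why the unreachable tail of the function now returns -EINVAL instead of NULL.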