author | Christian König <christian.koenig@amd.com> | 2016-05-23 10:00:32 -0400
---|---|---
committer | Alex Deucher <alexander.deucher@amd.com> | 2016-07-07 14:51:23 -0400
commit | 8d76001e0058aca129f599810d4f60e36fb36c5b (patch) |
tree | 1378f12f00ff4d7021f901a469cc58deac5a17d9 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
parent | 36fd7c5c3ca6e03a265e8458905f453b552fe9df (diff) |
drm/amdgpu: reuse VMIDs assigned to a VM only if there is also a free one
This fixes a fairness problem with the GPU scheduler: a VM with a lot of
jobs could previously starve VMs with fewer jobs.
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
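
For orientation before reading the diff: the patch inverts the order of the two allocation strategies in amdgpu_vm_grab_id(). A VMID already assigned to this VM is now reused only when the LRU list also contains at least one idle VMID; if nothing is idle, every VM, busy or not, waits on the oldest LRU entry. The sketch below models just that decision with hypothetical stand-in types (`struct vmid`, `grab_vmid()` and the single owner comparison are illustrative simplifications; the real code checks page-directory address, pending flushes, and fences on `struct amdgpu_vm_id` entries under `adev->vm_manager.lock`):

```c
/*
 * Simplified model of the VMID grab policy after this patch.
 * All names here are hypothetical stand-ins, not amdgpu code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_VMIDS 16

struct vmid {
	int  owner;	/* client id of the VM this VMID was last assigned to */
	bool idle;	/* no in-flight job is still using this VMID */
};

/* LRU order: index 0 is the oldest entry. */
static struct vmid lru[MAX_VMIDS];

static struct vmid *grab_vmid(int client_id)
{
	struct vmid *idle = NULL;
	size_t i;

	/* Step 1: look for any idle VMID in LRU order. */
	for (i = 0; i < MAX_VMIDS; i++) {
		if (lru[i].idle) {
			idle = &lru[i];
			break;
		}
	}

	/* Step 2: nothing idle at all -> everybody waits on the
	 * oldest entry, no matter how many jobs their VM has queued. */
	if (!idle)
		return &lru[0];

	/* Step 3: an idle VMID exists, so reusing one of our own
	 * VMIDs (which avoids a VM flush) can no longer starve
	 * other VMs. */
	for (i = 0; i < MAX_VMIDS; i++) {
		if (lru[i].owner == client_id)
			return &lru[i];
	}

	/* Step 4: none of our VMIDs is reusable; take the idle one. */
	return idle;
}

int main(void)
{
	/* Client 1 owns every VMID and keeps them all busy. */
	for (size_t i = 0; i < MAX_VMIDS; i++)
		lru[i] = (struct vmid){ .owner = 1, .idle = false };

	/* With nothing idle, client 2 waits on the oldest entry
	 * instead of queueing behind client 1's reuse path. */
	printf("client 2 gets slot %td\n", grab_vmid(2) - lru);
	return 0;
}
```

Before the patch, the reuse loop ran first and unconditionally, so a VM that kept its own VMIDs permanently busy never fell through to the LRU wait; that is the starvation the commit message refers to.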
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 113 |
1 file changed, 59 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index cf989cf54072..f38978d8834c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -179,75 +179,80 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_device *adev = ring->adev;
 	struct fence *updates = sync->last_vm_update;
-	struct amdgpu_vm_id *id;
+	struct amdgpu_vm_id *id, *idle;
 	unsigned i = ring->idx;
 	int r;
 
 	mutex_lock(&adev->vm_manager.lock);
 
-	/* Check if we can use a VMID already assigned to this VM */
-	do {
-		struct fence *flushed;
-
-		id = vm->ids[i++];
-		if (i == AMDGPU_MAX_RINGS)
-			i = 0;
-
-		/* Check all the prerequisites to using this VMID */
-		if (!id)
-			continue;
-
-		if (atomic64_read(&id->owner) != vm->client_id)
-			continue;
-
-		if (pd_addr != id->pd_gpu_addr)
-			continue;
+	/* Check if we have an idle VMID */
+	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+		if (amdgpu_sync_is_idle(&idle->active, ring))
+			break;
 
-		if (id->last_user != ring &&
-		    (!id->last_flush || !fence_is_signaled(id->last_flush)))
-			continue;
+	}
 
-		flushed = id->flushed_updates;
-		if (updates && (!flushed || fence_is_later(updates, flushed)))
-			continue;
+	/* If we can't find a idle VMID to use, just wait for the oldest */
+	if (&idle->list == &adev->vm_manager.ids_lru) {
+		id = list_first_entry(&adev->vm_manager.ids_lru,
+				      struct amdgpu_vm_id,
+				      list);
+	} else {
+		/* Check if we can use a VMID already assigned to this VM */
+		do {
+			struct fence *flushed;
+
+			id = vm->ids[i++];
+			if (i == AMDGPU_MAX_RINGS)
+				i = 0;
+
+			/* Check all the prerequisites to using this VMID */
+			if (!id)
+				continue;
+
+			if (atomic64_read(&id->owner) != vm->client_id)
+				continue;
+
+			if (pd_addr != id->pd_gpu_addr)
+				continue;
+
+			if (id->last_user != ring && (!id->last_flush ||
+			    !fence_is_signaled(id->last_flush)))
+				continue;
+
+			flushed = id->flushed_updates;
+			if (updates && (!flushed ||
+			    fence_is_later(updates, flushed)))
+				continue;
+
+			/* Good we can use this VMID */
+			if (id->last_user == ring) {
+				r = amdgpu_sync_fence(ring->adev, sync,
+						      id->first);
+				if (r)
+					goto error;
+			}
 
-		/* Good we can use this VMID */
-		if (id->last_user == ring) {
-			r = amdgpu_sync_fence(ring->adev, sync,
-					      id->first);
+			/* And remember this submission as user of the VMID */
+			r = amdgpu_sync_fence(ring->adev, &id->active, fence);
 			if (r)
 				goto error;
-		}
-
-		/* And remember this submission as user of the VMID */
-		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
-		if (r)
-			goto error;
 
-		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-		vm->ids[ring->idx] = id;
+			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+			vm->ids[ring->idx] = id;
 
-		*vm_id = id - adev->vm_manager.ids;
-		*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
-		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+			*vm_id = id - adev->vm_manager.ids;
+			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
+						*vm_pd_addr);
 
-		mutex_unlock(&adev->vm_manager.lock);
-		return 0;
+			mutex_unlock(&adev->vm_manager.lock);
+			return 0;
 
-	} while (i != ring->idx);
+		} while (i != ring->idx);
 
-	/* Check if we have an idle VMID */
-	list_for_each_entry(id, &adev->vm_manager.ids_lru, list) {
-		if (amdgpu_sync_is_idle(&id->active, ring))
-			break;
-
-	}
-
-	/* If we can't find a idle VMID to use, just wait for the oldest */
-	if (&id->list == &adev->vm_manager.ids_lru) {
-		id = list_first_entry(&adev->vm_manager.ids_lru,
-				      struct amdgpu_vm_id,
-				      list);
+		/* Still no ID to use? Then use the idle one found earlier */
+		id = idle;
 	}
 
 	r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
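
A side note on the new `if (&idle->list == &adev->vm_manager.ids_lru)` test: when `list_for_each_entry()` runs to completion without a `break`, its cursor ends up addressing the list head itself rather than a real entry, so comparing the embedded `list_head` against the head is the standard kernel idiom for "the loop found nothing". A generic kernel-style sketch of the idiom, using a hypothetical `struct item` rather than the amdgpu types:

```c
#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head list;	/* linkage into the list being searched */
	bool wanted;
};

/* Return the first wanted entry, or NULL if the loop ran off the end. */
static struct item *find_wanted(struct list_head *head)
{
	struct item *it;

	list_for_each_entry(it, head, list) {
		if (it->wanted)
			break;
	}

	/* Without a break, `it` is container_of(head, ...), which is
	 * not a real entry; detect that by comparing against the head. */
	if (&it->list == head)
		return NULL;

	return it;
}
```

In the patch, a failed search means no VMID is idle at all, and the code falls back to waiting on the oldest entry at the head of the LRU.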