author		Chunming Zhou <David1.Zhou@amd.com>	2017-04-20 23:13:56 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2017-05-24 17:40:10 -0400
commit		7a63eb23d8817477f19f2cf51fbf7b27b221049c
tree		13c9bf75ad405dcc96cf107c77a80555c0f81cf1
parent		c350577073de8fe21e6cdd798c4e4746d670bb47
drm/amdgpu: implement grab reserved vmid V4
Implement the vmid reservation.

v2: wait on the sync object only when a flush is needed
v3: fix a race condition
v4: peek the fence instead of getting it, and fix potential context starvation
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	79
1 file changed, 75 insertions(+), 4 deletions(-)
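The core of the patch is the needs_flush decision in amdgpu_vm_grab_reserved_vmid_locked(). Here are the same checks, distilled into a standalone predicate with one condition per branch — a reading aid only, not part of the patch; reserved_vmid_needs_flush() is a hypothetical name:

	/* Sketch: when does reusing the reserved VMID require a VM flush?
	 * (distilled from the patch below, not literal kernel code)
	 */
	static bool reserved_vmid_needs_flush(struct amdgpu_device *adev,
					      struct amdgpu_vm_id *id,
					      struct amdgpu_vm *vm,
					      struct amdgpu_job *job,
					      struct dma_fence *updates,
					      uint64_t fence_context)
	{
		struct dma_fence *flushed = id->flushed_updates;

		if (amdgpu_vm_had_gpu_reset(adev, id))
			return true;	/* a GPU reset invalidated the VMID state */
		if (atomic64_read(&id->owner) != vm->client_id)
			return true;	/* another client used the VMID last */
		if (job->vm_pd_addr != id->pd_gpu_addr)
			return true;	/* the page directory moved */
		if (updates && (!flushed || updates->context != flushed->context ||
				dma_fence_is_later(updates, flushed)))
			return true;	/* newer page table updates not yet flushed */
		if (!id->last_flush || (id->last_flush->context != fence_context &&
					!dma_fence_is_signaled(id->last_flush)))
			return true;	/* last flush came from elsewhere and is pending */
		return false;
	}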
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2ea91392290c..11f49b81f653 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -391,6 +391,72 @@ static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
 		atomic_read(&adev->gpu_reset_counter);
 }
 
+static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
+{
+	return !!vm->reserved_vmid[vmhub];
+}
+
+/* id_mgr->lock must be held */
+static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
+					       struct amdgpu_ring *ring,
+					       struct amdgpu_sync *sync,
+					       struct dma_fence *fence,
+					       struct amdgpu_job *job)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned vmhub = ring->funcs->vmhub;
+	uint64_t fence_context = adev->fence_context + ring->idx;
+	struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct dma_fence *updates = sync->last_vm_update;
+	int r = 0;
+	struct dma_fence *flushed, *tmp;
+	bool needs_flush = false;
+
+	flushed = id->flushed_updates;
+	if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
+	    (atomic64_read(&id->owner) != vm->client_id) ||
+	    (job->vm_pd_addr != id->pd_gpu_addr) ||
+	    (updates && (!flushed || updates->context != flushed->context ||
+			 dma_fence_is_later(updates, flushed))) ||
+	    (!id->last_flush || (id->last_flush->context != fence_context &&
+				 !dma_fence_is_signaled(id->last_flush)))) {
+		needs_flush = true;
+		/* to prevent one context from starving another */
+		id->pd_gpu_addr = 0;
+		tmp = amdgpu_sync_peek_fence(&id->active, ring);
+		if (tmp) {
+			r = amdgpu_sync_fence(adev, sync, tmp);
+			return r;
+		}
+	}
+
+	/* Good, we can use this VMID. Remember this submission as
+	 * user of the VMID.
+	 */
+	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+	if (r)
+		goto out;
+
+	if (updates && (!flushed || updates->context != flushed->context ||
+			dma_fence_is_later(updates, flushed))) {
+		dma_fence_put(id->flushed_updates);
+		id->flushed_updates = dma_fence_get(updates);
+	}
+	id->pd_gpu_addr = job->vm_pd_addr;
+	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
+	atomic64_set(&id->owner, vm->client_id);
+	job->vm_needs_flush = needs_flush;
+	if (needs_flush) {
+		dma_fence_put(id->last_flush);
+		id->last_flush = NULL;
+	}
+	job->vm_id = id - id_mgr->ids;
+	trace_amdgpu_vm_grab_id(vm, ring, job);
+out:
+	return r;
+}
+
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
@@ -415,12 +481,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	unsigned i;
 	int r = 0;
 
+	mutex_lock(&id_mgr->lock);
+	if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
+		r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
+		mutex_unlock(&id_mgr->lock);
+		return r;
+	}
 	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
-	if (!fences)
+	if (!fences) {
+		mutex_unlock(&id_mgr->lock);
 		return -ENOMEM;
-
-	mutex_lock(&id_mgr->lock);
-
+	}
 	/* Check if we have an idle VMID */
 	i = 0;
 	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
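Two design points worth noting. The v3 race fix moves mutex_lock(&id_mgr->lock) ahead of the reserved-VMID check in amdgpu_vm_grab_id(), so testing vm->reserved_vmid[vmhub] and grabbing the ID happen under a single lock hold, with every early return unlocking. The v4 fix swaps amdgpu_sync_get_fence() for amdgpu_sync_peek_fence() in the busy path; assuming the usual semantics of these helpers (get removes the fence from the sync object, peek only looks at the next unsignaled one — inferred from the names and changelog, not verified against the 4.12 sources), the busy fence stays in id->active for other waiters, and clearing id->pd_gpu_addr pushes competing contexts through the same wait, which is what prevents one context from starving another:

	/* busy path from the patch, annotated (comments are editorial) */
	id->pd_gpu_addr = 0;	/* invalidate so other contexts also re-queue */
	tmp = amdgpu_sync_peek_fence(&id->active, ring);	/* look, don't take */
	if (tmp) {
		/* wait for the current user, then retry; the fence is
		 * still in id->active for everyone else who must wait
		 */
		r = amdgpu_sync_fence(adev, sync, tmp);
		return r;
	}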