author		Lucas Stach <l.stach@pengutronix.de>	2017-12-06 11:49:39 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2017-12-07 11:51:56 -0500
commit		1b1f42d8fde4fef1ed7873bf5aa91755f8c3de35 (patch)
tree		3039b957f8ef645419b5649d28dc7ece3e9ceecd /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent		9ce6aae12c66adf87b5861f8fa5705ea11d0b6ee (diff)
drm: move amd_gpu_scheduler into common location
This moves and renames the AMDGPU scheduler to a common location in DRM
in order to facilitate re-use by other drivers. This is mostly a
straightforward rename with no code changes.
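[Editor's note: a minimal sketch of how another driver would pick up the
renamed API, mirroring the amdgpu_vm_init() hunk in the diff below. The
function and parameter names here are illustrative, not from this patch;
only drm_sched_entity_init(), struct drm_gpu_scheduler, struct
drm_sched_rq and DRM_SCHED_PRIORITY_KERNEL come from the renamed code.]

    #include <drm/gpu_scheduler.h>

    /* Hedged sketch: attach a scheduler entity to the kernel-priority
     * run queue of a shared DRM GPU scheduler after the rename. */
    static int example_entity_init(struct drm_gpu_scheduler *sched,
                                   struct drm_sched_entity *entity,
                                   uint32_t num_jobs)
    {
            struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];

            return drm_sched_entity_init(sched, entity, rq, num_jobs, NULL);
    }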
One notable exception is the function to_drm_sched_fence(), which is no
longer an inline header function, to avoid the need to export the
drm_sched_fence_ops_scheduled and drm_sched_fence_ops_finished structures.
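[Editor's note: a sketch of what the out-of-line helper can look like,
under the assumption that struct drm_sched_fence embeds "scheduled" and
"finished" dma_fence members; those member names are not spelled out in
this message.]

    struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
    {
            /* Comparing against the now-private fence ops identifies
             * scheduler fences without exporting the ops structures. */
            if (f->ops == &drm_sched_fence_ops_scheduled)
                    return container_of(f, struct drm_sched_fence, scheduled);

            if (f->ops == &drm_sched_fence_ops_finished)
                    return container_of(f, struct drm_sched_fence, finished);

            return NULL;
    }
    EXPORT_SYMBOL(to_drm_sched_fence);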
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3ecdbdfb04dd..dbe37d621796 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2643,7 +2643,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 				   AMDGPU_VM_PTE_COUNT(adev) * 8);
 	unsigned ring_instance;
 	struct amdgpu_ring *ring;
-	struct amd_sched_rq *rq;
+	struct drm_sched_rq *rq;
 	int r, i;
 	u64 flags;
 	uint64_t init_pde_value = 0;
@@ -2663,8 +2663,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
-	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
-	r = amd_sched_entity_init(&ring->sched, &vm->entity,
+	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+	r = drm_sched_entity_init(&ring->sched, &vm->entity,
 				  rq, amdgpu_sched_jobs, NULL);
 	if (r)
 		return r;
@@ -2744,7 +2744,7 @@ error_free_root:
 	vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-	amd_sched_entity_fini(&ring->sched, &vm->entity);
+	drm_sched_entity_fini(&ring->sched, &vm->entity);
 
 	return r;
 }
@@ -2803,7 +2803,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 
-	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
+	drm_sched_entity_fini(vm->entity.sched, &vm->entity);
 
 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");