diff options
author | Christian König <christian.koenig@amd.com> | 2016-02-10 08:20:50 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2016-02-12 15:39:07 -0500 |
commit | 703297c1fe5ce9e54428d36166e85194d54ce147 (patch) | |
tree | c0c6dcbf58bc7db1f54a7172562fe4aad5359334 /drivers | |
parent | 2d55e45a038b40c02a426fbcb2a9c6961654c6a0 (diff) |
drm/amdgpu: use separate scheduler entity for buffer moves
This allows us to remove the global kernel context.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 19 |
2 files changed, 20 insertions, 1 deletion
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 55fcc929b476..bc396a8f2d55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -456,6 +456,8 @@ struct amdgpu_mman { | |||
456 | /* buffer handling */ | 456 | /* buffer handling */ |
457 | const struct amdgpu_buffer_funcs *buffer_funcs; | 457 | const struct amdgpu_buffer_funcs *buffer_funcs; |
458 | struct amdgpu_ring *buffer_funcs_ring; | 458 | struct amdgpu_ring *buffer_funcs_ring; |
459 | /* Scheduler entity for buffer moves */ | ||
460 | struct amd_sched_entity entity; | ||
459 | }; | 461 | }; |
460 | 462 | ||
461 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, | 463 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3deb7d3b218a..e52fc641edfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -77,6 +77,8 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) | |||
77 | static int amdgpu_ttm_global_init(struct amdgpu_device *adev) | 77 | static int amdgpu_ttm_global_init(struct amdgpu_device *adev) |
78 | { | 78 | { |
79 | struct drm_global_reference *global_ref; | 79 | struct drm_global_reference *global_ref; |
80 | struct amdgpu_ring *ring; | ||
81 | struct amd_sched_rq *rq; | ||
80 | int r; | 82 | int r; |
81 | 83 | ||
82 | adev->mman.mem_global_referenced = false; | 84 | adev->mman.mem_global_referenced = false; |
@@ -106,13 +108,27 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) | |||
106 | return r; | 108 | return r; |
107 | } | 109 | } |
108 | 110 | ||
111 | ring = adev->mman.buffer_funcs_ring; | ||
112 | rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; | ||
113 | r = amd_sched_entity_init(&ring->sched, &adev->mman.entity, | ||
114 | rq, amdgpu_sched_jobs); | ||
115 | if (r != 0) { | ||
116 | DRM_ERROR("Failed setting up TTM BO move run queue.\n"); | ||
117 | drm_global_item_unref(&adev->mman.mem_global_ref); | ||
118 | drm_global_item_unref(&adev->mman.bo_global_ref.ref); | ||
119 | return r; | ||
120 | } | ||
121 | |||
109 | adev->mman.mem_global_referenced = true; | 122 | adev->mman.mem_global_referenced = true; |
123 | |||
110 | return 0; | 124 | return 0; |
111 | } | 125 | } |
112 | 126 | ||
113 | static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) | 127 | static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) |
114 | { | 128 | { |
115 | if (adev->mman.mem_global_referenced) { | 129 | if (adev->mman.mem_global_referenced) { |
130 | amd_sched_entity_fini(adev->mman.entity.sched, | ||
131 | &adev->mman.entity); | ||
116 | drm_global_item_unref(&adev->mman.bo_global_ref.ref); | 132 | drm_global_item_unref(&adev->mman.bo_global_ref.ref); |
117 | drm_global_item_unref(&adev->mman.mem_global_ref); | 133 | drm_global_item_unref(&adev->mman.mem_global_ref); |
118 | adev->mman.mem_global_referenced = false; | 134 | adev->mman.mem_global_referenced = false; |
@@ -1053,7 +1069,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, | |||
1053 | 1069 | ||
1054 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); | 1070 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
1055 | WARN_ON(job->ibs[0].length_dw > num_dw); | 1071 | WARN_ON(job->ibs[0].length_dw > num_dw); |
1056 | r = amdgpu_job_submit(job, ring, NULL, AMDGPU_FENCE_OWNER_UNDEFINED, fence); | 1072 | r = amdgpu_job_submit(job, ring, &adev->mman.entity, |
1073 | AMDGPU_FENCE_OWNER_UNDEFINED, fence); | ||
1057 | if (r) | 1074 | if (r) |
1058 | goto error_free; | 1075 | goto error_free; |
1059 | 1076 | ||