aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
diff options
context:
space:
mode:
authorLucas Stach <l.stach@pengutronix.de>2017-12-06 11:49:39 -0500
committerAlex Deucher <alexander.deucher@amd.com>2017-12-07 11:51:56 -0500
commit1b1f42d8fde4fef1ed7873bf5aa91755f8c3de35 (patch)
tree3039b957f8ef645419b5649d28dc7ece3e9ceecd /drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
parent9ce6aae12c66adf87b5861f8fa5705ea11d0b6ee (diff)
drm: move amd_gpu_scheduler into common location
This moves and renames the AMDGPU scheduler to a common location in DRM in order to facilitate re-use by other drivers. This is mostly a straightforward rename with no code changes. One notable exception is the function to_drm_sched_fence(), which is no longer an inline header function to avoid the need to export the drm_sched_fence_ops_scheduled and drm_sched_fence_ops_finished structures. Reviewed-by: Chunming Zhou <david1.zhou@amd.com> Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de> Acked-by: Alex Deucher <alexander.deucher@amd.com> Signed-off-by: Lucas Stach <l.stach@pengutronix.de> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c20
1 files changed, 10 insertions, 10 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index bdc210ac74f8..013c0a8cfb60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
28#include "amdgpu.h" 28#include "amdgpu.h"
29#include "amdgpu_trace.h" 29#include "amdgpu_trace.h"
30 30
31static void amdgpu_job_timedout(struct amd_sched_job *s_job) 31static void amdgpu_job_timedout(struct drm_sched_job *s_job)
32{ 32{
33 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); 33 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
34 34
@@ -96,7 +96,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
96 amdgpu_ib_free(job->adev, &job->ibs[i], f); 96 amdgpu_ib_free(job->adev, &job->ibs[i], f);
97} 97}
98 98
99static void amdgpu_job_free_cb(struct amd_sched_job *s_job) 99static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
100{ 100{
101 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); 101 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
102 102
@@ -118,7 +118,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
118} 118}
119 119
120int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, 120int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
121 struct amd_sched_entity *entity, void *owner, 121 struct drm_sched_entity *entity, void *owner,
122 struct dma_fence **f) 122 struct dma_fence **f)
123{ 123{
124 int r; 124 int r;
@@ -127,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
127 if (!f) 127 if (!f)
128 return -EINVAL; 128 return -EINVAL;
129 129
130 r = amd_sched_job_init(&job->base, &ring->sched, entity, owner); 130 r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
131 if (r) 131 if (r)
132 return r; 132 return r;
133 133
@@ -136,13 +136,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
136 *f = dma_fence_get(&job->base.s_fence->finished); 136 *f = dma_fence_get(&job->base.s_fence->finished);
137 amdgpu_job_free_resources(job); 137 amdgpu_job_free_resources(job);
138 amdgpu_ring_priority_get(job->ring, job->base.s_priority); 138 amdgpu_ring_priority_get(job->ring, job->base.s_priority);
139 amd_sched_entity_push_job(&job->base, entity); 139 drm_sched_entity_push_job(&job->base, entity);
140 140
141 return 0; 141 return 0;
142} 142}
143 143
144static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, 144static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
145 struct amd_sched_entity *s_entity) 145 struct drm_sched_entity *s_entity)
146{ 146{
147 struct amdgpu_job *job = to_amdgpu_job(sched_job); 147 struct amdgpu_job *job = to_amdgpu_job(sched_job);
148 struct amdgpu_vm *vm = job->vm; 148 struct amdgpu_vm *vm = job->vm;
@@ -151,7 +151,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
151 struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit); 151 struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
152 152
153 if (fence && explicit) { 153 if (fence && explicit) {
154 if (amd_sched_dependency_optimized(fence, s_entity)) { 154 if (drm_sched_dependency_optimized(fence, s_entity)) {
155 r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false); 155 r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
156 if (r) 156 if (r)
157 DRM_ERROR("Error adding fence to sync (%d)\n", r); 157 DRM_ERROR("Error adding fence to sync (%d)\n", r);
@@ -173,7 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
173 return fence; 173 return fence;
174} 174}
175 175
176static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) 176static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
177{ 177{
178 struct dma_fence *fence = NULL, *finished; 178 struct dma_fence *fence = NULL, *finished;
179 struct amdgpu_device *adev; 179 struct amdgpu_device *adev;
@@ -211,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
211 return fence; 211 return fence;
212} 212}
213 213
214const struct amd_sched_backend_ops amdgpu_sched_ops = { 214const struct drm_sched_backend_ops amdgpu_sched_ops = {
215 .dependency = amdgpu_job_dependency, 215 .dependency = amdgpu_job_dependency,
216 .run_job = amdgpu_job_run, 216 .run_job = amdgpu_job_run,
217 .timedout_job = amdgpu_job_timedout, 217 .timedout_job = amdgpu_job_timedout,