about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2018-07-13 09:08:44 -0400
committerAlex Deucher <alexander.deucher@amd.com>2018-07-16 17:11:53 -0400
commit3320b8d2acd3d480d0dd4835d970067354eac915 (patch)
tree1410542092698899f34f8da6b2e60aecb76f0574 /drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
parent0e28b10ff1b8e65788040b51c30c9cc984060dcd (diff)
drm/amdgpu: remove job->ring
We can easily get that from the scheduler.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 51ff751e093b..2496f2269bcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -30,12 +30,12 @@
30 30
31static void amdgpu_job_timedout(struct drm_sched_job *s_job) 31static void amdgpu_job_timedout(struct drm_sched_job *s_job)
32{ 32{
33 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); 33 struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
34 struct amdgpu_job *job = to_amdgpu_job(s_job);
34 35
35 DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n", 36 DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
36 job->base.sched->name, 37 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
37 atomic_read(&job->ring->fence_drv.last_seq), 38 ring->fence_drv.sync_seq);
38 job->ring->fence_drv.sync_seq);
39 39
40 amdgpu_device_gpu_recover(job->adev, job, false); 40 amdgpu_device_gpu_recover(job->adev, job, false);
41} 41}
@@ -98,9 +98,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
98 98
99static void amdgpu_job_free_cb(struct drm_sched_job *s_job) 99static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
100{ 100{
101 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); 101 struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
102 struct amdgpu_job *job = to_amdgpu_job(s_job);
102 103
103 amdgpu_ring_priority_put(job->ring, s_job->s_priority); 104 amdgpu_ring_priority_put(ring, s_job->s_priority);
104 dma_fence_put(job->fence); 105 dma_fence_put(job->fence);
105 amdgpu_sync_free(&job->sync); 106 amdgpu_sync_free(&job->sync);
106 amdgpu_sync_free(&job->sched_sync); 107 amdgpu_sync_free(&job->sched_sync);
@@ -120,6 +121,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
120int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, 121int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
121 void *owner, struct dma_fence **f) 122 void *owner, struct dma_fence **f)
122{ 123{
124 struct amdgpu_ring *ring = to_amdgpu_ring(entity->sched);
123 int r; 125 int r;
124 126
125 if (!f) 127 if (!f)
@@ -130,10 +132,9 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
130 return r; 132 return r;
131 133
132 job->owner = owner; 134 job->owner = owner;
133 job->ring = to_amdgpu_ring(entity->sched);
134 *f = dma_fence_get(&job->base.s_fence->finished); 135 *f = dma_fence_get(&job->base.s_fence->finished);
135 amdgpu_job_free_resources(job); 136 amdgpu_job_free_resources(job);
136 amdgpu_ring_priority_get(job->ring, job->base.s_priority); 137 amdgpu_ring_priority_get(ring, job->base.s_priority);
137 drm_sched_entity_push_job(&job->base, entity); 138 drm_sched_entity_push_job(&job->base, entity);
138 139
139 return 0; 140 return 0;
@@ -142,6 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
142static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, 143static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
143 struct drm_sched_entity *s_entity) 144 struct drm_sched_entity *s_entity)
144{ 145{
146 struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
145 struct amdgpu_job *job = to_amdgpu_job(sched_job); 147 struct amdgpu_job *job = to_amdgpu_job(sched_job);
146 struct amdgpu_vm *vm = job->vm; 148 struct amdgpu_vm *vm = job->vm;
147 bool explicit = false; 149 bool explicit = false;
@@ -157,8 +159,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
157 } 159 }
158 160
159 while (fence == NULL && vm && !job->vmid) { 161 while (fence == NULL && vm && !job->vmid) {
160 struct amdgpu_ring *ring = job->ring;
161
162 r = amdgpu_vmid_grab(vm, ring, &job->sync, 162 r = amdgpu_vmid_grab(vm, ring, &job->sync,
163 &job->base.s_fence->finished, 163 &job->base.s_fence->finished,
164 job); 164 job);
@@ -173,6 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
173 173
174static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) 174static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
175{ 175{
176 struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
176 struct dma_fence *fence = NULL, *finished; 177 struct dma_fence *fence = NULL, *finished;
177 struct amdgpu_device *adev; 178 struct amdgpu_device *adev;
178 struct amdgpu_job *job; 179 struct amdgpu_job *job;
@@ -196,7 +197,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
196 if (finished->error < 0) { 197 if (finished->error < 0) {
197 DRM_INFO("Skip scheduling IBs!\n"); 198 DRM_INFO("Skip scheduling IBs!\n");
198 } else { 199 } else {
199 r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, 200 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
200 &fence); 201 &fence);
201 if (r) 202 if (r)
202 DRM_ERROR("Error scheduling IBs (%d)\n", r); 203 DRM_ERROR("Error scheduling IBs (%d)\n", r);