author		Christian König <christian.koenig@amd.com>	2018-07-13 11:15:54 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2018-07-17 15:18:28 -0400
commit		a1917b73d89e88a6ecdd076b9d6618682f1b0d08
tree		8fd7bf0e303ae40002fdedc4ce847d73f2fa296e /drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
parent		ee913fd9e166384aacc0aa70ffd4e93ca41d54b0
drm/amdgpu: remove job->adev (v2)
We can get that from the ring.
v2: squash in "drm/amdgpu: always initialize job->base.sched" (Alex)
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
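
For context on "we can get that from the ring": struct amdgpu_ring embeds its drm_gpu_scheduler, so once job->base.sched is guaranteed to be initialized, the device pointer can be recovered by walking back from the scheduler to the ring instead of being stored in every job. Below is a minimal compilable sketch of that pattern; the struct definitions are simplified stand-ins for the kernel ones, and job_to_adev() is a hypothetical helper for illustration, not a function in the driver.

#include <stddef.h>

/* Stand-ins for the kernel structs; only the layout that matters here
 * is shown (assumption: real structs carry far more fields). */
struct drm_gpu_scheduler { const char *name; };
struct drm_sched_job     { struct drm_gpu_scheduler *sched; };
struct amdgpu_device     { int vram_lost_counter; };
struct amdgpu_job        { struct drm_sched_job base; };

struct amdgpu_ring {
	struct amdgpu_device *adev;       /* back-pointer to the device */
	struct drm_gpu_scheduler sched;   /* scheduler embedded in the ring */
};

/* Recover the enclosing struct from a pointer to an embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct amdgpu_ring *to_amdgpu_ring(struct drm_gpu_scheduler *sched)
{
	return container_of(sched, struct amdgpu_ring, sched);
}

/* Every site that used to read job->adev can instead do this, which is
 * why amdgpu_job_alloc() must now always initialize job->base.sched. */
static struct amdgpu_device *job_to_adev(struct amdgpu_job *job)
{
	return to_amdgpu_ring(job->base.sched)->adev;
}

This is exactly the transformation the hunks below perform: each job->adev user becomes ring->adev after a to_amdgpu_ring(job->base.sched) lookup.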
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 18 +++++++++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0e2b18ccdf2e..9b1c54ace583 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
 		  ring->fence_drv.sync_seq);
 
-	amdgpu_device_gpu_recover(job->adev, job, false);
+	amdgpu_device_gpu_recover(ring->adev, job, false);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	if (!*job)
 		return -ENOMEM;
 
-	(*job)->adev = adev;
+	/*
+	 * Initialize the scheduler to at least some ring so that we always
+	 * have a pointer to adev.
+	 */
+	(*job)->base.sched = &adev->rings[0]->sched;
 	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
 	struct dma_fence *f;
 	unsigned i;
 
@@ -93,7 +98,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
 	for (i = 0; i < job->num_ibs; ++i)
-		amdgpu_ib_free(job->adev, &job->ibs[i], f);
+		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
@@ -167,7 +172,8 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 
 	if (fence && explicit) {
 		if (drm_sched_dependency_optimized(fence, s_entity)) {
-			r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+					      fence, false);
 			if (r)
 				DRM_ERROR("Error adding fence to sync (%d)\n", r);
 		}
@@ -190,7 +196,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 	struct dma_fence *fence = NULL, *finished;
-	struct amdgpu_device *adev;
 	struct amdgpu_job *job;
 	int r;
 
@@ -200,13 +205,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 	}
 	job = to_amdgpu_job(sched_job);
 	finished = &job->base.s_fence->finished;
-	adev = job->adev;
 
 	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
 
-	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
 		dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
 
 	if (finished->error < 0) {