commit 3320b8d2acd3d480d0dd4835d970067354eac915 (patch)
Author:    Christian König <christian.koenig@amd.com>   2018-07-13 09:08:44 -0400
Committer: Alex Deucher <alexander.deucher@amd.com>     2018-07-16 17:11:53 -0400
tree       1410542092698899f34f8da6b2e60aecb76f0574 /drivers/gpu/drm/amd
parent     0e28b10ff1b8e65788040b51c30c9cc984060dcd (diff)
drm/amdgpu: remove job->ring
We can easily get that from the scheduler.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 18 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 23 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 |
7 files changed, 29 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e0cc9f878e80..90780b0f0ce5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -1027,6 +1027,7 @@ struct amdgpu_cs_parser { | |||
1027 | 1027 | ||
1028 | /* scheduler job object */ | 1028 | /* scheduler job object */ |
1029 | struct amdgpu_job *job; | 1029 | struct amdgpu_job *job; |
1030 | struct amdgpu_ring *ring; | ||
1030 | 1031 | ||
1031 | /* buffer objects */ | 1032 | /* buffer objects */ |
1032 | struct ww_acquire_ctx ticket; | 1033 | struct ww_acquire_ctx ticket; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 6eb7ee859ffd..72dc9b36b937 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -912,11 +912,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, | |||
912 | { | 912 | { |
913 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | 913 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
914 | struct amdgpu_vm *vm = &fpriv->vm; | 914 | struct amdgpu_vm *vm = &fpriv->vm; |
915 | struct amdgpu_ring *ring = p->job->ring; | 915 | struct amdgpu_ring *ring = p->ring; |
916 | int r; | 916 | int r; |
917 | 917 | ||
918 | /* Only for UVD/VCE VM emulation */ | 918 | /* Only for UVD/VCE VM emulation */ |
919 | if (p->job->ring->funcs->parse_cs) { | 919 | if (p->ring->funcs->parse_cs) { |
920 | unsigned i, j; | 920 | unsigned i, j; |
921 | 921 | ||
922 | for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) { | 922 | for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) { |
@@ -1030,10 +1030,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
1030 | } | 1030 | } |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | if (parser->job->ring && parser->job->ring != ring) | 1033 | if (parser->ring && parser->ring != ring) |
1034 | return -EINVAL; | 1034 | return -EINVAL; |
1035 | 1035 | ||
1036 | parser->job->ring = ring; | 1036 | parser->ring = ring; |
1037 | 1037 | ||
1038 | r = amdgpu_ib_get(adev, vm, | 1038 | r = amdgpu_ib_get(adev, vm, |
1039 | ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0, | 1039 | ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0, |
@@ -1052,11 +1052,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
1052 | 1052 | ||
1053 | /* UVD & VCE fw doesn't support user fences */ | 1053 | /* UVD & VCE fw doesn't support user fences */ |
1054 | if (parser->job->uf_addr && ( | 1054 | if (parser->job->uf_addr && ( |
1055 | parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD || | 1055 | parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD || |
1056 | parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE)) | 1056 | parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE)) |
1057 | return -EINVAL; | 1057 | return -EINVAL; |
1058 | 1058 | ||
1059 | return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx); | 1059 | return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx); |
1060 | } | 1060 | } |
1061 | 1061 | ||
1062 | static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, | 1062 | static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, |
@@ -1207,7 +1207,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) | |||
1207 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | 1207 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, |
1208 | union drm_amdgpu_cs *cs) | 1208 | union drm_amdgpu_cs *cs) |
1209 | { | 1209 | { |
1210 | struct amdgpu_ring *ring = p->job->ring; | 1210 | struct amdgpu_ring *ring = p->ring; |
1211 | struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity; | 1211 | struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity; |
1212 | struct amdgpu_job *job; | 1212 | struct amdgpu_job *job; |
1213 | unsigned i; | 1213 | unsigned i; |
@@ -1256,7 +1256,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1256 | job->uf_sequence = seq; | 1256 | job->uf_sequence = seq; |
1257 | 1257 | ||
1258 | amdgpu_job_free_resources(job); | 1258 | amdgpu_job_free_resources(job); |
1259 | amdgpu_ring_priority_get(job->ring, job->base.s_priority); | 1259 | amdgpu_ring_priority_get(p->ring, job->base.s_priority); |
1260 | 1260 | ||
1261 | trace_amdgpu_cs_ioctl(job); | 1261 | trace_amdgpu_cs_ioctl(job); |
1262 | drm_sched_entity_push_job(&job->base, entity); | 1262 | drm_sched_entity_push_job(&job->base, entity); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2720444ff23a..2b2de5f3e6e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -3253,7 +3253,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, | |||
3253 | 3253 | ||
3254 | kthread_park(ring->sched.thread); | 3254 | kthread_park(ring->sched.thread); |
3255 | 3255 | ||
3256 | if (job && job->ring->idx != i) | 3256 | if (job && job->base.sched == &ring->sched) |
3257 | continue; | 3257 | continue; |
[NOTE(review): the replacement condition inverts the original logic. The old code skipped rings other than the job's ring (`idx != i` → continue); the faithful translation is `job->base.sched != &ring->sched`. As committed (`==`), hw-job-reset is skipped on the guilty ring and applied to all innocent rings instead — a regression introduced by this commit and corrected in a later upstream fix. The recovery check at line 3280 (`==` with `drm_sched_job_recovery`) is the correct translation of the original `idx == i`.]
3258 | 3258 | ||
3259 | drm_sched_hw_job_reset(&ring->sched, &job->base); | 3259 | drm_sched_hw_job_reset(&ring->sched, &job->base); |
@@ -3277,7 +3277,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, | |||
3277 | * or all rings (in the case @job is NULL) | 3277 | * or all rings (in the case @job is NULL) |
3278 | * after above amdgpu_reset accomplished | 3278 | * after above amdgpu_reset accomplished |
3279 | */ | 3279 | */ |
3280 | if ((!job || job->ring->idx == i) && !r) | 3280 | if ((!job || job->base.sched == &ring->sched) && !r) |
3281 | drm_sched_job_recovery(&ring->sched); | 3281 | drm_sched_job_recovery(&ring->sched); |
3282 | 3282 | ||
3283 | kthread_unpark(ring->sched.thread); | 3283 | kthread_unpark(ring->sched.thread); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 51ff751e093b..2496f2269bcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | |||
@@ -30,12 +30,12 @@ | |||
30 | 30 | ||
31 | static void amdgpu_job_timedout(struct drm_sched_job *s_job) | 31 | static void amdgpu_job_timedout(struct drm_sched_job *s_job) |
32 | { | 32 | { |
33 | struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); | 33 | struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); |
34 | struct amdgpu_job *job = to_amdgpu_job(s_job); | ||
34 | 35 | ||
35 | DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n", | 36 | DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n", |
36 | job->base.sched->name, | 37 | job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), |
37 | atomic_read(&job->ring->fence_drv.last_seq), | 38 | ring->fence_drv.sync_seq); |
38 | job->ring->fence_drv.sync_seq); | ||
39 | 39 | ||
40 | amdgpu_device_gpu_recover(job->adev, job, false); | 40 | amdgpu_device_gpu_recover(job->adev, job, false); |
41 | } | 41 | } |
@@ -98,9 +98,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) | |||
98 | 98 | ||
99 | static void amdgpu_job_free_cb(struct drm_sched_job *s_job) | 99 | static void amdgpu_job_free_cb(struct drm_sched_job *s_job) |
100 | { | 100 | { |
101 | struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); | 101 | struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); |
102 | struct amdgpu_job *job = to_amdgpu_job(s_job); | ||
102 | 103 | ||
103 | amdgpu_ring_priority_put(job->ring, s_job->s_priority); | 104 | amdgpu_ring_priority_put(ring, s_job->s_priority); |
104 | dma_fence_put(job->fence); | 105 | dma_fence_put(job->fence); |
105 | amdgpu_sync_free(&job->sync); | 106 | amdgpu_sync_free(&job->sync); |
106 | amdgpu_sync_free(&job->sched_sync); | 107 | amdgpu_sync_free(&job->sched_sync); |
@@ -120,6 +121,7 @@ void amdgpu_job_free(struct amdgpu_job *job) | |||
120 | int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, | 121 | int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, |
121 | void *owner, struct dma_fence **f) | 122 | void *owner, struct dma_fence **f) |
122 | { | 123 | { |
124 | struct amdgpu_ring *ring = to_amdgpu_ring(entity->sched); | ||
123 | int r; | 125 | int r; |
124 | 126 | ||
125 | if (!f) | 127 | if (!f) |
@@ -130,10 +132,9 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, | |||
130 | return r; | 132 | return r; |
131 | 133 | ||
132 | job->owner = owner; | 134 | job->owner = owner; |
133 | job->ring = to_amdgpu_ring(entity->sched); | ||
134 | *f = dma_fence_get(&job->base.s_fence->finished); | 135 | *f = dma_fence_get(&job->base.s_fence->finished); |
135 | amdgpu_job_free_resources(job); | 136 | amdgpu_job_free_resources(job); |
136 | amdgpu_ring_priority_get(job->ring, job->base.s_priority); | 137 | amdgpu_ring_priority_get(ring, job->base.s_priority); |
137 | drm_sched_entity_push_job(&job->base, entity); | 138 | drm_sched_entity_push_job(&job->base, entity); |
138 | 139 | ||
139 | return 0; | 140 | return 0; |
@@ -142,6 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, | |||
142 | static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, | 143 | static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, |
143 | struct drm_sched_entity *s_entity) | 144 | struct drm_sched_entity *s_entity) |
144 | { | 145 | { |
146 | struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched); | ||
145 | struct amdgpu_job *job = to_amdgpu_job(sched_job); | 147 | struct amdgpu_job *job = to_amdgpu_job(sched_job); |
146 | struct amdgpu_vm *vm = job->vm; | 148 | struct amdgpu_vm *vm = job->vm; |
147 | bool explicit = false; | 149 | bool explicit = false; |
@@ -157,8 +159,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, | |||
157 | } | 159 | } |
158 | 160 | ||
159 | while (fence == NULL && vm && !job->vmid) { | 161 | while (fence == NULL && vm && !job->vmid) { |
160 | struct amdgpu_ring *ring = job->ring; | ||
161 | |||
162 | r = amdgpu_vmid_grab(vm, ring, &job->sync, | 162 | r = amdgpu_vmid_grab(vm, ring, &job->sync, |
163 | &job->base.s_fence->finished, | 163 | &job->base.s_fence->finished, |
164 | job); | 164 | job); |
@@ -173,6 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, | |||
173 | 173 | ||
174 | static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) | 174 | static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) |
175 | { | 175 | { |
176 | struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched); | ||
176 | struct dma_fence *fence = NULL, *finished; | 177 | struct dma_fence *fence = NULL, *finished; |
177 | struct amdgpu_device *adev; | 178 | struct amdgpu_device *adev; |
178 | struct amdgpu_job *job; | 179 | struct amdgpu_job *job; |
@@ -196,7 +197,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) | |||
196 | if (finished->error < 0) { | 197 | if (finished->error < 0) { |
197 | DRM_INFO("Skip scheduling IBs!\n"); | 198 | DRM_INFO("Skip scheduling IBs!\n"); |
198 | } else { | 199 | } else { |
199 | r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, | 200 | r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, |
200 | &fence); | 201 | &fence); |
201 | if (r) | 202 | if (r) |
202 | DRM_ERROR("Error scheduling IBs (%d)\n", r); | 203 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 39f4230e1d37..c663c1925f91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | |||
@@ -37,7 +37,6 @@ struct amdgpu_job { | |||
37 | struct drm_sched_job base; | 37 | struct drm_sched_job base; |
38 | struct amdgpu_device *adev; | 38 | struct amdgpu_device *adev; |
39 | struct amdgpu_vm *vm; | 39 | struct amdgpu_vm *vm; |
40 | struct amdgpu_ring *ring; | ||
41 | struct amdgpu_sync sync; | 40 | struct amdgpu_sync sync; |
42 | struct amdgpu_sync sched_sync; | 41 | struct amdgpu_sync sched_sync; |
43 | struct amdgpu_ib *ibs; | 42 | struct amdgpu_ib *ibs; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index e96e26d3f3b0..76920035eb22 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | |||
@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs, | |||
150 | 150 | ||
151 | TP_fast_assign( | 151 | TP_fast_assign( |
152 | __entry->bo_list = p->bo_list; | 152 | __entry->bo_list = p->bo_list; |
153 | __entry->ring = p->job->ring->idx; | 153 | __entry->ring = p->ring->idx; |
154 | __entry->dw = p->job->ibs[i].length_dw; | 154 | __entry->dw = p->job->ibs[i].length_dw; |
155 | __entry->fences = amdgpu_fence_count_emitted( | 155 | __entry->fences = amdgpu_fence_count_emitted( |
156 | p->job->ring); | 156 | p->ring); |
157 | ), | 157 | ), |
158 | TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", | 158 | TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", |
159 | __entry->bo_list, __entry->ring, __entry->dw, | 159 | __entry->bo_list, __entry->ring, __entry->dw, |
@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, | |||
178 | __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) | 178 | __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) |
179 | __entry->context = job->base.s_fence->finished.context; | 179 | __entry->context = job->base.s_fence->finished.context; |
180 | __entry->seqno = job->base.s_fence->finished.seqno; | 180 | __entry->seqno = job->base.s_fence->finished.seqno; |
181 | __entry->ring_name = job->ring->name; | 181 | __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; |
182 | __entry->num_ibs = job->num_ibs; | 182 | __entry->num_ibs = job->num_ibs; |
183 | ), | 183 | ), |
184 | TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", | 184 | TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", |
@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job, | |||
203 | __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) | 203 | __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) |
204 | __entry->context = job->base.s_fence->finished.context; | 204 | __entry->context = job->base.s_fence->finished.context; |
205 | __entry->seqno = job->base.s_fence->finished.seqno; | 205 | __entry->seqno = job->base.s_fence->finished.seqno; |
206 | __entry->ring_name = job->ring->name; | 206 | __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; |
207 | __entry->num_ibs = job->num_ibs; | 207 | __entry->num_ibs = job->num_ibs; |
208 | ), | 208 | ), |
209 | TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", | 209 | TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 848b2e898818..7738d2b90dae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -692,11 +692,11 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, | |||
692 | struct amdgpu_bo *bo, unsigned offset) | 692 | struct amdgpu_bo *bo, unsigned offset) |
693 | { | 693 | { |
694 | struct amdgpu_device *adev = ctx->parser->adev; | 694 | struct amdgpu_device *adev = ctx->parser->adev; |
695 | uint32_t ip_instance = ctx->parser->ring->me; | ||
695 | int32_t *msg, msg_type, handle; | 696 | int32_t *msg, msg_type, handle; |
696 | void *ptr; | 697 | void *ptr; |
697 | long r; | 698 | long r; |
698 | int i; | 699 | int i; |
699 | uint32_t ip_instance = ctx->parser->job->ring->me; | ||
700 | 700 | ||
701 | if (offset & 0x3F) { | 701 | if (offset & 0x3F) { |
702 | DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance); | 702 | DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance); |