author		Jack Xiao <Jack.Xiao@amd.com>	2019-01-23 00:54:26 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2019-06-21 19:58:21 -0400
commit		80f8fb9178eda5a16b5ff8e2b2e8304f0a06f5f4 (patch)
tree		b80d1686bafa7785872327b61ca436041c0fff63
parent		6698a3d05fda57f37add68c55a0696bfa7100413 (diff)
drm/amdgpu: mark the partial job as preempted in mcbp unit test
In the mcbp unit test, the test should detect the preempted job, which
may be a partially executed IB, and mark it as preempted, so that the
gfx block can correctly generate the PM4 frame.

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c	| 44
1 file changed, 32 insertions(+), 12 deletions(-)
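For orientation before the diff: the new helper identifies a partially
executed IB by comparing the seqno the hardware reports for the preempted
IB against the last seqno known to have signaled, then looks the fence up
in the driver's power-of-two fence ring (num_fences_mask in amdgpu). A
minimal standalone sketch of that pattern follows; the names (fence_ring,
find_partial_fence) are hypothetical illustrations, not amdgpu code.

#include <stdint.h>
#include <stddef.h>

#define NUM_FENCES  64u                   /* power of two, as in amdgpu's fence ring */
#define FENCES_MASK (NUM_FENCES - 1u)

struct fence_ring {
	uint32_t last_signaled;           /* last seqno known to have completed */
	void *fences[NUM_FENCES];         /* slot (seqno & mask) holds that seqno's fence */
};

/* Return the fence of a partially executed IB, or NULL when the reported
 * preempt seqno has already signaled (nothing was cut short). */
void *find_partial_fence(struct fence_ring *r, uint32_t preempt_seq)
{
	if (preempt_seq <= r->last_signaled)
		return NULL;              /* the job completed before the preempt landed */

	/* the fence slots form a power-of-two ring indexed by the masked seqno */
	return r->fences[preempt_seq & FENCES_MASK];
}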
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8339f7a47cb2..c0dfad9b06fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -978,12 +978,40 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
 	spin_unlock(&sched->job_list_lock);
 }
 
+static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
+{
+	struct amdgpu_job *job;
+	struct drm_sched_job *s_job;
+	uint32_t preempt_seq;
+	struct dma_fence *fence, **ptr;
+	struct amdgpu_fence_driver *drv = &ring->fence_drv;
+	struct drm_gpu_scheduler *sched = &ring->sched;
+
+	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
+		return;
+
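+	/* preempt_seq shows the seq of the last preempted IB */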
+	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
+	if (preempt_seq <= atomic_read(&drv->last_seq))
+		return;
+
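+	/* the fence array is a power-of-two ring indexed by the masked seqno */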
+	preempt_seq &= drv->num_fences_mask;
+	ptr = &drv->fences[preempt_seq];
+	fence = rcu_dereference_protected(*ptr, 1);
+
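+	/* walk the pending jobs and flag the one that owns the preempted fence */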
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+		job = to_amdgpu_job(s_job);
+		if (job->fence == fence)
+			/* mark the job as preempted */
+			job->preemption_status |= AMDGPU_IB_PREEMPTED;
+	}
+	spin_unlock(&sched->job_list_lock);
+}
+
 static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 {
 	int r, resched, length;
 	struct amdgpu_ring *ring;
-	struct drm_sched_job *s_job;
-	struct amdgpu_job *job;
 	struct dma_fence **fences = NULL;
 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
 
@@ -1022,21 +1050,13 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 		    ring->fence_drv.sync_seq) {
 		DRM_INFO("ring %d was preempted\n", ring->idx);
 
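+		/* mark the partially executed job before its fence is swapped out */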
+		amdgpu_ib_preempt_mark_partial_job(ring);
+
 		/* swap out the old fences */
 		amdgpu_ib_preempt_fences_swap(ring, fences);
 
 		amdgpu_fence_driver_force_completion(ring);
 
-		s_job = list_first_entry_or_null(
-			&ring->sched.ring_mirror_list,
-			struct drm_sched_job, node);
-		if (s_job) {
-			job = to_amdgpu_job(s_job);
-			/* mark the job as preempted */
-			/* job->preemption_status |=
-				AMDGPU_IB_PREEMPTED; */
-		}
-
 		/* resubmit unfinished jobs */
 		amdgpu_ib_preempt_job_recovery(&ring->sched);
 