aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
diff options
context:
space:
mode:
authorMonk Liu <Monk.Liu@amd.com>2016-03-09 23:14:44 -0500
committerAlex Deucher <alexander.deucher@amd.com>2016-05-02 15:20:07 -0400
commitb6723c8da55af5309cf06e71a5228f3c02846c5a (patch)
tree268b16b48fca9556569ce72c02b9c1a0985b3df3 /drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
parent0de2479c953ae07fd11e7b1bc8d4fc831e6842bb (diff)
drm/amdgpu: use ref to keep job alive
this is to fix a fatal page fault error that occurred if: a job is signaled/released after its timeout work is already put on the global queue (in this case cancel_delayed_work will return false), which will lead to an NX-protection error page fault during job_timeout_func.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 961cae4a1955..a052ac2b131d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -31,7 +31,7 @@
 static void amdgpu_job_free_handler(struct work_struct *ws)
 {
 	struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
-	kfree(job);
+	amd_sched_job_put(&job->base);
 }
 
 void amdgpu_job_timeout_func(struct work_struct *work)
@@ -41,6 +41,8 @@ void amdgpu_job_timeout_func(struct work_struct *work)
 		  job->base.sched->name,
 		  (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
 		  job->ring->fence_drv.sync_seq);
+
+	amd_sched_job_put(&job->base);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -101,6 +103,12 @@ void amdgpu_job_free(struct amdgpu_job *job)
 	kfree(job);
 }
 
+void amdgpu_job_free_func(struct kref *refcount)
+{
+	struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
+	kfree(job);
+}
+
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
 		      struct fence **f)
@@ -113,9 +121,10 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		return -EINVAL;
 
 	r = amd_sched_job_init(&job->base, &ring->sched,
-			       entity, owner,
+			       entity,
 			       amdgpu_job_timeout_func,
-			       &fence);
+			       amdgpu_job_free_func,
+			       owner, &fence);
 	if (r)
 		return r;
 