author		Christian König <christian.koenig@amd.com>	2015-11-05 13:49:48 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2015-11-16 11:05:58 -0500
commit		e284022163716ecf11c37fd1057c35d689ef2c11
tree		90269611e4360eab59eeb534fcf8725d4835ed42 /drivers/gpu/drm/amd/amdgpu
parent		4a562283376197722b295d27633134401bbc80f5
drm/amdgpu: fix incorrect mutex usage v3
Before this patch the scheduler fence was created when the job was pushed into the queue, so we could only get the fence after pushing it.

The mutex was then necessary to prevent the thread pushing jobs to the hardware from running faster than the thread pushing jobs into the queue. Otherwise the thread pushing jobs into the queue could access already freed memory when it tried to get a reference to the fence.

So what you got in the end was:

thread A:
    mutex_lock(&job->lock);
    ...
    Kick off thread B.
    ...
    mutex_unlock(&job->lock);

and thread B:
    mutex_lock(&job->lock);
    ...
    mutex_unlock(&job->lock);
    kfree(job);

I'm actually not sure if I'm still up to date on this, but this usage pattern used to be disallowed with mutexes. See https://lwn.net/Articles/575460/ as well.

v2: remove unrelated changes, fix missing owner
v3: rebase, expand commit message

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
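To make the hazard concrete, here is a minimal userspace sketch of the same pattern, using plain pthreads rather than kernel mutexes; all names are invented for the illustration and nothing below is driver code:

    #include <pthread.h>
    #include <stdlib.h>

    struct job {
        pthread_mutex_t lock;
        int payload;
    };

    /* Thread B: consume the job, then free it together with its mutex. */
    static void *thread_b(void *arg)
    {
        struct job *job = arg;

        pthread_mutex_lock(&job->lock);
        /* ... consume job->payload ... */
        pthread_mutex_unlock(&job->lock);
        free(job);              /* the mutex embedded in job is gone now */
        return NULL;
    }

    int main(void)
    {
        struct job *job = calloc(1, sizeof(*job));
        pthread_t b;

        pthread_mutex_init(&job->lock, NULL);

        /* Thread A: hand the job off while still holding the lock. */
        pthread_mutex_lock(&job->lock);
        pthread_create(&b, NULL, thread_b, job);
        /*
         * The unlock below may still touch the mutex after thread B has
         * been allowed to acquire it; if B unlocks and frees first, this
         * unlock operates on freed memory.
         */
        pthread_mutex_unlock(&job->lock);

        pthread_join(b, NULL);
        return 0;
    }

The patch sidesteps the pattern entirely: the fence is created before the job is published, so no lock has to outlive the handoff.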
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h		 2
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c		43
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c	27
3 files changed, 35 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7b02e3455172..0f187027c753 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1225,7 +1225,7 @@ struct amdgpu_job {
 	struct amdgpu_device	*adev;
 	struct amdgpu_ib	*ibs;
 	uint32_t		num_ibs;
-	struct mutex		job_lock;
+	void			*owner;
 	struct amdgpu_user_fence uf;
 	int (*free_job)(struct amdgpu_job *job);
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2ae73d5232dd..44cf977ae4f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -845,8 +845,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		goto out;
 
 	if (amdgpu_enable_scheduler && parser.num_ibs) {
-		struct amdgpu_job *job;
 		struct amdgpu_ring * ring = parser.ibs->ring;
+		struct amd_sched_fence *fence;
+		struct amdgpu_job *job;
 
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job) {
@@ -859,37 +860,41 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		job->adev = parser.adev;
 		job->ibs = parser.ibs;
 		job->num_ibs = parser.num_ibs;
-		job->base.owner = parser.filp;
-		mutex_init(&job->job_lock);
+		job->owner = parser.filp;
+		job->free_job = amdgpu_cs_free_job;
+
 		if (job->ibs[job->num_ibs - 1].user) {
 			job->uf = parser.uf;
 			job->ibs[job->num_ibs - 1].user = &job->uf;
 			parser.uf.bo = NULL;
 		}
 
-		parser.ibs = NULL;
-		parser.num_ibs = 0;
-
-		job->free_job = amdgpu_cs_free_job;
-		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job(&job->base);
-		if (r) {
-			mutex_unlock(&job->job_lock);
+		fence = amd_sched_fence_create(job->base.s_entity,
+					       parser.filp);
+		if (!fence) {
+			r = -ENOMEM;
 			amdgpu_cs_free_job(job);
 			kfree(job);
 			goto out;
 		}
-		cs->out.handle =
-			amdgpu_ctx_add_fence(parser.ctx, ring,
-					     &job->base.s_fence->base);
+		job->base.s_fence = fence;
+		fence_get(&fence->base);
+
+		cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
+						      &fence->base);
 		job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
 
-		list_sort(NULL, &parser.validated, cmp_size_smaller_first);
-		ttm_eu_fence_buffer_objects(&parser.ticket,
-					    &parser.validated,
-					    &job->base.s_fence->base);
+		parser.ibs = NULL;
+		parser.num_ibs = 0;
+
 		trace_amdgpu_cs_ioctl(job);
-		mutex_unlock(&job->job_lock);
+		amd_sched_entity_push_job(&job->base);
+
+		list_sort(NULL, &parser.validated, cmp_size_smaller_first);
+		ttm_eu_fence_buffer_objects(&parser.ticket, &parser.validated,
+					    &fence->base);
+		fence_put(&fence->base);
+
 		amdgpu_cs_parser_fini_late(&parser);
 		mutex_unlock(&vm->mutex);
 		return 0;
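The ordering that replaces the lock is worth spelling out. A condensed paraphrase of the new amdgpu_cs_ioctl() flow above (error handling omitted; not the literal driver code):

    fence = amd_sched_fence_create(job->base.s_entity, parser.filp);
    job->base.s_fence = fence;
    fence_get(&fence->base);               /* local reference for this function */

    amd_sched_entity_push_job(&job->base); /* the scheduler may free job from here on */

    /* only the fence, never the job, may be touched after the push */
    ttm_eu_fence_buffer_objects(&parser.ticket, &parser.validated, &fence->base);
    fence_put(&fence->base);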
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 8ef9e4415fcc..438c05254695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -45,12 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
 		return NULL;
 	}
 	job = to_amdgpu_job(sched_job);
-	mutex_lock(&job->job_lock);
 	trace_amdgpu_sched_run_job(job);
-	r = amdgpu_ib_schedule(job->adev,
-			       job->num_ibs,
-			       job->ibs,
-			       job->base.owner);
+	r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
 	if (r) {
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
@@ -63,7 +59,6 @@ err:
 	if (job->free_job)
 		job->free_job(job);
 
-	mutex_unlock(&job->job_lock);
 	kfree(job);
 	return fence ? &fence->base : NULL;
 }
@@ -89,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 			return -ENOMEM;
 		job->base.sched = &ring->sched;
 		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+		job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+		if (!job->base.s_fence) {
+			kfree(job);
+			return -ENOMEM;
+		}
+		*f = fence_get(&job->base.s_fence->base);
+
 		job->adev = adev;
 		job->ibs = ibs;
 		job->num_ibs = num_ibs;
-		job->base.owner = owner;
-		mutex_init(&job->job_lock);
+		job->owner = owner;
 		job->free_job = free_job;
-		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job(&job->base);
-		if (r) {
-			mutex_unlock(&job->job_lock);
-			kfree(job);
-			return r;
-		}
-		*f = fence_get(&job->base.s_fence->base);
-		mutex_unlock(&job->job_lock);
+		amd_sched_entity_push_job(&job->base);
 	} else {
 		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
 		if (r)