author		Lucas Stach <l.stach@pengutronix.de>	2018-05-25 10:51:25 -0400
committer	Lucas Stach <l.stach@pengutronix.de>	2018-08-06 09:24:05 -0400
commit		a0780bb1df60f00e4573db7bd53e7039e9eee1cb (patch)
tree		a8fbd8476214af486551c4b6b22d2c65b5a79872
parent		6ae9c84ff249f2756086e71405375fd06124cf1f (diff)
drm/etnaviv: protect sched job submission with fence mutex
The documentation of drm_sched_job_init and drm_sched_entity_push_job has
been clarified. Both functions should be called under a shared lock, to
avoid jobs getting pushed into the scheduler queue in a different order
than their sched_fence seqnos, which will confuse checks that are looking
at the seqnos to infer information about completion order.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
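The locking rule the message refers to can be sketched roughly as below. This is only an illustrative fragment under assumed names (struct my_dev, struct my_job, my_push_job and job_lock are hypothetical); the driver's actual implementation is the etnaviv_sched_push_job() change in the diff further down.

static int my_push_job(struct my_dev *dev, struct my_job *job,
                       struct drm_sched_entity *entity)
{
        int ret;

        /*
         * drm_sched_job_init() assigns the job's sched_fence seqno, so it
         * and drm_sched_entity_push_job() are called under the same mutex
         * to keep scheduler queue order consistent with seqno order.
         */
        mutex_lock(&dev->job_lock);

        ret = drm_sched_job_init(&job->sched_job, &dev->sched, entity, dev);
        if (ret)
                goto out_unlock;        /* never push a job that failed to init */

        drm_sched_entity_push_job(&job->sched_job, entity);

out_unlock:
        mutex_unlock(&dev->job_lock);
        return ret;
}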
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c	4
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gpu.c		2
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gpu.h		2
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_sched.c		24
4 files changed, 21 insertions, 11 deletions
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 46ecd3e66ac9..983e67f19e45 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -388,9 +388,9 @@ static void submit_cleanup(struct kref *kref)
                 dma_fence_put(submit->in_fence);
         if (submit->out_fence) {
                 /* first remove from IDR, so fence can not be found anymore */
-                mutex_lock(&submit->gpu->fence_idr_lock);
+                mutex_lock(&submit->gpu->fence_lock);
                 idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
-                mutex_unlock(&submit->gpu->fence_idr_lock);
+                mutex_unlock(&submit->gpu->fence_lock);
                 dma_fence_put(submit->out_fence);
         }
         kfree(submit->pmrs);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 686f6552db48..18c2224ba0b8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1733,7 +1733,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 
         gpu->dev = &pdev->dev;
         mutex_init(&gpu->lock);
-        mutex_init(&gpu->fence_idr_lock);
+        mutex_init(&gpu->fence_lock);
 
         /* Map registers: */
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 90f17ff7888e..9a75a6937268 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -118,7 +118,7 @@ struct etnaviv_gpu {
         u32 idle_mask;
 
         /* Fencing support */
-        struct mutex fence_idr_lock;
+        struct mutex fence_lock;
         struct idr fence_idr;
         u32 next_fence;
         u32 active_fence;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 50d6b88cb7aa..b267d9c4d91c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -140,28 +140,38 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
 int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
                            struct etnaviv_gem_submit *submit)
 {
-        int ret;
+        int ret = 0;
+
+        /*
+         * Hold the fence lock across the whole operation to avoid jobs being
+         * pushed out of order with regard to their sched fence seqnos as
+         * allocated in drm_sched_job_init.
+         */
+        mutex_lock(&submit->gpu->fence_lock);
 
         ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
                                  sched_entity, submit->cmdbuf.ctx);
         if (ret)
-                return ret;
+                goto out_unlock;
 
         submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
-        mutex_lock(&submit->gpu->fence_idr_lock);
         submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
                                                 submit->out_fence, 0,
                                                 INT_MAX, GFP_KERNEL);
-        mutex_unlock(&submit->gpu->fence_idr_lock);
-        if (submit->out_fence_id < 0)
-                return -ENOMEM;
+        if (submit->out_fence_id < 0) {
+                ret = -ENOMEM;
+                goto out_unlock;
+        }
 
         /* the scheduler holds on to the job now */
         kref_get(&submit->refcount);
 
         drm_sched_entity_push_job(&submit->sched_job, sched_entity);
 
-        return 0;
+out_unlock:
+        mutex_unlock(&submit->gpu->fence_lock);
+
+        return ret;
 }
 
 int etnaviv_sched_init(struct etnaviv_gpu *gpu)