diff options
author | Chunming Zhou <david1.zhou@amd.com> | 2015-07-21 03:53:04 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-08-17 16:50:35 -0400 |
commit | 4b559c90bc1870313f02cceef680884519af6b2b (patch) | |
tree | 7445ec7982f1fac5e517097bedd72cc752c41889 | |
parent | b43a9a7e87d2bbb8d0c6ae4ff06dcc604f00e31a (diff) |
drm/amdgpu: make sure the fence is emitted before ring to get it.
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 26 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 10 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 5 |
4 files changed, 25 insertions, 18 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index becb26317467..127867c2fc37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -81,6 +81,7 @@ extern int amdgpu_vm_size; | |||
81 | extern int amdgpu_vm_block_size; | 81 | extern int amdgpu_vm_block_size; |
82 | extern int amdgpu_enable_scheduler; | 82 | extern int amdgpu_enable_scheduler; |
83 | 83 | ||
84 | #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 | ||
84 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ | 85 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ |
85 | #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) | 86 | #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) |
86 | /* AMDGPU_IB_POOL_SIZE must be a power of 2 */ | 87 | /* AMDGPU_IB_POOL_SIZE must be a power of 2 */ |
@@ -1239,6 +1240,7 @@ struct amdgpu_cs_parser { | |||
1239 | /* user fence */ | 1240 | /* user fence */ |
1240 | struct amdgpu_user_fence uf; | 1241 | struct amdgpu_user_fence uf; |
1241 | 1242 | ||
1243 | struct amdgpu_ring *ring; | ||
1242 | struct mutex job_lock; | 1244 | struct mutex job_lock; |
1243 | struct work_struct job_work; | 1245 | struct work_struct job_work; |
1244 | int (*prepare_job)(struct amdgpu_cs_parser *sched_job); | 1246 | int (*prepare_job)(struct amdgpu_cs_parser *sched_job); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index f9d4fe985668..5f2403898b06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -915,7 +915,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
915 | goto out; | 915 | goto out; |
916 | } else | 916 | } else |
917 | parser->prepare_job = amdgpu_cs_parser_prepare_job; | 917 | parser->prepare_job = amdgpu_cs_parser_prepare_job; |
918 | 918 | parser->ring = ring; | |
919 | parser->run_job = amdgpu_cs_parser_run_job; | 919 | parser->run_job = amdgpu_cs_parser_run_job; |
920 | parser->free_job = amdgpu_cs_parser_free_job; | 920 | parser->free_job = amdgpu_cs_parser_free_job; |
921 | amd_sched_push_job(ring->scheduler, | 921 | amd_sched_push_job(ring->scheduler, |
@@ -965,24 +965,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, | |||
965 | ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); | 965 | ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); |
966 | if (ctx == NULL) | 966 | if (ctx == NULL) |
967 | return -EINVAL; | 967 | return -EINVAL; |
968 | if (amdgpu_enable_scheduler) { | ||
969 | r = amd_sched_wait_ts(&ctx->rings[ring->idx].c_entity, | ||
970 | wait->in.handle, true, timeout); | ||
971 | if (r) | ||
972 | return r; | ||
973 | r = 1; | ||
974 | } else { | ||
975 | fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle); | ||
976 | if (IS_ERR(fence)) | ||
977 | r = PTR_ERR(fence); | ||
978 | 968 | ||
979 | else if (fence) { | 969 | fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle); |
980 | r = fence_wait_timeout(fence, true, timeout); | 970 | if (IS_ERR(fence)) |
981 | fence_put(fence); | 971 | r = PTR_ERR(fence); |
972 | else if (fence) { | ||
973 | r = fence_wait_timeout(fence, true, timeout); | ||
974 | fence_put(fence); | ||
975 | } else | ||
976 | r = 1; | ||
982 | 977 | ||
983 | } else | ||
984 | r = 1; | ||
985 | } | ||
986 | amdgpu_ctx_put(ctx); | 978 | amdgpu_ctx_put(ctx); |
987 | if (r < 0) | 979 | if (r < 0) |
988 | return r; | 980 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index b9be250cb206..41bc7fc0ebf6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
@@ -261,6 +261,16 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, | |||
261 | struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; | 261 | struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; |
262 | struct fence *fence; | 262 | struct fence *fence; |
263 | uint64_t queued_seq; | 263 | uint64_t queued_seq; |
264 | int r; | ||
265 | |||
266 | if (amdgpu_enable_scheduler) { | ||
267 | r = amd_sched_wait_emit(&cring->c_entity, | ||
268 | seq, | ||
269 | true, | ||
270 | AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS); | ||
271 | if (r) | ||
272 | return NULL; | ||
273 | } | ||
264 | 274 | ||
265 | spin_lock(&ctx->ring_lock); | 275 | spin_lock(&ctx->ring_lock); |
266 | if (amdgpu_enable_scheduler) | 276 | if (amdgpu_enable_scheduler) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index 1f7bf31da7fc..46ec915c9344 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | |||
@@ -56,12 +56,15 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched, | |||
56 | sched_job->filp); | 56 | sched_job->filp); |
57 | if (r) | 57 | if (r) |
58 | goto err; | 58 | goto err; |
59 | |||
60 | if (sched_job->run_job) { | 59 | if (sched_job->run_job) { |
61 | r = sched_job->run_job(sched_job); | 60 | r = sched_job->run_job(sched_job); |
62 | if (r) | 61 | if (r) |
63 | goto err; | 62 | goto err; |
64 | } | 63 | } |
64 | atomic64_set(&c_entity->last_emitted_v_seq, | ||
65 | sched_job->uf.sequence); | ||
66 | wake_up_all(&c_entity->wait_emit); | ||
67 | |||
65 | mutex_unlock(&sched_job->job_lock); | 68 | mutex_unlock(&sched_job->job_lock); |
66 | return; | 69 | return; |
67 | err: | 70 | err: |