author	Chunming Zhou <david1.zhou@amd.com>	2015-07-21 03:13:53 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2015-08-17 16:50:35 -0400
commit	b43a9a7e87d2bbb8d0c6ae4ff06dcc604f00e31a (patch)
tree	c6ba7b019408e5fb541a26e852cecbd38f73f0a2
parent	049fc527b4641f99e573b26f1a726a3eadd0cc25 (diff)
drm/amdgpu: use scheduler user seq instead of previous user seq
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 557fb60f416b..b9be250cb206 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -225,10 +225,16 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 			      struct fence *fence)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-	uint64_t seq = cring->sequence;
-	unsigned idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
-	struct fence *other = cring->fences[idx];
+	uint64_t seq = 0;
+	unsigned idx = 0;
+	struct fence *other = NULL;
 
+	if (amdgpu_enable_scheduler)
+		seq = atomic64_read(&cring->c_entity.last_queued_v_seq);
+	else
+		seq = cring->sequence;
+	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	other = cring->fences[idx];
 	if (other) {
 		signed long r;
 		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
@@ -240,7 +246,8 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 
 	spin_lock(&ctx->ring_lock);
 	cring->fences[idx] = fence;
-	cring->sequence++;
+	if (!amdgpu_enable_scheduler)
+		cring->sequence++;
 	spin_unlock(&ctx->ring_lock);
 
 	fence_put(other);
@@ -253,14 +260,21 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	struct fence *fence;
+	uint64_t queued_seq;
 
 	spin_lock(&ctx->ring_lock);
-	if (seq >= cring->sequence) {
+	if (amdgpu_enable_scheduler)
+		queued_seq = atomic64_read(&cring->c_entity.last_queued_v_seq) + 1;
+	else
+		queued_seq = cring->sequence;
+
+	if (seq >= queued_seq) {
 		spin_unlock(&ctx->ring_lock);
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+
+	if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
 		spin_unlock(&ctx->ring_lock);
 		return NULL;
 	}
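
The hunks above select the user-visible sequence number from one of two sources: the scheduler entity's atomically updated counter (c_entity.last_queued_v_seq) when amdgpu_enable_scheduler is set, or the legacy per-ring counter (cring->sequence) otherwise. The following is a minimal userspace sketch of that selection pattern, assuming nothing beyond standard C11; ring_ctx, last_queued_seq, MAX_PENDING and current_user_seq() are hypothetical stand-ins for illustration, not the kernel's actual types or API.

/*
 * Sketch of the sequence-selection pattern used by this patch.
 * All names here are hypothetical; the real code reads
 * cring->c_entity.last_queued_v_seq with atomic64_read().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING 16			/* stand-in for AMDGPU_CTX_MAX_CS_PENDING */

struct ring_ctx {
	_Atomic uint64_t last_queued_seq;	/* updated by the scheduler path */
	uint64_t sequence;			/* legacy per-ring counter */
};

static bool enable_scheduler = true;		/* mirrors the amdgpu_enable_scheduler switch */

/* Pick the user sequence number the same way the patched code does. */
static uint64_t current_user_seq(struct ring_ctx *cring)
{
	if (enable_scheduler)
		return atomic_load(&cring->last_queued_seq);
	return cring->sequence;
}

int main(void)
{
	struct ring_ctx cring = { .sequence = 5 };
	uint64_t seq;
	unsigned int idx;

	atomic_store(&cring.last_queued_seq, 42);

	seq = current_user_seq(&cring);
	idx = seq % MAX_PENDING;	/* slot that would hold the pending fence */

	printf("seq=%llu idx=%u\n", (unsigned long long)seq, idx);
	return 0;
}

With enable_scheduler set, the pending-fence slot index follows the scheduler's queue position (here 42 % 16); with it cleared, the ring's own counter is used instead, matching the two branches added to amdgpu_ctx_add_fence() and amdgpu_ctx_get_fence() above.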