about summary refs log tree commit diff stats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorChunming Zhou <david1.zhou@amd.com>2015-08-03 23:30:09 -0400
committerAlex Deucher <alexander.deucher@amd.com>2015-08-17 16:50:59 -0400
commit7484667c6a8a9122d139a287454bc9c8799c3def (patch)
treee5c9174094b10ab1d4cb4b0af5567958876fe191 /drivers/gpu
parent27f6642d066ecea7b535dd9b24e2f41e54f3dd85 (diff)
drm/amdgpu: move sched job process from isr to fence callback
This way we can avoid losing interrupts, and can process the sched job exactly once. Signed-off-by: Chunming Zhou <david1.zhou@amd.com> Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c13
3 files changed, 15 insertions, 19 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e1f093c1f011..4d6a3e825096 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -404,7 +404,7 @@ struct amdgpu_fence_driver {
404 404
405struct amdgpu_fence { 405struct amdgpu_fence {
406 struct fence base; 406 struct fence base;
407 407 struct fence_cb cb;
408 /* RB, DMA, etc. */ 408 /* RB, DMA, etc. */
409 struct amdgpu_ring *ring; 409 struct amdgpu_ring *ring;
410 uint64_t seq; 410 uint64_t seq;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 60e6d668f6b4..eb419791d1b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -350,25 +350,8 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
350 } 350 }
351 } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); 351 } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
352 352
353 if (wake) { 353 if (wake)
354 if (amdgpu_enable_scheduler) {
355 uint64_t handled_seq =
356 amd_sched_get_handled_seq(ring->scheduler);
357 uint64_t latest_seq =
358 atomic64_read(&ring->fence_drv.last_seq);
359 if (handled_seq == latest_seq) {
360 DRM_ERROR("ring %d, EOP without seq update (lastest_seq=%llu)\n",
361 ring->idx, latest_seq);
362 goto exit;
363 }
364 do {
365 amd_sched_isr(ring->scheduler);
366 } while (amd_sched_get_handled_seq(ring->scheduler) < latest_seq);
367 }
368
369 wake_up_all(&ring->fence_drv.fence_queue); 354 wake_up_all(&ring->fence_drv.fence_queue);
370 }
371exit:
372 spin_unlock_irqrestore(&ring->fence_lock, irqflags); 355 spin_unlock_irqrestore(&ring->fence_lock, irqflags);
373} 356}
374 357
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 83138a6c54b5..9f2f19cc4625 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -43,12 +43,20 @@ static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
43 return r; 43 return r;
44} 44}
45 45
46static void amdgpu_fence_sched_cb(struct fence *f, struct fence_cb *cb)
47{
48 struct amdgpu_fence *fence =
49 container_of(cb, struct amdgpu_fence, cb);
50 amd_sched_isr(fence->ring->scheduler);
51}
52
46static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched, 53static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
47 struct amd_context_entity *c_entity, 54 struct amd_context_entity *c_entity,
48 void *job) 55 void *job)
49{ 56{
50 int r = 0; 57 int r = 0;
51 struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job; 58 struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
59 struct amdgpu_fence *fence;
52 60
53 mutex_lock(&sched_job->job_lock); 61 mutex_lock(&sched_job->job_lock);
54 r = amdgpu_ib_schedule(sched_job->adev, 62 r = amdgpu_ib_schedule(sched_job->adev,
@@ -57,6 +65,11 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
57 sched_job->filp); 65 sched_job->filp);
58 if (r) 66 if (r)
59 goto err; 67 goto err;
68 fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
69 if (fence_add_callback(&fence->base,
70 &fence->cb, amdgpu_fence_sched_cb))
71 goto err;
72
60 if (sched_job->run_job) { 73 if (sched_job->run_job) {
61 r = sched_job->run_job(sched_job); 74 r = sched_job->run_job(sched_job);
62 if (r) 75 if (r)