about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/drm
diff options
context:
space:
mode:
authorChunming Zhou <david1.zhou@amd.com>2015-06-01 02:14:32 -0400
committerAlex Deucher <alexander.deucher@amd.com>2015-08-17 16:50:17 -0400
commitc6a4079badc2f0eda69a028622c7080a284ae147 (patch)
tree868dd6d830d2d51151b305d93075e4e969f3006c /drivers/gpu/drm
parent7f8a5290f5b6c14dd1d295e2508e0dd193a9fda5 (diff)
drm/amdgpu: always enable EOP interrupt v2
v2 (chk): always enable EOP interrupt, independent of scheduler, remove now unused delayed_irq handling.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c42
2 files changed, 6 insertions, 37 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9f47b3e013c7..1ec89d2864dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -383,7 +383,6 @@ struct amdgpu_fence_driver {
383 uint64_t sync_seq[AMDGPU_MAX_RINGS]; 383 uint64_t sync_seq[AMDGPU_MAX_RINGS];
384 atomic64_t last_seq; 384 atomic64_t last_seq;
385 bool initialized; 385 bool initialized;
386 bool delayed_irq;
387 struct amdgpu_irq_src *irq_src; 386 struct amdgpu_irq_src *irq_src;
388 unsigned irq_type; 387 unsigned irq_type;
389 struct delayed_work lockup_work; 388 struct delayed_work lockup_work;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index e3629c923862..b89dafec9ecf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -164,8 +164,6 @@ static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
164 else 164 else
165 FENCE_TRACE(&fence->base, "was already signaled\n"); 165 FENCE_TRACE(&fence->base, "was already signaled\n");
166 166
167 amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
168 fence->ring->fence_drv.irq_type);
169 __remove_wait_queue(&adev->fence_queue, &fence->fence_wake); 167 __remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
170 fence_put(&fence->base); 168 fence_put(&fence->base);
171 } else 169 } else
@@ -267,12 +265,6 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
267 return; 265 return;
268 } 266 }
269 267
270 if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
271 fence_drv->delayed_irq = false;
272 amdgpu_irq_update(ring->adev, fence_drv->irq_src,
273 fence_drv->irq_type);
274 }
275
276 if (amdgpu_fence_activity(ring)) 268 if (amdgpu_fence_activity(ring))
277 wake_up_all(&ring->adev->fence_queue); 269 wake_up_all(&ring->adev->fence_queue);
278 else if (amdgpu_ring_is_lockup(ring)) { 270 else if (amdgpu_ring_is_lockup(ring)) {
@@ -420,29 +412,6 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
420 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) 412 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
421 return false; 413 return false;
422 414
423 if (down_read_trylock(&adev->exclusive_lock)) {
424 amdgpu_irq_get(adev, ring->fence_drv.irq_src,
425 ring->fence_drv.irq_type);
426 if (amdgpu_fence_activity(ring))
427 wake_up_all_locked(&adev->fence_queue);
428
429 /* did fence get signaled after we enabled the sw irq? */
430 if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
431 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
432 ring->fence_drv.irq_type);
433 up_read(&adev->exclusive_lock);
434 return false;
435 }
436
437 up_read(&adev->exclusive_lock);
438 } else {
439 /* we're probably in a lockup, lets not fiddle too much */
440 if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
441 ring->fence_drv.irq_type))
442 ring->fence_drv.delayed_irq = true;
443 amdgpu_fence_schedule_check(ring);
444 }
445
446 fence->fence_wake.flags = 0; 415 fence->fence_wake.flags = 0;
447 fence->fence_wake.private = NULL; 416 fence->fence_wake.private = NULL;
448 fence->fence_wake.func = amdgpu_fence_check_signaled; 417 fence->fence_wake.func = amdgpu_fence_check_signaled;
@@ -541,8 +510,6 @@ static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
541 510
542 last_seq[i] = atomic64_read(&ring->fence_drv.last_seq); 511 last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
543 trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]); 512 trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
544 amdgpu_irq_get(adev, ring->fence_drv.irq_src,
545 ring->fence_drv.irq_type);
546 } 513 }
547 514
548 if (intr) { 515 if (intr) {
@@ -561,8 +528,6 @@ static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
561 if (!ring || !target_seq[i]) 528 if (!ring || !target_seq[i])
562 continue; 529 continue;
563 530
564 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
565 ring->fence_drv.irq_type);
566 trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]); 531 trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
567 } 532 }
568 533
@@ -901,9 +866,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
901 ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index; 866 ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
902 } 867 }
903 amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq)); 868 amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
904 ring->fence_drv.initialized = true; 869 amdgpu_irq_get(adev, irq_src, irq_type);
870
905 ring->fence_drv.irq_src = irq_src; 871 ring->fence_drv.irq_src = irq_src;
906 ring->fence_drv.irq_type = irq_type; 872 ring->fence_drv.irq_type = irq_type;
873 ring->fence_drv.initialized = true;
874
907 dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, " 875 dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
908 "cpu addr 0x%p\n", ring->idx, 876 "cpu addr 0x%p\n", ring->idx,
909 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr); 877 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
@@ -980,6 +948,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
980 amdgpu_fence_driver_force_completion(adev); 948 amdgpu_fence_driver_force_completion(adev);
981 } 949 }
982 wake_up_all(&adev->fence_queue); 950 wake_up_all(&adev->fence_queue);
951 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
952 ring->fence_drv.irq_type);
983 ring->fence_drv.initialized = false; 953 ring->fence_drv.initialized = false;
984 } 954 }
985 mutex_unlock(&adev->ring_lock); 955 mutex_unlock(&adev->ring_lock);