about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2015-11-03 07:27:39 -0500
committerAlex Deucher <alexander.deucher@amd.com>2015-11-16 11:05:47 -0500
commitc2776afe740db5598c4c457dcacb94d4427b13f9 (patch)
treefa24ef61e23de9c5ca6a8e27ea6c35db3ee9a2ce /drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
parent935c186aaecc25258495351adaba34f7c507d298 (diff)
drm/amdgpu: use a timer for fence fallback
Less overhead than a work item and also adds proper cleanup handling. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Chunming Zhou <david1.zhou@amd.com> Acked-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c78
1 file changed, 34 insertions, 44 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index b92c1937543a..257fce356319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -85,24 +85,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
85} 85}
86 86
87/** 87/**
88 * amdgpu_fence_schedule_check - schedule lockup check
89 *
90 * @ring: pointer to struct amdgpu_ring
91 *
92 * Queues a delayed work item to check for lockups.
93 */
94static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
95{
96 /*
97 * Do not reset the timer here with mod_delayed_work,
98 * this can livelock in an interaction with TTM delayed destroy.
99 */
100 queue_delayed_work(system_power_efficient_wq,
101 &ring->fence_drv.lockup_work,
102 AMDGPU_FENCE_JIFFIES_TIMEOUT);
103}
104
105/**
106 * amdgpu_fence_emit - emit a fence on the requested ring 88 * amdgpu_fence_emit - emit a fence on the requested ring
107 * 89 *
108 * @ring: ring the fence is associated with 90 * @ring: ring the fence is associated with
@@ -136,6 +118,19 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
136} 118}
137 119
138/** 120/**
121 * amdgpu_fence_schedule_fallback - schedule fallback check
122 *
123 * @ring: pointer to struct amdgpu_ring
124 *
125 * Start a timer as fallback to our interrupts.
126 */
127static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
128{
129 mod_timer(&ring->fence_drv.fallback_timer,
130 jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
131}
132
133/**
139 * amdgpu_fence_activity - check for fence activity 134 * amdgpu_fence_activity - check for fence activity
140 * 135 *
141 * @ring: pointer to struct amdgpu_ring 136 * @ring: pointer to struct amdgpu_ring
@@ -201,45 +196,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
201 } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); 196 } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
202 197
203 if (seq < last_emitted) 198 if (seq < last_emitted)
204 amdgpu_fence_schedule_check(ring); 199 amdgpu_fence_schedule_fallback(ring);
205 200
206 return wake; 201 return wake;
207} 202}
208 203
209/** 204/**
210 * amdgpu_fence_check_lockup - check for hardware lockup 205 * amdgpu_fence_process - process a fence
211 * 206 *
212 * @work: delayed work item 207 * @adev: amdgpu_device pointer
208 * @ring: ring index the fence is associated with
213 * 209 *
214 * Checks for fence activity and if there is none probe 210 * Checks the current fence value and wakes the fence queue
215 * the hardware if a lockup occured. 211 * if the sequence number has increased (all asics).
216 */ 212 */
217static void amdgpu_fence_check_lockup(struct work_struct *work) 213void amdgpu_fence_process(struct amdgpu_ring *ring)
218{ 214{
219 struct amdgpu_fence_driver *fence_drv;
220 struct amdgpu_ring *ring;
221
222 fence_drv = container_of(work, struct amdgpu_fence_driver,
223 lockup_work.work);
224 ring = fence_drv->ring;
225
226 if (amdgpu_fence_activity(ring)) 215 if (amdgpu_fence_activity(ring))
227 wake_up_all(&ring->fence_drv.fence_queue); 216 wake_up_all(&ring->fence_drv.fence_queue);
228} 217}
229 218
230/** 219/**
231 * amdgpu_fence_process - process a fence 220 * amdgpu_fence_fallback - fallback for hardware interrupts
232 * 221 *
233 * @adev: amdgpu_device pointer 222 * @work: delayed work item
234 * @ring: ring index the fence is associated with
235 * 223 *
236 * Checks the current fence value and wakes the fence queue 224 * Checks for fence activity.
237 * if the sequence number has increased (all asics).
238 */ 225 */
239void amdgpu_fence_process(struct amdgpu_ring *ring) 226static void amdgpu_fence_fallback(unsigned long arg)
240{ 227{
241 if (amdgpu_fence_activity(ring)) 228 struct amdgpu_ring *ring = (void *)arg;
242 wake_up_all(&ring->fence_drv.fence_queue); 229
230 amdgpu_fence_process(ring);
243} 231}
244 232
245/** 233/**
@@ -289,7 +277,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
289 if (atomic64_read(&ring->fence_drv.last_seq) >= seq) 277 if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
290 return 0; 278 return 0;
291 279
292 amdgpu_fence_schedule_check(ring); 280 amdgpu_fence_schedule_fallback(ring);
293 wait_event(ring->fence_drv.fence_queue, ( 281 wait_event(ring->fence_drv.fence_queue, (
294 (signaled = amdgpu_fence_seq_signaled(ring, seq)))); 282 (signaled = amdgpu_fence_seq_signaled(ring, seq))));
295 283
@@ -490,9 +478,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
490 atomic64_set(&ring->fence_drv.last_seq, 0); 478 atomic64_set(&ring->fence_drv.last_seq, 0);
491 ring->fence_drv.initialized = false; 479 ring->fence_drv.initialized = false;
492 480
493 INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, 481 setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
494 amdgpu_fence_check_lockup); 482 (unsigned long)ring);
495 ring->fence_drv.ring = ring;
496 483
497 init_waitqueue_head(&ring->fence_drv.fence_queue); 484 init_waitqueue_head(&ring->fence_drv.fence_queue);
498 485
@@ -556,6 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
556 mutex_lock(&adev->ring_lock); 543 mutex_lock(&adev->ring_lock);
557 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 544 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
558 struct amdgpu_ring *ring = adev->rings[i]; 545 struct amdgpu_ring *ring = adev->rings[i];
546
559 if (!ring || !ring->fence_drv.initialized) 547 if (!ring || !ring->fence_drv.initialized)
560 continue; 548 continue;
561 r = amdgpu_fence_wait_empty(ring); 549 r = amdgpu_fence_wait_empty(ring);
@@ -567,6 +555,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
567 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 555 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
568 ring->fence_drv.irq_type); 556 ring->fence_drv.irq_type);
569 amd_sched_fini(&ring->sched); 557 amd_sched_fini(&ring->sched);
558 del_timer_sync(&ring->fence_drv.fallback_timer);
570 ring->fence_drv.initialized = false; 559 ring->fence_drv.initialized = false;
571 } 560 }
572 mutex_unlock(&adev->ring_lock); 561 mutex_unlock(&adev->ring_lock);
@@ -750,7 +739,8 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
750 fence->fence_wake.func = amdgpu_fence_check_signaled; 739 fence->fence_wake.func = amdgpu_fence_check_signaled;
751 __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); 740 __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
752 fence_get(f); 741 fence_get(f);
753 amdgpu_fence_schedule_check(ring); 742 if (!timer_pending(&ring->fence_drv.fallback_timer))
743 amdgpu_fence_schedule_fallback(ring);
754 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); 744 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
755 return true; 745 return true;
756} 746}