diff options
author | Christian König <christian.koenig@amd.com> | 2016-01-18 09:16:53 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2016-02-10 14:16:52 -0500 |
commit | 5907a0d8af71d17811be49f2c056b3a89660e188 (patch) | |
tree | 87e6ad30a3584af3d06aa2e3d781a6f66abfe80d /drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |
parent | 046c12c67b15018ab7ed2688a252475d5a8b9db1 (diff) |
drm/amdgpu: cleanup sync_seq handling
Not used any more without semaphores
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 94 |
1 file changed, 11 insertions, 83 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index cac03e743b58..988a32d578a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
@@ -107,7 +107,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, | |||
107 | if ((*fence) == NULL) { | 107 | if ((*fence) == NULL) { |
108 | return -ENOMEM; | 108 | return -ENOMEM; |
109 | } | 109 | } |
110 | (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx]; | 110 | (*fence)->seq = ++ring->fence_drv.sync_seq; |
111 | (*fence)->ring = ring; | 111 | (*fence)->ring = ring; |
112 | (*fence)->owner = owner; | 112 | (*fence)->owner = owner; |
113 | fence_init(&(*fence)->base, &amdgpu_fence_ops, | 113 | fence_init(&(*fence)->base, &amdgpu_fence_ops, |
@@ -171,7 +171,7 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring) | |||
171 | */ | 171 | */ |
172 | last_seq = atomic64_read(&ring->fence_drv.last_seq); | 172 | last_seq = atomic64_read(&ring->fence_drv.last_seq); |
173 | do { | 173 | do { |
174 | last_emitted = ring->fence_drv.sync_seq[ring->idx]; | 174 | last_emitted = ring->fence_drv.sync_seq; |
175 | seq = amdgpu_fence_read(ring); | 175 | seq = amdgpu_fence_read(ring); |
176 | seq |= last_seq & 0xffffffff00000000LL; | 176 | seq |= last_seq & 0xffffffff00000000LL; |
177 | if (seq < last_seq) { | 177 | if (seq < last_seq) { |
@@ -274,7 +274,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) | |||
274 | bool signaled = false; | 274 | bool signaled = false; |
275 | 275 | ||
276 | BUG_ON(!ring); | 276 | BUG_ON(!ring); |
277 | if (seq > ring->fence_drv.sync_seq[ring->idx]) | 277 | if (seq > ring->fence_drv.sync_seq) |
278 | return -EINVAL; | 278 | return -EINVAL; |
279 | 279 | ||
280 | if (atomic64_read(&ring->fence_drv.last_seq) >= seq) | 280 | if (atomic64_read(&ring->fence_drv.last_seq) >= seq) |
@@ -304,7 +304,7 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring) | |||
304 | { | 304 | { |
305 | uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; | 305 | uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; |
306 | 306 | ||
307 | if (seq >= ring->fence_drv.sync_seq[ring->idx]) | 307 | if (seq >= ring->fence_drv.sync_seq) |
308 | return -ENOENT; | 308 | return -ENOENT; |
309 | 309 | ||
310 | return amdgpu_fence_ring_wait_seq(ring, seq); | 310 | return amdgpu_fence_ring_wait_seq(ring, seq); |
@@ -322,7 +322,7 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring) | |||
322 | */ | 322 | */ |
323 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) | 323 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) |
324 | { | 324 | { |
325 | uint64_t seq = ring->fence_drv.sync_seq[ring->idx]; | 325 | uint64_t seq = ring->fence_drv.sync_seq; |
326 | 326 | ||
327 | if (!seq) | 327 | if (!seq) |
328 | return 0; | 328 | return 0; |
@@ -347,7 +347,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) | |||
347 | * but it's ok to report slightly wrong fence count here. | 347 | * but it's ok to report slightly wrong fence count here. |
348 | */ | 348 | */ |
349 | amdgpu_fence_process(ring); | 349 | amdgpu_fence_process(ring); |
350 | emitted = ring->fence_drv.sync_seq[ring->idx] | 350 | emitted = ring->fence_drv.sync_seq |
351 | - atomic64_read(&ring->fence_drv.last_seq); | 351 | - atomic64_read(&ring->fence_drv.last_seq); |
352 | /* to avoid 32bits warp around */ | 352 | /* to avoid 32bits warp around */ |
353 | if (emitted > 0x10000000) | 353 | if (emitted > 0x10000000) |
@@ -357,68 +357,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) | |||
357 | } | 357 | } |
358 | 358 | ||
359 | /** | 359 | /** |
360 | * amdgpu_fence_need_sync - do we need a semaphore | ||
361 | * | ||
362 | * @fence: amdgpu fence object | ||
363 | * @dst_ring: which ring to check against | ||
364 | * | ||
365 | * Check if the fence needs to be synced against another ring | ||
366 | * (all asics). If so, we need to emit a semaphore. | ||
367 | * Returns true if we need to sync with another ring, false if | ||
368 | * not. | ||
369 | */ | ||
370 | bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, | ||
371 | struct amdgpu_ring *dst_ring) | ||
372 | { | ||
373 | struct amdgpu_fence_driver *fdrv; | ||
374 | |||
375 | if (!fence) | ||
376 | return false; | ||
377 | |||
378 | if (fence->ring == dst_ring) | ||
379 | return false; | ||
380 | |||
381 | /* we are protected by the ring mutex */ | ||
382 | fdrv = &dst_ring->fence_drv; | ||
383 | if (fence->seq <= fdrv->sync_seq[fence->ring->idx]) | ||
384 | return false; | ||
385 | |||
386 | return true; | ||
387 | } | ||
388 | |||
389 | /** | ||
390 | * amdgpu_fence_note_sync - record the sync point | ||
391 | * | ||
392 | * @fence: amdgpu fence object | ||
393 | * @dst_ring: which ring to check against | ||
394 | * | ||
395 | * Note the sequence number at which point the fence will | ||
396 | * be synced with the requested ring (all asics). | ||
397 | */ | ||
398 | void amdgpu_fence_note_sync(struct amdgpu_fence *fence, | ||
399 | struct amdgpu_ring *dst_ring) | ||
400 | { | ||
401 | struct amdgpu_fence_driver *dst, *src; | ||
402 | unsigned i; | ||
403 | |||
404 | if (!fence) | ||
405 | return; | ||
406 | |||
407 | if (fence->ring == dst_ring) | ||
408 | return; | ||
409 | |||
410 | /* we are protected by the ring mutex */ | ||
411 | src = &fence->ring->fence_drv; | ||
412 | dst = &dst_ring->fence_drv; | ||
413 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | ||
414 | if (i == dst_ring->idx) | ||
415 | continue; | ||
416 | |||
417 | dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]); | ||
418 | } | ||
419 | } | ||
420 | |||
421 | /** | ||
422 | * amdgpu_fence_driver_start_ring - make the fence driver | 360 | * amdgpu_fence_driver_start_ring - make the fence driver |
423 | * ready for use on the requested ring. | 361 | * ready for use on the requested ring. |
424 | * | 362 | * |
@@ -471,14 +409,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | |||
471 | */ | 409 | */ |
472 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) | 410 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) |
473 | { | 411 | { |
474 | int i, r; | ||
475 | long timeout; | 412 | long timeout; |
413 | int r; | ||
476 | 414 | ||
477 | ring->fence_drv.cpu_addr = NULL; | 415 | ring->fence_drv.cpu_addr = NULL; |
478 | ring->fence_drv.gpu_addr = 0; | 416 | ring->fence_drv.gpu_addr = 0; |
479 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | 417 | ring->fence_drv.sync_seq = 0; |
480 | ring->fence_drv.sync_seq[i] = 0; | ||
481 | |||
482 | atomic64_set(&ring->fence_drv.last_seq, 0); | 418 | atomic64_set(&ring->fence_drv.last_seq, 0); |
483 | ring->fence_drv.initialized = false; | 419 | ring->fence_drv.initialized = false; |
484 | 420 | ||
@@ -650,7 +586,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev) | |||
650 | if (!ring || !ring->fence_drv.initialized) | 586 | if (!ring || !ring->fence_drv.initialized) |
651 | continue; | 587 | continue; |
652 | 588 | ||
653 | amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]); | 589 | amdgpu_fence_write(ring, ring->fence_drv.sync_seq); |
654 | } | 590 | } |
655 | } | 591 | } |
656 | 592 | ||
@@ -780,7 +716,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) | |||
780 | struct drm_info_node *node = (struct drm_info_node *)m->private; | 716 | struct drm_info_node *node = (struct drm_info_node *)m->private; |
781 | struct drm_device *dev = node->minor->dev; | 717 | struct drm_device *dev = node->minor->dev; |
782 | struct amdgpu_device *adev = dev->dev_private; | 718 | struct amdgpu_device *adev = dev->dev_private; |
783 | int i, j; | 719 | int i; |
784 | 720 | ||
785 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 721 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
786 | struct amdgpu_ring *ring = adev->rings[i]; | 722 | struct amdgpu_ring *ring = adev->rings[i]; |
@@ -793,15 +729,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) | |||
793 | seq_printf(m, "Last signaled fence 0x%016llx\n", | 729 | seq_printf(m, "Last signaled fence 0x%016llx\n", |
794 | (unsigned long long)atomic64_read(&ring->fence_drv.last_seq)); | 730 | (unsigned long long)atomic64_read(&ring->fence_drv.last_seq)); |
795 | seq_printf(m, "Last emitted 0x%016llx\n", | 731 | seq_printf(m, "Last emitted 0x%016llx\n", |
796 | ring->fence_drv.sync_seq[i]); | 732 | ring->fence_drv.sync_seq); |
797 | |||
798 | for (j = 0; j < AMDGPU_MAX_RINGS; ++j) { | ||
799 | struct amdgpu_ring *other = adev->rings[j]; | ||
800 | if (i != j && other && other->fence_drv.initialized && | ||
801 | ring->fence_drv.sync_seq[j]) | ||
802 | seq_printf(m, "Last sync to ring %d 0x%016llx\n", | ||
803 | j, ring->fence_drv.sync_seq[j]); | ||
804 | } | ||
805 | } | 733 | } |
806 | return 0; | 734 | return 0; |
807 | } | 735 | } |