Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c   47
1 file changed, 31 insertions, 16 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 74dad270362c..e90712443fe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
 		INIT_LIST_HEAD(&sa_manager->flist[i]);
 	}
 
-	r = amdgpu_bo_create(adev, size, align, true,
-			     domain, 0, NULL, &sa_manager->bo);
+	r = amdgpu_bo_create(adev, size, align, true, domain,
+			     0, NULL, NULL, &sa_manager->bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
 		return r;
@@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
 	struct amd_sched_fence *s_fence;
 
 	s_fence = to_amd_sched_fence(f);
-	if (s_fence)
-		return s_fence->scheduler->ring_id;
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		return ring->idx;
+	}
+
 	a_fence = to_amdgpu_fence(f);
 	if (a_fence)
 		return a_fence->ring->idx;
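The hunk above drops the removed scheduler->ring_id lookup: instead of asking the scheduler for a ring id, it maps the fence's scheduler pointer back to the amdgpu_ring that embeds that scheduler and reads ring->idx. A minimal, self-contained sketch of the container_of() idiom it relies on (placeholder struct names, not the real amdgpu definitions):

	/* Sketch of the container_of() idiom; "ring"/"scheduler" are
	 * placeholders, not the actual amdgpu structures. */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct scheduler { int dummy; };

	struct ring {
		unsigned int idx;
		struct scheduler sched;	/* embedded member */
	};

	int main(void)
	{
		struct ring r = { .idx = 3 };
		struct scheduler *s = &r.sched;	/* only the member pointer is known */
		struct ring *owner = container_of(s, struct ring, sched);

		printf("ring idx = %u\n", owner->idx);	/* prints 3 */
		return 0;
	}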
@@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 }
 
 #if defined(CONFIG_DEBUG_FS)
+
+static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
+
+	if (a_fence)
+		seq_printf(m, " protected by 0x%016llx on ring %d",
+			   a_fence->seq, a_fence->ring->idx);
+
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		seq_printf(m, " protected by 0x%016x on ring %d",
+			   s_fence->base.seqno, ring->idx);
+	}
+}
+
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 				  struct seq_file *m)
 {
@@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 		}
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
-		if (i->fence) {
-			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
-			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
-			if (a_fence)
-				seq_printf(m, " protected by 0x%016llx on ring %d",
-					   a_fence->seq, a_fence->ring->idx);
-			if (s_fence)
-				seq_printf(m, " protected by 0x%016x on ring %d",
-					   s_fence->base.seqno,
-					   s_fence->scheduler->ring_id);
-
-		}
+		if (i->fence)
+			amdgpu_sa_bo_dump_fence(i->fence, m);
 		seq_printf(m, "\n");
 	}
 	spin_unlock(&sa_manager->wq.lock);
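The last two hunks are a straight refactor: the per-fence printing that was open-coded in the debugfs dump loop moves into the new static helper amdgpu_sa_bo_dump_fence(), so each list entry is handled by a single call. A rough sketch of that shape, with illustrative names (only seq_printf() is the real kernel API):

	#include <linux/types.h>
	#include <linux/seq_file.h>

	/* Illustrative stand-in for a fence; not the amdgpu definition. */
	struct demo_fence {
		u64 seqno;
		int ring_idx;
	};

	/* One helper knows how to describe a fence... */
	static void demo_dump_fence(struct demo_fence *fence, struct seq_file *m)
	{
		seq_printf(m, " protected by 0x%016llx on ring %d",
			   fence->seqno, fence->ring_idx);
	}

	/* ...so the dump loop stays a one-liner per entry. */
	static void demo_dump_entry(struct seq_file *m, struct demo_fence *fence,
				    u64 soffset, u64 eoffset)
	{
		seq_printf(m, "[0x%010llx 0x%010llx] size %8llu",
			   soffset, eoffset, eoffset - soffset);
		if (fence)
			demo_dump_fence(fence, m);
		seq_printf(m, "\n");
	}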