author		Chunming Zhou <david1.zhou@amd.com>	2015-08-19 04:41:19 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2015-08-25 10:38:41 -0400
commit		4ce9891ee17c6e064cc334e3297f7e992d47f3a6 (patch)
tree		402cc36ef6e4db0fc0158ae9cdcee6e9e7ff2445 /drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
parent		1aa4051b7f5474cca6009c13868c59d78d06f983 (diff)
drm/amdgpu: improve sa_bo->fence by kernel fence
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c	50
1 file changed, 37 insertions(+), 13 deletions(-)
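
The conversion relies on the new amdgpu_sa_get_ring_from_fence() helper being able to tell which backend a plain struct fence came from: to_amdgpu_fence() and to_amd_sched_fence() return NULL when the fence was not created by the corresponding backend. A minimal sketch of that pattern, assuming the usual fence-ops pointer check (the real helpers live in amdgpu.h and the scheduler headers and may differ in detail):

	static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
	{
		struct amdgpu_fence *a_fence = container_of(f, struct amdgpu_fence, base);

		/* only fences created by the amdgpu fence code carry these ops */
		if (f->ops != &amdgpu_fence_ops)
			return NULL;
		return a_fence;
	}

With a backend-agnostic struct fence stored in struct amdgpu_sa_bo, reference counting switches from amdgpu_fence_ref()/amdgpu_fence_unref() to the generic fence_get()/fence_put(), and signaling is checked with fence_is_signaled() directly, as the diff below shows.
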
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 4597899e9758..b7cbaa9d532e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -139,6 +139,20 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
 	return r;
 }
 
+static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
+{
+	struct amdgpu_fence *a_fence;
+	struct amd_sched_fence *s_fence;
+
+	s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return s_fence->entity->scheduler->ring_id;
+	a_fence = to_amdgpu_fence(f);
+	if (a_fence)
+		return a_fence->ring->idx;
+	return 0;
+}
+
 static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 {
 	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
@@ -147,7 +161,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 	}
 	list_del_init(&sa_bo->olist);
 	list_del_init(&sa_bo->flist);
-	amdgpu_fence_unref(&sa_bo->fence);
+	fence_put(sa_bo->fence);
 	kfree(sa_bo);
 }
 
@@ -161,7 +175,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
 	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
 	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
 		if (sa_bo->fence == NULL ||
-		    !fence_is_signaled(&sa_bo->fence->base)) {
+		    !fence_is_signaled(sa_bo->fence)) {
 			return;
 		}
 		amdgpu_sa_bo_remove_locked(sa_bo);
@@ -246,7 +260,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-				   struct amdgpu_fence **fences,
+				   struct fence **fences,
 				   unsigned *tries)
 {
 	struct amdgpu_sa_bo *best_bo = NULL;
@@ -275,7 +289,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					 struct amdgpu_sa_bo, flist);
 
-		if (!fence_is_signaled(&sa_bo->fence->base)) {
+		if (!fence_is_signaled(sa_bo->fence)) {
 			fences[i] = sa_bo->fence;
 			continue;
 		}
@@ -299,7 +313,8 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 	}
 
 	if (best_bo) {
-		++tries[best_bo->fence->ring->idx];
+		uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
+		++tries[idx];
 		sa_manager->hole = best_bo->olist.prev;
 
 		/* we knew that this one is signaled,
@@ -315,7 +330,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
+	struct fence *fences[AMDGPU_MAX_RINGS];
 	unsigned tries[AMDGPU_MAX_RINGS];
 	int i, r;
 	signed long t;
@@ -373,7 +388,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-		       struct amdgpu_fence *fence)
+		       struct fence *fence)
 {
 	struct amdgpu_sa_manager *sa_manager;
 
@@ -383,10 +398,11 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
 	sa_manager = (*sa_bo)->manager;
 	spin_lock(&sa_manager->wq.lock);
-	if (fence && !fence_is_signaled(&fence->base)) {
-		(*sa_bo)->fence = amdgpu_fence_ref(fence);
-		list_add_tail(&(*sa_bo)->flist,
-			      &sa_manager->flist[fence->ring->idx]);
+	if (fence && !fence_is_signaled(fence)) {
+		uint32_t idx;
+		(*sa_bo)->fence = fence_get(fence);
+		idx = amdgpu_sa_get_ring_from_fence(fence);
+		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
 		amdgpu_sa_bo_remove_locked(*sa_bo);
 	}
@@ -413,8 +429,16 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
 		if (i->fence) {
-			seq_printf(m, " protected by 0x%016llx on ring %d",
-				   i->fence->seq, i->fence->ring->idx);
+			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
+			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
+			if (a_fence)
+				seq_printf(m, " protected by 0x%016llx on ring %d",
+					   a_fence->seq, a_fence->ring->idx);
+			if (s_fence)
+				seq_printf(m, " protected by 0x%016llx on ring %d",
+					   s_fence->v_seq,
+					   s_fence->entity->scheduler->ring_id);
+
 		}
 		seq_printf(m, "\n");
 	}