author		Christian König <christian.koenig@amd.com>	2016-03-11 08:50:08 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2016-03-14 13:43:27 -0400
commit		6ba60b891cf82ae5fd2634badaa2d6752837def6 (patch)
tree		70ac872e5d36aa7295186529b5ed95f8d4c7d6b4 /drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
parent		f4247c5046cdea4e166620f41c0bd3b475e98fff (diff)
drm/amdgpu: stop using the ring index in the SA
The ring index will always collide when used as a hash into the fence
list, so use the context number instead. That can still cause
collisions, but they are less likely than with ring indices.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
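As an aside, a minimal sketch of the hashing argument above, not part of
the patch: a ring index is small and fixed, so every fence from a given
ring lands in the same bucket, while fence context numbers come from an
ever-increasing counter and therefore spread across buckets. The value
32 for AMDGPU_SA_NUM_FENCE_LISTS is assumed here (the real definition
lives in amdgpu.h), and the sample context numbers are made up.

#include <stdio.h>

/* Assumed value for this sketch; the real constant is defined in amdgpu.h. */
#define AMDGPU_SA_NUM_FENCE_LISTS 32

int main(void)
{
	/* Old scheme: the ring index is the bucket, so every fence
	 * from ring 0 piles into bucket 0 -- collisions are certain. */
	unsigned ring_idx = 0;
	printf("ring %u -> bucket %u (always)\n", ring_idx, ring_idx);

	/* New scheme: contexts increase monotonically, so consecutive
	 * contexts land in different buckets; collisions still happen
	 * (100 and 132 both map to bucket 4), just less often. */
	unsigned long long contexts[] = { 100, 101, 102, 132 };
	for (int i = 0; i < 4; ++i)
		printf("context %llu -> bucket %llu\n", contexts[i],
		       contexts[i] % AMDGPU_SA_NUM_FENCE_LISTS);
	return 0;
}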
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c	53
1 file changed, 18 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 7d8f8f1e3f7f..476a8ce28bed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -60,9 +60,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
 	sa_manager->align = align;
 	sa_manager->hole = &sa_manager->olist;
 	INIT_LIST_HEAD(&sa_manager->olist);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
 		INIT_LIST_HEAD(&sa_manager->flist[i]);
-	}
 
 	r = amdgpu_bo_create(adev, size, align, true, domain,
 			     0, NULL, NULL, &sa_manager->bo);
@@ -228,11 +227,9 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 	unsigned soffset, eoffset, wasted;
 	int i;
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		if (!list_empty(&sa_manager->flist[i])) {
+	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
+		if (!list_empty(&sa_manager->flist[i]))
 			return true;
-		}
-	}
 
 	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
 	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
@@ -265,12 +262,11 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 	/* go over all fence list and try to find the closest sa_bo
 	 * of the current last
 	 */
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
 		struct amdgpu_sa_bo *sa_bo;
 
-		if (list_empty(&sa_manager->flist[i])) {
+		if (list_empty(&sa_manager->flist[i]))
 			continue;
-		}
 
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					 struct amdgpu_sa_bo, flist);
@@ -299,7 +295,9 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 	}
 
 	if (best_bo) {
-		uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
+		uint32_t idx = best_bo->fence->context;
+
+		idx %= AMDGPU_SA_NUM_FENCE_LISTS;
 		++tries[idx];
 		sa_manager->hole = best_bo->olist.prev;
 
@@ -315,8 +313,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct fence *fences[AMDGPU_MAX_RINGS];
-	unsigned tries[AMDGPU_MAX_RINGS];
+	struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
 	unsigned count;
 	int i, r;
 	signed long t;
@@ -338,7 +336,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 
 	spin_lock(&sa_manager->wq.lock);
 	do {
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
 			fences[i] = NULL;
 			tries[i] = 0;
 		}
@@ -355,7 +353,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		/* see if we can skip over some allocations */
 	} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
-	for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
+	for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
 		if (fences[i])
 			fences[count++] = fences[i];
 
@@ -394,8 +392,9 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 	spin_lock(&sa_manager->wq.lock);
 	if (fence && !fence_is_signaled(fence)) {
 		uint32_t idx;
+
 		(*sa_bo)->fence = fence_get(fence);
-		idx = amdgpu_ring_from_fence(fence)->idx;
+		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
 		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
 		amdgpu_sa_bo_remove_locked(*sa_bo);
@@ -407,25 +406,6 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
 #if defined(CONFIG_DEBUG_FS)
 
-static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
-{
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
-	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
-
-	if (a_fence)
-		seq_printf(m, " protected by 0x%016llx on ring %d",
-			   a_fence->seq, a_fence->ring->idx);
-
-	if (s_fence) {
-		struct amdgpu_ring *ring;
-
-
-		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
-		seq_printf(m, " protected by 0x%016x on ring %d",
-			   s_fence->base.seqno, ring->idx);
-	}
-}
-
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 				  struct seq_file *m)
 {
@@ -442,8 +422,11 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 		}
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
+
 		if (i->fence)
-			amdgpu_sa_bo_dump_fence(i->fence, m);
+			seq_printf(m, " protected by 0x%08x on context %d",
+				   i->fence->seqno, i->fence->context);
+
 		seq_printf(m, "\n");
 	}
 	spin_unlock(&sa_manager->wq.lock);