author     Mark Brown <broonie@kernel.org>   2015-10-12 13:09:27 -0400
committer  Mark Brown <broonie@kernel.org>   2015-10-12 13:09:27 -0400
commit     79828b4fa835f73cdaf4bffa48696abdcbea9d02 (patch)
tree       5e0fa7156acb75ba603022bc807df8f2fedb97a8 /drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
parent     721b51fcf91898299d96f4b72cb9434cda29dce6 (diff)
parent     8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a (diff)

Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c')
 -rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c   56
 1 file changed, 42 insertions, 14 deletions
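The diff below converts the amdgpu sub-allocator (amdgpu_sa.c) from the driver-private struct amdgpu_fence to the kernel's generic struct fence interface: amdgpu_fence_unref()/amdgpu_fence_ref() and amdgpu_fence_signaled() are replaced by fence_put(), fence_get() and fence_is_signaled(), amdgpu_fence_wait_any() now takes an explicit fence count and timeout, and a new helper, amdgpu_sa_get_ring_from_fence(), recovers the ring index from either an amd_sched_fence or a legacy amdgpu_fence. A hedged caller-side sketch follows the diff.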
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index eb20987ce18d..74dad270362c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -139,6 +139,20 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
 	return r;
 }
 
+static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
+{
+	struct amdgpu_fence *a_fence;
+	struct amd_sched_fence *s_fence;
+
+	s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return s_fence->scheduler->ring_id;
+	a_fence = to_amdgpu_fence(f);
+	if (a_fence)
+		return a_fence->ring->idx;
+	return 0;
+}
+
 static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 {
 	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
@@ -147,7 +161,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 	}
 	list_del_init(&sa_bo->olist);
 	list_del_init(&sa_bo->flist);
-	amdgpu_fence_unref(&sa_bo->fence);
+	fence_put(sa_bo->fence);
 	kfree(sa_bo);
 }
 
@@ -160,7 +174,8 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
 
 	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
 	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
-		if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) {
+		if (sa_bo->fence == NULL ||
+		    !fence_is_signaled(sa_bo->fence)) {
 			return;
 		}
 		amdgpu_sa_bo_remove_locked(sa_bo);
@@ -245,7 +260,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-				   struct amdgpu_fence **fences,
+				   struct fence **fences,
 				   unsigned *tries)
 {
 	struct amdgpu_sa_bo *best_bo = NULL;
@@ -274,7 +289,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					  struct amdgpu_sa_bo, flist);
 
-		if (!amdgpu_fence_signaled(sa_bo->fence)) {
+		if (!fence_is_signaled(sa_bo->fence)) {
 			fences[i] = sa_bo->fence;
 			continue;
 		}
@@ -298,7 +313,8 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 	}
 
 	if (best_bo) {
-		++tries[best_bo->fence->ring->idx];
+		uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
+		++tries[idx];
 		sa_manager->hole = best_bo->olist.prev;
 
 		/* we knew that this one is signaled,
@@ -314,9 +330,10 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
+	struct fence *fences[AMDGPU_MAX_RINGS];
 	unsigned tries[AMDGPU_MAX_RINGS];
 	int i, r;
+	signed long t;
 
 	BUG_ON(align > sa_manager->align);
 	BUG_ON(size > sa_manager->size);
@@ -350,7 +367,9 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 	} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
 	spin_unlock(&sa_manager->wq.lock);
-	r = amdgpu_fence_wait_any(adev, fences, false);
+	t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
+				  false, MAX_SCHEDULE_TIMEOUT);
+	r = (t > 0) ? 0 : t;
 	spin_lock(&sa_manager->wq.lock);
 	/* if we have nothing to wait for block */
 	if (r == -ENOENT) {
@@ -369,7 +388,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-		       struct amdgpu_fence *fence)
+		       struct fence *fence)
 {
 	struct amdgpu_sa_manager *sa_manager;
 
@@ -379,10 +398,11 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
 	sa_manager = (*sa_bo)->manager;
 	spin_lock(&sa_manager->wq.lock);
-	if (fence && !amdgpu_fence_signaled(fence)) {
-		(*sa_bo)->fence = amdgpu_fence_ref(fence);
-		list_add_tail(&(*sa_bo)->flist,
-			      &sa_manager->flist[fence->ring->idx]);
+	if (fence && !fence_is_signaled(fence)) {
+		uint32_t idx;
+		(*sa_bo)->fence = fence_get(fence);
+		idx = amdgpu_sa_get_ring_from_fence(fence);
+		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
 		amdgpu_sa_bo_remove_locked(*sa_bo);
 	}
@@ -409,8 +429,16 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
 		if (i->fence) {
-			seq_printf(m, " protected by 0x%016llx on ring %d",
-				   i->fence->seq, i->fence->ring->idx);
+			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
+			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
+			if (a_fence)
+				seq_printf(m, " protected by 0x%016llx on ring %d",
+					   a_fence->seq, a_fence->ring->idx);
+			if (s_fence)
+				seq_printf(m, " protected by 0x%016x on ring %d",
+					   s_fence->base.seqno,
+					   s_fence->scheduler->ring_id);
+
 		}
 		seq_printf(m, "\n");
 	}
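For orientation, here is a minimal caller-side sketch of the allocate/free pattern the converted sub-allocator expects. It is not part of the commit: example_emit_work() and the example_suballoc_roundtrip() wrapper are hypothetical stand-ins; the amdgpu_sa_bo_new()/amdgpu_sa_bo_free() signatures are taken from the hunk context above, with the sa_manager parameter (which the function bodies reference) assumed to sit between adev and sa_bo; fence_get(), fence_put() and fence_is_signaled() are the generic fence helpers the diff switches to.

/* Hypothetical caller sketch -- not from the commit above. */

/* Assumed helper that submits work using the buffer and returns the
 * fence protecting it; any struct fence works after this change,
 * whether it is an amd_sched_fence or a legacy amdgpu_fence. */
static struct fence *example_emit_work(struct amdgpu_device *adev,
				       struct amdgpu_sa_bo *sa_bo);

static int example_suballoc_roundtrip(struct amdgpu_device *adev,
				      struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo;
	struct fence *f;
	int r;

	/* Carve 256 bytes, 4-byte aligned, out of the sub-allocator. */
	r = amdgpu_sa_bo_new(adev, sa_manager, &sa_bo, 256, 4);
	if (r)
		return r;

	f = example_emit_work(adev, sa_bo);

	/*
	 * Per the diff, amdgpu_sa_bo_free() takes its own reference with
	 * fence_get() and keeps the allocation on the per-ring flist until
	 * the fence signals, so the caller can drop its reference here.
	 */
	amdgpu_sa_bo_free(adev, &sa_bo, f);
	fence_put(f);

	return 0;
}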