path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
author     Chris Wilson <chris@chris-wilson.co.uk>    2016-10-25 08:00:45 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2016-10-25 08:40:39 -0400
commit     f54d1867005c3323f5d8ad83eed823e84226c429 (patch)
tree       026c3f57bc546d3a0205389d0f8e0d02ce8a76ac /drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
parent     0fc4f78f44e6c6148cee32456f0d0023ec1c1fd8 (diff)
dma-buf: Rename struct fence to dma_fence
I plan to usurp the short name of struct fence for a core kernel struct,
and so I need to rename the specialised fence/timeline for DMA operations
to make room.

A consensus was reached in
https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html
that making clear this fence applies to DMA operations was a good thing.
Since then the patch has grown a bit as usage increases, so hopefully it
remains a good thing!

v2: rebase, rerun spatch
v3: Compile on msm, spotted a manual fixup that I broke.
v4: Try again for msm, sorry Daniel

coccinelle script:
@@
@@
- struct fence
+ struct dma_fence
@@
@@
- struct fence_ops
+ struct dma_fence_ops
@@
@@
- struct fence_cb
+ struct dma_fence_cb
@@
@@
- struct fence_array
+ struct dma_fence_array
@@
@@
- enum fence_flag_bits
+ enum dma_fence_flag_bits
@@
@@
(
- fence_init
+ dma_fence_init
|
- fence_release
+ dma_fence_release
|
- fence_free
+ dma_fence_free
|
- fence_get
+ dma_fence_get
|
- fence_get_rcu
+ dma_fence_get_rcu
|
- fence_put
+ dma_fence_put
|
- fence_signal
+ dma_fence_signal
|
- fence_signal_locked
+ dma_fence_signal_locked
|
- fence_default_wait
+ dma_fence_default_wait
|
- fence_add_callback
+ dma_fence_add_callback
|
- fence_remove_callback
+ dma_fence_remove_callback
|
- fence_enable_sw_signaling
+ dma_fence_enable_sw_signaling
|
- fence_is_signaled_locked
+ dma_fence_is_signaled_locked
|
- fence_is_signaled
+ dma_fence_is_signaled
|
- fence_is_later
+ dma_fence_is_later
|
- fence_later
+ dma_fence_later
|
- fence_wait_timeout
+ dma_fence_wait_timeout
|
- fence_wait_any_timeout
+ dma_fence_wait_any_timeout
|
- fence_wait
+ dma_fence_wait
|
- fence_context_alloc
+ dma_fence_context_alloc
|
- fence_array_create
+ dma_fence_array_create
|
- to_fence_array
+ to_dma_fence_array
|
- fence_is_array
+ dma_fence_is_array
|
- trace_fence_emit
+ trace_dma_fence_emit
|
- FENCE_TRACE
+ DMA_FENCE_TRACE
|
- FENCE_WARN
+ DMA_FENCE_WARN
|
- FENCE_ERR
+ DMA_FENCE_ERR
)
(
...
)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
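For context, here is a minimal sketch (not part of this patch; the helper name
example_sa_fence_done is hypothetical) showing the renamed reference-counting
and signalling calls from <linux/dma-fence.h> as they are used in amdgpu_sa.c
after this change:

#include <linux/dma-fence.h>

/*
 * Hypothetical helper illustrating the renamed API: take a reference,
 * test for completion, then drop the reference.
 */
static bool example_sa_fence_done(struct dma_fence *fence)
{
        bool signaled;

        if (!fence)
                return true;

        dma_fence_get(fence);                    /* was fence_get()         */
        signaled = dma_fence_is_signaled(fence); /* was fence_is_signaled() */
        dma_fence_put(fence);                    /* was fence_put()         */

        return signaled;
}

All three identifiers appear in the coccinelle mapping above, and the diff
below applies the same mechanical substitutions throughout the file.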
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index d8af37a845f4..fd26c4b8d793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
         }
         list_del_init(&sa_bo->olist);
         list_del_init(&sa_bo->flist);
-        fence_put(sa_bo->fence);
+        dma_fence_put(sa_bo->fence);
         kfree(sa_bo);
 }
 
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
         sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
         list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
                 if (sa_bo->fence == NULL ||
-                    !fence_is_signaled(sa_bo->fence)) {
+                    !dma_fence_is_signaled(sa_bo->fence)) {
                         return;
                 }
                 amdgpu_sa_bo_remove_locked(sa_bo);
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-                                   struct fence **fences,
+                                   struct dma_fence **fences,
                                    unsigned *tries)
 {
         struct amdgpu_sa_bo *best_bo = NULL;
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
                 sa_bo = list_first_entry(&sa_manager->flist[i],
                                          struct amdgpu_sa_bo, flist);
 
-                if (!fence_is_signaled(sa_bo->fence)) {
+                if (!dma_fence_is_signaled(sa_bo->fence)) {
                         fences[i] = sa_bo->fence;
                         continue;
                 }
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                      struct amdgpu_sa_bo **sa_bo,
                      unsigned size, unsigned align)
 {
-        struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+        struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
         unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
         unsigned count;
         int i, r;
@@ -356,14 +356,14 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 
         for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
                 if (fences[i])
-                        fences[count++] = fence_get(fences[i]);
+                        fences[count++] = dma_fence_get(fences[i]);
 
         if (count) {
                 spin_unlock(&sa_manager->wq.lock);
-                t = fence_wait_any_timeout(fences, count, false,
+                t = dma_fence_wait_any_timeout(fences, count, false,
                                            MAX_SCHEDULE_TIMEOUT);
                 for (i = 0; i < count; ++i)
-                        fence_put(fences[i]);
+                        dma_fence_put(fences[i]);
 
                 r = (t > 0) ? 0 : t;
                 spin_lock(&sa_manager->wq.lock);
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-                       struct fence *fence)
+                       struct dma_fence *fence)
 {
         struct amdgpu_sa_manager *sa_manager;
 
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
         sa_manager = (*sa_bo)->manager;
         spin_lock(&sa_manager->wq.lock);
-        if (fence && !fence_is_signaled(fence)) {
+        if (fence && !dma_fence_is_signaled(fence)) {
                 uint32_t idx;
 
-                (*sa_bo)->fence = fence_get(fence);
+                (*sa_bo)->fence = dma_fence_get(fence);
                 idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
                 list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
         } else {
403 } else { 403 } else {