aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2016-10-25 08:00:45 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2016-10-25 08:40:39 -0400
commitf54d1867005c3323f5d8ad83eed823e84226c429 (patch)
tree026c3f57bc546d3a0205389d0f8e0d02ce8a76ac /drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
parent0fc4f78f44e6c6148cee32456f0d0023ec1c1fd8 (diff)
dma-buf: Rename struct fence to dma_fence
I plan to usurp the short name of struct fence for a core kernel struct, and so I need to rename the specialised fence/timeline for DMA operations to make room. A consensus was reached in https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html that making clear this fence applies to DMA operations was a good thing. Since then the patch has grown a bit as usage increases, so hopefully it remains a good thing! (v2...: rebase, rerun spatch) v3: Compile on msm, spotted a manual fixup that I broke. v4: Try again for msm, sorry Daniel coccinelle script: @@ @@ - struct fence + struct dma_fence @@ @@ - struct fence_ops + struct dma_fence_ops @@ @@ - struct fence_cb + struct dma_fence_cb @@ @@ - struct fence_array + struct dma_fence_array @@ @@ - enum fence_flag_bits + enum dma_fence_flag_bits @@ @@ ( - fence_init + dma_fence_init | - fence_release + dma_fence_release | - fence_free + dma_fence_free | - fence_get + dma_fence_get | - fence_get_rcu + dma_fence_get_rcu | - fence_put + dma_fence_put | - fence_signal + dma_fence_signal | - fence_signal_locked + dma_fence_signal_locked | - fence_default_wait + dma_fence_default_wait | - fence_add_callback + dma_fence_add_callback | - fence_remove_callback + dma_fence_remove_callback | - fence_enable_sw_signaling + dma_fence_enable_sw_signaling | - fence_is_signaled_locked + dma_fence_is_signaled_locked | - fence_is_signaled + dma_fence_is_signaled | - fence_is_later + dma_fence_is_later | - fence_later + dma_fence_later | - fence_wait_timeout + dma_fence_wait_timeout | - fence_wait_any_timeout + dma_fence_wait_any_timeout | - fence_wait + dma_fence_wait | - fence_context_alloc + dma_fence_context_alloc | - fence_array_create + dma_fence_array_create | - to_fence_array + to_dma_fence_array | - fence_is_array + dma_fence_is_array | - trace_fence_emit + trace_dma_fence_emit | - FENCE_TRACE + DMA_FENCE_TRACE | - FENCE_WARN + DMA_FENCE_WARN | - FENCE_ERR + DMA_FENCE_ERR ) ( ... 
) Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk> Acked-by: Sumit Semwal <sumit.semwal@linaro.org> Acked-by: Christian König <christian.koenig@amd.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c58
1 file changed, 29 insertions, 29 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f4b897..57552c79ec58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
48 */ 48 */
49 49
50struct amdgpu_fence { 50struct amdgpu_fence {
51 struct fence base; 51 struct dma_fence base;
52 52
53 /* RB, DMA, etc. */ 53 /* RB, DMA, etc. */
54 struct amdgpu_ring *ring; 54 struct amdgpu_ring *ring;
@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void)
73/* 73/*
74 * Cast helper 74 * Cast helper
75 */ 75 */
76static const struct fence_ops amdgpu_fence_ops; 76static const struct dma_fence_ops amdgpu_fence_ops;
77static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f) 77static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
78{ 78{
79 struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); 79 struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
80 80
@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
130 * Emits a fence command on the requested ring (all asics). 130 * Emits a fence command on the requested ring (all asics).
131 * Returns 0 on success, -ENOMEM on failure. 131 * Returns 0 on success, -ENOMEM on failure.
132 */ 132 */
133int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) 133int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
134{ 134{
135 struct amdgpu_device *adev = ring->adev; 135 struct amdgpu_device *adev = ring->adev;
136 struct amdgpu_fence *fence; 136 struct amdgpu_fence *fence;
137 struct fence *old, **ptr; 137 struct dma_fence *old, **ptr;
138 uint32_t seq; 138 uint32_t seq;
139 139
140 fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); 140 fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
143 143
144 seq = ++ring->fence_drv.sync_seq; 144 seq = ++ring->fence_drv.sync_seq;
145 fence->ring = ring; 145 fence->ring = ring;
146 fence_init(&fence->base, &amdgpu_fence_ops, 146 dma_fence_init(&fence->base, &amdgpu_fence_ops,
147 &ring->fence_drv.lock, 147 &ring->fence_drv.lock,
148 adev->fence_context + ring->idx, 148 adev->fence_context + ring->idx,
149 seq); 149 seq);
150 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, 150 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
151 seq, AMDGPU_FENCE_FLAG_INT); 151 seq, AMDGPU_FENCE_FLAG_INT);
152 152
@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
155 * emitting the fence would mess up the hardware ring buffer. 155 * emitting the fence would mess up the hardware ring buffer.
156 */ 156 */
157 old = rcu_dereference_protected(*ptr, 1); 157 old = rcu_dereference_protected(*ptr, 1);
158 if (old && !fence_is_signaled(old)) { 158 if (old && !dma_fence_is_signaled(old)) {
159 DRM_INFO("rcu slot is busy\n"); 159 DRM_INFO("rcu slot is busy\n");
160 fence_wait(old, false); 160 dma_fence_wait(old, false);
161 } 161 }
162 162
163 rcu_assign_pointer(*ptr, fence_get(&fence->base)); 163 rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
164 164
165 *f = &fence->base; 165 *f = &fence->base;
166 166
@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
211 seq &= drv->num_fences_mask; 211 seq &= drv->num_fences_mask;
212 212
213 do { 213 do {
214 struct fence *fence, **ptr; 214 struct dma_fence *fence, **ptr;
215 215
216 ++last_seq; 216 ++last_seq;
217 last_seq &= drv->num_fences_mask; 217 last_seq &= drv->num_fences_mask;
@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
224 if (!fence) 224 if (!fence)
225 continue; 225 continue;
226 226
227 r = fence_signal(fence); 227 r = dma_fence_signal(fence);
228 if (!r) 228 if (!r)
229 FENCE_TRACE(fence, "signaled from irq context\n"); 229 DMA_FENCE_TRACE(fence, "signaled from irq context\n");
230 else 230 else
231 BUG(); 231 BUG();
232 232
233 fence_put(fence); 233 dma_fence_put(fence);
234 } while (last_seq != seq); 234 } while (last_seq != seq);
235} 235}
236 236
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
260int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) 260int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
261{ 261{
262 uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq); 262 uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
263 struct fence *fence, **ptr; 263 struct dma_fence *fence, **ptr;
264 int r; 264 int r;
265 265
266 if (!seq) 266 if (!seq)
@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
269 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; 269 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
270 rcu_read_lock(); 270 rcu_read_lock();
271 fence = rcu_dereference(*ptr); 271 fence = rcu_dereference(*ptr);
272 if (!fence || !fence_get_rcu(fence)) { 272 if (!fence || !dma_fence_get_rcu(fence)) {
273 rcu_read_unlock(); 273 rcu_read_unlock();
274 return 0; 274 return 0;
275 } 275 }
276 rcu_read_unlock(); 276 rcu_read_unlock();
277 277
278 r = fence_wait(fence, false); 278 r = dma_fence_wait(fence, false);
279 fence_put(fence); 279 dma_fence_put(fence);
280 return r; 280 return r;
281} 281}
282 282
@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
452 amd_sched_fini(&ring->sched); 452 amd_sched_fini(&ring->sched);
453 del_timer_sync(&ring->fence_drv.fallback_timer); 453 del_timer_sync(&ring->fence_drv.fallback_timer);
454 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) 454 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
455 fence_put(ring->fence_drv.fences[j]); 455 dma_fence_put(ring->fence_drv.fences[j]);
456 kfree(ring->fence_drv.fences); 456 kfree(ring->fence_drv.fences);
457 ring->fence_drv.fences = NULL; 457 ring->fence_drv.fences = NULL;
458 ring->fence_drv.initialized = false; 458 ring->fence_drv.initialized = false;
@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
541 * Common fence implementation 541 * Common fence implementation
542 */ 542 */
543 543
544static const char *amdgpu_fence_get_driver_name(struct fence *fence) 544static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
545{ 545{
546 return "amdgpu"; 546 return "amdgpu";
547} 547}
548 548
549static const char *amdgpu_fence_get_timeline_name(struct fence *f) 549static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
550{ 550{
551 struct amdgpu_fence *fence = to_amdgpu_fence(f); 551 struct amdgpu_fence *fence = to_amdgpu_fence(f);
552 return (const char *)fence->ring->name; 552 return (const char *)fence->ring->name;
@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
560 * to fence_queue that checks if this fence is signaled, and if so it 560 * to fence_queue that checks if this fence is signaled, and if so it
561 * signals the fence and removes itself. 561 * signals the fence and removes itself.
562 */ 562 */
563static bool amdgpu_fence_enable_signaling(struct fence *f) 563static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
564{ 564{
565 struct amdgpu_fence *fence = to_amdgpu_fence(f); 565 struct amdgpu_fence *fence = to_amdgpu_fence(f);
566 struct amdgpu_ring *ring = fence->ring; 566 struct amdgpu_ring *ring = fence->ring;
@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
568 if (!timer_pending(&ring->fence_drv.fallback_timer)) 568 if (!timer_pending(&ring->fence_drv.fallback_timer))
569 amdgpu_fence_schedule_fallback(ring); 569 amdgpu_fence_schedule_fallback(ring);
570 570
571 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); 571 DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
572 572
573 return true; 573 return true;
574} 574}
@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
582 */ 582 */
583static void amdgpu_fence_free(struct rcu_head *rcu) 583static void amdgpu_fence_free(struct rcu_head *rcu)
584{ 584{
585 struct fence *f = container_of(rcu, struct fence, rcu); 585 struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
586 struct amdgpu_fence *fence = to_amdgpu_fence(f); 586 struct amdgpu_fence *fence = to_amdgpu_fence(f);
587 kmem_cache_free(amdgpu_fence_slab, fence); 587 kmem_cache_free(amdgpu_fence_slab, fence);
588} 588}
@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
595 * This function is called when the reference count becomes zero. 595 * This function is called when the reference count becomes zero.
596 * It just RCU schedules freeing up the fence. 596 * It just RCU schedules freeing up the fence.
597 */ 597 */
598static void amdgpu_fence_release(struct fence *f) 598static void amdgpu_fence_release(struct dma_fence *f)
599{ 599{
600 call_rcu(&f->rcu, amdgpu_fence_free); 600 call_rcu(&f->rcu, amdgpu_fence_free);
601} 601}
602 602
603static const struct fence_ops amdgpu_fence_ops = { 603static const struct dma_fence_ops amdgpu_fence_ops = {
604 .get_driver_name = amdgpu_fence_get_driver_name, 604 .get_driver_name = amdgpu_fence_get_driver_name,
605 .get_timeline_name = amdgpu_fence_get_timeline_name, 605 .get_timeline_name = amdgpu_fence_get_timeline_name,
606 .enable_signaling = amdgpu_fence_enable_signaling, 606 .enable_signaling = amdgpu_fence_enable_signaling,
607 .wait = fence_default_wait, 607 .wait = dma_fence_default_wait,
608 .release = amdgpu_fence_release, 608 .release = amdgpu_fence_release,
609}; 609};
610 610