Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 58 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)
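
This is one file's slice of the tree-wide mechanical rename of the shared fence primitive: struct fence becomes struct dma_fence, and every fence_*() helper and FENCE_* macro gains a dma_ prefix, with no intended change in behavior. The mapping, as it appears in the hunks below:

	struct fence / struct fence_ops       ->  struct dma_fence / struct dma_fence_ops
	fence_init()                          ->  dma_fence_init()
	fence_get() / fence_get_rcu() / fence_put()
	                                      ->  dma_fence_get() / dma_fence_get_rcu() / dma_fence_put()
	fence_signal() / fence_is_signaled()  ->  dma_fence_signal() / dma_fence_is_signaled()
	fence_wait() / fence_default_wait()   ->  dma_fence_wait() / dma_fence_default_wait()
	FENCE_TRACE()                         ->  DMA_FENCE_TRACE()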
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f4b897..57552c79ec58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
  */
 
 struct amdgpu_fence {
-	struct fence base;
+	struct dma_fence base;
 
 	/* RB, DMA, etc. */
 	struct amdgpu_ring *ring;
@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void)
 /*
  * Cast helper
  */
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 
@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct fence *old, **ptr;
+	struct dma_fence *old, **ptr;
 	uint32_t seq;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 
 	seq = ++ring->fence_drv.sync_seq;
 	fence->ring = ring;
-	fence_init(&fence->base, &amdgpu_fence_ops,
-		   &ring->fence_drv.lock,
-		   adev->fence_context + ring->idx,
-		   seq);
+	dma_fence_init(&fence->base, &amdgpu_fence_ops,
+		       &ring->fence_drv.lock,
+		       adev->fence_context + ring->idx,
+		       seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       seq, AMDGPU_FENCE_FLAG_INT);
 
@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
 	old = rcu_dereference_protected(*ptr, 1);
-	if (old && !fence_is_signaled(old)) {
+	if (old && !dma_fence_is_signaled(old)) {
 		DRM_INFO("rcu slot is busy\n");
-		fence_wait(old, false);
+		dma_fence_wait(old, false);
 	}
 
-	rcu_assign_pointer(*ptr, fence_get(&fence->base));
+	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
 
 	*f = &fence->base;
 
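
For readers new to the renamed API: dma_fence_init(), as used in the hunk above, binds the embedded fence to its ops table and protecting spinlock, stamps it with a (context, seqno) pair, and takes the initial reference. A minimal sketch of the same embed-and-init pattern outside amdgpu (my_fence and my_fence_create are illustrative names, not part of this patch):

	#include <linux/dma-fence.h>
	#include <linux/slab.h>

	struct my_fence {
		struct dma_fence base;	/* container_of() on this recovers the wrapper */
		/* driver-private state follows, as amdgpu keeps its ring pointer */
	};

	static struct dma_fence *my_fence_create(const struct dma_fence_ops *ops,
						 spinlock_t *lock, u64 context, u32 seqno)
	{
		struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		/* takes the initial reference; dropped with dma_fence_put() */
		dma_fence_init(&f->base, ops, lock, context, seqno);
		return &f->base;
	}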
@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 	seq &= drv->num_fences_mask;
 
 	do {
-		struct fence *fence, **ptr;
+		struct dma_fence *fence, **ptr;
 
 		++last_seq;
 		last_seq &= drv->num_fences_mask;
@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 		if (!fence)
 			continue;
 
-		r = fence_signal(fence);
+		r = dma_fence_signal(fence);
 		if (!r)
-			FENCE_TRACE(fence, "signaled from irq context\n");
+			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
 		else
 			BUG();
 
-		fence_put(fence);
+		dma_fence_put(fence);
 	} while (last_seq != seq);
 }
 
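
A note on the BUG() in the hunk above: dma_fence_signal() returns 0 only the first time a fence is signaled, and -EINVAL if it is NULL or already signaled, so the processing loop asserts that each sequence number signals exactly once. A hedged sketch of a more forgiving caller (my_signal_once is an illustrative helper, not from this patch):

	#include <linux/dma-fence.h>

	/* Illustrative: treat a double signal as benign instead of BUG()ing */
	static void my_signal_once(struct dma_fence *fence)
	{
		if (dma_fence_signal(fence))
			pr_debug("fence %llu:%u already signaled\n",
				 fence->context, fence->seqno);
	}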
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
 	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
-	struct fence *fence, **ptr;
+	struct dma_fence *fence, **ptr;
 	int r;
 
 	if (!seq)
@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 	rcu_read_lock();
 	fence = rcu_dereference(*ptr);
-	if (!fence || !fence_get_rcu(fence)) {
+	if (!fence || !dma_fence_get_rcu(fence)) {
 		rcu_read_unlock();
 		return 0;
 	}
 	rcu_read_unlock();
 
-	r = fence_wait(fence, false);
-	fence_put(fence);
+	r = dma_fence_wait(fence, false);
+	dma_fence_put(fence);
 	return r;
 }
 
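
The lookup above is the canonical RCU-safe way to take a reference on a fence published with rcu_assign_pointer(): dma_fence_get_rcu() is built on kref_get_unless_zero(), so it fails cleanly when it races with the final dma_fence_put(). The same pattern generalized (my_fence_get_rcu and slot are illustrative names):

	#include <linux/dma-fence.h>
	#include <linux/rcupdate.h>

	static struct dma_fence *my_fence_get_rcu(struct dma_fence __rcu **slot)
	{
		struct dma_fence *fence;

		rcu_read_lock();
		fence = rcu_dereference(*slot);
		/* returns NULL if the refcount already dropped to zero */
		if (fence && !dma_fence_get_rcu(fence))
			fence = NULL;
		rcu_read_unlock();

		return fence;	/* caller releases with dma_fence_put() */
	}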
@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amd_sched_fini(&ring->sched);
 		del_timer_sync(&ring->fence_drv.fallback_timer);
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
-			fence_put(ring->fence_drv.fences[j]);
+			dma_fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
 		ring->fence_drv.fences = NULL;
 		ring->fence_drv.initialized = false;
@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
  * Common fence implementation
  */
 
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "amdgpu";
 }
 
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	return (const char *)fence->ring->name;
@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
  * to fence_queue that checks if this fence is signaled, and if so it
  * signals the fence and removes itself.
  */
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	if (!timer_pending(&ring->fence_drv.fallback_timer))
 		amdgpu_fence_schedule_fallback(ring);
 
-	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 
 	return true;
 }
@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
  */
 static void amdgpu_fence_free(struct rcu_head *rcu)
 {
-	struct fence *f = container_of(rcu, struct fence, rcu);
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	kmem_cache_free(amdgpu_fence_slab, fence);
 }
@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
  * This function is called when the reference count becomes zero.
  * It just RCU schedules freeing up the fence.
  */
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
 {
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
 	.enable_signaling = amdgpu_fence_enable_signaling,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amdgpu_fence_release,
 };
 
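
For context on the ops table above: at this point in the API, get_driver_name, get_timeline_name, enable_signaling, and wait were all required (later kernels grew defaults for the last two), while .release is optional and falls back to an RCU-deferred kfree() of the fence. A minimal sketch for another driver (the my_* names are illustrative, not from this patch):

	#include <linux/dma-fence.h>

	static const char *my_get_driver_name(struct dma_fence *f)
	{
		return "mydriver";
	}

	static const char *my_get_timeline_name(struct dma_fence *f)
	{
		return "mytimeline";
	}

	static bool my_enable_signaling(struct dma_fence *f)
	{
		/* arm whatever interrupt or timer will call dma_fence_signal();
		 * amdgpu arms its fallback timer here for the same reason
		 */
		return true;
	}

	static const struct dma_fence_ops my_fence_ops = {
		.get_driver_name   = my_get_driver_name,
		.get_timeline_name = my_get_timeline_name,
		.enable_signaling  = my_enable_signaling,
		.wait              = dma_fence_default_wait,
		/* .release omitted: the core falls back to kfree_rcu() */
	};

Note that amdgpu cannot use the default release path, since its fences come from a slab cache rather than kmalloc(); that is why it supplies amdgpu_fence_release() and frees through kmem_cache_free() after an RCU grace period.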