author    Chris Wilson <chris@chris-wilson.co.uk>    2016-10-25 08:00:45 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>     2016-10-25 08:40:39 -0400
commit    f54d1867005c3323f5d8ad83eed823e84226c429
tree      026c3f57bc546d3a0205389d0f8e0d02ce8a76ac
parent    0fc4f78f44e6c6148cee32456f0d0023ec1c1fd8
dma-buf: Rename struct fence to dma_fence
I plan to usurp the short name of struct fence for a core kernel struct,
and so I need to rename the specialised fence/timeline for DMA operations
to make room.

A consensus was reached in
https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html
that making clear this fence applies to DMA operations was a good thing.
Since then the patch has grown a bit as usage increases, so hopefully it
remains a good thing!

v2: rebase, rerun spatch
v3: Compile on msm, spotted a manual fixup that I broke.
v4: Try again for msm, sorry Daniel

coccinelle script:
@@

@@
- struct fence
+ struct dma_fence
@@

@@
- struct fence_ops
+ struct dma_fence_ops
@@

@@
- struct fence_cb
+ struct dma_fence_cb
@@

@@
- struct fence_array
+ struct dma_fence_array
@@

@@
- enum fence_flag_bits
+ enum dma_fence_flag_bits
@@

@@
(
- fence_init
+ dma_fence_init
|
- fence_release
+ dma_fence_release
|
- fence_free
+ dma_fence_free
|
- fence_get
+ dma_fence_get
|
- fence_get_rcu
+ dma_fence_get_rcu
|
- fence_put
+ dma_fence_put
|
- fence_signal
+ dma_fence_signal
|
- fence_signal_locked
+ dma_fence_signal_locked
|
- fence_default_wait
+ dma_fence_default_wait
|
- fence_add_callback
+ dma_fence_add_callback
|
- fence_remove_callback
+ dma_fence_remove_callback
|
- fence_enable_sw_signaling
+ dma_fence_enable_sw_signaling
|
- fence_is_signaled_locked
+ dma_fence_is_signaled_locked
|
- fence_is_signaled
+ dma_fence_is_signaled
|
- fence_is_later
+ dma_fence_is_later
|
- fence_later
+ dma_fence_later
|
- fence_wait_timeout
+ dma_fence_wait_timeout
|
- fence_wait_any_timeout
+ dma_fence_wait_any_timeout
|
- fence_wait
+ dma_fence_wait
|
- fence_context_alloc
+ dma_fence_context_alloc
|
- fence_array_create
+ dma_fence_array_create
|
- to_fence_array
+ to_dma_fence_array
|
- fence_is_array
+ dma_fence_is_array
|
- trace_fence_emit
+ trace_dma_fence_emit
|
- FENCE_TRACE
+ DMA_FENCE_TRACE
|
- FENCE_WARN
+ DMA_FENCE_WARN
|
- FENCE_ERR
+ DMA_FENCE_ERR
)
(
...
)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
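To make the mechanical nature of the rename concrete, here is a minimal
sketch of how fence-handling code looks after the conversion, mirroring
the amdgpu_sync_keep_later() hunk below (not an excerpt from this patch;
the helper name keep_later() is illustrative):

    #include <linux/dma-fence.h>  /* this patch also renames linux/fence.h */

    /* Keep whichever of two fences signals later. dma_fence_put() is
     * NULL-safe, so *keep may legitimately start out as NULL. */
    static void keep_later(struct dma_fence **keep, struct dma_fence *fence)
    {
            if (*keep && dma_fence_is_later(*keep, fence))
                    return;                 /* existing fence is already later */

            dma_fence_put(*keep);           /* drop the old reference */
            *keep = dma_fence_get(fence);   /* take a reference on the new one */
    }

Only the identifiers change; reference counting and ordering semantics
are exactly those of the old fence_*() API.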
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r--    drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c    48
1 file changed, 25 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5c8d3022fb87..ed814e6d0207 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -34,7 +34,7 @@
 
 struct amdgpu_sync_entry {
 	struct hlist_node node;
-	struct fence *fence;
+	struct dma_fence *fence;
 };
 
 static struct kmem_cache *amdgpu_sync_slab;
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
  *
  * Test if the fence was issued by us.
  */
-static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+				 struct dma_fence *f)
 {
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
  *
  * Extract who originally created the fence.
  */
-static void *amdgpu_sync_get_owner(struct fence *f)
+static void *amdgpu_sync_get_owner(struct dma_fence *f)
 {
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
  *
  * Either keep the existing fence or the new one, depending which one is later.
  */
-static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+static void amdgpu_sync_keep_later(struct dma_fence **keep,
+				   struct dma_fence *fence)
 {
-	if (*keep && fence_is_later(*keep, fence))
+	if (*keep && dma_fence_is_later(*keep, fence))
 		return;
 
-	fence_put(*keep);
-	*keep = fence_get(fence);
+	dma_fence_put(*keep);
+	*keep = dma_fence_get(fence);
 }
 
 /**
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
  * Tries to add the fence to an existing hash entry. Returns true when an entry
  * was found, false otherwise.
  */
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
  *
  */
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *f)
+		      struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return -ENOMEM;
 
 	hash_add(sync->fences, &e->node, f->context);
-	e->fence = fence_get(f);
+	e->fence = dma_fence_get(f);
 	return 0;
 }
 
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     void *owner)
 {
 	struct reservation_object_list *flist;
-	struct fence *f;
+	struct dma_fence *f;
 	void *fence_owner;
 	unsigned i;
 	int r = 0;
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
  * Returns the next fence not signaled yet without removing it from the sync
  * object.
  */
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
-				     struct amdgpu_ring *ring)
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+					 struct amdgpu_ring *ring)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
 	int i;
 
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
-		struct fence *f = e->fence;
+		struct dma_fence *f = e->fence;
 		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
 		if (ring && s_fence) {
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 			 * when they are scheduled.
 			 */
 			if (s_fence->sched == &ring->sched) {
-				if (fence_is_signaled(&s_fence->scheduled))
+				if (dma_fence_is_signaled(&s_fence->scheduled))
 					continue;
 
 				return &s_fence->scheduled;
 			}
 		}
 
-		if (fence_is_signaled(f)) {
+		if (dma_fence_is_signaled(f)) {
 			hash_del(&e->node);
-			fence_put(f);
+			dma_fence_put(f);
 			kmem_cache_free(amdgpu_sync_slab, e);
 			continue;
 		}
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
  *
  * Get and removes the next fence from the sync object not signaled yet.
  */
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
-	struct fence *f;
+	struct dma_fence *f;
 	int i;
 
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 		hash_del(&e->node);
 		kmem_cache_free(amdgpu_sync_slab, e);
 
-		if (!fence_is_signaled(f))
+		if (!dma_fence_is_signaled(f))
 			return f;
 
-		fence_put(f);
+		dma_fence_put(f);
 	}
 	return NULL;
 }
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
 
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
 		hash_del(&e->node);
-		fence_put(e->fence);
+		dma_fence_put(e->fence);
 		kmem_cache_free(amdgpu_sync_slab, e);
 	}
 
-	fence_put(sync->last_vm_update);
+	dma_fence_put(sync->last_vm_update);
 }
 
 /**
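As a usage note, a hedged sketch of a typical consumer of the converted
API (modelled on how amdgpu drains a sync object; the helper name
drain_sync() and its error handling are illustrative, not part of this
patch):

    /* Illustrative only: wait on every fence still tracked by the sync
     * object. amdgpu_sync_get_fence() transfers its reference to the
     * caller, so each fence must be released with dma_fence_put(). */
    static int drain_sync(struct amdgpu_sync *sync)
    {
            struct dma_fence *f;

            while ((f = amdgpu_sync_get_fence(sync))) {
                    int r = dma_fence_wait(f, false);   /* uninterruptible wait */

                    dma_fence_put(f);
                    if (r)
                            return r;
            }
            return 0;
    }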