Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 15
1 files changed, 10 insertions, 5 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a4bf21f8f1c1..f3d1a25b660f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -35,6 +35,7 @@
 struct amdgpu_sync_entry {
 	struct hlist_node node;
 	struct dma_fence *fence;
+	bool explicit;
 };
 
 static struct kmem_cache *amdgpu_sync_slab;
@@ -141,7 +142,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
  *
  */
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct dma_fence *f)
+		      struct dma_fence *f, bool explicit)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -159,6 +160,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	if (!e)
 		return -ENOMEM;
 
+	e->explicit = explicit;
+
 	hash_add(sync->fences, &e->node, f->context);
 	e->fence = dma_fence_get(f);
 	return 0;
@@ -189,7 +192,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 
 	/* always sync to the exclusive fence */
 	f = reservation_object_get_excl(resv);
-	r = amdgpu_sync_fence(adev, sync, f);
+	r = amdgpu_sync_fence(adev, sync, f, false);
 
 	if (explicit_sync)
 		return r;
@@ -220,7 +223,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 			continue;
 		}
 
-		r = amdgpu_sync_fence(adev, sync, f);
+		r = amdgpu_sync_fence(adev, sync, f, false);
 		if (r)
 			break;
 	}
@@ -275,19 +278,21 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
  * amdgpu_sync_get_fence - get the next fence from the sync object
  *
  * @sync: sync object to use
+ * @explicit: true if the next fence is explicit
  *
  * Get and removes the next fence from the sync object not signaled yet.
  */
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
 	struct dma_fence *f;
 	int i;
-
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
 
 		f = e->fence;
+		if (explicit)
+			*explicit = e->explicit;
 
 		hash_del(&e->node);
 		kmem_cache_free(amdgpu_sync_slab, e);
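
For context, a minimal usage sketch of the changed API, assuming a hypothetical caller that is not part of this patch: amdgpu_sync_fence() now records whether a dependency was added explicitly, and amdgpu_sync_get_fence() reports that flag back when the sync object is drained. The helper name example_add_and_drain() is made up for illustration.

/*
 * Hypothetical caller illustrating the new 'explicit' parameters.
 * Not part of this patch; signatures follow the diff above.
 */
static int example_add_and_drain(struct amdgpu_device *adev,
				 struct amdgpu_sync *sync,
				 struct dma_fence *user_fence)
{
	struct dma_fence *f;
	bool explicit;
	int r;

	/* Record a userspace-requested dependency as explicit. */
	r = amdgpu_sync_fence(adev, sync, user_fence, true);
	if (r)
		return r;

	/* Drain the sync object; each entry reports how it was added. */
	while ((f = amdgpu_sync_get_fence(sync, &explicit))) {
		/* ... wait on f, possibly special-casing explicit deps ... */
		dma_fence_put(f);
	}
	return 0;
}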