diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 22 |
1 file changed, 12 insertions, 10 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index a4bf21f8f1c1..ebe1ffbab0c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | |||
@@ -35,6 +35,7 @@ | |||
35 | struct amdgpu_sync_entry { | 35 | struct amdgpu_sync_entry { |
36 | struct hlist_node node; | 36 | struct hlist_node node; |
37 | struct dma_fence *fence; | 37 | struct dma_fence *fence; |
38 | bool explicit; | ||
38 | }; | 39 | }; |
39 | 40 | ||
40 | static struct kmem_cache *amdgpu_sync_slab; | 41 | static struct kmem_cache *amdgpu_sync_slab; |
@@ -141,7 +142,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f) | |||
141 | * | 142 | * |
142 | */ | 143 | */ |
143 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | 144 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, |
144 | struct dma_fence *f) | 145 | struct dma_fence *f, bool explicit) |
145 | { | 146 | { |
146 | struct amdgpu_sync_entry *e; | 147 | struct amdgpu_sync_entry *e; |
147 | 148 | ||
@@ -159,6 +160,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | |||
159 | if (!e) | 160 | if (!e) |
160 | return -ENOMEM; | 161 | return -ENOMEM; |
161 | 162 | ||
163 | e->explicit = explicit; | ||
164 | |||
162 | hash_add(sync->fences, &e->node, f->context); | 165 | hash_add(sync->fences, &e->node, f->context); |
163 | e->fence = dma_fence_get(f); | 166 | e->fence = dma_fence_get(f); |
164 | return 0; | 167 | return 0; |
@@ -189,10 +192,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, | |||
189 | 192 | ||
190 | /* always sync to the exclusive fence */ | 193 | /* always sync to the exclusive fence */ |
191 | f = reservation_object_get_excl(resv); | 194 | f = reservation_object_get_excl(resv); |
192 | r = amdgpu_sync_fence(adev, sync, f); | 195 | r = amdgpu_sync_fence(adev, sync, f, false); |
193 | |||
194 | if (explicit_sync) | ||
195 | return r; | ||
196 | 196 | ||
197 | flist = reservation_object_get_list(resv); | 197 | flist = reservation_object_get_list(resv); |
198 | if (!flist || r) | 198 | if (!flist || r) |
@@ -212,15 +212,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, | |||
212 | (fence_owner == AMDGPU_FENCE_OWNER_VM))) | 212 | (fence_owner == AMDGPU_FENCE_OWNER_VM))) |
213 | continue; | 213 | continue; |
214 | 214 | ||
215 | /* Ignore fence from the same owner as | 215 | /* Ignore fence from the same owner and explicit one as |
216 | * long as it isn't undefined. | 216 | * long as it isn't undefined. |
217 | */ | 217 | */ |
218 | if (owner != AMDGPU_FENCE_OWNER_UNDEFINED && | 218 | if (owner != AMDGPU_FENCE_OWNER_UNDEFINED && |
219 | fence_owner == owner) | 219 | (fence_owner == owner || explicit_sync)) |
220 | continue; | 220 | continue; |
221 | } | 221 | } |
222 | 222 | ||
223 | r = amdgpu_sync_fence(adev, sync, f); | 223 | r = amdgpu_sync_fence(adev, sync, f, false); |
224 | if (r) | 224 | if (r) |
225 | break; | 225 | break; |
226 | } | 226 | } |
@@ -275,19 +275,21 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | |||
275 | * amdgpu_sync_get_fence - get the next fence from the sync object | 275 | * amdgpu_sync_get_fence - get the next fence from the sync object |
276 | * | 276 | * |
277 | * @sync: sync object to use | 277 | * @sync: sync object to use |
278 | * @explicit: true if the next fence is explicit | ||
278 | * | 279 | * |
279 | * Get and removes the next fence from the sync object not signaled yet. | 280 | * Get and removes the next fence from the sync object not signaled yet. |
280 | */ | 281 | */ |
281 | struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) | 282 | struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit) |
282 | { | 283 | { |
283 | struct amdgpu_sync_entry *e; | 284 | struct amdgpu_sync_entry *e; |
284 | struct hlist_node *tmp; | 285 | struct hlist_node *tmp; |
285 | struct dma_fence *f; | 286 | struct dma_fence *f; |
286 | int i; | 287 | int i; |
287 | |||
288 | hash_for_each_safe(sync->fences, i, tmp, e, node) { | 288 | hash_for_each_safe(sync->fences, i, tmp, e, node) { |
289 | 289 | ||
290 | f = e->fence; | 290 | f = e->fence; |
291 | if (explicit) | ||
292 | *explicit = e->explicit; | ||
291 | 293 | ||
292 | hash_del(&e->node); | 294 | hash_del(&e->node); |
293 | kmem_cache_free(amdgpu_sync_slab, e); | 295 | kmem_cache_free(amdgpu_sync_slab, e); |