Diffstat (limited to 'drivers/gpu/drm/radeon')
 drivers/gpu/drm/radeon/radeon.h        |  1 +
 drivers/gpu/drm/radeon/radeon_cs.c     | 32 +++++++++++++++++++++++++++++++++---
 drivers/gpu/drm/radeon/radeon_gart.c   | 24 ++++++++++++++++++++++--
 drivers/gpu/drm/radeon/radeon_gem.c    | 13 ++-----------
 drivers/gpu/drm/radeon/radeon_object.c |  6 +-----
 5 files changed, 55 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 583e0f19e1c7..b237a29142d1 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -300,6 +300,7 @@ struct radeon_bo_va {
         uint64_t soffset;
         uint64_t eoffset;
         uint32_t flags;
+        struct radeon_fence *fence;
         bool valid;
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 8a4c49ef0cc4..b4a0db24f4dd 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -278,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
         return 0;
 }
 
+static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
+                                  struct radeon_fence *fence)
+{
+        struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+        struct radeon_vm *vm = &fpriv->vm;
+        struct radeon_bo_list *lobj;
+
+        if (parser->chunk_ib_idx == -1) {
+                return;
+        }
+        if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
+                return;
+        }
+
+        list_for_each_entry(lobj, &parser->validated, tv.head) {
+                struct radeon_bo_va *bo_va;
+                struct radeon_bo *rbo = lobj->bo;
+
+                bo_va = radeon_bo_va(rbo, vm);
+                radeon_fence_unref(&bo_va->fence);
+                bo_va->fence = radeon_fence_ref(fence);
+        }
+}
+
 /**
  * cs_parser_fini() - clean parser states
  * @parser: parser structure holding parsing context.
@@ -290,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
         unsigned i;
 
-        if (!error)
+        if (!error) {
+                /* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
+                radeon_bo_vm_fence_va(parser, parser->ib.fence);
                 ttm_eu_fence_buffer_objects(&parser->validated,
                                             parser->ib.fence);
-        else
+        } else {
                 ttm_eu_backoff_reservation(&parser->validated);
+        }
 
         if (parser->relocs != NULL) {
                 for (i = 0; i < parser->nrelocs; i++) {
@@ -388,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
         if (parser->chunk_ib_idx == -1)
                 return 0;
-
         if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
                 return 0;
 
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b3720054614d..9912182682ec 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -814,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
                 return -EINVAL;
         }
 
-        if (bo_va->valid)
+        if (bo_va->valid && mem)
                 return 0;
 
         ngpu_pages = radeon_bo_ngpu_pages(bo);
@@ -859,11 +859,27 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
                      struct radeon_bo *bo)
 {
         struct radeon_bo_va *bo_va;
+        int r;
 
         bo_va = radeon_bo_va(bo, vm);
         if (bo_va == NULL)
                 return 0;
 
+        /* wait for va use to end */
+        while (bo_va->fence) {
+                r = radeon_fence_wait(bo_va->fence, false);
+                if (r) {
+                        DRM_ERROR("error while waiting for fence: %d\n", r);
+                }
+                if (r == -EDEADLK) {
+                        r = radeon_gpu_reset(rdev);
+                        if (!r)
+                                continue;
+                }
+                break;
+        }
+        radeon_fence_unref(&bo_va->fence);
+
         mutex_lock(&rdev->vm_manager.lock);
         mutex_lock(&vm->mutex);
         radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
@@ -952,12 +968,15 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
         radeon_vm_unbind_locked(rdev, vm);
         mutex_unlock(&rdev->vm_manager.lock);
 
-        /* remove all bo */
+        /* remove all bo at this point non are busy any more because unbind
+         * waited for the last vm fence to signal
+         */
         r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
         if (!r) {
                 bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
                 list_del_init(&bo_va->bo_list);
                 list_del_init(&bo_va->vm_list);
+                radeon_fence_unref(&bo_va->fence);
                 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                 kfree(bo_va);
         }
@@ -969,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
                 r = radeon_bo_reserve(bo_va->bo, false);
                 if (!r) {
                         list_del_init(&bo_va->bo_list);
+                        radeon_fence_unref(&bo_va->fence);
                         radeon_bo_unreserve(bo_va->bo);
                         kfree(bo_va);
                 }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 84d045245739..1b57b0058ad6 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
         struct radeon_device *rdev = rbo->rdev;
         struct radeon_fpriv *fpriv = file_priv->driver_priv;
         struct radeon_vm *vm = &fpriv->vm;
-        struct radeon_bo_va *bo_va, *tmp;
 
         if (rdev->family < CHIP_CAYMAN) {
                 return;
         }
 
         if (radeon_bo_reserve(rbo, false)) {
+                dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n");
                 return;
         }
-        list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
-                if (bo_va->vm == vm) {
-                        /* remove from this vm address space */
-                        mutex_lock(&vm->mutex);
-                        list_del(&bo_va->vm_list);
-                        mutex_unlock(&vm->mutex);
-                        list_del(&bo_va->bo_list);
-                        kfree(bo_va);
-                }
-        }
+        radeon_vm_bo_rmv(rdev, vm, rbo);
         radeon_bo_unreserve(rbo);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f1a4c803c1d..1cb014b571ab 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
 
         list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
                 /* remove from all vm address space */
-                mutex_lock(&bo_va->vm->mutex);
-                list_del(&bo_va->vm_list);
-                mutex_unlock(&bo_va->vm->mutex);
-                list_del(&bo_va->bo_list);
-                kfree(bo_va);
+                radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
         }
 }
 