about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2017-04-21 04:05:56 -0400
committerAlex Deucher <alexander.deucher@amd.com>2017-04-28 17:32:53 -0400
commit5a0f3b5f6d798637b0af5d6d3ab3eb02063e0317 (patch)
tree2a29ee6cca3dba2c18bf421bc2456a563e1a3cd9 /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
parent05a72a2864c2b27471e9f5365448563c78f9b114 (diff)
drm/amdgpu: fix VM clearing in amdgpu_gem_object_close
We need to check if the VM is swapped out before trying to update it.

Fixes: 23e0563e48f7 ("drm/amdgpu: clear freed mappings immediately when BO may be freed")
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c68
1 file changed, 37 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 03a9c5cad222..94cb91cf93eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -139,6 +139,35 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
139 return 0; 139 return 0;
140} 140}
141 141
142static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
143{
144 /* if anything is swapped out don't swap it in here,
145 just abort and wait for the next CS */
146 if (!amdgpu_bo_gpu_accessible(bo))
147 return -ERESTARTSYS;
148
149 if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
150 return -ERESTARTSYS;
151
152 return 0;
153}
154
155static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
156 struct amdgpu_vm *vm,
157 struct list_head *list)
158{
159 struct ttm_validate_buffer *entry;
160
161 list_for_each_entry(entry, list, head) {
162 struct amdgpu_bo *bo =
163 container_of(entry->bo, struct amdgpu_bo, tbo);
164 if (amdgpu_gem_vm_check(NULL, bo))
165 return false;
166 }
167
168 return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
169}
170
142void amdgpu_gem_object_close(struct drm_gem_object *obj, 171void amdgpu_gem_object_close(struct drm_gem_object *obj,
143 struct drm_file *file_priv) 172 struct drm_file *file_priv)
144{ 173{
@@ -148,15 +177,13 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
148 struct amdgpu_vm *vm = &fpriv->vm; 177 struct amdgpu_vm *vm = &fpriv->vm;
149 178
150 struct amdgpu_bo_list_entry vm_pd; 179 struct amdgpu_bo_list_entry vm_pd;
151 struct list_head list, duplicates; 180 struct list_head list;
152 struct ttm_validate_buffer tv; 181 struct ttm_validate_buffer tv;
153 struct ww_acquire_ctx ticket; 182 struct ww_acquire_ctx ticket;
154 struct amdgpu_bo_va *bo_va; 183 struct amdgpu_bo_va *bo_va;
155 struct dma_fence *fence = NULL;
156 int r; 184 int r;
157 185
158 INIT_LIST_HEAD(&list); 186 INIT_LIST_HEAD(&list);
159 INIT_LIST_HEAD(&duplicates);
160 187
161 tv.bo = &bo->tbo; 188 tv.bo = &bo->tbo;
162 tv.shared = true; 189 tv.shared = true;
@@ -164,16 +191,18 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
164 191
165 amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); 192 amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
166 193
167 r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates); 194 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
168 if (r) { 195 if (r) {
169 dev_err(adev->dev, "leaking bo va because " 196 dev_err(adev->dev, "leaking bo va because "
170 "we fail to reserve bo (%d)\n", r); 197 "we fail to reserve bo (%d)\n", r);
171 return; 198 return;
172 } 199 }
173 bo_va = amdgpu_vm_bo_find(vm, bo); 200 bo_va = amdgpu_vm_bo_find(vm, bo);
174 if (bo_va) { 201 if (bo_va && --bo_va->ref_count == 0) {
175 if (--bo_va->ref_count == 0) { 202 amdgpu_vm_bo_rmv(adev, bo_va);
176 amdgpu_vm_bo_rmv(adev, bo_va); 203
204 if (amdgpu_gem_vm_ready(adev, vm, &list)) {
205 struct dma_fence *fence = NULL;
177 206
178 r = amdgpu_vm_clear_freed(adev, vm, &fence); 207 r = amdgpu_vm_clear_freed(adev, vm, &fence);
179 if (unlikely(r)) { 208 if (unlikely(r)) {
@@ -502,19 +531,6 @@ out:
502 return r; 531 return r;
503} 532}
504 533
505static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
506{
507 /* if anything is swapped out don't swap it in here,
508 just abort and wait for the next CS */
509 if (!amdgpu_bo_gpu_accessible(bo))
510 return -ERESTARTSYS;
511
512 if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
513 return -ERESTARTSYS;
514
515 return 0;
516}
517
518/** 534/**
519 * amdgpu_gem_va_update_vm -update the bo_va in its VM 535 * amdgpu_gem_va_update_vm -update the bo_va in its VM
520 * 536 *
@@ -533,19 +549,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
533 struct list_head *list, 549 struct list_head *list,
534 uint32_t operation) 550 uint32_t operation)
535{ 551{
536 struct ttm_validate_buffer *entry;
537 int r = -ERESTARTSYS; 552 int r = -ERESTARTSYS;
538 553
539 list_for_each_entry(entry, list, head) { 554 if (!amdgpu_gem_vm_ready(adev, vm, list))
540 struct amdgpu_bo *bo =
541 container_of(entry->bo, struct amdgpu_bo, tbo);
542 if (amdgpu_gem_va_check(NULL, bo))
543 goto error;
544 }
545
546 r = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_va_check,
547 NULL);
548 if (r)
549 goto error; 555 goto error;
550 556
551 r = amdgpu_vm_update_directories(adev, vm); 557 r = amdgpu_vm_update_directories(adev, vm);