about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
diff options
context:
space:
mode:
authorNicolai Hähnle <nicolai.haehnle@amd.com>2017-03-23 14:34:11 -0400
committerAlex Deucher <alexander.deucher@amd.com>2017-03-29 23:55:32 -0400
commit23e0563e48f7e9e98003df5b43d6a48e162782c6 (patch)
tree29ecc7cfb4a1725370da475d0960720887868dd3 /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
parent2de6a7c52a412985446ee358d8e27b7f3de5e3f3 (diff)
drm/amdgpu: clear freed mappings immediately when BO may be freed
Also, add the fence of the clear operations to the BO to ensure that the underlying memory can only be re-used after all PTEs pointing to it have been cleared. This avoids the following sequence of events that could be triggered by user space: 1. Submit a CS that accesses some BO _without_ adding that BO to the buffer list. 2. Free that BO. 3. Some other task re-uses the memory underlying the BO. 4. The CS is submitted to the hardware and accesses memory that is now already in use by somebody else. By clearing the page tables immediately in step 2, a GPU VM fault will be triggered in step 4 instead of wild memory accesses. v2: use amdgpu_bo_fence directly Signed-off-by: Nicolai Hähnle <nicolai.haehnle@amd.com> Reviewed-by: Chunming Zhou <david1.zhou@amd.com> Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7a37b93a0dfd..f85520d4e711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -152,6 +152,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
152 struct ttm_validate_buffer tv; 152 struct ttm_validate_buffer tv;
153 struct ww_acquire_ctx ticket; 153 struct ww_acquire_ctx ticket;
154 struct amdgpu_bo_va *bo_va; 154 struct amdgpu_bo_va *bo_va;
155 struct dma_fence *fence = NULL;
155 int r; 156 int r;
156 157
157 INIT_LIST_HEAD(&list); 158 INIT_LIST_HEAD(&list);
@@ -173,6 +174,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
173 if (bo_va) { 174 if (bo_va) {
174 if (--bo_va->ref_count == 0) { 175 if (--bo_va->ref_count == 0) {
175 amdgpu_vm_bo_rmv(adev, bo_va); 176 amdgpu_vm_bo_rmv(adev, bo_va);
177
178 r = amdgpu_vm_clear_freed(adev, vm, &fence);
179 if (unlikely(r)) {
180 dev_err(adev->dev, "failed to clear page "
181 "tables on GEM object close (%d)\n", r);
182 }
183
184 if (fence) {
185 amdgpu_bo_fence(bo, fence, true);
186 dma_fence_put(fence);
187 }
176 } 188 }
177 } 189 }
178 ttm_eu_backoff_reservation(&ticket, &list); 190 ttm_eu_backoff_reservation(&ticket, &list);