about · summary · refs · log · tree · commit · diff · stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2016-03-16 18:25:04 -0400
committerDave Airlie <airlied@redhat.com>2016-03-16 18:25:04 -0400
commit9f443bf53b5699835e0132d62d1e6c99a1eaeee8 (patch)
tree482b1f57019446cc866a0fc8e87bd4b0b0119775 /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
parent70a09f36d02584fe0025fa14a5cbf276240b2fd4 (diff)
parent00b7c4ff7d482d287a591f047e0963d494569b46 (diff)
Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more fixes and cleanups for 4.6: - DCE code cleanups - HDP flush/invalidation fixes - GPUVM fixes - switch to drm_vblank_[on|off] - PX fixes - misc bug fixes * 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux: (50 commits) drm/amdgpu: split pipeline sync out of SDMA vm_flush() as well drm/amdgpu: Revert "add mutex for ba_va->valids/invalids" drm/amdgpu: Revert "add lock for interval tree in vm" drm/amdgpu: Revert "add spin lock to protect freed list in vm (v3)" drm/amdgpu: reserve the PD during unmap and remove drm/amdgpu: Fix two bugs in amdgpu_vm_bo_split_mapping drm/radeon: Don't drop DP 2.7 Ghz link setup on some cards. MAINTAINERS: update radeon entry to include amdgpu as well drm/amdgpu: disable runtime pm on PX laptops without dGPU power control drm/radeon: disable runtime pm on PX laptops without dGPU power control drm/amd/amdgpu: Fix indentation in do_set_base() (DCEv8) drm/amd/amdgpu: make afmt_init cleanup if alloc fails (DCEv8) drm/amd/amdgpu: Move config init flag to bottom of sw_init (DCEv8) drm/amd/amdgpu: Don't proceed into audio_fini if audio is disabled (DCEv8) drm/amd/amdgpu: Fix identation in do_set_base() (DCEv10) drm/amd/amdgpu: Make afmt_init cleanup if alloc fails (DCEv10) drm/amd/amdgpu: Move initialized flag to bottom of sw_init (DCEv10) drm/amd/amdgpu: Don't proceed in audio_fini if disabled (DCEv10) drm/amd/amdgpu: Fix indentation in dce_v11_0_crtc_do_set_base() drm/amd/amdgpu: Make afmt_init() cleanup if alloc fails (DCEv11) ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c57
1 file changed, 41 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7a47c45b2131..1ecdf6c01368 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -140,25 +140,40 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 void amdgpu_gem_object_close(struct drm_gem_object *obj,
 			     struct drm_file *file_priv)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = rbo->adev;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = bo->adev;
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
+
+	struct amdgpu_bo_list_entry vm_pd;
+	struct list_head list, duplicates;
+	struct ttm_validate_buffer tv;
+	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	r = amdgpu_bo_reserve(rbo, true);
+
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
+
+	tv.bo = &bo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 		return;
 	}
-	bo_va = amdgpu_vm_bo_find(vm, rbo);
+	bo_va = amdgpu_vm_bo_find(vm, bo);
 	if (bo_va) {
 		if (--bo_va->ref_count == 0) {
 			amdgpu_vm_bo_rmv(adev, bo_va);
 		}
 	}
-	amdgpu_bo_unreserve(rbo);
+	ttm_eu_backoff_reservation(&ticket, &list);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -274,18 +289,23 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
 		down_read(&current->mm->mmap_sem);
+
+		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
+						 bo->tbo.ttm->pages);
+		if (r)
+			goto unlock_mmap_sem;
+
 		r = amdgpu_bo_reserve(bo, true);
-		if (r) {
-			up_read(&current->mm->mmap_sem);
-			goto release_object;
-		}
+		if (r)
+			goto free_pages;
 
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 		amdgpu_bo_unreserve(bo);
-		up_read(&current->mm->mmap_sem);
 		if (r)
-			goto release_object;
+			goto free_pages;
+
+		up_read(&current->mm->mmap_sem);
 	}
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
@@ -297,6 +317,12 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	args->handle = handle;
 	return 0;
 
+free_pages:
+	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
+
+unlock_mmap_sem:
+	up_read(&current->mm->mmap_sem);
+
 release_object:
 	drm_gem_object_unreference_unlocked(gobj);
 
@@ -569,11 +595,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
-	if (args->operation == AMDGPU_VA_OP_MAP) {
-		tv_pd.bo = &fpriv->vm.page_directory->tbo;
-		tv_pd.shared = true;
-		list_add(&tv_pd.head, &list);
-	}
+	tv_pd.bo = &fpriv->vm.page_directory->tbo;
+	tv_pd.shared = true;
+	list_add(&tv_pd.head, &list);
+
 	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
 		drm_gem_object_unreference_unlocked(gobj);