about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  29
1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7297ca3a0ba7..087332858853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -115,9 +115,10 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-
+	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, false);
 	if (r) {
+		mutex_unlock(&vm->mutex);
 		return r;
 	}
 
@@ -128,7 +129,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 		++bo_va->ref_count;
 	}
 	amdgpu_bo_unreserve(rbo);
-
+	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
@@ -141,9 +142,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-
+	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, true);
 	if (r) {
+		mutex_unlock(&vm->mutex);
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 		return;
@@ -155,6 +157,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		}
 	}
 	amdgpu_bo_unreserve(rbo);
+	mutex_unlock(&vm->mutex);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -181,7 +184,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	bool kernel = false;
 	int r;
 
-	down_read(&adev->exclusive_lock);
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -214,11 +216,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 
 	memset(args, 0, sizeof(*args));
 	args->out.handle = handle;
-	up_read(&adev->exclusive_lock);
 	return 0;
 
 error_unlock:
-	up_read(&adev->exclusive_lock);
 	r = amdgpu_gem_handle_lockup(adev, r);
 	return r;
 }
@@ -250,8 +250,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		return -EACCES;
 	}
 
-	down_read(&adev->exclusive_lock);
-
 	/* create a gem object to contain this object in */
 	r = amdgpu_gem_object_create(adev, args->size, 0,
 				     AMDGPU_GEM_DOMAIN_CPU, 0,
@@ -293,14 +291,12 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		goto handle_lockup;
 
 	args->handle = handle;
-	up_read(&adev->exclusive_lock);
 	return 0;
 
 release_object:
 	drm_gem_object_unreference_unlocked(gobj);
 
 handle_lockup:
-	up_read(&adev->exclusive_lock);
 	r = amdgpu_gem_handle_lockup(adev, r);
 
 	return r;
@@ -488,18 +484,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		goto error_unreserve;
 	}
 
-	mutex_lock(&bo_va->vm->mutex);
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	if (r)
-		goto error_unlock;
-
+		goto error_unreserve;
 
 	if (operation == AMDGPU_VA_OP_MAP)
 		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
 
-error_unlock:
-	mutex_unlock(&bo_va->vm->mutex);
-
 error_unreserve:
 	ttm_eu_backoff_reservation(&ticket, &list);
 
@@ -556,10 +547,11 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-
+	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
 	r = amdgpu_bo_reserve(rbo, false);
 	if (r) {
+		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
@@ -567,6 +559,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
 		amdgpu_bo_unreserve(rbo);
+		mutex_unlock(&fpriv->vm.mutex);
 		return -ENOENT;
 	}
 
@@ -591,7 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-
+	mutex_unlock(&fpriv->vm.mutex);
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }