diff options
author | Chunming Zhou <David1.Zhou@amd.com> | 2015-11-13 02:22:04 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-11-20 18:22:28 -0500 |
commit | e98c1b0de6fe73f488df62d83d83f377b1b6e2b8 (patch) | |
tree | 3cde29555f58ff50ba362f1a5b20d6dbe7717ee9 /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |
parent | 69b576a1bc8b466ae7bff0208f1c139dbaaf802c (diff) |
drm/amdgpu: remove vm->mutex
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 14 |
1 file changed, 2 insertions, 12 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 00c5b580f56c..fc32fc01a64b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri | |||
115 | struct amdgpu_vm *vm = &fpriv->vm; | 115 | struct amdgpu_vm *vm = &fpriv->vm; |
116 | struct amdgpu_bo_va *bo_va; | 116 | struct amdgpu_bo_va *bo_va; |
117 | int r; | 117 | int r; |
118 | mutex_lock(&vm->mutex); | ||
119 | r = amdgpu_bo_reserve(rbo, false); | 118 | r = amdgpu_bo_reserve(rbo, false); |
120 | if (r) { | 119 | if (r) |
121 | mutex_unlock(&vm->mutex); | ||
122 | return r; | 120 | return r; |
123 | } | ||
124 | 121 | ||
125 | bo_va = amdgpu_vm_bo_find(vm, rbo); | 122 | bo_va = amdgpu_vm_bo_find(vm, rbo); |
126 | if (!bo_va) { | 123 | if (!bo_va) { |
@@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri | |||
129 | ++bo_va->ref_count; | 126 | ++bo_va->ref_count; |
130 | } | 127 | } |
131 | amdgpu_bo_unreserve(rbo); | 128 | amdgpu_bo_unreserve(rbo); |
132 | mutex_unlock(&vm->mutex); | ||
133 | return 0; | 129 | return 0; |
134 | } | 130 | } |
135 | 131 | ||
@@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, | |||
142 | struct amdgpu_vm *vm = &fpriv->vm; | 138 | struct amdgpu_vm *vm = &fpriv->vm; |
143 | struct amdgpu_bo_va *bo_va; | 139 | struct amdgpu_bo_va *bo_va; |
144 | int r; | 140 | int r; |
145 | mutex_lock(&vm->mutex); | ||
146 | r = amdgpu_bo_reserve(rbo, true); | 141 | r = amdgpu_bo_reserve(rbo, true); |
147 | if (r) { | 142 | if (r) { |
148 | mutex_unlock(&vm->mutex); | ||
149 | dev_err(adev->dev, "leaking bo va because " | 143 | dev_err(adev->dev, "leaking bo va because " |
150 | "we fail to reserve bo (%d)\n", r); | 144 | "we fail to reserve bo (%d)\n", r); |
151 | return; | 145 | return; |
@@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, | |||
157 | } | 151 | } |
158 | } | 152 | } |
159 | amdgpu_bo_unreserve(rbo); | 153 | amdgpu_bo_unreserve(rbo); |
160 | mutex_unlock(&vm->mutex); | ||
161 | } | 154 | } |
162 | 155 | ||
163 | static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) | 156 | static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) |
@@ -553,7 +546,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
553 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 546 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
554 | if (gobj == NULL) | 547 | if (gobj == NULL) |
555 | return -ENOENT; | 548 | return -ENOENT; |
556 | mutex_lock(&fpriv->vm.mutex); | ||
557 | rbo = gem_to_amdgpu_bo(gobj); | 549 | rbo = gem_to_amdgpu_bo(gobj); |
558 | INIT_LIST_HEAD(&list); | 550 | INIT_LIST_HEAD(&list); |
559 | INIT_LIST_HEAD(&duplicates); | 551 | INIT_LIST_HEAD(&duplicates); |
@@ -568,7 +560,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
568 | } | 560 | } |
569 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); | 561 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); |
570 | if (r) { | 562 | if (r) { |
571 | mutex_unlock(&fpriv->vm.mutex); | ||
572 | drm_gem_object_unreference_unlocked(gobj); | 563 | drm_gem_object_unreference_unlocked(gobj); |
573 | return r; | 564 | return r; |
574 | } | 565 | } |
@@ -577,7 +568,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
577 | if (!bo_va) { | 568 | if (!bo_va) { |
578 | ttm_eu_backoff_reservation(&ticket, &list); | 569 | ttm_eu_backoff_reservation(&ticket, &list); |
579 | drm_gem_object_unreference_unlocked(gobj); | 570 | drm_gem_object_unreference_unlocked(gobj); |
580 | mutex_unlock(&fpriv->vm.mutex); | ||
581 | return -ENOENT; | 571 | return -ENOENT; |
582 | } | 572 | } |
583 | 573 | ||
@@ -602,7 +592,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
602 | ttm_eu_backoff_reservation(&ticket, &list); | 592 | ttm_eu_backoff_reservation(&ticket, &list); |
603 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) | 593 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) |
604 | amdgpu_gem_va_update_vm(adev, bo_va, args->operation); | 594 | amdgpu_gem_va_update_vm(adev, bo_va, args->operation); |
605 | mutex_unlock(&fpriv->vm.mutex); | 595 | |
606 | drm_gem_object_unreference_unlocked(gobj); | 596 | drm_gem_object_unreference_unlocked(gobj); |
607 | return r; | 597 | return r; |
608 | } | 598 | } |