author     Christian König <christian.koenig@amd.com>  2016-03-08 12:03:27 -0500
committer  Alex Deucher <alexander.deucher@amd.com>    2016-03-09 13:04:02 -0500
commit     32b41ac21fde8f7cea465d74c570fc7bd0089163
tree       a700f466f89fa94ce2b0cc9f63e24ba48518f67b
parent     20250215821140801369b84f8cbe79a459a82ba5
drm/amdgpu: Revert "add mutex for ba_va->valids/invalids"
Not needed any more, because the elements on the list have to be protected by other means anyway (the BO being reserved).
This reverts commit 38bf516c75b4ef0f5c716e05fa9baab7c52d6c39.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
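
In practice the revert swaps a fine-grained per-bo_va lock for protection that is already there: the bo_va lists are documented as "protected by bo being reserved" in amdgpu.h. A minimal before/after sketch of the pattern, distilled from the hunks below (illustrative only, not the kernel source):

	/* Before the revert: every touch of bo_va->valids/invalids took
	 * the private per-bo_va mutex. */
	mutex_lock(&bo_va->mutex);
	list_add(&mapping->list, &bo_va->invalids);
	mutex_unlock(&bo_va->mutex);

	/* After the revert: the caller already holds the BO reservation
	 * (what the "protected by bo being reserved" comment in amdgpu.h
	 * refers to), so the plain list_add() is safe without an extra
	 * lock. */
	list_add(&mapping->list, &bo_va->invalids);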
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  1 -
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 17 ++++++-----------
2 files changed, 6 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 28b4088b2530..9a03d566bf6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -484,7 +484,6 @@ struct amdgpu_bo_va_mapping {
 
 /* bo virtual addresses in a specific vm */
 struct amdgpu_bo_va {
-	struct mutex			mutex;
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
 	struct fence			*last_pt_update;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0e6d0d1f4041..b6c011b83641 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1009,9 +1009,8 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 		bo_va = list_first_entry(&vm->invalidated,
 			struct amdgpu_bo_va, vm_status);
 		spin_unlock(&vm->status_lock);
-		mutex_lock(&bo_va->mutex);
+
 		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
-		mutex_unlock(&bo_va->mutex);
 		if (r)
 			return r;
 
@@ -1055,7 +1054,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 	INIT_LIST_HEAD(&bo_va->vm_status);
-	mutex_init(&bo_va->mutex);
+
 	list_add_tail(&bo_va->bo_list, &bo->va);
 
 	return bo_va;
@@ -1131,9 +1130,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	mapping->offset = offset;
 	mapping->flags = flags;
 
-	mutex_lock(&bo_va->mutex);
 	list_add(&mapping->list, &bo_va->invalids);
-	mutex_unlock(&bo_va->mutex);
 	interval_tree_insert(&mapping->it, &vm->va);
 
 	/* Make sure the page tables are allocated */
@@ -1215,7 +1212,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-	mutex_lock(&bo_va->mutex);
+
 	list_for_each_entry(mapping, &bo_va->valids, list) {
 		if (mapping->it.start == saddr)
 			break;
@@ -1229,12 +1226,10 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 				break;
 		}
 
-		if (&mapping->list == &bo_va->invalids) {
-			mutex_unlock(&bo_va->mutex);
+		if (&mapping->list == &bo_va->invalids)
 			return -ENOENT;
-		}
 	}
-	mutex_unlock(&bo_va->mutex);
+
 	list_del(&mapping->list);
 	interval_tree_remove(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
@@ -1280,8 +1275,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		interval_tree_remove(&mapping->it, &vm->va);
 		kfree(mapping);
 	}
+
 	fence_put(bo_va->last_pt_update);
-	mutex_destroy(&bo_va->mutex);
 	kfree(bo_va);
 }
 
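
For context on the first amdgpu_vm.c hunk: amdgpu_vm_clear_invalids() drains vm->invalidated while toggling vm->status_lock around each update, which is where the per-bo_va mutex used to be taken. A hedged reconstruction of that loop, inferred from the context lines above rather than copied from the tree:

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		struct amdgpu_bo_va *bo_va =
			list_first_entry(&vm->invalidated,
					 struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		/* Formerly bracketed by bo_va->mutex; after the revert the
		 * update relies on the BO reservation held by the caller. */
		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);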