author	Christian König <christian.koenig@amd.com>	2016-03-08 11:58:35 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2016-03-09 13:04:02 -0500
commit	20250215821140801369b84f8cbe79a459a82ba5
tree	f405d7f760da6ef1ef2f9f37ef6f4801f02ee935
parent	e17841b97587adfbe96f48c488fd0873ddfcaff0
drm/amdgpu: Revert "add lock for interval tree in vm"
Not needed any more because we need to protect the elements on the list anyway.

This reverts commit fe237ed7efec8ac147a4572fdf81173a7f8ddda7.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
 drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 ++--------------
 2 files changed, 2 insertions(+), 15 deletions(-)
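
The pattern involved: vm->va is a Linux interval tree (a struct rb_root walked with the interval_tree_* helpers), and the reverted commit had guarded every access with a dedicated spinlock. Below is a minimal illustrative sketch of the before/after, not part of the patch; the example_* type and function names are made up for illustration, while it_lock, va and the interval_tree_* calls match the code being changed.

#include <linux/interval_tree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative stand-in for the relevant amdgpu_vm members. */
struct example_vm {
	spinlock_t	it_lock;	/* the lock this commit removes */
	struct rb_root	va;		/* interval tree of mapped ranges */
};

/* With the reverted commit: every tree access took the spinlock. */
static bool range_is_mapped_locked(struct example_vm *vm,
				   unsigned long saddr, unsigned long eaddr)
{
	struct interval_tree_node *it;

	spin_lock(&vm->it_lock);
	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
	spin_unlock(&vm->it_lock);

	return it != NULL;
}

/* After this revert: the tree is touched directly, relying on the
 * serialization the callers already need for the mapping lists. */
static bool range_is_mapped(struct example_vm *vm,
			    unsigned long saddr, unsigned long eaddr)
{
	return interval_tree_iter_first(&vm->va, saddr, eaddr) != NULL;
}
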
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e32ab13227d4..28b4088b2530 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -888,7 +888,6 @@ struct amdgpu_vm_id {
 
 struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
-	spinlock_t	it_lock;
 	struct rb_root	va;
 
 	/* protecting invalidated */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7e73e54eadb9..0e6d0d1f4041 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1107,9 +1107,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
-	spin_lock(&vm->it_lock);
 	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
-	spin_unlock(&vm->it_lock);
 	if (it) {
 		struct amdgpu_bo_va_mapping *tmp;
 		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1136,10 +1134,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	mutex_lock(&bo_va->mutex);
 	list_add(&mapping->list, &bo_va->invalids);
 	mutex_unlock(&bo_va->mutex);
-	spin_lock(&vm->it_lock);
 	interval_tree_insert(&mapping->it, &vm->va);
-	spin_unlock(&vm->it_lock);
-	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
 	/* Make sure the page tables are allocated */
 	saddr >>= amdgpu_vm_block_size;
@@ -1191,9 +1186,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 error_free:
 	list_del(&mapping->list);
-	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
-	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 	kfree(mapping);
 
@@ -1243,9 +1236,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	}
 	mutex_unlock(&bo_va->mutex);
 	list_del(&mapping->list);
-	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
-	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
 	if (valid)
@@ -1280,17 +1271,13 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
-		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
-		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 		list_add(&mapping->list, &vm->freed);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
-		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
-		spin_unlock(&vm->it_lock);
 		kfree(mapping);
 	}
 	fence_put(bo_va->last_pt_update);
@@ -1347,7 +1334,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
-	spin_lock_init(&vm->it_lock);
+
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1434,6 +1421,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
+
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_vm_id *id = &vm->ids[i];
 