about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu
diff options
context:
space:
mode:
authorChunming Zhou <David1.Zhou@amd.com>2015-11-13 00:32:01 -0500
committerAlex Deucher <alexander.deucher@amd.com>2015-11-18 11:40:55 -0500
commitc25867dfabf045a8148fd179fa759bb17f670e42 (patch)
tree2bffcacfc26a89cd5d4166493c01b75843de2caf /drivers/gpu/drm/amd/amdgpu
parent1c16c0a7b26c6c905dc79c4194135ca2f360f0f5 (diff)
drm/amdgpu: add lock for interval tree in vm
Change-Id: I62b892a22af37b32e6b4aefca80a25cf45426ed2
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c15
2 files changed, 15 insertions, 2 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a5692624070a..306f75700bf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -954,6 +954,8 @@ struct amdgpu_vm {
954 954
955 /* for id and flush management per ring */ 955 /* for id and flush management per ring */
956 struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; 956 struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
957 /* for interval tree */
958 spinlock_t it_lock;
957}; 959};
958 960
959struct amdgpu_vm_manager { 961struct amdgpu_vm_manager {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0bdbb2480f9b..0513f3fed2c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1028,7 +1028,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1028 saddr /= AMDGPU_GPU_PAGE_SIZE; 1028 saddr /= AMDGPU_GPU_PAGE_SIZE;
1029 eaddr /= AMDGPU_GPU_PAGE_SIZE; 1029 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1030 1030
1031 spin_lock(&vm->it_lock);
1031 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); 1032 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
1033 spin_unlock(&vm->it_lock);
1032 if (it) { 1034 if (it) {
1033 struct amdgpu_bo_va_mapping *tmp; 1035 struct amdgpu_bo_va_mapping *tmp;
1034 tmp = container_of(it, struct amdgpu_bo_va_mapping, it); 1036 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1055,7 +1057,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1055 mapping->flags = flags; 1057 mapping->flags = flags;
1056 1058
1057 list_add(&mapping->list, &bo_va->invalids); 1059 list_add(&mapping->list, &bo_va->invalids);
1060 spin_lock(&vm->it_lock);
1058 interval_tree_insert(&mapping->it, &vm->va); 1061 interval_tree_insert(&mapping->it, &vm->va);
1062 spin_unlock(&vm->it_lock);
1059 trace_amdgpu_vm_bo_map(bo_va, mapping); 1063 trace_amdgpu_vm_bo_map(bo_va, mapping);
1060 1064
1061 /* Make sure the page tables are allocated */ 1065 /* Make sure the page tables are allocated */
@@ -1101,7 +1105,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1101 1105
1102error_free: 1106error_free:
1103 list_del(&mapping->list); 1107 list_del(&mapping->list);
1108 spin_lock(&vm->it_lock);
1104 interval_tree_remove(&mapping->it, &vm->va); 1109 interval_tree_remove(&mapping->it, &vm->va);
1110 spin_unlock(&vm->it_lock);
1105 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1111 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1106 kfree(mapping); 1112 kfree(mapping);
1107 1113
@@ -1151,7 +1157,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1151 } 1157 }
1152 1158
1153 list_del(&mapping->list); 1159 list_del(&mapping->list);
1160 spin_lock(&vm->it_lock);
1154 interval_tree_remove(&mapping->it, &vm->va); 1161 interval_tree_remove(&mapping->it, &vm->va);
1162 spin_unlock(&vm->it_lock);
1155 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1163 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1156 1164
1157 if (valid) 1165 if (valid)
@@ -1187,13 +1195,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1187 1195
1188 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { 1196 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1189 list_del(&mapping->list); 1197 list_del(&mapping->list);
1198 spin_lock(&vm->it_lock);
1190 interval_tree_remove(&mapping->it, &vm->va); 1199 interval_tree_remove(&mapping->it, &vm->va);
1200 spin_unlock(&vm->it_lock);
1191 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1201 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1192 list_add(&mapping->list, &vm->freed); 1202 list_add(&mapping->list, &vm->freed);
1193 } 1203 }
1194 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { 1204 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1195 list_del(&mapping->list); 1205 list_del(&mapping->list);
1206 spin_lock(&vm->it_lock);
1196 interval_tree_remove(&mapping->it, &vm->va); 1207 interval_tree_remove(&mapping->it, &vm->va);
1208 spin_unlock(&vm->it_lock);
1197 kfree(mapping); 1209 kfree(mapping);
1198 } 1210 }
1199 1211
@@ -1248,7 +1260,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1248 INIT_LIST_HEAD(&vm->invalidated); 1260 INIT_LIST_HEAD(&vm->invalidated);
1249 INIT_LIST_HEAD(&vm->cleared); 1261 INIT_LIST_HEAD(&vm->cleared);
1250 INIT_LIST_HEAD(&vm->freed); 1262 INIT_LIST_HEAD(&vm->freed);
1251 1263 spin_lock_init(&vm->it_lock);
1252 pd_size = amdgpu_vm_directory_size(adev); 1264 pd_size = amdgpu_vm_directory_size(adev);
1253 pd_entries = amdgpu_vm_num_pdes(adev); 1265 pd_entries = amdgpu_vm_num_pdes(adev);
1254 1266
@@ -1312,7 +1324,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1312 1324
1313 amdgpu_bo_unref(&vm->page_directory); 1325 amdgpu_bo_unref(&vm->page_directory);
1314 fence_put(vm->page_directory_fence); 1326 fence_put(vm->page_directory_fence);
1315
1316 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1327 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1317 unsigned id = vm->ids[i].id; 1328 unsigned id = vm->ids[i].id;
1318 1329