path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	67
1 file changed, 39 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5624d4484fb6..f2166320a5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -951,21 +951,24 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		addr = 0;
 	}
 
-	if (addr == bo_va->addr)
-		return 0;
-
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
 
-	list_for_each_entry(mapping, &bo_va->mappings, list) {
+	spin_lock(&vm->status_lock);
+	if (!list_empty(&bo_va->vm_status))
+		list_splice_init(&bo_va->valids, &bo_va->invalids);
+	spin_unlock(&vm->status_lock);
+
+	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
 						flags, &bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
 
-	bo_va->addr = addr;
 	spin_lock(&vm->status_lock);
 	list_del_init(&bo_va->vm_status);
+	if (!mem)
+		list_add(&bo_va->vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
 	return 0;
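
Note on the hunk above: the old single bo_va->addr check is replaced by a two-list scheme. When the bo_va sits on a VM status list (vm_status non-empty), every previously valid mapping is spliced onto the invalids list so the loop below rewrites its page table entries. A minimal sketch of the splice idiom, with demo_* names as hypothetical stand-ins for the amdgpu structures:

	/* Sketch only: list_splice_init() moves every entry from @valids
	 * onto @invalids in O(1) and leaves @valids empty and reusable. */
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_bo_va {
		spinlock_t status_lock;		/* stands in for vm->status_lock */
		struct list_head valids;	/* mappings whose PTEs are current */
		struct list_head invalids;	/* mappings needing a PTE rewrite */
	};

	static void demo_invalidate_mappings(struct demo_bo_va *bo_va)
	{
		spin_lock(&bo_va->status_lock);
		list_splice_init(&bo_va->valids, &bo_va->invalids);
		spin_unlock(&bo_va->status_lock);
	}
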
@@ -1065,10 +1068,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	}
 	bo_va->vm = vm;
 	bo_va->bo = bo;
-	bo_va->addr = 0;
 	bo_va->ref_count = 1;
 	INIT_LIST_HEAD(&bo_va->bo_list);
-	INIT_LIST_HEAD(&bo_va->mappings);
+	INIT_LIST_HEAD(&bo_va->valids);
+	INIT_LIST_HEAD(&bo_va->invalids);
 	INIT_LIST_HEAD(&bo_va->vm_status);
 
 	mutex_lock(&vm->mutex);
@@ -1157,12 +1160,10 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	mapping->offset = offset;
 	mapping->flags = flags;
 
-	list_add(&mapping->list, &bo_va->mappings);
+	list_add(&mapping->list, &bo_va->invalids);
 	interval_tree_insert(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
-	bo_va->addr = 0;
-
 	/* Make sure the page tables are allocated */
 	saddr >>= amdgpu_vm_block_size;
 	eaddr >>= amdgpu_vm_block_size;
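
Each new mapping now starts life on the invalids list (no PTEs written yet), while interval_tree_insert() additionally indexes it in the per-VM tree vm->va, keyed by GPU page range. A hedged sketch of an overlap check against such a tree (assuming the rb_root-based interval tree API of this kernel generation; demo_range_busy is a hypothetical helper):

	#include <linux/interval_tree.h>

	/* Does any existing mapping overlap [saddr, eaddr]?
	 * interval_tree_iter_first() returns the first overlapping
	 * node, or NULL when the range is free. */
	static bool demo_range_busy(struct rb_root *va,
				    unsigned long saddr, unsigned long eaddr)
	{
		return interval_tree_iter_first(va, saddr, eaddr) != NULL;
	}
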
@@ -1243,17 +1244,27 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 {
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_vm *vm = bo_va->vm;
+	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 
-	list_for_each_entry(mapping, &bo_va->mappings, list) {
+	list_for_each_entry(mapping, &bo_va->valids, list) {
 		if (mapping->it.start == saddr)
 			break;
 	}
 
-	if (&mapping->list == &bo_va->mappings) {
-		amdgpu_bo_unreserve(bo_va->bo);
-		return -ENOENT;
+	if (&mapping->list == &bo_va->valids) {
+		valid = false;
+
+		list_for_each_entry(mapping, &bo_va->invalids, list) {
+			if (mapping->it.start == saddr)
+				break;
+		}
+
+		if (&mapping->list == &bo_va->invalids) {
+			amdgpu_bo_unreserve(bo_va->bo);
+			return -ENOENT;
+		}
 	}
 
 	mutex_lock(&vm->mutex);
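
The unmap path now searches the valids list first and falls back to invalids, recording in "valid" which list the mapping came from. The "&mapping->list == &bo_va->valids" test is the standard kernel idiom for "the loop finished without break-ing": once list_for_each_entry() exhausts the list, the cursor's embedded list_head aliases the head itself. A small sketch, with demo_* names as hypothetical stand-ins:

	#include <linux/list.h>

	struct demo_mapping {
		struct list_head list;
		unsigned long start;
	};

	static struct demo_mapping *demo_find_mapping(struct list_head *head,
						      unsigned long saddr)
	{
		struct demo_mapping *m;

		list_for_each_entry(m, head, list) {
			if (m->start == saddr)
				return m;	/* found: same as break-ing out */
		}
		/* loop ran off the end: here &m->list would equal head */
		return NULL;
	}
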
@@ -1261,12 +1272,10 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	interval_tree_remove(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-	if (bo_va->addr) {
-		/* clear the old address */
+	if (valid)
 		list_add(&mapping->list, &vm->freed);
-	} else {
+	else
 		kfree(mapping);
-	}
 	mutex_unlock(&vm->mutex);
 	amdgpu_bo_unreserve(bo_va->bo);
 
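
The "valid" flag decides the mapping's fate: a valid mapping has live page table entries, so it is queued on vm->freed for a deferred PTE clear, while an invalid one never reached the page tables and can be kfree()d on the spot. Elsewhere in this file, amdgpu_vm_clear_freed() drains that list; roughly like this (a sketch, not the verbatim function):

	/* Pop each dead mapping, clear its PTEs by mapping the range
	 * to address 0 with no flags, then free the tracking struct. */
	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
					   struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);
		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;
	}
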
@@ -1297,15 +1306,18 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 	list_del(&bo_va->vm_status);
 	spin_unlock(&vm->status_lock);
 
-	list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
+	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
 		interval_tree_remove(&mapping->it, &vm->va);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
-		if (bo_va->addr)
-			list_add(&mapping->list, &vm->freed);
-		else
-			kfree(mapping);
+		list_add(&mapping->list, &vm->freed);
+	}
+	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
+		list_del(&mapping->list);
+		interval_tree_remove(&mapping->it, &vm->va);
+		kfree(mapping);
 	}
+
 	amdgpu_fence_unref(&bo_va->last_pt_update);
 	kfree(bo_va);
 
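
Both teardown loops above delete and free entries while walking, hence list_for_each_entry_safe(), which fetches the next element before the loop body runs so freeing the current one cannot derail the iteration. Sketch with hypothetical demo_* names:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_node {
		struct list_head list;
	};

	static void demo_drain(struct list_head *head)
	{
		struct demo_node *n, *next;

		/* "next" is cached before the body may kfree(n) */
		list_for_each_entry_safe(n, next, head, list) {
			list_del(&n->list);
			kfree(n);
		}
	}
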
@@ -1327,12 +1339,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	struct amdgpu_bo_va *bo_va;
 
 	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->addr) {
-			spin_lock(&bo_va->vm->status_lock);
-			list_del(&bo_va->vm_status);
+		spin_lock(&bo_va->vm->status_lock);
+		if (list_empty(&bo_va->vm_status))
 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-			spin_unlock(&bo_va->vm->status_lock);
-		}
+		spin_unlock(&bo_va->vm->status_lock);
 	}
 }
 
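
The invalidate path can rely on list_empty(&bo_va->vm_status) as a membership test because the update path detaches the node with list_del_init(), which leaves the entry pointing at itself; a bo_va already queued on the invalidated list is left alone, replacing the old bo_va->addr check and list_del()/list_add() shuffle. Illustrative sketch (hypothetical names):

	#include <linux/list.h>

	/* list_del_init() leaves the node self-linked, so list_empty()
	 * on the node itself answers "is this entry on any list?". */
	static void demo_queue_once(struct list_head *node,
				    struct list_head *queue)
	{
		if (list_empty(node))	/* not queued yet */
			list_add(node, queue);
	}
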
@@ -1360,6 +1370,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	vm->va = RB_ROOT;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
+	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 
 	pd_size = amdgpu_vm_directory_size(adev);