path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
author     Thomas Gleixner <tglx@linutronix.de>  2016-01-12 05:01:12 -0500
committer  Thomas Gleixner <tglx@linutronix.de>  2016-01-12 05:01:12 -0500
commit     1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch)
tree       44db563f64cf5f8d62af8f99a61e2b248c44ea3a /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent     03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff)
parent     f9eccf24615672896dc13251410c3f2f33a14f95 (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent

Pull in fixes from Daniel Lezcano:

 - Fix the vt8500 timer leading to a system lock-up when dealing with a
   too-small delta (Roman Volkov)

 - Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with
   COMPILE_TEST (Daniel Lezcano)

 - Prevent timers that use the 'iomem' API from being compiled when the
   architecture does not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  40
1 file changed, 29 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 159ce54bbd8d..b53d273eb7a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *mapping;
 	int r;
 
+	spin_lock(&vm->freed_lock);
 	while (!list_empty(&vm->freed)) {
 		mapping = list_first_entry(&vm->freed,
 			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
-
+		spin_unlock(&vm->freed_lock);
 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
 		kfree(mapping);
 		if (r)
 			return r;
 
+		spin_lock(&vm->freed_lock);
 	}
+	spin_unlock(&vm->freed_lock);
+
 	return 0;
 
 }
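
The hunk above drops vm->freed_lock across amdgpu_vm_bo_update_mapping(), since that call can sleep and a spinlock must not be held over a sleeping call; the lock is retaken before the list head is examined again. A minimal sketch of the same consume-under-lock pattern, assuming a kernel-module context and hypothetical names (my_dev, work_item, drain_freed):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct work_item {
		struct list_head list;
	};

	struct my_dev {
		spinlock_t freed_lock;	/* protects 'freed' */
		struct list_head freed;	/* spin_lock_init()/INIT_LIST_HEAD() at setup */
	};

	/* Consume a spinlock-protected list when the per-item work may
	 * sleep: the lock is dropped around the work and retaken before
	 * the list is examined again, so list_empty() and
	 * list_first_entry() always run under the lock.
	 */
	static void drain_freed(struct my_dev *dev)
	{
		struct work_item *item;

		spin_lock(&dev->freed_lock);
		while (!list_empty(&dev->freed)) {
			item = list_first_entry(&dev->freed,
						struct work_item, list);
			list_del(&item->list);
			spin_unlock(&dev->freed_lock);	/* work below may sleep */

			/* ... sleeping per-item work would go here ... */
			kfree(item);

			spin_lock(&dev->freed_lock);
		}
		spin_unlock(&dev->freed_lock);
	}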
@@ -922,8 +926,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 		bo_va = list_first_entry(&vm->invalidated,
 			struct amdgpu_bo_va, vm_status);
 		spin_unlock(&vm->status_lock);
-
+		mutex_lock(&bo_va->mutex);
 		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+		mutex_unlock(&bo_va->mutex);
 		if (r)
 			return r;
 
@@ -967,7 +972,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 	INIT_LIST_HEAD(&bo_va->vm_status);
-
+	mutex_init(&bo_va->mutex);
 	list_add_tail(&bo_va->bo_list, &bo->va);
 
 	return bo_va;
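
The two hunks above move serialization from a VM-wide lock to a per-bo_va mutex: it is initialized when the object is created in amdgpu_vm_bo_add() and taken only around updates of that object's own lists. A sketch of the init/destroy pairing, with hypothetical names (obj_va, obj_va_create, obj_va_free):

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	/* Hypothetical per-object state mirroring struct amdgpu_bo_va:
	 * the mutex serializes access to this object's mapping lists
	 * instead of one VM-wide lock.
	 */
	struct obj_va {
		struct mutex mutex;	/* protects valids/invalids */
		struct list_head valids;
		struct list_head invalids;
	};

	static struct obj_va *obj_va_create(void)
	{
		struct obj_va *ova = kzalloc(sizeof(*ova), GFP_KERNEL);

		if (!ova)
			return NULL;
		INIT_LIST_HEAD(&ova->valids);
		INIT_LIST_HEAD(&ova->invalids);
		mutex_init(&ova->mutex);	/* paired with mutex_destroy() */
		return ova;
	}

	static void obj_va_free(struct obj_va *ova)
	{
		mutex_destroy(&ova->mutex);
		kfree(ova);
	}

mutex_destroy() is essentially a no-op outside debug builds, but pairing it with mutex_init() keeps mutex debugging accurate, which is what the bo_va teardown hunk further down does as well.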
@@ -1045,7 +1050,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	mapping->offset = offset;
 	mapping->flags = flags;
 
+	mutex_lock(&bo_va->mutex);
 	list_add(&mapping->list, &bo_va->invalids);
+	mutex_unlock(&bo_va->mutex);
 	spin_lock(&vm->it_lock);
 	interval_tree_insert(&mapping->it, &vm->va);
 	spin_unlock(&vm->it_lock);
@@ -1076,6 +1083,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		if (r)
 			goto error_free;
 
+		/* Keep a reference to the page table to avoid freeing
+		 * them up in the wrong order.
+		 */
+		pt->parent = amdgpu_bo_ref(vm->page_directory);
+
 		r = amdgpu_vm_clear_bo(adev, pt);
 		if (r) {
 			amdgpu_bo_unref(&pt);
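
The hunk above pins the page directory from each newly allocated page table, so the parent outlives its children no matter which path drops the last reference. A sketch of the same ordering trick with a hypothetical kref-based object (table, table_release, table_get):

	#include <linux/kref.h>
	#include <linux/slab.h>

	/* Hypothetical refcounted object: each child pins its parent,
	 * so the parent cannot be freed before its children, whatever
	 * order the final puts happen in.
	 */
	struct table {
		struct kref ref;
		struct table *parent;	/* holds a reference, may be NULL */
	};

	static void table_release(struct kref *ref)
	{
		struct table *t = container_of(ref, struct table, ref);

		if (t->parent)
			kref_put(&t->parent->ref, table_release);
		kfree(t);
	}

	static struct table *table_get(struct table *t)
	{
		kref_get(&t->ref);
		return t;
	}

In the real code, amdgpu_bo_ref()/amdgpu_bo_unref() play the get/put roles on the buffer objects themselves.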
@@ -1121,7 +1133,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-
+	mutex_lock(&bo_va->mutex);
 	list_for_each_entry(mapping, &bo_va->valids, list) {
 		if (mapping->it.start == saddr)
 			break;
@@ -1135,20 +1147,25 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 				break;
 		}
 
-		if (&mapping->list == &bo_va->invalids)
+		if (&mapping->list == &bo_va->invalids) {
+			mutex_unlock(&bo_va->mutex);
 			return -ENOENT;
+		}
 	}
-
+	mutex_unlock(&bo_va->mutex);
 	list_del(&mapping->list);
 	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
 	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-	if (valid)
+	if (valid) {
+		spin_lock(&vm->freed_lock);
 		list_add(&mapping->list, &vm->freed);
-	else
+		spin_unlock(&vm->freed_lock);
+	} else {
 		kfree(mapping);
+	}
 
 	return 0;
 }
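
Finer-grained locking makes every early return a potential lock leak: the hunk above grows an explicit mutex_unlock() before the -ENOENT return and another before the normal path continues. One common alternative is a single unlock site reached by goto, sketched here with hypothetical names (obj_va, find_mapping, obj_va_unmap):

	#include <linux/errno.h>
	#include <linux/mutex.h>
	#include <linux/types.h>

	struct obj_va {
		struct mutex mutex;	/* protects the mapping lists */
		/* ... valids/invalids lists as in the sketch further up ... */
	};

	/* Hypothetical lookup, stubbed out so the sketch is self-contained. */
	static bool find_mapping(struct obj_va *ova, u64 saddr)
	{
		return false;
	}

	/* Funnel every exit through one unlock site so a newly added
	 * return path cannot leak the mutex; the hunk above instead
	 * unlocks explicitly before each return.
	 */
	static int obj_va_unmap(struct obj_va *ova, u64 saddr)
	{
		int r = 0;

		mutex_lock(&ova->mutex);
		if (!find_mapping(ova, saddr)) {
			r = -ENOENT;
			goto out;
		}
		/* ... detach the mapping while the mutex is held ... */
	out:
		mutex_unlock(&ova->mutex);
		return r;
	}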
@@ -1181,7 +1198,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		interval_tree_remove(&mapping->it, &vm->va);
 		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
+		spin_lock(&vm->freed_lock);
 		list_add(&mapping->list, &vm->freed);
+		spin_unlock(&vm->freed_lock);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
@@ -1190,8 +1209,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		spin_unlock(&vm->it_lock);
 		kfree(mapping);
 	}
-
 	fence_put(bo_va->last_pt_update);
+	mutex_destroy(&bo_va->mutex);
 	kfree(bo_va);
 }
 
@@ -1236,13 +1255,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		vm->ids[i].id = 0;
 		vm->ids[i].flushed_updates = NULL;
 	}
-	mutex_init(&vm->mutex);
 	vm->va = RB_ROOT;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 	spin_lock_init(&vm->it_lock);
+	spin_lock_init(&vm->freed_lock);
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1320,7 +1339,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		fence_put(vm->ids[i].flushed_updates);
 	}
 
-	mutex_destroy(&vm->mutex);
 }
 
 /**
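
Taken together, these hunks retire the coarse vm->mutex: per-list spinlocks (status_lock, it_lock and the new freed_lock) guard the VM's lists and interval tree, while per-object state moves under bo_va->mutex. A trimmed sketch of the resulting layout, keeping only the locking-relevant fields and using a hypothetical struct name (vm_sketch):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* Trimmed sketch of the VM state after this merge: the coarse
	 * vm->mutex is gone and each shared list carries its own
	 * spinlock, while per-bo_va state lives under bo_va->mutex.
	 */
	struct vm_sketch {
		spinlock_t status_lock;		/* protects 'invalidated' */
		struct list_head invalidated;
		struct list_head cleared;
		spinlock_t freed_lock;		/* protects 'freed' (new here) */
		struct list_head freed;
		spinlock_t it_lock;		/* protects the interval tree */
	};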