Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	35
1 file changed, 3 insertions(+), 32 deletions(-)
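
The change below is mechanical: every deleted line takes or releases vm->mutex, or exists only to cope with that mutex being dropped. The locking goes away in amdgpu_vm_get_bos(), amdgpu_vm_bo_add(), amdgpu_vm_bo_map(), amdgpu_vm_bo_unmap() and amdgpu_vm_bo_rmv(); with it goes the drop-lock/allocate/re-check dance around page-table allocation in amdgpu_vm_bo_map(), and the error_unlock label shrinks to a plain error label. This diff-only view omits the commit message, so the replacement serialization scheme is not stated here; the reservation-object locking (ww_mutex_lock(&resv->lock, NULL)) that the patch leaves in place is the likely candidate. A minimal sketch of the removed pattern follows the diff.
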
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 644fd9b8591f..ff26e330ccd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -90,11 +90,9 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 	struct amdgpu_bo_list_entry *list;
 	unsigned i, idx;
 
-	mutex_lock(&vm->mutex);
 	list = drm_malloc_ab(vm->max_pde_used + 2,
 			     sizeof(struct amdgpu_bo_list_entry));
 	if (!list) {
-		mutex_unlock(&vm->mutex);
 		return NULL;
 	}
 
@@ -119,7 +117,6 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 		list[idx].tv.shared = true;
 		list_add(&list[idx++].tv.head, head);
 	}
-	mutex_unlock(&vm->mutex);
 
 	return list;
 }
@@ -972,9 +969,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&bo_va->invalids);
 	INIT_LIST_HEAD(&bo_va->vm_status);
 
-	mutex_lock(&vm->mutex);
 	list_add_tail(&bo_va->bo_list, &bo->va);
-	mutex_unlock(&vm->mutex);
 
 	return bo_va;
 }
@@ -1027,8 +1022,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		return -EINVAL;
 	}
 
-	mutex_lock(&vm->mutex);
-
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
@@ -1042,14 +1035,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 			tmp->it.start, tmp->it.last + 1);
 		amdgpu_bo_unreserve(bo_va->bo);
 		r = -EINVAL;
-		goto error_unlock;
+		goto error;
 	}
 
 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
 	if (!mapping) {
 		amdgpu_bo_unreserve(bo_va->bo);
 		r = -ENOMEM;
-		goto error_unlock;
+		goto error;
 	}
 
 	INIT_LIST_HEAD(&mapping->list);
@@ -1081,9 +1074,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		if (vm->page_tables[pt_idx].bo)
 			continue;
 
-		/* drop mutex to allocate and clear page table */
-		mutex_unlock(&vm->mutex);
-
 		ww_mutex_lock(&resv->lock, NULL);
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
@@ -1100,32 +1090,19 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 			goto error_free;
 		}
 
-		/* aquire mutex again */
-		mutex_lock(&vm->mutex);
-		if (vm->page_tables[pt_idx].bo) {
-			/* someone else allocated the pt in the meantime */
-			mutex_unlock(&vm->mutex);
-			amdgpu_bo_unref(&pt);
-			mutex_lock(&vm->mutex);
-			continue;
-		}
-
 		vm->page_tables[pt_idx].addr = 0;
 		vm->page_tables[pt_idx].bo = pt;
 	}
 
-	mutex_unlock(&vm->mutex);
 	return 0;
 
 error_free:
-	mutex_lock(&vm->mutex);
 	list_del(&mapping->list);
 	interval_tree_remove(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 	kfree(mapping);
 
-error_unlock:
-	mutex_unlock(&vm->mutex);
+error:
 	return r;
 }
 
@@ -1170,7 +1147,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		}
 	}
 
-	mutex_lock(&vm->mutex);
 	list_del(&mapping->list);
 	interval_tree_remove(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
@@ -1179,7 +1155,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		list_add(&mapping->list, &vm->freed);
 	else
 		kfree(mapping);
-	mutex_unlock(&vm->mutex);
 	amdgpu_bo_unreserve(bo_va->bo);
 
 	return 0;
@@ -1203,8 +1178,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 	list_del(&bo_va->bo_list);
 
-	mutex_lock(&vm->mutex);
-
 	spin_lock(&vm->status_lock);
 	list_del(&bo_va->vm_status);
 	spin_unlock(&vm->status_lock);
@@ -1223,8 +1196,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 	fence_put(bo_va->last_pt_update);
 	kfree(bo_va);
-
-	mutex_unlock(&vm->mutex);
 }
 
 /**
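
Why the old code needed the re-check it is deleting: allocating and clearing a page table can sleep, so amdgpu_vm_bo_map() had to drop vm->mutex around amdgpu_bo_create(). In that window another thread could populate the same page_tables[pt_idx] slot first, hence the second check and the unref of the losing allocation. Below is a minimal user-space sketch of that pattern, assuming hypothetical stand-ins (struct vm, alloc_and_clear_pt(), NUM_PTS) in place of the real amdgpu structures:

/*
 * Minimal sketch of the drop-lock/re-check pattern the patch removes from
 * amdgpu_vm_bo_map(). Everything here is a hypothetical stand-in: struct vm,
 * NUM_PTS and alloc_and_clear_pt() are illustrative, not amdgpu API.
 */
#include <pthread.h>
#include <stdlib.h>

#define NUM_PTS 8

struct vm {
	pthread_mutex_t mutex;
	void *page_tables[NUM_PTS];	/* NULL until lazily allocated */
};

/* Stands in for amdgpu_bo_create() plus clearing: it may sleep, so under
 * the old locking rules it could not run with vm->mutex held. */
static void *alloc_and_clear_pt(void)
{
	return calloc(1, 4096);
}

static void alloc_range(struct vm *vm, unsigned first, unsigned last)
{
	unsigned pt_idx;

	pthread_mutex_lock(&vm->mutex);
	for (pt_idx = first; pt_idx <= last; ++pt_idx) {
		void *pt;

		if (vm->page_tables[pt_idx])
			continue;

		/* drop mutex to allocate and clear the page table */
		pthread_mutex_unlock(&vm->mutex);
		pt = alloc_and_clear_pt();
		pthread_mutex_lock(&vm->mutex);

		if (vm->page_tables[pt_idx]) {
			/* someone else allocated the pt in the meantime */
			free(pt);	/* amdgpu_bo_unref() in the original */
			continue;
		}
		vm->page_tables[pt_idx] = pt;
	}
	pthread_mutex_unlock(&vm->mutex);
}

int main(void)
{
	struct vm vm = { .mutex = PTHREAD_MUTEX_INITIALIZER };
	unsigned i;

	alloc_range(&vm, 0, NUM_PTS - 1);
	for (i = 0; i < NUM_PTS; ++i)
		free(vm.page_tables[i]);
	return 0;
}

After the patch the loop body is just the NULL check and the assignment. The kernel version was even more contorted than this sketch: amdgpu_bo_unref() may itself sleep, which forced yet another unlock/lock pair around freeing the losing allocation, exactly the block the diff deletes.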