Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 138
1 file changed, 77 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 633a32a48560..159ce54bbd8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
         unsigned i;
 
         /* check if the id is still valid */
-        if (vm_id->id && vm_id->last_id_use &&
-            vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
-                trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
-                return 0;
+        if (vm_id->id) {
+                unsigned id = vm_id->id;
+                long owner;
+
+                owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+                if (owner == (long)vm) {
+                        trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
+                        return 0;
+                }
         }
 
         /* we definately need to flush */
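[Note: the reuse check above swaps a fence-pointer comparison for a lockless read of a per-VMID owner token: if this vm was the last owner of the VMID, no flush is needed. A minimal userspace sketch of that fast path, with C11 atomics standing in for the kernel's atomic_long_t and a fixed-size table standing in for the VM manager:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct vm;                          /* opaque client VM handle */

    /* One owner token per VMID; 0 means "no current owner". */
    static atomic_long vmid_owner[16];

    /* Lockless fast path: keep the VMID if this vm was its last owner. */
    static bool vmid_still_ours(unsigned id, struct vm *vm)
    {
        return atomic_load(&vmid_owner[id]) == (long)vm;
    }
]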
@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
         /* skip over VMID 0, since it is the system VM */
         for (i = 1; i < adev->vm_manager.nvm; ++i) {
-                struct fence *fence = adev->vm_manager.active[i];
+                struct fence *fence = adev->vm_manager.ids[i].active;
                 struct amdgpu_ring *fring;
 
                 if (fence == NULL) {
@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
         if (choices[i]) {
                 struct fence *fence;
 
-                fence = adev->vm_manager.active[choices[i]];
+                fence = adev->vm_manager.ids[choices[i]].active;
                 vm_id->id = choices[i];
 
                 trace_amdgpu_vm_grab_id(choices[i], ring->idx);
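[Note: both lookups now go through adev->vm_manager.ids[] instead of the old flat active[] fence array. The matching amdgpu.h change is outside this file's diff, so the per-VMID record below is only inferred from the accesses in these hunks, not the actual definition:

    struct fence;                                   /* kernel fence, opaque here */
    typedef struct { long counter; } atomic_long_t; /* stand-in type */

    /* Inferred per-VMID bookkeeping; the real struct lives in amdgpu.h. */
    struct vm_manager_id_sketch {
        struct fence *active;       /* last fence submitted under this VMID */
        atomic_long_t owner;        /* (long)vm that last owned the VMID */
    };
]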
@@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
         uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
         struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
         struct fence *flushed_updates = vm_id->flushed_updates;
-        bool is_earlier = false;
-
-        if (flushed_updates && updates) {
-                BUG_ON(flushed_updates->context != updates->context);
-                is_earlier = (updates->seqno - flushed_updates->seqno <=
-                              INT_MAX) ? true : false;
-        }
+        bool is_later;
 
-        if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
-            is_earlier) {
+        if (!flushed_updates)
+                is_later = true;
+        else if (!updates)
+                is_later = false;
+        else
+                is_later = fence_is_later(updates, flushed_updates);
 
+        if (pd_addr != vm_id->pd_gpu_addr || is_later) {
                 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-                if (is_earlier) {
+                if (is_later) {
                         vm_id->flushed_updates = fence_get(updates);
                         fence_put(flushed_updates);
                 }
-                if (!flushed_updates)
-                        vm_id->flushed_updates = fence_get(updates);
                 vm_id->pd_gpu_addr = pd_addr;
                 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
         }
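[Note: the open-coded seqno window test (and its BUG_ON on mismatched contexts) is replaced by fence_is_later(), which encapsulates a wraparound-safe comparison of fences on the same context. A rough sketch of the comparison idea, assuming 32-bit seqnos; the exact kernel helper may treat edge cases such as equal seqnos differently:

    #include <stdbool.h>
    #include <stdint.h>

    /* Wraparound-safe "a is later than b" for unsigned seqnos that may
     * wrap: the signed view of the difference is positive when a is
     * ahead of b by less than half the number space. */
    static bool seqno_is_later(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) > 0;
    }
]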
@@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
  */
 void amdgpu_vm_fence(struct amdgpu_device *adev,
                      struct amdgpu_vm *vm,
-                     struct amdgpu_fence *fence)
+                     struct fence *fence)
 {
-        unsigned ridx = fence->ring->idx;
-        unsigned vm_id = vm->ids[ridx].id;
-
-        fence_put(adev->vm_manager.active[vm_id]);
-        adev->vm_manager.active[vm_id] = fence_get(&fence->base);
+        struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
+        unsigned vm_id = vm->ids[ring->idx].id;
 
-        fence_put(vm->ids[ridx].last_id_use);
-        vm->ids[ridx].last_id_use = fence_get(&fence->base);
+        fence_put(adev->vm_manager.ids[vm_id].active);
+        adev->vm_manager.ids[vm_id].active = fence_get(fence);
+        atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
 }
 
 /**
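[Note: amdgpu_vm_fence() is the producer side of the owner token that amdgpu_vm_grab_id() reads above: it now takes a plain struct fence (resolved back to a ring via amdgpu_ring_from_fence()), installs the new active fence, and then publishes the vm as the VMID's owner. Continuing the earlier C11 sketch, with fence refcounting elided:

    #include <stdatomic.h>

    struct fence;
    struct vm;

    struct vm_manager_id {
        struct fence *active;       /* fence_get()/fence_put() elided */
        atomic_long owner;
    };

    static struct vm_manager_id ids[16];

    /* Install the fence first, then publish the owner token. */
    static void vm_fence_sketch(unsigned id, struct fence *fence, struct vm *vm)
    {
        ids[id].active = fence;
        atomic_store(&ids[id].owner, (long)vm);
    }
]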
@@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
  *
  * @adev: amdgpu_device pointer
  * @bo: bo to clear
+ *
+ * need to reserve bo first before calling it.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                               struct amdgpu_bo *bo)
@@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         uint64_t addr;
         int r;
 
-        r = amdgpu_bo_reserve(bo, false);
-        if (r)
-                return r;
-
         r = reservation_object_reserve_shared(bo->tbo.resv);
         if (r)
                 return r;
 
         r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
         if (r)
-                goto error_unreserve;
+                goto error;
 
         addr = amdgpu_bo_gpu_offset(bo);
         entries = amdgpu_bo_size(bo) / 8;
 
         ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
         if (!ib)
-                goto error_unreserve;
+                goto error;
 
         r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
         if (r)
@@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         if (!r)
                 amdgpu_bo_fence(bo, fence, true);
         fence_put(fence);
-        if (amdgpu_enable_scheduler) {
-                amdgpu_bo_unreserve(bo);
+        if (amdgpu_enable_scheduler)
                 return 0;
-        }
+
 error_free:
         amdgpu_ib_free(adev, ib);
         kfree(ib);
 
-error_unreserve:
-        amdgpu_bo_unreserve(bo);
+error:
         return r;
 }
 
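[Note: with its internal reserve/unreserve pair gone, amdgpu_vm_clear_bo() now runs with the BO already reserved, as the new doc comment states, and the caller takes over the bracketing (amdgpu_vm_init does so below). A small sketch of that calling convention, using a pthread mutex as a stand-in for TTM's reservation object:

    #include <pthread.h>

    struct bo {
        pthread_mutex_t resv;       /* stand-in for bo->tbo.resv */
    };

    /* Precondition: caller holds bo->resv (the BO is "reserved"). */
    static int clear_bo(struct bo *bo)
    {
        /* ... validate the BO, build and submit the clearing IB ... */
        (void)bo;
        return 0;
    }

    static int caller(struct bo *bo)
    {
        int r;

        pthread_mutex_lock(&bo->resv);      /* amdgpu_bo_reserve() */
        r = clear_bo(bo);
        pthread_mutex_unlock(&bo->resv);    /* amdgpu_bo_unreserve() */
        return r;
    }
]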
@@ -989,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
  * Add a mapping of the BO at the specefied addr into the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va,
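[Note: the same contract change applies to amdgpu_vm_bo_map() and amdgpu_vm_bo_unmap() below: the BO stays reserved across the call and the caller drops the reservation afterwards, which is what lets all the error-path unreserve calls in the following hunks disappear. A hypothetical caller sketch (not the literal ioctl code, error handling trimmed):

    int r;

    r = amdgpu_bo_reserve(bo_va->bo, false);
    if (r)
        return r;

    r = amdgpu_vm_bo_map(adev, bo_va, saddr, offset, size, flags);

    amdgpu_bo_unreserve(bo_va->bo);
    return r;
]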
@@ -1005,30 +1001,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
         /* validate the parameters */
         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-            size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
-                amdgpu_bo_unreserve(bo_va->bo);
+            size == 0 || size & AMDGPU_GPU_PAGE_MASK)
                 return -EINVAL;
-        }
 
         /* make sure object fit at this offset */
         eaddr = saddr + size;
-        if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
-                amdgpu_bo_unreserve(bo_va->bo);
+        if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
                 return -EINVAL;
-        }
 
         last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
         if (last_pfn > adev->vm_manager.max_pfn) {
                 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
                         last_pfn, adev->vm_manager.max_pfn);
-                amdgpu_bo_unreserve(bo_va->bo);
                 return -EINVAL;
         }
 
         saddr /= AMDGPU_GPU_PAGE_SIZE;
         eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
+        spin_lock(&vm->it_lock);
         it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+        spin_unlock(&vm->it_lock);
         if (it) {
                 struct amdgpu_bo_va_mapping *tmp;
                 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1036,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
                         "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
                         tmp->it.start, tmp->it.last + 1);
-                amdgpu_bo_unreserve(bo_va->bo);
                 r = -EINVAL;
                 goto error;
         }
 
         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
         if (!mapping) {
-                amdgpu_bo_unreserve(bo_va->bo);
                 r = -ENOMEM;
                 goto error;
         }
@@ -1055,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
         mapping->flags = flags;
 
         list_add(&mapping->list, &bo_va->invalids);
+        spin_lock(&vm->it_lock);
         interval_tree_insert(&mapping->it, &vm->va);
+        spin_unlock(&vm->it_lock);
         trace_amdgpu_vm_bo_map(bo_va, mapping);
 
         /* Make sure the page tables are allocated */
@@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
         if (eaddr > vm->max_pde_used)
                 vm->max_pde_used = eaddr;
 
-        amdgpu_bo_unreserve(bo_va->bo);
-
         /* walk over the address space and allocate the page tables */
         for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
                 struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,13 +1068,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                 if (vm->page_tables[pt_idx].bo)
                         continue;
 
-                ww_mutex_lock(&resv->lock, NULL);
                 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                                      AMDGPU_GPU_PAGE_SIZE, true,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                                      NULL, resv, &pt);
-                ww_mutex_unlock(&resv->lock);
                 if (r)
                         goto error_free;
 
@@ -1101,7 +1090,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 error_free:
         list_del(&mapping->list);
+        spin_lock(&vm->it_lock);
         interval_tree_remove(&mapping->it, &vm->va);
+        spin_unlock(&vm->it_lock);
         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
         kfree(mapping);
 
@@ -1119,7 +1110,7 @@ error:
  * Remove a mapping of the BO at the specefied addr from the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
@@ -1144,21 +1135,20 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                                 break;
                 }
 
-                if (&mapping->list == &bo_va->invalids) {
-                        amdgpu_bo_unreserve(bo_va->bo);
+                if (&mapping->list == &bo_va->invalids)
                         return -ENOENT;
-                }
         }
 
         list_del(&mapping->list);
+        spin_lock(&vm->it_lock);
         interval_tree_remove(&mapping->it, &vm->va);
+        spin_unlock(&vm->it_lock);
         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
         if (valid)
                 list_add(&mapping->list, &vm->freed);
         else
                 kfree(mapping);
-        amdgpu_bo_unreserve(bo_va->bo);
 
         return 0;
 }
@@ -1187,13 +1177,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
                 list_del(&mapping->list);
+                spin_lock(&vm->it_lock);
                 interval_tree_remove(&mapping->it, &vm->va);
+                spin_unlock(&vm->it_lock);
                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
                 list_add(&mapping->list, &vm->freed);
         }
         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
                 list_del(&mapping->list);
+                spin_lock(&vm->it_lock);
                 interval_tree_remove(&mapping->it, &vm->va);
+                spin_unlock(&vm->it_lock);
                 kfree(mapping);
         }
 
@@ -1241,7 +1235,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                 vm->ids[i].id = 0;
                 vm->ids[i].flushed_updates = NULL;
-                vm->ids[i].last_id_use = NULL;
         }
         mutex_init(&vm->mutex);
         vm->va = RB_ROOT;
@@ -1249,7 +1242,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
         INIT_LIST_HEAD(&vm->invalidated);
         INIT_LIST_HEAD(&vm->cleared);
         INIT_LIST_HEAD(&vm->freed);
-
+        spin_lock_init(&vm->it_lock);
         pd_size = amdgpu_vm_directory_size(adev);
         pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1269,8 +1262,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                              NULL, NULL, &vm->page_directory);
         if (r)
                 return r;
-
+        r = amdgpu_bo_reserve(vm->page_directory, false);
+        if (r) {
+                amdgpu_bo_unref(&vm->page_directory);
+                vm->page_directory = NULL;
+                return r;
+        }
         r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+        amdgpu_bo_unreserve(vm->page_directory);
         if (r) {
                 amdgpu_bo_unref(&vm->page_directory);
                 vm->page_directory = NULL;
@@ -1313,11 +1312,28 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
         amdgpu_bo_unref(&vm->page_directory);
         fence_put(vm->page_directory_fence);
-
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+                unsigned id = vm->ids[i].id;
+
+                atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
+                                    (long)vm, 0);
                 fence_put(vm->ids[i].flushed_updates);
-                fence_put(vm->ids[i].last_id_use);
         }
 
         mutex_destroy(&vm->mutex);
 }
+
+/**
+ * amdgpu_vm_manager_fini - cleanup VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Cleanup the VM manager and free resources.
+ */
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+{
+        unsigned i;
+
+        for (i = 0; i < AMDGPU_NUM_VM; ++i)
+                fence_put(adev->vm_manager.ids[i].active);
+}
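[Note: teardown mirrors the owner-token scheme. amdgpu_vm_fini() clears a VMID's owner with a compare-and-exchange, so a dying VM never wipes a token that another VM has since published, and the new amdgpu_vm_manager_fini() drops the manager's remaining active-fence references at driver shutdown. A C11 sketch of the release rule:

    #include <stdatomic.h>

    struct vm;

    static atomic_long vmid_owner[16];

    /* Clear the owner token only if we still own it; a VMID re-grabbed
     * by another VM in the meantime is left untouched. */
    static void vmid_drop_owner(unsigned id, struct vm *vm)
    {
        long expected = (long)vm;

        atomic_compare_exchange_strong(&vmid_owner[id], &expected, 0);
    }
]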