path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
author		Christian König <christian.koenig@amd.com>	2017-08-03 08:02:13 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2017-08-29 15:28:04 -0400
commit		3f3333f8a0e90ac26f84ed7b0aa344efce695c08
tree		5a807843d544d076c90d22ac71d143b1a1bbce50 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent		00b5cc83c443dcd351cb2b21055656e007992b54
drm/amdgpu: track evicted page tables v2
Instead of validating all page tables when one was evicted,
track which one needs a validation.

v2: simplify amdgpu_vm_ready as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	227
1 file changed, 103 insertions, 124 deletions
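The gist of the change: instead of comparing a global eviction counter and re-walking every page table level on each command submission, each VM now keeps an "evicted" list of page table BOs, and only the entries on that list are revalidated. Below is a minimal, self-contained userspace sketch of that bookkeeping pattern, assuming simplified stand-in types (vm_bo_base, vm_ready, vm_validate are illustrative names, not the amdgpu structures); the real patch does the same thing with kernel lists protected by vm->status_lock, as the diff shows.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vm_bo_base {
	struct vm_bo_base *next;	/* link in the per-VM "evicted" list */
	const char *name;
};

struct vm {
	struct vm_bo_base *evicted;	/* page table BOs that still need validation */
};

/* Called when the memory manager evicts one of the VM's page table BOs. */
static void vm_bo_evicted(struct vm *vm, struct vm_bo_base *base)
{
	base->next = vm->evicted;
	vm->evicted = base;
}

/* Analogue of the new amdgpu_vm_ready(): ready when nothing is evicted. */
static bool vm_ready(const struct vm *vm)
{
	return vm->evicted == NULL;
}

/* Analogue of amdgpu_vm_validate_pt_bos(): only revalidate what was evicted. */
static void vm_validate(struct vm *vm)
{
	while (vm->evicted) {
		struct vm_bo_base *base = vm->evicted;

		vm->evicted = base->next;
		printf("revalidating %s\n", base->name);
	}
}

int main(void)
{
	struct vm vm = { .evicted = NULL };
	struct vm_bo_base pd = { .next = NULL, .name = "page directory" };
	struct vm_bo_base pt0 = { .next = NULL, .name = "page table 0" };

	vm_bo_evicted(&vm, &pd);
	vm_bo_evicted(&vm, &pt0);

	if (!vm_ready(&vm))
		vm_validate(&vm);	/* touches only the two evicted BOs */

	printf("vm ready: %s\n", vm_ready(&vm) ? "yes" : "no");
	return 0;
}

In the diff itself this pattern appears as the new vm->evicted list: amdgpu_vm_bo_invalidate() moves an evicted page table onto it under vm->status_lock, amdgpu_vm_ready() simply checks that the list is empty, and amdgpu_vm_validate_pt_bos() drains it on the next command submission.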
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1334bbb82634..6ff3c1bf035e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -140,7 +140,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 			 struct list_head *validated,
 			 struct amdgpu_bo_list_entry *entry)
 {
-	entry->robj = vm->root.bo;
+	entry->robj = vm->root.base.bo;
 	entry->priority = 0;
 	entry->tv.bo = &entry->robj->tbo;
 	entry->tv.shared = true;
@@ -149,61 +149,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_validate_layer - validate a single page table level
- *
- * @parent: parent page table level
- * @validate: callback to do the validation
- * @param: parameter for the validation callback
- *
- * Validate the page table BOs on command submission if neccessary.
- */
-static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
-				    int (*validate)(void *, struct amdgpu_bo *),
-				    void *param, bool use_cpu_for_update,
-				    struct ttm_bo_global *glob)
-{
-	unsigned i;
-	int r;
-
-	if (use_cpu_for_update) {
-		r = amdgpu_bo_kmap(parent->bo, NULL);
-		if (r)
-			return r;
-	}
-
-	if (!parent->entries)
-		return 0;
-
-	for (i = 0; i <= parent->last_entry_used; ++i) {
-		struct amdgpu_vm_pt *entry = &parent->entries[i];
-
-		if (!entry->bo)
-			continue;
-
-		r = validate(param, entry->bo);
-		if (r)
-			return r;
-
-		spin_lock(&glob->lru_lock);
-		ttm_bo_move_to_lru_tail(&entry->bo->tbo);
-		if (entry->bo->shadow)
-			ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
-		spin_unlock(&glob->lru_lock);
-
-		/*
-		 * Recurse into the sub directory. This is harmless because we
-		 * have only a maximum of 5 layers.
-		 */
-		r = amdgpu_vm_validate_level(entry, validate, param,
-					     use_cpu_for_update, glob);
-		if (r)
-			return r;
-	}
-
-	return r;
-}
-
-/**
  * amdgpu_vm_validate_pt_bos - validate the page table BOs
  *
  * @adev: amdgpu device pointer
@@ -217,32 +162,43 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			      int (*validate)(void *p, struct amdgpu_bo *bo),
 			      void *param)
 {
-	uint64_t num_evictions;
+	struct ttm_bo_global *glob = adev->mman.bdev.glob;
+	int r;
 
-	/* We only need to validate the page tables
-	 * if they aren't already valid.
-	 */
-	num_evictions = atomic64_read(&adev->num_evictions);
-	if (num_evictions == vm->last_eviction_counter)
-		return 0;
+	spin_lock(&vm->status_lock);
+	while (!list_empty(&vm->evicted)) {
+		struct amdgpu_vm_bo_base *bo_base;
+		struct amdgpu_bo *bo;
 
-	return amdgpu_vm_validate_level(&vm->root, validate, param,
-					vm->use_cpu_for_update,
-					adev->mman.bdev.glob);
-}
+		bo_base = list_first_entry(&vm->evicted,
+					   struct amdgpu_vm_bo_base,
+					   vm_status);
+		spin_unlock(&vm->status_lock);
 
-/**
- * amdgpu_vm_check - helper for amdgpu_vm_ready
- */
-static int amdgpu_vm_check(void *param, struct amdgpu_bo *bo)
-{
-	/* if anything is swapped out don't swap it in here,
-	   just abort and wait for the next CS */
-	if (!amdgpu_bo_gpu_accessible(bo))
-		return -ERESTARTSYS;
+		bo = bo_base->bo;
+		BUG_ON(!bo);
+		if (bo->parent) {
+			r = validate(param, bo);
+			if (r)
+				return r;
 
-	if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
-		return -ERESTARTSYS;
+			spin_lock(&glob->lru_lock);
+			ttm_bo_move_to_lru_tail(&bo->tbo);
+			if (bo->shadow)
+				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+			spin_unlock(&glob->lru_lock);
+		}
+
+		if (vm->use_cpu_for_update) {
+			r = amdgpu_bo_kmap(bo, NULL);
+			if (r)
+				return r;
+		}
+
+		spin_lock(&vm->status_lock);
+		list_del_init(&bo_base->vm_status);
+	}
+	spin_unlock(&vm->status_lock);
 
 	return 0;
 }
@@ -250,17 +206,19 @@ static int amdgpu_vm_check(void *param, struct amdgpu_bo *bo)
 /**
  * amdgpu_vm_ready - check VM is ready for updates
  *
- * @adev: amdgpu device
  * @vm: VM to check
  *
  * Check if all VM PDs/PTs are ready for updates
  */
-bool amdgpu_vm_ready(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 {
-	if (amdgpu_vm_check(NULL, vm->root.bo))
-		return false;
+	bool ready;
+
+	spin_lock(&vm->status_lock);
+	ready = list_empty(&vm->evicted);
+	spin_unlock(&vm->status_lock);
 
-	return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_vm_check, NULL);
+	return ready;
 }
 
 /**
@@ -326,11 +284,11 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
-		struct reservation_object *resv = vm->root.bo->tbo.resv;
+		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
 		struct amdgpu_bo *pt;
 
-		if (!entry->bo) {
+		if (!entry->base.bo) {
 			r = amdgpu_bo_create(adev,
 					     amdgpu_vm_bo_size(adev, level),
 					     AMDGPU_GPU_PAGE_SIZE, true,
@@ -351,9 +309,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 			/* Keep a reference to the root directory to avoid
 			 * freeing them up in the wrong order.
 			 */
-			pt->parent = amdgpu_bo_ref(vm->root.bo);
+			pt->parent = amdgpu_bo_ref(vm->root.base.bo);
 
-			entry->bo = pt;
+			entry->base.vm = vm;
+			entry->base.bo = pt;
+			list_add_tail(&entry->base.bo_list, &pt->va);
+			INIT_LIST_HEAD(&entry->base.vm_status);
 			entry->addr = 0;
 		}
 
@@ -1020,7 +981,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	int r;
 
 	amdgpu_sync_create(&sync);
-	amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
+	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner);
 	r = amdgpu_sync_wait(&sync, true);
 	amdgpu_sync_free(&sync);
 
@@ -1059,10 +1020,10 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 
 	memset(&params, 0, sizeof(params));
 	params.adev = adev;
-	shadow = parent->bo->shadow;
+	shadow = parent->base.bo->shadow;
 
 	if (vm->use_cpu_for_update) {
-		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
 		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
 		if (unlikely(r))
 			return r;
@@ -1078,7 +1039,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 		/* assume the worst case */
 		ndw += parent->last_entry_used * 6;
 
-		pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
 
 		if (shadow) {
 			shadow_addr = amdgpu_bo_gpu_offset(shadow);
@@ -1098,7 +1059,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 
 	/* walk over the address space and update the directory */
 	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
-		struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
+		struct amdgpu_bo *bo = parent->entries[pt_idx].base.bo;
 		uint64_t pde, pt;
 
 		if (bo == NULL)
@@ -1141,7 +1102,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 	}
 
 	if (count) {
-		if (vm->root.bo->shadow)
+		if (vm->root.base.bo->shadow)
 			params.func(&params, last_shadow, last_pt,
 				    count, incr, AMDGPU_PTE_VALID);
 
@@ -1154,7 +1115,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 		amdgpu_job_free(job);
 	} else {
 		amdgpu_ring_pad_ib(ring, params.ib);
-		amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
+		amdgpu_sync_resv(adev, &job->sync,
+				 parent->base.bo->tbo.resv,
 				 AMDGPU_FENCE_OWNER_VM);
 		if (shadow)
 			amdgpu_sync_resv(adev, &job->sync,
@@ -1167,7 +1129,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 		if (r)
 			goto error_free;
 
-		amdgpu_bo_fence(parent->bo, fence, true);
+		amdgpu_bo_fence(parent->base.bo, fence, true);
 		dma_fence_put(vm->last_dir_update);
 		vm->last_dir_update = dma_fence_get(fence);
 		dma_fence_put(fence);
@@ -1180,7 +1142,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
 
-		if (!entry->bo)
+		if (!entry->base.bo)
 			continue;
 
 		r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
@@ -1213,7 +1175,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
 	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
 
-		if (!entry->bo)
+		if (!entry->base.bo)
 			continue;
 
 		entry->addr = ~0ULL;
@@ -1268,7 +1230,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
 	*entry = &p->vm->root;
 	while ((*entry)->entries) {
 		idx = addr >> (p->adev->vm_manager.block_size * level--);
-		idx %= amdgpu_bo_size((*entry)->bo) / 8;
+		idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
 		*parent = *entry;
 		*entry = &(*entry)->entries[idx];
 	}
@@ -1304,7 +1266,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 	    p->src ||
 	    !(flags & AMDGPU_PTE_VALID)) {
 
-		dst = amdgpu_bo_gpu_offset(entry->bo);
+		dst = amdgpu_bo_gpu_offset(entry->base.bo);
 		dst = amdgpu_gart_get_vm_pde(p->adev, dst);
 		flags = AMDGPU_PTE_VALID;
 	} else {
@@ -1330,18 +1292,18 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 		tmp = p->pages_addr;
 		p->pages_addr = NULL;
 
-		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
 		pde = pd_addr + (entry - parent->entries) * 8;
 		amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
 
 		p->pages_addr = tmp;
 	} else {
-		if (parent->bo->shadow) {
-			pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
+		if (parent->base.bo->shadow) {
+			pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
 			pde = pd_addr + (entry - parent->entries) * 8;
 			amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
 		}
-		pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
 		pde = pd_addr + (entry - parent->entries) * 8;
 		amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
 	}
@@ -1392,7 +1354,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 		if (entry->addr & AMDGPU_PDE_PTE)
 			continue;
 
-		pt = entry->bo;
+		pt = entry->base.bo;
 		if (use_cpu_update) {
 			pe_start = (unsigned long)amdgpu_bo_kptr(pt);
 		} else {
@@ -1612,12 +1574,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
+	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
 			     owner);
 	if (r)
 		goto error_free;
 
-	r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
+	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
 	if (r)
 		goto error_free;
 
@@ -1632,7 +1594,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	amdgpu_bo_fence(vm->root.bo, f, true);
+	amdgpu_bo_fence(vm->root.base.bo, f, true);
 	dma_fence_put(*fence);
 	*fence = f;
 	return 0;
@@ -1927,7 +1889,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-	struct reservation_object *resv = vm->root.bo->tbo.resv;
+	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
 	struct dma_fence *excl, **shared;
 	unsigned i, shared_count;
 	int r;
@@ -2414,12 +2376,25 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
  * Mark @bo as invalid.
  */
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo)
+			     struct amdgpu_bo *bo, bool evicted)
 {
 	struct amdgpu_vm_bo_base *bo_base;
 
 	list_for_each_entry(bo_base, &bo->va, bo_list) {
+		struct amdgpu_vm *vm = bo_base->vm;
+
 		bo_base->moved = true;
+		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+			spin_lock(&bo_base->vm->status_lock);
+			list_move(&bo_base->vm_status, &vm->evicted);
+			spin_unlock(&bo_base->vm->status_lock);
+			continue;
+		}
+
+		/* Don't add page tables to the moved state */
+		if (bo->tbo.type == ttm_bo_type_kernel)
+			continue;
+
 		spin_lock(&bo_base->vm->status_lock);
 		list_move(&bo_base->vm_status, &bo_base->vm->moved);
 		spin_unlock(&bo_base->vm->status_lock);
@@ -2507,6 +2482,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
 		vm->reserved_vmid[i] = NULL;
 	spin_lock_init(&vm->status_lock);
+	INIT_LIST_HEAD(&vm->evicted);
 	INIT_LIST_HEAD(&vm->moved);
 	INIT_LIST_HEAD(&vm->freed);
 
@@ -2551,30 +2527,31 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     flags,
-			     NULL, NULL, init_pde_value, &vm->root.bo);
+			     NULL, NULL, init_pde_value, &vm->root.base.bo);
 	if (r)
 		goto error_free_sched_entity;
 
-	r = amdgpu_bo_reserve(vm->root.bo, false);
-	if (r)
-		goto error_free_root;
-
-	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+	vm->root.base.vm = vm;
+	list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
+	INIT_LIST_HEAD(&vm->root.base.vm_status);
 
 	if (vm->use_cpu_for_update) {
-		r = amdgpu_bo_kmap(vm->root.bo, NULL);
+		r = amdgpu_bo_reserve(vm->root.base.bo, false);
 		if (r)
 			goto error_free_root;
-	}
 
-	amdgpu_bo_unreserve(vm->root.bo);
+		r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
+		if (r)
+			goto error_free_root;
+		amdgpu_bo_unreserve(vm->root.base.bo);
+	}
 
 	return 0;
 
 error_free_root:
-	amdgpu_bo_unref(&vm->root.bo->shadow);
-	amdgpu_bo_unref(&vm->root.bo);
-	vm->root.bo = NULL;
+	amdgpu_bo_unref(&vm->root.base.bo->shadow);
+	amdgpu_bo_unref(&vm->root.base.bo);
+	vm->root.base.bo = NULL;
 
 error_free_sched_entity:
 	amd_sched_entity_fini(&ring->sched, &vm->entity);
@@ -2593,9 +2570,11 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
 {
 	unsigned i;
 
-	if (level->bo) {
-		amdgpu_bo_unref(&level->bo->shadow);
-		amdgpu_bo_unref(&level->bo);
+	if (level->base.bo) {
+		list_del(&level->base.bo_list);
+		list_del(&level->base.vm_status);
+		amdgpu_bo_unref(&level->base.bo->shadow);
+		amdgpu_bo_unref(&level->base.bo);
 	}
 
 	if (level->entries)