aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2017-11-30 09:28:03 -0500
committerAlex Deucher <alexander.deucher@amd.com>2017-12-12 14:46:00 -0500
commit8f19cd78c986fb0eb24a1847931c096ec9e214a5 (patch)
tree1a2d4ce166110b8491da0e9da796c6f311f9f2a5 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parente3a1b32a12ef83e260a307e678d053d5f4570acd (diff)
drm/amdgpu: remove last_entry_used from the VM code
Not needed any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c52
1 file changed, 29 insertions, 23 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bd6296a6dab1..25fdcba5681c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -329,9 +329,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
329 to >= amdgpu_vm_num_entries(adev, level)) 329 to >= amdgpu_vm_num_entries(adev, level))
330 return -EINVAL; 330 return -EINVAL;
331 331
332 if (to > parent->last_entry_used)
333 parent->last_entry_used = to;
334
335 ++level; 332 ++level;
336 saddr = saddr & ((1 << shift) - 1); 333 saddr = saddr & ((1 << shift) - 1);
337 eaddr = eaddr & ((1 << shift) - 1); 334 eaddr = eaddr & ((1 << shift) - 1);
@@ -1184,16 +1181,19 @@ error_free:
1184 * 1181 *
1185 * Mark all PD level as invalid after an error. 1182 * Mark all PD level as invalid after an error.
1186 */ 1183 */
1187static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm, 1184static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
1188 struct amdgpu_vm_pt *parent) 1185 struct amdgpu_vm *vm,
1186 struct amdgpu_vm_pt *parent,
1187 unsigned level)
1189{ 1188{
1190 unsigned pt_idx; 1189 unsigned pt_idx, num_entries;
1191 1190
1192 /* 1191 /*
1193 * Recurse into the subdirectories. This recursion is harmless because 1192 * Recurse into the subdirectories. This recursion is harmless because
1194 * we only have a maximum of 5 layers. 1193 * we only have a maximum of 5 layers.
1195 */ 1194 */
1196 for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { 1195 num_entries = amdgpu_vm_num_entries(adev, level);
1196 for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1197 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; 1197 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1198 1198
1199 if (!entry->base.bo) 1199 if (!entry->base.bo)
@@ -1204,7 +1204,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
1204 if (list_empty(&entry->base.vm_status)) 1204 if (list_empty(&entry->base.vm_status))
1205 list_add(&entry->base.vm_status, &vm->relocated); 1205 list_add(&entry->base.vm_status, &vm->relocated);
1206 spin_unlock(&vm->status_lock); 1206 spin_unlock(&vm->status_lock);
1207 amdgpu_vm_invalidate_level(vm, entry); 1207 amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1208 } 1208 }
1209} 1209}
1210 1210
@@ -1246,7 +1246,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1246 1246
1247 r = amdgpu_vm_update_pde(adev, vm, pt, entry); 1247 r = amdgpu_vm_update_pde(adev, vm, pt, entry);
1248 if (r) { 1248 if (r) {
1249 amdgpu_vm_invalidate_level(vm, &vm->root); 1249 amdgpu_vm_invalidate_level(adev, vm,
1250 &vm->root, 0);
1250 return r; 1251 return r;
1251 } 1252 }
1252 spin_lock(&vm->status_lock); 1253 spin_lock(&vm->status_lock);
@@ -1649,7 +1650,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1649 1650
1650error_free: 1651error_free:
1651 amdgpu_job_free(job); 1652 amdgpu_job_free(job);
1652 amdgpu_vm_invalidate_level(vm, &vm->root); 1653 amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
1653 return r; 1654 return r;
1654} 1655}
1655 1656
@@ -2713,26 +2714,31 @@ error_free_sched_entity:
2713/** 2714/**
2714 * amdgpu_vm_free_levels - free PD/PT levels 2715 * amdgpu_vm_free_levels - free PD/PT levels
2715 * 2716 *
2716 * @level: PD/PT starting level to free 2717 * @adev: amdgpu device structure
2718 * @parent: PD/PT starting level to free
2719 * @level: level of parent structure
2717 * 2720 *
2718 * Free the page directory or page table level and all sub levels. 2721 * Free the page directory or page table level and all sub levels.
2719 */ 2722 */
2720static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level) 2723static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2724 struct amdgpu_vm_pt *parent,
2725 unsigned level)
2721{ 2726{
2722 unsigned i; 2727 unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2723 2728
2724 if (level->base.bo) { 2729 if (parent->base.bo) {
2725 list_del(&level->base.bo_list); 2730 list_del(&parent->base.bo_list);
2726 list_del(&level->base.vm_status); 2731 list_del(&parent->base.vm_status);
2727 amdgpu_bo_unref(&level->base.bo->shadow); 2732 amdgpu_bo_unref(&parent->base.bo->shadow);
2728 amdgpu_bo_unref(&level->base.bo); 2733 amdgpu_bo_unref(&parent->base.bo);
2729 } 2734 }
2730 2735
2731 if (level->entries) 2736 if (parent->entries)
2732 for (i = 0; i <= level->last_entry_used; i++) 2737 for (i = 0; i < num_entries; i++)
2733 amdgpu_vm_free_levels(&level->entries[i]); 2738 amdgpu_vm_free_levels(adev, &parent->entries[i],
2739 level + 1);
2734 2740
2735 kvfree(level->entries); 2741 kvfree(parent->entries);
2736} 2742}
2737 2743
2738/** 2744/**
@@ -2790,7 +2796,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2790 if (r) { 2796 if (r) {
2791 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n"); 2797 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2792 } else { 2798 } else {
2793 amdgpu_vm_free_levels(&vm->root); 2799 amdgpu_vm_free_levels(adev, &vm->root, 0);
2794 amdgpu_bo_unreserve(root); 2800 amdgpu_bo_unreserve(root);
2795 } 2801 }
2796 amdgpu_bo_unref(&root); 2802 amdgpu_bo_unref(&root);