path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
author     Christian König <christian.koenig@amd.com>   2018-09-10 14:02:46 -0400
committer  Alex Deucher <alexander.deucher@amd.com>     2018-09-13 16:14:12 -0400
commit     646b90259842faa8341b076a3488a227927d84a2 (patch)
tree       8c2de4481394f7e1c602e9d57ff35121adaec578 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent     e83dfe4d869358549bb259ab581ae4f0450c6580 (diff)
drm/amdgpu: use a singly linked list for amdgpu_vm_bo_base
Instead of the doubly linked list. This gets the size of amdgpu_vm_pt down
to 64 bytes again. We could even reduce it to 32 bytes, but that would
require some rather extreme hacks.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
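For context, here is a rough before/after sketch of the data-structure change
this patch makes. This is not the in-tree definition (the real structs in
amdgpu_vm.h and amdgpu_object.h carry more members); the field names are taken
from the hunks below, and the *_old/*_new suffixes exist only for illustration.

/* Sketch only -- not the full in-tree definitions. */

/* Before: each bo_base hangs on a doubly linked list_head (two pointers),
 * anchored at a list_head in the buffer object (bo->va). */
struct amdgpu_vm_bo_base_old {
        struct amdgpu_vm        *vm;
        struct amdgpu_bo        *bo;
        struct list_head        bo_list;        /* two pointers */
        struct list_head        vm_status;
};

/* After: a single 'next' pointer forms a singly linked list whose head is
 * stored in bo->vm_bo.  Dropping one pointer per entry is what brings
 * amdgpu_vm_pt back down to 64 bytes. */
struct amdgpu_vm_bo_base_new {
        struct amdgpu_vm                *vm;
        struct amdgpu_bo                *bo;
        struct amdgpu_vm_bo_base_new    *next;  /* one pointer */
        struct list_head                vm_status;
};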
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  38
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 234764ac58cf..a7f9aaa47c49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -309,12 +309,13 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 {
         base->vm = vm;
         base->bo = bo;
-        INIT_LIST_HEAD(&base->bo_list);
+        base->next = NULL;
         INIT_LIST_HEAD(&base->vm_status);
 
         if (!bo)
                 return;
-        list_add_tail(&base->bo_list, &bo->va);
+        base->next = bo->vm_bo;
+        bo->vm_bo = base;
 
         if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
                 return;
@@ -352,7 +353,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
         if (!parent)
                 return NULL;
 
-        return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list);
+        return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
 }
 
 /**
@@ -954,7 +955,7 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
         for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
 
                 if (entry->base.bo) {
-                        list_del(&entry->base.bo_list);
+                        entry->base.bo->vm_bo = NULL;
                         list_del(&entry->base.vm_status);
                         amdgpu_bo_unref(&entry->base.bo->shadow);
                         amdgpu_bo_unref(&entry->base.bo);
@@ -1162,12 +1163,13 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                        struct amdgpu_bo *bo)
 {
-        struct amdgpu_bo_va *bo_va;
+        struct amdgpu_vm_bo_base *base;
 
-        list_for_each_entry(bo_va, &bo->va, base.bo_list) {
-                if (bo_va->base.vm == vm) {
-                        return bo_va;
-                }
+        for (base = bo->vm_bo; base; base = base->next) {
+                if (base->vm != vm)
+                        continue;
+
+                return container_of(base, struct amdgpu_bo_va, base);
         }
         return NULL;
 }
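The rewritten amdgpu_vm_bo_find() above walks the new singly linked list and
uses container_of() to step from the embedded amdgpu_vm_bo_base back to the
amdgpu_bo_va that contains it. A minimal standalone sketch of that pattern,
with made-up generic types rather than the amdgpu ones:

#include <stddef.h>

struct base {
        struct base *next;
        int key;
};

struct wrapper {
        int payload;
        struct base b;          /* embedded link, like amdgpu_bo_va::base */
};

/* Recover the containing wrapper from a pointer to its embedded member.
 * (The kernel provides container_of(); this local copy keeps the sketch
 * self-contained.) */
#define sketch_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct wrapper *sketch_find(struct base *head, int key)
{
        struct base *it;

        for (it = head; it; it = it->next) {
                if (it->key != key)
                        continue;

                return sketch_container_of(it, struct wrapper, b);
        }
        return NULL;
}

Storing the link inside each entry and recovering the outer object with
container_of() keeps the per-entry overhead at exactly one pointer.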
@@ -2728,11 +2730,21 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
         struct amdgpu_bo_va_mapping *mapping, *next;
         struct amdgpu_bo *bo = bo_va->base.bo;
         struct amdgpu_vm *vm = bo_va->base.vm;
+        struct amdgpu_vm_bo_base **base;
 
-        if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv)
-                vm->bulk_moveable = false;
+        if (bo) {
+                if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+                        vm->bulk_moveable = false;
 
-        list_del(&bo_va->base.bo_list);
+                for (base = &bo_va->base.bo->vm_bo; *base;
+                     base = &(*base)->next) {
+                        if (*base != &bo_va->base)
+                                continue;
+
+                        *base = bo_va->base.next;
+                        break;
+                }
+        }
 
         spin_lock(&vm->invalidated_lock);
         list_del(&bo_va->base.vm_status);
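The removal loop added to amdgpu_vm_bo_rmv() above is the classic
pointer-to-pointer idiom for a singly linked list: by walking the address of
each 'next' field, the matching element can be unlinked with a single
assignment and no separate 'prev' bookkeeping. A minimal standalone sketch of
the same idiom (generic names, not amdgpu code):

struct node {
        struct node *next;
};

/* Unlink 'victim' from the list whose head pointer is *head.  'pp' always
 * holds the address of the pointer that currently references the node being
 * inspected, so removal is one assignment. */
static void sketch_unlink(struct node **head, struct node *victim)
{
        struct node **pp;

        for (pp = head; *pp; pp = &(*pp)->next) {
                if (*pp != victim)
                        continue;

                *pp = victim->next;
                break;
        }
}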
@@ -2774,7 +2786,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
         if (bo->parent && bo->parent->shadow == bo)
                 bo = bo->parent;
 
-        list_for_each_entry(bo_base, &bo->va, bo_list) {
+        for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
                 struct amdgpu_vm *vm = bo_base->vm;
 
                 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {