aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2019-02-01 08:02:10 -0500
committerAlex Deucher <alexander.deucher@amd.com>2019-03-19 16:36:48 -0400
commite35fb064d8d4de8ecea38dae48f04d0253e3d9be (patch)
tree433f389df55e66b988005fe4c89230666521a3b1 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent0ce15d6f7d3fb1162fd7de2829dbdf6d63a6a02a (diff)
drm/amdgpu: free PDs/PTs on demand
When something is unmapped we now free the affected PDs/PTs again.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c71
1 file changed, 53 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b25be87eb412..92334efa19a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -531,13 +531,32 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
531 */ 531 */
532static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev, 532static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
533 struct amdgpu_vm *vm, 533 struct amdgpu_vm *vm,
534 struct amdgpu_vm_pt_cursor *start,
534 struct amdgpu_vm_pt_cursor *cursor) 535 struct amdgpu_vm_pt_cursor *cursor)
535{ 536{
536 amdgpu_vm_pt_start(adev, vm, 0, cursor); 537 if (start)
538 *cursor = *start;
539 else
540 amdgpu_vm_pt_start(adev, vm, 0, cursor);
537 while (amdgpu_vm_pt_descendant(adev, cursor)); 541 while (amdgpu_vm_pt_descendant(adev, cursor));
538} 542}
539 543
540/** 544/**
545 * amdgpu_vm_pt_continue_dfs - check if the deep first search should continue
546 *
547 * @start: starting point for the search
548 * @entry: current entry
549 *
550 * Returns:
551 * True when the search should continue, false otherwise.
552 */
553static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
554 struct amdgpu_vm_pt *entry)
555{
556 return entry && (!start || entry != start->entry);
557}
558
559/**
541 * amdgpu_vm_pt_next_dfs - get the next node for a deep first search 560 * amdgpu_vm_pt_next_dfs - get the next node for a deep first search
542 * 561 *
543 * @adev: amdgpu_device structure 562 * @adev: amdgpu_device structure
@@ -562,11 +581,11 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
562/** 581/**
563 * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs 582 * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
564 */ 583 */
565#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) \ 584#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
566 for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)), \ 585 for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
567 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\ 586 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
568 (entry); (entry) = (cursor).entry, \ 587 amdgpu_vm_pt_continue_dfs((start), (entry)); \
569 amdgpu_vm_pt_next_dfs((adev), &(cursor))) 588 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
570 589
571/** 590/**
572 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list 591 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
@@ -945,31 +964,45 @@ error_free_pt:
945} 964}
946 965
947/** 966/**
967 * amdgpu_vm_free_table - free one PD/PT
968 *
969 * @entry: PDE to free
970 */
971static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
972{
973 if (entry->base.bo) {
974 entry->base.bo->vm_bo = NULL;
975 list_del(&entry->base.vm_status);
976 amdgpu_bo_unref(&entry->base.bo->shadow);
977 amdgpu_bo_unref(&entry->base.bo);
978 }
979 kvfree(entry->entries);
980 entry->entries = NULL;
981}
982
983/**
948 * amdgpu_vm_free_pts - free PD/PT levels 984 * amdgpu_vm_free_pts - free PD/PT levels
949 * 985 *
950 * @adev: amdgpu device structure 986 * @adev: amdgpu device structure
951 * @vm: amdgpu vm structure 987 * @vm: amdgpu vm structure
988 * @start: optional cursor where to start freeing PDs/PTs
952 * 989 *
953 * Free the page directory or page table level and all sub levels. 990 * Free the page directory or page table level and all sub levels.
954 */ 991 */
955static void amdgpu_vm_free_pts(struct amdgpu_device *adev, 992static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
956 struct amdgpu_vm *vm) 993 struct amdgpu_vm *vm,
994 struct amdgpu_vm_pt_cursor *start)
957{ 995{
958 struct amdgpu_vm_pt_cursor cursor; 996 struct amdgpu_vm_pt_cursor cursor;
959 struct amdgpu_vm_pt *entry; 997 struct amdgpu_vm_pt *entry;
960 998
961 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) { 999 vm->bulk_moveable = false;
962 1000
963 if (entry->base.bo) { 1001 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
964 entry->base.bo->vm_bo = NULL; 1002 amdgpu_vm_free_table(entry);
965 list_del(&entry->base.vm_status);
966 amdgpu_bo_unref(&entry->base.bo->shadow);
967 amdgpu_bo_unref(&entry->base.bo);
968 }
969 kvfree(entry->entries);
970 }
971 1003
972 BUG_ON(vm->root.base.bo); 1004 if (start)
1005 amdgpu_vm_free_table(start->entry);
973} 1006}
974 1007
975/** 1008/**
@@ -1365,7 +1398,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
1365 struct amdgpu_vm_pt_cursor cursor; 1398 struct amdgpu_vm_pt_cursor cursor;
1366 struct amdgpu_vm_pt *entry; 1399 struct amdgpu_vm_pt *entry;
1367 1400
1368 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) 1401 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
1369 if (entry->base.bo && !entry->base.moved) 1402 if (entry->base.bo && !entry->base.moved)
1370 amdgpu_vm_bo_relocated(&entry->base); 1403 amdgpu_vm_bo_relocated(&entry->base);
1371} 1404}
@@ -1673,6 +1706,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1673 /* Mark all child entries as huge */ 1706 /* Mark all child entries as huge */
1674 while (cursor.pfn < frag_start) { 1707 while (cursor.pfn < frag_start) {
1675 cursor.entry->huge = true; 1708 cursor.entry->huge = true;
1709 amdgpu_vm_free_pts(adev, params->vm, &cursor);
1676 amdgpu_vm_pt_next(adev, &cursor); 1710 amdgpu_vm_pt_next(adev, &cursor);
1677 } 1711 }
1678 1712
@@ -3236,10 +3270,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3236 if (r) { 3270 if (r) {
3237 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n"); 3271 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
3238 } else { 3272 } else {
3239 amdgpu_vm_free_pts(adev, vm); 3273 amdgpu_vm_free_pts(adev, vm, NULL);
3240 amdgpu_bo_unreserve(root); 3274 amdgpu_bo_unreserve(root);
3241 } 3275 }
3242 amdgpu_bo_unref(&root); 3276 amdgpu_bo_unref(&root);
3277 WARN_ON(vm->root.base.bo);
3243 dma_fence_put(vm->last_update); 3278 dma_fence_put(vm->last_update);
3244 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 3279 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
3245 amdgpu_vmid_free_reserved(adev, vm, i); 3280 amdgpu_vmid_free_reserved(adev, vm, i);