Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	60
1 file changed, 41 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7ca2e8fa5906..1eae307cdfd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -604,24 +604,14 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 	return result;
 }
 
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm)
+static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
+					 struct amdgpu_vm *vm,
+					 bool shadow)
 {
 	struct amdgpu_ring *ring;
-	struct amdgpu_bo *pd = vm->page_directory;
-	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
+	struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+		vm->page_directory;
+	uint64_t pd_addr;
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
@@ -631,6 +621,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
 	int r;
 
+	if (!pd)
+		return 0;
+	pd_addr = amdgpu_bo_gpu_offset(pd);
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	/* padding, etc. */
@@ -656,9 +649,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 			continue;
 
 		pt = amdgpu_bo_gpu_offset(bo);
-		if (vm->page_tables[pt_idx].addr == pt)
-			continue;
-		vm->page_tables[pt_idx].addr = pt;
+		if (!shadow) {
+			if (vm->page_tables[pt_idx].addr == pt)
+				continue;
+			vm->page_tables[pt_idx].addr = pt;
+		} else {
+			if (vm->page_tables[pt_idx].shadow_addr == pt)
+				continue;
+			vm->page_tables[pt_idx].shadow_addr = pt;
+		}
 
 		pde = pd_addr + pt_idx * 8;
 		if (((last_pde + 8 * count) != pde) ||
@@ -709,6 +708,29 @@ error_free:
 	return r;
 }
 
+/*
+ * amdgpu_vm_update_pdes - make sure that page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm)
+{
+	int r;
+
+	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+	if (r)
+		return r;
+	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
+}
+
 /**
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *