Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  4 ++--
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 34 ++++++++++++++--------------------
2 files changed, 16 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 848e4ed7e32a..9d9831f2e2d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -820,8 +820,8 @@ struct amdgpu_ring {
 #define AMDGPU_VM_FAULT_STOP_ALWAYS	2
 
 struct amdgpu_vm_pt {
-	struct amdgpu_bo_list_entry	entry;
-	uint64_t			addr;
+	struct amdgpu_bo	*bo;
+	uint64_t		addr;
 };
 
 struct amdgpu_vm {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c171b16cf0f1..b8620d3dd61e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -142,12 +142,12 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	/* add the vm page table to the list */
 	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-		if (!entry->robj)
+		if (!bo)
 			continue;
 
-		r = validate(param, entry->robj);
+		r = validate(param, bo);
 		if (r)
 			return r;
 	}
@@ -171,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 
 	spin_lock(&glob->lru_lock);
 	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-		if (!entry->robj)
+		if (!bo)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+		ttm_bo_move_to_lru_tail(&bo->tbo);
 	}
 	spin_unlock(&glob->lru_lock);
 }
@@ -674,7 +674,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
-		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
 		uint64_t pde, pt;
 
 		if (bo == NULL)
@@ -790,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* initialize the variables */
 	addr = start;
 	pt_idx = addr >> amdgpu_vm_block_size;
-	pt = vm->page_tables[pt_idx].entry.robj;
+	pt = vm->page_tables[pt_idx].bo;
 	if (params->shadow) {
 		if (!pt->shadow)
 			return;
-		pt = vm->page_tables[pt_idx].entry.robj->shadow;
+		pt = pt->shadow;
 	}
 	if ((addr & ~mask) == (end & ~mask))
 		nptes = end - addr;
@@ -813,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* walk over the address space and update the page tables */
 	while (addr < end) {
 		pt_idx = addr >> amdgpu_vm_block_size;
-		pt = vm->page_tables[pt_idx].entry.robj;
+		pt = vm->page_tables[pt_idx].bo;
 		if (params->shadow) {
 			if (!pt->shadow)
 				return;
-			pt = vm->page_tables[pt_idx].entry.robj->shadow;
+			pt = pt->shadow;
 		}
 
 		if ((addr & ~mask) == (end & ~mask))
@@ -1425,11 +1425,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
 		struct reservation_object *resv = vm->page_directory->tbo.resv;
-		struct amdgpu_bo_list_entry *entry;
 		struct amdgpu_bo *pt;
 
-		entry = &vm->page_tables[pt_idx].entry;
-		if (entry->robj)
+		if (vm->page_tables[pt_idx].bo)
 			continue;
 
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
@@ -1463,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 			}
 		}
 
-		entry->robj = pt;
-		entry->priority = 0;
-		entry->tv.bo = &entry->robj->tbo;
-		entry->tv.shared = true;
-		entry->user_pages = NULL;
+		vm->page_tables[pt_idx].bo = pt;
 		vm->page_tables[pt_idx].addr = 0;
 	}
 
@@ -1719,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-		struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+		struct amdgpu_bo *pt = vm->page_tables[i].bo;
 
 		if (!pt)
 			continue;
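
The net effect of the patch: struct amdgpu_vm_pt stores a bare struct amdgpu_bo *bo instead of an embedded struct amdgpu_bo_list_entry, so every page-table walker tests and dereferences the BO pointer directly rather than hopping through entry.robj. Below is a minimal, self-contained userspace sketch of the resulting loop shape; the stand-in types, the id field, the validate_pt_bos() helper, and the main() driver are illustrative assumptions, not kernel code.

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel structures touched above. The real
 * amdgpu_bo carries TTM state, and validate() is a caller-supplied callback
 * passed to amdgpu_vm_validate_pt_bos().
 */
struct amdgpu_bo {
	int id;				/* placeholder for real BO state */
	struct amdgpu_bo *shadow;	/* optional shadow copy, as in the patch */
};

struct amdgpu_vm_pt {
	struct amdgpu_bo *bo;		/* was: struct amdgpu_bo_list_entry entry */
	unsigned long long addr;
};

static int validate(void *param, struct amdgpu_bo *bo)
{
	(void)param;
	printf("validating bo %d\n", bo->id);
	return 0;
}

/* The post-patch loop shape: test the pointer directly, no entry.robj hop. */
static int validate_pt_bos(struct amdgpu_vm_pt *tables, unsigned int max_pde_used)
{
	unsigned int i;
	int r;

	for (i = 0; i <= max_pde_used; ++i) {
		struct amdgpu_bo *bo = tables[i].bo;

		if (!bo)		/* page table not allocated yet */
			continue;

		r = validate(NULL, bo);
		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	struct amdgpu_bo bo0 = { 0, NULL }, bo2 = { 2, NULL };
	struct amdgpu_vm_pt tables[3] = {
		{ &bo0, 0 }, { NULL, 0 }, { &bo2, 0 },
	};

	return validate_pt_bos(tables, 2);
}

The one-pointer representation also lets the shadow lookup in amdgpu_vm_update_ptes() reuse the already-loaded pointer (pt = pt->shadow;) instead of re-indexing the page-table array a second time.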