Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 33
1 file changed, 26 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 250c8e80e646..9ce36652029e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -288,6 +288,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 	unsigned pt_idx, from, to;
 	int r;
 	u64 flags;
+	uint64_t init_value = 0;
 
 	if (!parent->entries) {
 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
@@ -321,6 +322,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
 				AMDGPU_GEM_CREATE_SHADOW);
 
+	if (vm->pte_support_ats) {
+		init_value = AMDGPU_PTE_SYSTEM;
+		if (level != adev->vm_manager.num_level - 1)
+			init_value |= AMDGPU_PDE_PTE;
+	}
+
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
 		struct reservation_object *resv = vm->root.bo->tbo.resv;
@@ -333,7 +340,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 					AMDGPU_GPU_PAGE_SIZE, true,
 					AMDGPU_GEM_DOMAIN_VRAM,
 					flags,
-					NULL, resv, &pt);
+					NULL, resv, init_value, &pt);
 			if (r)
 				return r;
 
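Taken together, the three hunks above thread a new init_value through page-table allocation: when the VM supports ATS, freshly allocated page tables are filled with AMDGPU_PTE_SYSTEM instead of zero, and every non-leaf level additionally sets AMDGPU_PDE_PTE, presumably so unmapped addresses take the ATS/IOMMU translation path rather than faulting. A minimal standalone sketch of that selection logic; the flag bit positions here are illustrative stand-ins, not the real amdgpu definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the real values live in the amdgpu headers. */
#define AMDGPU_PTE_SYSTEM (1ULL << 1)  /* entry resolves via system/ATS */
#define AMDGPU_PDE_PTE    (1ULL << 54) /* treat this directory entry as a PTE */

/* Mirror of the hunks above: pick the fill value for a new page table. */
static uint64_t pt_init_value(unsigned level, unsigned num_level,
			      int pte_support_ats)
{
	uint64_t init_value = 0;

	if (pte_support_ats) {
		init_value = AMDGPU_PTE_SYSTEM;
		if (level != num_level - 1)	/* non-leaf levels only */
			init_value |= AMDGPU_PDE_PTE;
	}
	return init_value;
}

int main(void)
{
	for (unsigned level = 0; level < 4; ++level)
		printf("level %u: init_value = 0x%016llx\n", level,
		       (unsigned long long)pt_init_value(level, 4, 1));
	return 0;
}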
@@ -1060,7 +1067,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 	shadow = parent->bo->shadow;
 
 	if (vm->use_cpu_for_update) {
-		pd_addr = (unsigned long)parent->bo->kptr;
+		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
 		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
 		if (unlikely(r))
 			return r;
@@ -1401,7 +1408,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 
 		pt = entry->bo;
 		if (use_cpu_update) {
-			pe_start = (unsigned long)pt->kptr;
+			pe_start = (unsigned long)amdgpu_bo_kptr(pt);
 		} else {
 			if (pt->shadow) {
 				pe_start = amdgpu_bo_gpu_offset(pt->shadow);
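The two hunks above also switch both CPU-update paths from poking bo->kptr directly to the amdgpu_bo_kptr() accessor. Assuming the helper simply returns the BO's cached TTM kernel mapping, it could look roughly like the sketch below (an assumption about the implementation, not code from this patch):

/* Sketch only: return the CPU address of a kernel-mapped BO, or NULL if
 * the BO has not been kmapped. Assumes TTM caches the mapping in bo->kmap,
 * as ttm_bo_kmap() does. */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

Whatever the exact body, the accessor decouples callers from where struct amdgpu_bo caches the mapping.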
@@ -1995,15 +2002,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *mapping;
 	struct dma_fence *f = NULL;
 	int r;
+	uint64_t init_pte_value = 0;
 
 	while (!list_empty(&vm->freed)) {
 		mapping = list_first_entry(&vm->freed,
 			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
 
+		if (vm->pte_support_ats)
+			init_pte_value = AMDGPU_PTE_SYSTEM;
+
 		r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
 						mapping->start, mapping->last,
-						0, 0, &f);
+						init_pte_value, 0, &f);
 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
 		if (r) {
 			dma_fence_put(f);
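Here amdgpu_vm_clear_freed() starts refilling freed mappings with the ATS fallback value instead of zero. One readability note: vm->pte_support_ats cannot change while the loop runs, so the in-loop assignment recomputes a constant; a hoisted, behaviorally equivalent form is sketched below as a standalone toy (the flag bit and the freed ranges are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define AMDGPU_PTE_SYSTEM (1ULL << 1)	/* illustrative bit, as above */

struct mapping { uint64_t start, last; };

int main(void)
{
	/* Hypothetical freed ranges awaiting invalidation. */
	struct mapping freed[] = { { 0x1000, 0x1fff }, { 0x8000, 0x8fff } };
	int pte_support_ats = 1;

	/* Hoisted once: the flag cannot change mid-loop. */
	uint64_t init_pte_value = pte_support_ats ? AMDGPU_PTE_SYSTEM : 0;

	for (size_t i = 0; i < sizeof(freed) / sizeof(freed[0]); ++i)
		printf("clear [0x%llx..0x%llx] -> 0x%llx\n",
		       (unsigned long long)freed[i].start,
		       (unsigned long long)freed[i].last,
		       (unsigned long long)init_pte_value);
	return 0;
}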
@@ -2494,6 +2505,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct amd_sched_rq *rq;
 	int r, i;
 	u64 flags;
+	uint64_t init_pde_value = 0;
 
 	vm->va = RB_ROOT;
 	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
@@ -2515,10 +2527,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		return r;
 
-	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
+	vm->pte_support_ats = false;
+
+	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
 						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
-	else
+
+		if (adev->asic_type == CHIP_RAVEN) {
+			vm->pte_support_ats = true;
+			init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
+		}
+	} else
 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
 						AMDGPU_VM_USE_CPU_FOR_GFX);
 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
@@ -2538,7 +2557,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     flags,
-			     NULL, NULL, &vm->root.bo);
+			     NULL, NULL, init_pde_value, &vm->root.bo);
 	if (r)
 		goto error_free_sched_entity;
 
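Finally, amdgpu_vm_init() gains the policy decision: ATS support is enabled only for compute-context VMs on Raven, and in that case the root page directory is created pre-filled with AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE. A condensed standalone sketch of that decision; the enums and bit values are illustrative stand-ins for the real amdgpu definitions:

#include <stdint.h>
#include <stdio.h>

#define AMDGPU_PTE_SYSTEM (1ULL << 1)	/* illustrative, as above */
#define AMDGPU_PDE_PTE    (1ULL << 54)

enum vm_context { VM_CONTEXT_GFX, VM_CONTEXT_COMPUTE };
enum asic { CHIP_VEGA10, CHIP_RAVEN };

/* Mirror of the vm_init hunks: decide ATS support and the root-PD fill. */
static uint64_t root_pd_init(enum vm_context ctx, enum asic chip,
			     int *pte_support_ats)
{
	*pte_support_ats = 0;
	if (ctx == VM_CONTEXT_COMPUTE && chip == CHIP_RAVEN) {
		*pte_support_ats = 1;
		return AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
	}
	return 0;	/* all other VMs keep zero-initialized PDs */
}

int main(void)
{
	int ats;
	uint64_t v = root_pd_init(VM_CONTEXT_COMPUTE, CHIP_RAVEN, &ats);

	printf("ats=%d root pd init=0x%016llx\n", ats, (unsigned long long)v);
	return 0;
}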