author		Christian König <christian.koenig@amd.com>	2018-09-01 07:25:31 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2018-09-10 23:40:16 -0400
commit		c12a2ee5d002e39a387001cdb5065b560568b4f5 (patch)
tree		09c6ad6e42350dbe3d4bf5faeb66b828e0e03f7b /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent		c460f8a6f5918c2a8a2354a60b03a71310b943aa (diff)
drm/amdgpu: separate per VM BOs from normal in the moved state
Allows us to avoid taking the spinlock in more places.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
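The patch splits the old spinlock-protected vm->moved list in two: per VM BOs (BOs whose reservation object is the one of the VM root page directory) stay on vm->moved, which is now serialized by that root reservation the callers already hold, while independently reserved BOs go to a new vm->invalidated list with its own spinlock. As a reading aid, here is a sketch of the resulting list layout; it is illustrative only (simplified struct, hypothetical name), and the field comments are our reading of the patch rather than driver documentation:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative only: the per-VM state lists after this patch and what
 * serializes access to each. "Root resv" is the reservation object of the
 * VM's root page directory, which all per VM BOs share.
 */
struct amdgpu_vm_lists_sketch {
	struct list_head evicted;     /* needs revalidation; root resv held */
	struct list_head relocated;   /* page table BOs to update; root resv held */
	struct list_head moved;       /* per VM BOs with stale PTEs; root resv held */
	struct list_head idle;        /* fully up to date; root resv held */
	struct list_head invalidated; /* normal BOs with stale PTEs */
	spinlock_t invalidated_lock;  /* the only spinlock left; guards ->invalidated */
	struct list_head freed;       /* mappings to clear; unchanged by this patch */
};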
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	| 67
1 file changed, 33 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a9275a99d793..65977e7c94dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -342,9 +342,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			break;
 
 		if (bo->tbo.type != ttm_bo_type_kernel) {
-			spin_lock(&vm->moved_lock);
 			list_move(&bo_base->vm_status, &vm->moved);
-			spin_unlock(&vm->moved_lock);
 		} else {
 			if (vm->use_cpu_for_update)
 				r = amdgpu_bo_kmap(bo, NULL);
@@ -1734,10 +1732,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		amdgpu_asic_flush_hdp(adev, NULL);
 	}
 
-	spin_lock(&vm->moved_lock);
-	list_del_init(&bo_va->base.vm_status);
-	spin_unlock(&vm->moved_lock);
-
 	/* If the BO is not in its preferred location add it back to
 	 * the evicted list so that it gets validated again on the
 	 * next command submission.
@@ -1746,9 +1740,13 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		uint32_t mem_type = bo->tbo.mem.mem_type;
 
 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
-			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+			list_move_tail(&bo_va->base.vm_status, &vm->evicted);
 		else
-			list_add(&bo_va->base.vm_status, &vm->idle);
+			list_move(&bo_va->base.vm_status, &vm->idle);
+	} else {
+		spin_lock(&vm->invalidated_lock);
+		list_del_init(&bo_va->base.vm_status);
+		spin_unlock(&vm->invalidated_lock);
 	}
 
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
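One detail in this hunk pairs with the previous one: because the unconditional list_del_init() at the top of amdgpu_vm_bo_update() is gone, bo_va->base.vm_status can still be linked on a state list when it gets requeued here, so the list_add()/list_add_tail() calls become list_move()/list_move_tail(). A bare list_add() of a still-linked node would corrupt both lists; list_move() unlinks first and is also harmless on a node that was already list_del_init()ed. A minimal sketch with hypothetical names:

#include <linux/list.h>

/* Hypothetical illustration, not driver code: requeue a node that may or
 * may not currently sit on some list. list_move() is "del, then add", and
 * deleting a self-linked (list_del_init()ed) node is a no-op, so this is
 * safe in both states; list_add() alone is only correct for unlinked nodes.
 */
static inline void sketch_requeue(struct list_head *node,
				  struct list_head *new_head)
{
	list_move(node, new_head);
}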
@@ -1974,40 +1972,40 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			   struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va *bo_va, *tmp;
-	struct list_head moved;
+	struct reservation_object *resv;
 	bool clear;
 	int r;
 
-	INIT_LIST_HEAD(&moved);
-	spin_lock(&vm->moved_lock);
-	list_splice_init(&vm->moved, &moved);
-	spin_unlock(&vm->moved_lock);
+	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+		/* Per VM BOs never need to bo cleared in the page tables */
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
+		if (r)
+			return r;
+	}
 
-	list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
-		struct reservation_object *resv = bo_va->base.bo->tbo.resv;
+	spin_lock(&vm->invalidated_lock);
+	while (!list_empty(&vm->invalidated)) {
+		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
+					 base.vm_status);
+		resv = bo_va->base.bo->tbo.resv;
+		spin_unlock(&vm->invalidated_lock);
 
-		/* Per VM BOs never need to bo cleared in the page tables */
-		if (resv == vm->root.base.bo->tbo.resv)
-			clear = false;
 		/* Try to reserve the BO to avoid clearing its ptes */
-		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+		if (!amdgpu_vm_debug && reservation_object_trylock(resv))
 			clear = false;
 		/* Somebody else is using the BO right now */
 		else
 			clear = true;
 
 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
-		if (r) {
-			spin_lock(&vm->moved_lock);
-			list_splice(&moved, &vm->moved);
-			spin_unlock(&vm->moved_lock);
+		if (r)
 			return r;
-		}
 
-		if (!clear && resv != vm->root.base.bo->tbo.resv)
+		if (!clear)
 			reservation_object_unlock(resv);
-
+		spin_lock(&vm->invalidated_lock);
 	}
+	spin_unlock(&vm->invalidated_lock);
 
 	return 0;
 }
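The rewritten second half of amdgpu_vm_handle_moved() is an instance of a classic drain pattern: the spinlock protects only list membership, so it is dropped around the per-BO update (which may sleep) and retaken before the list is examined again, and list_first_entry() replaces the _safe iterator because other threads may modify the list while the lock is down. A self-contained sketch under those assumptions (hypothetical sketch_item/process names, not driver code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct sketch_item {
	struct list_head head;
};

/* Drain a spinlock-protected list, dropping the lock around per-item work.
 * process() is assumed to take the item off the list (in the driver,
 * amdgpu_vm_bo_update() moves the bo_va to another state list); otherwise
 * this loop would never terminate. No iterator survives the unlocked
 * window, which is why each pass restarts from list_first_entry().
 */
static int sketch_drain(struct list_head *list, spinlock_t *lock,
			int (*process)(struct sketch_item *item))
{
	struct sketch_item *item;
	int r;

	spin_lock(lock);
	while (!list_empty(list)) {
		item = list_first_entry(list, struct sketch_item, head);
		spin_unlock(lock);

		r = process(item);	/* may sleep */
		if (r)
			return r;

		spin_lock(lock);
	}
	spin_unlock(lock);

	return 0;
}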
@@ -2072,9 +2070,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 
 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
 	    !bo_va->base.moved) {
-		spin_lock(&vm->moved_lock);
 		list_move(&bo_va->base.vm_status, &vm->moved);
-		spin_unlock(&vm->moved_lock);
 	}
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
@@ -2430,9 +2426,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 	list_del(&bo_va->base.bo_list);
 
-	spin_lock(&vm->moved_lock);
+	spin_lock(&vm->invalidated_lock);
 	list_del(&bo_va->base.vm_status);
-	spin_unlock(&vm->moved_lock);
+	spin_unlock(&vm->invalidated_lock);
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
@@ -2489,10 +2485,12 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 
 		if (bo->tbo.type == ttm_bo_type_kernel) {
 			list_move(&bo_base->vm_status, &vm->relocated);
-		} else {
-			spin_lock(&bo_base->vm->moved_lock);
+		} else if (bo->tbo.resv == vm->root.base.bo->tbo.resv) {
 			list_move(&bo_base->vm_status, &vm->moved);
-			spin_unlock(&bo_base->vm->moved_lock);
+		} else {
+			spin_lock(&vm->invalidated_lock);
+			list_move(&bo_base->vm_status, &vm->invalidated);
+			spin_unlock(&vm->invalidated_lock);
 		}
 	}
 }
@@ -2637,9 +2635,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		vm->reserved_vmid[i] = NULL;
 	INIT_LIST_HEAD(&vm->evicted);
 	INIT_LIST_HEAD(&vm->relocated);
-	spin_lock_init(&vm->moved_lock);
 	INIT_LIST_HEAD(&vm->moved);
 	INIT_LIST_HEAD(&vm->idle);
+	INIT_LIST_HEAD(&vm->invalidated);
+	spin_lock_init(&vm->invalidated_lock);
 	INIT_LIST_HEAD(&vm->freed);
 
 	/* create scheduler entity for page table updates */