author	Christian König <christian.koenig@amd.com>	2017-08-01 04:51:43 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2017-08-17 15:46:07 -0400
commit	ec681545afe5a448b43a2fe5c206ee48e19dabb3 (patch)
tree	f69985a00503c64e886b83db2dbe2a9bc1994b6e /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent	4ab4016aaf82153d144fa678cd6b4b5b6f25ed70 (diff)
drm/amdgpu: separate bo_va structure
Split that into vm_bo_base and bo_va to allow other uses as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	81
1 file changed, 43 insertions, 38 deletions
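
For orientation, the split can be pictured as below. This sketch is reconstructed only from the field accesses visible in the hunks that follow (base.vm, base.bo, base.bo_list, base.vm_status); the authoritative definitions live in amdgpu_vm.h and may contain further members, so treat it as an approximation rather than the exact patch content:

	/* Generic per-VM state, shared by every structure that needs a
	 * place on the BO's va list and on the VM status lists. */
	struct amdgpu_vm_bo_base {
		struct amdgpu_vm	*vm;		/* VM this entry belongs to */
		struct amdgpu_bo	*bo;		/* backing BO, may be NULL */
		struct list_head	bo_list;	/* entry in bo->va */
		struct list_head	vm_status;	/* entry in vm->invalidated/cleared */
	};

	/* The mapping-specific part keeps only what is unique to a bo_va. */
	struct amdgpu_bo_va {
		struct amdgpu_vm_bo_base	base;		/* embedded generic part */
		unsigned			ref_count;	/* map/unmap reference count */
		struct list_head		valids;		/* mappings valid in the page tables */
		struct list_head		invalids;	/* mappings still needing an update */
		/* further members unchanged by this patch are omitted here */
	};
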
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 14012e80fa27..f24554f2d0e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -870,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 {
 	struct amdgpu_bo_va *bo_va;
 
-	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->vm == vm) {
+	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+		if (bo_va->base.vm == vm) {
 			return bo_va;
 		}
 	}
@@ -1726,7 +1726,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
 			bool clear)
 {
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_bo *bo = bo_va->base.bo;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
 	uint64_t gtt_flags, flags;
@@ -1735,27 +1736,27 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct dma_fence *exclusive;
 	int r;
 
-	if (clear || !bo_va->bo) {
+	if (clear || !bo_va->base.bo) {
 		mem = NULL;
 		nodes = NULL;
 		exclusive = NULL;
 	} else {
 		struct ttm_dma_tt *ttm;
 
-		mem = &bo_va->bo->tbo.mem;
+		mem = &bo_va->base.bo->tbo.mem;
 		nodes = mem->mm_node;
 		if (mem->mem_type == TTM_PL_TT) {
-			ttm = container_of(bo_va->bo->tbo.ttm, struct
-					   ttm_dma_tt, ttm);
+			ttm = container_of(bo_va->base.bo->tbo.ttm,
+					   struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+		exclusive = reservation_object_get_excl(bo->tbo.resv);
 	}
 
-	if (bo_va->bo) {
-		flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-		gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-			     adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
+	if (bo) {
+		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+		gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
+			     adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
 			flags : 0;
 	} else {
 		flags = 0x0;
@@ -1763,7 +1764,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	}
 
 	spin_lock(&vm->status_lock);
-	if (!list_empty(&bo_va->vm_status))
+	if (!list_empty(&bo_va->base.vm_status))
 		list_splice_init(&bo_va->valids, &bo_va->invalids);
 	spin_unlock(&vm->status_lock);
 
@@ -1786,9 +1787,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 	spin_lock(&vm->status_lock);
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
-	list_del_init(&bo_va->vm_status);
+	list_del_init(&bo_va->base.vm_status);
 	if (clear)
-		list_add(&bo_va->vm_status, &vm->cleared);
+		list_add(&bo_va->base.vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
 	if (vm->use_cpu_for_update) {
@@ -2001,7 +2002,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->invalidated)) {
 		bo_va = list_first_entry(&vm->invalidated,
-			struct amdgpu_bo_va, vm_status);
+			struct amdgpu_bo_va, base.vm_status);
 		spin_unlock(&vm->status_lock);
 
 		r = amdgpu_vm_bo_update(adev, bo_va, true);
@@ -2041,16 +2042,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	if (bo_va == NULL) {
 		return NULL;
 	}
-	bo_va->vm = vm;
-	bo_va->bo = bo;
+	bo_va->base.vm = vm;
+	bo_va->base.bo = bo;
+	INIT_LIST_HEAD(&bo_va->base.bo_list);
+	INIT_LIST_HEAD(&bo_va->base.vm_status);
+
 	bo_va->ref_count = 1;
-	INIT_LIST_HEAD(&bo_va->bo_list);
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
-	INIT_LIST_HEAD(&bo_va->vm_status);
 
 	if (bo)
-		list_add_tail(&bo_va->bo_list, &bo->va);
+		list_add_tail(&bo_va->base.bo_list, &bo->va);
 
 	return bo_va;
 }
@@ -2075,7 +2077,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		      uint64_t size, uint64_t flags)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_bo *bo = bo_va->base.bo;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	uint64_t eaddr;
 
 	/* validate the parameters */
@@ -2086,7 +2089,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	/* make sure object fit at this offset */
 	eaddr = saddr + size - 1;
 	if (saddr >= eaddr ||
-	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+	    (bo && offset + size > amdgpu_bo_size(bo)))
 		return -EINVAL;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2096,7 +2099,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	if (tmp) {
 		/* bo and tmp overlap, invalid addr */
 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
-			"0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
 			tmp->start, tmp->last + 1);
 		return -EINVAL;
 	}
@@ -2141,7 +2144,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 			      uint64_t size, uint64_t flags)
 {
 	struct amdgpu_bo_va_mapping *mapping;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_bo *bo = bo_va->base.bo;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	uint64_t eaddr;
 	int r;
 
@@ -2153,7 +2157,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 	/* make sure object fit at this offset */
 	eaddr = saddr + size - 1;
 	if (saddr >= eaddr ||
-	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+	    (bo && offset + size > amdgpu_bo_size(bo)))
 		return -EINVAL;
 
 	/* Allocate all the needed memory */
@@ -2161,7 +2165,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 	if (!mapping)
 		return -ENOMEM;
 
-	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
 	if (r) {
 		kfree(mapping);
 		return r;
@@ -2201,7 +2205,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       uint64_t saddr)
 {
 	struct amdgpu_bo_va_mapping *mapping;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2349,12 +2353,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		       struct amdgpu_bo_va *bo_va)
 {
 	struct amdgpu_bo_va_mapping *mapping, *next;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 
-	list_del(&bo_va->bo_list);
+	list_del(&bo_va->base.bo_list);
 
 	spin_lock(&vm->status_lock);
-	list_del(&bo_va->vm_status);
+	list_del(&bo_va->base.vm_status);
 	spin_unlock(&vm->status_lock);
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
@@ -2386,13 +2390,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo)
 {
-	struct amdgpu_bo_va *bo_va;
+	struct amdgpu_vm_bo_base *bo_base;
 
-	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		spin_lock(&bo_va->vm->status_lock);
-		if (list_empty(&bo_va->vm_status))
-			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-		spin_unlock(&bo_va->vm->status_lock);
+	list_for_each_entry(bo_base, &bo->va, bo_list) {
+		spin_lock(&bo_base->vm->status_lock);
+		if (list_empty(&bo_base->vm_status))
+			list_add(&bo_base->vm_status,
+				 &bo_base->vm->invalidated);
+		spin_unlock(&bo_base->vm->status_lock);
 	}
 }
 
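With this split, amdgpu_vm_bo_invalidate above walks bo->va as a list of plain amdgpu_vm_bo_base entries and touches only the shared fields, which is what lets structures other than bo_va join the list later. Callers that know an entry really is a bo_va (as amdgpu_vm_bo_find still does via base.bo_list) recover the containing structure in the usual kernel fashion; a hypothetical helper, not part of this patch, illustrating the pattern:

	/* Hypothetical illustration only: step back from the embedded base
	 * member to the enclosing amdgpu_bo_va. Valid only when the entry on
	 * bo->va is known to be a bo_va and not some other base user. */
	static inline struct amdgpu_bo_va *
	bo_va_from_base(struct amdgpu_vm_bo_base *base)
	{
		return container_of(base, struct amdgpu_bo_va, base);
	}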