author     Christian König <christian.koenig@amd.com>	2017-09-11 10:54:59 -0400
committer  Alex Deucher <alexander.deucher@amd.com>	2017-09-13 12:10:06 -0400
commit     4e55eb3879fea6d8c7d414cebaa5bff1da58b4a1 (patch)
tree       40faee87cf8636d6162e89f6ecd1db8839cdb46c /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent     4a00f21db800bc64264bb6764c3d0d0878e9f4c4 (diff)
drm/amdgpu: fix amdgpu_vm_handle_moved as well v2
There is no guarantee that the last BO_VA actually needed an update.
In addition to that, all command submissions must wait for moved BOs to
be cleared, not just the first one.
v2: Don't overwrite any newer fence.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
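
With the amdgpu_sync parameter gone from amdgpu_vm_handle_moved(), a submission
path can no longer pick up the page-table fence from this function; it has to
wait on vm->last_update itself, which amdgpu_vm_bo_update() now keeps current
for every per-VM BO rather than only the last one processed. A minimal
caller-side sketch of that flow (the function name example_vm_sync is
hypothetical; amdgpu_sync_fence() matches the signature visible in the removed
hunk below):

	/* Hypothetical caller-side sketch, not part of this patch: flush the
	 * moved list first, then make the submission wait on the VM-wide
	 * fence instead of a single bo_va->last_pt_update.
	 */
	static int example_vm_sync(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_sync *sync)
	{
		int r;

		r = amdgpu_vm_handle_moved(adev, vm);	/* new signature, no sync */
		if (r)
			return r;

		/* vm->last_update covers all moved BOs, not just the first one */
		return amdgpu_sync_fence(adev, sync, vm->last_update);
	}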
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 64baa3138965..2df254cc802e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1743,7 +1743,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	dma_addr_t *pages_addr = NULL;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
-	struct dma_fence *exclusive;
+	struct dma_fence *exclusive, **last_update;
 	uint64_t flags;
 	int r;
 
@@ -1769,6 +1769,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	else
 		flags = 0x0;
 
+	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+		last_update = &vm->last_update;
+	else
+		last_update = &bo_va->last_pt_update;
+
 	if (!clear && bo_va->base.moved) {
 		bo_va->base.moved = false;
 		list_splice_init(&bo_va->valids, &bo_va->invalids);
@@ -1780,7 +1785,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
 					       mapping, flags, nodes,
-					       &bo_va->last_pt_update);
+					       last_update);
 		if (r)
 			return r;
 	}
@@ -1803,12 +1808,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		trace_amdgpu_vm_bo_mapping(mapping);
 	}
 
-	if (bo_va->base.bo &&
-	    bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) {
-		dma_fence_put(vm->last_update);
-		vm->last_update = dma_fence_get(bo_va->last_pt_update);
-	}
-
 	return 0;
 }
 
@@ -2006,15 +2005,15 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
  * PTs have to be reserved!
  */
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
-			   struct amdgpu_vm *vm,
-			   struct amdgpu_sync *sync)
+			   struct amdgpu_vm *vm)
 {
-	struct amdgpu_bo_va *bo_va = NULL;
 	bool clear;
 	int r = 0;
 
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->moved)) {
+		struct amdgpu_bo_va *bo_va;
+
 		bo_va = list_first_entry(&vm->moved,
 					 struct amdgpu_bo_va, base.vm_status);
 		spin_unlock(&vm->status_lock);
@@ -2030,9 +2029,6 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 	}
 	spin_unlock(&vm->status_lock);
 
-	if (bo_va)
-		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
-
 	return r;
 }
 
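The v2 note about not overwriting a newer fence comes down to where the fence
swap happens: instead of amdgpu_vm_bo_update() unconditionally replacing
vm->last_update at the end, the selected **last_update pointer is handed down,
so only a path that actually submitted a page-table job replaces the old
fence. A sketch of that reference-counted swap, assuming it mirrors what the
update path does with the pointer it receives (the helper name is made up):

	/* Hypothetical sketch of the fence handoff: this is only reached
	 * when new page-table work was really submitted, so a pending
	 * newer fence in *last_update is never clobbered by a stale
	 * bo_va->last_pt_update that was never refreshed (the v1 bug).
	 */
	static void example_set_last_update(struct dma_fence **last_update,
					    struct dma_fence *new_fence)
	{
		dma_fence_put(*last_update);		 /* drop the superseded fence */
		*last_update = dma_fence_get(new_fence); /* hold a ref on the new one */
	}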