author    Christian König <christian.koenig@amd.com>  2017-08-22 06:50:46 -0400
committer Alex Deucher <alexander.deucher@amd.com>    2017-08-24 11:48:47 -0400
commit    457e0fee04b0c6c57a28a10e68b16c5f1386c80c (patch)
tree      ba40de6fd4a673fc374451ecba9bd4139a4f8efe /drivers
parent    ab7039325f4eef2e545d8cb3d6aed67998514bcb (diff)
drm/amdgpu: remove the GART copy hack
This isn't used since we don't map evicted BOs to GART any more.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 22 +++++-----------------
 1 file changed, 5 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 96ec4e2b56e9..3bd430e180b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1616,7 +1616,6 @@ error_free:
  *
  * @adev: amdgpu_device pointer
  * @exclusive: fence we need to sync to
- * @gtt_flags: flags as they are used for GTT
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
@@ -1630,7 +1629,6 @@ error_free:
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      struct dma_fence *exclusive,
-				      uint64_t gtt_flags,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
@@ -1685,11 +1683,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 	}
 
 	if (pages_addr) {
-		if (flags == gtt_flags)
-			src = adev->gart.table_addr +
-				(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
-		else
-			max_entries = min(max_entries, 16ull * 1024ull);
+		max_entries = min(max_entries, 16ull * 1024ull);
 		addr = 0;
 	} else if (flags & AMDGPU_PTE_VALID) {
 		addr += adev->vm_manager.vram_base_offset;
@@ -1734,10 +1728,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct amdgpu_vm *vm = bo_va->base.vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
-	uint64_t gtt_flags, flags;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
 	struct dma_fence *exclusive;
+	uint64_t flags;
 	int r;
 
 	if (clear || !bo_va->base.bo) {
@@ -1757,15 +1751,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		exclusive = reservation_object_get_excl(bo->tbo.resv);
 	}
 
-	if (bo) {
+	if (bo)
 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
-		gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
-			     adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
-			flags : 0;
-	} else {
+	else
 		flags = 0x0;
-		gtt_flags = ~0x0;
-	}
 
 	spin_lock(&vm->status_lock);
 	if (!list_empty(&bo_va->base.vm_status))
@@ -1773,8 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	spin_unlock(&vm->status_lock);
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
-		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
-					       gtt_flags, pages_addr, vm,
+		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
 					       mapping, flags, nodes,
 					       &bo_va->last_pt_update);
 		if (r)
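For illustration, a minimal standalone sketch of the branch this patch simplifies, assuming hypothetical stand-in names (split_mapping, PTE_VALID, vram_base_offset are not the amdgpu symbols): once the GART-copy path is gone, a mapping backed by system pages is always generated from the DMA-address array and merely capped at 16K entries per update, while a VRAM mapping is offset by the VRAM base.

/* Hypothetical, simplified illustration -- not amdgpu code. */
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID (1ull << 0)	/* stand-in for AMDGPU_PTE_VALID */

static uint64_t split_mapping(uint64_t max_entries, uint64_t addr,
			      uint64_t flags, const uint64_t *pages_addr,
			      uint64_t vram_base_offset)
{
	if (pages_addr) {
		/* system pages: PTEs come from the DMA-address array,
		 * so only the batch size needs limiting */
		if (max_entries > 16ull * 1024ull)
			max_entries = 16ull * 1024ull;
		addr = 0;
	} else if (flags & PTE_VALID) {
		/* VRAM: translate the offset to a GPU physical address */
		addr += vram_base_offset;
	}
	printf("entries=%llu addr=0x%llx\n",
	       (unsigned long long)max_entries, (unsigned long long)addr);
	return max_entries;
}

int main(void)
{
	uint64_t dma_addrs[1] = { 0 };

	split_mapping(1ull << 20, 0x1000, PTE_VALID, dma_addrs, 0);	/* system pages */
	split_mapping(1ull << 20, 0x1000, PTE_VALID, NULL, 0x8000000);	/* VRAM */
	return 0;
}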
