author     Christian König <christian.koenig@amd.com>    2018-01-12 09:26:08 -0500
committer  Alex Deucher <alexander.deucher@amd.com>      2018-02-19 14:17:44 -0500
commit     132f34e4b558488cc8d153a1d18833054a76e44c (patch)
tree       f3395e8c95a922f65b43d6d3185c082d2f7f039c /drivers/gpu/drm/amd
parent     770d13b19fdf365a99e559f1d47f1380910a947d (diff)
drm/amdgpu: move struct gart_funcs into amdgpu_gmc.h
And rename it to struct gmc_funcs.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Samuel Li <Samuel.Li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
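
The mechanics of the change, condensed from the diff below: the per-ASIC table of callbacks that used to live in amdgpu.h as struct amdgpu_gart_funcs (reached via adev->gart.gart_funcs) now lives in amdgpu_gmc.h as struct amdgpu_gmc_funcs (reached via adev->gmc.gmc_funcs), and the wrapper macros are renamed from amdgpu_gart_* to amdgpu_gmc_*. The stand-alone, user-space sketch that follows only illustrates that function-pointer-table pattern; every "demo_" identifier is hypothetical and it is not the driver code itself.

/*
 * Illustration of the pattern this patch moves and renames: a per-ASIC
 * table of function pointers (struct amdgpu_gmc_funcs in the driver)
 * hangs off the GMC state (adev->gmc.gmc_funcs), and thin wrapper macros
 * such as amdgpu_gmc_flush_gpu_tlb() dispatch through it.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_device;

/* analogue of struct amdgpu_gmc_funcs (trimmed to two callbacks) */
struct demo_gmc_funcs {
        void (*flush_gpu_tlb)(struct demo_device *dev, uint32_t vmid);
        uint64_t (*get_vm_pte_flags)(struct demo_device *dev, uint32_t flags);
};

struct demo_gmc {
        const struct demo_gmc_funcs *gmc_funcs; /* was adev->gart.gart_funcs */
};

struct demo_device {
        struct demo_gmc gmc;
};

/* analogue of the renamed wrapper macro amdgpu_gmc_flush_gpu_tlb() */
#define demo_gmc_flush_gpu_tlb(dev, vmid) \
        (dev)->gmc.gmc_funcs->flush_gpu_tlb((dev), (vmid))

/* one ASIC-specific implementation, in the spirit of gmc_v9_0_flush_gpu_tlb() */
static void demo_v9_flush_gpu_tlb(struct demo_device *dev, uint32_t vmid)
{
        (void)dev;
        printf("flush GPU TLB for vmid %u\n", (unsigned)vmid);
}

static uint64_t demo_v9_get_vm_pte_flags(struct demo_device *dev, uint32_t flags)
{
        (void)dev;
        return flags;   /* the real callback translates UAPI flags to HW PTE bits */
}

static const struct demo_gmc_funcs demo_v9_gmc_funcs = {
        .flush_gpu_tlb = demo_v9_flush_gpu_tlb,
        .get_vm_pte_flags = demo_v9_get_vm_pte_flags,
};

int main(void)
{
        /* registration step, like gmc_v9_0_set_gmc_funcs() assigning adev->gmc.gmc_funcs */
        struct demo_device dev = { .gmc = { .gmc_funcs = &demo_v9_gmc_funcs } };

        demo_gmc_flush_gpu_tlb(&dev, 0);
        return 0;
}

The ASIC-specific files (gmc_v6_0.c through gmc_v9_0.c) perform the equivalent of this registration in their early_init path, as the hunks below show.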
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu.h          30
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    2
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c     10
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h      3
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c       4
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h      23
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c       17
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c         4
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c        29
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c        33
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c        32
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c        30
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c        4
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c         8
-rwxr-xr-x   drivers/gpu/drm/amd/amdgpu/vce_v4_0.c         4
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c         8
16 files changed, 114 insertions, 127 deletions
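
One more hypothetical user-space sketch before the hunks themselves (again, demo_* names are not driver code): several callers in amdgpu_vm.c treat set_prt as optional and NULL-check it before use; after this patch they reach it through adev->gmc.gmc_funcs instead of adev->gart.gart_funcs, roughly like this.

/*
 * Caller-side pattern the hunks below convert: optional callbacks such as
 * set_prt are reached through the gmc_funcs table and NULL-checked, as
 * amdgpu_vm.c does for ASICs without PRT support.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_device;

struct demo_gmc_funcs {
        void (*set_prt)(struct demo_device *dev, bool enable);  /* may be NULL */
};

struct demo_device {
        struct {
                const struct demo_gmc_funcs *gmc_funcs;
        } gmc;
};

/* loosely mirrors the NULL checks around set_prt in amdgpu_vm.c */
static void demo_vm_set_prt(struct demo_device *dev, bool enable)
{
        if (!dev->gmc.gmc_funcs->set_prt)
                return;
        dev->gmc.gmc_funcs->set_prt(dev, enable);
}

static void demo_asic_set_prt(struct demo_device *dev, bool enable)
{
        (void)dev;
        printf("PRT support %s\n", enable ? "enabled" : "disabled");
}

static const struct demo_gmc_funcs demo_gmc_funcs = {
        .set_prt = demo_asic_set_prt,
};

int main(void)
{
        struct demo_device dev = { .gmc = { .gmc_funcs = &demo_gmc_funcs } };

        demo_vm_set_prt(&dev, true);
        return 0;
}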
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1b369a62bac3..3cb0707e9893 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -333,28 +333,6 @@ struct amdgpu_vm_pte_funcs {
 			   uint32_t incr, uint64_t flags);
 };
 
-/* provided by the gmc block */
-struct amdgpu_gart_funcs {
-	/* flush the vm tlb via mmio */
-	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-			      uint32_t vmid);
-	/* write pte/pde updates using the cpu */
-	int (*set_pte_pde)(struct amdgpu_device *adev,
-			   void *cpu_pt_addr, /* cpu addr of page table */
-			   uint32_t gpu_page_idx, /* pte/pde to update */
-			   uint64_t addr, /* addr to write into pte/pde */
-			   uint64_t flags); /* access flags */
-	/* enable/disable PRT support */
-	void (*set_prt)(struct amdgpu_device *adev, bool enable);
-	/* set pte flags based per asic */
-	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
-				     uint32_t flags);
-	/* get the pde for a given mc addr */
-	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
-			   u64 *dst, u64 *flags);
-	uint32_t (*get_invalidate_req)(unsigned int vmid);
-};
-
 /* provided by the ih block */
 struct amdgpu_ih_funcs {
 	/* ring read/write ptr handling, called from interrupt context */
@@ -1797,13 +1775,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
 #define amdgpu_asic_flush_hdp(adev) (adev)->asic_funcs->flush_hdp((adev))
 #define amdgpu_asic_invalidate_hdp(adev) (adev)->asic_funcs->invalidate_hdp((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, level, dst, flags) (adev)->gart.gart_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a2204c770776..113c92d562c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1775,7 +1775,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
 	adev->vm_manager.vm_pte_num_rings = 0;
-	adev->gart.gart_funcs = NULL;
+	adev->gmc.gmc_funcs = NULL;
 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index b730dee4cb0e..18d23878ad14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -241,14 +241,14 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			continue;
 
 		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
+			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
 					       t, page_base, flags);
 			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
 	}
 	mb();
 	amdgpu_asic_flush_hdp(adev);
-	amdgpu_gart_flush_gpu_tlb(adev, 0);
+	amdgpu_gmc_flush_gpu_tlb(adev, 0);
 	return 0;
 }
 
@@ -280,7 +280,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
 	for (i = 0; i < pages; i++) {
 		page_base = dma_addr[i];
 		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-			amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
+			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
 			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
 	}
@@ -331,7 +331,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
 	mb();
 	amdgpu_asic_flush_hdp(adev);
-	amdgpu_gart_flush_gpu_tlb(adev, 0);
+	amdgpu_gmc_flush_gpu_tlb(adev, 0);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index d4a43302c2be..456295c00291 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -31,7 +31,6 @@
  */
 struct amdgpu_device;
 struct amdgpu_bo;
-struct amdgpu_gart_funcs;
 
 #define AMDGPU_GPU_PAGE_SIZE 4096
 #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
@@ -52,8 +51,6 @@ struct amdgpu_gart {
 
 	/* Asic default pte flags */
 	uint64_t gart_pte_flags;
-
-	const struct amdgpu_gart_funcs *gart_funcs;
 };
 
 int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e48b4ec88c8c..77304a81a290 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -634,7 +634,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		if (r)
 			goto error_backoff;
 
-		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
+		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
 		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
 				     args->offset_in_bo, args->map_size,
 				     va_flags);
@@ -654,7 +654,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		if (r)
 			goto error_backoff;
 
-		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
+		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
 		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
 					     args->offset_in_bo, args->map_size,
 					     va_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index e867be599b8d..a4a8374f7f3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -48,6 +48,27 @@ struct amdgpu_vmhub {
 /*
  * GPU MC structures, functions & helpers
  */
+struct amdgpu_gmc_funcs {
+	/* flush the vm tlb via mmio */
+	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
+			      uint32_t vmid);
+	/* write pte/pde updates using the cpu */
+	int (*set_pte_pde)(struct amdgpu_device *adev,
+			   void *cpu_pt_addr, /* cpu addr of page table */
+			   uint32_t gpu_page_idx, /* pte/pde to update */
+			   uint64_t addr, /* addr to write into pte/pde */
+			   uint64_t flags); /* access flags */
+	/* enable/disable PRT support */
+	void (*set_prt)(struct amdgpu_device *adev, bool enable);
+	/* set pte flags based per asic */
+	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
+				     uint32_t flags);
+	/* get the pde for a given mc addr */
+	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
+			   u64 *dst, u64 *flags);
+	uint32_t (*get_invalidate_req)(unsigned int vmid);
+};
+
 struct amdgpu_gmc {
 	resource_size_t aper_size;
 	resource_size_t aper_base;
@@ -79,6 +100,8 @@ struct amdgpu_gmc {
 	/* protects concurrent invalidation */
 	spinlock_t invalidate_lock;
 	bool translate_further;
+
+	const struct amdgpu_gmc_funcs *gmc_funcs;
 };
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 988ccb248b54..da634ae6ca8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -679,8 +679,8 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
 		value = params->pages_addr ?
 			amdgpu_vm_map_gart(params->pages_addr, addr) :
 			addr;
-		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
+		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
 				       i, value, flags);
 		addr += incr;
 	}
 }
@@ -738,7 +738,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
 	level += params->adev->vm_manager.root_level;
 	pt = amdgpu_bo_gpu_offset(bo);
 	flags = AMDGPU_PTE_VALID;
-	amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
+	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
 	if (shadow) {
 		pde = shadow_addr + (entry - parent->entries) * 8;
 		params->func(params, pde, pt, 1, 0, flags);
@@ -967,8 +967,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 	}
 
 	entry->huge = true;
-	amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
-			       &dst, &flags);
+	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
 
 	if (p->func == amdgpu_vm_cpu_set_ptes) {
 		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
@@ -1485,7 +1484,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 
 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
-	adev->gart.gart_funcs->set_prt(adev, enable);
+	adev->gmc.gmc_funcs->set_prt(adev, enable);
 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
 }
 
@@ -1494,7 +1493,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
  */
 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 {
-	if (!adev->gart.gart_funcs->set_prt)
+	if (!adev->gmc.gmc_funcs->set_prt)
 		return;
 
 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1529,7 +1528,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
 {
 	struct amdgpu_prt_cb *cb;
 
-	if (!adev->gart.gart_funcs->set_prt)
+	if (!adev->gmc.gmc_funcs->set_prt)
 		return;
 
 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -2405,7 +2404,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
-	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
 	struct amdgpu_bo *root;
 	u64 fault;
 	int i, r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 59928b7e741d..aaa990c5c0bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3688,11 +3688,11 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 98411e3479f7..daaad3f8fb17 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -37,7 +37,7 @@
 #include "dce/dce_6_0_sh_mask.h"
 #include "si_enums.h"
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v6_0_wait_for_idle(void *handle);
 
@@ -357,17 +357,14 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
 {
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
-static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
@@ -559,7 +556,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v6_0_set_fault_enable_default(adev, true);
 
-	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v6_0_flush_gpu_tlb(adev, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
@@ -793,7 +790,7 @@ static int gmc_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v6_0_set_gart_funcs(adev);
+	gmc_v6_0_set_gmc_funcs(adev);
 	gmc_v6_0_set_irq_funcs(adev);
 
 	return 0;
@@ -1127,9 +1124,9 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
 	.set_powergating_state = gmc_v6_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
+	.set_pte_pde = gmc_v6_0_set_pte_pde,
 	.set_prt = gmc_v6_0_set_prt,
 	.get_vm_pde = gmc_v6_0_get_vm_pde,
 	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
@@ -1140,10 +1137,10 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
 	.process = gmc_v6_0_process_interrupt,
 };
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
 }
 
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 71986cddcbcc..082500222ef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -43,7 +43,7 @@
 
 #include "amdgpu_atombios.h"
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v7_0_wait_for_idle(void *handle);
 
@@ -422,22 +422,21 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
  */
 
 /**
- * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
  *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
 {
 	/* bits 0-15 are the VM contexts0-15 */
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
 /**
- * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v7_0_set_pte_pde - update the page tables using MMIO
  *
  * @adev: amdgpu_device pointer
  * @cpu_pt_addr: cpu address of the page table
@@ -447,11 +446,9 @@ static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
  *
  * Update the page tables using the CPU.
  */
-static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
@@ -672,7 +669,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 		WREG32(mmCHUB_CONTROL, tmp);
 	}
 
-	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v7_0_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
@@ -919,7 +916,7 @@ static int gmc_v7_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v7_0_set_gart_funcs(adev);
+	gmc_v7_0_set_gmc_funcs(adev);
 	gmc_v7_0_set_irq_funcs(adev);
 
 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1306,9 +1303,9 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 	.set_powergating_state = gmc_v7_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
+	.set_pte_pde = gmc_v7_0_set_pte_pde,
 	.set_prt = gmc_v7_0_set_prt,
 	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v7_0_get_vm_pde
@@ -1319,10 +1316,10 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
 	.process = gmc_v7_0_process_interrupt,
 };
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
 }
 
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 7a021c6fb0bd..ac73b2c60fc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -45,7 +45,7 @@
 #include "amdgpu_atombios.h"
 
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v8_0_wait_for_idle(void *handle);
 
@@ -597,14 +597,14 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  */
 
 /**
- * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
  *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
 				   uint32_t vmid)
 {
 	/* bits 0-15 are the VM contexts0-15 */
@@ -612,7 +612,7 @@ static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v8_0_set_pte_pde - update the page tables using MMIO
  *
  * @adev: amdgpu_device pointer
  * @cpu_pt_addr: cpu address of the page table
@@ -622,11 +622,9 @@ static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
  *
  * Update the page tables using the CPU.
  */
-static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
@@ -888,7 +886,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v8_0_set_fault_enable_default(adev, true);
 
-	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v8_0_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
@@ -1009,7 +1007,7 @@ static int gmc_v8_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v8_0_set_gart_funcs(adev);
+	gmc_v8_0_set_gmc_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
 
 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1640,9 +1638,9 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+	.set_pte_pde = gmc_v8_0_set_pte_pde,
 	.set_prt = gmc_v8_0_set_prt,
 	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v8_0_get_vm_pde
@@ -1653,10 +1651,10 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
 	.process = gmc_v8_0_process_interrupt,
 };
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
 }
 
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e3d0098f0524..f049c84fd76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -316,14 +316,14 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
  */
 
 /**
- * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
  *
  * Flush the TLB for the requested page table.
  */
-static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
 					uint32_t vmid)
 {
 	/* Use register 17 for GART */
@@ -367,7 +367,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v9_0_set_pte_pde - update the page tables using MMIO
  *
  * @adev: amdgpu_device pointer
  * @cpu_pt_addr: cpu address of the page table
@@ -377,11 +377,9 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
  *
  * Update the page tables using the CPU.
  */
-static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
@@ -491,25 +489,25 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
 	}
 }
 
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
+	.set_pte_pde = gmc_v9_0_set_pte_pde,
 	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
 	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v9_0_get_vm_pde
 };
 
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
 }
 
 static int gmc_v9_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v9_0_set_gart_funcs(adev);
+	gmc_v9_0_set_gmc_funcs(adev);
 	gmc_v9_0_set_irq_funcs(adev);
 
 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
@@ -981,7 +979,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 
 	gfxhub_v1_0_set_fault_enable_default(adev, value);
 	mmhub_v1_0_set_fault_enable_default(adev, value);
-	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v9_0_flush_gpu_tlb(adev, 0);
 
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 8a0b1b9cbca8..892ec22142ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1136,11 +1136,11 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				   unsigned vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 6b95f4f344b5..4c19c96a8e59 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1294,12 +1294,12 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 	uint32_t data0, data1, mask;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
@@ -1346,11 +1346,11 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned int vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 7cf2eef68cf2..071fb17810d8 100755
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -968,11 +968,11 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
 				    unsigned int vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index b99e15c43e45..659a8f2d9bf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -891,12 +891,12 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 	uint32_t data0, data1, mask;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
@@ -1024,11 +1024,11 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned int vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);