Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	45
1 file changed, 28 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 0d68464d..03a5dabb 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -597,7 +597,7 @@ int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
 		return err;
 
 	gk20a_dbg(gpu_dbg_pte, "pte = 0x%p, addr=%08llx, size %d",
-			pte, gk20a_mm_iova_addr(sgt->sgl), pte_order);
+			pte, gk20a_mm_iova_addr(vm->mm->g, sgt->sgl), pte_order);
 
 	pte->ref = handle;
 	pte->sgt = sgt;
@@ -1554,7 +1554,7 @@ dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr)
 	mutex_lock(&vm->update_gmmu_lock);
 	buffer = find_mapped_buffer_locked(&vm->mapped_buffers, gpu_vaddr);
 	if (buffer)
-		addr = gk20a_mm_iova_addr(buffer->sgt->sgl);
+		addr = gk20a_mm_iova_addr(vm->mm->g, buffer->sgt->sgl);
 	mutex_unlock(&vm->update_gmmu_lock);
 
 	return addr;
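The hunk above shows the lookup discipline used throughout this file: take update_gmmu_lock, search the mapped-buffer set, read the address while the entry cannot be unmapped, then release the lock. A schematic sketch of that pattern, using pthreads and a linear search as stand-ins for the kernel mutex and the driver's rbtree lookup (all names here are illustrative, not the nvgpu definitions):

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

struct mapped_buffer {
	uint64_t gpu_va;	/* GPU virtual address of the mapping */
	uint64_t iova;		/* backing DMA/IOVA base */
};

struct vm {
	pthread_mutex_t update_lock;	/* stands in for update_gmmu_lock */
	struct mapped_buffer *buffers;
	size_t nbuffers;
};

/* Linear stand-in for the driver's tree lookup; caller holds the lock. */
static struct mapped_buffer *find_buffer_locked(struct vm *vm, uint64_t va)
{
	for (size_t i = 0; i < vm->nbuffers; i++)
		if (vm->buffers[i].gpu_va == va)
			return &vm->buffers[i];
	return NULL;
}

/* Resolve a GPU VA to its IOVA base; 0 means "not mapped". */
static uint64_t gpuva_to_iova_base(struct vm *vm, uint64_t gpu_va)
{
	uint64_t addr = 0;

	pthread_mutex_lock(&vm->update_lock);
	struct mapped_buffer *buf = find_buffer_locked(vm, gpu_va);
	if (buf)
		addr = buf->iova;
	pthread_mutex_unlock(&vm->update_lock);
	return addr;
}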
@@ -1657,16 +1657,19 @@ void gk20a_free_sgtable(struct sg_table **sgt)
 	*sgt = NULL;
 }
 
-u64 gk20a_mm_iova_addr(struct scatterlist *sgl)
+u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova)
+{
+	return iova | 1ULL << g->ops.mm.get_physical_addr_bits(g);
+}
+
+u64 gk20a_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl)
 {
 	u64 result = sg_phys(sgl);
 #ifdef CONFIG_TEGRA_IOMMU_SMMU
 	if (sg_dma_address(sgl) == DMA_ERROR_CODE)
 		result = 0;
-	else if (sg_dma_address(sgl)) {
-		result = sg_dma_address(sgl) |
-			1ULL << NV_MC_SMMU_VADDR_TRANSLATION_BIT;
-	}
+	else if (sg_dma_address(sgl))
+		result = gk20a_mm_smmu_vaddr_translate(g, sg_dma_address(sgl));
 #endif
 	return result;
 }
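For readers following the change: the removed NV_MC_SMMU_VADDR_TRANSLATION_BIT constant and the new per-chip hook both set a bit just above the chip's physical address range, which on Tegra marks the address as an SMMU IOVA rather than a raw physical address. A minimal standalone sketch of the encoding, assuming the 34-bit width this diff wires up for gk20a (main() is illustrative only):

#include <stdio.h>
#include <stdint.h>

/* First bit beyond the chip's physical address range; 34 on gk20a. */
#define PHYS_ADDR_BITS 34

/* Tag an IOVA so the memory controller treats it as SMMU-translated. */
static uint64_t smmu_vaddr_translate(uint64_t iova)
{
	return iova | (1ULL << PHYS_ADDR_BITS);
}

int main(void)
{
	uint64_t iova = 0x80001000ULL;
	/* Prints 0x80001000 -> 0x480001000: low bits preserved, bit 34 set. */
	printf("0x%llx -> 0x%llx\n",
	       (unsigned long long)iova,
	       (unsigned long long)smmu_vaddr_translate(iova));
	return 0;
}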
@@ -1709,7 +1712,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 	BUG_ON(space_to_skip & (page_size - 1));
 
 	while (space_to_skip > 0 && cur_chunk) {
-		u64 new_addr = gk20a_mm_iova_addr(cur_chunk);
+		u64 new_addr = gk20a_mm_iova_addr(vm->mm->g, cur_chunk);
 		if (new_addr) {
 			addr = new_addr;
 			addr += cur_offset;
@@ -1759,7 +1762,8 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 	gk20a_dbg(gpu_dbg_pte, "pte_lo=%d, pte_hi=%d", pte_lo, pte_hi);
 	for (pte_cur = pte_lo; pte_cur <= pte_hi; pte_cur++) {
 		if (likely(sgt)) {
-			u64 new_addr = gk20a_mm_iova_addr(cur_chunk);
+			u64 new_addr = gk20a_mm_iova_addr(vm->mm->g,
+					cur_chunk);
 			if (new_addr) {
 				addr = new_addr;
 				addr += cur_offset;
@@ -1886,11 +1890,11 @@ void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i)
 
 	if (small_valid)
 		pte_addr[gmmu_page_size_small] =
-			gk20a_mm_iova_addr(small_pte->sgt->sgl);
+			gk20a_mm_iova_addr(vm->mm->g, small_pte->sgt->sgl);
 
 	if (big_valid)
 		pte_addr[gmmu_page_size_big] =
-			gk20a_mm_iova_addr(big_pte->sgt->sgl);
+			gk20a_mm_iova_addr(vm->mm->g, big_pte->sgt->sgl);
 
 	pde_v[0] = gmmu_pde_size_full_f();
 	pde_v[0] |= big_valid ?
@@ -2270,7 +2274,7 @@ static int gk20a_init_vm(struct mm_gk20a *mm,
 		goto clean_up_ptes;
 	}
 	gk20a_dbg(gpu_dbg_pte, "bar 1 pdes.kv = 0x%p, pdes.phys = 0x%llx",
-			vm->pdes.kv, gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
+			vm->pdes.kv, gk20a_mm_iova_addr(vm->mm->g, vm->pdes.sgt->sgl));
 	/* we could release vm->pdes.kv but it's only one page... */
 
 	/* low-half: alloc small pages */
@@ -2728,9 +2732,9 @@ static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 			mm->bar1.aperture_size, false, "bar1");
 
 	gk20a_dbg_info("pde pa=0x%llx",
-			(u64)gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
+			(u64)gk20a_mm_iova_addr(g, vm->pdes.sgt->sgl));
 
-	pde_addr = gk20a_mm_iova_addr(vm->pdes.sgt->sgl);
+	pde_addr = gk20a_mm_iova_addr(g, vm->pdes.sgt->sgl);
 	pde_addr_lo = u64_lo32(pde_addr >> ram_in_base_shift_v());
 	pde_addr_hi = u64_hi32(pde_addr);
 
@@ -2814,9 +2818,9 @@ static int gk20a_init_system_vm(struct mm_gk20a *mm)
 			SZ_128K << 10, GK20A_PMU_VA_SIZE, false, "system");
 
 	gk20a_dbg_info("pde pa=0x%llx",
-			(u64)gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
+			(u64)gk20a_mm_iova_addr(g, vm->pdes.sgt->sgl));
 
-	pde_addr = gk20a_mm_iova_addr(vm->pdes.sgt->sgl);
+	pde_addr = gk20a_mm_iova_addr(g, vm->pdes.sgt->sgl);
 	pde_addr_lo = u64_lo32(pde_addr >> ram_in_base_shift_v());
 	pde_addr_hi = u64_hi32(pde_addr);
 
@@ -3034,7 +3038,8 @@ int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
 void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
-	u32 addr_lo = u64_lo32(gk20a_mm_iova_addr(vm->pdes.sgt->sgl) >> 12);
+	u32 addr_lo = u64_lo32(gk20a_mm_iova_addr(vm->mm->g,
+			vm->pdes.sgt->sgl) >> 12);
 	u32 data;
 	s32 retry = 200;
 	static DEFINE_MUTEX(tlb_lock);
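A note on the >> 12 in this hunk: the PDE table base is page-aligned, so dropping the twelve zero bits lets a 4K-aligned 64-bit address fit a hardware register field that counts in pages. A sketch of the lo/hi split used here; these helper definitions are plausible reconstructions for illustration, not copied from the nvgpu headers:

#include <stdint.h>

/* Plausible definitions of the split helpers (assumption). */
static inline uint32_t u64_lo32(uint64_t n) { return (uint32_t)n; }
static inline uint32_t u64_hi32(uint64_t n) { return (uint32_t)(n >> 32); }

/* A 4K-aligned 64-bit PDE base packed into a 32-bit register field. */
static inline uint32_t pdb_addr_field(uint64_t pde_base)
{
	return u64_lo32(pde_base >> 12); /* low 12 bits are zero by alignment */
}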
@@ -3116,6 +3121,11 @@ bool gk20a_mm_mmu_debug_mode_enabled(struct gk20a *g)
 		fb_mmu_debug_ctrl_debug_enabled_v();
 }
 
+u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g)
+{
+	return 34;
+}
+
 void gk20a_init_mm(struct gpu_ops *gops)
 {
 	/* remember to remove NVGPU_GPU_FLAGS_SUPPORT_SPARSE_ALLOCS in
@@ -3134,5 +3144,6 @@ void gk20a_init_mm(struct gpu_ops *gops)
 	gops->mm.l2_invalidate = gk20a_mm_l2_invalidate;
 	gops->mm.l2_flush = gk20a_mm_l2_flush;
 	gops->mm.tlb_invalidate = gk20a_mm_tlb_invalidate;
+	gops->mm.get_physical_addr_bits = gk20a_mm_get_physical_addr_bits;
 }
 
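Wiring get_physical_addr_bits into gpu_ops is the HAL pattern this driver uses throughout: common code calls through a per-chip function-pointer table instead of hard-coding constants like the removed NV_MC_SMMU_VADDR_TRANSLATION_BIT, so a later chip can report a different width without touching shared paths. A simplified sketch of the pattern; the struct layout and the 37-bit chip are illustrative assumptions, not the real nvgpu definitions:

#include <stdint.h>

struct gpu;	/* forward declaration so the ops table can take it */

/* Per-chip MM operations; common code only calls through these pointers. */
struct mm_ops {
	uint32_t (*get_physical_addr_bits)(struct gpu *g);
};

struct gpu {
	struct mm_ops mm;
};

static uint32_t gk20a_phys_addr_bits(struct gpu *g)
{
	return 34;	/* matches the value registered in this diff */
}

static uint32_t later_chip_phys_addr_bits(struct gpu *g)
{
	return 37;	/* hypothetical chip with wider physical addressing */
}

/* Chip-agnostic helper, mirroring gk20a_mm_smmu_vaddr_translate(). */
static uint64_t tag_smmu_iova(struct gpu *g, uint64_t iova)
{
	return iova | (1ULL << g->mm.get_physical_addr_bits(g));
}

/* Registration step, analogous to gk20a_init_mm() above. */
static void init_mm_gk20a(struct gpu *g)
{
	g->mm.get_physical_addr_bits = gk20a_phys_addr_bits;
}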