Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 53
 1 file changed, 9 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index f4395116..16fe7149 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1383,7 +1383,7 @@ int nvgpu_vm_map_compbits(struct vm_gk20a *vm,
 		return -EINVAL;
 	}
 
-	*mapping_iova = gk20a_mm_iova_addr(g, mapped_buffer->sgt->sgl, 0);
+	*mapping_iova = nvgpu_mem_get_addr_sgl(g, mapped_buffer->sgt->sgl);
 	*compbits_win_gva = mapped_buffer->ctag_map_win_addr;
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
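
The new call site uses nvgpu_mem_get_addr_sgl(), which lives in common code rather than the gk20a HAL and drops the unused flags argument. A minimal sketch of what that helper plausibly looks like, assuming it simply absorbs the logic of the gk20a_mm_iova_addr() routine deleted later in this patch (the body below is a reconstruction from that deleted code, not the verbatim implementation):

u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl)
{
	/* No IOMMU in front of the GPU: the SGL holds raw physical pages. */
	if (!device_is_iommuable(dev_from_gk20a(g)))
		return sg_phys(sgl);

	/* Not DMA-mapped: fall back to the physical address. */
	if (sg_dma_address(sgl) == 0)
		return sg_phys(sgl);

	/* DMA mapping failed: report no usable address. */
	if (sg_dma_address(sgl) == DMA_ERROR_CODE)
		return 0;

	/* Tag the IOVA so the GMMU routes accesses through the SMMU. */
	return gk20a_mm_smmu_vaddr_translate(g, sg_dma_address(sgl));
}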
@@ -1454,30 +1454,6 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 }
 #endif
 
-/*
- * If mem is in VIDMEM, return base address in vidmem
- * else return IOVA address for SYSMEM
- */
-u64 nvgpu_mem_get_base_addr(struct gk20a *g, struct nvgpu_mem *mem,
-		u32 flags)
-{
-	struct nvgpu_page_alloc *alloc;
-	u64 addr;
-
-	if (mem->aperture == APERTURE_VIDMEM) {
-		alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
-
-		/* This API should not be used with > 1 chunks */
-		WARN_ON(alloc->nr_chunks != 1);
-
-		addr = alloc->base;
-	} else {
-		addr = g->ops.mm.get_iova_addr(g, mem->priv.sgt->sgl, flags);
-	}
-
-	return addr;
-}
-
 #if defined(CONFIG_GK20A_VIDMEM)
 static struct nvgpu_mem *get_pending_mem_desc(struct mm_gk20a *mm)
 {
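
nvgpu_mem_get_base_addr() is likewise folded into a common helper; the call sites further down now use nvgpu_mem_get_addr(). A hedged sketch, assuming it keeps the VIDMEM branch of the deleted function above and routes SYSMEM through the new SGL helper instead of the get_iova_addr HAL op:

u64 nvgpu_mem_get_addr(struct gk20a *g, struct nvgpu_mem *mem)
{
	struct nvgpu_page_alloc *alloc;

	if (mem->aperture == APERTURE_VIDMEM) {
		alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);

		/* Like its predecessor, only valid for one contiguous chunk. */
		WARN_ON(alloc->nr_chunks != 1);
		return alloc->base;
	}

	return nvgpu_mem_get_addr_sgl(g, mem->priv.sgt->sgl);
}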
@@ -1526,8 +1502,7 @@ dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr)
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	buffer = __nvgpu_vm_find_mapped_buf(vm, gpu_vaddr);
 	if (buffer)
-		addr = g->ops.mm.get_iova_addr(g, buffer->sgt->sgl,
-				buffer->flags);
+		addr = nvgpu_mem_get_addr_sgl(g, buffer->sgt->sgl);
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 	return addr;
@@ -1545,21 +1520,6 @@ u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova)
 	return iova;
 }
 
-u64 gk20a_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl,
-		u32 flags)
-{
-	if (!device_is_iommuable(dev_from_gk20a(g)))
-		return sg_phys(sgl);
-
-	if (sg_dma_address(sgl) == 0)
-		return sg_phys(sgl);
-
-	if (sg_dma_address(sgl) == DMA_ERROR_CODE)
-		return 0;
-
-	return gk20a_mm_smmu_vaddr_translate(g, sg_dma_address(sgl));
-}
-
 /* for gk20a the "video memory" apertures here are misnomers. */
 static inline u32 big_valid_pde0_bits(struct gk20a *g,
 				      struct nvgpu_gmmu_pd *pd, u64 addr)
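
Note that gk20a_mm_smmu_vaddr_translate() survives the cleanup (it still appears as kept context above). On gk20a it marks an IOVA as SMMU-translated by setting the bit just above the chip's physical address range, i.e. bit 34 given the value returned by gk20a_mm_get_physical_addr_bits() in the last hunk. A sketch under that assumption:

u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova)
{
	/* Set the bit one above the 34-bit physical range on the IOVA. */
	return iova | 1ULL << g->ops.mm.get_physical_addr_bits(g);
}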
@@ -2071,7 +2031,7 @@ u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct nvgpu_mem *inst_block)
 	if (g->mm.has_physical_mode)
 		addr = gk20a_mem_phys(inst_block);
 	else
-		addr = nvgpu_mem_get_base_addr(g, inst_block, 0);
+		addr = nvgpu_mem_get_addr(g, inst_block);
 
 	return addr;
 }
@@ -2194,7 +2154,7 @@ static int gk20a_init_ce_vm(struct mm_gk20a *mm)
 void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
 		struct vm_gk20a *vm)
 {
-	u64 pdb_addr = nvgpu_mem_get_base_addr(g, vm->pdb.mem, 0);
+	u64 pdb_addr = nvgpu_mem_get_addr(g, vm->pdb.mem);
 	u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
 	u32 pdb_addr_hi = u64_hi32(pdb_addr);
 
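
The PDB base is split into the two instance-block words exactly as before; only the source of the address changes. A worked example, assuming ram_in_base_shift_v() is 12 (4 KiB alignment) as on gk20a:

/*
 * For pdb_addr = 0x123456000:
 *
 *   pdb_addr_lo = u64_lo32(0x123456000 >> 12) = 0x00123456
 *   pdb_addr_hi = u64_hi32(0x123456000)       = 0x00000001
 */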
@@ -2465,6 +2425,11 @@ u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g)
 	return 34;
 }
 
+u64 gk20a_mm_gpu_phys_addr(struct gk20a *g, u64 phys, u32 flags)
+{
+	return phys;
+}
+
 const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
 		u32 big_page_size)
 {
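
The newly added gk20a_mm_gpu_phys_addr() is an identity hook: gk20a needs no extra munging of a physical address before it goes into a page table entry, while a chip behind an SMMU could override it to tag the address. A sketch of how a per-chip HAL would typically install it; the gops->mm.gpu_phys_addr field name below is an assumption following the usual nvgpu ops pattern, not something shown in this diff:

static void example_init_mm_hal(struct gpu_ops *gops)
{
	/* Hypothetical field name; gk20a passes the address through as-is. */
	gops->mm.gpu_phys_addr = gk20a_mm_gpu_phys_addr;
}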