From 583704620db88e391f6b14acc57af859a70127de Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Fri, 9 Jun 2017 11:42:50 -0700
Subject: gpu: nvgpu: Implement PD packing

In some cases page directories require less than a full page of
memory. For example, on Pascal, the final PD level for large pages is
only 256 bytes; thus 16 PDs can fit in a single page. Allocating an
entire page for each of these 256 B PDs is extremely wasteful.

This patch aims to alleviate the wasted DMA memory from having small
PDs in a full page by packing multiple small PDs into a single page.

The packing is implemented as a slab allocator: each page is a slab,
and multiple PD instances can be allocated from each page.

Several modifications to the nvgpu_gmmu_pd struct were also needed to
support this. The nvgpu_mem is now a pointer, and there is an explicit
offset into the nvgpu_mem struct so that each nvgpu_gmmu_pd knows what
portion of the memory it is using. The nvgpu_pde_phys_addr() and
pd_write() functions also require some changes, since the PD is no
longer always situated at the start of the nvgpu_mem.

Initialization and cleanup of the page tables for each VM was slightly
modified to work through the new pd_cache implementation.

Some PDs (i.e. the PDB), despite not being a full page, still require
a full page for alignment purposes (a HW requirement). Thus a direct
allocation method for PDs is still provided. This is also used when a
PD that could in principle be cached is larger than a page.

Lastly, a new debug flag was added for the pd_cache code.

JIRA NVGPU-30

Change-Id: I64c8037fc356783c1ef203cc143c4d71bbd5d77c
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master/r/1506610
Reviewed-by: Terje Bergstrom
GVS: Gerrit_Virtual_Submit
---
 drivers/gpu/nvgpu/gp10b/mm_gp10b.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'drivers/gpu/nvgpu/gp10b/mm_gp10b.c')

diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index c3867e9d..2ff199c6 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -164,7 +164,7 @@ static void update_gmmu_pde3_locked(struct vm_gk20a *vm,
 
 	phys_addr >>= gmmu_new_pde_address_shift_v();
 
-	pde_v[0] |= nvgpu_aperture_mask(g, &pd->mem,
+	pde_v[0] |= nvgpu_aperture_mask(g, pd->mem,
 			gmmu_new_pde_aperture_sys_mem_ncoh_f(),
 			gmmu_new_pde_aperture_video_memory_f());
 	pde_v[0] |= gmmu_new_pde_address_sys_f(u64_lo32(phys_addr));
@@ -209,7 +209,7 @@ static void update_gmmu_pde0_locked(struct vm_gk20a *vm,
 
 	if (small_valid) {
 		pde_v[2] |= gmmu_new_dual_pde_address_small_sys_f(small_addr);
-		pde_v[2] |= nvgpu_aperture_mask(g, &pd->mem,
+		pde_v[2] |= nvgpu_aperture_mask(g, pd->mem,
 			gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(),
 			gmmu_new_dual_pde_aperture_small_video_memory_f());
 		pde_v[2] |= gmmu_new_dual_pde_vol_small_true_f();
@@ -219,7 +219,7 @@
 	if (big_valid) {
 		pde_v[0] |= gmmu_new_dual_pde_address_big_sys_f(big_addr);
 		pde_v[0] |= gmmu_new_dual_pde_vol_big_true_f();
-		pde_v[0] |= nvgpu_aperture_mask(g, &pd->mem,
+		pde_v[0] |= nvgpu_aperture_mask(g, pd->mem,
 			gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(),
 			gmmu_new_dual_pde_aperture_big_video_memory_f());
 		pde_v[1] |= big_addr >> 28;
@@ -365,14 +365,14 @@ static const struct gk20a_mmu_level *gp10b_mm_get_mmu_levels(struct gk20a *g,
 static void gp10b_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
 		struct vm_gk20a *vm)
 {
-	u64 pdb_addr = nvgpu_mem_get_base_addr(g, &vm->pdb.mem, 0);
+	u64 pdb_addr = nvgpu_mem_get_base_addr(g, vm->pdb.mem, 0);
 	u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
 	u32 pdb_addr_hi = u64_hi32(pdb_addr);
 
 	gk20a_dbg_info("pde pa=0x%llx", pdb_addr);
 
 	nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
-		nvgpu_aperture_mask(g, &vm->pdb.mem,
+		nvgpu_aperture_mask(g, vm->pdb.mem,
 			ram_in_page_dir_base_target_sys_mem_ncoh_f(),
 			ram_in_page_dir_base_target_vid_mem_f()) |
 		ram_in_page_dir_base_vol_true_f() |
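
Note: the diff shown here is limited to mm_gp10b.c and only switches the gp10b
callers from &pd->mem to the new pd->mem pointer; the pd_cache itself and the
nvgpu_gmmu_pd struct changes live in other files of this commit. As a rough
illustration of the bookkeeping the commit message describes (a PD referencing
a possibly shared nvgpu_mem at an explicit offset), here is a minimal sketch.
The names nvgpu_gmmu_pd_sketch, mem_offs, pd_gpu_addr and pd_write_word are
assumptions for illustration, not the actual nvgpu identifiers, and the nvgpu
types and helpers used are assumed from the driver headers.

	/*
	 * Illustrative sketch only -- not part of this patch. Field and
	 * helper names (mem_offs, pd_gpu_addr, pd_write_word) are assumed.
	 */
	struct nvgpu_gmmu_pd_sketch {
		/* Backing DMA memory; may be a slab page holding many PDs. */
		struct nvgpu_mem *mem;
		/* Byte offset of this PD within *mem. */
		u32 mem_offs;
	};

	/* Address of the PD: base of the backing memory plus the offset. */
	static u64 pd_gpu_addr(struct gk20a *g, struct nvgpu_gmmu_pd_sketch *pd)
	{
		return nvgpu_mem_get_base_addr(g, pd->mem, 0) + pd->mem_offs;
	}

	/* PD writes are likewise displaced by the offset (in 32-bit words). */
	static void pd_write_word(struct gk20a *g, struct nvgpu_gmmu_pd_sketch *pd,
				  u32 w, u32 data)
	{
		nvgpu_mem_wr32(g, pd->mem, pd->mem_offs / (u32)sizeof(u32) + w, data);
	}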