From 5c9bb32adba32c5cd3b0466b50206aa1b7dd7d2c Mon Sep 17 00:00:00 2001
From: Konsta Holtta
Date: Tue, 26 Jun 2018 12:40:05 +0300
Subject: gpu: nvgpu: remove {map,unmap}_gmmu_pages()

The GPU page tables are always mapped to the CPU now, so they don't
need the nvgpu_mem_{begin,end}() calls.

Change-Id: Ic48eeed3a6f002c78f89ef07922cddf835337de3
Signed-off-by: Konsta Holtta
Reviewed-on: https://git-master.nvidia.com/r/1761579
Reviewed-by: svc-mobile-coverity
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman
Reviewed-by: Terje Bergstrom
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/gmmu.c | 40 --------------------------------------
 drivers/gpu/nvgpu/gp10b/mm_gp10b.c |  4 ----
 2 files changed, 44 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 1e4065ba..78b30850 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -208,25 +208,6 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	return 0;
 }
 
-/*
- * Ensure that there's a CPU mapping for the page directory memory. This won't
- * always be the case for 32 bit systems since we may need to save kernel
- * virtual memory.
- */
-static int map_gmmu_pages(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
-{
-	return nvgpu_mem_begin(g, pd->mem);
-}
-
-/*
- * Handle any necessary CPU unmap semantics for a page directories DMA memory.
- * For 64 bit platforms this is a noop.
- */
-static void unmap_gmmu_pages(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
-{
-	nvgpu_mem_end(g, pd->mem);
-}
-
 /*
  * Return the _physical_ address of a page directory.
  */
@@ -451,21 +432,12 @@ static int __set_pd_level(struct vm_gk20a *vm,
 				attrs);
 
 		if (next_l->update_entry) {
-			err = map_gmmu_pages(g, next_pd);
-			if (err) {
-				nvgpu_err(g,
-					"couldn't map ptes for update as=%d",
-					vm_aspace_id(vm));
-				return err;
-			}
-
 			err = __set_pd_level(vm, next_pd,
 					     lvl + 1,
 					     phys_addr,
 					     virt_addr,
 					     chunk_size,
 					     attrs);
-			unmap_gmmu_pages(g, next_pd);
 
 			if (err)
 				return err;
@@ -634,13 +606,6 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 	 */
 	length = nvgpu_align_map_length(vm, length, attrs);
 
-	err = map_gmmu_pages(g, &vm->pdb);
-	if (err) {
-		nvgpu_err(g, "couldn't map ptes for update as=%d",
-			  vm_aspace_id(vm));
-		return err;
-	}
-
 	__gmmu_dbg(g, attrs,
 		   "vm=%s "
 		   "%-5s GPU virt %#-12llx +%#-9llx phys %#-12llx "
@@ -669,7 +634,6 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 			      length,
 			      attrs);
 
-	unmap_gmmu_pages(g, &vm->pdb);
 	nvgpu_mb();
 
 	__gmmu_dbg(g, attrs, "%-5s Done!", sgt ? "MAP" : "UNMAP");
@@ -897,10 +861,8 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 	pte_size = (u32)(l->entry_size / sizeof(u32));
 
 	if (data) {
-		map_gmmu_pages(g, pd);
 		for (i = 0; i < pte_size; i++)
 			data[i] = nvgpu_mem_rd32(g, pd->mem, pte_base + i);
-		unmap_gmmu_pages(g, pd);
 	}
 
 	if (pd_out)
@@ -944,13 +906,11 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)
 
 	pte_size = __nvgpu_pte_words(g);
 
-	map_gmmu_pages(g, pd);
 	for (i = 0; i < pte_size; i++) {
 		pd_write(g, pd, pd_offs + i, pte[i]);
 		pte_dbg(g, attrs_ptr,
 			"PTE: idx=%-4u (%d) 0x%08x", pd_idx, i, pte[i]);
 	}
-	unmap_gmmu_pages(g, pd);
 
 	/*
 	 * Ensures the pd_write()s are done. The pd_write() does not do this
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 54772d42..e313b928 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -288,12 +288,8 @@ static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
 	if (!pd->mem)
 		return pgsz;
 
-	if (nvgpu_mem_begin(g, pd->mem))
-		return pgsz;
-
 	for (i = 0; i < GP10B_PDE0_ENTRY_SIZE >> 2; i++)
 		pde_v[i] = nvgpu_mem_rd32(g, pd->mem, pde_offset + i);
-	nvgpu_mem_end(g, pd->mem);
 
 	/*
 	 * Check if the aperture AND address are set
-- 
cgit v1.2.2
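
For context, the pattern the patch removes is a begin/end CPU-mapping guard
around every page-table access: nvgpu_mem_begin() had to create a transient
kernel mapping on 32-bit systems and could fail, so each read or write path
carried an error branch and a matching nvgpu_mem_end(). The sketch below is a
minimal standalone model of that before/after change; it is not nvgpu code.
The struct layouts, the function bodies, and the read_pte_old()/read_pte_new()
helpers are invented for illustration; only the calling convention of
nvgpu_mem_begin()/nvgpu_mem_end()/nvgpu_mem_rd32() mirrors what the diff shows.

/*
 * Standalone sketch of the guard pattern removed by this patch.
 * Mocked types and bodies; only the calling convention mirrors nvgpu.
 */
#include <stdint.h>
#include <stdio.h>

struct gk20a { int unused; };

struct nvgpu_mem {
	uint32_t *cpu_va;    /* CPU mapping; assumed permanently valid now */
	uint32_t words[4];   /* backing storage for this mock */
};

/*
 * Mock: on 32-bit kernels this used to create a transient CPU mapping
 * and could fail; with a permanent mapping it degenerates to a no-op.
 */
static int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
{
	(void)g; (void)mem;
	return 0;
}

static void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
{
	(void)g; (void)mem;
}

static uint32_t nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem,
			       uint32_t w)
{
	(void)g;
	return mem->cpu_va[w];
}

/* Before: every access bracketed by begin/end, with an error path. */
static int read_pte_old(struct gk20a *g, struct nvgpu_mem *mem,
			uint32_t w, uint32_t *out)
{
	if (nvgpu_mem_begin(g, mem))
		return -1;	/* the "couldn't map ptes" case in the diff */
	*out = nvgpu_mem_rd32(g, mem, w);
	nvgpu_mem_end(g, mem);
	return 0;
}

/* After: the mapping always exists, so the access is direct. */
static uint32_t read_pte_new(struct gk20a *g, struct nvgpu_mem *mem,
			     uint32_t w)
{
	return nvgpu_mem_rd32(g, mem, w);
}

int main(void)
{
	struct gk20a g = { 0 };
	struct nvgpu_mem mem = { .words = { 0x12345678, 0, 0, 0 } };
	uint32_t v = 0;

	mem.cpu_va = mem.words;

	if (read_pte_old(&g, &mem, 0, &v) == 0)
		printf("old path read 0x%08x\n", v);
	printf("new path read 0x%08x\n", read_pte_new(&g, &mem, 0));
	return 0;
}

Note that dropping the guard removes an error path, not just boilerplate: the
deleted map_gmmu_pages() comment says the CPU mapping "won't always be the
case for 32 bit systems", which is why the "couldn't map ptes for update"
error handling disappears from __set_pd_level() and
__nvgpu_gmmu_update_page_table() along with the begin/end calls.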