author	Konsta Holtta <kholtta@nvidia.com>	2018-06-26 05:40:05 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-06-28 14:13:46 -0400
commit	5c9bb32adba32c5cd3b0466b50206aa1b7dd7d2c (patch)
tree	5376b5bd59a4acfcb7c676c9cf48c20e93f4ff16 /drivers
parent	d8833c6da35c99d73f817e614f327e5617788860 (diff)
gpu: nvgpu: remove {map,unmap}_gmmu_pages()
The GPU page tables are always mapped to the CPU now, so they don't
need the nvgpu_mem_{begin,end}() calls.

Change-Id: Ic48eeed3a6f002c78f89ef07922cddf835337de3
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1761579
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
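For context, the deleted helpers simply bracketed CPU accesses to page
directory memory, which on 32-bit systems could mean setting up and tearing
down a temporary kernel mapping. A minimal sketch of the before/after access
pattern (illustrative only, not part of the commit; read_pd_word() is a
hypothetical helper built on the nvgpu_mem accessors that appear in the diff):

	/*
	 * Before: bracket each access with nvgpu_mem_begin()/nvgpu_mem_end(),
	 * since the PD memory might not yet have a CPU mapping on 32-bit.
	 */
	static int read_pd_word_old(struct gk20a *g, struct nvgpu_gmmu_pd *pd,
				    u32 word, u32 *out)
	{
		int err = nvgpu_mem_begin(g, pd->mem);

		if (err)
			return err;
		*out = nvgpu_mem_rd32(g, pd->mem, word);
		nvgpu_mem_end(g, pd->mem);
		return 0;
	}

	/* After: the PD memory is always CPU-mapped, so read it directly. */
	static u32 read_pd_word(struct gk20a *g, struct nvgpu_gmmu_pd *pd,
				u32 word)
	{
		return nvgpu_mem_rd32(g, pd->mem, word);
	}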
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/gmmu.c	40
-rw-r--r--	drivers/gpu/nvgpu/gp10b/mm_gp10b.c	4
2 files changed, 0 insertions, 44 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 1e4065ba..78b30850 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -209,25 +209,6 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 }
 
 /*
- * Ensure that there's a CPU mapping for the page directory memory. This won't
- * always be the case for 32 bit systems since we may need to save kernel
- * virtual memory.
- */
-static int map_gmmu_pages(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
-{
-	return nvgpu_mem_begin(g, pd->mem);
-}
-
-/*
- * Handle any necessary CPU unmap semantics for a page directories DMA memory.
- * For 64 bit platforms this is a noop.
- */
-static void unmap_gmmu_pages(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
-{
-	nvgpu_mem_end(g, pd->mem);
-}
-
-/*
  * Return the _physical_ address of a page directory.
  */
 static u64 nvgpu_pde_phys_addr(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
@@ -451,21 +432,12 @@ static int __set_pd_level(struct vm_gk20a *vm,
 			   attrs);
 
 	if (next_l->update_entry) {
-		err = map_gmmu_pages(g, next_pd);
-		if (err) {
-			nvgpu_err(g,
-				  "couldn't map ptes for update as=%d",
-				  vm_aspace_id(vm));
-			return err;
-		}
-
 		err = __set_pd_level(vm, next_pd,
 				     lvl + 1,
 				     phys_addr,
 				     virt_addr,
 				     chunk_size,
 				     attrs);
-		unmap_gmmu_pages(g, next_pd);
 
 		if (err)
 			return err;
@@ -634,13 +606,6 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 	 */
 	length = nvgpu_align_map_length(vm, length, attrs);
 
-	err = map_gmmu_pages(g, &vm->pdb);
-	if (err) {
-		nvgpu_err(g, "couldn't map ptes for update as=%d",
-			  vm_aspace_id(vm));
-		return err;
-	}
-
 	__gmmu_dbg(g, attrs,
 		   "vm=%s "
 		   "%-5s GPU virt %#-12llx +%#-9llx phys %#-12llx "
@@ -669,7 +634,6 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 			     length,
 			     attrs);
 
-	unmap_gmmu_pages(g, &vm->pdb);
 	nvgpu_mb();
 
 	__gmmu_dbg(g, attrs, "%-5s Done!", sgt ? "MAP" : "UNMAP");
@@ -897,10 +861,8 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 		pte_size = (u32)(l->entry_size / sizeof(u32));
 
 		if (data) {
-			map_gmmu_pages(g, pd);
 			for (i = 0; i < pte_size; i++)
 				data[i] = nvgpu_mem_rd32(g, pd->mem, pte_base + i);
-			unmap_gmmu_pages(g, pd);
 		}
 
 		if (pd_out)
@@ -944,13 +906,11 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)
 
 	pte_size = __nvgpu_pte_words(g);
 
-	map_gmmu_pages(g, pd);
 	for (i = 0; i < pte_size; i++) {
 		pd_write(g, pd, pd_offs + i, pte[i]);
 		pte_dbg(g, attrs_ptr,
 			"PTE: idx=%-4u (%d) 0x%08x", pd_idx, i, pte[i]);
 	}
-	unmap_gmmu_pages(g, pd);
 
 	/*
 	 * Ensures the pd_write()s are done. The pd_write() does not do this
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 54772d42..e313b928 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -288,12 +288,8 @@ static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
 	if (!pd->mem)
 		return pgsz;
 
-	if (nvgpu_mem_begin(g, pd->mem))
-		return pgsz;
-
 	for (i = 0; i < GP10B_PDE0_ENTRY_SIZE >> 2; i++)
 		pde_v[i] = nvgpu_mem_rd32(g, pd->mem, pde_offset + i);
-	nvgpu_mem_end(g, pd->mem);
 
 	/*
 	 * Check if the aperture AND address are set