author      Terje Bergstrom <tbergstrom@nvidia.com>    2015-02-09 23:54:01 -0500
committer   Dan Willemsen <dwillemsen@nvidia.com>      2015-04-04 21:07:35 -0400
commit      f9fd5bbabe0d188a06d25bacdb18b91ef65a147d (patch)
tree        ecf651164e5fbdbba48eec53291f2cef9ac715e7 /drivers/gpu/nvgpu/gm20b/mm_gm20b.c
parent      9bf82585aa24b6052855c119855abef92671d502 (diff)
gpu: nvgpu: Unify PDE & PTE structs
Introduce a new struct gk20a_mm_entry. Allocate and store the PDE and PTE
arrays using this same structure, and pass a pointer to it between the
memory-management functions wherever possible.

Change-Id: Ia4a2a6abdac9ab7ba522dafbf73fc3a3d5355c5f
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/696414
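For reference, a minimal sketch of what the unified entry could look like. The field names below are inferred from how this change uses it (entry->cpu_va, vm->pdb.entries, map_gmmu_pages(entry)) and from the arguments the old helpers took (ref, sgt, size); the authoritative definition is the struct gk20a_mm_entry added in mm_gk20a.h, which may differ.

    #include <linux/types.h>
    #include <linux/mm_types.h>
    #include <linux/scatterlist.h>

    /* Hypothetical shape of the unified PDE/PTE backing descriptor; the
     * real struct gk20a_mm_entry is declared in mm_gk20a.h. */
    struct gk20a_mm_entry {
    	struct page **pages;	/* backing pages for this PDE/PTE level */
    	struct sg_table *sgt;	/* DMA view of the same backing */
    	void *cpu_va;		/* kernel mapping, valid between map/unmap */
    	size_t size;		/* size of the backing in bytes */
    };

With one descriptor per level, vm->pdb.entries + pde_lo indexes straight to the entry backing the PTEs of a given PDE, which is exactly how the hunk below rewrites the lookup.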
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/mm_gm20b.c')
-rw-r--r--    drivers/gpu/nvgpu/gm20b/mm_gm20b.c    13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index 605464d4..37ab70fa 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -32,8 +32,7 @@ static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
 	u32 pte_w[2] = {0, 0}; /* invalid pte */
 	u64 addr = 0;
 	u32 pte_cur;
-	void *pte_kv_cur;
-	struct page_table_gk20a *pte;
+	struct gk20a_mm_entry *entry;
 	struct gk20a *g = gk20a_from_vm(vm);
 
 	gk20a_dbg_fn("");
@@ -47,13 +46,13 @@ static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
 	/* Expect ptes of the same pde */
 	BUG_ON(pde_lo != pde_hi);
 
-	pte = vm->pdes.ptes[pgsz_idx] + pde_lo;
+	entry = vm->pdb.entries + pde_lo;
 
 	pte_lo = pte_index_from_vaddr(vm, first_vaddr, pgsz_idx);
 	pte_hi = pte_index_from_vaddr(vm, last_vaddr, pgsz_idx);
 
 	/* get cpu access to the ptes */
-	err = map_gmmu_pages(pte->ref, pte->sgt, &pte_kv_cur, pte->size);
+	err = map_gmmu_pages(entry);
 	if (err)
 		goto fail;
 
@@ -68,11 +67,11 @@ static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
 			   pte_cur, addr,
 			   pte_w[1], pte_w[0]);
 
-		gk20a_mem_wr32(pte_kv_cur + pte_cur*8, 0, pte_w[0]);
-		gk20a_mem_wr32(pte_kv_cur + pte_cur*8, 1, pte_w[1]);
+		gk20a_mem_wr32(entry->cpu_va + pte_cur*8, 0, pte_w[0]);
+		gk20a_mem_wr32(entry->cpu_va + pte_cur*8, 1, pte_w[1]);
 	}
 
-	unmap_gmmu_pages(pte->ref, pte->sgt, pte_kv_cur);
+	unmap_gmmu_pages(entry);
 
 	smp_mb();
 	g->ops.mm.tlb_invalidate(vm);
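The diff only shows the call sites of map_gmmu_pages() and unmap_gmmu_pages() shrinking; the helpers themselves live in mm_gk20a.c and are not part of this hunk. As a rough, hypothetical sketch of what taking the unified entry buys them, building on the gk20a_mm_entry fields sketched after the commit message above and using the kernel's vmap()/vunmap():

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Sketch only: the real helpers in mm_gk20a.c may map through the
     * sg_table or handle more cases. The point is that the entry carries
     * the pages, size, and resulting CPU VA itself, so callers no longer
     * thread ref/sgt/kv/size through every call. */
    static int map_gmmu_pages(struct gk20a_mm_entry *entry)
    {
    	entry->cpu_va = vmap(entry->pages,
    			     DIV_ROUND_UP(entry->size, PAGE_SIZE),
    			     0, PAGE_KERNEL);
    	return entry->cpu_va ? 0 : -ENOMEM;
    }

    static void unmap_gmmu_pages(struct gk20a_mm_entry *entry)
    {
    	vunmap(entry->cpu_va);
    	entry->cpu_va = NULL;
    }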