commit		ecc6f27fd13e7560d124faf67d114b93d47b73de (patch)
author		Terje Bergstrom <tbergstrom@nvidia.com>	2014-10-03 00:32:19 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-03-18 15:11:46 -0400
tree		4d0d065b630976db87f21ee99a63f1477499fbd5 /drivers/gpu/nvgpu/gm20b/mm_gm20b.c
parent		5200902f57d0223e30dfce548355b5fe06a25203 (diff)
gpu: nvgpu: Common VM initializer
Merge initialization code from gk20a_init_system_vm(), gk20a_init_bar1_vm()
and gk20a_vm_alloc_share() into gk20a_init_vm(). Remove redundant page size
data, and move the page size fields to be VM specific.

Bug 1558739
Bug 1560370

Change-Id: I4557d9e04d65ccb48fe1f2b116dd1bfa74cae98e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
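Making the page size fields VM specific means each struct vm_gk20a carries its own copy of the page-size state instead of reading the file-scope tables removed below. A minimal sketch of what those per-VM fields could look like; only gmmu_page_sizes, pde_stride_shift and page_table_sizing actually appear in this diff, and everything else here (including the struct name) is illustrative:

/* Sketch only: per-VM page-size state this patch switches to.
 * The three named fields match uses in the hunks below; the
 * surrounding layout is assumed, not taken from the real header. */
struct vm_gk20a_sketch {
	struct mm_gk20a *mm;
	u32 gmmu_page_sizes[2];		/* e.g. { SZ_4K, SZ_128K } on gm20b */
	u32 pde_stride_shift;		/* log2 of VA span one PDE covers */
	struct {
		u32 order;		/* allocation order of one page table */
	} page_table_sizing[2];
	/* ... remaining members elided ... */
};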
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/mm_gm20b.c')
-rw-r--r--	drivers/gpu/nvgpu/gm20b/mm_gm20b.c	| 20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index 278ae9a6..b4622c0b 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -20,12 +20,6 @@
 #include "hw_fb_gm20b.h"
 #include "hw_gr_gm20b.h"
 
-static const u32 gmmu_page_sizes[gmmu_nr_page_sizes] = { SZ_4K, SZ_128K };
-static const u32 gmmu_page_shifts[gmmu_nr_page_sizes] = { 12, 17 };
-static const u64 gmmu_page_offset_masks[gmmu_nr_page_sizes] = { 0xfffLL,
-		0x1ffffLL };
-static const u64 gmmu_page_masks[gmmu_nr_page_sizes] = { ~0xfffLL, ~0x1ffffLL };
-
 static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
 				enum gmmu_pgsz_gk20a pgsz_idx,
 				u64 first_vaddr, u64 last_vaddr,
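With these file-scope tables removed, the common initializer named in the commit message would fill the per-VM copies once. A hypothetical sketch of that step; gk20a_init_vm() is real per the commit message, but this helper, its parameters, and its body are assumptions:

/* Hypothetical: populate per-VM page-size state in the common
 * initializer instead of relying on file-scope globals. */
static void init_vm_page_sizes_sketch(struct vm_gk20a *vm, u32 big_page_size)
{
	vm->gmmu_page_sizes[0] = SZ_4K;			/* small pages */
	vm->gmmu_page_sizes[1] = big_page_size;		/* SZ_128K on gm20b */
	/* The removed shift/mask tables are derivable rather than stored:
	 * for a size s, shift = ilog2(s), offset mask = s - 1,
	 * page mask = ~(u64)(s - 1). */
}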
@@ -97,9 +91,9 @@ static bool gm20b_vm_is_pde_in_range(struct vm_gk20a *vm, u64 vaddr_lo,
 
 	gk20a_dbg_fn("");
 
-	pde_vaddr_lo = (u64)pde << vm->mm->pde_stride_shift;
+	pde_vaddr_lo = (u64)pde << vm->pde_stride_shift;
 	pde_vaddr_hi = pde_vaddr_lo |
-		((0x1UL << (vm->mm->pde_stride_shift)) - 1);
+		((0x1UL << (vm->pde_stride_shift)) - 1);
 
 	return ((vaddr_lo <= pde_vaddr_lo) && (vaddr_hi) >= pde_vaddr_hi);
 }
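For intuition on the range check above: a PDE with index pde covers the VA span [pde << pde_stride_shift, (pde << pde_stride_shift) | (stride - 1)], and the function returns true only when [vaddr_lo, vaddr_hi] fully contains that span. A standalone sketch of the same arithmetic, assuming an illustrative shift of 26 (not necessarily gm20b's actual value):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	const uint32_t pde_stride_shift = 26;	/* illustrative only */
	const uint64_t pde = 1;

	/* Same computation as gm20b_vm_is_pde_in_range() after the patch. */
	uint64_t pde_vaddr_lo = pde << pde_stride_shift;
	uint64_t pde_vaddr_hi = pde_vaddr_lo |
				((1ULL << pde_stride_shift) - 1);

	/* Prints: PDE 1 covers 0x4000000..0x7ffffff */
	printf("PDE %" PRIu64 " covers 0x%" PRIx64 "..0x%" PRIx64 "\n",
	       pde, pde_vaddr_lo, pde_vaddr_hi);
	return 0;
}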
@@ -108,8 +102,8 @@ static int gm20b_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
 		u32 num_pages, u32 pgsz_idx, bool refplus)
 {
 	struct mm_gk20a *mm = vm->mm;
-	u32 pgsz = gmmu_page_sizes[pgsz_idx];
-	u32 pde_shift = vm->mm->pde_stride_shift;
+	u32 pgsz = vm->gmmu_page_sizes[pgsz_idx];
+	u32 pde_shift = vm->pde_stride_shift;
 	u64 vaddr_hi;
 	u64 vaddr_pde_start;
 	u32 i;
@@ -127,7 +121,7 @@ static int gm20b_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
 	gk20a_dbg_info("vaddr: 0x%llx, vaddr_hi: 0x%llx, pde_lo: 0x%x, "
 			"pde_hi: 0x%x, pgsz: %d, pde_stride_shift: %d",
 			vaddr, vaddr_hi, pde_lo, pde_hi, pgsz,
-			vm->mm->pde_stride_shift);
+			vm->pde_stride_shift);
 
 	for (i = pde_lo; i <= pde_hi; i++) {
 		/* Mark all ptes as sparse. */
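The pde_lo/pde_hi indices logged above are derived from the VA range and the per-VM stride shift. A sketch of the conventional derivation; the helper name is invented, and the real driver computes this outside the hunks shown here:

/* Sketch: PDE index range spanning [vaddr_lo, vaddr_hi], given the
 * per-VM pde_stride_shift. Name and signature are illustrative. */
static void pde_range_sketch(u64 vaddr_lo, u64 vaddr_hi, u32 pde_shift,
			     u32 *pde_lo, u32 *pde_hi)
{
	*pde_lo = (u32)(vaddr_lo >> pde_shift);
	*pde_hi = (u32)(vaddr_hi >> pde_shift);
}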
@@ -240,7 +234,7 @@ void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
 	gk20a_dbg_info("vaddr: 0x%llx, vaddr_hi: 0x%llx, pde_lo: 0x%x, "
 			"pde_hi: 0x%x, pgsz_idx: %d, pde_stride_shift: %d",
 			vaddr, vaddr_hi, pde_lo, pde_hi, pgsz_idx,
-			vm->mm->pde_stride_shift);
+			vm->pde_stride_shift);
 
 	for (pde_i = pde_lo; pde_i <= pde_hi; pde_i++) {
 		struct page_table_gk20a *pte = vm->pdes.ptes[pgsz_idx] + pde_i;
@@ -248,7 +242,7 @@ void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
 
 		if (pte->ref_cnt == 0) {
 			free_gmmu_pages(vm, pte->ref, pte->sgt,
-				vm->mm->page_table_sizing[pgsz_idx].order,
+				vm->page_table_sizing[pgsz_idx].order,
 				pte->size);
 			pte->ref = NULL;
 			update_gmmu_pde_locked(vm, pde_i);
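To close the loop on the teardown path: the last hunk frees a page table only once its reference count has dropped to zero, then rewrites the PDE so the hardware no longer points at freed memory. A condensed sketch of that pattern; only the calls come from the hunk above, while the wrapper and its control flow are assumed:

/* Condensed sketch of the clear-sparse teardown. Wrapper name and
 * flow are illustrative; the calls mirror the hunk above. */
static void put_pte_sketch(struct vm_gk20a *vm, u32 pgsz_idx, u32 pde_i)
{
	struct page_table_gk20a *pte = vm->pdes.ptes[pgsz_idx] + pde_i;

	if (pte->ref_cnt == 0) {
		free_gmmu_pages(vm, pte->ref, pte->sgt,
				vm->page_table_sizing[pgsz_idx].order,
				pte->size);
		pte->ref = NULL;
		update_gmmu_pde_locked(vm, pde_i);	/* detach freed PT */
	}
}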