path: root/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
author     Terje Bergstrom <tbergstrom@nvidia.com>  2014-12-12 06:52:28 -0500
committer  Dan Willemsen <dwillemsen@nvidia.com>    2015-03-18 15:12:32 -0400
commit     0bc513fc4618feb5262079b4ef8842ba419d8111 (patch)
tree       f1741f426400c9cdaadb46113a62b0a064e38811 /drivers/gpu/nvgpu/gm20b/mm_gm20b.c
parent     e462c6a7ad0ce05775bc15c58963df4a1a5606e8 (diff)
gpu: nvgpu: Remove gk20a sparse texture & PTE freeing
Remove support for gk20a sparse textures. We're using the implementation from user space, so the gk20a code is never invoked. Also remove ref_cnt for PTEs, so we never free PTEs when unmapping pages, but only at VM delete time.

Change-Id: I04d7d43d9bff23ee46fd0570ad189faece35dd14
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/663294
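As background for the PTE lifetime change described above, a minimal user-space C model of the two schemes is sketched below. This is illustration only, not nvgpu code: struct model_vm, model_unmap_old(), model_unmap_new() and model_vm_delete() are hypothetical names. In the old scheme an unmap drops a per-page-table ref_cnt and frees the table when it reaches zero (as gm20b_vm_clear_sparse did in the diff below); in the new scheme an unmap only invalidates entries, and every remaining page table is freed in one pass when the VM itself is deleted.

/*
 * Illustration only: a user-space model of the PTE lifetime change.
 * struct model_vm, model_unmap_old(), model_unmap_new() and
 * model_vm_delete() are hypothetical names, not nvgpu APIs.
 */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_NUM_PDES		4
#define MODEL_PTES_PER_TABLE	8

struct model_pte_table {
	unsigned int *entries;	/* backing storage for one page table */
	int ref_cnt;		/* only meaningful in the old scheme */
};

struct model_vm {
	struct model_pte_table tables[MODEL_NUM_PDES];
};

/* Old scheme (what this patch removes): unmapping drops a reference
 * and frees the page table as soon as its ref_cnt reaches zero. */
static void model_unmap_old(struct model_vm *vm, int pde_i)
{
	struct model_pte_table *tbl = &vm->tables[pde_i];

	if (tbl->ref_cnt > 0 && --tbl->ref_cnt == 0) {
		free(tbl->entries);
		tbl->entries = NULL;
	}
}

/* New scheme: unmapping only invalidates entries; the table itself
 * stays allocated until the VM is deleted. */
static void model_unmap_new(struct model_vm *vm, int pde_i)
{
	struct model_pte_table *tbl = &vm->tables[pde_i];
	int i;

	if (!tbl->entries)
		return;
	for (i = 0; i < MODEL_PTES_PER_TABLE; i++)
		tbl->entries[i] = 0;	/* clear PTEs, keep the table */
}

/* VM delete time: free every page table that is still allocated. */
static void model_vm_delete(struct model_vm *vm)
{
	int i;

	for (i = 0; i < MODEL_NUM_PDES; i++) {
		free(vm->tables[i].entries);
		vm->tables[i].entries = NULL;
	}
}

int main(void)
{
	struct model_vm vm;
	int i;

	for (i = 0; i < MODEL_NUM_PDES; i++) {
		vm.tables[i].entries =
			calloc(MODEL_PTES_PER_TABLE, sizeof(unsigned int));
		vm.tables[i].ref_cnt = 1;
	}

	model_unmap_old(&vm, 0);	/* old behaviour: table 0 freed here */
	model_unmap_new(&vm, 1);	/* new behaviour: table 1 survives */
	model_vm_delete(&vm);		/* everything left is freed here */

	printf("remaining page tables freed at VM delete time\n");
	return 0;
}

The trade-off mirrors the one the commit makes: page-table memory is held for the lifetime of the VM, in exchange for dropping the ref-count bookkeeping from the map/unmap paths.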
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/mm_gm20b.c')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/mm_gm20b.c  40
1 file changed, 2 insertions, 38 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index cd40132f..5b1a9a04 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -47,8 +47,6 @@ static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
 	BUG_ON(pde_lo != pde_hi);
 
 	pte = vm->pdes.ptes[pgsz_idx] + pde_lo;
-	if (refplus)
-		pte->ref_cnt++;
 
 	pte_lo = pte_index_from_vaddr(vm, first_vaddr, pgsz_idx);
 	pte_hi = pte_index_from_vaddr(vm, last_vaddr, pgsz_idx);
@@ -64,10 +62,10 @@ static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
 		pte_w[1] = clear ? 0 : gmmu_pte_vol_true_f();
 
 		gk20a_dbg(gpu_dbg_pte,
-			"pte_cur=%d addr=%llx refs=%d"
+			"pte_cur=%d addr=%llx"
 			" [0x%08x,0x%08x]",
 			pte_cur, addr,
-			pte->ref_cnt, pte_w[1], pte_w[0]);
+			pte_w[1], pte_w[0]);
 
 		gk20a_mem_wr32(pte_kv_cur + pte_cur*8, 0, pte_w[0]);
 		gk20a_mem_wr32(pte_kv_cur + pte_cur*8, 1, pte_w[1]);
@@ -220,39 +218,6 @@ fail:
 	return ret;
 }
 
-static void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
-			       u64 size, u32 pgsz_idx) {
-	u64 vaddr_hi;
-	u32 pde_lo, pde_hi, pde_i;
-
-	gk20a_dbg_fn("");
-	vaddr_hi = vaddr + size - 1;
-	pde_range_from_vaddr_range(vm,
-				   vaddr,
-				   vaddr_hi,
-				   &pde_lo, &pde_hi);
-
-	gk20a_dbg_info("vaddr: 0x%llx, vaddr_hi: 0x%llx, pde_lo: 0x%x, "
-			"pde_hi: 0x%x, pgsz_idx: %d, pde_stride_shift: %d",
-			vaddr, vaddr_hi, pde_lo, pde_hi, pgsz_idx,
-			vm->pde_stride_shift);
-
-	for (pde_i = pde_lo; pde_i <= pde_hi; pde_i++) {
-		struct page_table_gk20a *pte = vm->pdes.ptes[pgsz_idx] + pde_i;
-		pte->ref_cnt--;
-
-		if (pte->ref_cnt == 0) {
-			free_gmmu_pages(vm, pte->ref, pte->sgt,
-				vm->page_table_sizing[pgsz_idx].order,
-				pte->size);
-			pte->ref = NULL;
-			update_gmmu_pde_locked(vm, pde_i);
-		}
-	}
-
-	return;
-}
-
 static bool gm20b_mm_mmu_debug_mode_enabled(struct gk20a *g)
 {
 	u32 debug_ctrl = gk20a_readl(g, gr_gpcs_pri_mmu_debug_ctrl_r());
@@ -288,7 +253,6 @@ static u32 gm20b_mm_get_big_page_sizes(void)
 void gm20b_init_mm(struct gpu_ops *gops)
 {
 	gops->mm.set_sparse = gm20b_vm_put_sparse;
-	gops->mm.clear_sparse = gm20b_vm_clear_sparse;
 	gops->mm.is_debug_mode_enabled = gm20b_mm_mmu_debug_mode_enabled;
 	gops->mm.gmmu_map = gk20a_locked_gmmu_map;
 	gops->mm.gmmu_unmap = gk20a_locked_gmmu_unmap;