From 62e80a189cfa0b6dbb9e27712a1c782e953c32f4 Mon Sep 17 00:00:00 2001
From: Kevin Huang
Date: Tue, 29 Jul 2014 15:56:17 -0700
Subject: gpu: nvgpu: clear sparse in space free

Gk20a unmaps the addresses binding to dummy page to clear sparse.
On Gm20b, we need to free the allocated page table entry for sparse
memory.

Bug 1538384

Change-Id: Ie2409ab016c29f42c5f7d97dd7287b093b47f9df
Signed-off-by: Kevin Huang
Reviewed-on: http://git-master/r/448645
Reviewed-by: Terje Bergstrom
Tested-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/gk20a/gk20a.h    |  4 +++
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 52 ++++++++++++++++++++---------
 drivers/gpu/nvgpu/gk20a/mm_gk20a.h |  6 ++++
 drivers/gpu/nvgpu/gm20b/mm_gm20b.c | 68 +++++++++++++++++++++++++++++++++-----
 4 files changed, 106 insertions(+), 24 deletions(-)

(limited to 'drivers')

diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index edf1d548..b5ef3f0d 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -234,7 +234,11 @@ struct gpu_ops {
 	} gr_ctx;
 	struct {
 		int (*set_sparse)(struct vm_gk20a *vm, u64 vaddr,
+			       u32 num_pages, u32 pgsz_idx, bool refplus);
+		int (*put_empty)(struct vm_gk20a *vm, u64 vaddr,
 			       u32 num_pages, u32 pgsz_idx);
+		void (*clear_sparse)(struct vm_gk20a *vm, u64 vaddr,
+			       u64 size, u32 pgsz_idx);
 	} mm;
 	struct {
 		int (*prepare_ucode)(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 57c61d51..93a29b13 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -100,7 +100,6 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 				   u64 first_vaddr, u64 last_vaddr,
 				   u8 kind_v, u32 ctag_offset, bool cacheable,
 				   int rw_flag);
-static void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i);
 static void gk20a_vm_remove_support(struct vm_gk20a *vm);
 static int gk20a_init_system_vm(struct mm_gk20a *mm);
 static int gk20a_init_bar1_vm(struct mm_gk20a *mm);
@@ -444,7 +443,7 @@ err_out:
 	return -ENOMEM;
 }
 
-static void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
+void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
 			    struct sg_table *sgt, u32 order,
 			    size_t size)
 {
@@ -534,7 +533,7 @@ err_out:
 	return -ENOMEM;
 }
 
-static void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
+void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
 			    struct sg_table *sgt, u32 order,
 			    size_t size)
 {
@@ -1865,7 +1864,7 @@ static inline u32 small_valid_pde1_bits(u64 pte_addr)
    made.  So, superfluous updates will cause unnecessary
    pde invalidations.
 */
-static void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i)
+void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i)
 {
 	bool small_valid, big_valid;
 	u64 pte_addr[2] = {0, 0};
@@ -1882,6 +1881,7 @@ static void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i)
 	if (small_valid)
 		pte_addr[gmmu_page_size_small] =
 			gk20a_mm_iova_addr(small_pte->sgt->sgl);
+
 	if (big_valid)
 		pte_addr[gmmu_page_size_big] =
 			gk20a_mm_iova_addr(big_pte->sgt->sgl);
@@ -1920,7 +1920,6 @@ static void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i)
 	vm->tlb_dirty = true;
 }
 
-
 static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
 			       u32 num_pages, u32 pgsz_idx)
 {
@@ -1986,6 +1985,18 @@ err_unmap:
 	return -EINVAL;
 }
 
+static int gk20a_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
+	       u32 num_pages, u32 pgsz_idx, bool refplus)
+{
+	return gk20a_vm_put_empty(vm, vaddr, num_pages, pgsz_idx);
+}
+
+void gk20a_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
+			       u64 size, u32 pgsz_idx) {
+	__locked_gmmu_unmap(vm, vaddr, size, pgsz_idx,
+			false, gk20a_mem_flag_none);
+}
+
 /* NOTE! mapped_buffers lock must be held */
 static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
 {
@@ -2000,8 +2011,18 @@ static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
 			gmmu_page_shifts[pgsz_idx];
 
 		/* there is little we can do if this fails... */
-		g->ops.mm.set_sparse(vm, vaddr, num_pages, pgsz_idx);
-
+		if (g->ops.mm.put_empty) {
+			g->ops.mm.put_empty(vm, vaddr, num_pages, pgsz_idx);
+		} else {
+			__locked_gmmu_unmap(vm,
+				mapped_buffer->addr,
+				mapped_buffer->size,
+				mapped_buffer->pgsz_idx,
+				mapped_buffer->va_allocated,
+				gk20a_mem_flag_none);
+			g->ops.mm.set_sparse(vm, vaddr,
+					num_pages, pgsz_idx, false);
+		}
 	} else
 		__locked_gmmu_unmap(vm,
 				mapped_buffer->addr,
@@ -2328,7 +2349,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 	/* mark that we need to use sparse mappings here */
 	if (args->flags & NVHOST_AS_ALLOC_SPACE_FLAGS_SPARSE) {
 		err = g->ops.mm.set_sparse(vm, vaddr_start, args->pages,
-					 pgsz_idx);
+					 pgsz_idx, true);
 		if (err) {
 			mutex_unlock(&vm->update_gmmu_lock);
 			vma->free(vma, start_page_nr, args->pages);
@@ -2357,6 +2378,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 	struct gk20a_allocator *vma;
 	struct vm_gk20a *vm = as_share->vm;
 	struct vm_reserved_va_node *va_node;
+	struct gk20a *g = gk20a_from_vm(vm);
 
 	gk20a_dbg_fn("pgsz=0x%x nr_pages=0x%x o/a=0x%llx", args->page_size,
 		     args->pages, args->offset);
@@ -2400,12 +2422,10 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 
 		/* if this was a sparse mapping, free the va */
 		if (va_node->sparse)
-			__locked_gmmu_unmap(vm,
-					va_node->vaddr_start,
-					va_node->size,
-					va_node->pgsz_idx,
-					false,
-					gk20a_mem_flag_none);
+			g->ops.mm.clear_sparse(vm,
+					va_node->vaddr_start,
+					va_node->size,
+					va_node->pgsz_idx);
 		kfree(va_node);
 	}
 	mutex_unlock(&vm->update_gmmu_lock);
@@ -3088,6 +3108,8 @@ bool gk20a_mm_mmu_debug_mode_enabled(struct gk20a *g)
 
 void gk20a_init_mm(struct gpu_ops *gops)
 {
-	gops->mm.set_sparse = gk20a_vm_put_empty;
+	gops->mm.set_sparse = gk20a_vm_put_sparse;
+	gops->mm.put_empty = gk20a_vm_put_empty;
+	gops->mm.clear_sparse = gk20a_vm_clear_sparse;
 }
 
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index da19f83e..b8726c62 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -524,6 +524,12 @@ int validate_gmmu_page_table_gk20a_locked(struct vm_gk20a *vm,
 int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
 				 enum gmmu_pgsz_gk20a gmmu_pgsz_idx,
 				 struct page_table_gk20a *pte);
+
+void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
+		     struct sg_table *sgt, u32 order,
+		     size_t size);
+void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i);
+
 struct gpu_ops;
 void gk20a_init_mm(struct gpu_ops *gops);
 #endif /*_MM_GK20A_H_ */
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index 2c211a57..a16f4adf 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -27,7 +27,8 @@ static const u64 gmmu_page_masks[gmmu_nr_page_sizes] = { ~0xfffLL, ~0x1ffffLL };
 
 static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
 				enum gmmu_pgsz_gk20a pgsz_idx,
-				u64 first_vaddr, u64 last_vaddr)
+				u64 first_vaddr, u64 last_vaddr,
+				bool clear, bool refplus)
 {
 	int err;
 	u32 pte_lo, pte_hi;
@@ -50,6 +51,8 @@ static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
 	BUG_ON(pde_lo != pde_hi);
 
 	pte = vm->pdes.ptes[pgsz_idx] + pde_lo;
+	if (refplus)
+		pte->ref_cnt++;
 
 	pte_lo = pte_index_from_vaddr(vm, first_vaddr, pgsz_idx);
 	pte_hi = pte_index_from_vaddr(vm, last_vaddr, pgsz_idx);
@@ -62,7 +65,7 @@ static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm,
 	gk20a_dbg(gpu_dbg_pte, "pte_lo=%d, pte_hi=%d", pte_lo, pte_hi);
 	for (pte_cur = pte_lo; pte_cur <= pte_hi; pte_cur++) {
 		pte_w[0] = gmmu_pte_valid_false_f();
-		pte_w[1] = gmmu_pte_vol_true_f();
+		pte_w[1] = clear ? 0 : gmmu_pte_vol_true_f();
 
 		gk20a_dbg(gpu_dbg_pte,
 			   "pte_cur=%d addr=%llx refs=%d"
@@ -147,7 +150,7 @@ static bool gm20b_vm_is_pde_in_range(struct vm_gk20a *vm, u64 vaddr_lo,
 }
 
 static int gm20b_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
-			       u32 num_pages, u32 pgsz_idx)
+			       u32 num_pages, u32 pgsz_idx, bool refplus)
 {
 	struct mm_gk20a *mm = vm->mm;
 	u32 pgsz = gmmu_page_sizes[pgsz_idx];
@@ -168,8 +171,8 @@ static int gm20b_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
 
 	gk20a_dbg_info("vaddr: 0x%llx, vaddr_hi: 0x%llx, pde_lo: 0x%x, "
 			"pde_hi: 0x%x, pgsz: %d, pde_stride_shift: %d",
-			vaddr, vaddr_hi, pde_lo, pde_hi,
-			vm->mm->pde_stride_shift, pgsz);
+			vaddr, vaddr_hi, pde_lo, pde_hi, pgsz,
+			vm->mm->pde_stride_shift);
 
 	for (i = pde_lo; i <= pde_hi; i++) {
 		/* Mark all ptes as sparse. */
@@ -188,20 +191,22 @@ static int gm20b_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
 			allocate_gmmu_ptes_sparse(vm, pgsz_idx,
 				vaddr_pde_start,
 				PDE_ADDR_END(vaddr_pde_start,
-				pde_shift));
+				pde_shift), false, refplus);
 		} else {
 			/* Check leading and trailing spaces which doesn't fit
 			 * into entire pde.
 			 */
 			if (pde_lo == pde_hi)
 				allocate_gmmu_ptes_sparse(vm, pgsz_idx, vaddr,
-						vaddr_hi);
+						vaddr_hi, false, refplus);
 			else if (i == pde_lo)
 				allocate_gmmu_ptes_sparse(vm, pgsz_idx, vaddr,
-					PDE_ADDR_END(vaddr, pde_shift));
+					PDE_ADDR_END(vaddr, pde_shift), false,
+					refplus);
 			else
 				allocate_gmmu_ptes_sparse(vm, pgsz_idx,
 					PDE_ADDR_START(vaddr_hi, pde_shift),
-					vaddr_hi);
+					vaddr_hi, false,
+					refplus);
 		}
 	}
@@ -265,7 +270,52 @@ fail:
 	return ret;
 }
 
+void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
+			       u64 size, u32 pgsz) {
+	int pgsz_idx;
+	u64 vaddr_hi;
+	u32 pde_lo, pde_hi, pde_i;
+
+	gk20a_dbg_fn("");
+	/* determine pagesz idx */
+	for (pgsz_idx = gmmu_page_size_small;
+	     pgsz_idx < gmmu_nr_page_sizes;
+	     pgsz_idx++) {
+		if (gmmu_page_sizes[pgsz_idx] == pgsz)
+			break;
+	}
+	vaddr_hi = vaddr + size - 1;
+	pde_range_from_vaddr_range(vm,
+				   vaddr,
+				   vaddr_hi,
+				   &pde_lo, &pde_hi);
+
+	gk20a_dbg_info("vaddr: 0x%llx, vaddr_hi: 0x%llx, pde_lo: 0x%x, "
+			"pde_hi: 0x%x, pgsz: %d, pde_stride_shift: %d",
+			vaddr, vaddr_hi, pde_lo, pde_hi, pgsz,
+			vm->mm->pde_stride_shift);
+
+	for (pde_i = pde_lo; pde_i <= pde_hi; pde_i++) {
+		u32 pte_lo, pte_hi;
+		u32 pte_cur;
+		void *pte_kv_cur;
+
+		struct page_table_gk20a *pte = vm->pdes.ptes[pgsz_idx] + pde_i;
+		pte->ref_cnt--;
+
+		if (pte->ref_cnt == 0) {
+			free_gmmu_pages(vm, pte->ref, pte->sgt,
+				vm->mm->page_table_sizing[pgsz_idx].order,
+				pte->size);
+			update_gmmu_pde_locked(vm, pde_i);
+		}
+	}
+
+	return;
+}
+
 void gm20b_init_mm(struct gpu_ops *gops)
 {
 	gops->mm.set_sparse = gm20b_vm_put_sparse;
+	gops->mm.clear_sparse = gm20b_vm_clear_sparse;
 }
--
cgit v1.2.2
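
For context, the core of the new gm20b path is a per-PDE reference count on sparse page tables: set_sparse (via allocate_gmmu_ptes_sparse with refplus) bumps pte->ref_cnt, and clear_sparse decrements it and frees the page-table pages once it reaches zero, instead of remapping to a dummy page as gk20a does. Below is a minimal, self-contained C sketch of that lifecycle; all names and types here are illustrative stand-ins, not nvgpu APIs.

/* Simplified model of the sparse PDE ref-counting introduced by this patch.
 * Everything here is hypothetical; it only mirrors the refplus/ref_cnt idea. */
#include <stdio.h>
#include <stdlib.h>

struct pte_table {
	int ref_cnt;   /* how many sparse allocations reference this PDE's PTE table */
	void *pages;   /* stands in for the PTE backing pages (pte->ref / pte->sgt) */
};

/* set_sparse path: allocate backing pages on first use, bump the refcount */
static void model_set_sparse(struct pte_table *pte, int refplus)
{
	if (!pte->pages)
		pte->pages = malloc(4096);   /* stand-in for allocating the PTE table */
	if (refplus)
		pte->ref_cnt++;
}

/* clear_sparse path: drop the refcount, free backing pages when it hits zero */
static void model_clear_sparse(struct pte_table *pte)
{
	pte->ref_cnt--;
	if (pte->ref_cnt == 0) {
		free(pte->pages);            /* stand-in for free_gmmu_pages */
		pte->pages = NULL;
		/* the driver would also rewrite the PDE here (update_gmmu_pde_locked) */
	}
}

int main(void)
{
	struct pte_table pte = { 0, NULL };

	model_set_sparse(&pte, 1);   /* space alloc with the SPARSE flag (refplus = true) */
	model_set_sparse(&pte, 1);   /* second sparse alloc sharing the same PDE */
	model_clear_sparse(&pte);    /* first space free: table kept, ref_cnt -> 1 */
	model_clear_sparse(&pte);    /* second space free: ref_cnt -> 0, pages freed */

	printf("ref_cnt=%d pages=%p\n", pte.ref_cnt, (void *)pte.pages);
	return 0;
}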