summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h  31
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 7fa0b7fb..e9ac8f18 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -419,6 +419,34 @@ static inline enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm,
419 	return gmmu_page_size_small;
420 }
421 
422/*
423 * Buffer accessors - wrap between begin() and end() if there is no permanent
424 * kernel mapping for this buffer.
425 */
426
427int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem);
428/* nop for null mem, like with free() or vunmap() */
429void gk20a_mem_end(struct gk20a *g, struct mem_desc *mem);
430
431/* word-indexed offset */
432u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w);
433/* byte offset (32b-aligned) */
434u32 gk20a_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset);
435/* memcpy to cpu, offset and size in bytes (32b-aligned) */
436void gk20a_mem_rd_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
437 void *dest, u32 size);
438
439/* word-indexed offset */
440void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data);
441/* byte offset (32b-aligned) */
442void gk20a_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data);
443/* memcpy from cpu, offset and size in bytes (32b-aligned) */
444void gk20a_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
445 void *src, u32 size);
446/* size and offset in bytes (32b-aligned), filled with u32s */
447void gk20a_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
448 u32 value, u32 size);
449
450 #if 0 /*related to addr bits above, concern below TBD on which is accurate */
451 #define bar1_instance_block_shift_gk20a() (max_physaddr_bits_gk20a() -\
452 					bus_bar1_block_ptr_s())
@@ -673,7 +701,6 @@ void pde_range_from_vaddr_range(struct vm_gk20a *vm,
701 	u64 addr_lo, u64 addr_hi,
702 	u32 *pde_lo, u32 *pde_hi);
675int gk20a_mm_pde_coverage_bit_count(struct vm_gk20a *vm); 703int gk20a_mm_pde_coverage_bit_count(struct vm_gk20a *vm);
676u32 *pde_from_index(struct vm_gk20a *vm, u32 i);
704 u32 pte_index_from_vaddr(struct vm_gk20a *vm,
705 	u64 addr, enum gmmu_pgsz_gk20a pgsz_idx);
706 void free_gmmu_pages(struct vm_gk20a *vm,
@@ -685,7 +712,7 @@ struct gpu_ops;
712 void gk20a_init_mm(struct gpu_ops *gops);
713 const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
714 	u32 big_page_size);
715 void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *mem, u64 pdb_addr);
689 716
690void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block); 717void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block);
691 718