From f9fd5bbabe0d188a06d25bacdb18b91ef65a147d Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Mon, 9 Feb 2015 20:54:01 -0800
Subject: gpu: nvgpu: Unify PDE & PTE structs

Introduce a new struct gk20a_mm_entry. Allocate and store PDE and PTE
arrays using the same structure. Always pass a pointer to this struct
when possible between functions in memory code.

Change-Id: Ia4a2a6abdac9ab7ba522dafbf73fc3a3d5355c5f
Signed-off-by: Terje Bergstrom
Reviewed-on: http://git-master/r/696414
---
 drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 36 ++++++++++--------------------------
 1 file changed, 10 insertions(+), 26 deletions(-)

(limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')

diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 663bd5d3..40e9488d 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -195,15 +195,6 @@ struct gk20a_buffer_state {
 	struct gk20a_fence *fence;
 };
 
-struct page_table_gk20a {
-	/* backing for */
-	/* Either a *page or a *mem_handle */
-	void *ref;
-	/* track mapping cnt on this page table */
-	struct sg_table *sgt;
-	size_t size;
-};
-
 enum gmmu_pgsz_gk20a {
 	gmmu_page_size_small = 0,
 	gmmu_page_size_big = 1,
@@ -215,16 +206,14 @@ struct gk20a_comptags {
 	u32 lines;
 };
 
-
-struct page_directory_gk20a {
+struct gk20a_mm_entry {
 	/* backing for */
-	u32 num_pdes;
-	void *kv;
-	/* Either a *page or a *mem_handle */
-	void *ref;
+	void *cpu_va;
 	struct sg_table *sgt;
+	struct page **pages;
 	size_t size;
-	struct page_table_gk20a *ptes[gmmu_nr_page_sizes];
+	int pgsz;
+	struct gk20a_mm_entry *entries;
 };
 
 struct priv_cmd_queue {
@@ -305,7 +294,7 @@ struct vm_gk20a {
 
 	struct mutex update_gmmu_lock;
 
-	struct page_directory_gk20a pdes;
+	struct gk20a_mm_entry pdb;
 
 	struct gk20a_allocator vma[gmmu_nr_page_sizes];
 	struct rb_root mapped_buffers;
@@ -557,9 +546,8 @@ int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev);
 int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct device *dev,
 			   u64 offset, struct gk20a_buffer_state **state);
 
-int map_gmmu_pages(void *handle, struct sg_table *sgt,
-		   void **va, size_t size);
-void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va);
+int map_gmmu_pages(struct gk20a_mm_entry *entry);
+void unmap_gmmu_pages(struct gk20a_mm_entry *entry);
 void pde_range_from_vaddr_range(struct vm_gk20a *vm,
 			      u64 addr_lo, u64 addr_hi,
 			      u32 *pde_lo, u32 *pde_hi);
@@ -568,14 +556,10 @@ u32 pte_index_from_vaddr(struct vm_gk20a *vm,
 	u64 addr, enum gmmu_pgsz_gk20a pgsz_idx);
 int validate_gmmu_page_table_gk20a_locked(struct vm_gk20a *vm,
 	u32 i, enum gmmu_pgsz_gk20a gmmu_pgsz_idx);
 
-int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
-	enum gmmu_pgsz_gk20a gmmu_pgsz_idx,
-	struct page_table_gk20a *pte);
-void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
-	struct sg_table *sgt, u32 order,
-	size_t size);
 void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i);
+void free_gmmu_pages(struct vm_gk20a *vm,
+	struct gk20a_mm_entry *entry);
 
 u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g);
 
--
cgit v1.2.2
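
After this change a single struct gk20a_mm_entry describes every level of the
GMMU page-table tree: the VM's pdb entry backs the page directory, and its
entries pointer holds the child page-table entries, which is why
map_gmmu_pages(), unmap_gmmu_pages() and free_gmmu_pages() can each take just
a struct gk20a_mm_entry *. The following is a minimal userspace sketch of that
ownership model, not the driver code: the calloc-backed helpers and the
num_entries field are hypothetical stand-ins (the driver uses DMA-backed
allocations and derives child counts from the page directory geometry).

#include <stdio.h>
#include <stdlib.h>

/*
 * Simplified stand-in for struct gk20a_mm_entry. num_entries is not in
 * the patch; it is added here so the model can walk its children.
 */
struct mm_entry {
	void *cpu_va;             /* CPU mapping of the backing memory */
	size_t size;              /* size of the backing allocation */
	int pgsz;                 /* page-size index this entry serves */
	struct mm_entry *entries; /* child entries: a PDE owns PTE tables */
	int num_entries;
};

/* Hypothetical allocator: one backing buffer plus a child-entry array. */
static int entry_alloc(struct mm_entry *e, size_t size, int num_children)
{
	e->cpu_va = calloc(1, size);
	if (!e->cpu_va)
		return -1;
	e->size = size;
	e->num_entries = num_children;
	e->entries = num_children ?
		calloc((size_t)num_children, sizeof(*e->entries)) : NULL;
	if (num_children && !e->entries) {
		free(e->cpu_va);
		e->cpu_va = NULL;
		return -1;
	}
	return 0;
}

/* One teardown path for directories and tables alike. */
static void entry_free(struct mm_entry *e)
{
	int i;

	for (i = 0; i < e->num_entries; i++)
		entry_free(&e->entries[i]);
	free(e->entries);
	free(e->cpu_va);
	e->entries = NULL;
	e->cpu_va = NULL;
}

int main(void)
{
	struct mm_entry pdb = {0};
	int i;

	/* A "page directory" entry owning four "page table" entries. */
	if (entry_alloc(&pdb, 4096, 4))
		return 1;
	for (i = 0; i < pdb.num_entries; i++)
		if (entry_alloc(&pdb.entries[i], 4096, 0))
			return 1;

	printf("pdb: %zu bytes, %d child tables\n", pdb.size, pdb.num_entries);
	entry_free(&pdb);
	return 0;
}

The payoff of the unification shows up in entry_free(): because a directory
and a table now share one type, a single recursive walk can release the whole
tree, where the old code needed separate page_directory_gk20a and
page_table_gk20a paths and the four-argument free_gmmu_pages().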