commit    f9fd5bbabe0d188a06d25bacdb18b91ef65a147d
parent    9bf82585aa24b6052855c119855abef92671d502
tree      ecf651164e5fbdbba48eec53291f2cef9ac715e7
author    Terje Bergstrom <tbergstrom@nvidia.com>    2015-02-09 23:54:01 -0500
committer Dan Willemsen <dwillemsen@nvidia.com>      2015-04-04 21:07:35 -0400
gpu: nvgpu: Unify PDE & PTE structs
Introduce a new struct gk20a_mm_entry. Allocate and store PDE and PTE
arrays using the same structure. Always pass a pointer to this struct
between functions in memory code when possible.

Change-Id: Ia4a2a6abdac9ab7ba522dafbf73fc3a3d5355c5f
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/696414
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
 drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 36 ++++++++++--------------------------
 1 file changed, 10 insertions(+), 26 deletions(-)
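With the unified entry type, a page directory and its page tables share one
struct: each level holds its backing pages plus an `entries` array of children.
A minimal sketch of what that shape enables, not code from this patch:
free_entry_tree() and num_children are hypothetical (the driver derives the
child count from the PDE range); free_gmmu_pages() is the real helper whose
new signature is declared in the diff below.

/*
 * Sketch only, assuming a two-level GMMU tree (PDB -> page tables):
 * one walk frees both levels, where the old code needed separate
 * page_directory_gk20a and page_table_gk20a paths.
 */
static void free_entry_tree(struct vm_gk20a *vm,
			    struct gk20a_mm_entry *entry,
			    int num_children)
{
	int i;

	/* PTE-level entries have no further children. */
	for (i = 0; entry->entries && i < num_children; i++)
		free_gmmu_pages(vm, &entry->entries[i]);

	free_gmmu_pages(vm, entry);
}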
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 663bd5d3..40e9488d 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -195,15 +195,6 @@ struct gk20a_buffer_state {
 	struct gk20a_fence *fence;
 };
 
-struct page_table_gk20a {
-	/* backing for */
-	/* Either a *page or a *mem_handle */
-	void *ref;
-	/* track mapping cnt on this page table */
-	struct sg_table *sgt;
-	size_t size;
-};
-
 enum gmmu_pgsz_gk20a {
 	gmmu_page_size_small = 0,
 	gmmu_page_size_big   = 1,
@@ -215,16 +206,14 @@ struct gk20a_comptags {
 	u32 lines;
 };
 
-
-struct page_directory_gk20a {
+struct gk20a_mm_entry {
 	/* backing for */
-	u32 num_pdes;
-	void *kv;
-	/* Either a *page or a *mem_handle */
-	void *ref;
+	void *cpu_va;
 	struct sg_table *sgt;
+	struct page **pages;
 	size_t size;
-	struct page_table_gk20a *ptes[gmmu_nr_page_sizes];
+	int pgsz;
+	struct gk20a_mm_entry *entries;
 };
 
 struct priv_cmd_queue {
@@ -305,7 +294,7 @@ struct vm_gk20a {
 
 	struct mutex update_gmmu_lock;
 
-	struct page_directory_gk20a pdes;
+	struct gk20a_mm_entry pdb;
 
 	struct gk20a_allocator vma[gmmu_nr_page_sizes];
 	struct rb_root mapped_buffers;
@@ -557,9 +546,8 @@ int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev);
 int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct device *dev,
 			   u64 offset, struct gk20a_buffer_state **state);
 
-int map_gmmu_pages(void *handle, struct sg_table *sgt,
-		   void **va, size_t size);
-void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va);
+int map_gmmu_pages(struct gk20a_mm_entry *entry);
+void unmap_gmmu_pages(struct gk20a_mm_entry *entry);
 void pde_range_from_vaddr_range(struct vm_gk20a *vm,
 				u64 addr_lo, u64 addr_hi,
 				u32 *pde_lo, u32 *pde_hi);
@@ -568,14 +556,10 @@ u32 pte_index_from_vaddr(struct vm_gk20a *vm,
 			 u64 addr, enum gmmu_pgsz_gk20a pgsz_idx);
 int validate_gmmu_page_table_gk20a_locked(struct vm_gk20a *vm,
 					  u32 i, enum gmmu_pgsz_gk20a gmmu_pgsz_idx);
-int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
-				 enum gmmu_pgsz_gk20a gmmu_pgsz_idx,
-				 struct page_table_gk20a *pte);
 
-void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
-		     struct sg_table *sgt, u32 order,
-		     size_t size);
 void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i);
+void free_gmmu_pages(struct vm_gk20a *vm,
+		     struct gk20a_mm_entry *entry);
 
 u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g);
 
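The API simplification is visible in the signatures above: map_gmmu_pages()
and unmap_gmmu_pages() now take only the entry, which carries its own sgt,
size, and CPU mapping, instead of threading handle/sgt/va/size through every
call. A hypothetical caller sketch, assuming map_gmmu_pages() fills
entry->cpu_va on success (consistent with the old void **va out-parameter);
touch_pdb() is not part of the driver:

/* Sketch: map the VM's page directory base, write through it, unmap. */
static int touch_pdb(struct vm_gk20a *vm)
{
	struct gk20a_mm_entry *entry = &vm->pdb;	/* new pdb member */
	int err;

	err = map_gmmu_pages(entry);	/* assumed to fill entry->cpu_va */
	if (err)
		return err;

	/* ... write PDE words through entry->cpu_va ... */

	unmap_gmmu_pages(entry);	/* drops the CPU mapping */
	return 0;
}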