author     Alex Waterman <alexw@nvidia.com>                        2017-05-11 16:59:22 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2017-07-06 17:44:15 -0400
commit     c1393d5b68e63c992f4c689cb788139fdf8c2f1a (patch)
tree       00a588d35342d75c05fed7733e91da753ba640fb /drivers/gpu/nvgpu/gk20a/mm_gk20a.h
parent     84f712dee8b582dd7d2a19345c621a2ae3bd6292 (diff)
gpu: nvgpu: gmmu programming rewrite
Update the high level mapping logic. Instead of iterating over the GPU VA, iterate over the scatter-gather table chunks. As a result, each GMMU page table update call is simplified dramatically.

This also modifies the chip level code to no longer require an SGL as an argument. Each call to the chip level code is guaranteed to be contiguous, so it only has to worry about making a mapping from virt -> phys. This removes the dependency on Linux that the chip code currently has. With this patch the core GMMU code still uses the Linux SGL, but the logic is highly transferable to a different, nvgpu specific, scatter-gather list format in the near future.

The last major update is to push most of the page table attribute arguments into a struct. That struct is passed on through the various mapping levels. This makes the function calls simpler and easier to follow.

JIRA NVGPU-30

Change-Id: Ibb6b11755f99818fe642622ca0bd4cbed054f602
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master/r/1484104
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
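For context, the sketch below (not part of this patch) illustrates the mapping flow the commit message describes: walk the scatter-gather table chunk by chunk, collect the page table attributes in a single struct, and hand each physically contiguous chunk to a chip-level helper that only maps virt -> phys. The struct gmmu_map_attrs type and the map_contiguous_chunk() helper are illustrative assumptions, not the driver's real API; only struct vm_gk20a, struct sg_table, and the standard scatterlist helpers come from the surrounding code.

#include <linux/scatterlist.h>

/* Hypothetical attribute bundle: the patch moves the many mapping
 * arguments into a struct like this, so each level of the page table
 * update code takes one parameter instead of a long argument list. */
struct gmmu_map_attrs {
	u32	pgsz_idx;	/* page size index */
	u32	kind;		/* kind attribute */
	bool	cacheable;
	int	rw_flag;
};

/* Placeholder for the chip-level code: programs PTEs for one
 * physically contiguous range only; no SGL knowledge required. */
static void map_contiguous_chunk(struct vm_gk20a *vm, u64 gpu_va,
				 u64 phys, u64 len,
				 struct gmmu_map_attrs *attrs)
{
	/* chip-specific PTE programming would go here */
}

/* Sketch of the high-level loop: iterate over the SGL chunks rather
 * than over the GPU VA range, one chip-level call per chunk. */
static u64 map_sgt_sketch(struct vm_gk20a *vm, struct sg_table *sgt,
			  u64 virt_addr, struct gmmu_map_attrs *attrs)
{
	struct scatterlist *sgl;
	unsigned int i;
	u64 gpu_va = virt_addr;

	for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
		u64 phys = sg_phys(sgl);
		u64 len  = sgl->length;

		map_contiguous_chunk(vm, gpu_va, phys, len, attrs);
		gpu_va += len;
	}

	return virt_addr;
}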
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h  16
1 file changed, 1 insertion(+), 15 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index cf37640d..a245d0e0 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -42,12 +42,6 @@
 	outer_flush_range(pa, pa + (size_t)(size)); \
 } while (0)
 
-enum gk20a_mem_rw_flag {
-	gk20a_mem_flag_none = 0,
-	gk20a_mem_flag_read_only = 1,
-	gk20a_mem_flag_write_only = 2,
-};
-
 struct gpfifo_desc {
 	struct nvgpu_mem mem;
 	u32 entry_num;
@@ -347,7 +341,7 @@ int gk20a_mm_suspend(struct gk20a *g);
 u64 gk20a_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl,
 		u32 flags);
 u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova);
-u64 gk20a_mem_get_base_addr(struct gk20a *g, struct nvgpu_mem *mem,
+u64 nvgpu_mem_get_base_addr(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 flags);
 
 void gk20a_mm_ltc_isr(struct gk20a *g);
@@ -371,10 +365,6 @@ static inline phys_addr_t gk20a_mem_phys(struct nvgpu_mem *mem)
 	return 0;
 }
 
-void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry,
-		size_t w, size_t data);
-u64 gk20a_pde_addr(struct gk20a *g, struct gk20a_mm_entry *entry);
-
 u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 		u64 map_offset,
 		struct sg_table *sgt,
@@ -451,8 +441,4 @@ int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd,
 		u64 *buffer_id, u64 *buffer_len);
 void gk20a_vm_unmap_locked_kref(struct kref *ref);
 
-void gk20a_vm_free_entries(struct vm_gk20a *vm,
-		struct gk20a_mm_entry *parent,
-		int level);
-
 #endif /* MM_GK20A_H */