author    Terje Bergstrom <tbergstrom@nvidia.com>  2015-01-14 11:54:26 -0500
committer Dan Willemsen <dwillemsen@nvidia.com>    2015-04-04 21:08:16 -0400
commit    f3a920cb01d1517db5432c8062b660d6b60eb4de (patch)
tree      bc366a7df51745ea2d6b740395403cf2add2ebef /drivers/gpu/nvgpu/gk20a/mm_gk20a.h
parent    8d1ab756ed8a7f4d3138dc5da9d2de9f52915261 (diff)
gpu: nvgpu: Refactor page mapping code

Always pass the page directory structure to mm functions instead of
pointers to its members. Also split update_gmmu_ptes_locked() into
smaller functions, and turn the hard-coded MMU levels (PDE, PTE) into
run-time parameters.

Change-Id: I315ef7aebbea1e61156705361f2e2a63b5fb7bf1
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/672485
Reviewed-by: Automatic_Commit_Validation_User
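To make the "run-time parameters" idea concrete, below is a minimal sketch of how such a level table could be populated for a 64 KiB big-page configuration. The bit boundaries and the update_pde_entry()/update_pte_entry() callback names are illustrative assumptions, not the verbatim driver code; the real tables are the gk20a_mm_levels_64k[]/gk20a_mm_levels_128k[] arrays declared at the end of this header.

/*
 * Illustrative sketch only: a two-level GMMU layout with 64 MiB PDEs,
 * 4 KiB small pages and 64 KiB big pages. hi_bit[]/lo_bit[] are indexed
 * by page size (0 = small, 1 = big); update_pde_entry() and
 * update_pte_entry() are hypothetical callbacks standing in for the
 * driver's real ones.
 */
static const struct gk20a_mmu_level example_mm_levels_64k[] = {
	{ .hi_bit = {39, 39},	/* PDE selects VA bits 39..26 */
	  .lo_bit = {26, 26},
	  .update_entry = update_pde_entry,
	  .entry_size = 8 },
	{ .hi_bit = {25, 25},	/* PTE selects VA bits 25..12 (25..16 big) */
	  .lo_bit = {12, 16},
	  .update_entry = update_pte_entry,
	  .entry_size = 8 },
	{ .update_entry = NULL },	/* terminator: the walk stops here */
};

With the levels expressed as data, the split-out mapping helpers can walk this table generically instead of special-casing PDE and PTE handling.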
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 37
1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 7b355436..42c164be 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -268,6 +268,18 @@ struct vm_reserved_va_node {
 	bool sparse;
 };
 
+struct gk20a_mmu_level {
+	int hi_bit[2];
+	int lo_bit[2];
+	int (*update_entry)(struct vm_gk20a *vm,
+			    struct gk20a_mm_entry *pte,
+			    u32 i, u32 gmmu_pgsz_idx,
+			    u64 iova,
+			    u32 kind_v, u32 *ctag,
+			    bool cacheable, int rw_flag, bool sparse);
+	size_t entry_size;
+};
+
 struct vm_gk20a {
 	struct mm_gk20a *mm;
 	struct gk20a_as_share *as_share; /* as_share this represents */
@@ -282,13 +294,8 @@ struct vm_gk20a {
 	bool mapped;
 
 	u32 big_page_size;
-	u32 pde_stride;
-	u32 pde_stride_shift;
 
-	struct {
-		u32 order;
-		u32 num_ptes;
-	} page_table_sizing[gmmu_nr_page_sizes];
+	const struct gk20a_mmu_level *mmu_levels;
 
 	struct kref ref;
 
@@ -450,7 +457,8 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 			u32 ctag_offset,
 			u32 flags,
 			int rw_flag,
-			bool clear_ctags);
+			bool clear_ctags,
+			bool sparse);
 
 void gk20a_gmmu_unmap(struct vm_gk20a *vm,
 		u64 vaddr,
@@ -462,7 +470,8 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 			u64 size,
 			int pgsz_idx,
 			bool va_allocated,
-			int rw_flag);
+			int rw_flag,
+			bool sparse);
 
 struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf);
 void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
@@ -557,13 +566,10 @@ void unmap_gmmu_pages(struct gk20a_mm_entry *entry);
 void pde_range_from_vaddr_range(struct vm_gk20a *vm,
 					u64 addr_lo, u64 addr_hi,
 					u32 *pde_lo, u32 *pde_hi);
+int gk20a_mm_pde_coverage_bit_count(struct vm_gk20a *vm);
 u32 *pde_from_index(struct vm_gk20a *vm, u32 i);
 u32 pte_index_from_vaddr(struct vm_gk20a *vm,
 			u64 addr, enum gmmu_pgsz_gk20a pgsz_idx);
-int validate_gmmu_page_table_gk20a_locked(struct vm_gk20a *vm,
-			u32 i, enum gmmu_pgsz_gk20a gmmu_pgsz_idx);
-
-void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i);
 void free_gmmu_pages(struct vm_gk20a *vm,
 			struct gk20a_mm_entry *entry);
 
@@ -571,4 +577,11 @@ u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g);
 
 struct gpu_ops;
 void gk20a_init_mm(struct gpu_ops *gops);
+const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
+						u32 big_page_size);
+void gk20a_mm_init_pdb(struct gk20a *g, void *inst_ptr, u64 pdb_addr);
+
+extern const struct gk20a_mmu_level gk20a_mm_levels_64k[];
+extern const struct gk20a_mmu_level gk20a_mm_levels_128k[];
+
 #endif /* MM_GK20A_H */
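As a closing illustration, a level-driven walker needs nothing beyond hi_bit[]/lo_bit[] to locate an entry at each level. This is a hypothetical sketch built on the gk20a_mmu_level struct added above, not the body of the driver's actual update functions:

/*
 * Hypothetical helper: compute the entry index a GPU virtual address
 * selects at one MMU level, using only the table's bit boundaries.
 * Assumes hi_bit[pgsz_idx] < 63 so the mask computation cannot overflow.
 */
static u32 level_entry_index(const struct gk20a_mmu_level *l,
			u64 gva, u32 pgsz_idx)
{
	u64 mask = (1ULL << (l->hi_bit[pgsz_idx] + 1)) - 1;

	return (u32)((gva & mask) >> l->lo_bit[pgsz_idx]);
}

A walker would loop over vm->mmu_levels until it reaches the entry whose update_entry is NULL, calling a helper like this at each step to pick the child entry to update or descend into.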