Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h  60
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 5ef8ae25..394d1d25 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -270,11 +270,13 @@ struct vm_gk20a {
 
 	struct gk20a_mm_entry pdb;
 
-	struct nvgpu_allocator vma[gmmu_nr_page_sizes];
-
 	/* If necessary, split fixed from non-fixed. */
 	struct nvgpu_allocator fixed;
 
+	struct nvgpu_allocator *vma[gmmu_nr_page_sizes];
+	struct nvgpu_allocator kernel;
+	struct nvgpu_allocator user;
+
 	struct rb_root mapped_buffers;
 
 	struct list_head reserved_va_list;
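Side note on the vma change above: turning the embedded allocator array into an array of pointers lets several page-size indices share one underlying VA allocator. A minimal wiring sketch, assuming gmmu_page_size_small/big/kernel are valid enum indices and that user allocations (small and big pages) share vm->user while kernel-internal allocations come from vm->kernel; the function name is hypothetical and the real initialization lives in mm_gk20a.c, outside this diff:

/*
 * Hypothetical sketch -- not code from this commit.  Small- and
 * big-page user allocations share one allocator; kernel-reserved VA
 * gets its own.
 */
static void vm_init_vma_pointers(struct vm_gk20a *vm)
{
	vm->vma[gmmu_page_size_small]  = &vm->user;
	vm->vma[gmmu_page_size_big]    = &vm->user;
	vm->vma[gmmu_page_size_kernel] = &vm->kernel;
}

Code that previously took &vm->vma[pgsz] now simply dereferences the pointer, which is what allows the user/kernel split without duplicating allocator state per page size.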
@@ -425,7 +427,7 @@ static inline int bar1_aperture_size_mb_gk20a(void)
 	return 16; /* 16MB is more than enough atm. */
 }
 
-/*The maximum GPU VA range supported */
+/* The maximum GPU VA range supported */
 #define NV_GMMU_VA_RANGE	38
 
 /* The default userspace-visible GPU VA size */
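For scale: an NV_GMMU_VA_RANGE of 38 gives a 2^38-byte, i.e. 256 GiB, GPU virtual address space, and NV_MM_DEFAULT_KERNEL_SIZE in the next hunk (1ULL << 32) reserves 4 GiB of it for kernel mappings by default.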
@@ -434,43 +436,39 @@ static inline int bar1_aperture_size_mb_gk20a(void)
 /* The default kernel-reserved GPU VA size */
 #define NV_MM_DEFAULT_KERNEL_SIZE (1ULL << 32)
 
-/*
- * The bottom 16GB of the space are used for small pages, the remaining high
- * memory is for large pages.
- */
-static inline u64 __nv_gmmu_va_small_page_limit(void)
-{
-	return ((u64)SZ_1G * 16);
-}
-
-static inline int __nv_gmmu_va_is_big_page_region(struct vm_gk20a *vm, u64 addr)
-{
-	struct nvgpu_allocator *a = &vm->vma[gmmu_page_size_big];
-
-	if (!vm->big_pages)
-		return 0;
-
-	return addr >= nvgpu_alloc_base(a) &&
-		addr < nvgpu_alloc_base(a) + nvgpu_alloc_length(a);
-}
+enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
+					      u64 base, u64 size);
 
 /*
  * This determines the PTE size for a given alloc. Used by both the GVA space
  * allocator and the mm core code so that agreement can be reached on how to
  * map allocations.
+ *
+ * The page size of a buffer is this:
+ *
+ *   o  If the VM doesn't support large pages then obviously small pages
+ *      must be used.
+ *   o  If the base address is non-zero (fixed address map):
+ *      - Attempt to find a reserved memory area and use the page size
+ *        based on that.
+ *      - If no reserved page size is available, default to small pages.
+ *   o  If the base is zero:
+ *      - If the size is greater than or equal to the big page size, use big
+ *        pages.
+ *      - Otherwise use small pages.
  */
 static inline enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm,
 		u64 base, u64 size)
 {
-	/*
-	 * Currently userspace is not ready for a true unified address space.
-	 * As a result, even though the allocator supports mixed address spaces
-	 * the address spaces must be treated as separate for now.
-	 */
-	if (__nv_gmmu_va_is_big_page_region(vm, base))
-		return gmmu_page_size_big;
-	else
+	if (!vm->big_pages)
 		return gmmu_page_size_small;
+
+	if (base)
+		return __get_pte_size_fixed_map(vm, base, size);
+
+	if (size >= vm->gmmu_page_sizes[gmmu_page_size_big])
+		return gmmu_page_size_big;
+	return gmmu_page_size_small;
 }
 
 /*
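To make the new __get_pte_size() decision tree concrete, here is a hedged usage sketch; the values are illustrative and assume a VM configured with big pages enabled and a 128 KiB big page size (SZ_* constants from linux/sizes.h):

/*
 * Illustrative only: assumes vm->big_pages is set and
 * vm->gmmu_page_sizes[gmmu_page_size_big] == SZ_128K.
 */
static void pte_size_examples(struct vm_gk20a *vm)
{
	/* Zero base, size >= big page size: big pages. */
	enum gmmu_pgsz_gk20a a = __get_pte_size(vm, 0, SZ_1M);

	/* Zero base, size below the big page size: small pages. */
	enum gmmu_pgsz_gk20a b = __get_pte_size(vm, 0, SZ_64K);

	/* Non-zero base: defer to the fixed-mapping lookup, which uses the
	 * page size of the reserved area (or small pages if none exists). */
	enum gmmu_pgsz_gk20a c = __get_pte_size(vm, 1ULL << 32, SZ_1M);

	(void)a; (void)b; (void)c;
}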
@@ -797,6 +795,8 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *mem,
 
 void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block);
 
+int gk20a_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size);
+
 extern const struct gk20a_mmu_level gk20a_mm_levels_64k[];
 extern const struct gk20a_mmu_level gk20a_mm_levels_128k[];
 
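The body of the newly declared gk20a_big_pages_possible() lives in mm_gk20a.c and is not part of this diff. As a rough sketch of what such a predicate plausibly checks (an assumption, not the commit's code): a fixed mapping can only use big pages when its base and size fit big-page granularity.

/*
 * Assumed shape only -- the real implementation is in mm_gk20a.c and may
 * be stricter (e.g. PDE-granularity alignment, since one PDE cannot mix
 * small- and big-page PTEs).
 */
int gk20a_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size)
{
	u64 big_page_size = vm->gmmu_page_sizes[gmmu_page_size_big];

	if (!vm->big_pages)
		return 0;

	/* Both the base address and the size must be big-page aligned. */
	return !(base & (big_page_size - 1)) && !(size & (big_page_size - 1));
}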