From 0566aee853eb32f4f796499b6b00ddf0f1d7de34 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Thu, 16 Apr 2015 11:46:22 -0700
Subject: gpu: nvgpu: WAR for simulator bug

On linsim, when push buffers are allowed to be allocated with small
pages above 4GB, the simulator crashes. This patch ensures that for
linsim all small page allocations are forced to be below 4GB in the
GPU VA space. By doing so the simulator no longer crashes.

This bug has come up because the GPU buddy allocator work generates
allocations at the top of the address space first. Thus push buffers
were located between 12GB and 16GB in the GPU VA space.

Change-Id: Iaef0af3fda3f37ac09a66b5e1179527d6fe08ccc
Signed-off-by: Alex Waterman
Reviewed-on: http://git-master/r/740728
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom
Tested-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 13 ++++++---
 drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 54 ++++++++++++++++++++++++++++++++------
 2 files changed, 56 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 7d359ff4..5d1ff563 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1437,7 +1437,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 	 * the alignment determined by gmmu_select_page_size().
 	 */
 	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
-		int pgsz_idx = NV_GMMU_VA_IS_UPPER(offset_align) ?
+		int pgsz_idx = __nv_gmmu_va_is_upper(vm, offset_align) ?
 				gmmu_page_size_big : gmmu_page_size_small;
 		if (pgsz_idx > bfr.pgsz_idx) {
 			gk20a_err(d, "%llx buffer pgsz %d, VA pgsz %d",
@@ -2441,6 +2441,13 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 	/* note: keep the page sizes sorted lowest to highest here */
 	u32 gmmu_page_sizes[gmmu_nr_page_sizes] = { SZ_4K, big_page_size };
 
+	/*
+	 * Linsim bug: seems like we can't have pushbuffers above 4GB. Easy WAR for sim
+	 * is to just limit the address space to 4GB.
+	 */
+	if (tegra_platform_is_linsim() && aperture_size > SZ_4G)
+		aperture_size = SZ_4G;
+
 	vm->mm = mm;
 
 	vm->va_start  = low_hole;
@@ -2483,7 +2490,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 	 * remains is allocated to large pages.
 	 */
 	small_vma_size = vm->va_limit;
 	if (big_pages) {
-		small_vma_size = (u64)16 << 30;
+		small_vma_size = __nv_gmmu_va_small_page_limit();
 		large_vma_size = vm->va_limit - small_vma_size;
 	}
@@ -2698,7 +2705,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 			args->pages, args->offset);
 
 	/* determine pagesz idx */
-	pgsz_idx = NV_GMMU_VA_IS_UPPER(args->offset) ?
+	pgsz_idx = __nv_gmmu_va_is_upper(vm, args->offset) ?
 			gmmu_page_size_big : gmmu_page_size_small;
 
 	start_page_nr = (u32)(args->offset >>
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 24309abc..57f7a373 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -21,18 +21,11 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include "gk20a_allocator.h"
 
-/*
- * Amount of the GVA space we actually use is smaller than the available space.
- * The bottom 16GB of the space are used for small pages, the remaining high
- * memory is for large pages.
- */
-#define NV_GMMU_VA_RANGE	37ULL
-#define NV_GMMU_VA_IS_UPPER(x)	((x) >= ((u64)SZ_1G * 16))
-
 #ifdef CONFIG_ARM64
 #define outer_flush_range(a, b)
 #define __cpuc_flush_dcache_area __flush_dcache_area
@@ -344,6 +337,51 @@ static inline int max_vaddr_bits_gk20a(void)
 	return 40; /* chopped for area? */
 }
 
+/*
+ * Amount of the GVA space we actually use is smaller than the available space.
+ */
+#define NV_GMMU_VA_RANGE	37
+
+/*
+ * The bottom 16GB of the space are used for small pages, the remaining high
+ * memory is for large pages. On simulation use 2GB for small pages, 2GB for
+ * large pages (if enabled).
+ */
+static inline u64 __nv_gmmu_va_small_page_limit(void)
+{
+	if (tegra_platform_is_linsim())
+		return ((u64)SZ_1G * 2);
+	else
+		return ((u64)SZ_1G * 16);
+}
+
+static inline int __nv_gmmu_va_is_upper(struct vm_gk20a *vm, u64 addr)
+{
+	if (!vm->big_pages)
+		return 0;
+
+	return addr >= __nv_gmmu_va_small_page_limit();
+}
+
+/*
+ * This determines the PTE size for a given alloc. Used by both the GVA space
+ * allocator and the mm core code so that agreement can be reached on how to
+ * map allocations.
+ */
+static inline enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm,
+						  u64 base, u64 size)
+{
+	/*
+	 * Currently userspace is not ready for a true unified address space.
+	 * As a result, even though the allocator supports mixed address spaces
+	 * the address spaces must be treated as separate for now.
+	 */
+	if (__nv_gmmu_va_is_upper(vm, base))
+		return gmmu_page_size_big;
+	else
+		return gmmu_page_size_small;
+}
+
 #if 0 /*related to addr bits above, concern below TBD on which is accurate */
 #define bar1_instance_block_shift_gk20a() (max_physaddr_bits_gk20a() -\
 					bus_bar1_block_ptr_s())
--
cgit v1.2.2
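The helpers added to mm_gk20a.h reduce to a single threshold test: GPU VAs below
__nv_gmmu_va_small_page_limit() (2GB on linsim, 16GB on silicon) are mapped with
small pages, anything above it with big pages. The following standalone sketch is
not part of the patch or the driver: tegra_platform_is_linsim() and the vm struct
are replaced by plain booleans, SZ_1G is defined locally, and the function names
are local approximations. It only shows where the 12GB pushbuffer offsets
mentioned in the commit message land under each limit.

    /* Standalone illustration of the small/big page VA split used above. */
    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SZ_1G (1ULL << 30)

    /* Approximates __nv_gmmu_va_small_page_limit(): 2GB on linsim, 16GB otherwise. */
    static uint64_t small_page_limit(bool is_linsim)
    {
            return is_linsim ? 2 * SZ_1G : 16 * SZ_1G;
    }

    /* Approximates __nv_gmmu_va_is_upper(): the big-page region starts at the limit. */
    static bool va_is_upper(bool big_pages, bool is_linsim, uint64_t addr)
    {
            if (!big_pages)
                    return false;
            return addr >= small_page_limit(is_linsim);
    }

    int main(void)
    {
            /* A pushbuffer placed at 12GB, as described in the commit message. */
            uint64_t pb = 12 * SZ_1G;

            printf("silicon: 12GB offset falls in the %s-page region\n",
                   va_is_upper(true, false, pb) ? "big" : "small");
            printf("linsim : 12GB offset falls in the %s-page region\n",
                   va_is_upper(true, true, pb) ? "big" : "small");
            return 0;
    }

On silicon the 12GB offset sits below the 16GB limit and stays in the small-page
region; on linsim the limit drops to 2GB, and the patch additionally caps the
aperture at 4GB in gk20a_init_vm() so small-page allocations can no longer land
that high at all.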