author		Sami Kiminki <skiminki@nvidia.com>	2015-05-22 13:48:22 -0400
committer	Terje Bergstrom <tbergstrom@nvidia.com>	2015-09-07 15:37:15 -0400
commit		eade809c265ada214c6c47e1ad9db1706c868da0
tree		a132050d7a4c2193f338e0082d258e1eac958cc1 /drivers/gpu/nvgpu/gk20a/mm_gk20a.h
parent		57034b22ca17b217b683941dcebc0d69587d7d5e
gpu: nvgpu: Separate kernel and user GPU VA regions
Separate the kernel and userspace regions in the GPU virtual address
space. Do this by reserving the last part of the GPU VA aperture for the
kernel, and extend the GPU VA aperture accordingly for regular address
spaces. This prevents the kernel from polluting the userspace-visible
GPU VA regions and thus makes the success of fixed-address mapping more
predictable.

Bug 200077571

Change-Id: I63f0e73d4c815a4a9fa4a9ce568709974690ef0f
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-on: http://git-master/r/747191
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
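[Editor's note] As a rough illustration of the scheme, the sketch below carves the kernel-only region out of the top of the aperture and leaves everything beneath it visible to userspace. This is a standalone toy, not the driver code; all names in it are invented.

#include <stdint.h>
#include <stdio.h>

/* toy model of the split aperture; not the driver's vm structures */
struct va_split {
	uint64_t user_base, user_size;     /* userspace-visible region */
	uint64_t kernel_base, kernel_size; /* kernel-only region on top */
};

/* reserve the last kernel_size bytes of the aperture for the kernel */
static void split_aperture(struct va_split *s, uint64_t aperture_size,
			   uint64_t kernel_size)
{
	s->kernel_size = kernel_size;
	s->kernel_base = aperture_size - kernel_size;
	s->user_base   = 0;
	s->user_size   = s->kernel_base;
}

int main(void)
{
	/* the patch's defaults: 2^37 B user + 2^32 B kernel */
	struct va_split s;

	split_aperture(&s, (1ULL << 37) + (1ULL << 32), 1ULL << 32);
	printf("user:   [%#llx, %#llx)\n",
	       (unsigned long long)s.user_base,
	       (unsigned long long)(s.user_base + s.user_size));
	printf("kernel: [%#llx, %#llx)\n",
	       (unsigned long long)s.kernel_base,
	       (unsigned long long)(s.kernel_base + s.kernel_size));
	return 0;
}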
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 47
1 file changed, 21 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 5c6c285a..6786e3c2 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -127,9 +127,10 @@ struct gk20a_buffer_state {
 };
 
 enum gmmu_pgsz_gk20a {
 	gmmu_page_size_small  = 0,
 	gmmu_page_size_big    = 1,
-	gmmu_nr_page_sizes    = 2
+	gmmu_page_size_kernel = 2,
+	gmmu_nr_page_sizes    = 3,
 };
 
 struct gk20a_comptags {
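The new gmmu_page_size_kernel index matters because arrays dimensioned by gmmu_nr_page_sizes — such as the per-VM vma[] table consulted by the region check further down — automatically gain a third slot for the kernel region. A minimal standalone sketch of that effect (va_region and vm_sketch are stand-ins for the driver's types):

#include <stdint.h>

enum gmmu_pgsz_gk20a {
	gmmu_page_size_small  = 0,
	gmmu_page_size_big    = 1,
	gmmu_page_size_kernel = 2,
	gmmu_nr_page_sizes    = 3,
};

/* stand-in for one allocator region */
struct va_region {
	uint64_t base;
	uint64_t length;
};

/* stand-in for vm_gk20a: one region per page-size index, kernel included */
struct vm_sketch {
	struct va_region vma[gmmu_nr_page_sizes];
};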
@@ -284,8 +285,10 @@ void gk20a_mm_l2_invalidate(struct gk20a *g);
 struct mm_gk20a {
 	struct gk20a *g;
 
+	/* default GPU VA sizes for the channel address spaces */
 	struct {
-		u64 size;
+		u64 user_size;   /* userspace-visible GPU VA region */
+		u64 kernel_size; /* kernel-only GPU VA region */
 	} channel;
 
 	struct {
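With the single size split into a user/kernel pair, the channel's total aperture is now derived rather than stored. A small sketch of the relationship (hypothetical helper; u64 as in the surrounding kernel code):

/* the channel's total GPU VA aperture: user region at the bottom,
 * kernel-reserved region stacked on top */
static inline u64 channel_aperture_size(u64 user_size, u64 kernel_size)
{
	return user_size + kernel_size;
}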
@@ -340,26 +343,15 @@ static inline int bar1_aperture_size_mb_gk20a(void)
 {
 	return 16; /* 16MB is more than enough atm. */
 }
-/* max address bits */
-static inline int max_physaddr_bits_gk20a(void)
-{
-	return 40; /* "old" sys physaddr, meaningful? */
-}
-static inline int max_vid_physaddr_bits_gk20a(void)
-{
-	/* "vid phys" is asid/smmu phys?,
-	 * i.e. is this the real sys physaddr? */
-	return 37;
-}
-static inline int max_vaddr_bits_gk20a(void)
-{
-	return 40; /* chopped for area? */
-}
 
-/*
- * Amount of the GVA space we actually use is smaller than the available space.
- */
-#define NV_GMMU_VA_RANGE	40
+/* The maximum GPU VA range supported */
+#define NV_GMMU_VA_RANGE		38
+
+/* The default userspace-visible GPU VA size */
+#define NV_MM_DEFAULT_USER_SIZE		(1ULL << 37)
+
+/* The default kernel-reserved GPU VA size */
+#define NV_MM_DEFAULT_KERNEL_SIZE	(1ULL << 32)
 
 /*
  * The bottom 16GB of the space are used for small pages, the remaining high
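Checking the arithmetic on the new constants: NV_GMMU_VA_RANGE of 38 bits caps the aperture at 2^38 B = 256 GiB, while the defaults are 2^37 B = 128 GiB for userspace plus 2^32 B = 4 GiB for the kernel, i.e. a 132 GiB split that fits with room to spare. That invariant could be made explicit with a compile-time check along these lines (not part of the patch; C11 static_assert):

#include <assert.h>

#define NV_GMMU_VA_RANGE		38
#define NV_MM_DEFAULT_USER_SIZE		(1ULL << 37)
#define NV_MM_DEFAULT_KERNEL_SIZE	(1ULL << 32)

/* 128 GiB + 4 GiB = 132 GiB, within the 256 GiB (2^38 B) maximum */
static_assert(NV_MM_DEFAULT_USER_SIZE + NV_MM_DEFAULT_KERNEL_SIZE <=
	      (1ULL << NV_GMMU_VA_RANGE),
	      "default user+kernel split exceeds the GMMU VA range");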
@@ -370,12 +362,14 @@ static inline u64 __nv_gmmu_va_small_page_limit(void)
 	return ((u64)SZ_1G * 16);
 }
 
-static inline int __nv_gmmu_va_is_upper(struct vm_gk20a *vm, u64 addr)
+static inline int __nv_gmmu_va_is_big_page_region(struct vm_gk20a *vm, u64 addr)
 {
 	if (!vm->big_pages)
 		return 0;
 
-	return addr >= __nv_gmmu_va_small_page_limit();
+	return addr >= vm->vma[gmmu_page_size_big].base &&
+		addr < vm->vma[gmmu_page_size_big].base +
+		vm->vma[gmmu_page_size_big].length;
 }
 
 /*
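The old __nv_gmmu_va_is_upper treated every address above the 16 GB small-page limit as big-page space, which would now wrongly include the kernel region at the top of the aperture; the renamed helper instead tests membership in the big-page VMA itself. The test is an ordinary half-open interval check, sketched standalone below with stand-in types:

#include <stdbool.h>
#include <stdint.h>

struct va_region {
	uint64_t base;
	uint64_t length;
};

/* half-open range test: base <= addr < base + length
 * (assumes base + length does not overflow, which fixed-size
 * apertures like these guarantee) */
static inline bool in_region(const struct va_region *r, uint64_t addr)
{
	return addr >= r->base && addr < r->base + r->length;
}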
@@ -391,7 +385,7 @@ static inline enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm,
 	 * As a result, even though the allocator supports mixed address spaces
 	 * the address spaces must be treated as separate for now.
 	 */
-	if (__nv_gmmu_va_is_upper(vm, base))
+	if (__nv_gmmu_va_is_big_page_region(vm, base))
 		return gmmu_page_size_big;
 	else
 		return gmmu_page_size_small;
@@ -617,6 +611,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 		  struct vm_gk20a *vm,
 		  u32 big_page_size,
 		  u64 low_hole,
+		  u64 kernel_reserved,
 		  u64 aperture_size,
 		  bool big_pages,
 		  char *name);
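Existing callers of gk20a_init_vm must now pass the kernel reservation explicitly. A hypothetical call shape, assuming the defaults defined above and pre-existing mm/vm objects (this fragment is illustrative, not taken from the patch):

/* hypothetical caller; values mirror the new defaults */
err = gk20a_init_vm(mm, vm,
		    big_page_size,
		    low_hole,
		    NV_MM_DEFAULT_KERNEL_SIZE,	/* kernel_reserved */
		    NV_MM_DEFAULT_USER_SIZE +
			NV_MM_DEFAULT_KERNEL_SIZE,	/* aperture_size */
		    true,			/* big_pages */
		    "example-vm");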