author	Alex Waterman <alexw@nvidia.com>	2018-07-20 14:12:19 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-21 18:44:02 -0400
commit	652da8116966af2a8438a9a9f135a11b4e5c6c7b (patch)
tree	550e66e9127bf44b03f44a0b9b4c39cbe5b42a14 /drivers
parent	bcf83fab33d8f1821cfcfd8c03411ea2fcf5334b (diff)
gpu: nvgpu: Force the PMU VM to use 128K large pages (gm20b)
Add a WAR for gm20b that allows us to force the PMU VM to use 128K
large pages. For some reason, setting the small page size to 64K breaks
the PMU boot; it is unclear why. A bug needs to be filed and fixed, and
once it is fixed this patch can and should be reverted.

Bug 200105199

Change-Id: I2b4c9e214e2a6dff33bea18bd2359c33364ba03f
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1782769
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
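The change wires the WAR through three layers: a per-platform bool (force_128K_pmu_vm in struct gk20a_platform, set only for gm20b), an enabled flag (NVGPU_MM_FORCE_128K_PMU_VM) copied from it in nvgpu_init_mm_vars(), and a check in nvgpu_init_system_vm() that overrides the PMU VM's big page size to 128K. The standalone C sketch below models that flow with simplified stand-in types; it is illustrative only and does not use the real nvgpu structures or helpers.

/*
 * Standalone sketch only -- not code from the nvgpu driver. It models how
 * the WAR flows: a platform bool is copied into an "enabled" flag at init,
 * and the PMU VM setup overrides the big page size when that flag is set.
 * The structs and SZ_* macros here are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define SZ_64K  (64u * 1024u)
#define SZ_128K (128u * 1024u)

/* Stand-in for the NVGPU_MM_FORCE_128K_PMU_VM enabled flag on struct gk20a. */
struct gk20a {
	bool mm_force_128k_pmu_vm;
};

/* Stand-in for the new force_128K_pmu_vm field in struct gk20a_platform. */
struct gk20a_platform {
	bool force_128K_pmu_vm;
};

/* Mirrors the __nvgpu_set_enabled() call added to nvgpu_init_mm_vars(). */
static void init_mm_vars(struct gk20a *g, const struct gk20a_platform *p)
{
	g->mm_force_128k_pmu_vm = p->force_128K_pmu_vm;
}

/* Mirrors the big_page_size override added to nvgpu_init_system_vm(). */
static unsigned int pmu_vm_big_page_size(const struct gk20a *g,
					 unsigned int default_big_page_size)
{
	if (g->mm_force_128k_pmu_vm)
		return SZ_128K;
	return default_big_page_size;
}

int main(void)
{
	/* gm20b sets the WAR; other platforms leave it false. */
	struct gk20a_platform gm20b = { .force_128K_pmu_vm = true };
	struct gk20a g;

	init_mm_vars(&g, &gm20b);
	printf("PMU VM big page size: %uK\n",
	       pmu_vm_big_page_size(&g, SZ_64K) / 1024u);
	return 0;
}

With the flag left clear the default big page size is used unchanged, which matches how the diff leaves non-gm20b platforms unaffected.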
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/mm.c                   | 14
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/enabled.h          |  2
-rw-r--r--  drivers/gpu/nvgpu/os/linux/driver_common.c         |  2
-rw-r--r--  drivers/gpu/nvgpu/os/linux/platform_gk20a.h        |  2
-rw-r--r--  drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c  |  1
5 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 0608d66a..2e46e211 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -202,13 +202,21 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm)
 	struct gk20a *g = gk20a_from_mm(mm);
 	struct nvgpu_mem *inst_block = &mm->pmu.inst_block;
 	u32 big_page_size = g->ops.mm.get_default_big_page_size();
-	u32 low_hole, aperture_size;
+	u64 low_hole, aperture_size;
+
+	/*
+	 * For some reason the maxwell PMU code is dependent on the large page
+	 * size. No reason AFAICT for this. Probably a bug somewhere.
+	 */
+	if (nvgpu_is_enabled(g, NVGPU_MM_FORCE_128K_PMU_VM)) {
+		big_page_size = SZ_128K;
+	}
 
 	/*
 	 * No user region - so we will pass that as zero sized.
 	 */
-	low_hole = SZ_4K * 16;
-	aperture_size = GK20A_PMU_VA_SIZE * 2;
+	low_hole = SZ_4K * 16UL;
+	aperture_size = GK20A_PMU_VA_SIZE;
 
 	mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
 	nvgpu_log_info(g, "pmu vm size = 0x%x", mm->pmu.aperture_size);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/enabled.h b/drivers/gpu/nvgpu/include/nvgpu/enabled.h
index c0fb9218..a0b738e0 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/enabled.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/enabled.h
@@ -81,6 +81,8 @@ struct gk20a;
 #define NVGPU_USE_COHERENT_SYSMEM		26
 /* Use physical scatter tables instead of IOMMU */
 #define NVGPU_MM_USE_PHYSICAL_SG		27
+/* WAR for gm20b chips. */
+#define NVGPU_MM_FORCE_128K_PMU_VM		28
 
 /*
  * Host flags
diff --git a/drivers/gpu/nvgpu/os/linux/driver_common.c b/drivers/gpu/nvgpu/os/linux/driver_common.c
index c651e394..f1eccd06 100644
--- a/drivers/gpu/nvgpu/os/linux/driver_common.c
+++ b/drivers/gpu/nvgpu/os/linux/driver_common.c
@@ -215,6 +215,8 @@ static void nvgpu_init_mm_vars(struct gk20a *g)
 			    platform->unified_memory);
 	__nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
 			    platform->unify_address_spaces);
+	__nvgpu_set_enabled(g, NVGPU_MM_FORCE_128K_PMU_VM,
+			    platform->force_128K_pmu_vm);
 
 	nvgpu_mutex_init(&g->mm.tlb_lock);
 	nvgpu_mutex_init(&g->mm.priv_lock);
diff --git a/drivers/gpu/nvgpu/os/linux/platform_gk20a.h b/drivers/gpu/nvgpu/os/linux/platform_gk20a.h
index a4c3eca3..f3e80b8c 100644
--- a/drivers/gpu/nvgpu/os/linux/platform_gk20a.h
+++ b/drivers/gpu/nvgpu/os/linux/platform_gk20a.h
@@ -244,6 +244,8 @@ struct gk20a_platform {
 	bool honors_aperture;
 	/* unified or split memory with separate vidmem? */
 	bool unified_memory;
+	/* WAR for gm20b chips. */
+	bool force_128K_pmu_vm;
 
 	/*
 	 * DMA mask for Linux (both coh and non-coh). If not set defaults to
diff --git a/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c
index 432af108..033563dc 100644
--- a/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c
@@ -952,6 +952,7 @@ struct gk20a_platform gm20b_tegra_platform = {
 
 	.unified_memory = true,
 	.dma_mask = DMA_BIT_MASK(34),
+	.force_128K_pmu_vm = true,
 
 	.secure_buffer_size = 335872,
 };