author     Deepak Nibade <dnibade@nvidia.com>  2018-08-23 07:16:23 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-09-06 19:12:17 -0400
commit     577c69322eeafe5c5cf20c3038dc7b700226c202 (patch)
tree       d497ad3f8d87f60afeee9ba04715bdaf2b56fce8 /drivers/gpu/nvgpu/common/mm/vidmem.c
parent     eb473aa0b1ad2a65195950907438a0de6e53d527 (diff)
gpu: nvgpu: increase bootstrap vidmem carveout to 256M
We currently have a bootstrap carveout in vidmem of size 16M, with its base
address at {total_vidmem_size - 256M}. This design splits the rest of the
vidmem into two chunks, and the carveout itself is too small for the vidmem
allocations needed during boot.

Hence increase the bootstrap vidmem carveout to 256M and move it to the end
of the entire vidmem. Rename the carveout from wpr_co to bootstrap_co, which
is more appropriate.

Also update __nvgpu_vidmem_do_clear_all() to clear only one chunk of vidmem
instead of two.

Bug 2180284
Jira NVGPUT-12

Change-Id: I9c8d62bcd705c7112385df3d4f714e0190b48e17
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1805466
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
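For reference, here is a minimal standalone sketch (not nvgpu code) of the layout
arithmetic this patch changes. The 4 GB vidmem size and all variable names are
illustrative assumptions, not values taken from the driver:

#include <stdio.h>
#include <stdint.h>

#define SZ_16M  (16ULL << 20)
#define SZ_256M (256ULL << 20)

int main(void)
{
        uint64_t vidmem_size = 4ULL << 30;   /* assumed example size: 4 GB */

        /* Old layout: a 16M carveout based at (size - 256M) left two
         * separate chunks of allocatable vidmem around it. */
        uint64_t old_co_base = vidmem_size - SZ_256M;
        uint64_t old_co_end  = old_co_base + SZ_16M;
        printf("old: chunk1 [0x0 .. 0x%llx), carveout [0x%llx .. 0x%llx), chunk2 [0x%llx .. 0x%llx)\n",
               (unsigned long long)old_co_base,
               (unsigned long long)old_co_base, (unsigned long long)old_co_end,
               (unsigned long long)old_co_end, (unsigned long long)vidmem_size);

        /* New layout: a 256M carveout at the very top of vidmem, so the
         * remainder is one contiguous chunk and a single CE memset clears it. */
        uint64_t new_co_base = vidmem_size - SZ_256M;
        printf("new: chunk  [0x0 .. 0x%llx), carveout [0x%llx .. 0x%llx)\n",
               (unsigned long long)new_co_base,
               (unsigned long long)new_co_base, (unsigned long long)vidmem_size);

        return 0;
}

In the old scheme the carveout ended 240M below the top of vidmem, which is why
__nvgpu_vidmem_do_clear_all() needed two memsets, one below and one above the
carveout. With the carveout covering the top 256M exactly, a single
gk20a_ce_execute_ops() call covers all allocatable vidmem, which is what the
diff below reflects.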
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vidmem.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vidmem.c  41
1 file changed, 12 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index 28a46ff0..290d6f99 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -86,7 +86,6 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g)
 {
         struct mm_gk20a *mm = &g->mm;
         struct gk20a_fence *gk20a_fence_out = NULL;
-        u64 region2_base = 0;
         int err = 0;
 
         if (mm->vidmem.ce_ctx_id == (u32)~0)
@@ -103,28 +102,10 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g)
                         NVGPU_CE_DST_LOCATION_LOCAL_FB,
                         NVGPU_CE_MEMSET,
                         0,
-                        NULL);
-        if (err) {
-                nvgpu_err(g,
-                        "Failed to clear vidmem region 1 : %d", err);
-                return err;
-        }
-
-        region2_base = mm->vidmem.bootstrap_base + mm->vidmem.bootstrap_size;
-
-        err = gk20a_ce_execute_ops(g,
-                        mm->vidmem.ce_ctx_id,
-                        0,
-                        region2_base,
-                        mm->vidmem.size - region2_base,
-                        0x00000000,
-                        NVGPU_CE_DST_LOCATION_LOCAL_FB,
-                        NVGPU_CE_MEMSET,
-                        0,
                         &gk20a_fence_out);
         if (err) {
                 nvgpu_err(g,
-                        "Failed to clear vidmem region 2 : %d", err);
+                        "Failed to clear vidmem : %d", err);
                 return err;
         }
 
@@ -304,13 +285,13 @@ static int nvgpu_vidmem_clear_pending_allocs_thr(void *mm_ptr)
 int nvgpu_vidmem_init(struct mm_gk20a *mm)
 {
         struct gk20a *g = mm->g;
-        u64 bootstrap_base, bootstrap_size, base;
+        u64 bootstrap_base, base;
+        u64 bootstrap_size = SZ_256M;
         u64 default_page_size = SZ_64K;
         size_t size;
         int err;
-
-        static struct nvgpu_alloc_carveout wpr_co =
-                NVGPU_CARVEOUT("wpr-region", 0, SZ_16M);
+        static struct nvgpu_alloc_carveout bootstrap_co =
+                NVGPU_CARVEOUT("bootstrap-region", 0, 0);
 
         size = g->ops.mm.get_vidmem_size ?
                 g->ops.mm.get_vidmem_size(g) : 0;
@@ -319,9 +300,10 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
 
         vidmem_dbg(g, "init begin");
 
-        wpr_co.base = size - SZ_256M;
-        bootstrap_base = wpr_co.base;
-        bootstrap_size = SZ_16M;
+        bootstrap_co.base = size - bootstrap_size;
+        bootstrap_co.length = bootstrap_size;
+
+        bootstrap_base = bootstrap_co.base;
         base = default_page_size;
 
         /*
@@ -346,7 +328,7 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
         }
 
         /* Reserve bootstrap region in vidmem allocator */
-        nvgpu_alloc_reserve_carveout(&g->mm.vidmem.allocator, &wpr_co);
+        nvgpu_alloc_reserve_carveout(&g->mm.vidmem.allocator, &bootstrap_co);
 
         mm->vidmem.base = base;
         mm->vidmem.size = size - base;
@@ -388,7 +370,8 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
                 mm->vidmem.bootstrap_base + mm->vidmem.bootstrap_size);
         vidmem_dbg(g, "VIDMEM carveouts:");
         vidmem_dbg(g, "  0x%-10llx -> 0x%-10llx %s",
-                wpr_co.base, wpr_co.base + wpr_co.length, wpr_co.name);
+                bootstrap_co.base, bootstrap_co.base + bootstrap_co.length,
+                bootstrap_co.name);
 
         return 0;
 