summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2017-10-09 20:45:02 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-10-20 22:03:59 -0400
commit8aacfb1da4ef0a7286f598575f7d556269a0bce3 (patch)
treec26d8f7ed26002f521d3afdb964cd3c12639fcf9 /drivers/gpu/nvgpu/common/mm
parente26ce10cc6b59314ccf5931a8c5b46a9e57b085a (diff)
gpu: nvgpu: Add VIDMEM debugging
Add some VIDMEM debugging to help track the background free thread and allocs/frees.

JIRA NVGPU-30
JIRA NVGPU-138

Change-Id: I88471b29d2a42c104666b111d0d3014110c9d56c
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1576330
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r--drivers/gpu/nvgpu/common/mm/vidmem.c39
1 file changed, 37 insertions, 2 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index 60b819d7..2fe3c492 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -85,6 +85,8 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g)
85 if (mm->vidmem.ce_ctx_id == (u32)~0) 85 if (mm->vidmem.ce_ctx_id == (u32)~0)
86 return -EINVAL; 86 return -EINVAL;
87 87
88 vidmem_dbg(g, "Clearing all VIDMEM:");
89
88 err = gk20a_ce_execute_ops(g, 90 err = gk20a_ce_execute_ops(g,
89 mm->vidmem.ce_ctx_id, 91 mm->vidmem.ce_ctx_id,
90 0, 92 0,
@@ -144,6 +146,8 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g)
144 146
145 mm->vidmem.cleared = true; 147 mm->vidmem.cleared = true;
146 148
149 vidmem_dbg(g, "Done!");
150
147 return 0; 151 return 0;
148} 152}
149 153
@@ -163,16 +167,24 @@ void nvgpu_vidmem_thread_pause_sync(struct mm_gk20a *mm)
163 */ 167 */
164 if (nvgpu_atomic_inc_return(&mm->vidmem.pause_count) == 1) 168 if (nvgpu_atomic_inc_return(&mm->vidmem.pause_count) == 1)
165 nvgpu_mutex_acquire(&mm->vidmem.clearing_thread_lock); 169 nvgpu_mutex_acquire(&mm->vidmem.clearing_thread_lock);
170
171 vidmem_dbg(mm->g, "Clearing thread paused; new count=%d",
172 nvgpu_atomic_read(&mm->vidmem.pause_count));
166} 173}
167 174
168void nvgpu_vidmem_thread_unpause(struct mm_gk20a *mm) 175void nvgpu_vidmem_thread_unpause(struct mm_gk20a *mm)
169{ 176{
177 vidmem_dbg(mm->g, "Unpausing clearing thread; current count=%d",
178 nvgpu_atomic_read(&mm->vidmem.pause_count));
179
170 /* 180 /*
171 * And on the last decrement (1 -> 0) release the pause lock and let 181 * And on the last decrement (1 -> 0) release the pause lock and let
172 * the vidmem clearing thread continue. 182 * the vidmem clearing thread continue.
173 */ 183 */
174 if (nvgpu_atomic_dec_return(&mm->vidmem.pause_count) == 0) 184 if (nvgpu_atomic_dec_return(&mm->vidmem.pause_count) == 0) {
175 nvgpu_mutex_release(&mm->vidmem.clearing_thread_lock); 185 nvgpu_mutex_release(&mm->vidmem.clearing_thread_lock);
186 vidmem_dbg(mm->g, " > Clearing thread really unpaused!");
187 }
176} 188}
177 189
178int nvgpu_vidmem_clear_list_enqueue(struct gk20a *g, struct nvgpu_mem *mem) 190int nvgpu_vidmem_clear_list_enqueue(struct gk20a *g, struct nvgpu_mem *mem)
@@ -222,6 +234,8 @@ static void nvgpu_vidmem_clear_pending_allocs(struct mm_gk20a *mm)
222 struct gk20a *g = mm->g; 234 struct gk20a *g = mm->g;
223 struct nvgpu_mem *mem; 235 struct nvgpu_mem *mem;
224 236
237 vidmem_dbg(g, "Running VIDMEM clearing thread:");
238
225 while ((mem = nvgpu_vidmem_clear_list_dequeue(mm)) != NULL) { 239 while ((mem = nvgpu_vidmem_clear_list_dequeue(mm)) != NULL) {
226 nvgpu_vidmem_clear(g, mem); 240 nvgpu_vidmem_clear(g, mem);
227 241
@@ -233,6 +247,8 @@ static void nvgpu_vidmem_clear_pending_allocs(struct mm_gk20a *mm)
233 __nvgpu_mem_free_vidmem_alloc(g, mem); 247 __nvgpu_mem_free_vidmem_alloc(g, mem);
234 nvgpu_kfree(g, mem); 248 nvgpu_kfree(g, mem);
235 } 249 }
250
251 vidmem_dbg(g, "Done!");
236} 252}
237 253
238static int nvgpu_vidmem_clear_pending_allocs_thr(void *mm_ptr) 254static int nvgpu_vidmem_clear_pending_allocs_thr(void *mm_ptr)
@@ -295,6 +311,8 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
295 if (!size) 311 if (!size)
296 return 0; 312 return 0;
297 313
314 vidmem_dbg(g, "init begin");
315
298 wpr_co.base = size - SZ_256M; 316 wpr_co.base = size - SZ_256M;
299 bootstrap_base = wpr_co.base; 317 bootstrap_base = wpr_co.base;
300 bootstrap_size = SZ_16M; 318 bootstrap_size = SZ_16M;
@@ -354,7 +372,16 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
354 if (err) 372 if (err)
355 goto fail; 373 goto fail;
356 374
357 gk20a_dbg_info("registered vidmem: %zu MB", size / SZ_1M); 375 vidmem_dbg(g, "VIDMEM Total: %zu MB", size >> 20);
376 vidmem_dbg(g, "VIDMEM Ranges:");
377 vidmem_dbg(g, " 0x%-10llx -> 0x%-10llx Primary",
378 mm->vidmem.base, mm->vidmem.base + mm->vidmem.size);
379 vidmem_dbg(g, " 0x%-10llx -> 0x%-10llx Bootstrap",
380 mm->vidmem.bootstrap_base,
381 mm->vidmem.bootstrap_base + mm->vidmem.bootstrap_size);
382 vidmem_dbg(g, "VIDMEM carveouts:");
383 vidmem_dbg(g, " 0x%-10llx -> 0x%-10llx %s",
384 wpr_co.base, wpr_co.base + wpr_co.length, wpr_co.name);
358 385
359 return 0; 386 return 0;
360 387
@@ -393,6 +420,8 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
393 420
394 alloc = mem->vidmem_alloc; 421 alloc = mem->vidmem_alloc;
395 422
423 vidmem_dbg(g, "Clearing VIDMEM buf:");
424
396 nvgpu_sgt_for_each_sgl(sgl, &alloc->sgt) { 425 nvgpu_sgt_for_each_sgl(sgl, &alloc->sgt) {
397 if (gk20a_last_fence) 426 if (gk20a_last_fence)
398 gk20a_fence_put(gk20a_last_fence); 427 gk20a_fence_put(gk20a_last_fence);
@@ -415,6 +444,10 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
415 return err; 444 return err;
416 } 445 }
417 446
447 vidmem_dbg(g, " > [0x%llx +0x%llx]",
448 nvgpu_sgt_get_phys(&alloc->sgt, sgl),
449 nvgpu_sgt_get_length(&alloc->sgt, sgl));
450
418 gk20a_last_fence = gk20a_fence_out; 451 gk20a_last_fence = gk20a_fence_out;
419 } 452 }
420 453
@@ -437,6 +470,8 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
437 "fence wait failed for CE execute ops"); 470 "fence wait failed for CE execute ops");
438 } 471 }
439 472
473 vidmem_dbg(g, " Done");
474
440 return err; 475 return err;
441} 476}
442 477