author	Alex Waterman <alexw@nvidia.com>	2017-08-23 20:25:27 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-08-28 20:04:32 -0400
commit	6365040db35fd01c8ebff39bd9dbc6c73c48fb17 (patch)
tree	a9396e945e59ecfab0fc6a4d018a8ba63e7438d4 /drivers/gpu/nvgpu/vgpu
parent	8f2f979428e1e5a4ff71d91e30ba17813e6ee7be (diff)
gpu: vgpu: gp10b: Add map failure debugging
The vGPU mapping code in gp10b gives very little debugging info when
there's a failure. This change adds much more detail to the error
printing so that these failures can more easily be debugged without
needing to run the virtual guest locally.

Change-Id: Ibb4412fd4ab322b366f0e08eaa399b7b90ea22c7
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1544506
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
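For readers skimming the change without the source tree handy, here is a minimal, standalone C sketch of the logging pattern the patch introduces: on a map failure, print one summary line describing the request, then dump each physical chunk that was being mapped. The demo_* names, the simplified parameter list, and the use of fprintf() are illustrative stand-ins only; the actual driver code in the diff below uses nvgpu_err() and the vgpu mem_desc array.

/*
 * Standalone sketch of the failure-path logging pattern added by this
 * patch. All names here are illustrative stand-ins, not nvgpu code.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_mem_desc {		/* stand-in for the vgpu mem_desc entries */
	uint64_t addr;
	uint64_t length;
};

static void demo_log_map_failure(const char *vm_name, uint64_t map_offset,
				 uint64_t buffer_size, uint64_t buffer_offset,
				 uint32_t page_size_kb, const char *perm,
				 const struct demo_mem_desc *descs,
				 uint32_t desc_count)
{
	uint32_t i;

	/* One summary line describing the request that failed. */
	fprintf(stderr,
		" Map: %-5s GPU virt %#-12llx +%#-9llx "
		"phys offset: %#-4llx; pgsz: %3ukb perm=%-2s\n",
		vm_name,
		(unsigned long long)map_offset,
		(unsigned long long)buffer_size,
		(unsigned long long)buffer_offset,
		(unsigned)page_size_kb, perm);

	/* Then one line per physical chunk that was being mapped. */
	for (i = 0; i < desc_count; i++)
		fprintf(stderr, " > 0x%010llx + 0x%llx\n",
			(unsigned long long)descs[i].addr,
			(unsigned long long)descs[i].length);
}

int main(void)
{
	/* Hypothetical values, only to exercise the sketch. */
	struct demo_mem_desc descs[] = {
		{ 0x1000000000ULL, 0x10000ULL },
		{ 0x1000020000ULL, 0x08000ULL },
	};

	demo_log_map_failure("user", 0x400000000ULL, 0x18000ULL, 0ULL,
			     64, "rw", descs, 2);
	return 0;
}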
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c	18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index 347f27a5..85c436e5 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -63,7 +63,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
 	u64 space_to_skip = buffer_offset;
 	u64 buffer_size = 0;
-	u32 mem_desc_count = 0;
+	u32 mem_desc_count = 0, i;
 	struct scatterlist *sgl;
 	void *handle = NULL;
 	size_t oob_size;
@@ -79,6 +79,8 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	if (space_to_skip & (page_size - 1))
 		return 0;
 
+	memset(&msg, 0, sizeof(msg));
+
 	/* Allocate (or validate when map_offset != 0) the virtual address. */
 	if (!map_offset) {
 		map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
@@ -172,7 +174,19 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 fail:
 	if (handle)
 		tegra_gr_comm_oob_put_ptr(handle);
-	nvgpu_err(g, "%s: failed with err=%d", __func__, err);
+	nvgpu_err(g, "Failed: err=%d, msg.ret=%d", err, msg.ret);
+	nvgpu_err(g,
+		  " Map: %-5s GPU virt %#-12llx +%#-9llx "
+		  "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | "
+		  "kind=%#02x APT=%-6s",
+		  vm->name, map_offset, buffer_size, buffer_offset,
+		  vm->gmmu_page_sizes[pgsz_idx] >> 10,
+		  nvgpu_gmmu_perm_str(rw_flag),
+		  kind_v, "SYSMEM");
+	for (i = 0; i < mem_desc_count; i++)
+		nvgpu_err(g, " > 0x%010llx + 0x%llx",
+			  mem_desc[i].addr, mem_desc[i].length);
+
 	return 0;
 }
 
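A note on the memset(&msg, 0, sizeof(msg)) added in the second hunk (this reading is inferred from the hunks shown here, not from the full function body): the fail path now prints msg.ret, and it can be reached before any RPC reply has filled msg in, so zero-initializing the structure keeps that print well defined. A hypothetical, standalone sketch of the same pattern, with demo_* stand-ins rather than nvgpu types:

/*
 * Illustration only: why zeroing the reply structure up front matters
 * when an error path prints a field that may never have been set.
 */
#include <stdio.h>
#include <string.h>

struct demo_msg { int ret; };	/* stand-in for the vgpu RPC message */

static int demo_map(int fail_early)
{
	struct demo_msg msg;
	int err = 0;

	memset(&msg, 0, sizeof(msg));	/* keeps msg.ret defined on early exit */

	if (fail_early) {
		err = -22;		/* e.g. an -EINVAL before the RPC is sent */
		goto fail;
	}

	/* ... the RPC reply would fill msg.ret in here ... */
	return 0;

fail:
	fprintf(stderr, "Failed: err=%d, msg.ret=%d\n", err, msg.ret);
	return err;
}

int main(void)
{
	return demo_map(1) ? 1 : 0;
}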