summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorThomas Fleury <tfleury@nvidia.com>2018-03-07 12:23:53 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2018-03-13 03:04:16 -0400
commit6c33a010d8e3983cc3504e073cd552f952440aa1 (patch)
treec86e364d468320c8b347b58b6a711f773b97ae42
parentf94c9d19c19883ca2b60acb8a000b34b32da0aa7 (diff)
gpu: nvgpu: add placeholder for IPA to PA
Add __nvgpu_sgl_phys function that can be used to implement IPA to PA
translation in a subsequent change. Adapt existing function prototypes to
add pointer to gpu context, as we will need to check if IPA to PA
translation is needed.

JIRA EVLR-2442
Bug 200392719

Change-Id: I5a734c958c8277d1bf673c020dafb31263f142d6
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1673142
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--drivers/gpu/nvgpu/common/linux/nvgpu_mem.c19
-rw-r--r--drivers/gpu/nvgpu/common/mm/gmmu.c4
-rw-r--r--drivers/gpu/nvgpu/common/mm/nvgpu_mem.c7
-rw-r--r--drivers/gpu/nvgpu/common/mm/page_allocator.c11
-rw-r--r--drivers/gpu/nvgpu/common/mm/vidmem.c4
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c5
-rw-r--r--drivers/gpu/nvgpu/gk20a/pramin_gk20a.c4
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h5
-rw-r--r--drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c2
9 files changed, 37 insertions, 24 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index d4549e1b..63a14f40 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -34,6 +34,11 @@
34#include "gk20a/gk20a.h" 34#include "gk20a/gk20a.h"
35#include "gk20a/mm_gk20a.h" 35#include "gk20a/mm_gk20a.h"
36 36
37static inline u64 __nvgpu_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
38{
39 return sg_phys((struct scatterlist *)sgl);
40}
41
37int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem) 42int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
38{ 43{
39 void *cpu_va; 44 void *cpu_va;
@@ -309,10 +314,12 @@ u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl)
309{ 314{
310 if (nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) || 315 if (nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ||
311 !nvgpu_iommuable(g)) 316 !nvgpu_iommuable(g))
312 return g->ops.mm.gpu_phys_addr(g, NULL, sg_phys(sgl)); 317 return g->ops.mm.gpu_phys_addr(g, NULL,
318 __nvgpu_sgl_phys(g, (struct nvgpu_sgl *)sgl));
313 319
314 if (sg_dma_address(sgl) == 0) 320 if (sg_dma_address(sgl) == 0)
315 return g->ops.mm.gpu_phys_addr(g, NULL, sg_phys(sgl)); 321 return g->ops.mm.gpu_phys_addr(g, NULL,
322 __nvgpu_sgl_phys(g, (struct nvgpu_sgl *)sgl));
316 323
317 if (sg_dma_address(sgl) == DMA_ERROR_CODE) 324 if (sg_dma_address(sgl) == DMA_ERROR_CODE)
318 return 0; 325 return 0;
@@ -376,7 +383,7 @@ u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem)
376 if (mem->aperture == APERTURE_VIDMEM) 383 if (mem->aperture == APERTURE_VIDMEM)
377 return nvgpu_mem_get_addr(g, mem); 384 return nvgpu_mem_get_addr(g, mem);
378 385
379 return sg_phys(mem->priv.sgt->sgl); 386 return __nvgpu_sgl_phys(g, (struct nvgpu_sgl *)mem->priv.sgt->sgl);
380} 387}
381 388
382/* 389/*
@@ -501,9 +508,9 @@ static struct nvgpu_sgl *nvgpu_mem_linux_sgl_next(struct nvgpu_sgl *sgl)
501 return (struct nvgpu_sgl *)sg_next((struct scatterlist *)sgl); 508 return (struct nvgpu_sgl *)sg_next((struct scatterlist *)sgl);
502} 509}
503 510
504static u64 nvgpu_mem_linux_sgl_phys(struct nvgpu_sgl *sgl) 511static u64 nvgpu_mem_linux_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
505{ 512{
506 return (u64)sg_phys((struct scatterlist *)sgl); 513 return (u64)__nvgpu_sgl_phys(g, sgl);
507} 514}
508 515
509static u64 nvgpu_mem_linux_sgl_dma(struct nvgpu_sgl *sgl) 516static u64 nvgpu_mem_linux_sgl_dma(struct nvgpu_sgl *sgl)
@@ -522,7 +529,7 @@ static u64 nvgpu_mem_linux_sgl_gpu_addr(struct gk20a *g,
522{ 529{
523 if (sg_dma_address((struct scatterlist *)sgl) == 0) 530 if (sg_dma_address((struct scatterlist *)sgl) == 0)
524 return g->ops.mm.gpu_phys_addr(g, attrs, 531 return g->ops.mm.gpu_phys_addr(g, attrs,
525 sg_phys((struct scatterlist *)sgl)); 532 __nvgpu_sgl_phys(g, sgl));
526 533
527 if (sg_dma_address((struct scatterlist *)sgl) == DMA_ERROR_CODE) 534 if (sg_dma_address((struct scatterlist *)sgl) == DMA_ERROR_CODE)
528 return 0; 535 return 0;
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 3b57e781..44e540dc 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -543,7 +543,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
543 } 543 }
544 544
545 phys_addr = g->ops.mm.gpu_phys_addr(g, attrs, 545 phys_addr = g->ops.mm.gpu_phys_addr(g, attrs,
546 nvgpu_sgt_get_phys(sgt, sgl)) + space_to_skip; 546 nvgpu_sgt_get_phys(g, sgt, sgl)) + space_to_skip;
547 chunk_length = min(length, 547 chunk_length = min(length,
548 nvgpu_sgt_get_length(sgt, sgl) - space_to_skip); 548 nvgpu_sgt_get_length(sgt, sgl) - space_to_skip);
549 549
@@ -629,7 +629,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
629 sgt ? "MAP" : "UNMAP", 629 sgt ? "MAP" : "UNMAP",
630 virt_addr, 630 virt_addr,
631 length, 631 length,
632 sgt ? nvgpu_sgt_get_phys(sgt, sgt->sgl) : 0, 632 sgt ? nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0,
633 space_to_skip, 633 space_to_skip,
634 page_size >> 10, 634 page_size >> 10,
635 nvgpu_gmmu_perm_str(attrs->rw_flag), 635 nvgpu_gmmu_perm_str(attrs->rw_flag),
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 2b32d869..4def4938 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -81,9 +81,10 @@ struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
81 return sgt->ops->sgl_next(sgl); 81 return sgt->ops->sgl_next(sgl);
82} 82}
83 83
84u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl) 84u64 nvgpu_sgt_get_phys(struct gk20a *g, struct nvgpu_sgt *sgt,
85 struct nvgpu_sgl *sgl)
85{ 86{
86 return sgt->ops->sgl_phys(sgl); 87 return sgt->ops->sgl_phys(g, sgl);
87} 88}
88 89
89u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl) 90u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
@@ -156,7 +157,7 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
156 * of the SGT. 157 * of the SGT.
157 */ 158 */
158 nvgpu_sgt_for_each_sgl(sgl, sgt) { 159 nvgpu_sgt_for_each_sgl(sgl, sgt) {
159 chunk_align = 1ULL << __ffs(nvgpu_sgt_get_phys(sgt, sgl) | 160 chunk_align = 1ULL << __ffs(nvgpu_sgt_get_phys(g, sgt, sgl) |
160 nvgpu_sgt_get_length(sgt, sgl)); 161 nvgpu_sgt_get_length(sgt, sgl));
161 162
162 if (align) 163 if (align)
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 13ccc48b..773d33ef 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -160,7 +160,7 @@ static struct nvgpu_sgl *nvgpu_page_alloc_sgl_next(struct nvgpu_sgl *sgl)
160 return (struct nvgpu_sgl *)sgl_impl->next; 160 return (struct nvgpu_sgl *)sgl_impl->next;
161} 161}
162 162
163static u64 nvgpu_page_alloc_sgl_phys(struct nvgpu_sgl *sgl) 163static u64 nvgpu_page_alloc_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
164{ 164{
165 struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl; 165 struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
166 166
@@ -231,11 +231,12 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
231 bool free_buddy_alloc) 231 bool free_buddy_alloc)
232{ 232{
233 struct nvgpu_sgl *sgl = alloc->sgt.sgl; 233 struct nvgpu_sgl *sgl = alloc->sgt.sgl;
234 struct gk20a *g = a->owner->g;
234 235
235 if (free_buddy_alloc) { 236 if (free_buddy_alloc) {
236 while (sgl) { 237 while (sgl) {
237 nvgpu_free(&a->source_allocator, 238 nvgpu_free(&a->source_allocator,
238 nvgpu_sgt_get_phys(&alloc->sgt, sgl)); 239 nvgpu_sgt_get_phys(g, &alloc->sgt, sgl));
239 sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl); 240 sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl);
240 } 241 }
241 } 242 }
@@ -615,6 +616,7 @@ fail:
615static struct nvgpu_page_alloc *__nvgpu_alloc_pages( 616static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
616 struct nvgpu_page_allocator *a, u64 len) 617 struct nvgpu_page_allocator *a, u64 len)
617{ 618{
619 struct gk20a *g = a->owner->g;
618 struct nvgpu_page_alloc *alloc = NULL; 620 struct nvgpu_page_alloc *alloc = NULL;
619 struct nvgpu_sgl *sgl; 621 struct nvgpu_sgl *sgl;
620 u64 pages; 622 u64 pages;
@@ -635,7 +637,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
635 while (sgl) { 637 while (sgl) {
636 palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx", 638 palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx",
637 i++, 639 i++,
638 nvgpu_sgt_get_phys(&alloc->sgt, sgl), 640 nvgpu_sgt_get_phys(g, &alloc->sgt, sgl),
639 nvgpu_sgt_get_length(&alloc->sgt, sgl)); 641 nvgpu_sgt_get_length(&alloc->sgt, sgl));
640 sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl); 642 sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl);
641 } 643 }
@@ -779,6 +781,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
779 struct nvgpu_page_allocator *a = page_allocator(__a); 781 struct nvgpu_page_allocator *a = page_allocator(__a);
780 struct nvgpu_page_alloc *alloc = NULL; 782 struct nvgpu_page_alloc *alloc = NULL;
781 struct nvgpu_sgl *sgl; 783 struct nvgpu_sgl *sgl;
784 struct gk20a *g = a->owner->g;
782 u64 aligned_len, pages; 785 u64 aligned_len, pages;
783 int i = 0; 786 int i = 0;
784 787
@@ -802,7 +805,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
802 while (sgl) { 805 while (sgl) {
803 palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx", 806 palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx",
804 i++, 807 i++,
805 nvgpu_sgt_get_phys(&alloc->sgt, sgl), 808 nvgpu_sgt_get_phys(g, &alloc->sgt, sgl),
806 nvgpu_sgt_get_length(&alloc->sgt, sgl)); 809 nvgpu_sgt_get_length(&alloc->sgt, sgl));
807 sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl); 810 sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl);
808 } 811 }
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index e4137090..f534f45c 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -430,7 +430,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
430 err = gk20a_ce_execute_ops(g, 430 err = gk20a_ce_execute_ops(g,
431 g->mm.vidmem.ce_ctx_id, 431 g->mm.vidmem.ce_ctx_id,
432 0, 432 0,
433 nvgpu_sgt_get_phys(&alloc->sgt, sgl), 433 nvgpu_sgt_get_phys(g, &alloc->sgt, sgl),
434 nvgpu_sgt_get_length(&alloc->sgt, sgl), 434 nvgpu_sgt_get_length(&alloc->sgt, sgl),
435 0x00000000, 435 0x00000000,
436 NVGPU_CE_DST_LOCATION_LOCAL_FB, 436 NVGPU_CE_DST_LOCATION_LOCAL_FB,
@@ -445,7 +445,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
445 } 445 }
446 446
447 vidmem_dbg(g, " > [0x%llx +0x%llx]", 447 vidmem_dbg(g, " > [0x%llx +0x%llx]",
448 nvgpu_sgt_get_phys(&alloc->sgt, sgl), 448 nvgpu_sgt_get_phys(g, &alloc->sgt, sgl),
449 nvgpu_sgt_get_length(&alloc->sgt, sgl)); 449 nvgpu_sgt_get_length(&alloc->sgt, sgl));
450 450
451 gk20a_last_fence = gk20a_fence_out; 451 gk20a_last_fence = gk20a_fence_out;
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 0def724d..5bd4dc57 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -867,6 +867,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
867 struct fifo_gk20a *f = &g->fifo; 867 struct fifo_gk20a *f = &g->fifo;
868 unsigned int chid, i; 868 unsigned int chid, i;
869 int err = 0; 869 int err = 0;
870 u64 userd_base;
870 871
871 gk20a_dbg_fn(""); 872 gk20a_dbg_fn("");
872 873
@@ -929,9 +930,9 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
929 } 930 }
930 gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va); 931 gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);
931 932
933 userd_base = nvgpu_mem_get_addr(g, &f->userd);
932 for (chid = 0; chid < f->num_channels; chid++) { 934 for (chid = 0; chid < f->num_channels; chid++) {
933 f->channel[chid].userd_iova = 935 f->channel[chid].userd_iova = userd_base +
934 nvgpu_mem_get_addr(g, &f->userd) +
935 chid * f->userd_entry_size; 936 chid * f->userd_entry_size;
936 f->channel[chid].userd_gpu_va = 937 f->channel[chid].userd_gpu_va =
937 f->userd.gpu_va + chid * f->userd_entry_size; 938 f->userd.gpu_va + chid * f->userd_entry_size;
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
index 67fd2480..a76e2580 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
@@ -34,7 +34,7 @@
34u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem, 34u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
35 struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl, u32 w) 35 struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl, u32 w)
36{ 36{
37 u64 bufbase = nvgpu_sgt_get_phys(sgt, sgl); 37 u64 bufbase = nvgpu_sgt_get_phys(g, sgt, sgl);
38 u64 addr = bufbase + w * sizeof(u32); 38 u64 addr = bufbase + w * sizeof(u32);
39 u32 hi = (u32)((addr & ~(u64)0xfffff) 39 u32 hi = (u32)((addr & ~(u64)0xfffff)
40 >> bus_bar0_window_target_bar0_window_base_shift_v()); 40 >> bus_bar0_window_target_bar0_window_base_shift_v());
@@ -48,7 +48,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
48 gk20a_dbg(gpu_dbg_mem, 48 gk20a_dbg(gpu_dbg_mem,
49 "0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)", 49 "0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)",
50 hi, lo, mem, sgl, bufbase, 50 hi, lo, mem, sgl, bufbase,
51 bufbase + nvgpu_sgt_get_phys(sgt, sgl), 51 bufbase + nvgpu_sgt_get_phys(g, sgt, sgl),
52 nvgpu_sgt_get_length(sgt, sgl)); 52 nvgpu_sgt_get_length(sgt, sgl));
53 53
54 WARN_ON(!bufbase); 54 WARN_ON(!bufbase);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
index 373c3eef..be0fa6bf 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
@@ -68,7 +68,7 @@ struct nvgpu_sgl;
68 68
69struct nvgpu_sgt_ops { 69struct nvgpu_sgt_ops {
70 struct nvgpu_sgl *(*sgl_next)(struct nvgpu_sgl *sgl); 70 struct nvgpu_sgl *(*sgl_next)(struct nvgpu_sgl *sgl);
71 u64 (*sgl_phys)(struct nvgpu_sgl *sgl); 71 u64 (*sgl_phys)(struct gk20a *g, struct nvgpu_sgl *sgl);
72 u64 (*sgl_dma)(struct nvgpu_sgl *sgl); 72 u64 (*sgl_dma)(struct nvgpu_sgl *sgl);
73 u64 (*sgl_length)(struct nvgpu_sgl *sgl); 73 u64 (*sgl_length)(struct nvgpu_sgl *sgl);
74 u64 (*sgl_gpu_addr)(struct gk20a *g, struct nvgpu_sgl *sgl, 74 u64 (*sgl_gpu_addr)(struct gk20a *g, struct nvgpu_sgl *sgl,
@@ -254,7 +254,8 @@ struct nvgpu_sgt *nvgpu_sgt_create_from_mem(struct gk20a *g,
254 254
255struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, 255struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
256 struct nvgpu_sgl *sgl); 256 struct nvgpu_sgl *sgl);
257u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl); 257u64 nvgpu_sgt_get_phys(struct gk20a *g, struct nvgpu_sgt *sgt,
258 struct nvgpu_sgl *sgl);
258u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl); 259u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
259u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl); 260u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
260u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt, 261u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt,
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index ca517975..e615c486 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -122,7 +122,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
122 continue; 122 continue;
123 } 123 }
124 124
125 phys_addr = nvgpu_sgt_get_phys(sgt, sgl) + space_to_skip; 125 phys_addr = nvgpu_sgt_get_phys(g, sgt, sgl) + space_to_skip;
126 chunk_length = min(size, 126 chunk_length = min(size,
127 nvgpu_sgt_get_length(sgt, sgl) - space_to_skip); 127 nvgpu_sgt_get_length(sgt, sgl) - space_to_skip);
128 128