summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKonsta Holtta <kholtta@nvidia.com>2018-01-22 05:19:08 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2018-03-01 15:24:06 -0500
commitba8fa334f40223ad491ab61a6c072a276017787f (patch)
treeeb06807ed4fdee3a708f66949c6daf82f193a531
parent5a35a95654d561fce09a3b9abf6b82bb7a29d74b (diff)
gpu: nvgpu: introduce explicit nvgpu_sgl type
The operations in struct nvgpu_sgt_ops have a scatter-gather list (sgl) argument which is a void pointer. Change the type signatures to take struct nvgpu_sgl * which is an opaque marker type that makes it more difficult to pass around wrong arguments, as anything goes for void *. Explicit types also add self-documentation to the code. For some added safety, some explicit type casts are now required in implementors of the nvgpu_sgt_ops interface when converting between the general nvgpu_sgl type and implementation-specific types. This is not purely a bad thing because the casts explain clearly where type conversions are happening. Jira NVGPU-30 Jira NVGPU-52 Jira NVGPU-305 Change-Id: Ic64eed6d2d39ca5786e62b172ddb7133af16817a Signed-off-by: Konsta Holtta <kholtta@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1643555 GVS: Gerrit_Virtual_Submit Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--drivers/gpu/nvgpu/common/linux/nvgpu_mem.c15
-rw-r--r--drivers/gpu/nvgpu/common/mm/gmmu.c2
-rw-r--r--drivers/gpu/nvgpu/common/mm/nvgpu_mem.c16
-rw-r--r--drivers/gpu/nvgpu/common/mm/page_allocator.c52
-rw-r--r--drivers/gpu/nvgpu/common/mm/vidmem.c4
-rw-r--r--drivers/gpu/nvgpu/common/pramin.c4
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.h5
-rw-r--r--drivers/gpu/nvgpu/gk20a/pramin_gk20a.c6
-rw-r--r--drivers/gpu/nvgpu/gk20a/pramin_gk20a.h8
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h38
-rw-r--r--drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c2
11 files changed, 85 insertions, 67 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index 206b83e1..7406c4d7 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -499,27 +499,28 @@ int __nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest,
499} 499}
500#endif 500#endif
501 501
502static void *nvgpu_mem_linux_sgl_next(void *sgl) 502static struct nvgpu_sgl *nvgpu_mem_linux_sgl_next(struct nvgpu_sgl *sgl)
503{ 503{
504 return sg_next((struct scatterlist *)sgl); 504 return (struct nvgpu_sgl *)sg_next((struct scatterlist *)sgl);
505} 505}
506 506
507static u64 nvgpu_mem_linux_sgl_phys(void *sgl) 507static u64 nvgpu_mem_linux_sgl_phys(struct nvgpu_sgl *sgl)
508{ 508{
509 return (u64)sg_phys((struct scatterlist *)sgl); 509 return (u64)sg_phys((struct scatterlist *)sgl);
510} 510}
511 511
512static u64 nvgpu_mem_linux_sgl_dma(void *sgl) 512static u64 nvgpu_mem_linux_sgl_dma(struct nvgpu_sgl *sgl)
513{ 513{
514 return (u64)sg_dma_address((struct scatterlist *)sgl); 514 return (u64)sg_dma_address((struct scatterlist *)sgl);
515} 515}
516 516
517static u64 nvgpu_mem_linux_sgl_length(void *sgl) 517static u64 nvgpu_mem_linux_sgl_length(struct nvgpu_sgl *sgl)
518{ 518{
519 return (u64)((struct scatterlist *)sgl)->length; 519 return (u64)((struct scatterlist *)sgl)->length;
520} 520}
521 521
522static u64 nvgpu_mem_linux_sgl_gpu_addr(struct gk20a *g, void *sgl, 522static u64 nvgpu_mem_linux_sgl_gpu_addr(struct gk20a *g,
523 struct nvgpu_sgl *sgl,
523 struct nvgpu_gmmu_attrs *attrs) 524 struct nvgpu_gmmu_attrs *attrs)
524{ 525{
525 if (sg_dma_address((struct scatterlist *)sgl) == 0) 526 if (sg_dma_address((struct scatterlist *)sgl) == 0)
@@ -587,7 +588,7 @@ struct nvgpu_sgt *nvgpu_linux_sgt_create(struct gk20a *g, struct sg_table *sgt)
587 588
588 nvgpu_log(g, gpu_dbg_sgl, "Making Linux SGL!"); 589 nvgpu_log(g, gpu_dbg_sgl, "Making Linux SGL!");
589 590
590 nvgpu_sgt->sgl = sgt->sgl; 591 nvgpu_sgt->sgl = (struct nvgpu_sgl *)linux_sgl;
591 nvgpu_sgt->ops = &nvgpu_linux_sgt_ops; 592 nvgpu_sgt->ops = &nvgpu_linux_sgt_ops;
592 593
593 return nvgpu_sgt; 594 return nvgpu_sgt;
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index ffac324c..e1942cbd 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -477,7 +477,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
477 struct nvgpu_gmmu_attrs *attrs) 477 struct nvgpu_gmmu_attrs *attrs)
478{ 478{
479 struct gk20a *g = gk20a_from_vm(vm); 479 struct gk20a *g = gk20a_from_vm(vm);
480 void *sgl; 480 struct nvgpu_sgl *sgl;
481 int err = 0; 481 int err = 0;
482 482
483 if (!sgt) { 483 if (!sgt) {
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 73b6b2a7..f7c51f42 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -28,27 +28,29 @@
28 28
29#include "gk20a/gk20a.h" 29#include "gk20a/gk20a.h"
30 30
31void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl) 31struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
32 struct nvgpu_sgl *sgl)
32{ 33{
33 return sgt->ops->sgl_next(sgl); 34 return sgt->ops->sgl_next(sgl);
34} 35}
35 36
36u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, void *sgl) 37u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
37{ 38{
38 return sgt->ops->sgl_phys(sgl); 39 return sgt->ops->sgl_phys(sgl);
39} 40}
40 41
41u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, void *sgl) 42u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
42{ 43{
43 return sgt->ops->sgl_dma(sgl); 44 return sgt->ops->sgl_dma(sgl);
44} 45}
45 46
46u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, void *sgl) 47u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
47{ 48{
48 return sgt->ops->sgl_length(sgl); 49 return sgt->ops->sgl_length(sgl);
49} 50}
50 51
51u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl, 52u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt,
53 struct nvgpu_sgl *sgl,
52 struct nvgpu_gmmu_attrs *attrs) 54 struct nvgpu_gmmu_attrs *attrs)
53{ 55{
54 return sgt->ops->sgl_gpu_addr(g, sgl, attrs); 56 return sgt->ops->sgl_gpu_addr(g, sgl, attrs);
@@ -88,7 +90,7 @@ u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys)
88u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt) 90u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
89{ 91{
90 u64 align = 0, chunk_align = 0; 92 u64 align = 0, chunk_align = 0;
91 void *sgl; 93 struct nvgpu_sgl *sgl;
92 94
93 /* 95 /*
94 * If this SGT is iommuable and we want to use the IOMMU address then 96 * If this SGT is iommuable and we want to use the IOMMU address then
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 6dc1edf7..13ccc48b 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -153,40 +153,41 @@ static void nvgpu_page_release_co(struct nvgpu_allocator *a,
153 nvgpu_alloc_release_carveout(&va->source_allocator, co); 153 nvgpu_alloc_release_carveout(&va->source_allocator, co);
154} 154}
155 155
156static void *nvgpu_page_alloc_sgl_next(void *sgl) 156static struct nvgpu_sgl *nvgpu_page_alloc_sgl_next(struct nvgpu_sgl *sgl)
157{ 157{
158 struct nvgpu_mem_sgl *nvgpu_sgl = sgl; 158 struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
159 159
160 return nvgpu_sgl->next; 160 return (struct nvgpu_sgl *)sgl_impl->next;
161} 161}
162 162
163static u64 nvgpu_page_alloc_sgl_phys(void *sgl) 163static u64 nvgpu_page_alloc_sgl_phys(struct nvgpu_sgl *sgl)
164{ 164{
165 struct nvgpu_mem_sgl *nvgpu_sgl = sgl; 165 struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
166 166
167 return nvgpu_sgl->phys; 167 return sgl_impl->phys;
168} 168}
169 169
170static u64 nvgpu_page_alloc_sgl_dma(void *sgl) 170static u64 nvgpu_page_alloc_sgl_dma(struct nvgpu_sgl *sgl)
171{ 171{
172 struct nvgpu_mem_sgl *nvgpu_sgl = sgl; 172 struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
173 173
174 return nvgpu_sgl->dma; 174 return sgl_impl->dma;
175} 175}
176 176
177static u64 nvgpu_page_alloc_sgl_length(void *sgl) 177static u64 nvgpu_page_alloc_sgl_length(struct nvgpu_sgl *sgl)
178{ 178{
179 struct nvgpu_mem_sgl *nvgpu_sgl = sgl; 179 struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
180 180
181 return nvgpu_sgl->length; 181 return sgl_impl->length;
182} 182}
183 183
184static u64 nvgpu_page_alloc_sgl_gpu_addr(struct gk20a *g, void *sgl, 184static u64 nvgpu_page_alloc_sgl_gpu_addr(struct gk20a *g,
185 struct nvgpu_sgl *sgl,
185 struct nvgpu_gmmu_attrs *attrs) 186 struct nvgpu_gmmu_attrs *attrs)
186{ 187{
187 struct nvgpu_mem_sgl *nvgpu_sgl = sgl; 188 struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
188 189
189 return nvgpu_sgl->phys; 190 return sgl_impl->phys;
190} 191}
191 192
192static void nvgpu_page_alloc_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt) 193static void nvgpu_page_alloc_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
@@ -229,7 +230,7 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
229 struct nvgpu_page_alloc *alloc, 230 struct nvgpu_page_alloc *alloc,
230 bool free_buddy_alloc) 231 bool free_buddy_alloc)
231{ 232{
232 struct nvgpu_mem_sgl *sgl = alloc->sgt.sgl; 233 struct nvgpu_sgl *sgl = alloc->sgt.sgl;
233 234
234 if (free_buddy_alloc) { 235 if (free_buddy_alloc) {
235 while (sgl) { 236 while (sgl) {
@@ -239,7 +240,8 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
239 } 240 }
240 } 241 }
241 242
242 nvgpu_page_alloc_sgl_proper_free(a->owner->g, sgl); 243 nvgpu_page_alloc_sgl_proper_free(a->owner->g,
244 (struct nvgpu_mem_sgl *)sgl);
243 nvgpu_kmem_cache_free(a->alloc_cache, alloc); 245 nvgpu_kmem_cache_free(a->alloc_cache, alloc);
244} 246}
245 247
@@ -389,7 +391,7 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
389 alloc->length = slab_page->slab_size; 391 alloc->length = slab_page->slab_size;
390 alloc->base = slab_page->page_addr + (offs * slab_page->slab_size); 392 alloc->base = slab_page->page_addr + (offs * slab_page->slab_size);
391 393
392 sgl = alloc->sgt.sgl; 394 sgl = (struct nvgpu_mem_sgl *)alloc->sgt.sgl;
393 sgl->phys = alloc->base; 395 sgl->phys = alloc->base;
394 sgl->dma = alloc->base; 396 sgl->dma = alloc->base;
395 sgl->length = alloc->length; 397 sgl->length = alloc->length;
@@ -430,7 +432,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
430 goto fail; 432 goto fail;
431 } 433 }
432 434
433 alloc->sgt.sgl = sgl; 435 alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
434 err = __do_slab_alloc(a, slab, alloc); 436 err = __do_slab_alloc(a, slab, alloc);
435 if (err) 437 if (err)
436 goto fail; 438 goto fail;
@@ -582,7 +584,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
582 if (prev_sgl) 584 if (prev_sgl)
583 prev_sgl->next = sgl; 585 prev_sgl->next = sgl;
584 else 586 else
585 alloc->sgt.sgl = sgl; 587 alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
586 588
587 prev_sgl = sgl; 589 prev_sgl = sgl;
588 590
@@ -595,7 +597,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
595 return alloc; 597 return alloc;
596 598
597fail_cleanup: 599fail_cleanup:
598 sgl = alloc->sgt.sgl; 600 sgl = (struct nvgpu_mem_sgl *)alloc->sgt.sgl;
599 while (sgl) { 601 while (sgl) {
600 struct nvgpu_mem_sgl *next = sgl->next; 602 struct nvgpu_mem_sgl *next = sgl->next;
601 603
@@ -614,7 +616,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
614 struct nvgpu_page_allocator *a, u64 len) 616 struct nvgpu_page_allocator *a, u64 len)
615{ 617{
616 struct nvgpu_page_alloc *alloc = NULL; 618 struct nvgpu_page_alloc *alloc = NULL;
617 struct nvgpu_mem_sgl *sgl; 619 struct nvgpu_sgl *sgl;
618 u64 pages; 620 u64 pages;
619 int i = 0; 621 int i = 0;
620 622
@@ -751,7 +753,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
751 753
752 alloc->nr_chunks = 1; 754 alloc->nr_chunks = 1;
753 alloc->length = length; 755 alloc->length = length;
754 alloc->sgt.sgl = sgl; 756 alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
755 757
756 sgl->phys = alloc->base; 758 sgl->phys = alloc->base;
757 sgl->dma = alloc->base; 759 sgl->dma = alloc->base;
@@ -776,7 +778,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
776{ 778{
777 struct nvgpu_page_allocator *a = page_allocator(__a); 779 struct nvgpu_page_allocator *a = page_allocator(__a);
778 struct nvgpu_page_alloc *alloc = NULL; 780 struct nvgpu_page_alloc *alloc = NULL;
779 struct nvgpu_mem_sgl *sgl; 781 struct nvgpu_sgl *sgl;
780 u64 aligned_len, pages; 782 u64 aligned_len, pages;
781 int i = 0; 783 int i = 0;
782 784
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index a55b3a2b..4239bd06 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -413,7 +413,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
413 struct gk20a_fence *gk20a_fence_out = NULL; 413 struct gk20a_fence *gk20a_fence_out = NULL;
414 struct gk20a_fence *gk20a_last_fence = NULL; 414 struct gk20a_fence *gk20a_last_fence = NULL;
415 struct nvgpu_page_alloc *alloc = NULL; 415 struct nvgpu_page_alloc *alloc = NULL;
416 void *sgl = NULL; 416 struct nvgpu_sgl *sgl = NULL;
417 int err = 0; 417 int err = 0;
418 418
419 if (g->mm.vidmem.ce_ctx_id == (u32)~0) 419 if (g->mm.vidmem.ce_ctx_id == (u32)~0)
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index 859d40b1..98565140 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -44,7 +44,7 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
44{ 44{
45 struct nvgpu_page_alloc *alloc = NULL; 45 struct nvgpu_page_alloc *alloc = NULL;
46 struct nvgpu_sgt *sgt; 46 struct nvgpu_sgt *sgt;
47 void *sgl; 47 struct nvgpu_sgl *sgl;
48 u32 byteoff, start_reg, until_end, n; 48 u32 byteoff, start_reg, until_end, n;
49 49
50 /* 50 /*
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index ac3364b0..5de2b439 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -823,9 +823,10 @@ struct gpu_ops {
823 size_t size); 823 size_t size);
824 struct { 824 struct {
825 u32 (*enter)(struct gk20a *g, struct nvgpu_mem *mem, 825 u32 (*enter)(struct gk20a *g, struct nvgpu_mem *mem,
826 struct nvgpu_sgt *sgt, void *sgl, u32 w); 826 struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl,
827 u32 w);
827 void (*exit)(struct gk20a *g, struct nvgpu_mem *mem, 828 void (*exit)(struct gk20a *g, struct nvgpu_mem *mem,
828 void *sgl); 829 struct nvgpu_sgl *sgl);
829 u32 (*data032_r)(u32 i); 830 u32 (*data032_r)(u32 i);
830 } pramin; 831 } pramin;
831 struct { 832 struct {
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
index 05d0473e..bb8831e0 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@
32 32
33/* WARNING: returns pramin_window_lock taken, complement with pramin_exit() */ 33/* WARNING: returns pramin_window_lock taken, complement with pramin_exit() */
34u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem, 34u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
35 struct nvgpu_sgt *sgt, void *sgl, u32 w) 35 struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl, u32 w)
36{ 36{
37 u64 bufbase = nvgpu_sgt_get_phys(sgt, sgl); 37 u64 bufbase = nvgpu_sgt_get_phys(sgt, sgl);
38 u64 addr = bufbase + w * sizeof(u32); 38 u64 addr = bufbase + w * sizeof(u32);
@@ -64,7 +64,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
64} 64}
65 65
66void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem, 66void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
67 void *sgl) 67 struct nvgpu_sgl *sgl)
68{ 68{
69 gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, sgl); 69 gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, sgl);
70 70
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
index e25bda0c..a0a28088 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -28,7 +28,9 @@ struct nvgpu_mem;
28struct nvgpu_mem_sgl; 28struct nvgpu_mem_sgl;
29 29
30u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem, 30u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
31 struct nvgpu_sgt *sgt, void *sgl, u32 w); 31 struct nvgpu_sgt *sgt,
32 struct nvgpu_sgl *sgl,
33 u32 w);
32void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem, 34void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
33 void *sgl); 35 struct nvgpu_sgl *sgl);
34#endif 36#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
index 2b8b7015..04e947e0 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -54,12 +54,19 @@ enum nvgpu_aperture {
54 APERTURE_VIDMEM 54 APERTURE_VIDMEM
55}; 55};
56 56
57/*
58 * Forward declared opaque placeholder type that does not really exist, but
59 * helps the compiler help us about getting types right. In reality,
60 * implementors of nvgpu_sgt_ops will have some concrete type in place of this.
61 */
62struct nvgpu_sgl;
63
57struct nvgpu_sgt_ops { 64struct nvgpu_sgt_ops {
58 void *(*sgl_next)(void *sgl); 65 struct nvgpu_sgl *(*sgl_next)(struct nvgpu_sgl *sgl);
59 u64 (*sgl_phys)(void *sgl); 66 u64 (*sgl_phys)(struct nvgpu_sgl *sgl);
60 u64 (*sgl_dma)(void *sgl); 67 u64 (*sgl_dma)(struct nvgpu_sgl *sgl);
61 u64 (*sgl_length)(void *sgl); 68 u64 (*sgl_length)(struct nvgpu_sgl *sgl);
62 u64 (*sgl_gpu_addr)(struct gk20a *g, void *sgl, 69 u64 (*sgl_gpu_addr)(struct gk20a *g, struct nvgpu_sgl *sgl,
63 struct nvgpu_gmmu_attrs *attrs); 70 struct nvgpu_gmmu_attrs *attrs);
64 /* 71 /*
65 * If left NULL then iommuable is assumed to be false. 72 * If left NULL then iommuable is assumed to be false.
@@ -85,15 +92,16 @@ struct nvgpu_sgt {
85 /* 92 /*
86 * The first node in the scatter gather list. 93 * The first node in the scatter gather list.
87 */ 94 */
88 void *sgl; 95 struct nvgpu_sgl *sgl;
89}; 96};
90 97
91/* 98/*
92 * This struct holds the necessary information for describing a struct 99 * This struct holds the necessary information for describing a struct
93 * nvgpu_mem's scatter gather list. 100 * nvgpu_mem's scatter gather list.
94 * 101 *
95 * Not all nvgpu_sgt's use this particular implementation. Nor is a given OS 102 * This is one underlying implementation for nvgpu_sgl. Not all nvgpu_sgt's use
96 * required to use this at all. 103 * this particular implementation. Nor is a given OS required to use this at
104 * all.
97 */ 105 */
98struct nvgpu_mem_sgl { 106struct nvgpu_mem_sgl {
99 /* 107 /*
@@ -233,11 +241,13 @@ struct nvgpu_sgt *nvgpu_sgt_create(struct gk20a *g);
233struct nvgpu_sgt *nvgpu_sgt_create_from_mem(struct gk20a *g, 241struct nvgpu_sgt *nvgpu_sgt_create_from_mem(struct gk20a *g,
234 struct nvgpu_mem *mem); 242 struct nvgpu_mem *mem);
235 243
236void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl); 244struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
237u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, void *sgl); 245 struct nvgpu_sgl *sgl);
238u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, void *sgl); 246u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
239u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, void *sgl); 247u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
240u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl, 248u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
249u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt,
250 struct nvgpu_sgl *sgl,
241 struct nvgpu_gmmu_attrs *attrs); 251 struct nvgpu_gmmu_attrs *attrs);
242void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt); 252void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt);
243 253
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index cf9a28c7..ca517975 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -76,7 +76,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
76 void *handle = NULL; 76 void *handle = NULL;
77 size_t oob_size; 77 size_t oob_size;
78 u8 prot; 78 u8 prot;
79 void *sgl; 79 struct nvgpu_sgl *sgl;
80 80
81 gk20a_dbg_fn(""); 81 gk20a_dbg_fn("");
82 82