path: root/drivers/gpu/nvgpu/common/mm
author     Konsta Holtta <kholtta@nvidia.com>                    2018-01-22 05:19:08 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-03-01 15:24:06 -0500
commit     ba8fa334f40223ad491ab61a6c072a276017787f (patch)
tree       eb06807ed4fdee3a708f66949c6daf82f193a531 /drivers/gpu/nvgpu/common/mm
parent     5a35a95654d561fce09a3b9abf6b82bb7a29d74b (diff)
gpu: nvgpu: introduce explicit nvgpu_sgl type
The operations in struct nvgpu_sgt_ops take a scatter-gather list (sgl)
argument as a void pointer. Change the type signatures to take
struct nvgpu_sgl *, an opaque marker type that makes it harder to pass
a wrong argument, since anything goes for a void *. Explicit types also
make the code self-documenting.

For added safety, implementors of the nvgpu_sgt_ops interface now need
explicit casts when converting between the general nvgpu_sgl type and
their implementation-specific types. This is not purely a bad thing:
the casts show clearly where type conversions happen.

Jira NVGPU-30
Jira NVGPU-52
Jira NVGPU-305

Change-Id: Ic64eed6d2d39ca5786e62b172ddb7133af16817a
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1643555
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
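A minimal, self-contained sketch of the opaque-pointer pattern applied here
(the nvgpu_sgl name and the nvgpu_mem_sgl fields come from the diff below;
the u64 typedef stands in for the kernel type, and mem_sgl_next is a
hypothetical name used only for illustration):

typedef unsigned long long u64;	/* stand-in for the kernel's u64 */

/*
 * Opaque marker type: declared but never defined, so callers can pass
 * struct nvgpu_sgl * around but cannot dereference it or accidentally
 * hand in an unrelated pointer, as they could with void *.
 */
struct nvgpu_sgl;

/* Implementation-specific node, with the fields used in this diff. */
struct nvgpu_mem_sgl {
	struct nvgpu_mem_sgl *next;
	u64 phys;
	u64 dma;
	u64 length;
};

/*
 * An implementor converts explicitly at the boundary; each cast marks
 * exactly where the opaque and concrete types meet.
 */
static struct nvgpu_sgl *mem_sgl_next(struct nvgpu_sgl *sgl)
{
	struct nvgpu_mem_sgl *impl = (struct nvgpu_mem_sgl *)sgl;

	return (struct nvgpu_sgl *)impl->next;
}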
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/gmmu.c            2
-rw-r--r--  drivers/gpu/nvgpu/common/mm/nvgpu_mem.c      16
-rw-r--r--  drivers/gpu/nvgpu/common/mm/page_allocator.c 52
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vidmem.c          4
4 files changed, 39 insertions, 35 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index ffac324c..e1942cbd 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -477,7 +477,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 				   struct nvgpu_gmmu_attrs *attrs)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
-	void *sgl;
+	struct nvgpu_sgl *sgl;
 	int err = 0;
 
 	if (!sgt) {
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 73b6b2a7..f7c51f42 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -28,27 +28,29 @@
 
 #include "gk20a/gk20a.h"
 
-void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl)
+struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
+				     struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_next(sgl);
 }
 
-u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, void *sgl)
+u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_phys(sgl);
 }
 
-u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, void *sgl)
+u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_dma(sgl);
 }
 
-u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, void *sgl)
+u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_length(sgl);
 }
 
-u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl,
+u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt,
+			   struct nvgpu_sgl *sgl,
 			   struct nvgpu_gmmu_attrs *attrs)
 {
 	return sgt->ops->sgl_gpu_addr(g, sgl, attrs);
@@ -88,7 +90,7 @@ u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys)
 u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
 {
 	u64 align = 0, chunk_align = 0;
-	void *sgl;
+	struct nvgpu_sgl *sgl;
 
 	/*
 	 * If this SGT is iommuable and we want to use the IOMMU address then
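The wrappers above let callers walk any scatter-gather table through the
typed accessors alone. A hedged sketch of such a walk, modeled on the loop
in nvgpu_sgt_alignment (sgt_total_length is a hypothetical helper, not part
of this patch):

static u64 sgt_total_length(struct nvgpu_sgt *sgt)
{
	struct nvgpu_sgl *sgl;
	u64 total = 0;

	/*
	 * sgt->sgl is the opaque head of the list; every accessor
	 * dispatches through sgt->ops, so this loop works for any
	 * nvgpu_sgt_ops implementation.
	 */
	for (sgl = sgt->sgl; sgl; sgl = nvgpu_sgt_get_next(sgt, sgl))
		total += nvgpu_sgt_get_length(sgt, sgl);

	return total;
}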
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 6dc1edf7..13ccc48b 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -153,40 +153,41 @@ static void nvgpu_page_release_co(struct nvgpu_allocator *a,
 	nvgpu_alloc_release_carveout(&va->source_allocator, co);
 }
 
-static void *nvgpu_page_alloc_sgl_next(void *sgl)
+static struct nvgpu_sgl *nvgpu_page_alloc_sgl_next(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->next;
+	return (struct nvgpu_sgl *)sgl_impl->next;
 }
 
-static u64 nvgpu_page_alloc_sgl_phys(void *sgl)
+static u64 nvgpu_page_alloc_sgl_phys(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->phys;
+	return sgl_impl->phys;
 }
 
-static u64 nvgpu_page_alloc_sgl_dma(void *sgl)
+static u64 nvgpu_page_alloc_sgl_dma(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->dma;
+	return sgl_impl->dma;
 }
 
-static u64 nvgpu_page_alloc_sgl_length(void *sgl)
+static u64 nvgpu_page_alloc_sgl_length(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->length;
+	return sgl_impl->length;
 }
 
-static u64 nvgpu_page_alloc_sgl_gpu_addr(struct gk20a *g, void *sgl,
+static u64 nvgpu_page_alloc_sgl_gpu_addr(struct gk20a *g,
+					 struct nvgpu_sgl *sgl,
 					 struct nvgpu_gmmu_attrs *attrs)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->phys;
+	return sgl_impl->phys;
 }
 
 static void nvgpu_page_alloc_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
@@ -229,7 +230,7 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
 			       struct nvgpu_page_alloc *alloc,
 			       bool free_buddy_alloc)
 {
-	struct nvgpu_mem_sgl *sgl = alloc->sgt.sgl;
+	struct nvgpu_sgl *sgl = alloc->sgt.sgl;
 
 	if (free_buddy_alloc) {
 		while (sgl) {
@@ -239,7 +240,8 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
 		}
 	}
 
-	nvgpu_page_alloc_sgl_proper_free(a->owner->g, sgl);
+	nvgpu_page_alloc_sgl_proper_free(a->owner->g,
+					 (struct nvgpu_mem_sgl *)sgl);
 	nvgpu_kmem_cache_free(a->alloc_cache, alloc);
 }
 
@@ -389,7 +391,7 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
 	alloc->length = slab_page->slab_size;
 	alloc->base = slab_page->page_addr + (offs * slab_page->slab_size);
 
-	sgl = alloc->sgt.sgl;
+	sgl = (struct nvgpu_mem_sgl *)alloc->sgt.sgl;
 	sgl->phys = alloc->base;
 	sgl->dma = alloc->base;
 	sgl->length = alloc->length;
@@ -430,7 +432,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 		goto fail;
 	}
 
-	alloc->sgt.sgl = sgl;
+	alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
 	err = __do_slab_alloc(a, slab, alloc);
 	if (err)
 		goto fail;
@@ -582,7 +584,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 		if (prev_sgl)
 			prev_sgl->next = sgl;
 		else
-			alloc->sgt.sgl = sgl;
+			alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
 
 		prev_sgl = sgl;
 
@@ -595,7 +597,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 	return alloc;
 
 fail_cleanup:
-	sgl = alloc->sgt.sgl;
+	sgl = (struct nvgpu_mem_sgl *)alloc->sgt.sgl;
 	while (sgl) {
 		struct nvgpu_mem_sgl *next = sgl->next;
 
@@ -614,7 +616,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
 	struct nvgpu_page_allocator *a, u64 len)
 {
 	struct nvgpu_page_alloc *alloc = NULL;
-	struct nvgpu_mem_sgl *sgl;
+	struct nvgpu_sgl *sgl;
 	u64 pages;
 	int i = 0;
 
@@ -751,7 +753,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
 
 	alloc->nr_chunks = 1;
 	alloc->length = length;
-	alloc->sgt.sgl = sgl;
+	alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
 
 	sgl->phys = alloc->base;
 	sgl->dma = alloc->base;
@@ -776,7 +778,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
 {
 	struct nvgpu_page_allocator *a = page_allocator(__a);
 	struct nvgpu_page_alloc *alloc = NULL;
-	struct nvgpu_mem_sgl *sgl;
+	struct nvgpu_sgl *sgl;
 	u64 aligned_len, pages;
 	int i = 0;
 
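The casting pattern in the callbacks above pairs with an ops table that
binds them into the nvgpu_sgt_ops interface. A sketch of how such a table
could be wired, assuming the struct's field names match the sgt->ops->sgl_*
calls seen in nvgpu_mem.c (the real struct likely has further members not
shown in this diff):

static const struct nvgpu_sgt_ops page_alloc_sgl_ops = {
	/* Only the five callbacks exercised in this diff are listed. */
	.sgl_next     = nvgpu_page_alloc_sgl_next,
	.sgl_phys     = nvgpu_page_alloc_sgl_phys,
	.sgl_dma      = nvgpu_page_alloc_sgl_dma,
	.sgl_length   = nvgpu_page_alloc_sgl_length,
	.sgl_gpu_addr = nvgpu_page_alloc_sgl_gpu_addr,
};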
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index a55b3a2b..4239bd06 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -413,7 +413,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 	struct gk20a_fence *gk20a_fence_out = NULL;
 	struct gk20a_fence *gk20a_last_fence = NULL;
 	struct nvgpu_page_alloc *alloc = NULL;
-	void *sgl = NULL;
+	struct nvgpu_sgl *sgl = NULL;
 	int err = 0;
 
 	if (g->mm.vidmem.ce_ctx_id == (u32)~0)