From ba8fa334f40223ad491ab61a6c072a276017787f Mon Sep 17 00:00:00 2001
From: Konsta Holtta
Date: Mon, 22 Jan 2018 12:19:08 +0200
Subject: gpu: nvgpu: introduce explicit nvgpu_sgl type

The operations in struct nvgpu_sgt_ops have a scatter-gather list (sgl)
argument which is a void pointer. Change the type signatures to take
struct nvgpu_sgl *, an opaque marker type that makes it more difficult
to pass around wrong arguments, since anything goes for a void *.
Explicit types also add self-documentation to the code.

For some added safety, explicit type casts are now required in
implementors of the nvgpu_sgt_ops interface when converting between the
general nvgpu_sgl type and implementation-specific types. This is not
purely a bad thing, because the casts show clearly where type
conversions happen.

Jira NVGPU-30
Jira NVGPU-52
Jira NVGPU-305

Change-Id: Ic64eed6d2d39ca5786e62b172ddb7133af16817a
Signed-off-by: Konsta Holtta
Reviewed-on: https://git-master.nvidia.com/r/1643555
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/linux/nvgpu_mem.c   | 15 ++++----
 drivers/gpu/nvgpu/common/mm/gmmu.c           |  2 +-
 drivers/gpu/nvgpu/common/mm/nvgpu_mem.c      | 16 +++++----
 drivers/gpu/nvgpu/common/mm/page_allocator.c | 52 +++++++++++++++-------------
 drivers/gpu/nvgpu/common/mm/vidmem.c         |  4 +--
 drivers/gpu/nvgpu/common/pramin.c            |  4 +--
 drivers/gpu/nvgpu/gk20a/gk20a.h              |  5 +--
 drivers/gpu/nvgpu/gk20a/pramin_gk20a.c       |  6 ++--
 drivers/gpu/nvgpu/gk20a/pramin_gk20a.h       |  8 +++--
 drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h  | 38 ++++++++++++--------
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c |  2 +-
 11 files changed, 85 insertions(+), 67 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index 206b83e1..7406c4d7 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -499,27 +499,28 @@ int __nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest,
 }
 #endif
 
-static void *nvgpu_mem_linux_sgl_next(void *sgl)
+static struct nvgpu_sgl *nvgpu_mem_linux_sgl_next(struct nvgpu_sgl *sgl)
 {
-	return sg_next((struct scatterlist *)sgl);
+	return (struct nvgpu_sgl *)sg_next((struct scatterlist *)sgl);
 }
 
-static u64 nvgpu_mem_linux_sgl_phys(void *sgl)
+static u64 nvgpu_mem_linux_sgl_phys(struct nvgpu_sgl *sgl)
 {
 	return (u64)sg_phys((struct scatterlist *)sgl);
 }
 
-static u64 nvgpu_mem_linux_sgl_dma(void *sgl)
+static u64 nvgpu_mem_linux_sgl_dma(struct nvgpu_sgl *sgl)
 {
 	return (u64)sg_dma_address((struct scatterlist *)sgl);
 }
 
-static u64 nvgpu_mem_linux_sgl_length(void *sgl)
+static u64 nvgpu_mem_linux_sgl_length(struct nvgpu_sgl *sgl)
 {
 	return (u64)((struct scatterlist *)sgl)->length;
 }
 
-static u64 nvgpu_mem_linux_sgl_gpu_addr(struct gk20a *g, void *sgl,
+static u64 nvgpu_mem_linux_sgl_gpu_addr(struct gk20a *g,
+					struct nvgpu_sgl *sgl,
 					struct nvgpu_gmmu_attrs *attrs)
 {
 	if (sg_dma_address((struct scatterlist *)sgl) == 0)
@@ -587,7 +588,7 @@ struct nvgpu_sgt *nvgpu_linux_sgt_create(struct gk20a *g, struct sg_table *sgt)
 
 	nvgpu_log(g, gpu_dbg_sgl, "Making Linux SGL!");
 
-	nvgpu_sgt->sgl = sgt->sgl;
+	nvgpu_sgt->sgl = (struct nvgpu_sgl *)linux_sgl;
 	nvgpu_sgt->ops = &nvgpu_linux_sgt_ops;
 
 	return nvgpu_sgt;
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index ffac324c..e1942cbd 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -477,7 +477,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 					      struct nvgpu_gmmu_attrs *attrs)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
-	void *sgl;
+	struct nvgpu_sgl *sgl;
 	int err = 0;
 
 	if (!sgt) {
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 73b6b2a7..f7c51f42 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -28,27 +28,29 @@
 
 #include "gk20a/gk20a.h"
 
-void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl)
+struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
+				     struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_next(sgl);
 }
 
-u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, void *sgl)
+u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_phys(sgl);
 }
 
-u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, void *sgl)
+u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_dma(sgl);
 }
 
-u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, void *sgl)
+u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_length(sgl);
 }
 
-u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl,
+u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt,
+			   struct nvgpu_sgl *sgl,
 			   struct nvgpu_gmmu_attrs *attrs)
 {
 	return sgt->ops->sgl_gpu_addr(g, sgl, attrs);
@@ -88,7 +90,7 @@ u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys)
 u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
 {
 	u64 align = 0, chunk_align = 0;
-	void *sgl;
+	struct nvgpu_sgl *sgl;
 
 	/*
	 * If this SGT is iommuable and we want to use the IOMMU address then
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 6dc1edf7..13ccc48b 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -153,40 +153,41 @@ static void nvgpu_page_release_co(struct nvgpu_allocator *a,
 	nvgpu_alloc_release_carveout(&va->source_allocator, co);
 }
 
-static void *nvgpu_page_alloc_sgl_next(void *sgl)
+static struct nvgpu_sgl *nvgpu_page_alloc_sgl_next(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->next;
+	return (struct nvgpu_sgl *)sgl_impl->next;
 }
 
-static u64 nvgpu_page_alloc_sgl_phys(void *sgl)
+static u64 nvgpu_page_alloc_sgl_phys(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->phys;
+	return sgl_impl->phys;
 }
 
-static u64 nvgpu_page_alloc_sgl_dma(void *sgl)
+static u64 nvgpu_page_alloc_sgl_dma(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->dma;
+	return sgl_impl->dma;
 }
 
-static u64 nvgpu_page_alloc_sgl_length(void *sgl)
+static u64 nvgpu_page_alloc_sgl_length(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->length;
+	return sgl_impl->length;
 }
 
-static u64 nvgpu_page_alloc_sgl_gpu_addr(struct gk20a *g, void *sgl,
+static u64 nvgpu_page_alloc_sgl_gpu_addr(struct gk20a *g,
+					 struct nvgpu_sgl *sgl,
 					 struct nvgpu_gmmu_attrs *attrs)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->phys;
+	return sgl_impl->phys;
 }
 
 static void nvgpu_page_alloc_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
@@ -229,7 +230,7 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
 			       struct nvgpu_page_alloc *alloc,
 			       bool free_buddy_alloc)
 {
-	struct nvgpu_mem_sgl *sgl = alloc->sgt.sgl;
+	struct nvgpu_sgl *sgl = alloc->sgt.sgl;
 
 	if (free_buddy_alloc) {
 		while (sgl) {
@@ -239,7 +240,8 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
 		}
 	}
 
-	nvgpu_page_alloc_sgl_proper_free(a->owner->g, sgl);
+	nvgpu_page_alloc_sgl_proper_free(a->owner->g,
+					 (struct nvgpu_mem_sgl *)sgl);
 	nvgpu_kmem_cache_free(a->alloc_cache, alloc);
 }
 
@@ -389,7 +391,7 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
 	alloc->length = slab_page->slab_size;
 	alloc->base = slab_page->page_addr + (offs * slab_page->slab_size);
 
-	sgl = alloc->sgt.sgl;
+	sgl = (struct nvgpu_mem_sgl *)alloc->sgt.sgl;
 	sgl->phys = alloc->base;
 	sgl->dma = alloc->base;
 	sgl->length = alloc->length;
@@ -430,7 +432,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 		goto fail;
 	}
 
-	alloc->sgt.sgl = sgl;
+	alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
 	err = __do_slab_alloc(a, slab, alloc);
 	if (err)
 		goto fail;
@@ -582,7 +584,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 		if (prev_sgl)
 			prev_sgl->next = sgl;
 		else
-			alloc->sgt.sgl = sgl;
+			alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
 
 		prev_sgl = sgl;
 
@@ -595,7 +597,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 	return alloc;
 
 fail_cleanup:
-	sgl = alloc->sgt.sgl;
+	sgl = (struct nvgpu_mem_sgl *)alloc->sgt.sgl;
 	while (sgl) {
 		struct nvgpu_mem_sgl *next = sgl->next;
 
@@ -614,7 +616,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
 	struct nvgpu_page_allocator *a, u64 len)
 {
 	struct nvgpu_page_alloc *alloc = NULL;
-	struct nvgpu_mem_sgl *sgl;
+	struct nvgpu_sgl *sgl;
 	u64 pages;
 	int i = 0;
 
@@ -751,7 +753,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
 
 	alloc->nr_chunks = 1;
 	alloc->length = length;
-	alloc->sgt.sgl = sgl;
+	alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
 
 	sgl->phys = alloc->base;
 	sgl->dma = alloc->base;
@@ -776,7 +778,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
 {
 	struct nvgpu_page_allocator *a = page_allocator(__a);
 	struct nvgpu_page_alloc *alloc = NULL;
-	struct nvgpu_mem_sgl *sgl;
+	struct nvgpu_sgl *sgl;
 	u64 aligned_len, pages;
 	int i = 0;
 
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index a55b3a2b..4239bd06 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -413,7 +413,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 	struct gk20a_fence *gk20a_fence_out = NULL;
 	struct gk20a_fence *gk20a_last_fence = NULL;
 	struct nvgpu_page_alloc *alloc = NULL;
-	void *sgl = NULL;
+	struct nvgpu_sgl *sgl = NULL;
 	int err = 0;
 
 	if (g->mm.vidmem.ce_ctx_id == (u32)~0)
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index 859d40b1..98565140 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -44,7 +44,7 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 {
 	struct nvgpu_page_alloc *alloc = NULL;
 	struct nvgpu_sgt *sgt;
-	void *sgl;
+	struct nvgpu_sgl *sgl;
 	u32 byteoff, start_reg, until_end, n;
 
 	/*
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index ac3364b0..5de2b439 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -823,9 +823,10 @@ struct gpu_ops {
 			  size_t size);
 	struct {
 		u32 (*enter)(struct gk20a *g, struct nvgpu_mem *mem,
-			     struct nvgpu_sgt *sgt, void *sgl, u32 w);
+			     struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl,
+			     u32 w);
 		void (*exit)(struct gk20a *g, struct nvgpu_mem *mem,
-			     void *sgl);
+			     struct nvgpu_sgl *sgl);
 		u32 (*data032_r)(u32 i);
 	} pramin;
 	struct {
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
index 05d0473e..bb8831e0 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@
 
 /* WARNING: returns pramin_window_lock taken, complement with pramin_exit() */
 u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
-		       struct nvgpu_sgt *sgt, void *sgl, u32 w)
+		       struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl, u32 w)
 {
 	u64 bufbase = nvgpu_sgt_get_phys(sgt, sgl);
 	u64 addr = bufbase + w * sizeof(u32);
@@ -64,7 +64,7 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
 }
 
 void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
-		       void *sgl)
+		       struct nvgpu_sgl *sgl)
 {
 	gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, sgl);
 
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
index e25bda0c..a0a28088 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -28,7 +28,9 @@ struct nvgpu_mem;
 struct nvgpu_mem_sgl;
 
 u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
-		       struct nvgpu_sgt *sgt, void *sgl, u32 w);
+		       struct nvgpu_sgt *sgt,
+		       struct nvgpu_sgl *sgl,
+		       u32 w);
 void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
-		       void *sgl);
+		       struct nvgpu_sgl *sgl);
 #endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
index 2b8b7015..04e947e0 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -54,12 +54,19 @@ enum nvgpu_aperture {
 	APERTURE_VIDMEM
 };
 
+/*
+ * Forward declared opaque placeholder type that does not really exist, but
+ * helps the compiler help us get the types right. In reality, implementors
+ * of nvgpu_sgt_ops will have some concrete type in place of this.
+ */
+struct nvgpu_sgl;
+
 struct nvgpu_sgt_ops {
-	void *(*sgl_next)(void *sgl);
-	u64 (*sgl_phys)(void *sgl);
-	u64 (*sgl_dma)(void *sgl);
-	u64 (*sgl_length)(void *sgl);
-	u64 (*sgl_gpu_addr)(struct gk20a *g, void *sgl,
+	struct nvgpu_sgl *(*sgl_next)(struct nvgpu_sgl *sgl);
+	u64 (*sgl_phys)(struct nvgpu_sgl *sgl);
+	u64 (*sgl_dma)(struct nvgpu_sgl *sgl);
+	u64 (*sgl_length)(struct nvgpu_sgl *sgl);
+	u64 (*sgl_gpu_addr)(struct gk20a *g, struct nvgpu_sgl *sgl,
 			    struct nvgpu_gmmu_attrs *attrs);
 	/*
 	 * If left NULL then iommuable is assumed to be false.
@@ -85,15 +92,16 @@ struct nvgpu_sgt {
 	/*
 	 * The first node in the scatter gather list.
 	 */
-	void *sgl;
+	struct nvgpu_sgl *sgl;
 };
 
 /*
 * This struct holds the necessary information for describing a struct
 * nvgpu_mem's scatter gather list.
 *
- * Not all nvgpu_sgt's use this particular implementation. Nor is a given OS
- * required to use this at all.
+ * This is one underlying implementation for nvgpu_sgl. Not all nvgpu_sgt's use
+ * this particular implementation. Nor is a given OS required to use this at
+ * all.
 */
 struct nvgpu_mem_sgl {
 	/*
@@ -233,11 +241,13 @@ struct nvgpu_sgt *nvgpu_sgt_create(struct gk20a *g);
 struct nvgpu_sgt *nvgpu_sgt_create_from_mem(struct gk20a *g,
 					    struct nvgpu_mem *mem);
 
-void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl);
-u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, void *sgl);
-u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, void *sgl);
-u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, void *sgl);
-u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl,
+struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
+				     struct nvgpu_sgl *sgl);
+u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
+u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
+u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl);
+u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt,
+			   struct nvgpu_sgl *sgl,
 			   struct nvgpu_gmmu_attrs *attrs);
 
 void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt);
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index cf9a28c7..ca517975 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -76,7 +76,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	void *handle = NULL;
 	size_t oob_size;
 	u8 prot;
-	void *sgl;
+	struct nvgpu_sgl *sgl;
 
 	gk20a_dbg_fn("");
 
-- 
cgit v1.2.2
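
Note for readers outside the patch: the sketch below is a minimal, self-contained
illustration of the opaque-pointer pattern this change applies. It is not nvgpu
code; every name in it (demo_sgl, demo_mem_sgl, demo_sgt_ops) is hypothetical. It
shows how a marker struct that is declared but never defined lets the compiler
reject mismatched pointers that a void * parameter would silently accept, while
implementations cast explicitly at the interface boundary.

/*
 * Standalone sketch of the opaque-pointer pattern; all names are hypothetical
 * and are not part of nvgpu.
 */
#include <stdio.h>

/* Opaque marker type: declared, never defined. */
struct demo_sgl;

struct demo_sgt_ops {
	struct demo_sgl *(*sgl_next)(struct demo_sgl *sgl);
	unsigned long long (*sgl_phys)(struct demo_sgl *sgl);
};

/* One concrete implementation hidden behind the opaque type. */
struct demo_mem_sgl {
	struct demo_mem_sgl *next;
	unsigned long long phys;
};

static struct demo_sgl *demo_mem_sgl_next(struct demo_sgl *sgl)
{
	/* The explicit cast documents where the conversion happens. */
	struct demo_mem_sgl *impl = (struct demo_mem_sgl *)sgl;

	return (struct demo_sgl *)impl->next;
}

static unsigned long long demo_mem_sgl_phys(struct demo_sgl *sgl)
{
	struct demo_mem_sgl *impl = (struct demo_mem_sgl *)sgl;

	return impl->phys;
}

static const struct demo_sgt_ops demo_mem_sgl_ops = {
	.sgl_next = demo_mem_sgl_next,
	.sgl_phys = demo_mem_sgl_phys,
};

int main(void)
{
	struct demo_mem_sgl chunk1 = { .next = NULL, .phys = 0x2000 };
	struct demo_mem_sgl chunk0 = { .next = &chunk1, .phys = 0x1000 };
	/* Callers only ever hold the opaque pointer type. */
	struct demo_sgl *sgl = (struct demo_sgl *)&chunk0;

	while (sgl) {
		printf("phys = 0x%llx\n", demo_mem_sgl_ops.sgl_phys(sgl));
		sgl = demo_mem_sgl_ops.sgl_next(sgl);
	}

	/*
	 * Passing an unrelated pointer (for example an int *) where a
	 * struct demo_sgl * is expected now draws a compiler diagnostic,
	 * which a void * parameter would have accepted silently.
	 */
	return 0;
}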