From ba8fa334f40223ad491ab61a6c072a276017787f Mon Sep 17 00:00:00 2001
From: Konsta Holtta
Date: Mon, 22 Jan 2018 12:19:08 +0200
Subject: gpu: nvgpu: introduce explicit nvgpu_sgl type

The operations in struct nvgpu_sgt_ops have a scatter-gather list (sgl)
argument which is a void pointer. Change the type signatures to take
struct nvgpu_sgl *, which is an opaque marker type that makes it more
difficult to pass around wrong arguments, as anything goes for void *.
Explicit types also add self-documentation to the code.

For some added safety, some explicit type casts are now required in
implementors of the nvgpu_sgt_ops interface when converting between the
general nvgpu_sgl type and implementation-specific types. This is not
purely a bad thing, because the casts make it clear where type
conversions happen.

Jira NVGPU-30
Jira NVGPU-52
Jira NVGPU-305

Change-Id: Ic64eed6d2d39ca5786e62b172ddb7133af16817a
Signed-off-by: Konsta Holtta
Reviewed-on: https://git-master.nvidia.com/r/1643555
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/gmmu.c           |  2 +-
 drivers/gpu/nvgpu/common/mm/nvgpu_mem.c      | 16 +++++----
 drivers/gpu/nvgpu/common/mm/page_allocator.c | 52 +++++++++++++++-------------
 drivers/gpu/nvgpu/common/mm/vidmem.c         |  4 +--
 4 files changed, 39 insertions(+), 35 deletions(-)
(limited to 'drivers/gpu/nvgpu/common/mm')

diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index ffac324c..e1942cbd 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -477,7 +477,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 				     struct nvgpu_gmmu_attrs *attrs)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
-	void *sgl;
+	struct nvgpu_sgl *sgl;
 	int err = 0;
 
 	if (!sgt) {
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 73b6b2a7..f7c51f42 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -28,27 +28,29 @@
 
 #include "gk20a/gk20a.h"
 
-void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl)
+struct nvgpu_sgl *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt,
+				     struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_next(sgl);
 }
 
-u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, void *sgl)
+u64 nvgpu_sgt_get_phys(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_phys(sgl);
 }
 
-u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, void *sgl)
+u64 nvgpu_sgt_get_dma(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_dma(sgl);
 }
 
-u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, void *sgl)
+u64 nvgpu_sgt_get_length(struct nvgpu_sgt *sgt, struct nvgpu_sgl *sgl)
 {
 	return sgt->ops->sgl_length(sgl);
 }
 
-u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt, void *sgl,
+u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt,
+			   struct nvgpu_sgl *sgl,
 			   struct nvgpu_gmmu_attrs *attrs)
 {
 	return sgt->ops->sgl_gpu_addr(g, sgl, attrs);
@@ -88,7 +90,7 @@ u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys)
 u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
 {
 	u64 align = 0, chunk_align = 0;
-	void *sgl;
+	struct nvgpu_sgl *sgl;
 
 	/*
 	 * If this SGT is iommuable and we want to use the IOMMU address then
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 6dc1edf7..13ccc48b 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -153,40 +153,41 @@ static void nvgpu_page_release_co(struct nvgpu_allocator *a,
 	nvgpu_alloc_release_carveout(&va->source_allocator, co);
 }
 
-static void *nvgpu_page_alloc_sgl_next(void *sgl)
+static struct nvgpu_sgl *nvgpu_page_alloc_sgl_next(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->next;
+	return (struct nvgpu_sgl *)sgl_impl->next;
 }
 
-static u64 nvgpu_page_alloc_sgl_phys(void *sgl)
+static u64 nvgpu_page_alloc_sgl_phys(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->phys;
+	return sgl_impl->phys;
 }
 
-static u64 nvgpu_page_alloc_sgl_dma(void *sgl)
+static u64 nvgpu_page_alloc_sgl_dma(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->dma;
+	return sgl_impl->dma;
 }
 
-static u64 nvgpu_page_alloc_sgl_length(void *sgl)
+static u64 nvgpu_page_alloc_sgl_length(struct nvgpu_sgl *sgl)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->length;
+	return sgl_impl->length;
 }
 
-static u64 nvgpu_page_alloc_sgl_gpu_addr(struct gk20a *g, void *sgl,
+static u64 nvgpu_page_alloc_sgl_gpu_addr(struct gk20a *g,
+					 struct nvgpu_sgl *sgl,
 					 struct nvgpu_gmmu_attrs *attrs)
 {
-	struct nvgpu_mem_sgl *nvgpu_sgl = sgl;
+	struct nvgpu_mem_sgl *sgl_impl = (struct nvgpu_mem_sgl *)sgl;
 
-	return nvgpu_sgl->phys;
+	return sgl_impl->phys;
 }
 
 static void nvgpu_page_alloc_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
@@ -229,7 +230,7 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
 			       struct nvgpu_page_alloc *alloc,
 			       bool free_buddy_alloc)
 {
-	struct nvgpu_mem_sgl *sgl = alloc->sgt.sgl;
+	struct nvgpu_sgl *sgl = alloc->sgt.sgl;
 
 	if (free_buddy_alloc) {
 		while (sgl) {
@@ -239,7 +240,8 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
 		}
 	}
 
-	nvgpu_page_alloc_sgl_proper_free(a->owner->g, sgl);
+	nvgpu_page_alloc_sgl_proper_free(a->owner->g,
+					 (struct nvgpu_mem_sgl *)sgl);
 	nvgpu_kmem_cache_free(a->alloc_cache, alloc);
 }
 
@@ -389,7 +391,7 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
 	alloc->length = slab_page->slab_size;
 	alloc->base = slab_page->page_addr + (offs * slab_page->slab_size);
 
-	sgl = alloc->sgt.sgl;
+	sgl = (struct nvgpu_mem_sgl *)alloc->sgt.sgl;
 	sgl->phys = alloc->base;
 	sgl->dma = alloc->base;
 	sgl->length = alloc->length;
@@ -430,7 +432,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 		goto fail;
 	}
 
-	alloc->sgt.sgl = sgl;
+	alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
 	err = __do_slab_alloc(a, slab, alloc);
 	if (err)
 		goto fail;
@@ -582,7 +584,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 		if (prev_sgl)
 			prev_sgl->next = sgl;
 		else
-			alloc->sgt.sgl = sgl;
+			alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
 
 		prev_sgl = sgl;
 
@@ -595,7 +597,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 	return alloc;
 
 fail_cleanup:
-	sgl = alloc->sgt.sgl;
+	sgl = (struct nvgpu_mem_sgl *)alloc->sgt.sgl;
 	while (sgl) {
 		struct nvgpu_mem_sgl *next = sgl->next;
 
@@ -614,7 +616,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
 	struct nvgpu_page_allocator *a, u64 len)
 {
 	struct nvgpu_page_alloc *alloc = NULL;
-	struct nvgpu_mem_sgl *sgl;
+	struct nvgpu_sgl *sgl;
 	u64 pages;
 	int i = 0;
 
@@ -751,7 +753,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
 	alloc->nr_chunks = 1;
 	alloc->length = length;
-	alloc->sgt.sgl = sgl;
+	alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
 
 	sgl->phys = alloc->base;
 	sgl->dma = alloc->base;
@@ -776,7 +778,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
 {
 	struct nvgpu_page_allocator *a = page_allocator(__a);
 	struct nvgpu_page_alloc *alloc = NULL;
-	struct nvgpu_mem_sgl *sgl;
+	struct nvgpu_sgl *sgl;
 	u64 aligned_len, pages;
 	int i = 0;
 
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index a55b3a2b..4239bd06 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -413,7 +413,7 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 	struct gk20a_fence *gk20a_fence_out = NULL;
 	struct gk20a_fence *gk20a_last_fence = NULL;
 	struct nvgpu_page_alloc *alloc = NULL;
-	void *sgl = NULL;
+	struct nvgpu_sgl *sgl = NULL;
 	int err = 0;
 
 	if (g->mm.vidmem.ce_ctx_id == (u32)~0)
-- 
cgit v1.2.2
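
Note on the pattern used above: struct nvgpu_sgl is an opaque marker type, declared but
never defined, so the sgt ops can only be handed a pointer that was deliberately produced,
and each implementor casts to and from its own concrete sgl type. The following is a
minimal, standalone C sketch of that pattern for illustration only; the names here
(my_sgl, my_mem_sgl, my_sgt_ops and the helper functions) are hypothetical stand-ins,
not the actual nvgpu definitions, which live in nvgpu's headers outside the files shown
in this patch.

#include <stdio.h>

/*
 * Opaque marker type: declared but never defined, so generic code cannot
 * dereference it and unrelated pointers no longer convert silently.
 */
struct my_sgl;

/* Interface that takes the explicit opaque type instead of void *. */
struct my_sgt_ops {
        struct my_sgl *(*sgl_next)(struct my_sgl *sgl);
        unsigned long long (*sgl_phys)(struct my_sgl *sgl);
};

/* One concrete scatter-gather list implementation hidden behind the marker. */
struct my_mem_sgl {
        struct my_mem_sgl *next;
        unsigned long long phys;
};

static struct my_sgl *my_mem_sgl_next(struct my_sgl *sgl)
{
        /* Explicit cast from the general type to the implementation type. */
        struct my_mem_sgl *impl = (struct my_mem_sgl *)sgl;

        return (struct my_sgl *)impl->next;
}

static unsigned long long my_mem_sgl_phys(struct my_sgl *sgl)
{
        struct my_mem_sgl *impl = (struct my_mem_sgl *)sgl;

        return impl->phys;
}

static const struct my_sgt_ops my_mem_ops = {
        .sgl_next = my_mem_sgl_next,
        .sgl_phys = my_mem_sgl_phys,
};

int main(void)
{
        struct my_mem_sgl chunk1 = { .next = NULL, .phys = 0x2000 };
        struct my_mem_sgl chunk0 = { .next = &chunk1, .phys = 0x1000 };
        struct my_sgl *sgl = (struct my_sgl *)&chunk0;

        /* Generic code walks the list through the typed interface only. */
        while (sgl) {
                printf("chunk phys = 0x%llx\n", my_mem_ops.sgl_phys(sgl));
                sgl = my_mem_ops.sgl_next(sgl);
        }

        return 0;
}

Compared with the old void * signatures, passing an unrelated pointer type to sgl_phys()
now draws an incompatible-pointer-type diagnostic from the compiler instead of being
accepted silently; the cost is the explicit cast inside each implementation, which the
commit message argues is acceptable because it marks exactly where the conversion happens.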