From 553fdf3534f856edce73744fd54914b9b7a829cc Mon Sep 17 00:00:00 2001 From: Srirangan Date: Tue, 14 Aug 2018 15:21:38 +0530 Subject: gpu: nvgpu: common: mm: Fix MISRA 15.6 violations MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces, including single statement blocks. Fix errors due to single statement if blocks without braces, introducing the braces. JIRA NVGPU-671 Change-Id: I129cc170d27c7f1f2e193b326b95ebbe3c75ebab Signed-off-by: Srirangan Reviewed-on: https://git-master.nvidia.com/r/1795600 Reviewed-by: Adeel Raza GVS: Gerrit_Virtual_Submit Reviewed-by: Alex Waterman Reviewed-by: mobile promotions Tested-by: mobile promotions --- drivers/gpu/nvgpu/common/mm/bitmap_allocator.c | 53 ++++++++---- drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c | 38 ++++++--- drivers/gpu/nvgpu/common/mm/page_allocator.c | 110 ++++++++++++++++--------- drivers/gpu/nvgpu/common/mm/pd_cache.c | 40 +++++---- drivers/gpu/nvgpu/common/mm/vm_area.c | 34 +++++--- 5 files changed, 178 insertions(+), 97 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c index f75f9a1f..6cdb8f3b 100644 --- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -69,21 +69,24 @@ static u64 nvgpu_bitmap_alloc_fixed(struct nvgpu_allocator *__a, /* Compute the bit offset and make sure it's aligned to a block. */ offs = base >> a->blk_shift; - if (offs * a->blk_size != base) + if (offs * a->blk_size != base) { return 0; + } offs -= a->bit_offs; blks = len >> a->blk_shift; - if (blks * a->blk_size != len) + if (blks * a->blk_size != len) { blks++; + } alloc_lock(__a); /* Check if the space requested is already occupied. 
*/ ret = bitmap_find_next_zero_area(a->bitmap, a->num_bits, offs, blks, 0); - if (ret != offs) + if (ret != offs) { goto fail; + } bitmap_set(a->bitmap, offs, blks); @@ -115,14 +118,16 @@ static void nvgpu_bitmap_free_fixed(struct nvgpu_allocator *__a, u64 blks, offs; offs = base >> a->blk_shift; - if (WARN_ON(offs * a->blk_size != base)) + if (WARN_ON(offs * a->blk_size != base)) { return; + } offs -= a->bit_offs; blks = len >> a->blk_shift; - if (blks * a->blk_size != len) + if (blks * a->blk_size != len) { blks++; + } alloc_lock(__a); bitmap_clear(a->bitmap, offs, blks); @@ -155,8 +160,9 @@ static struct nvgpu_bitmap_alloc *find_alloc_metadata( struct nvgpu_rbtree_node *node = NULL; nvgpu_rbtree_search(addr, &node, a->allocs); - if (!node) + if (!node) { return NULL; + } alloc = nvgpu_bitmap_alloc_from_rbtree_node(node); @@ -174,8 +180,9 @@ static int __nvgpu_bitmap_store_alloc(struct nvgpu_bitmap_allocator *a, struct nvgpu_bitmap_alloc *alloc = nvgpu_kmem_cache_alloc(a->meta_data_cache); - if (!alloc) + if (!alloc) { return -ENOMEM; + } alloc->base = addr; alloc->length = len; @@ -197,8 +204,9 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len) blks = len >> a->blk_shift; - if (blks * a->blk_size != len) + if (blks * a->blk_size != len) { blks++; + } alloc_lock(__a); @@ -216,8 +224,9 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len) limit = find_next_bit(a->bitmap, a->num_bits, a->next_blk); offs = bitmap_find_next_zero_area(a->bitmap, limit, 0, blks, 0); - if (offs >= a->next_blk) + if (offs >= a->next_blk) { goto fail; + } } bitmap_set(a->bitmap, offs, blks); @@ -235,8 +244,9 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len) * data it needs around to successfully free this allocation. */ if (!(a->flags & GPU_ALLOC_NO_ALLOC_PAGE) && - __nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size)) + __nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size)) { goto fail_reset_bitmap; + } alloc_dbg(__a, "Alloc 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]", addr, len, blks, blks); @@ -270,8 +280,9 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *__a, u64 addr) } alloc = find_alloc_metadata(a, addr); - if (!alloc) + if (!alloc) { goto done; + } /* * Address comes from adjusted offset (i.e the bit offset with @@ -288,8 +299,9 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *__a, u64 addr) a->bytes_freed += alloc->length; done: - if (a->meta_data_cache && alloc) + if (a->meta_data_cache && alloc) { nvgpu_kmem_cache_free(a->meta_data_cache, alloc); + } alloc_unlock(__a); } @@ -366,16 +378,18 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, int err; struct nvgpu_bitmap_allocator *a; - if (WARN_ON(blk_size & (blk_size - 1))) + if (WARN_ON(blk_size & (blk_size - 1))) { return -EINVAL; + } /* * blk_size must be a power-of-2; base length also need to be aligned * to blk_size. 
*/ if (blk_size & (blk_size - 1) || - base & (blk_size - 1) || length & (blk_size - 1)) + base & (blk_size - 1) || length & (blk_size - 1)) { return -EINVAL; + } if (base == 0) { base = blk_size; @@ -383,12 +397,14 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, } a = nvgpu_kzalloc(g, sizeof(struct nvgpu_bitmap_allocator)); - if (!a) + if (!a) { return -ENOMEM; + } err = __nvgpu_alloc_common_init(__a, g, name, a, false, &bitmap_ops); - if (err) + if (err) { goto fail; + } if (!(flags & GPU_ALLOC_NO_ALLOC_PAGE)) { a->meta_data_cache = nvgpu_kmem_cache_create(g, @@ -431,8 +447,9 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, return 0; fail: - if (a->meta_data_cache) + if (a->meta_data_cache) { nvgpu_kmem_cache_destroy(a->meta_data_cache); + } nvgpu_kfree(g, a); return err; } diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c index c749c729..a0b9013f 100644 --- a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c @@ -1,7 +1,7 @@ /* * gk20a allocator * - * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -29,40 +29,45 @@ u64 nvgpu_alloc_length(struct nvgpu_allocator *a) { - if (a->ops->length) + if (a->ops->length) { return a->ops->length(a); + } return 0; } u64 nvgpu_alloc_base(struct nvgpu_allocator *a) { - if (a->ops->base) + if (a->ops->base) { return a->ops->base(a); + } return 0; } u64 nvgpu_alloc_initialized(struct nvgpu_allocator *a) { - if (!a->ops || !a->ops->inited) + if (!a->ops || !a->ops->inited) { return 0; + } return a->ops->inited(a); } u64 nvgpu_alloc_end(struct nvgpu_allocator *a) { - if (a->ops->end) + if (a->ops->end) { return a->ops->end(a); + } return 0; } u64 nvgpu_alloc_space(struct nvgpu_allocator *a) { - if (a->ops->space) + if (a->ops->space) { return a->ops->space(a); + } return 0; } @@ -80,8 +85,9 @@ void nvgpu_free(struct nvgpu_allocator *a, u64 addr) u64 nvgpu_alloc_fixed(struct nvgpu_allocator *a, u64 base, u64 len, u32 page_size) { - if (a->ops->alloc_fixed) + if (a->ops->alloc_fixed) { return a->ops->alloc_fixed(a, base, len, page_size); + } return 0; } @@ -93,15 +99,17 @@ void nvgpu_free_fixed(struct nvgpu_allocator *a, u64 base, u64 len) * nothing. The alternative would be to fall back on the regular * free but that may be harmful in unexpected ways. */ - if (a->ops->free_fixed) + if (a->ops->free_fixed) { a->ops->free_fixed(a, base, len); + } } int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a, struct nvgpu_alloc_carveout *co) { - if (a->ops->reserve_carveout) + if (a->ops->reserve_carveout) { return a->ops->reserve_carveout(a, co); + } return -ENODEV; } @@ -109,8 +117,9 @@ int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a, void nvgpu_alloc_release_carveout(struct nvgpu_allocator *a, struct nvgpu_alloc_carveout *co) { - if (a->ops->release_carveout) + if (a->ops->release_carveout) { a->ops->release_carveout(a, co); + } } void nvgpu_alloc_destroy(struct nvgpu_allocator *a) @@ -137,19 +146,22 @@ int __nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g, { int err; - if (!ops) + if (!ops) { return -EINVAL; + } /* * This is the bare minimum operations required for a sensible * allocator. 
*/ - if (!ops->alloc || !ops->free || !ops->fini) + if (!ops->alloc || !ops->free || !ops->fini) { return -EINVAL; + } err = nvgpu_mutex_init(&a->lock); - if (err) + if (err) { return err; + } a->g = g; a->ops = ops; diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c index 773d33ef..d001a2aa 100644 --- a/drivers/gpu/nvgpu/common/mm/page_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c @@ -264,8 +264,9 @@ static struct nvgpu_page_alloc *__find_page_alloc( struct nvgpu_rbtree_node *node = NULL; nvgpu_rbtree_search(addr, &node, a->allocs); - if (!node) + if (!node) { return NULL; + } alloc = nvgpu_page_alloc_from_rbtree_node(node); @@ -355,8 +356,9 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a, if (!slab_page) { slab_page = alloc_slab_page(a, slab); - if (!slab_page) + if (!slab_page) { return -ENOMEM; + } } /* @@ -376,12 +378,13 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a, bitmap_set(&slab_page->bitmap, offs, 1); slab_page->nr_objects_alloced++; - if (slab_page->nr_objects_alloced < slab_page->nr_objects) + if (slab_page->nr_objects_alloced < slab_page->nr_objects) { add_slab_page_to_partial(slab, slab_page); - else if (slab_page->nr_objects_alloced == slab_page->nr_objects) + } else if (slab_page->nr_objects_alloced == slab_page->nr_objects) { add_slab_page_to_full(slab, slab_page); - else + } else { BUG(); /* Should be impossible to hit this. */ + } /* * Handle building the nvgpu_page_alloc struct. We expect one sgl @@ -435,8 +438,9 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab( alloc->sgt.sgl = (struct nvgpu_sgl *)sgl; err = __do_slab_alloc(a, slab, alloc); - if (err) + if (err) { goto fail; + } palloc_dbg(a, "Alloc 0x%04llx sr=%d id=0x%010llx [slab]", len, slab_nr, alloc->base); @@ -445,10 +449,12 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab( return alloc; fail: - if (alloc) + if (alloc) { nvgpu_kmem_cache_free(a->alloc_cache, alloc); - if (sgl) + } + if (sgl) { nvgpu_kfree(a->owner->g, sgl); + } return NULL; } @@ -465,27 +471,30 @@ static void __nvgpu_free_slab(struct nvgpu_page_allocator *a, slab_page->nr_objects_alloced--; - if (slab_page->nr_objects_alloced == 0) + if (slab_page->nr_objects_alloced == 0) { new_state = SP_EMPTY; - else + } else { new_state = SP_PARTIAL; + } /* * Need to migrate the page to a different list. */ if (new_state != slab_page->state) { /* Delete - can't be in empty. */ - if (slab_page->state == SP_PARTIAL) + if (slab_page->state == SP_PARTIAL) { del_slab_page_from_partial(slab, slab_page); - else + } else { del_slab_page_from_full(slab, slab_page); + } /* And add. */ if (new_state == SP_EMPTY) { - if (nvgpu_list_empty(&slab->empty)) + if (nvgpu_list_empty(&slab->empty)) { add_slab_page_to_empty(slab, slab_page); - else + } else { free_slab_page(a, slab_page); + } } else { add_slab_page_to_partial(slab, slab_page); } @@ -515,8 +524,9 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages( int i = 0; alloc = nvgpu_kmem_cache_alloc(a->alloc_cache); - if (!alloc) + if (!alloc) { goto fail; + } memset(alloc, 0, sizeof(*alloc)); @@ -535,11 +545,13 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages( * requested size. The buddy allocator guarantees any given * single alloc is contiguous. 
*/ - if (a->flags & GPU_ALLOC_FORCE_CONTIG && i != 0) + if (a->flags & GPU_ALLOC_FORCE_CONTIG && i != 0) { goto fail_cleanup; + } - if (chunk_len > max_chunk_len) + if (chunk_len > max_chunk_len) { chunk_len = max_chunk_len; + } /* * Keep attempting to allocate in smaller chunks until the alloc @@ -582,10 +594,11 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages( * Build the singly linked list with a head node that is part of * the list. */ - if (prev_sgl) + if (prev_sgl) { prev_sgl->next = sgl; - else + } else { alloc->sgt.sgl = (struct nvgpu_sgl *)sgl; + } prev_sgl = sgl; @@ -671,10 +684,11 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *__a, u64 len) alloc_lock(__a); if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES && - real_len <= (a->page_size / 2)) + real_len <= (a->page_size / 2)) { alloc = __nvgpu_alloc_slab(a, real_len); - else + } else { alloc = __nvgpu_alloc_pages(a, real_len); + } if (!alloc) { alloc_unlock(__a); @@ -684,14 +698,16 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *__a, u64 len) __insert_page_alloc(a, alloc); a->nr_allocs++; - if (real_len > a->page_size / 2) + if (real_len > a->page_size / 2) { a->pages_alloced += alloc->length >> a->page_shift; + } alloc_unlock(__a); - if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) + if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) { return alloc->base; - else + } else { return (u64) (uintptr_t) alloc; + } } /* @@ -705,11 +721,12 @@ static void nvgpu_page_free(struct nvgpu_allocator *__a, u64 base) alloc_lock(__a); - if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) + if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) { alloc = __find_page_alloc(a, base); - else + } else { alloc = __find_page_alloc(a, ((struct nvgpu_page_alloc *)(uintptr_t)base)->base); + } if (!alloc) { palloc_dbg(a, "Hrm, found no alloc?"); @@ -743,8 +760,9 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed( alloc = nvgpu_kmem_cache_alloc(a->alloc_cache); sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl)); - if (!alloc || !sgl) + if (!alloc || !sgl) { goto fail; + } alloc->sgt.ops = &page_alloc_sgl_ops; alloc->base = nvgpu_alloc_fixed(&a->source_allocator, base, length, 0); @@ -765,10 +783,12 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed( return alloc; fail: - if (sgl) + if (sgl) { nvgpu_kfree(a->owner->g, sgl); - if (alloc) + } + if (alloc) { nvgpu_kmem_cache_free(a->alloc_cache, alloc); + } return NULL; } @@ -813,10 +833,11 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a, a->nr_fixed_allocs++; a->pages_alloced += pages; - if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) + if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) { return alloc->base; - else + } else { return (u64) (uintptr_t) alloc; + } } static void nvgpu_page_free_fixed(struct nvgpu_allocator *__a, @@ -829,8 +850,9 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *__a, if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) { alloc = __find_page_alloc(a, base); - if (!alloc) + if (!alloc) { goto done; + } } else { alloc = (struct nvgpu_page_alloc *) (uintptr_t) base; } @@ -963,8 +985,9 @@ static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a) a->slabs = nvgpu_kcalloc(nvgpu_alloc_to_gpu(a->owner), nr_slabs, sizeof(struct page_alloc_slab)); - if (!a->slabs) + if (!a->slabs) { return -ENOMEM; + } a->nr_slabs = nr_slabs; for (i = 0; i < nr_slabs; i++) { @@ -990,16 +1013,19 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, char buddy_name[sizeof(__a->name)]; int err; - if (blk_size < SZ_4K) + if (blk_size < SZ_4K) { return -EINVAL; + } a = 
nvgpu_kzalloc(g, sizeof(struct nvgpu_page_allocator)); - if (!a) + if (!a) { return -ENOMEM; + } err = __nvgpu_alloc_common_init(__a, g, name, a, false, &page_ops); - if (err) + if (err) { goto fail; + } a->alloc_cache = nvgpu_kmem_cache_create(g, sizeof(struct nvgpu_page_alloc)); @@ -1020,16 +1046,18 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, if (flags & GPU_ALLOC_4K_VIDMEM_PAGES && blk_size > SZ_4K) { err = nvgpu_page_alloc_init_slabs(a); - if (err) + if (err) { goto fail; + } } snprintf(buddy_name, sizeof(buddy_name), "%s-src", name); err = nvgpu_buddy_allocator_init(g, &a->source_allocator, buddy_name, base, length, blk_size, 0); - if (err) + if (err) { goto fail; + } #ifdef CONFIG_DEBUG_FS nvgpu_init_alloc_debug(g, __a); @@ -1044,10 +1072,12 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, return 0; fail: - if (a->alloc_cache) + if (a->alloc_cache) { nvgpu_kmem_cache_destroy(a->alloc_cache); - if (a->slab_page_cache) + } + if (a->slab_page_cache) { nvgpu_kmem_cache_destroy(a->slab_page_cache); + } nvgpu_kfree(g, a); return err; } diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c index 84f45826..db48d168 100644 --- a/drivers/gpu/nvgpu/common/mm/pd_cache.c +++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -95,8 +95,9 @@ int nvgpu_pd_cache_init(struct gk20a *g) * This gets called from finalize_poweron() so we need to make sure we * don't reinit the pd_cache over and over. */ - if (g->mm.pd_cache) + if (g->mm.pd_cache) { return 0; + } cache = nvgpu_kzalloc(g, sizeof(*cache)); if (!cache) { @@ -123,8 +124,9 @@ void nvgpu_pd_cache_fini(struct gk20a *g) int i; struct nvgpu_pd_cache *cache = g->mm.pd_cache; - if (!cache) + if (!cache) { return; + } for (i = 0; i < NVGPU_PD_CACHE_COUNT; i++) { WARN_ON(!nvgpu_list_empty(&cache->full[i])); @@ -164,8 +166,9 @@ int __nvgpu_pd_cache_alloc_direct(struct gk20a *g, * going to be virtually contiguous and we don't have to force the * underlying allocations to be physically contiguous as well. */ - if (!nvgpu_iommuable(g) && bytes > PAGE_SIZE) + if (!nvgpu_iommuable(g) && bytes > PAGE_SIZE) { flags = NVGPU_DMA_FORCE_CONTIGUOUS; + } err = nvgpu_dma_alloc_flags(g, flags, bytes, pd->mem); if (err) { @@ -244,8 +247,9 @@ static int nvgpu_pd_cache_alloc_from_partial(struct gk20a *g, mem_offs = bit_offs * pentry->pd_size; /* Bit map full. Somethings wrong. 
*/ - if (WARN_ON(bit_offs >= ffz(pentry_mask))) + if (WARN_ON(bit_offs >= ffz(pentry_mask))) { return -ENOMEM; + } pentry->alloc_map |= 1 << bit_offs; @@ -281,8 +285,9 @@ static struct nvgpu_pd_mem_entry *nvgpu_pd_cache_get_partial( struct nvgpu_list_node *list = &cache->partial[nvgpu_pd_cache_nr(bytes)]; - if (nvgpu_list_empty(list)) + if (nvgpu_list_empty(list)) { return NULL; + } return nvgpu_list_first_entry(list, nvgpu_pd_mem_entry, @@ -308,13 +313,15 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache, } pentry = nvgpu_pd_cache_get_partial(cache, bytes); - if (!pentry) + if (!pentry) { err = nvgpu_pd_cache_alloc_new(g, cache, pd, bytes); - else + } else { err = nvgpu_pd_cache_alloc_from_partial(g, cache, pentry, pd); + } - if (err) + if (err) { nvgpu_err(g, "PD-Alloc [C] Failed!"); + } return err; } @@ -335,14 +342,16 @@ int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes) */ if (bytes >= PAGE_SIZE) { err = __nvgpu_pd_cache_alloc_direct(g, pd, bytes); - if (err) + if (err) { return err; + } return 0; } - if (WARN_ON(!g->mm.pd_cache)) + if (WARN_ON(!g->mm.pd_cache)) { return -ENOMEM; + } nvgpu_mutex_acquire(&g->mm.pd_cache->lock); err = nvgpu_pd_cache_alloc(g, g->mm.pd_cache, pd, bytes); @@ -355,8 +364,9 @@ void __nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd) { pd_dbg(g, "PD-Free [D] 0x%p", pd->mem); - if (!pd->mem) + if (!pd->mem) { return; + } nvgpu_dma_free(g, pd->mem); nvgpu_kfree(g, pd->mem); @@ -407,8 +417,9 @@ static struct nvgpu_pd_mem_entry *nvgpu_pd_cache_look_up( nvgpu_rbtree_search((u64)(uintptr_t)pd->mem, &node, cache->mem_tree); - if (!node) + if (!node) { return NULL; + } return nvgpu_pd_mem_entry_from_tree_entry(node); } @@ -436,8 +447,9 @@ void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd) /* * Simple case: just DMA free. */ - if (!pd->cached) + if (!pd->cached) { return __nvgpu_pd_cache_free_direct(g, pd); + } nvgpu_mutex_acquire(&g->mm.pd_cache->lock); nvgpu_pd_cache_free(g, g->mm.pd_cache, pd); diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c index 5a28b7bc..b8fecbfc 100644 --- a/drivers/gpu/nvgpu/common/mm/vm_area.c +++ b/drivers/gpu/nvgpu/common/mm/vm_area.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -34,8 +34,9 @@ struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr) nvgpu_list_for_each_entry(vm_area, &vm->vm_area_list, nvgpu_vm_area, vm_area_list) { if (addr >= vm_area->addr && - addr < (u64)vm_area->addr + (u64)vm_area->size) + addr < (u64)vm_area->addr + (u64)vm_area->size) { return vm_area; + } } return NULL; @@ -105,12 +106,14 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, page_size, pages, *addr, flags); for (; pgsz_idx < gmmu_nr_page_sizes; pgsz_idx++) { - if (vm->gmmu_page_sizes[pgsz_idx] == page_size) + if (vm->gmmu_page_sizes[pgsz_idx] == page_size) { break; + } } - if (pgsz_idx > gmmu_page_size_big) + if (pgsz_idx > gmmu_page_size_big) { return -EINVAL; + } /* * pgsz_idx isn't likely to get too crazy, since it starts at 0 and @@ -119,26 +122,30 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, */ nvgpu_speculation_barrier(); - if (!vm->big_pages && pgsz_idx == gmmu_page_size_big) + if (!vm->big_pages && pgsz_idx == gmmu_page_size_big) { return -EINVAL; + } vm_area = nvgpu_kzalloc(g, sizeof(*vm_area)); - if (!vm_area) + if (!vm_area) { goto clean_up_err; + } vma = vm->vma[pgsz_idx]; - if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) + if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) { vaddr_start = nvgpu_alloc_fixed(vma, *addr, (u64)pages * (u64)page_size, page_size); - else + } else { vaddr_start = nvgpu_alloc(vma, (u64)pages * (u64)page_size); + } - if (!vaddr_start) + if (!vaddr_start) { goto clean_up_err; + } vm_area->flags = flags; vm_area->addr = vaddr_start; @@ -179,10 +186,12 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, return 0; clean_up_err: - if (vaddr_start) + if (vaddr_start) { nvgpu_free(vma, vaddr_start); - if (vm_area) + } + if (vm_area) { nvgpu_kfree(g, vm_area); + } return -ENOMEM; } @@ -219,7 +228,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr) } /* if this was a sparse mapping, free the va */ - if (vm_area->sparse) + if (vm_area->sparse) { g->ops.mm.gmmu_unmap(vm, vm_area->addr, vm_area->size, @@ -228,6 +237,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr) gk20a_mem_flag_none, true, NULL); + } nvgpu_mutex_release(&vm->update_gmmu_lock); -- cgit v1.2.2
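
For readers unfamiliar with the rule being enforced, a minimal standalone sketch of the pattern this patch applies follows; it is an illustration only and does not correspond to any particular hunk above. MISRA C:2012 Rule 15.6 requires the body of every selection and iteration statement to be a compound statement, so even a single-statement if body gains braces:

    /* Non-compliant with MISRA C:2012 Rule 15.6: single-statement body without braces. */
    if (err)
            return err;

    /* Compliant: the body is a compound statement, matching the style used throughout this patch. */
    if (err) {
            return err;
    }

Beyond satisfying the rule, the braces also guard against a later edit adding a second statement that would silently fall outside the intended body.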