author    Alex Waterman <alexw@nvidia.com>    2018-09-04 20:11:22 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-09-05 23:41:19 -0400
commit    2b2bde04e14135cae5f7433c755e6b8d70f88abb (patch)
tree      c2189ffa556f6b84628d177169204b40b5b1c5bb
parent    3b413d58fa349eca1da9577359546c39effa2c8c (diff)
gpu: nvgpu: Fix MISRA 21.2 violations (page_allocator.c)
MISRA 21.2 states that we may not use reserved identifiers; since all
identifiers beginning with '_' are reserved by libc, the usage of '__'
as a prefix is disallowed. This change fixes uses of '__' prepended to
static functions defined in page_allocator.c. Since these functions
were static and had no naming conflicts, the '__' was simply removed.
For free_pages() this also needed an nvgpu-specific prefix, since
free_pages() conflicts with a Linux kernel function name.

JIRA NVGPU-1029

Change-Id: I8b96e55244bd2de166f7dcc64d2b30427757ea8f
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1812826
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
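For context, a minimal sketch of the rule and of the two rename patterns
this change applies. The helper names below are hypothetical
illustrations, not code from page_allocator.c:

/*
 * Illustrative sketch of MISRA C:2012 Rule 21.2 (hypothetical names,
 * not from page_allocator.c). Identifiers beginning with an underscore
 * are reserved for the implementation, so a '__' prefix violates the
 * rule even on a file-local helper.
 */

/* Non-compliant: the '__' prefix makes this a reserved identifier. */
static int __sum_pages(int a, int b)
{
	return a + b;
}

/* Compliant: the function is static, so simply dropping the prefix
 * cannot clash with a symbol in any other translation unit. */
static int sum_pages(int a, int b)
{
	return a + b;
}

/*
 * When the unprefixed name would collide with an existing kernel
 * symbol, as free_pages() does in Linux, a subsystem prefix such as
 * 'nvgpu_' resolves the conflict instead.
 */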
-rw-r--r--    drivers/gpu/nvgpu/common/mm/page_allocator.c    58
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 3225f170..e559cb60 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -226,9 +226,9 @@ static void nvgpu_page_alloc_sgl_proper_free(struct gk20a *g,
 	}
 }
 
-static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
-			       struct nvgpu_page_alloc *alloc,
-			       bool free_buddy_alloc)
+static void nvgpu_page_alloc_free_pages(struct nvgpu_page_allocator *a,
+					struct nvgpu_page_alloc *alloc,
+					bool free_buddy_alloc)
 {
 	struct nvgpu_sgl *sgl = alloc->sgt.sgl;
 	struct gk20a *g = a->owner->g;
@@ -246,8 +246,8 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
 	nvgpu_kmem_cache_free(a->alloc_cache, alloc);
 }
 
-static int __insert_page_alloc(struct nvgpu_page_allocator *a,
-			       struct nvgpu_page_alloc *alloc)
+static int insert_page_alloc(struct nvgpu_page_allocator *a,
+			     struct nvgpu_page_alloc *alloc)
 {
 	alloc->tree_entry.key_start = alloc->base;
 	alloc->tree_entry.key_end = alloc->base + alloc->length;
@@ -256,7 +256,7 @@ static int __insert_page_alloc(struct nvgpu_page_allocator *a,
 	return 0;
 }
 
-static struct nvgpu_page_alloc *__find_page_alloc(
+static struct nvgpu_page_alloc *find_page_alloc(
 	struct nvgpu_page_allocator *a,
 	u64 addr)
 {
@@ -329,9 +329,9 @@ static void free_slab_page(struct nvgpu_page_allocator *a,
 /*
  * This expects @alloc to have 1 empty sgl_entry ready for usage.
  */
-static int __do_slab_alloc(struct nvgpu_page_allocator *a,
-			   struct page_alloc_slab *slab,
-			   struct nvgpu_page_alloc *alloc)
+static int do_slab_alloc(struct nvgpu_page_allocator *a,
+			 struct page_alloc_slab *slab,
+			 struct nvgpu_page_alloc *alloc)
 {
 	struct page_alloc_slab_page *slab_page = NULL;
 	struct nvgpu_mem_sgl *sgl;
@@ -407,7 +407,7 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
 /*
  * Allocate from a slab instead of directly from the page allocator.
  */
-static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
+static struct nvgpu_page_alloc *nvgpu_alloc_slab(
 	struct nvgpu_page_allocator *a, u64 len)
 {
 	int err, slab_nr;
@@ -437,7 +437,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 	}
 
 	alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
-	err = __do_slab_alloc(a, slab, alloc);
+	err = do_slab_alloc(a, slab, alloc);
 	if (err) {
 		goto fail;
 	}
@@ -458,8 +458,8 @@ fail:
 	return NULL;
 }
 
-static void __nvgpu_free_slab(struct nvgpu_page_allocator *a,
-			      struct nvgpu_page_alloc *alloc)
+static void nvgpu_free_slab(struct nvgpu_page_allocator *a,
+			    struct nvgpu_page_alloc *alloc)
 {
 	struct page_alloc_slab_page *slab_page = alloc->slab_page;
 	struct page_alloc_slab *slab = slab_page->owner;
@@ -503,7 +503,7 @@ static void __nvgpu_free_slab(struct nvgpu_page_allocator *a,
 	/*
 	 * Now handle the page_alloc.
 	 */
-	__nvgpu_free_pages(a, alloc, false);
+	nvgpu_page_alloc_free_pages(a, alloc, false);
 	a->nr_slab_frees++;
 
 	return;
@@ -515,7 +515,7 @@ static void __nvgpu_free_slab(struct nvgpu_page_allocator *a,
  * fragmentation in the space this allocator will collate smaller non-contiguous
  * allocations together if necessary.
  */
-static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
+static struct nvgpu_page_alloc *do_nvgpu_alloc_pages(
 	struct nvgpu_page_allocator *a, u64 pages)
 {
 	struct nvgpu_page_alloc *alloc;
@@ -626,7 +626,7 @@ fail:
 	return NULL;
 }
 
-static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
+static struct nvgpu_page_alloc *nvgpu_alloc_pages(
 	struct nvgpu_page_allocator *a, u64 len)
 {
 	struct gk20a *g = a->owner->g;
@@ -637,7 +637,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
 
 	pages = ALIGN(len, a->page_size) >> a->page_shift;
 
-	alloc = __do_nvgpu_alloc_pages(a, pages);
+	alloc = do_nvgpu_alloc_pages(a, pages);
 	if (!alloc) {
 		palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)",
 			   pages << a->page_shift, pages);
@@ -685,9 +685,9 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len)
 	alloc_lock(na);
 	if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES &&
 	    real_len <= (a->page_size / 2U)) {
-		alloc = __nvgpu_alloc_slab(a, real_len);
+		alloc = nvgpu_alloc_slab(a, real_len);
 	} else {
-		alloc = __nvgpu_alloc_pages(a, real_len);
+		alloc = nvgpu_alloc_pages(a, real_len);
 	}
 
 	if (!alloc) {
@@ -695,7 +695,7 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len)
 		return 0;
 	}
 
-	__insert_page_alloc(a, alloc);
+	insert_page_alloc(a, alloc);
 
 	a->nr_allocs++;
 	if (real_len > a->page_size / 2U) {
@@ -722,9 +722,9 @@ static void nvgpu_page_free(struct nvgpu_allocator *na, u64 base)
 	alloc_lock(na);
 
 	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
-		alloc = __find_page_alloc(a, base);
+		alloc = find_page_alloc(a, base);
 	} else {
-		alloc = __find_page_alloc(a,
+		alloc = find_page_alloc(a,
 			((struct nvgpu_page_alloc *)(uintptr_t)base)->base);
 	}
 
@@ -742,17 +742,17 @@ static void nvgpu_page_free(struct nvgpu_allocator *na, u64 base)
 	 * Frees *alloc.
 	 */
 	if (alloc->slab_page) {
-		__nvgpu_free_slab(a, alloc);
+		nvgpu_free_slab(a, alloc);
 	} else {
 		a->pages_freed += (alloc->length >> a->page_shift);
-		__nvgpu_free_pages(a, alloc, true);
+		nvgpu_page_alloc_free_pages(a, alloc, true);
 	}
 
 done:
 	alloc_unlock(na);
 }
 
-static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
+static struct nvgpu_page_alloc *nvgpu_alloc_pages_fixed(
 	struct nvgpu_page_allocator *a, u64 base, u64 length, u32 unused)
 {
 	struct nvgpu_page_alloc *alloc;
@@ -810,13 +810,13 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *na,
 
 	alloc_lock(na);
 
-	alloc = __nvgpu_alloc_pages_fixed(a, base, aligned_len, 0);
+	alloc = nvgpu_alloc_pages_fixed(a, base, aligned_len, 0);
 	if (!alloc) {
 		alloc_unlock(na);
 		return 0;
 	}
 
-	__insert_page_alloc(a, alloc);
+	insert_page_alloc(a, alloc);
 	alloc_unlock(na);
 
 	palloc_dbg(a, "Alloc [fixed] @ 0x%010llx + 0x%llx (%llu)",
@@ -849,7 +849,7 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *na,
 	alloc_lock(na);
 
 	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
-		alloc = __find_page_alloc(a, base);
+		alloc = find_page_alloc(a, base);
 		if (!alloc) {
 			goto done;
 		}
@@ -869,7 +869,7 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *na,
 	 * allocs. This would have to be updated if the underlying
 	 * allocator were to change.
 	 */
-	__nvgpu_free_pages(a, alloc, true);
+	nvgpu_page_alloc_free_pages(a, alloc, true);
 
 done:
 	alloc_unlock(na);