author		Alex Waterman <alexw@nvidia.com>	2018-08-20 17:36:22 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-22 20:33:39 -0400
commit		d5473e225decc74f0d6bb015d06365dad15828d0 (patch)
tree		5358ae2cc67a7a4830328a902daca7549b2d6dc2
parent		a75becab204e8af9e9f0b0939dad118b3e44b895 (diff)
gpu: nvgpu: Fix MISRA 21.2 violations [3/3]
MISRA 21.2 states that we may not use reserved identifiers; since all
identifiers beginning with '_' are reserved by libc, the usage of '__'
as a prefix is disallowed. This change removes the usage of the '__a'
argument scattered throughout the nvgpu allocator code.

JIRA NVGPU-1029

Change-Id: I553a66a3d7d2c6bb21ba0a45e29a1d01f7b89f49
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1803353
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--	drivers/gpu/nvgpu/common/mm/page_allocator.c	110
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/allocator.h	2
2 files changed, 56 insertions(+), 56 deletions(-)
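For readers unfamiliar with the rule, here is a minimal, self-contained C sketch of the non-compliant pattern and the rename this patch applies. The demo_allocator and demo_alloc names are hypothetical stand-ins, not the real nvgpu definitions; C99 7.1.3 reserves all identifiers beginning with a double underscore for the implementation, and MISRA C:2012 Rule 21.2 forbids declaring such reserved identifiers.

#include <stdint.h>

/* Hypothetical stand-in for struct nvgpu_allocator. */
struct demo_allocator {
        void *priv;
};

/*
 * Non-compliant: '__a' begins with '__', an identifier space that
 * C99 7.1.3 reserves for the implementation, so MISRA 21.2 flags it:
 *
 *     static uint64_t demo_alloc(struct demo_allocator *__a, uint64_t len);
 */

/* Compliant: the same parameter under an unreserved name ('na'), the
 * mechanical rename this patch performs throughout the allocator code. */
static uint64_t demo_alloc(struct demo_allocator *na, uint64_t len)
{
        (void)na;       /* the real allocator would consult its state here */
        return len;     /* placeholder result */
}

int main(void)
{
        struct demo_allocator a = { 0 };
        return (int)demo_alloc(&a, 0);
}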
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index d001a2aa..f6d70435 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -669,9 +669,9 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
  * precedent in the dma_alloc APIs, though, it's really just an annoying
  * artifact of the fact that the nvgpu_alloc() API requires a u64 return type.
  */
-static u64 nvgpu_page_alloc(struct nvgpu_allocator *__a, u64 len)
+static u64 nvgpu_page_alloc(struct nvgpu_allocator *na, u64 len)
 {
-        struct nvgpu_page_allocator *a = page_allocator(__a);
+        struct nvgpu_page_allocator *a = page_allocator(na);
         struct nvgpu_page_alloc *alloc = NULL;
         u64 real_len;
 
@@ -682,7 +682,7 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *__a, u64 len)
         real_len = a->flags & GPU_ALLOC_FORCE_CONTIG ?
                 roundup_pow_of_two(len) : len;
 
-        alloc_lock(__a);
+        alloc_lock(na);
         if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES &&
             real_len <= (a->page_size / 2)) {
                 alloc = __nvgpu_alloc_slab(a, real_len);
@@ -691,7 +691,7 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *__a, u64 len)
         }
 
         if (!alloc) {
-                alloc_unlock(__a);
+                alloc_unlock(na);
                 return 0;
         }
 
@@ -701,7 +701,7 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *__a, u64 len)
         if (real_len > a->page_size / 2) {
                 a->pages_alloced += alloc->length >> a->page_shift;
         }
-        alloc_unlock(__a);
+        alloc_unlock(na);
 
         if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
                 return alloc->base;
@@ -714,12 +714,12 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *__a, u64 len)
  * Note: this will remove the nvgpu_page_alloc struct from the RB tree
  * if it's found.
  */
-static void nvgpu_page_free(struct nvgpu_allocator *__a, u64 base)
+static void nvgpu_page_free(struct nvgpu_allocator *na, u64 base)
 {
-        struct nvgpu_page_allocator *a = page_allocator(__a);
+        struct nvgpu_page_allocator *a = page_allocator(na);
         struct nvgpu_page_alloc *alloc;
 
-        alloc_lock(__a);
+        alloc_lock(na);
 
         if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
                 alloc = __find_page_alloc(a, base);
@@ -749,7 +749,7 @@ static void nvgpu_page_free(struct nvgpu_allocator *__a, u64 base)
         }
 
 done:
-        alloc_unlock(__a);
+        alloc_unlock(na);
 }
 
 static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
@@ -795,10 +795,10 @@ fail:
 /*
  * @page_size is ignored.
  */
-static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
+static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *na,
                                   u64 base, u64 len, u32 page_size)
 {
-        struct nvgpu_page_allocator *a = page_allocator(__a);
+        struct nvgpu_page_allocator *a = page_allocator(na);
         struct nvgpu_page_alloc *alloc = NULL;
         struct nvgpu_sgl *sgl;
         struct gk20a *g = a->owner->g;
@@ -808,16 +808,16 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
         aligned_len = ALIGN(len, a->page_size);
         pages = aligned_len >> a->page_shift;
 
-        alloc_lock(__a);
+        alloc_lock(na);
 
         alloc = __nvgpu_alloc_pages_fixed(a, base, aligned_len, 0);
         if (!alloc) {
-                alloc_unlock(__a);
+                alloc_unlock(na);
                 return 0;
         }
 
         __insert_page_alloc(a, alloc);
-        alloc_unlock(__a);
+        alloc_unlock(na);
 
         palloc_dbg(a, "Alloc [fixed] @ 0x%010llx + 0x%llx (%llu)",
                    alloc->base, aligned_len, pages);
@@ -840,13 +840,13 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
         }
 }
 
-static void nvgpu_page_free_fixed(struct nvgpu_allocator *__a,
+static void nvgpu_page_free_fixed(struct nvgpu_allocator *na,
                                   u64 base, u64 len)
 {
-        struct nvgpu_page_allocator *a = page_allocator(__a);
+        struct nvgpu_page_allocator *a = page_allocator(na);
         struct nvgpu_page_alloc *alloc;
 
-        alloc_lock(__a);
+        alloc_lock(na);
 
         if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
                 alloc = __find_page_alloc(a, base);
@@ -872,75 +872,75 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *__a,
         __nvgpu_free_pages(a, alloc, true);
 
 done:
-        alloc_unlock(__a);
+        alloc_unlock(na);
 }
 
-static void nvgpu_page_allocator_destroy(struct nvgpu_allocator *__a)
+static void nvgpu_page_allocator_destroy(struct nvgpu_allocator *na)
 {
-        struct nvgpu_page_allocator *a = page_allocator(__a);
+        struct nvgpu_page_allocator *a = page_allocator(na);
 
-        alloc_lock(__a);
-        nvgpu_kfree(nvgpu_alloc_to_gpu(__a), a);
-        __a->priv = NULL;
-        alloc_unlock(__a);
+        alloc_lock(na);
+        nvgpu_kfree(nvgpu_alloc_to_gpu(na), a);
+        na->priv = NULL;
+        alloc_unlock(na);
 }
 
 #ifdef __KERNEL__
-static void nvgpu_page_print_stats(struct nvgpu_allocator *__a,
+static void nvgpu_page_print_stats(struct nvgpu_allocator *na,
                                    struct seq_file *s, int lock)
 {
-        struct nvgpu_page_allocator *a = page_allocator(__a);
+        struct nvgpu_page_allocator *a = page_allocator(na);
         int i;
 
         if (lock)
-                alloc_lock(__a);
+                alloc_lock(na);
 
-        __alloc_pstat(s, __a, "Page allocator:");
-        __alloc_pstat(s, __a, " allocs %lld", a->nr_allocs);
-        __alloc_pstat(s, __a, " frees %lld", a->nr_frees);
-        __alloc_pstat(s, __a, " fixed_allocs %lld", a->nr_fixed_allocs);
-        __alloc_pstat(s, __a, " fixed_frees %lld", a->nr_fixed_frees);
-        __alloc_pstat(s, __a, " slab_allocs %lld", a->nr_slab_allocs);
-        __alloc_pstat(s, __a, " slab_frees %lld", a->nr_slab_frees);
-        __alloc_pstat(s, __a, " pages alloced %lld", a->pages_alloced);
-        __alloc_pstat(s, __a, " pages freed %lld", a->pages_freed);
-        __alloc_pstat(s, __a, "");
+        __alloc_pstat(s, na, "Page allocator:");
+        __alloc_pstat(s, na, " allocs %lld", a->nr_allocs);
+        __alloc_pstat(s, na, " frees %lld", a->nr_frees);
+        __alloc_pstat(s, na, " fixed_allocs %lld", a->nr_fixed_allocs);
+        __alloc_pstat(s, na, " fixed_frees %lld", a->nr_fixed_frees);
+        __alloc_pstat(s, na, " slab_allocs %lld", a->nr_slab_allocs);
+        __alloc_pstat(s, na, " slab_frees %lld", a->nr_slab_frees);
+        __alloc_pstat(s, na, " pages alloced %lld", a->pages_alloced);
+        __alloc_pstat(s, na, " pages freed %lld", a->pages_freed);
+        __alloc_pstat(s, na, "");
 
-        __alloc_pstat(s, __a, "Page size: %lld KB",
+        __alloc_pstat(s, na, "Page size: %lld KB",
                       a->page_size >> 10);
-        __alloc_pstat(s, __a, "Total pages: %lld (%lld MB)",
+        __alloc_pstat(s, na, "Total pages: %lld (%lld MB)",
                       a->length / a->page_size,
                       a->length >> 20);
-        __alloc_pstat(s, __a, "Available pages: %lld (%lld MB)",
+        __alloc_pstat(s, na, "Available pages: %lld (%lld MB)",
                       nvgpu_alloc_space(&a->source_allocator) / a->page_size,
                       nvgpu_alloc_space(&a->source_allocator) >> 20);
-        __alloc_pstat(s, __a, "");
+        __alloc_pstat(s, na, "");
 
         /*
          * Slab info.
          */
         if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES) {
-                __alloc_pstat(s, __a, "Slabs:");
-                __alloc_pstat(s, __a, " size empty partial full");
-                __alloc_pstat(s, __a, " ---- ----- ------- ----");
+                __alloc_pstat(s, na, "Slabs:");
+                __alloc_pstat(s, na, " size empty partial full");
+                __alloc_pstat(s, na, " ---- ----- ------- ----");
 
                 for (i = 0; i < a->nr_slabs; i++) {
                         struct page_alloc_slab *slab = &a->slabs[i];
 
-                        __alloc_pstat(s, __a, " %-9u %-9d %-9u %u",
+                        __alloc_pstat(s, na, " %-9u %-9d %-9u %u",
                                       slab->slab_size,
                                       slab->nr_empty, slab->nr_partial,
                                       slab->nr_full);
                 }
-                __alloc_pstat(s, __a, "");
+                __alloc_pstat(s, na, "");
         }
 
-        __alloc_pstat(s, __a, "Source alloc: %s",
+        __alloc_pstat(s, na, "Source alloc: %s",
                       a->source_allocator.name);
         nvgpu_alloc_print_stats(&a->source_allocator, s, lock);
 
         if (lock)
-                alloc_unlock(__a);
+                alloc_unlock(na);
 }
 #endif
 
@@ -1005,12 +1005,12 @@ static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a)
         return 0;
 }
 
-int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
+int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
                               const char *name, u64 base, u64 length,
                               u64 blk_size, u64 flags)
 {
         struct nvgpu_page_allocator *a;
-        char buddy_name[sizeof(__a->name)];
+        char buddy_name[sizeof(na->name)];
         int err;
 
         if (blk_size < SZ_4K) {
@@ -1022,7 +1022,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
                 return -ENOMEM;
         }
 
-        err = __nvgpu_alloc_common_init(__a, g, name, a, false, &page_ops);
+        err = __nvgpu_alloc_common_init(na, g, name, a, false, &page_ops);
         if (err) {
                 goto fail;
         }
@@ -1041,7 +1041,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
         a->page_size = blk_size;
         a->page_shift = __ffs(blk_size);
         a->allocs = NULL;
-        a->owner = __a;
+        a->owner = na;
         a->flags = flags;
 
         if (flags & GPU_ALLOC_4K_VIDMEM_PAGES && blk_size > SZ_4K) {
@@ -1060,7 +1060,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
         }
 
 #ifdef CONFIG_DEBUG_FS
-        nvgpu_init_alloc_debug(g, __a);
+        nvgpu_init_alloc_debug(g, na);
 #endif
         palloc_dbg(a, "New allocator: type page");
         palloc_dbg(a, " base 0x%llx", a->base);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/allocator.h b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
index 839712db..a38e8d51 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/allocator.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
@@ -223,7 +223,7 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 /*
  * Page allocator initializers.
  */
-int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *a,
+int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
                               const char *name, u64 base, u64 length,
                               u64 blk_size, u64 flags);
 