author	Alex Waterman <alexw@nvidia.com>	2018-08-20 17:35:29 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-22 20:33:35 -0400
commit	a75becab204e8af9e9f0b0939dad118b3e44b895 (patch)
tree	2a5ceef8293b9f5d73cf42451700457d410fcda0
parent	54b11a456cc5d9a13633b50c2bf167e0d5dafef5 (diff)
gpu: nvgpu: Fix MISRA 21.2 violations [2/3]
MISRA 21.2 states that we may not use reserved identifiers; since all
identifiers beginning with '_' are reserved by libc, the usage of '__'
as a prefix is disallowed. This change removes the usage of the '__a'
argument scattered throughout the nvgpu allocator code.

JIRA NVGPU-1029

Change-Id: Ic39213ab800e92c6815ce5b9deb22520aa6d0630
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1803352
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
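To make the rule concrete, below is a minimal sketch of the rename pattern
this patch applies throughout the allocator code. The names demo_allocator,
demo_destroy_old, and demo_destroy are hypothetical, chosen for illustration
only; they do not appear in the patch:

    /*
     * Non-compliant: the parameter name '__a' begins with an underscore,
     * placing it in the namespace reserved for the implementation
     * (C99 7.1.3), which MISRA C:2012 Rule 21.2 forbids.
     */
    struct demo_allocator {
            void *priv;
    };

    static void demo_destroy_old(struct demo_allocator *__a)
    {
            void *a = __a->priv;    /* reserved identifier in use */
            (void)a;
    }

    /*
     * Compliant: the same function with the parameter renamed to 'na';
     * behavior is unchanged, only the identifier differs.
     */
    static void demo_destroy(struct demo_allocator *na)
    {
            void *a = na->priv;
            (void)a;
    }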
-rw-r--r--	drivers/gpu/nvgpu/common/mm/buddy_allocator.c	152
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/allocator.h	4
2 files changed, 78 insertions, 78 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index f8c97839..e684e637 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -255,18 +255,18 @@ cleanup:
 /*
  * Clean up and destroy the passed allocator.
  */
-static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *__a)
+static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na)
 {
 	int i;
 	struct nvgpu_rbtree_node *node = NULL;
 	struct nvgpu_buddy *bud;
 	struct nvgpu_fixed_alloc *falloc;
-	struct nvgpu_buddy_allocator *a = __a->priv;
+	struct nvgpu_buddy_allocator *a = na->priv;
 
-	alloc_lock(__a);
+	alloc_lock(na);
 
 #ifdef CONFIG_DEBUG_FS
-	nvgpu_fini_alloc_debug(__a);
+	nvgpu_fini_alloc_debug(na);
 #endif
 
 	/*
@@ -311,19 +311,19 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *__a)
 		}
 
 		if (a->buddy_list_len[i] != 0) {
-			nvgpu_info(__a->g,
+			nvgpu_info(na->g,
 				"Excess buddies!!! (%d: %llu)",
 				i, a->buddy_list_len[i]);
 			BUG();
 		}
 		if (a->buddy_list_split[i] != 0) {
-			nvgpu_info(__a->g,
+			nvgpu_info(na->g,
 				"Excess split nodes!!! (%d: %llu)",
 				i, a->buddy_list_split[i]);
 			BUG();
 		}
 		if (a->buddy_list_alloced[i] != 0) {
-			nvgpu_info(__a->g,
+			nvgpu_info(na->g,
 				"Excess alloced nodes!!! (%d: %llu)",
 				i, a->buddy_list_alloced[i]);
 			BUG();
@@ -331,9 +331,9 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *__a)
 	}
 
 	nvgpu_kmem_cache_destroy(a->buddy_cache);
-	nvgpu_kfree(nvgpu_alloc_to_gpu(__a), a);
+	nvgpu_kfree(nvgpu_alloc_to_gpu(na), a);
 
-	alloc_unlock(__a);
+	alloc_unlock(na);
 }
 
 /*
@@ -814,18 +814,18 @@ static void __balloc_do_free_fixed(struct nvgpu_buddy_allocator *a,
 /*
  * Allocate memory from the passed allocator.
  */
-static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *__a, u64 len)
+static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *na, u64 len)
 {
 	u64 order, addr;
 	int pte_size;
-	struct nvgpu_buddy_allocator *a = __a->priv;
+	struct nvgpu_buddy_allocator *a = na->priv;
 
-	alloc_lock(__a);
+	alloc_lock(na);
 
 	order = balloc_get_order(a, len);
 
 	if (order > a->max_order) {
-		alloc_unlock(__a);
+		alloc_unlock(na);
 		alloc_dbg(balloc_owner(a), "Alloc fail");
 		return 0;
 	}
@@ -853,22 +853,22 @@ static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *__a, u64 len)
 
 	a->alloc_made = 1;
 
-	alloc_unlock(__a);
+	alloc_unlock(na);
 
 	return addr;
 }
 
 /*
- * Requires @__a to be locked.
+ * Requires @na to be locked.
  */
-static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
+static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
 		u64 base, u64 len, u32 page_size)
 {
 	int pte_size = BALLOC_PTE_SIZE_ANY;
 	u64 ret, real_bytes = 0;
 	struct nvgpu_buddy *bud;
 	struct nvgpu_fixed_alloc *falloc = NULL;
-	struct nvgpu_buddy_allocator *a = __a->priv;
+	struct nvgpu_buddy_allocator *a = na->priv;
 
 	/* If base isn't aligned to an order 0 block, fail. */
 	if (base & (a->blk_size - 1)) {
@@ -890,7 +890,7 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
 		}
 	}
 
-	falloc = nvgpu_kmalloc(nvgpu_alloc_to_gpu(__a), sizeof(*falloc));
+	falloc = nvgpu_kmalloc(nvgpu_alloc_to_gpu(na), sizeof(*falloc));
 	if (!falloc) {
 		goto fail;
 	}
@@ -929,9 +929,9 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
 	return base;
 
 fail_unlock:
-	alloc_unlock(__a);
+	alloc_unlock(na);
 fail:
-	nvgpu_kfree(nvgpu_alloc_to_gpu(__a), falloc);
+	nvgpu_kfree(nvgpu_alloc_to_gpu(na), falloc);
 	return 0;
 }
 
@@ -943,16 +943,16 @@ fail:
  *
  * Please do not use this function unless _absolutely_ necessary.
  */
-static u64 nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
+static u64 nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
 		u64 base, u64 len, u32 page_size)
 {
 	u64 alloc;
-	struct nvgpu_buddy_allocator *a = __a->priv;
+	struct nvgpu_buddy_allocator *a = na->priv;
 
-	alloc_lock(__a);
-	alloc = __nvgpu_balloc_fixed_buddy(__a, base, len, page_size);
+	alloc_lock(na);
+	alloc = __nvgpu_balloc_fixed_buddy(na, base, len, page_size);
 	a->alloc_made = 1;
-	alloc_unlock(__a);
+	alloc_unlock(na);
 
 	return alloc;
 }
@@ -960,17 +960,17 @@ static u64 nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
 /*
  * Free the passed allocation.
  */
-static void nvgpu_buddy_bfree(struct nvgpu_allocator *__a, u64 addr)
+static void nvgpu_buddy_bfree(struct nvgpu_allocator *na, u64 addr)
 {
 	struct nvgpu_buddy *bud;
 	struct nvgpu_fixed_alloc *falloc;
-	struct nvgpu_buddy_allocator *a = __a->priv;
+	struct nvgpu_buddy_allocator *a = na->priv;
 
 	if (!addr) {
 		return;
 	}
 
-	alloc_lock(__a);
+	alloc_lock(na);
 
 	/*
 	 * First see if this is a fixed alloc. If not fall back to a regular
@@ -996,7 +996,7 @@ static void nvgpu_buddy_bfree(struct nvgpu_allocator *__a, u64 addr)
 	balloc_coalesce(a, bud);
 
 done:
-	alloc_unlock(__a);
+	alloc_unlock(na);
 	alloc_dbg(balloc_owner(a), "Free 0x%llx", addr);
 	return;
 }
@@ -1031,10 +1031,10 @@ static bool nvgpu_buddy_reserve_is_possible(struct nvgpu_buddy_allocator *a,
  * Carveouts can only be reserved before any regular allocations have been
  * made.
  */
-static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *__a,
+static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *na,
 		struct nvgpu_alloc_carveout *co)
 {
-	struct nvgpu_buddy_allocator *a = __a->priv;
+	struct nvgpu_buddy_allocator *a = na->priv;
 	u64 addr;
 	int err = 0;
 
@@ -1043,7 +1043,7 @@ static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *__a,
 		return -EINVAL;
 	}
 
-	alloc_lock(__a);
+	alloc_lock(na);
 
 	if (!nvgpu_buddy_reserve_is_possible(a, co)) {
 		err = -EBUSY;
@@ -1051,10 +1051,10 @@ static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *__a,
 	}
 
 	/* Should not be possible to fail... */
-	addr = __nvgpu_balloc_fixed_buddy(__a, co->base, co->length, 0);
+	addr = __nvgpu_balloc_fixed_buddy(na, co->base, co->length, 0);
 	if (!addr) {
 		err = -ENOMEM;
-		nvgpu_warn(__a->g,
+		nvgpu_warn(na->g,
 			"%s: Failed to reserve a valid carveout!",
 			__func__);
 		goto done;
@@ -1063,22 +1063,22 @@ static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *__a,
 	nvgpu_list_add(&co->co_entry, &a->co_list);
 
 done:
-	alloc_unlock(__a);
+	alloc_unlock(na);
 	return err;
 }
 
 /*
  * Carveouts can be release at any time.
  */
-static void nvgpu_buddy_release_co(struct nvgpu_allocator *__a,
+static void nvgpu_buddy_release_co(struct nvgpu_allocator *na,
 		struct nvgpu_alloc_carveout *co)
 {
-	alloc_lock(__a);
+	alloc_lock(na);
 
 	nvgpu_list_del(&co->co_entry);
-	nvgpu_free(__a, co->base);
+	nvgpu_free(na, co->base);
 
-	alloc_unlock(__a);
+	alloc_unlock(na);
 }
 
 static u64 nvgpu_buddy_alloc_length(struct nvgpu_allocator *a)
@@ -1130,41 +1130,41 @@ static u64 nvgpu_buddy_alloc_space(struct nvgpu_allocator *a)
  * stats are printed to the kernel log. This lets this code be used for
  * debugging purposes internal to the allocator.
  */
-static void nvgpu_buddy_print_stats(struct nvgpu_allocator *__a,
+static void nvgpu_buddy_print_stats(struct nvgpu_allocator *na,
 		struct seq_file *s, int lock)
 {
 	int i = 0;
 	struct nvgpu_rbtree_node *node = NULL;
 	struct nvgpu_fixed_alloc *falloc;
 	struct nvgpu_alloc_carveout *tmp;
-	struct nvgpu_buddy_allocator *a = __a->priv;
+	struct nvgpu_buddy_allocator *a = na->priv;
 
-	__alloc_pstat(s, __a, "base = %llu, limit = %llu, blk_size = %llu",
+	__alloc_pstat(s, na, "base = %llu, limit = %llu, blk_size = %llu",
 		a->base, a->length, a->blk_size);
-	__alloc_pstat(s, __a, "Internal params:");
-	__alloc_pstat(s, __a, " start = 0x%llx", a->start);
-	__alloc_pstat(s, __a, " end = 0x%llx", a->end);
-	__alloc_pstat(s, __a, " count = 0x%llx", a->count);
-	__alloc_pstat(s, __a, " blks = 0x%llx", a->blks);
-	__alloc_pstat(s, __a, " max_order = %llu", a->max_order);
+	__alloc_pstat(s, na, "Internal params:");
+	__alloc_pstat(s, na, " start = 0x%llx", a->start);
+	__alloc_pstat(s, na, " end = 0x%llx", a->end);
+	__alloc_pstat(s, na, " count = 0x%llx", a->count);
+	__alloc_pstat(s, na, " blks = 0x%llx", a->blks);
+	__alloc_pstat(s, na, " max_order = %llu", a->max_order);
 
 	if (lock)
-		alloc_lock(__a);
+		alloc_lock(na);
 
 	if (!nvgpu_list_empty(&a->co_list)) {
-		__alloc_pstat(s, __a, "");
-		__alloc_pstat(s, __a, "Carveouts:");
+		__alloc_pstat(s, na, "");
+		__alloc_pstat(s, na, "Carveouts:");
 		nvgpu_list_for_each_entry(tmp, &a->co_list,
 				nvgpu_alloc_carveout, co_entry)
-			__alloc_pstat(s, __a,
+			__alloc_pstat(s, na,
 				" CO %2d: %-20s 0x%010llx + 0x%llx",
 				i++, tmp->name, tmp->base, tmp->length);
 	}
 
-	__alloc_pstat(s, __a, "");
-	__alloc_pstat(s, __a, "Buddy blocks:");
-	__alloc_pstat(s, __a, " Order Free Alloced Split");
-	__alloc_pstat(s, __a, " ----- ---- ------- -----");
+	__alloc_pstat(s, na, "");
+	__alloc_pstat(s, na, "Buddy blocks:");
+	__alloc_pstat(s, na, " Order Free Alloced Split");
+	__alloc_pstat(s, na, " ----- ---- ------- -----");
 
 	for (i = a->max_order; i >= 0; i--) {
 		if (a->buddy_list_len[i] == 0 &&
@@ -1172,35 +1172,35 @@ static void nvgpu_buddy_print_stats(struct nvgpu_allocator *__a,
 			a->buddy_list_split[i] == 0)
 			continue;
 
-		__alloc_pstat(s, __a, " %3d %-7llu %-9llu %llu", i,
+		__alloc_pstat(s, na, " %3d %-7llu %-9llu %llu", i,
 			a->buddy_list_len[i],
 			a->buddy_list_alloced[i],
 			a->buddy_list_split[i]);
 	}
 
-	__alloc_pstat(s, __a, "");
+	__alloc_pstat(s, na, "");
 
 	nvgpu_rbtree_enum_start(0, &node, a->fixed_allocs);
 	i = 1;
 	while (node) {
 		falloc = nvgpu_fixed_alloc_from_rbtree_node(node);
 
-		__alloc_pstat(s, __a, "Fixed alloc (%d): [0x%llx -> 0x%llx]",
+		__alloc_pstat(s, na, "Fixed alloc (%d): [0x%llx -> 0x%llx]",
 			i, falloc->start, falloc->end);
 
 		nvgpu_rbtree_enum_next(&node, a->fixed_allocs);
 	}
 
-	__alloc_pstat(s, __a, "");
-	__alloc_pstat(s, __a, "Bytes allocated: %llu",
+	__alloc_pstat(s, na, "");
+	__alloc_pstat(s, na, "Bytes allocated: %llu",
 		a->bytes_alloced);
-	__alloc_pstat(s, __a, "Bytes allocated (real): %llu",
+	__alloc_pstat(s, na, "Bytes allocated (real): %llu",
 		a->bytes_alloced_real);
-	__alloc_pstat(s, __a, "Bytes freed: %llu",
+	__alloc_pstat(s, na, "Bytes freed: %llu",
 		a->bytes_freed);
 
 	if (lock)
-		alloc_unlock(__a);
+		alloc_unlock(na);
 }
 #endif
 
@@ -1245,7 +1245,7 @@ static const struct nvgpu_allocator_ops buddy_ops = {
  * will try and pick a reasonable max order.
  * @flags: Extra flags necessary. See GPU_BALLOC_*.
  */
-int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
+int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 		struct vm_gk20a *vm, const char *name,
 		u64 base, u64 size, u64 blk_size,
 		u64 max_order, u64 flags)
@@ -1276,7 +1276,7 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 		return -ENOMEM;
 	}
 
-	err = __nvgpu_alloc_common_init(__a, g, name, a, false, &buddy_ops);
+	err = __nvgpu_alloc_common_init(na, g, name, a, false, &buddy_ops);
 	if (err) {
 		goto fail;
 	}
@@ -1285,7 +1285,7 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	a->length = size;
 	a->blk_size = blk_size;
 	a->blk_shift = __ffs(blk_size);
-	a->owner = __a;
+	a->owner = na;
 
 	/*
 	 * If base is 0 then modfy base to be the size of one block so that we
@@ -1337,19 +1337,19 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	a->initialized = 1;
 
 #ifdef CONFIG_DEBUG_FS
-	nvgpu_init_alloc_debug(g, __a);
+	nvgpu_init_alloc_debug(g, na);
 #endif
-	alloc_dbg(__a, "New allocator: type buddy");
-	alloc_dbg(__a, " base 0x%llx", a->base);
-	alloc_dbg(__a, " size 0x%llx", a->length);
-	alloc_dbg(__a, " blk_size 0x%llx", a->blk_size);
+	alloc_dbg(na, "New allocator: type buddy");
+	alloc_dbg(na, " base 0x%llx", a->base);
+	alloc_dbg(na, " size 0x%llx", a->length);
+	alloc_dbg(na, " blk_size 0x%llx", a->blk_size);
 	if (flags & GPU_ALLOC_GVA_SPACE) {
 		alloc_dbg(balloc_owner(a),
 			" pde_size 0x%llx",
 			balloc_order_to_len(a, a->pte_blk_order));
 	}
-	alloc_dbg(__a, " max_order %llu", a->max_order);
-	alloc_dbg(__a, " flags 0x%llx", a->flags);
+	alloc_dbg(na, " max_order %llu", a->max_order);
+	alloc_dbg(na, " flags 0x%llx", a->flags);
 
 	return 0;
 
@@ -1361,10 +1361,10 @@ fail:
 	return err;
 }
 
-int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *a,
+int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 		const char *name, u64 base, u64 size,
 		u64 blk_size, u64 flags)
 {
-	return __nvgpu_buddy_allocator_init(g, a, NULL, name,
+	return __nvgpu_buddy_allocator_init(g, na, NULL, name,
 			base, size, blk_size, 0, 0);
 }
diff --git a/drivers/gpu/nvgpu/include/nvgpu/allocator.h b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
index d7a47d23..839712db 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/allocator.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
@@ -205,11 +205,11 @@ static inline void alloc_unlock(struct nvgpu_allocator *a)
 /*
  * Buddy allocator specific initializers.
  */
-int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *a,
+int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 		struct vm_gk20a *vm, const char *name,
 		u64 base, u64 size, u64 blk_size,
 		u64 max_order, u64 flags);
-int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *a,
+int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
 		const char *name, u64 base, u64 size,
 		u64 blk_size, u64 flags);
 