diff options
author | Alex Waterman <alexw@nvidia.com> | 2017-11-29 18:48:10 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-01-25 17:24:42 -0500 |
commit | b46045f3fe71fad703ee7d689657af16925d78ca (patch) | |
tree | 47a1e4231f84c2bb5db3dabf79c7b2b1eeb817be /drivers/gpu | |
parent | 99e808567ca358e0e6d03f4731b81854070266a3 (diff) |
gpu: nvgpu: Cleanup '\n' usage in allocator debugging
These '\n' were leftover from the previous debugging macro usage
which did not add the '\n' automagically. However, once swapped over
to the nvgpu logging system the '\n' is added and no longer needs
to be present in the code.
This did require one extra modification though to keep things
consistent. The __alloc_pstat() macro, used for sending output
either to a seq_file or the terminal, needed to add the '\n' for
seq_printf() calls and the '\n' had to be deleted in the C files.
Change-Id: I4d56317fe2a87bd00033cfe79d06ffc048d91049
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1613641
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/bitmap_allocator.c | 44 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/buddy_allocator.c | 92 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/lockless_allocator.c | 32 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/page_allocator.c | 90 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/allocator.h | 10 |
5 files changed, 134 insertions, 134 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c index 6bd654b8..f75f9a1f 100644 --- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c | |||
@@ -91,13 +91,13 @@ static u64 nvgpu_bitmap_alloc_fixed(struct nvgpu_allocator *__a, | |||
91 | a->nr_fixed_allocs++; | 91 | a->nr_fixed_allocs++; |
92 | alloc_unlock(__a); | 92 | alloc_unlock(__a); |
93 | 93 | ||
94 | alloc_dbg(__a, "Alloc-fixed 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]\n", | 94 | alloc_dbg(__a, "Alloc-fixed 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]", |
95 | base, len, blks, blks); | 95 | base, len, blks, blks); |
96 | return base; | 96 | return base; |
97 | 97 | ||
98 | fail: | 98 | fail: |
99 | alloc_unlock(__a); | 99 | alloc_unlock(__a); |
100 | alloc_dbg(__a, "Alloc-fixed failed! (0x%llx)\n", base); | 100 | alloc_dbg(__a, "Alloc-fixed failed! (0x%llx)", base); |
101 | return 0; | 101 | return 0; |
102 | } | 102 | } |
103 | 103 | ||
@@ -129,7 +129,7 @@ static void nvgpu_bitmap_free_fixed(struct nvgpu_allocator *__a, | |||
129 | a->bytes_freed += blks * a->blk_size; | 129 | a->bytes_freed += blks * a->blk_size; |
130 | alloc_unlock(__a); | 130 | alloc_unlock(__a); |
131 | 131 | ||
132 | alloc_dbg(__a, "Free-fixed 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]\n", | 132 | alloc_dbg(__a, "Free-fixed 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]", |
133 | base, len, blks, blks); | 133 | base, len, blks, blks); |
134 | } | 134 | } |
135 | 135 | ||
@@ -238,7 +238,7 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len) | |||
238 | __nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size)) | 238 | __nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size)) |
239 | goto fail_reset_bitmap; | 239 | goto fail_reset_bitmap; |
240 | 240 | ||
241 | alloc_dbg(__a, "Alloc 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]\n", | 241 | alloc_dbg(__a, "Alloc 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]", |
242 | addr, len, blks, blks); | 242 | addr, len, blks, blks); |
243 | 243 | ||
244 | a->nr_allocs++; | 244 | a->nr_allocs++; |
@@ -252,7 +252,7 @@ fail_reset_bitmap: | |||
252 | fail: | 252 | fail: |
253 | a->next_blk = 0; | 253 | a->next_blk = 0; |
254 | alloc_unlock(__a); | 254 | alloc_unlock(__a); |
255 | alloc_dbg(__a, "Alloc failed!\n"); | 255 | alloc_dbg(__a, "Alloc failed!"); |
256 | return 0; | 256 | return 0; |
257 | } | 257 | } |
258 | 258 | ||
@@ -283,7 +283,7 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *__a, u64 addr) | |||
283 | blks = alloc->length >> a->blk_shift; | 283 | blks = alloc->length >> a->blk_shift; |
284 | 284 | ||
285 | bitmap_clear(a->bitmap, offs, blks); | 285 | bitmap_clear(a->bitmap, offs, blks); |
286 | alloc_dbg(__a, "Free 0x%-10llx\n", addr); | 286 | alloc_dbg(__a, "Free 0x%-10llx", addr); |
287 | 287 | ||
288 | a->bytes_freed += alloc->length; | 288 | a->bytes_freed += alloc->length; |
289 | 289 | ||
@@ -323,18 +323,18 @@ static void nvgpu_bitmap_print_stats(struct nvgpu_allocator *__a, | |||
323 | { | 323 | { |
324 | struct nvgpu_bitmap_allocator *a = bitmap_allocator(__a); | 324 | struct nvgpu_bitmap_allocator *a = bitmap_allocator(__a); |
325 | 325 | ||
326 | __alloc_pstat(s, __a, "Bitmap allocator params:\n"); | 326 | __alloc_pstat(s, __a, "Bitmap allocator params:"); |
327 | __alloc_pstat(s, __a, " start = 0x%llx\n", a->base); | 327 | __alloc_pstat(s, __a, " start = 0x%llx", a->base); |
328 | __alloc_pstat(s, __a, " end = 0x%llx\n", a->base + a->length); | 328 | __alloc_pstat(s, __a, " end = 0x%llx", a->base + a->length); |
329 | __alloc_pstat(s, __a, " blks = 0x%llx\n", a->num_bits); | 329 | __alloc_pstat(s, __a, " blks = 0x%llx", a->num_bits); |
330 | 330 | ||
331 | /* Actual stats. */ | 331 | /* Actual stats. */ |
332 | __alloc_pstat(s, __a, "Stats:\n"); | 332 | __alloc_pstat(s, __a, "Stats:"); |
333 | __alloc_pstat(s, __a, " Number allocs = 0x%llx\n", a->nr_allocs); | 333 | __alloc_pstat(s, __a, " Number allocs = 0x%llx", a->nr_allocs); |
334 | __alloc_pstat(s, __a, " Number fixed = 0x%llx\n", a->nr_fixed_allocs); | 334 | __alloc_pstat(s, __a, " Number fixed = 0x%llx", a->nr_fixed_allocs); |
335 | __alloc_pstat(s, __a, " Bytes alloced = 0x%llx\n", a->bytes_alloced); | 335 | __alloc_pstat(s, __a, " Bytes alloced = 0x%llx", a->bytes_alloced); |
336 | __alloc_pstat(s, __a, " Bytes freed = 0x%llx\n", a->bytes_freed); | 336 | __alloc_pstat(s, __a, " Bytes freed = 0x%llx", a->bytes_freed); |
337 | __alloc_pstat(s, __a, " Outstanding = 0x%llx\n", | 337 | __alloc_pstat(s, __a, " Outstanding = 0x%llx", |
338 | a->bytes_alloced - a->bytes_freed); | 338 | a->bytes_alloced - a->bytes_freed); |
339 | } | 339 | } |
340 | #endif | 340 | #endif |
@@ -421,12 +421,12 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, | |||
421 | #ifdef CONFIG_DEBUG_FS | 421 | #ifdef CONFIG_DEBUG_FS |
422 | nvgpu_init_alloc_debug(g, __a); | 422 | nvgpu_init_alloc_debug(g, __a); |
423 | #endif | 423 | #endif |
424 | alloc_dbg(__a, "New allocator: type bitmap\n"); | 424 | alloc_dbg(__a, "New allocator: type bitmap"); |
425 | alloc_dbg(__a, " base 0x%llx\n", a->base); | 425 | alloc_dbg(__a, " base 0x%llx", a->base); |
426 | alloc_dbg(__a, " bit_offs 0x%llx\n", a->bit_offs); | 426 | alloc_dbg(__a, " bit_offs 0x%llx", a->bit_offs); |
427 | alloc_dbg(__a, " size 0x%llx\n", a->length); | 427 | alloc_dbg(__a, " size 0x%llx", a->length); |
428 | alloc_dbg(__a, " blk_size 0x%llx\n", a->blk_size); | 428 | alloc_dbg(__a, " blk_size 0x%llx", a->blk_size); |
429 | alloc_dbg(__a, " flags 0x%llx\n", a->flags); | 429 | alloc_dbg(__a, " flags 0x%llx", a->flags); |
430 | 430 | ||
431 | return 0; | 431 | return 0; |
432 | 432 | ||
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c index e5a9b62b..b986b29f 100644 --- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c | |||
@@ -128,7 +128,7 @@ static void __balloc_buddy_list_add(struct nvgpu_buddy_allocator *a, | |||
128 | { | 128 | { |
129 | if (buddy_is_in_list(b)) { | 129 | if (buddy_is_in_list(b)) { |
130 | alloc_dbg(balloc_owner(a), | 130 | alloc_dbg(balloc_owner(a), |
131 | "Oops: adding added buddy (%llu:0x%llx)\n", | 131 | "Oops: adding added buddy (%llu:0x%llx)", |
132 | b->order, b->start); | 132 | b->order, b->start); |
133 | BUG(); | 133 | BUG(); |
134 | } | 134 | } |
@@ -152,7 +152,7 @@ static void __balloc_buddy_list_rem(struct nvgpu_buddy_allocator *a, | |||
152 | { | 152 | { |
153 | if (!buddy_is_in_list(b)) { | 153 | if (!buddy_is_in_list(b)) { |
154 | alloc_dbg(balloc_owner(a), | 154 | alloc_dbg(balloc_owner(a), |
155 | "Oops: removing removed buddy (%llu:0x%llx)\n", | 155 | "Oops: removing removed buddy (%llu:0x%llx)", |
156 | b->order, b->start); | 156 | b->order, b->start); |
157 | BUG(); | 157 | BUG(); |
158 | } | 158 | } |
@@ -304,19 +304,19 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *__a) | |||
304 | 304 | ||
305 | if (a->buddy_list_len[i] != 0) { | 305 | if (a->buddy_list_len[i] != 0) { |
306 | nvgpu_info(__a->g, | 306 | nvgpu_info(__a->g, |
307 | "Excess buddies!!! (%d: %llu)\n", | 307 | "Excess buddies!!! (%d: %llu)", |
308 | i, a->buddy_list_len[i]); | 308 | i, a->buddy_list_len[i]); |
309 | BUG(); | 309 | BUG(); |
310 | } | 310 | } |
311 | if (a->buddy_list_split[i] != 0) { | 311 | if (a->buddy_list_split[i] != 0) { |
312 | nvgpu_info(__a->g, | 312 | nvgpu_info(__a->g, |
313 | "Excess split nodes!!! (%d: %llu)\n", | 313 | "Excess split nodes!!! (%d: %llu)", |
314 | i, a->buddy_list_split[i]); | 314 | i, a->buddy_list_split[i]); |
315 | BUG(); | 315 | BUG(); |
316 | } | 316 | } |
317 | if (a->buddy_list_alloced[i] != 0) { | 317 | if (a->buddy_list_alloced[i] != 0) { |
318 | nvgpu_info(__a->g, | 318 | nvgpu_info(__a->g, |
319 | "Excess alloced nodes!!! (%d: %llu)\n", | 319 | "Excess alloced nodes!!! (%d: %llu)", |
320 | i, a->buddy_list_alloced[i]); | 320 | i, a->buddy_list_alloced[i]); |
321 | BUG(); | 321 | BUG(); |
322 | } | 322 | } |
@@ -646,7 +646,7 @@ static struct nvgpu_buddy *__balloc_make_fixed_buddy( | |||
646 | /* Welp, that's the end of that. */ | 646 | /* Welp, that's the end of that. */ |
647 | alloc_dbg(balloc_owner(a), | 647 | alloc_dbg(balloc_owner(a), |
648 | "Fixed buddy PTE " | 648 | "Fixed buddy PTE " |
649 | "size mismatch!\n"); | 649 | "size mismatch!"); |
650 | return NULL; | 650 | return NULL; |
651 | } | 651 | } |
652 | 652 | ||
@@ -663,7 +663,7 @@ static struct nvgpu_buddy *__balloc_make_fixed_buddy( | |||
663 | } | 663 | } |
664 | 664 | ||
665 | if (cur_order > a->max_order) { | 665 | if (cur_order > a->max_order) { |
666 | alloc_dbg(balloc_owner(a), "No buddy for range ???\n"); | 666 | alloc_dbg(balloc_owner(a), "No buddy for range ???"); |
667 | return NULL; | 667 | return NULL; |
668 | } | 668 | } |
669 | 669 | ||
@@ -671,7 +671,7 @@ static struct nvgpu_buddy *__balloc_make_fixed_buddy( | |||
671 | while (bud->start != base || bud->order != order) { | 671 | while (bud->start != base || bud->order != order) { |
672 | if (balloc_split_buddy(a, bud, pte_size)) { | 672 | if (balloc_split_buddy(a, bud, pte_size)) { |
673 | alloc_dbg(balloc_owner(a), | 673 | alloc_dbg(balloc_owner(a), |
674 | "split buddy failed? {0x%llx, %llu}\n", | 674 | "split buddy failed? {0x%llx, %llu}", |
675 | bud->start, bud->order); | 675 | bud->start, bud->order); |
676 | balloc_coalesce(a, bud); | 676 | balloc_coalesce(a, bud); |
677 | return NULL; | 677 | return NULL; |
@@ -704,7 +704,7 @@ static u64 __balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a, | |||
704 | 704 | ||
705 | if (align_order > a->max_order) { | 705 | if (align_order > a->max_order) { |
706 | alloc_dbg(balloc_owner(a), | 706 | alloc_dbg(balloc_owner(a), |
707 | "Align order too big: %llu > %llu\n", | 707 | "Align order too big: %llu > %llu", |
708 | align_order, a->max_order); | 708 | align_order, a->max_order); |
709 | return 0; | 709 | return 0; |
710 | } | 710 | } |
@@ -723,7 +723,7 @@ static u64 __balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a, | |||
723 | align_order, pte_size); | 723 | align_order, pte_size); |
724 | if (!bud) { | 724 | if (!bud) { |
725 | alloc_dbg(balloc_owner(a), | 725 | alloc_dbg(balloc_owner(a), |
726 | "Fixed buddy failed: {0x%llx, %llu}!\n", | 726 | "Fixed buddy failed: {0x%llx, %llu}!", |
727 | balloc_base_unshift(a, inc_base), | 727 | balloc_base_unshift(a, inc_base), |
728 | align_order); | 728 | align_order); |
729 | goto err_and_cleanup; | 729 | goto err_and_cleanup; |
@@ -799,7 +799,7 @@ static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *__a, u64 len) | |||
799 | 799 | ||
800 | if (order > a->max_order) { | 800 | if (order > a->max_order) { |
801 | alloc_unlock(__a); | 801 | alloc_unlock(__a); |
802 | alloc_dbg(balloc_owner(a), "Alloc fail\n"); | 802 | alloc_dbg(balloc_owner(a), "Alloc fail"); |
803 | return 0; | 803 | return 0; |
804 | } | 804 | } |
805 | 805 | ||
@@ -814,13 +814,13 @@ static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *__a, u64 len) | |||
814 | a->bytes_alloced += len; | 814 | a->bytes_alloced += len; |
815 | a->bytes_alloced_real += balloc_order_to_len(a, order); | 815 | a->bytes_alloced_real += balloc_order_to_len(a, order); |
816 | alloc_dbg(balloc_owner(a), | 816 | alloc_dbg(balloc_owner(a), |
817 | "Alloc 0x%-10llx %3lld:0x%-10llx pte_size=%s\n", | 817 | "Alloc 0x%-10llx %3lld:0x%-10llx pte_size=%s", |
818 | addr, order, len, | 818 | addr, order, len, |
819 | pte_size == gmmu_page_size_big ? "big" : | 819 | pte_size == gmmu_page_size_big ? "big" : |
820 | pte_size == gmmu_page_size_small ? "small" : | 820 | pte_size == gmmu_page_size_small ? "small" : |
821 | "NA/any"); | 821 | "NA/any"); |
822 | } else { | 822 | } else { |
823 | alloc_dbg(balloc_owner(a), "Alloc failed: no mem!\n"); | 823 | alloc_dbg(balloc_owner(a), "Alloc failed: no mem!"); |
824 | } | 824 | } |
825 | 825 | ||
826 | a->alloc_made = 1; | 826 | a->alloc_made = 1; |
@@ -869,7 +869,7 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a, | |||
869 | 869 | ||
870 | if (!balloc_is_range_free(a, base, base + len)) { | 870 | if (!balloc_is_range_free(a, base, base + len)) { |
871 | alloc_dbg(balloc_owner(a), | 871 | alloc_dbg(balloc_owner(a), |
872 | "Range not free: 0x%llx -> 0x%llx\n", | 872 | "Range not free: 0x%llx -> 0x%llx", |
873 | base, base + len); | 873 | base, base + len); |
874 | goto fail_unlock; | 874 | goto fail_unlock; |
875 | } | 875 | } |
@@ -877,7 +877,7 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a, | |||
877 | ret = __balloc_do_alloc_fixed(a, falloc, base, len, pte_size); | 877 | ret = __balloc_do_alloc_fixed(a, falloc, base, len, pte_size); |
878 | if (!ret) { | 878 | if (!ret) { |
879 | alloc_dbg(balloc_owner(a), | 879 | alloc_dbg(balloc_owner(a), |
880 | "Alloc-fixed failed ?? 0x%llx -> 0x%llx\n", | 880 | "Alloc-fixed failed ?? 0x%llx -> 0x%llx", |
881 | base, base + len); | 881 | base, base + len); |
882 | goto fail_unlock; | 882 | goto fail_unlock; |
883 | } | 883 | } |
@@ -891,7 +891,7 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a, | |||
891 | a->bytes_alloced += len; | 891 | a->bytes_alloced += len; |
892 | a->bytes_alloced_real += real_bytes; | 892 | a->bytes_alloced_real += real_bytes; |
893 | 893 | ||
894 | alloc_dbg(balloc_owner(a), "Alloc (fixed) 0x%llx\n", base); | 894 | alloc_dbg(balloc_owner(a), "Alloc (fixed) 0x%llx", base); |
895 | 895 | ||
896 | return base; | 896 | return base; |
897 | 897 | ||
@@ -962,7 +962,7 @@ static void nvgpu_buddy_bfree(struct nvgpu_allocator *__a, u64 addr) | |||
962 | 962 | ||
963 | done: | 963 | done: |
964 | alloc_unlock(__a); | 964 | alloc_unlock(__a); |
965 | alloc_dbg(balloc_owner(a), "Free 0x%llx\n", addr); | 965 | alloc_dbg(balloc_owner(a), "Free 0x%llx", addr); |
966 | return; | 966 | return; |
967 | } | 967 | } |
968 | 968 | ||
@@ -1018,7 +1018,7 @@ static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *__a, | |||
1018 | if (!addr) { | 1018 | if (!addr) { |
1019 | err = -ENOMEM; | 1019 | err = -ENOMEM; |
1020 | nvgpu_warn(__a->g, | 1020 | nvgpu_warn(__a->g, |
1021 | "%s: Failed to reserve a valid carveout!\n", | 1021 | "%s: Failed to reserve a valid carveout!", |
1022 | __func__); | 1022 | __func__); |
1023 | goto done; | 1023 | goto done; |
1024 | } | 1024 | } |
@@ -1102,32 +1102,32 @@ static void nvgpu_buddy_print_stats(struct nvgpu_allocator *__a, | |||
1102 | struct nvgpu_alloc_carveout *tmp; | 1102 | struct nvgpu_alloc_carveout *tmp; |
1103 | struct nvgpu_buddy_allocator *a = __a->priv; | 1103 | struct nvgpu_buddy_allocator *a = __a->priv; |
1104 | 1104 | ||
1105 | __alloc_pstat(s, __a, "base = %llu, limit = %llu, blk_size = %llu\n", | 1105 | __alloc_pstat(s, __a, "base = %llu, limit = %llu, blk_size = %llu", |
1106 | a->base, a->length, a->blk_size); | 1106 | a->base, a->length, a->blk_size); |
1107 | __alloc_pstat(s, __a, "Internal params:\n"); | 1107 | __alloc_pstat(s, __a, "Internal params:"); |
1108 | __alloc_pstat(s, __a, " start = 0x%llx\n", a->start); | 1108 | __alloc_pstat(s, __a, " start = 0x%llx", a->start); |
1109 | __alloc_pstat(s, __a, " end = 0x%llx\n", a->end); | 1109 | __alloc_pstat(s, __a, " end = 0x%llx", a->end); |
1110 | __alloc_pstat(s, __a, " count = 0x%llx\n", a->count); | 1110 | __alloc_pstat(s, __a, " count = 0x%llx", a->count); |
1111 | __alloc_pstat(s, __a, " blks = 0x%llx\n", a->blks); | 1111 | __alloc_pstat(s, __a, " blks = 0x%llx", a->blks); |
1112 | __alloc_pstat(s, __a, " max_order = %llu\n", a->max_order); | 1112 | __alloc_pstat(s, __a, " max_order = %llu", a->max_order); |
1113 | 1113 | ||
1114 | if (lock) | 1114 | if (lock) |
1115 | alloc_lock(__a); | 1115 | alloc_lock(__a); |
1116 | 1116 | ||
1117 | if (!nvgpu_list_empty(&a->co_list)) { | 1117 | if (!nvgpu_list_empty(&a->co_list)) { |
1118 | __alloc_pstat(s, __a, "\n"); | 1118 | __alloc_pstat(s, __a, ""); |
1119 | __alloc_pstat(s, __a, "Carveouts:\n"); | 1119 | __alloc_pstat(s, __a, "Carveouts:"); |
1120 | nvgpu_list_for_each_entry(tmp, &a->co_list, | 1120 | nvgpu_list_for_each_entry(tmp, &a->co_list, |
1121 | nvgpu_alloc_carveout, co_entry) | 1121 | nvgpu_alloc_carveout, co_entry) |
1122 | __alloc_pstat(s, __a, | 1122 | __alloc_pstat(s, __a, |
1123 | " CO %2d: %-20s 0x%010llx + 0x%llx\n", | 1123 | " CO %2d: %-20s 0x%010llx + 0x%llx", |
1124 | i++, tmp->name, tmp->base, tmp->length); | 1124 | i++, tmp->name, tmp->base, tmp->length); |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | __alloc_pstat(s, __a, "\n"); | 1127 | __alloc_pstat(s, __a, ""); |
1128 | __alloc_pstat(s, __a, "Buddy blocks:\n"); | 1128 | __alloc_pstat(s, __a, "Buddy blocks:"); |
1129 | __alloc_pstat(s, __a, " Order Free Alloced Split\n"); | 1129 | __alloc_pstat(s, __a, " Order Free Alloced Split"); |
1130 | __alloc_pstat(s, __a, " ----- ---- ------- -----\n"); | 1130 | __alloc_pstat(s, __a, " ----- ---- ------- -----"); |
1131 | 1131 | ||
1132 | for (i = a->max_order; i >= 0; i--) { | 1132 | for (i = a->max_order; i >= 0; i--) { |
1133 | if (a->buddy_list_len[i] == 0 && | 1133 | if (a->buddy_list_len[i] == 0 && |
@@ -1135,31 +1135,31 @@ static void nvgpu_buddy_print_stats(struct nvgpu_allocator *__a, | |||
1135 | a->buddy_list_split[i] == 0) | 1135 | a->buddy_list_split[i] == 0) |
1136 | continue; | 1136 | continue; |
1137 | 1137 | ||
1138 | __alloc_pstat(s, __a, " %3d %-7llu %-9llu %llu\n", i, | 1138 | __alloc_pstat(s, __a, " %3d %-7llu %-9llu %llu", i, |
1139 | a->buddy_list_len[i], | 1139 | a->buddy_list_len[i], |
1140 | a->buddy_list_alloced[i], | 1140 | a->buddy_list_alloced[i], |
1141 | a->buddy_list_split[i]); | 1141 | a->buddy_list_split[i]); |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | __alloc_pstat(s, __a, "\n"); | 1144 | __alloc_pstat(s, __a, ""); |
1145 | 1145 | ||
1146 | nvgpu_rbtree_enum_start(0, &node, a->fixed_allocs); | 1146 | nvgpu_rbtree_enum_start(0, &node, a->fixed_allocs); |
1147 | i = 1; | 1147 | i = 1; |
1148 | while (node) { | 1148 | while (node) { |
1149 | falloc = nvgpu_fixed_alloc_from_rbtree_node(node); | 1149 | falloc = nvgpu_fixed_alloc_from_rbtree_node(node); |
1150 | 1150 | ||
1151 | __alloc_pstat(s, __a, "Fixed alloc (%d): [0x%llx -> 0x%llx]\n", | 1151 | __alloc_pstat(s, __a, "Fixed alloc (%d): [0x%llx -> 0x%llx]", |
1152 | i, falloc->start, falloc->end); | 1152 | i, falloc->start, falloc->end); |
1153 | 1153 | ||
1154 | nvgpu_rbtree_enum_next(&node, a->fixed_allocs); | 1154 | nvgpu_rbtree_enum_next(&node, a->fixed_allocs); |
1155 | } | 1155 | } |
1156 | 1156 | ||
1157 | __alloc_pstat(s, __a, "\n"); | 1157 | __alloc_pstat(s, __a, ""); |
1158 | __alloc_pstat(s, __a, "Bytes allocated: %llu\n", | 1158 | __alloc_pstat(s, __a, "Bytes allocated: %llu", |
1159 | a->bytes_alloced); | 1159 | a->bytes_alloced); |
1160 | __alloc_pstat(s, __a, "Bytes allocated (real): %llu\n", | 1160 | __alloc_pstat(s, __a, "Bytes allocated (real): %llu", |
1161 | a->bytes_alloced_real); | 1161 | a->bytes_alloced_real); |
1162 | __alloc_pstat(s, __a, "Bytes freed: %llu\n", | 1162 | __alloc_pstat(s, __a, "Bytes freed: %llu", |
1163 | a->bytes_freed); | 1163 | a->bytes_freed); |
1164 | 1164 | ||
1165 | if (lock) | 1165 | if (lock) |
@@ -1294,16 +1294,16 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, | |||
1294 | #ifdef CONFIG_DEBUG_FS | 1294 | #ifdef CONFIG_DEBUG_FS |
1295 | nvgpu_init_alloc_debug(g, __a); | 1295 | nvgpu_init_alloc_debug(g, __a); |
1296 | #endif | 1296 | #endif |
1297 | alloc_dbg(__a, "New allocator: type buddy\n"); | 1297 | alloc_dbg(__a, "New allocator: type buddy"); |
1298 | alloc_dbg(__a, " base 0x%llx\n", a->base); | 1298 | alloc_dbg(__a, " base 0x%llx", a->base); |
1299 | alloc_dbg(__a, " size 0x%llx\n", a->length); | 1299 | alloc_dbg(__a, " size 0x%llx", a->length); |
1300 | alloc_dbg(__a, " blk_size 0x%llx\n", a->blk_size); | 1300 | alloc_dbg(__a, " blk_size 0x%llx", a->blk_size); |
1301 | if (flags & GPU_ALLOC_GVA_SPACE) | 1301 | if (flags & GPU_ALLOC_GVA_SPACE) |
1302 | alloc_dbg(balloc_owner(a), | 1302 | alloc_dbg(balloc_owner(a), |
1303 | " pde_size 0x%llx\n", | 1303 | " pde_size 0x%llx", |
1304 | balloc_order_to_len(a, a->pte_blk_order)); | 1304 | balloc_order_to_len(a, a->pte_blk_order)); |
1305 | alloc_dbg(__a, " max_order %llu\n", a->max_order); | 1305 | alloc_dbg(__a, " max_order %llu", a->max_order); |
1306 | alloc_dbg(__a, " flags 0x%llx\n", a->flags); | 1306 | alloc_dbg(__a, " flags 0x%llx", a->flags); |
1307 | 1307 | ||
1308 | return 0; | 1308 | return 0; |
1309 | 1309 | ||
diff --git a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c index 3eb10fc4..5f48d606 100644 --- a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c | |||
@@ -73,7 +73,7 @@ static u64 nvgpu_lockless_alloc(struct nvgpu_allocator *a, u64 len) | |||
73 | if (ret == head) { | 73 | if (ret == head) { |
74 | addr = pa->base + head * pa->blk_size; | 74 | addr = pa->base + head * pa->blk_size; |
75 | nvgpu_atomic_inc(&pa->nr_allocs); | 75 | nvgpu_atomic_inc(&pa->nr_allocs); |
76 | alloc_dbg(a, "Alloc node # %d @ addr 0x%llx\n", head, | 76 | alloc_dbg(a, "Alloc node # %d @ addr 0x%llx", head, |
77 | addr); | 77 | addr); |
78 | break; | 78 | break; |
79 | } | 79 | } |
@@ -81,9 +81,9 @@ static u64 nvgpu_lockless_alloc(struct nvgpu_allocator *a, u64 len) | |||
81 | } | 81 | } |
82 | 82 | ||
83 | if (addr) | 83 | if (addr) |
84 | alloc_dbg(a, "Alloc node # %d @ addr 0x%llx\n", head, addr); | 84 | alloc_dbg(a, "Alloc node # %d @ addr 0x%llx", head, addr); |
85 | else | 85 | else |
86 | alloc_dbg(a, "Alloc failed!\n"); | 86 | alloc_dbg(a, "Alloc failed!"); |
87 | 87 | ||
88 | return addr; | 88 | return addr; |
89 | } | 89 | } |
@@ -96,7 +96,7 @@ static void nvgpu_lockless_free(struct nvgpu_allocator *a, u64 addr) | |||
96 | 96 | ||
97 | cur_idx = (addr - pa->base) / pa->blk_size; | 97 | cur_idx = (addr - pa->base) / pa->blk_size; |
98 | 98 | ||
99 | alloc_dbg(a, "Free node # %llu @ addr 0x%llx\n", cur_idx, addr); | 99 | alloc_dbg(a, "Free node # %llu @ addr 0x%llx", cur_idx, addr); |
100 | 100 | ||
101 | while (1) { | 101 | while (1) { |
102 | head = NV_ACCESS_ONCE(pa->head); | 102 | head = NV_ACCESS_ONCE(pa->head); |
@@ -104,7 +104,7 @@ static void nvgpu_lockless_free(struct nvgpu_allocator *a, u64 addr) | |||
104 | ret = cmpxchg(&pa->head, head, cur_idx); | 104 | ret = cmpxchg(&pa->head, head, cur_idx); |
105 | if (ret == head) { | 105 | if (ret == head) { |
106 | nvgpu_atomic_dec(&pa->nr_allocs); | 106 | nvgpu_atomic_dec(&pa->nr_allocs); |
107 | alloc_dbg(a, "Free node # %llu\n", cur_idx); | 107 | alloc_dbg(a, "Free node # %llu", cur_idx); |
108 | break; | 108 | break; |
109 | } | 109 | } |
110 | } | 110 | } |
@@ -128,15 +128,15 @@ static void nvgpu_lockless_print_stats(struct nvgpu_allocator *a, | |||
128 | { | 128 | { |
129 | struct nvgpu_lockless_allocator *pa = a->priv; | 129 | struct nvgpu_lockless_allocator *pa = a->priv; |
130 | 130 | ||
131 | __alloc_pstat(s, a, "Lockless allocator params:\n"); | 131 | __alloc_pstat(s, a, "Lockless allocator params:"); |
132 | __alloc_pstat(s, a, " start = 0x%llx\n", pa->base); | 132 | __alloc_pstat(s, a, " start = 0x%llx", pa->base); |
133 | __alloc_pstat(s, a, " end = 0x%llx\n", pa->base + pa->length); | 133 | __alloc_pstat(s, a, " end = 0x%llx", pa->base + pa->length); |
134 | 134 | ||
135 | /* Actual stats. */ | 135 | /* Actual stats. */ |
136 | __alloc_pstat(s, a, "Stats:\n"); | 136 | __alloc_pstat(s, a, "Stats:"); |
137 | __alloc_pstat(s, a, " Number allocs = %d\n", | 137 | __alloc_pstat(s, a, " Number allocs = %d", |
138 | nvgpu_atomic_read(&pa->nr_allocs)); | 138 | nvgpu_atomic_read(&pa->nr_allocs)); |
139 | __alloc_pstat(s, a, " Number free = %d\n", | 139 | __alloc_pstat(s, a, " Number free = %d", |
140 | pa->nr_nodes - nvgpu_atomic_read(&pa->nr_allocs)); | 140 | pa->nr_nodes - nvgpu_atomic_read(&pa->nr_allocs)); |
141 | } | 141 | } |
142 | #endif | 142 | #endif |
@@ -211,11 +211,11 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, | |||
211 | #ifdef CONFIG_DEBUG_FS | 211 | #ifdef CONFIG_DEBUG_FS |
212 | nvgpu_init_alloc_debug(g, __a); | 212 | nvgpu_init_alloc_debug(g, __a); |
213 | #endif | 213 | #endif |
214 | alloc_dbg(__a, "New allocator: type lockless\n"); | 214 | alloc_dbg(__a, "New allocator: type lockless"); |
215 | alloc_dbg(__a, " base 0x%llx\n", a->base); | 215 | alloc_dbg(__a, " base 0x%llx", a->base); |
216 | alloc_dbg(__a, " nodes %d\n", a->nr_nodes); | 216 | alloc_dbg(__a, " nodes %d", a->nr_nodes); |
217 | alloc_dbg(__a, " blk_size 0x%llx\n", a->blk_size); | 217 | alloc_dbg(__a, " blk_size 0x%llx", a->blk_size); |
218 | alloc_dbg(__a, " flags 0x%llx\n", a->flags); | 218 | alloc_dbg(__a, " flags 0x%llx", a->flags); |
219 | 219 | ||
220 | return 0; | 220 | return 0; |
221 | 221 | ||
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c index e7738919..6dc1edf7 100644 --- a/drivers/gpu/nvgpu/common/mm/page_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c | |||
@@ -279,7 +279,7 @@ static struct page_alloc_slab_page *alloc_slab_page( | |||
279 | 279 | ||
280 | slab_page = nvgpu_kmem_cache_alloc(a->slab_page_cache); | 280 | slab_page = nvgpu_kmem_cache_alloc(a->slab_page_cache); |
281 | if (!slab_page) { | 281 | if (!slab_page) { |
282 | palloc_dbg(a, "OOM: unable to alloc slab_page struct!\n"); | 282 | palloc_dbg(a, "OOM: unable to alloc slab_page struct!"); |
283 | return NULL; | 283 | return NULL; |
284 | } | 284 | } |
285 | 285 | ||
@@ -288,7 +288,7 @@ static struct page_alloc_slab_page *alloc_slab_page( | |||
288 | slab_page->page_addr = nvgpu_alloc(&a->source_allocator, a->page_size); | 288 | slab_page->page_addr = nvgpu_alloc(&a->source_allocator, a->page_size); |
289 | if (!slab_page->page_addr) { | 289 | if (!slab_page->page_addr) { |
290 | nvgpu_kmem_cache_free(a->slab_page_cache, slab_page); | 290 | nvgpu_kmem_cache_free(a->slab_page_cache, slab_page); |
291 | palloc_dbg(a, "OOM: vidmem is full!\n"); | 291 | palloc_dbg(a, "OOM: vidmem is full!"); |
292 | return NULL; | 292 | return NULL; |
293 | } | 293 | } |
294 | 294 | ||
@@ -301,7 +301,7 @@ static struct page_alloc_slab_page *alloc_slab_page( | |||
301 | 301 | ||
302 | a->pages_alloced++; | 302 | a->pages_alloced++; |
303 | 303 | ||
304 | palloc_dbg(a, "Allocated new slab page @ 0x%012llx size=%u\n", | 304 | palloc_dbg(a, "Allocated new slab page @ 0x%012llx size=%u", |
305 | slab_page->page_addr, slab_page->slab_size); | 305 | slab_page->page_addr, slab_page->slab_size); |
306 | 306 | ||
307 | return slab_page; | 307 | return slab_page; |
@@ -310,7 +310,7 @@ static struct page_alloc_slab_page *alloc_slab_page( | |||
310 | static void free_slab_page(struct nvgpu_page_allocator *a, | 310 | static void free_slab_page(struct nvgpu_page_allocator *a, |
311 | struct page_alloc_slab_page *slab_page) | 311 | struct page_alloc_slab_page *slab_page) |
312 | { | 312 | { |
313 | palloc_dbg(a, "Freeing slab page @ 0x%012llx\n", slab_page->page_addr); | 313 | palloc_dbg(a, "Freeing slab page @ 0x%012llx", slab_page->page_addr); |
314 | 314 | ||
315 | BUG_ON((slab_page->state != SP_NONE && slab_page->state != SP_EMPTY) || | 315 | BUG_ON((slab_page->state != SP_NONE && slab_page->state != SP_EMPTY) || |
316 | slab_page->nr_objects_alloced != 0 || | 316 | slab_page->nr_objects_alloced != 0 || |
@@ -418,7 +418,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab( | |||
418 | 418 | ||
419 | alloc = nvgpu_kmem_cache_alloc(a->alloc_cache); | 419 | alloc = nvgpu_kmem_cache_alloc(a->alloc_cache); |
420 | if (!alloc) { | 420 | if (!alloc) { |
421 | palloc_dbg(a, "OOM: could not alloc page_alloc struct!\n"); | 421 | palloc_dbg(a, "OOM: could not alloc page_alloc struct!"); |
422 | goto fail; | 422 | goto fail; |
423 | } | 423 | } |
424 | 424 | ||
@@ -426,7 +426,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab( | |||
426 | 426 | ||
427 | sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl)); | 427 | sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl)); |
428 | if (!sgl) { | 428 | if (!sgl) { |
429 | palloc_dbg(a, "OOM: could not alloc sgl struct!\n"); | 429 | palloc_dbg(a, "OOM: could not alloc sgl struct!"); |
430 | goto fail; | 430 | goto fail; |
431 | } | 431 | } |
432 | 432 | ||
@@ -435,7 +435,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab( | |||
435 | if (err) | 435 | if (err) |
436 | goto fail; | 436 | goto fail; |
437 | 437 | ||
438 | palloc_dbg(a, "Alloc 0x%04llx sr=%d id=0x%010llx [slab]\n", | 438 | palloc_dbg(a, "Alloc 0x%04llx sr=%d id=0x%010llx [slab]", |
439 | len, slab_nr, alloc->base); | 439 | len, slab_nr, alloc->base); |
440 | a->nr_slab_allocs++; | 440 | a->nr_slab_allocs++; |
441 | 441 | ||
@@ -549,7 +549,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages( | |||
549 | 549 | ||
550 | /* Divide by 2 and try again */ | 550 | /* Divide by 2 and try again */ |
551 | if (!chunk_addr) { | 551 | if (!chunk_addr) { |
552 | palloc_dbg(a, "balloc failed: 0x%llx\n", | 552 | palloc_dbg(a, "balloc failed: 0x%llx", |
553 | chunk_len); | 553 | chunk_len); |
554 | chunk_len >>= 1; | 554 | chunk_len >>= 1; |
555 | max_chunk_len = chunk_len; | 555 | max_chunk_len = chunk_len; |
@@ -559,7 +559,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages( | |||
559 | chunk_pages = chunk_len >> a->page_shift; | 559 | chunk_pages = chunk_len >> a->page_shift; |
560 | 560 | ||
561 | if (!chunk_addr) { | 561 | if (!chunk_addr) { |
562 | palloc_dbg(a, "bailing @ 0x%llx\n", chunk_len); | 562 | palloc_dbg(a, "bailing @ 0x%llx", chunk_len); |
563 | goto fail_cleanup; | 563 | goto fail_cleanup; |
564 | } | 564 | } |
565 | 565 | ||
@@ -622,22 +622,22 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages( | |||
622 | 622 | ||
623 | alloc = __do_nvgpu_alloc_pages(a, pages); | 623 | alloc = __do_nvgpu_alloc_pages(a, pages); |
624 | if (!alloc) { | 624 | if (!alloc) { |
625 | palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)\n", | 625 | palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)", |
626 | pages << a->page_shift, pages); | 626 | pages << a->page_shift, pages); |
627 | return NULL; | 627 | return NULL; |
628 | } | 628 | } |
629 | 629 | ||
630 | palloc_dbg(a, "Alloc 0x%llx (%llu) id=0x%010llx\n", | 630 | palloc_dbg(a, "Alloc 0x%llx (%llu) id=0x%010llx", |
631 | pages << a->page_shift, pages, alloc->base); | 631 | pages << a->page_shift, pages, alloc->base); |
632 | sgl = alloc->sgt.sgl; | 632 | sgl = alloc->sgt.sgl; |
633 | while (sgl) { | 633 | while (sgl) { |
634 | palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx\n", | 634 | palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx", |
635 | i++, | 635 | i++, |
636 | nvgpu_sgt_get_phys(&alloc->sgt, sgl), | 636 | nvgpu_sgt_get_phys(&alloc->sgt, sgl), |
637 | nvgpu_sgt_get_length(&alloc->sgt, sgl)); | 637 | nvgpu_sgt_get_length(&alloc->sgt, sgl)); |
638 | sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl); | 638 | sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl); |
639 | } | 639 | } |
640 | palloc_dbg(a, "Alloc done\n"); | 640 | palloc_dbg(a, "Alloc done"); |
641 | 641 | ||
642 | return alloc; | 642 | return alloc; |
643 | } | 643 | } |
@@ -708,13 +708,13 @@ static void nvgpu_page_free(struct nvgpu_allocator *__a, u64 base) | |||
708 | ((struct nvgpu_page_alloc *)(uintptr_t)base)->base); | 708 | ((struct nvgpu_page_alloc *)(uintptr_t)base)->base); |
709 | 709 | ||
710 | if (!alloc) { | 710 | if (!alloc) { |
711 | palloc_dbg(a, "Hrm, found no alloc?\n"); | 711 | palloc_dbg(a, "Hrm, found no alloc?"); |
712 | goto done; | 712 | goto done; |
713 | } | 713 | } |
714 | 714 | ||
715 | a->nr_frees++; | 715 | a->nr_frees++; |
716 | 716 | ||
717 | palloc_dbg(a, "Free 0x%llx id=0x%010llx\n", | 717 | palloc_dbg(a, "Free 0x%llx id=0x%010llx", |
718 | alloc->length, alloc->base); | 718 | alloc->length, alloc->base); |
719 | 719 | ||
720 | /* | 720 | /* |
@@ -794,11 +794,11 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a, | |||
794 | __insert_page_alloc(a, alloc); | 794 | __insert_page_alloc(a, alloc); |
795 | alloc_unlock(__a); | 795 | alloc_unlock(__a); |
796 | 796 | ||
797 | palloc_dbg(a, "Alloc [fixed] @ 0x%010llx + 0x%llx (%llu)\n", | 797 | palloc_dbg(a, "Alloc [fixed] @ 0x%010llx + 0x%llx (%llu)", |
798 | alloc->base, aligned_len, pages); | 798 | alloc->base, aligned_len, pages); |
799 | sgl = alloc->sgt.sgl; | 799 | sgl = alloc->sgt.sgl; |
800 | while (sgl) { | 800 | while (sgl) { |
801 | palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx\n", | 801 | palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx", |
802 | i++, | 802 | i++, |
803 | nvgpu_sgt_get_phys(&alloc->sgt, sgl), | 803 | nvgpu_sgt_get_phys(&alloc->sgt, sgl), |
804 | nvgpu_sgt_get_length(&alloc->sgt, sgl)); | 804 | nvgpu_sgt_get_length(&alloc->sgt, sgl)); |
@@ -830,7 +830,7 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *__a, | |||
830 | alloc = (struct nvgpu_page_alloc *) (uintptr_t) base; | 830 | alloc = (struct nvgpu_page_alloc *) (uintptr_t) base; |
831 | } | 831 | } |
832 | 832 | ||
833 | palloc_dbg(a, "Free [fixed] 0x%010llx + 0x%llx\n", | 833 | palloc_dbg(a, "Free [fixed] 0x%010llx + 0x%llx", |
834 | alloc->base, alloc->length); | 834 | alloc->base, alloc->length); |
835 | 835 | ||
836 | a->nr_fixed_frees++; | 836 | a->nr_fixed_frees++; |
@@ -868,47 +868,47 @@ static void nvgpu_page_print_stats(struct nvgpu_allocator *__a, | |||
868 | if (lock) | 868 | if (lock) |
869 | alloc_lock(__a); | 869 | alloc_lock(__a); |
870 | 870 | ||
871 | __alloc_pstat(s, __a, "Page allocator:\n"); | 871 | __alloc_pstat(s, __a, "Page allocator:"); |
872 | __alloc_pstat(s, __a, " allocs %lld\n", a->nr_allocs); | 872 | __alloc_pstat(s, __a, " allocs %lld", a->nr_allocs); |
873 | __alloc_pstat(s, __a, " frees %lld\n", a->nr_frees); | 873 | __alloc_pstat(s, __a, " frees %lld", a->nr_frees); |
874 | __alloc_pstat(s, __a, " fixed_allocs %lld\n", a->nr_fixed_allocs); | 874 | __alloc_pstat(s, __a, " fixed_allocs %lld", a->nr_fixed_allocs); |
875 | __alloc_pstat(s, __a, " fixed_frees %lld\n", a->nr_fixed_frees); | 875 | __alloc_pstat(s, __a, " fixed_frees %lld", a->nr_fixed_frees); |
876 | __alloc_pstat(s, __a, " slab_allocs %lld\n", a->nr_slab_allocs); | 876 | __alloc_pstat(s, __a, " slab_allocs %lld", a->nr_slab_allocs); |
877 | __alloc_pstat(s, __a, " slab_frees %lld\n", a->nr_slab_frees); | 877 | __alloc_pstat(s, __a, " slab_frees %lld", a->nr_slab_frees); |
878 | __alloc_pstat(s, __a, " pages alloced %lld\n", a->pages_alloced); | 878 | __alloc_pstat(s, __a, " pages alloced %lld", a->pages_alloced); |
879 | __alloc_pstat(s, __a, " pages freed %lld\n", a->pages_freed); | 879 | __alloc_pstat(s, __a, " pages freed %lld", a->pages_freed); |
880 | __alloc_pstat(s, __a, "\n"); | 880 | __alloc_pstat(s, __a, ""); |
881 | 881 | ||
882 | __alloc_pstat(s, __a, "Page size: %lld KB\n", | 882 | __alloc_pstat(s, __a, "Page size: %lld KB", |
883 | a->page_size >> 10); | 883 | a->page_size >> 10); |
884 | __alloc_pstat(s, __a, "Total pages: %lld (%lld MB)\n", | 884 | __alloc_pstat(s, __a, "Total pages: %lld (%lld MB)", |
885 | a->length / a->page_size, | 885 | a->length / a->page_size, |
886 | a->length >> 20); | 886 | a->length >> 20); |
887 | __alloc_pstat(s, __a, "Available pages: %lld (%lld MB)\n", | 887 | __alloc_pstat(s, __a, "Available pages: %lld (%lld MB)", |
888 | nvgpu_alloc_space(&a->source_allocator) / a->page_size, | 888 | nvgpu_alloc_space(&a->source_allocator) / a->page_size, |
889 | nvgpu_alloc_space(&a->source_allocator) >> 20); | 889 | nvgpu_alloc_space(&a->source_allocator) >> 20); |
890 | __alloc_pstat(s, __a, "\n"); | 890 | __alloc_pstat(s, __a, ""); |
891 | 891 | ||
892 | /* | 892 | /* |
893 | * Slab info. | 893 | * Slab info. |
894 | */ | 894 | */ |
895 | if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES) { | 895 | if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES) { |
896 | __alloc_pstat(s, __a, "Slabs:\n"); | 896 | __alloc_pstat(s, __a, "Slabs:"); |
897 | __alloc_pstat(s, __a, " size empty partial full\n"); | 897 | __alloc_pstat(s, __a, " size empty partial full"); |
898 | __alloc_pstat(s, __a, " ---- ----- ------- ----\n"); | 898 | __alloc_pstat(s, __a, " ---- ----- ------- ----"); |
899 | 899 | ||
900 | for (i = 0; i < a->nr_slabs; i++) { | 900 | for (i = 0; i < a->nr_slabs; i++) { |
901 | struct page_alloc_slab *slab = &a->slabs[i]; | 901 | struct page_alloc_slab *slab = &a->slabs[i]; |
902 | 902 | ||
903 | __alloc_pstat(s, __a, " %-9u %-9d %-9u %u\n", | 903 | __alloc_pstat(s, __a, " %-9u %-9d %-9u %u", |
904 | slab->slab_size, | 904 | slab->slab_size, |
905 | slab->nr_empty, slab->nr_partial, | 905 | slab->nr_empty, slab->nr_partial, |
906 | slab->nr_full); | 906 | slab->nr_full); |
907 | } | 907 | } |
908 | __alloc_pstat(s, __a, "\n"); | 908 | __alloc_pstat(s, __a, ""); |
909 | } | 909 | } |
910 | 910 | ||
911 | __alloc_pstat(s, __a, "Source alloc: %s\n", | 911 | __alloc_pstat(s, __a, "Source alloc: %s", |
912 | a->source_allocator.name); | 912 | a->source_allocator.name); |
913 | nvgpu_alloc_print_stats(&a->source_allocator, s, lock); | 913 | nvgpu_alloc_print_stats(&a->source_allocator, s, lock); |
914 | 914 | ||
@@ -1029,12 +1029,12 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, | |||
1029 | #ifdef CONFIG_DEBUG_FS | 1029 | #ifdef CONFIG_DEBUG_FS |
1030 | nvgpu_init_alloc_debug(g, __a); | 1030 | nvgpu_init_alloc_debug(g, __a); |
1031 | #endif | 1031 | #endif |
1032 | palloc_dbg(a, "New allocator: type page\n"); | 1032 | palloc_dbg(a, "New allocator: type page"); |
1033 | palloc_dbg(a, " base 0x%llx\n", a->base); | 1033 | palloc_dbg(a, " base 0x%llx", a->base); |
1034 | palloc_dbg(a, " size 0x%llx\n", a->length); | 1034 | palloc_dbg(a, " size 0x%llx", a->length); |
1035 | palloc_dbg(a, " page_size 0x%llx\n", a->page_size); | 1035 | palloc_dbg(a, " page_size 0x%llx", a->page_size); |
1036 | palloc_dbg(a, " flags 0x%llx\n", a->flags); | 1036 | palloc_dbg(a, " flags 0x%llx", a->flags); |
1037 | palloc_dbg(a, " slabs: %d\n", a->nr_slabs); | 1037 | palloc_dbg(a, " slabs: %d", a->nr_slabs); |
1038 | 1038 | ||
1039 | return 0; | 1039 | return 0; |
1040 | 1040 | ||
diff --git a/drivers/gpu/nvgpu/include/nvgpu/allocator.h b/drivers/gpu/nvgpu/include/nvgpu/allocator.h index 1e7ab38f..2ff7e950 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/allocator.h +++ b/drivers/gpu/nvgpu/include/nvgpu/allocator.h | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <nvgpu/list.h> | 37 | #include <nvgpu/list.h> |
38 | #include <nvgpu/types.h> | 38 | #include <nvgpu/types.h> |
39 | 39 | ||
40 | /* #define ALLOCATOR_DEBUG */ | 40 | /* #define ALLOCATOR_DEBUG_FINE */ |
41 | 41 | ||
42 | struct nvgpu_allocator; | 42 | struct nvgpu_allocator; |
43 | struct nvgpu_alloc_carveout; | 43 | struct nvgpu_alloc_carveout; |
@@ -300,7 +300,7 @@ static inline void nvgpu_alloc_disable_dbg(struct nvgpu_allocator *a) | |||
300 | #define __alloc_pstat(seq, allocator, fmt, arg...) \ | 300 | #define __alloc_pstat(seq, allocator, fmt, arg...) \ |
301 | do { \ | 301 | do { \ |
302 | if (seq) \ | 302 | if (seq) \ |
303 | seq_printf(seq, fmt, ##arg); \ | 303 | seq_printf(seq, fmt "\n", ##arg); \ |
304 | else \ | 304 | else \ |
305 | alloc_dbg(allocator, fmt, ##arg); \ | 305 | alloc_dbg(allocator, fmt, ##arg); \ |
306 | } while (0) | 306 | } while (0) |
@@ -311,14 +311,14 @@ static inline void nvgpu_alloc_disable_dbg(struct nvgpu_allocator *a) | |||
311 | 311 | ||
312 | /* | 312 | /* |
313 | * This gives finer control over debugging messages. By defining the | 313 | * This gives finer control over debugging messages. By defining the |
314 | * ALLOCATOR_DEBUG macro prints for an allocator will only get made if | 314 | * ALLOCATOR_DEBUG_FINE macro prints for an allocator will only get made if |
315 | * that allocator's debug flag is set. | 315 | * that allocator's debug flag is set. |
316 | * | 316 | * |
317 | * Otherwise debugging is as normal: debug statements for all allocators | 317 | * Otherwise debugging is as normal: debug statements for all allocators |
318 | * if the GPU debugging mask bit is set. Note: even when ALLOCATOR_DEBUG | 318 | * if the GPU debugging mask bit is set. Note: even when ALLOCATOR_DEBUG_FINE |
319 | * is set gpu_dbg_alloc must still also be set to true. | 319 | * is set gpu_dbg_alloc must still also be set to true. |
320 | */ | 320 | */ |
321 | #if defined(ALLOCATOR_DEBUG) | 321 | #if defined(ALLOCATOR_DEBUG_FINE) |
322 | #define alloc_dbg(a, fmt, arg...) \ | 322 | #define alloc_dbg(a, fmt, arg...) \ |
323 | do { \ | 323 | do { \ |
324 | if ((a)->debug) \ | 324 | if ((a)->debug) \ |