Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/page_allocator.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/page_allocator.c | 90
1 file changed, 45 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index e7738919..6dc1edf7 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -279,7 +279,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 
 	slab_page = nvgpu_kmem_cache_alloc(a->slab_page_cache);
 	if (!slab_page) {
-		palloc_dbg(a, "OOM: unable to alloc slab_page struct!\n");
+		palloc_dbg(a, "OOM: unable to alloc slab_page struct!");
 		return NULL;
 	}
 
@@ -288,7 +288,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 	slab_page->page_addr = nvgpu_alloc(&a->source_allocator, a->page_size);
 	if (!slab_page->page_addr) {
 		nvgpu_kmem_cache_free(a->slab_page_cache, slab_page);
-		palloc_dbg(a, "OOM: vidmem is full!\n");
+		palloc_dbg(a, "OOM: vidmem is full!");
 		return NULL;
 	}
 
@@ -301,7 +301,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 
 	a->pages_alloced++;
 
-	palloc_dbg(a, "Allocated new slab page @ 0x%012llx size=%u\n",
+	palloc_dbg(a, "Allocated new slab page @ 0x%012llx size=%u",
 		   slab_page->page_addr, slab_page->slab_size);
 
 	return slab_page;
@@ -310,7 +310,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 static void free_slab_page(struct nvgpu_page_allocator *a,
 			   struct page_alloc_slab_page *slab_page)
 {
-	palloc_dbg(a, "Freeing slab page @ 0x%012llx\n", slab_page->page_addr);
+	palloc_dbg(a, "Freeing slab page @ 0x%012llx", slab_page->page_addr);
 
 	BUG_ON((slab_page->state != SP_NONE && slab_page->state != SP_EMPTY) ||
 	       slab_page->nr_objects_alloced != 0 ||
@@ -418,7 +418,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 
 	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
 	if (!alloc) {
-		palloc_dbg(a, "OOM: could not alloc page_alloc struct!\n");
+		palloc_dbg(a, "OOM: could not alloc page_alloc struct!");
 		goto fail;
 	}
 
@@ -426,7 +426,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 
 	sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl));
 	if (!sgl) {
-		palloc_dbg(a, "OOM: could not alloc sgl struct!\n");
+		palloc_dbg(a, "OOM: could not alloc sgl struct!");
 		goto fail;
 	}
 
@@ -435,7 +435,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 	if (err)
 		goto fail;
 
-	palloc_dbg(a, "Alloc 0x%04llx sr=%d id=0x%010llx [slab]\n",
+	palloc_dbg(a, "Alloc 0x%04llx sr=%d id=0x%010llx [slab]",
 		   len, slab_nr, alloc->base);
 	a->nr_slab_allocs++;
 
@@ -549,7 +549,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 
 		/* Divide by 2 and try again */
 		if (!chunk_addr) {
-			palloc_dbg(a, "balloc failed: 0x%llx\n",
+			palloc_dbg(a, "balloc failed: 0x%llx",
 				   chunk_len);
 			chunk_len >>= 1;
 			max_chunk_len = chunk_len;
@@ -559,7 +559,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 		chunk_pages = chunk_len >> a->page_shift;
 
 		if (!chunk_addr) {
-			palloc_dbg(a, "bailing @ 0x%llx\n", chunk_len);
+			palloc_dbg(a, "bailing @ 0x%llx", chunk_len);
 			goto fail_cleanup;
 		}
 
@@ -622,22 +622,22 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
 
 	alloc = __do_nvgpu_alloc_pages(a, pages);
 	if (!alloc) {
-		palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)\n",
+		palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)",
 			   pages << a->page_shift, pages);
 		return NULL;
 	}
 
-	palloc_dbg(a, "Alloc 0x%llx (%llu) id=0x%010llx\n",
+	palloc_dbg(a, "Alloc 0x%llx (%llu) id=0x%010llx",
 		   pages << a->page_shift, pages, alloc->base);
 	sgl = alloc->sgt.sgl;
 	while (sgl) {
-		palloc_dbg(a, "  Chunk %2d: 0x%010llx + 0x%llx\n",
+		palloc_dbg(a, "  Chunk %2d: 0x%010llx + 0x%llx",
 			   i++,
 			   nvgpu_sgt_get_phys(&alloc->sgt, sgl),
 			   nvgpu_sgt_get_length(&alloc->sgt, sgl));
 		sgl = nvgpu_sgt_get_next(&alloc->sgt, sgl);
 	}
-	palloc_dbg(a, "Alloc done\n");
+	palloc_dbg(a, "Alloc done");
 
 	return alloc;
 }
@@ -708,13 +708,13 @@ static void nvgpu_page_free(struct nvgpu_allocator *__a, u64 base)
 			((struct nvgpu_page_alloc *)(uintptr_t)base)->base);
 
 	if (!alloc) {
-		palloc_dbg(a, "Hrm, found no alloc?\n");
+		palloc_dbg(a, "Hrm, found no alloc?");
 		goto done;
 	}
 
 	a->nr_frees++;
 
-	palloc_dbg(a, "Free 0x%llx id=0x%010llx\n",
+	palloc_dbg(a, "Free 0x%llx id=0x%010llx",
 		   alloc->length, alloc->base);
 
 	/*
@@ -794,11 +794,11 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
 	__insert_page_alloc(a, alloc);
 	alloc_unlock(__a);
 
-	palloc_dbg(a, "Alloc [fixed] @ 0x%010llx + 0x%llx (%llu)\n",
+	palloc_dbg(a, "Alloc [fixed] @ 0x%010llx + 0x%llx (%llu)",
 		   alloc->base, aligned_len, pages);
 	sgl = alloc->sgt.sgl;
 	while (sgl) {
-		palloc_dbg(a, "  Chunk %2d: 0x%010llx + 0x%llx\n",
+		palloc_dbg(a, "  Chunk %2d: 0x%010llx + 0x%llx",
 			   i++,
 			   nvgpu_sgt_get_phys(&alloc->sgt, sgl),
 			   nvgpu_sgt_get_length(&alloc->sgt, sgl));
@@ -830,7 +830,7 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *__a,
 		alloc = (struct nvgpu_page_alloc *) (uintptr_t) base;
 	}
 
-	palloc_dbg(a, "Free [fixed] 0x%010llx + 0x%llx\n",
+	palloc_dbg(a, "Free [fixed] 0x%010llx + 0x%llx",
 		   alloc->base, alloc->length);
 
 	a->nr_fixed_frees++;
@@ -868,47 +868,47 @@ static void nvgpu_page_print_stats(struct nvgpu_allocator *__a,
 	if (lock)
 		alloc_lock(__a);
 
-	__alloc_pstat(s, __a, "Page allocator:\n");
-	__alloc_pstat(s, __a, "  allocs         %lld\n", a->nr_allocs);
-	__alloc_pstat(s, __a, "  frees          %lld\n", a->nr_frees);
-	__alloc_pstat(s, __a, "  fixed_allocs   %lld\n", a->nr_fixed_allocs);
-	__alloc_pstat(s, __a, "  fixed_frees    %lld\n", a->nr_fixed_frees);
-	__alloc_pstat(s, __a, "  slab_allocs    %lld\n", a->nr_slab_allocs);
-	__alloc_pstat(s, __a, "  slab_frees     %lld\n", a->nr_slab_frees);
-	__alloc_pstat(s, __a, "  pages alloced  %lld\n", a->pages_alloced);
-	__alloc_pstat(s, __a, "  pages freed    %lld\n", a->pages_freed);
-	__alloc_pstat(s, __a, "\n");
+	__alloc_pstat(s, __a, "Page allocator:");
+	__alloc_pstat(s, __a, "  allocs         %lld", a->nr_allocs);
+	__alloc_pstat(s, __a, "  frees          %lld", a->nr_frees);
+	__alloc_pstat(s, __a, "  fixed_allocs   %lld", a->nr_fixed_allocs);
+	__alloc_pstat(s, __a, "  fixed_frees    %lld", a->nr_fixed_frees);
+	__alloc_pstat(s, __a, "  slab_allocs    %lld", a->nr_slab_allocs);
+	__alloc_pstat(s, __a, "  slab_frees     %lld", a->nr_slab_frees);
+	__alloc_pstat(s, __a, "  pages alloced  %lld", a->pages_alloced);
+	__alloc_pstat(s, __a, "  pages freed    %lld", a->pages_freed);
+	__alloc_pstat(s, __a, "");
 
-	__alloc_pstat(s, __a, "Page size:       %lld KB\n",
+	__alloc_pstat(s, __a, "Page size:       %lld KB",
 		      a->page_size >> 10);
-	__alloc_pstat(s, __a, "Total pages:     %lld (%lld MB)\n",
+	__alloc_pstat(s, __a, "Total pages:     %lld (%lld MB)",
 		      a->length / a->page_size,
 		      a->length >> 20);
-	__alloc_pstat(s, __a, "Available pages: %lld (%lld MB)\n",
+	__alloc_pstat(s, __a, "Available pages: %lld (%lld MB)",
 		      nvgpu_alloc_space(&a->source_allocator) / a->page_size,
 		      nvgpu_alloc_space(&a->source_allocator) >> 20);
-	__alloc_pstat(s, __a, "\n");
+	__alloc_pstat(s, __a, "");
 
 	/*
 	 * Slab info.
 	 */
 	if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES) {
-		__alloc_pstat(s, __a, "Slabs:\n");
-		__alloc_pstat(s, __a, "  size      empty     partial   full\n");
-		__alloc_pstat(s, __a, "  ----      -----     -------   ----\n");
+		__alloc_pstat(s, __a, "Slabs:");
+		__alloc_pstat(s, __a, "  size      empty     partial   full");
+		__alloc_pstat(s, __a, "  ----      -----     -------   ----");
 
 		for (i = 0; i < a->nr_slabs; i++) {
 			struct page_alloc_slab *slab = &a->slabs[i];
 
-			__alloc_pstat(s, __a, "  %-9u %-9d %-9u %u\n",
+			__alloc_pstat(s, __a, "  %-9u %-9d %-9u %u",
 				      slab->slab_size,
 				      slab->nr_empty, slab->nr_partial,
 				      slab->nr_full);
 		}
-		__alloc_pstat(s, __a, "\n");
+		__alloc_pstat(s, __a, "");
 	}
 
-	__alloc_pstat(s, __a, "Source alloc: %s\n",
+	__alloc_pstat(s, __a, "Source alloc: %s",
 		      a->source_allocator.name);
 	nvgpu_alloc_print_stats(&a->source_allocator, s, lock);
 
@@ -1029,12 +1029,12 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 #ifdef CONFIG_DEBUG_FS
 	nvgpu_init_alloc_debug(g, __a);
 #endif
-	palloc_dbg(a, "New allocator: type      page\n");
-	palloc_dbg(a, "               base      0x%llx\n", a->base);
-	palloc_dbg(a, "               size      0x%llx\n", a->length);
-	palloc_dbg(a, "               page_size 0x%llx\n", a->page_size);
-	palloc_dbg(a, "               flags     0x%llx\n", a->flags);
-	palloc_dbg(a, "               slabs:    %d\n", a->nr_slabs);
+	palloc_dbg(a, "New allocator: type      page");
+	palloc_dbg(a, "               base      0x%llx", a->base);
+	palloc_dbg(a, "               size      0x%llx", a->length);
+	palloc_dbg(a, "               page_size 0x%llx", a->page_size);
+	palloc_dbg(a, "               flags     0x%llx", a->flags);
+	palloc_dbg(a, "               slabs:    %d", a->nr_slabs);
 
 	return 0;
 
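
The change above is mechanical: every palloc_dbg() and __alloc_pstat() format string loses its trailing "\n". Presumably the logging backend appends the newline itself after this commit, so keeping one at the call site would print doubled blank lines. Below is a minimal sketch of that pattern under that assumption; the names pa_sketch and palloc_dbg_sketch are hypothetical stand-ins, not the real nvgpu API.

/*
 * Hypothetical stand-in for a debug macro that appends the newline
 * itself, so call sites can drop their trailing "\n" -- the pattern
 * this diff applies across page_allocator.c. Uses the GNU
 * ##__VA_ARGS__ extension, as kernel code does.
 */
#include <stdio.h>

struct pa_sketch {
	const char *name;
};

#define palloc_dbg_sketch(a, fmt, ...) \
	printf("[pa] %s: " fmt "\n", (a)->name, ##__VA_ARGS__)

int main(void)
{
	struct pa_sketch a = { .name = "vidmem" };

	/* No trailing '\n' at the call site; the macro supplies it. */
	palloc_dbg_sketch(&a, "Alloc 0x%llx (%llu)", 4096ULL, 1ULL);
	palloc_dbg_sketch(&a, "Alloc done");
	return 0;
}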