about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/drm/ttm/ttm_page_alloc.c
diff options
context:
space:
mode:
author: Joe Perches <joe@perches.com> 2012-03-17 00:43:50 -0400
committer: Dave Airlie <airlied@redhat.com> 2012-03-20 04:45:35 -0400
commit: 25d0479a5925562fbf999afb5a8daa3f501c729d (patch)
tree: fce09c17114af428b041eee88d43cf2bc50901a7 /drivers/gpu/drm/ttm/ttm_page_alloc.c
parent: f10487658a0e5fd793ee5ba7ad645c060e4afbcd (diff)
drm/ttm: Use pr_fmt and pr_<level>
Use the more current logging style. Add pr_fmt and remove the TTM_PFX uses. Coalesce formats and align arguments. Signed-off-by: Joe Perches <joe@perches.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc.c | 55
1 file changed, 19 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 499debda791e..ebc6fac96e36 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -30,6 +30,9 @@
30 * - Use page->lru to keep a free list 30 * - Use page->lru to keep a free list
31 * - doesn't track currently in use pages 31 * - doesn't track currently in use pages
32 */ 32 */
33
34#define pr_fmt(fmt) "[TTM] " fmt
35
33#include <linux/list.h> 36#include <linux/list.h>
34#include <linux/spinlock.h> 37#include <linux/spinlock.h>
35#include <linux/highmem.h> 38#include <linux/highmem.h>
@@ -167,18 +170,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
167 m->options.small = val; 170 m->options.small = val;
168 else if (attr == &ttm_page_pool_alloc_size) { 171 else if (attr == &ttm_page_pool_alloc_size) {
169 if (val > NUM_PAGES_TO_ALLOC*8) { 172 if (val > NUM_PAGES_TO_ALLOC*8) {
170 printk(KERN_ERR TTM_PFX 173 pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
171 "Setting allocation size to %lu "
172 "is not allowed. Recommended size is "
173 "%lu\n",
174 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), 174 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
175 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); 175 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
176 return size; 176 return size;
177 } else if (val > NUM_PAGES_TO_ALLOC) { 177 } else if (val > NUM_PAGES_TO_ALLOC) {
178 printk(KERN_WARNING TTM_PFX 178 pr_warn("Setting allocation size to larger than %lu is not recommended\n",
179 "Setting allocation size to " 179 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
180 "larger than %lu is not recommended.\n",
181 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
182 } 180 }
183 m->options.alloc_size = val; 181 m->options.alloc_size = val;
184 } 182 }
@@ -279,8 +277,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
279{ 277{
280 unsigned i; 278 unsigned i;
281 if (set_pages_array_wb(pages, npages)) 279 if (set_pages_array_wb(pages, npages))
282 printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", 280 pr_err("Failed to set %d pages to wb!\n", npages);
283 npages);
284 for (i = 0; i < npages; ++i) 281 for (i = 0; i < npages; ++i)
285 __free_page(pages[i]); 282 __free_page(pages[i]);
286} 283}
@@ -315,8 +312,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
315 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), 312 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
316 GFP_KERNEL); 313 GFP_KERNEL);
317 if (!pages_to_free) { 314 if (!pages_to_free) {
318 printk(KERN_ERR TTM_PFX 315 pr_err("Failed to allocate memory for pool free operation\n");
319 "Failed to allocate memory for pool free operation.\n");
320 return 0; 316 return 0;
321 } 317 }
322 318
@@ -438,16 +434,12 @@ static int ttm_set_pages_caching(struct page **pages,
438 case tt_uncached: 434 case tt_uncached:
439 r = set_pages_array_uc(pages, cpages); 435 r = set_pages_array_uc(pages, cpages);
440 if (r) 436 if (r)
441 printk(KERN_ERR TTM_PFX 437 pr_err("Failed to set %d pages to uc!\n", cpages);
442 "Failed to set %d pages to uc!\n",
443 cpages);
444 break; 438 break;
445 case tt_wc: 439 case tt_wc:
446 r = set_pages_array_wc(pages, cpages); 440 r = set_pages_array_wc(pages, cpages);
447 if (r) 441 if (r)
448 printk(KERN_ERR TTM_PFX 442 pr_err("Failed to set %d pages to wc!\n", cpages);
449 "Failed to set %d pages to wc!\n",
450 cpages);
451 break; 443 break;
452 default: 444 default:
453 break; 445 break;
@@ -492,8 +484,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
492 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); 484 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
493 485
494 if (!caching_array) { 486 if (!caching_array) {
495 printk(KERN_ERR TTM_PFX 487 pr_err("Unable to allocate table for new pages\n");
496 "Unable to allocate table for new pages.");
497 return -ENOMEM; 488 return -ENOMEM;
498 } 489 }
499 490
@@ -501,7 +492,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
501 p = alloc_page(gfp_flags); 492 p = alloc_page(gfp_flags);
502 493
503 if (!p) { 494 if (!p) {
504 printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i); 495 pr_err("Unable to get page %u\n", i);
505 496
506 /* store already allocated pages in the pool after 497 /* store already allocated pages in the pool after
507 * setting the caching state */ 498 * setting the caching state */
@@ -599,8 +590,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
599 ++pool->nrefills; 590 ++pool->nrefills;
600 pool->npages += alloc_size; 591 pool->npages += alloc_size;
601 } else { 592 } else {
602 printk(KERN_ERR TTM_PFX 593 pr_err("Failed to fill pool (%p)\n", pool);
603 "Failed to fill pool (%p).", pool);
604 /* If we have any pages left put them to the pool. */ 594 /* If we have any pages left put them to the pool. */
605 list_for_each_entry(p, &pool->list, lru) { 595 list_for_each_entry(p, &pool->list, lru) {
606 ++cpages; 596 ++cpages;
@@ -675,9 +665,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
675 for (i = 0; i < npages; i++) { 665 for (i = 0; i < npages; i++) {
676 if (pages[i]) { 666 if (pages[i]) {
677 if (page_count(pages[i]) != 1) 667 if (page_count(pages[i]) != 1)
678 printk(KERN_ERR TTM_PFX 668 pr_err("Erroneous page count. Leaking pages.\n");
679 "Erroneous page count. "
680 "Leaking pages.\n");
681 __free_page(pages[i]); 669 __free_page(pages[i]);
682 pages[i] = NULL; 670 pages[i] = NULL;
683 } 671 }
@@ -689,9 +677,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
689 for (i = 0; i < npages; i++) { 677 for (i = 0; i < npages; i++) {
690 if (pages[i]) { 678 if (pages[i]) {
691 if (page_count(pages[i]) != 1) 679 if (page_count(pages[i]) != 1)
692 printk(KERN_ERR TTM_PFX 680 pr_err("Erroneous page count. Leaking pages.\n");
693 "Erroneous page count. "
694 "Leaking pages.\n");
695 list_add_tail(&pages[i]->lru, &pool->list); 681 list_add_tail(&pages[i]->lru, &pool->list);
696 pages[i] = NULL; 682 pages[i] = NULL;
697 pool->npages++; 683 pool->npages++;
@@ -740,8 +726,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
740 p = alloc_page(gfp_flags); 726 p = alloc_page(gfp_flags);
741 if (!p) { 727 if (!p) {
742 728
743 printk(KERN_ERR TTM_PFX 729 pr_err("Unable to allocate page\n");
744 "Unable to allocate page.");
745 return -ENOMEM; 730 return -ENOMEM;
746 } 731 }
747 732
@@ -781,9 +766,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
781 if (r) { 766 if (r) {
782 /* If there is any pages in the list put them back to 767 /* If there is any pages in the list put them back to
783 * the pool. */ 768 * the pool. */
784 printk(KERN_ERR TTM_PFX 769 pr_err("Failed to allocate extra pages for large request\n");
785 "Failed to allocate extra pages "
786 "for large request.");
787 ttm_put_pages(pages, count, flags, cstate); 770 ttm_put_pages(pages, count, flags, cstate);
788 return r; 771 return r;
789 } 772 }
@@ -809,7 +792,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
809 792
810 WARN_ON(_manager); 793 WARN_ON(_manager);
811 794
812 printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); 795 pr_info("Initializing pool allocator\n");
813 796
814 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); 797 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
815 798
@@ -844,7 +827,7 @@ void ttm_page_alloc_fini(void)
844{ 827{
845 int i; 828 int i;
846 829
847 printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); 830 pr_info("Finalizing pool allocator\n");
848 ttm_pool_mm_shrink_fini(_manager); 831 ttm_pool_mm_shrink_fini(_manager);
849 832
850 for (i = 0; i < NUM_POOLS; ++i) 833 for (i = 0; i < NUM_POOLS; ++i)