author     Thomas Hellstrom <thellstrom@vmware.com>	2010-05-26 10:21:04 -0400
committer  Dave Airlie <airlied@redhat.com>	2010-05-31 19:39:49 -0400
commit     4abe4389790d5f02569fbacdf035536ba84c7d44
tree       291bf58a59b5ddecc1d25b2e0632661f72a81b93 /drivers/gpu/drm
parent     e8613c0e29d0018a80652e6ae58660c8a75ac74b
drm/ttm: Fix ttm_page_alloc.c
Fix a number of typos, misspellings, and checkpatch.pl warnings.
Replace "[ttm] " with TTM_PFX.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
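
The key mechanical change in this commit is replacing the hard-coded "[ttm] " string scattered through the printk calls with the shared TTM_PFX macro. A minimal sketch of the pattern, assuming the definition from include/drm/ttm/ttm_module.h of this era ("[TTM] "); verify the exact literal against your tree:

#include <linux/kernel.h>

/* Assumed definition; in trees of this era it lives in
 * include/drm/ttm/ttm_module.h. */
#define TTM_PFX "[TTM] "

static void example_log(void *pool)
{
	/* Adjacent string literals are concatenated at compile time, so
	 * every message gets a single, consistent prefix. */
	printk(KERN_ERR TTM_PFX "Failed to fill pool (%p).", pool);
}

The win is that the prefix is spelled once: a typo such as "[ttm] " versus "[TTM] " cannot creep into individual call sites, and the prefix can be changed in one place.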
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	62
1 file changed, 36 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b6d152360675..ef910694bd63 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -77,7 +77,7 @@ struct ttm_page_pool {
 /**
  * Limits for the pool. They are handled without locks because only place where
  * they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialiazation to access them is pointless.
+ * so forcing serialization to access them is pointless.
  */
 
 struct ttm_pool_opts {
@@ -165,16 +165,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
 		m->options.small = val;
 	else if (attr == &ttm_page_pool_alloc_size) {
 		if (val > NUM_PAGES_TO_ALLOC*8) {
-			printk(KERN_ERR "[ttm] Setting allocation size to %lu "
-			       "is not allowed. Recomended size is "
-			       "%lu\n",
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			printk(KERN_ERR TTM_PFX
+			       "Setting allocation size to %lu "
+			       "is not allowed. Recommended size is "
+			       "%lu\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 			return size;
 		} else if (val > NUM_PAGES_TO_ALLOC) {
-			printk(KERN_WARNING "[ttm] Setting allocation size to "
-			       "larger than %lu is not recomended.\n",
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			printk(KERN_WARNING TTM_PFX
+			       "Setting allocation size to "
+			       "larger than %lu is not recommended.\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 		}
 		m->options.alloc_size = val;
 	}
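
The shift arithmetic in the hunk above is easy to misread. The sysfs value is entered in kilobytes and converted to pages, so the messages convert page counts back the other way: NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10) is the recommended maximum in KB, and PAGE_SIZE >> 7 is (PAGE_SIZE >> 10) * 8, i.e. the 8x hard cap expressed in KB. A worked sketch, assuming 4 KiB pages and NUM_PAGES_TO_ALLOC defined as PAGE_SIZE/sizeof(struct page *) (512 on a 64-bit build), as in this file:

#include <stdio.h>

/* Assumptions for the arithmetic only; check the real values in your tree. */
#define PAGE_SIZE		4096u
#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE / 8u)	/* sizeof(struct page *) == 8 */

int main(void)
{
	/* PAGE_SIZE >> 10 converts a page count to KB (4096 >> 10 == 4). */
	unsigned recommended_kb = NUM_PAGES_TO_ALLOC * (PAGE_SIZE >> 10);
	/* PAGE_SIZE >> 7 is eight times that, matching val > NUM_PAGES_TO_ALLOC*8. */
	unsigned hard_cap_kb = NUM_PAGES_TO_ALLOC * (PAGE_SIZE >> 7);

	printf("recommended <= %u KB, rejected above %u KB\n",
	       recommended_kb, hard_cap_kb);	/* 2048 KB and 16384 KB */
	return 0;
}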
@@ -277,7 +279,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
 {
 	unsigned i;
 	if (set_pages_array_wb(pages, npages))
-		printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
 		       npages);
 	for (i = 0; i < npages; ++i)
 		__free_page(pages[i]);
@@ -313,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 	if (!pages_to_free) {
-		printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+		printk(KERN_ERR TTM_PFX
+		       "Failed to allocate memory for pool free operation.\n");
 		return 0;
 	}
 
@@ -390,7 +393,7 @@ static int ttm_pool_get_num_unused_pages(void)
 }
 
 /**
- * Calback for mm to request pool to reduce number of page held.
+ * Callback for mm to request pool to reduce number of page held.
  */
 static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
 {
@@ -433,14 +436,16 @@ static int ttm_set_pages_caching(struct page **pages,
 	case tt_uncached:
 		r = set_pages_array_uc(pages, cpages);
 		if (r)
-			printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
-			       cpages);
+			printk(KERN_ERR TTM_PFX
+			       "Failed to set %d pages to uc!\n",
+			       cpages);
 		break;
 	case tt_wc:
 		r = set_pages_array_wc(pages, cpages);
 		if (r)
-			printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
-			       cpages);
+			printk(KERN_ERR TTM_PFX
+			       "Failed to set %d pages to wc!\n",
+			       cpages);
 		break;
 	default:
 		break;
@@ -458,7 +463,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
 		struct page **failed_pages, unsigned cpages)
 {
 	unsigned i;
-	/* Failed pages has to be reed */
+	/* Failed pages have to be freed */
 	for (i = 0; i < cpages; ++i) {
 		list_del(&failed_pages[i]->lru);
 		__free_page(failed_pages[i]);
@@ -485,7 +490,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
 	if (!caching_array) {
-		printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+		printk(KERN_ERR TTM_PFX
+		       "Unable to allocate table for new pages.");
 		return -ENOMEM;
 	}
 
@@ -493,12 +499,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
 		p = alloc_page(gfp_flags);
 
 		if (!p) {
-			printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
 			if (cpages) {
-				r = ttm_set_pages_caching(caching_array, cstate, cpages);
+				r = ttm_set_pages_caching(caching_array,
+							  cstate, cpages);
 				if (r)
 					ttm_handle_caching_state_failure(pages,
 						ttm_flags, cstate,
@@ -590,7 +597,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 		++pool->nrefills;
 		pool->npages += alloc_size;
 	} else {
-		printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+		printk(KERN_ERR TTM_PFX
+		       "Failed to fill pool (%p).", pool);
 		/* If we have any pages left put them to the pool. */
 		list_for_each_entry(p, &pool->list, lru) {
 			++cpages;
@@ -677,7 +685,8 @@ int ttm_get_pages(struct list_head *pages, int flags,
 			p = alloc_page(gfp_flags);
 			if (!p) {
 
-				printk(KERN_ERR "[ttm] unable to allocate page.");
+				printk(KERN_ERR TTM_PFX
+				       "Unable to allocate page.");
 				return -ENOMEM;
 			}
 
@@ -709,8 +718,9 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		if (r) {
 			/* If there is any pages in the list put them back to
 			 * the pool. */
-			printk(KERN_ERR "[ttm] Failed to allocate extra pages "
-			       "for large request.");
+			printk(KERN_ERR TTM_PFX
+			       "Failed to allocate extra pages "
+			       "for large request.");
 			ttm_put_pages(pages, 0, flags, cstate);
 			return r;
 		}
@@ -778,7 +788,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
 		return 0;
 
-	printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
 
 	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
 
@@ -813,7 +823,7 @@ void ttm_page_alloc_fini()
 	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
 		return;
 
-	printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
 	ttm_pool_mm_shrink_fini(&_manager);
 
 	for (i = 0; i < NUM_POOLS; ++i)
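
A closing note on the design choice: the prefix-macro idiom used here predates the pr_fmt convention that later became the usual way to prefix a driver's log messages. A hedged sketch of the modern equivalent (the "ttm: " literal is illustrative, not what the driver ships):

/* pr_fmt must be defined before the printk header is pulled in so that
 * pr_err()/pr_info() expand with the prefix baked in. */
#define pr_fmt(fmt) "ttm: " fmt

#include <linux/printk.h>

static void example_log(unsigned npages)
{
	pr_err("Failed to set %u pages to wb!\n", npages);
	/* emits: "ttm: Failed to set 42 pages to wb!" for npages == 42 */
}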