author     Michel Dänzer <michel.daenzer@amd.com>    2017-11-03 11:00:35 -0400
committer  Alex Deucher <alexander.deucher@amd.com>  2017-11-04 09:48:28 -0400
commit     767601d100a53e653233aebca7c262ce0addfa99
tree       080c5ad6705b39c91df0ddee9c0658409c93130f /drivers/gpu/drm/ttm/ttm_page_alloc.c
parent     e1fc12c5d9ad06a2a74e97a91f1b0c5f4c723b50
drm/ttm: Downgrade pr_err to pr_debug for memory allocation failures
Memory allocation failure should generally be handled gracefully by
callers. In particular, with transparent hugepage support, attempts to
allocate huge pages can fail under memory pressure, but the callers
fall back to allocating individual pages instead. In that case, there
would be spurious

    [TTM] Unable to get page %u

error messages in dmesg.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
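The pattern the message describes can be sketched as a minimal,
hypothetical helper (not part of this patch; the function name is
invented, and CONFIG_TRANSPARENT_HUGEPAGE is assumed so that
HPAGE_PMD_ORDER is available): the huge-page attempt is expected to
fail under memory pressure, so its failure is logged at debug level
rather than as an error.

#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/printk.h>

/* Hypothetical illustration only, not code from this patch. */
static struct page *example_alloc_backing_page(gfp_t gfp_flags)
{
	struct page *p;

	/* Opportunistically try a huge page first. */
	p = alloc_pages(gfp_flags | __GFP_NOWARN, HPAGE_PMD_ORDER);
	if (p)
		return p;

	/*
	 * Expected failure under memory pressure: log at debug
	 * level and fall back to a single order-0 page.
	 */
	pr_debug("huge page allocation failed, falling back\n");

	return alloc_page(gfp_flags);
}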
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 4d688c8d7853..316f831ad5f0 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -329,7 +329,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 	if (!pages_to_free) {
-		pr_err("Failed to allocate memory for pool free operation\n");
+		pr_debug("Failed to allocate memory for pool free operation\n");
 		return 0;
 	}
 
@@ -517,7 +517,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
 	if (!caching_array) {
-		pr_err("Unable to allocate table for new pages\n");
+		pr_debug("Unable to allocate table for new pages\n");
 		return -ENOMEM;
 	}
 
@@ -525,7 +525,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 		p = alloc_pages(gfp_flags, order);
 
 		if (!p) {
-			pr_err("Unable to get page %u\n", i);
+			pr_debug("Unable to get page %u\n", i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
@@ -625,7 +625,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
 			++pool->nrefills;
 			pool->npages += alloc_size;
 		} else {
-			pr_err("Failed to fill pool (%p)\n", pool);
+			pr_debug("Failed to fill pool (%p)\n", pool);
 			/* If we have any pages left put them to the pool. */
 			list_for_each_entry(p, &new_pages, lru) {
 				++cpages;
@@ -885,8 +885,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		while (npages) {
 			p = alloc_page(gfp_flags);
 			if (!p) {
-
-				pr_err("Unable to allocate page\n");
+				pr_debug("Unable to allocate page\n");
 				return -ENOMEM;
 			}
 
@@ -925,7 +924,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		/* If there is any pages in the list put them back to
 		 * the pool.
 		 */
-		pr_err("Failed to allocate extra pages for large request\n");
+		pr_debug("Failed to allocate extra pages for large request\n");
 		ttm_put_pages(pages, count, flags, cstate);
 		return r;
 	}
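Note that pr_debug() output is compiled out by default, so the demotion
silences these messages in ordinary builds. On kernels built with
CONFIG_DYNAMIC_DEBUG they can still be re-enabled at runtime when
diagnosing allocation problems, for example with:

echo 'file ttm_page_alloc.c +p' > /sys/kernel/debug/dynamic_debug/control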