about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Roger He <Hongbo.He@amd.com> 2017-11-20 20:37:52 -0500
committer Alex Deucher <alexander.deucher@amd.com> 2017-12-05 14:37:05 -0500
commit    6d5e4e3213f4700338627f1f2fba6d3552b1158a (patch)
tree      17da26d753e33d197aad1922a1f9698e26060c96
parent    1ee0d3d778f146b982b100ae7add0c4da9626e9a (diff)
drm/ttm: add page order in page pool
to indicate page order for each element in the pool

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc.c | 38
1 file changed, 27 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index dca4d8322437..bf25ba25bfc6 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -81,6 +81,7 @@ struct ttm_page_pool {
81 char *name; 81 char *name;
82 unsigned long nfrees; 82 unsigned long nfrees;
83 unsigned long nrefills; 83 unsigned long nrefills;
84 unsigned int order;
84}; 85};
85 86
86/** 87/**
@@ -412,6 +413,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
412 struct ttm_page_pool *pool; 413 struct ttm_page_pool *pool;
413 int shrink_pages = sc->nr_to_scan; 414 int shrink_pages = sc->nr_to_scan;
414 unsigned long freed = 0; 415 unsigned long freed = 0;
416 unsigned int nr_free_pool;
415 417
416 if (!mutex_trylock(&lock)) 418 if (!mutex_trylock(&lock))
417 return SHRINK_STOP; 419 return SHRINK_STOP;
@@ -421,10 +423,15 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
421 unsigned nr_free = shrink_pages; 423 unsigned nr_free = shrink_pages;
422 if (shrink_pages == 0) 424 if (shrink_pages == 0)
423 break; 425 break;
426
424 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; 427 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
425 /* OK to use static buffer since global mutex is held. */ 428 /* OK to use static buffer since global mutex is held. */
426 shrink_pages = ttm_page_pool_free(pool, nr_free, true); 429 nr_free_pool = (nr_free >> pool->order);
427 freed += nr_free - shrink_pages; 430 if (nr_free_pool == 0)
431 continue;
432
433 shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
434 freed += ((nr_free_pool - shrink_pages) << pool->order);
428 } 435 }
429 mutex_unlock(&lock); 436 mutex_unlock(&lock);
430 return freed; 437 return freed;
@@ -436,9 +443,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
436{ 443{
437 unsigned i; 444 unsigned i;
438 unsigned long count = 0; 445 unsigned long count = 0;
446 struct ttm_page_pool *pool;
439 447
440 for (i = 0; i < NUM_POOLS; ++i) 448 for (i = 0; i < NUM_POOLS; ++i) {
441 count += _manager->pools[i].npages; 449 pool = &_manager->pools[i];
450 count += (pool->npages << pool->order);
451 }
442 452
443 return count; 453 return count;
444} 454}
@@ -936,7 +946,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
936} 946}
937 947
938static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, 948static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
939 char *name) 949 char *name, unsigned int order)
940{ 950{
941 spin_lock_init(&pool->lock); 951 spin_lock_init(&pool->lock);
942 pool->fill_lock = false; 952 pool->fill_lock = false;
@@ -944,11 +954,17 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
944 pool->npages = pool->nfrees = 0; 954 pool->npages = pool->nfrees = 0;
945 pool->gfp_flags = flags; 955 pool->gfp_flags = flags;
946 pool->name = name; 956 pool->name = name;
957 pool->order = order;
947} 958}
948 959
949int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) 960int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
950{ 961{
951 int ret; 962 int ret;
963#ifdef CONFIG_TRANSPARENT_HUGEPAGE
964 unsigned order = HPAGE_PMD_ORDER;
965#else
966 unsigned order = 0;
967#endif
952 968
953 WARN_ON(_manager); 969 WARN_ON(_manager);
954 970
@@ -956,23 +972,23 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
956 972
957 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); 973 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
958 974
959 ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); 975 ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
960 976
961 ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc"); 977 ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
962 978
963 ttm_page_pool_init_locked(&_manager->wc_pool_dma32, 979 ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
964 GFP_USER | GFP_DMA32, "wc dma"); 980 GFP_USER | GFP_DMA32, "wc dma", 0);
965 981
966 ttm_page_pool_init_locked(&_manager->uc_pool_dma32, 982 ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
967 GFP_USER | GFP_DMA32, "uc dma"); 983 GFP_USER | GFP_DMA32, "uc dma", 0);
968 984
969 ttm_page_pool_init_locked(&_manager->wc_pool_huge, 985 ttm_page_pool_init_locked(&_manager->wc_pool_huge,
970 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP), 986 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
971 "wc huge"); 987 "wc huge", order);
972 988
973 ttm_page_pool_init_locked(&_manager->uc_pool_huge, 989 ttm_page_pool_init_locked(&_manager->uc_pool_huge,
974 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP) 990 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
975 , "uc huge"); 991 , "uc huge", order);
976 992
977 _manager->options.max_size = max_pages; 993 _manager->options.max_size = max_pages;
978 _manager->options.small = SMALL_ALLOCATION; 994 _manager->options.small = SMALL_ALLOCATION;