path: root/drivers/gpu/drm/ttm
author    Dave Chinner <dchinner@redhat.com>    2013-08-27 20:18:11 -0400
committer Al Viro <viro@zeniv.linux.org.uk>     2013-09-10 18:56:32 -0400
commit    7dc19d5affd71370754a2c3d36b485810eaee7a1 (patch)
tree      56b84fb2f7462e81c8340dea6341c30ce247f798 /drivers/gpu/drm/ttm
parent    1ab6c4997e04a00c50c6d786c2f046adc0d1f5de (diff)
drivers: convert shrinkers to new count/scan API
Convert the driver shrinkers to the new API. Most changes are compile tested only because I either don't have the hardware or it's staging stuff.

FWIW, the md and android code is pretty good, but the rest of it makes me want to claw my eyes out. The amount of broken code I just encountered is mind boggling. I've added comments explaining what is broken, but I fear that some of the code would be best dealt with by being dragged behind the bike shed, buried in mud up to its neck and then run over repeatedly with a blunt lawn mower.

Special mention goes to the zcache/zcache2 drivers. They can't co-exist in the build at the same time, they are under different menu options in menuconfig, they only show up when you've got the right set of mm subsystem options configured, and so even compile testing is an exercise in pulling teeth. And that doesn't even take into account the horrible, broken code...

[glommer@openvz.org: fixes for i915, android lowmem, zcache, bcache]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
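For orientation, the new API replaces the old single shrink() callback with a count_objects()/scan_objects() pair: count gives the VM a cheap estimate of reclaimable objects, scan does the actual freeing and reports progress (or SHRINK_STOP when none is possible). A minimal sketch, assuming a made-up example_pool_pages counter; struct shrinker, struct shrink_control, SHRINK_STOP, DEFAULT_SEEKS and register_shrinker() are the real interfaces from <linux/shrinker.h>:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/shrinker.h>

/* Hypothetical pool: a bare counter standing in for a real page pool. */
static atomic_long_t example_pool_pages = ATOMIC_LONG_INIT(0);

/* count_objects(): cheap, lock-free estimate of what could be reclaimed. */
static unsigned long example_shrink_count(struct shrinker *shrink,
                                          struct shrink_control *sc)
{
        return atomic_long_read(&example_pool_pages);
}

/* scan_objects(): free up to sc->nr_to_scan objects and report how many
 * actually went; SHRINK_STOP would tell the VM that no progress is
 * possible in this reclaim context. */
static unsigned long example_shrink_scan(struct shrinker *shrink,
                                         struct shrink_control *sc)
{
        unsigned long nr = min_t(unsigned long, sc->nr_to_scan,
                                 atomic_long_read(&example_pool_pages));

        /* "Free" nr objects from the hypothetical pool. */
        atomic_long_sub(nr, &example_pool_pages);
        return nr;
}

static struct shrinker example_shrinker = {
        .count_objects  = example_shrink_count,
        .scan_objects   = example_shrink_scan,
        .seeks          = DEFAULT_SEEKS,
};

/* register_shrinker(&example_shrinker) in the driver's init path,
 * unregister_shrinker(&example_shrinker) on teardown. */

count_objects() runs on every reclaim pass, so it has to stay cheap; the expensive work belongs in scan_objects().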
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--   drivers/gpu/drm/ttm/ttm_page_alloc.c       44
-rw-r--r--   drivers/gpu/drm/ttm/ttm_page_alloc_dma.c   51
2 files changed, 60 insertions, 35 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index bd2a3b40cd12..863bef9f9234 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,28 +377,26 @@ out:
 	return nr_free;
 }
 
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
-	unsigned i;
-	int total = 0;
-	for (i = 0; i < NUM_POOLS; ++i)
-		total += _manager->pools[i].npages;
-
-	return total;
-}
-
 /**
  * Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
+ * this can deadlock when called a sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
  */
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
-			      struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	static atomic_t start_pool = ATOMIC_INIT(0);
 	unsigned i;
 	unsigned pool_offset = atomic_add_return(1, &start_pool);
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
+	unsigned long freed = 0;
 
 	pool_offset = pool_offset % NUM_POOLS;
 	/* select start pool in round robin fashion */
@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 		shrink_pages = ttm_page_pool_free(pool, nr_free);
+		freed += nr_free - shrink_pages;
 	}
-	/* return estimated number of unused pages in pool */
-	return ttm_pool_get_num_unused_pages();
+	return freed;
+}
+
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned i;
+	unsigned long count = 0;
+
+	for (i = 0; i < NUM_POOLS; ++i)
+		count += _manager->pools[i].npages;
+
+	return count;
 }
 
 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
-	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
 	register_shrinker(&manager->mm_shrink);
 }
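The XXX comment added above flags that ttm_page_pool_free() allocates with GFP_KERNEL from inside the scan callback, so the shrinker can deadlock if reclaim enters it with a stricter sc->gfp_mask. This patch only documents the problem; one hedged way to guard against it (not part of this commit, and the helper name is made up) would be to bail out before touching the pools:

#include <linux/gfp.h>
#include <linux/shrinker.h>

/* Hypothetical guard, not in this patch: refuse to scan when the reclaim
 * context cannot satisfy the GFP_KERNEL allocations done while freeing
 * pool pages; returning SHRINK_STOP lets the VM back off instead. */
static inline bool ttm_pool_scan_may_alloc(struct shrink_control *sc)
{
        return (sc->gfp_mask & GFP_KERNEL) == GFP_KERNEL;
}

/* At the top of ttm_pool_shrink_scan():
 *	if (!ttm_pool_scan_may_alloc(sc))
 *		return SHRINK_STOP;
 */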
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index b8b394319b45..7957beeeaf73 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -918,19 +918,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 }
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
 
-/* Get good estimation how many pages are free in pools */
-static int ttm_dma_pool_get_num_unused_pages(void)
-{
-	struct device_pools *p;
-	unsigned total = 0;
-
-	mutex_lock(&_manager->lock);
-	list_for_each_entry(p, &_manager->pools, pools)
-		total += p->pool->npages_free;
-	mutex_unlock(&_manager->lock);
-	return total;
-}
-
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
@@ -1002,18 +989,29 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
 
 /**
  * Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
+ * needs to be paid to sc->gfp_mask to determine if this can be done or not.
+ * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
+ * bad.
+ *
+ * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
+ * shrinkers
  */
-static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
-				  struct shrink_control *sc)
+static unsigned long
+ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	static atomic_t start_pool = ATOMIC_INIT(0);
 	unsigned idx = 0;
 	unsigned pool_offset = atomic_add_return(1, &start_pool);
 	unsigned shrink_pages = sc->nr_to_scan;
 	struct device_pools *p;
+	unsigned long freed = 0;
 
 	if (list_empty(&_manager->pools))
-		return 0;
+		return SHRINK_STOP;
 
 	mutex_lock(&_manager->lock);
 	pool_offset = pool_offset % _manager->npools;
@@ -1029,18 +1027,33 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
 			continue;
 		nr_free = shrink_pages;
 		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+		freed += nr_free - shrink_pages;
+
 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
 			 p->pool->dev_name, p->pool->name, current->pid,
 			 nr_free, shrink_pages);
 	}
 	mutex_unlock(&_manager->lock);
-	/* return estimated number of unused pages in pool */
-	return ttm_dma_pool_get_num_unused_pages();
+	return freed;
+}
+
+static unsigned long
+ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct device_pools *p;
+	unsigned long count = 0;
+
+	mutex_lock(&_manager->lock);
+	list_for_each_entry(p, &_manager->pools, pools)
+		count += p->pool->npages_free;
+	mutex_unlock(&_manager->lock);
+	return count;
 }
 
 static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
-	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
+	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
 	register_shrinker(&manager->mm_shrink);
 }
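A note on the freed accounting in both scan callbacks: ttm_page_pool_free() and ttm_dma_page_pool_free() return how many of the nr_free pages they were asked for could not be freed (see the trailing "return nr_free" at the top of the first hunk), so freed += nr_free - shrink_pages accumulates the pages actually released. Worked example with sc->nr_to_scan = 128: if the first pool releases 90 pages (returning 38), the loop asks the next pool for 38; if that pool releases all of them (returning 0), the callback reports 90 + 38 = 128 freed pages back to the VM.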