author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-01-04 16:35:15 -0500
committer Dave Airlie <airlied@redhat.com>                2012-01-06 04:30:15 -0500
commit    2c05114d23c4fd2256eaf5645528c19fcefdb2c8 (patch)
tree      dc1f700fbacb0a7891932b13684d4a4f5a65415e /drivers/gpu
parent    36d7c537c3082a492ff851fb0da40ae3d7c5565d (diff)
drm/ttm/dma: Fix accounting error when calling ttm_mem_global_free_page and don't try to free freed pages.
The code that figures out how many pages to shrink the pool ends up capping 'count' at _manager->options.max_size - which is OK. Except that 'count' is also used when accounting for how many pages are recycled, so we end up with invalid values there. Fix this by using a separate variable for the number of pages to shrink.

On top of that, we would try to free pages from the cached page pool - which is nonsense, as those pages are deleted from the pool outright, so there are no free pages in that pool to shrink.

We also missed the opportunity to batch the number of pages to free (similar to how ttm_page_alloc.c does it). This reintroduces the code that was lost during rebasing.

Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
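To make the double use of 'count' concrete, here is a minimal userspace sketch of the !is_cached path - not the driver code itself: the pool sizes, the NUM_PAGES_TO_ALLOC value, and the printf are made up for illustration. Before the fix, the capped shrink amount overwrote 'count' before the recycle accounting ran, so the two numbers could no longer both be right.

#include <stdio.h>

#define MAX_SIZE           4096  /* stand-in for _manager->options.max_size */
#define NUM_PAGES_TO_ALLOC 512   /* illustrative batching threshold */

int main(void)
{
	unsigned npages_free = 4090; /* pages already on the pool's free list */
	unsigned count = 10;         /* pages this ttm hands back to the pool */
	unsigned npages;             /* separate shrink amount: the fix */

	npages_free += count;        /* splice the pages onto the free list */
	npages = count;
	if (npages_free > MAX_SIZE) {
		npages = npages_free - MAX_SIZE;
		/* batch small shrinks to reduce calls to set_memory_wb() */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}

	/* with the fix, 'count' still reports what was actually recycled */
	printf("recycled %u pages, shrink pool by %u\n", count, npages);
	return 0;
}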
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 6c06d0b601f..156ddcd304c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -949,7 +949,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	struct dma_page *d_page, *next;
 	enum pool_type type;
 	bool is_cached = false;
-	unsigned count = 0, i;
+	unsigned count = 0, i, npages = 0;
 	unsigned long irq_flags;
 
 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
@@ -974,8 +974,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	} else {
 		pool->npages_free += count;
 		list_splice(&ttm_dma->pages_list, &pool->free_list);
+		npages = count;
 		if (pool->npages_free > _manager->options.max_size) {
-			count = pool->npages_free - _manager->options.max_size;
+			npages = pool->npages_free - _manager->options.max_size;
+			/* free at least NUM_PAGES_TO_ALLOC number of pages
+			 * to reduce calls to set_memory_wb */
+			if (npages < NUM_PAGES_TO_ALLOC)
+				npages = NUM_PAGES_TO_ALLOC;
 		}
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
@@ -999,9 +1004,9 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 		ttm_dma->dma_address[i] = 0;
 	}
 
-	/* shrink pool if necessary */
-	if (count)
-		ttm_dma_page_pool_free(pool, count);
+	/* shrink pool if necessary (only on !is_cached pools)*/
+	if (npages)
+		ttm_dma_page_pool_free(pool, npages);
 	ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
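Note that the cached-pool half of the fix ("don't try to free freed pages") falls out of the same rename: 'npages' starts at 0 and is only assigned on the !is_cached path, so for cached pools - whose pages are released outright rather than spliced onto free_list - the final ttm_dma_page_pool_free() call is now skipped, exactly as the updated comment says.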