author		Francisco Jerez <currojerez@riseup.net>	2010-07-03 22:03:07 -0400
committer	Dave Airlie <airlied@redhat.com>	2010-07-06 22:14:11 -0400
commit		5870a4d97da136908ca477e3a21bc9f4c2705161
tree		3030a0ee387f3187a5a30f8f4cc745788c7d69e7	/drivers/gpu/drm/ttm
parent		123f94f22e3d283dfe68742b269c245b0501ad82
drm/ttm: Allocate the page pool manager in the heap.
Repeated ttm_page_alloc_init/fini cycles fail noisily because the
pool manager kobj isn't zeroed out between uses (we could simply zero
it, but statically allocated kobjects are generally considered a bad
thing). Move the manager to kzalloc'ed memory.
Note that this patch drops the refcounting behavior of the pool
allocator init/fini functions: it would have led to a race condition
in its current form, and it was never actually used anyway.
This fixes a regression with reloading KMS modules at runtime that
has been present since the page allocator was introduced.
Signed-off-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
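
For background on the fix: the kobject model expects an object that
embeds a struct kobject to live in dynamically allocated memory and to
be freed only from its ktype's release callback, after the last
reference is dropped with kobject_put(). Below is a minimal sketch of
that pattern; it uses the real kobject API, but every other name is
hypothetical, not TTM code:

#include <linux/kobject.h>
#include <linux/slab.h>

/* Hypothetical stand-in for ttm_pool_manager. */
struct my_manager {
	struct kobject kobj;	/* the embedded kobject owns the lifetime */
	/* ... pools, options ... */
};

static struct my_manager *my_mgr;

static void my_manager_release(struct kobject *kobj)
{
	struct my_manager *m = container_of(kobj, struct my_manager, kobj);

	kfree(m);	/* last reference is gone: safe to free */
}

static struct kobj_type my_manager_ktype = {
	.release = my_manager_release,
};

int my_manager_init(struct kobject *parent)
{
	int ret;

	/* Fresh, zeroed memory on every init cycle. */
	my_mgr = kzalloc(sizeof(*my_mgr), GFP_KERNEL);
	if (!my_mgr)
		return -ENOMEM;

	ret = kobject_init_and_add(&my_mgr->kobj, &my_manager_ktype,
				   parent, "pool");
	if (ret) {
		kobject_put(&my_mgr->kobj);	/* release() does the kfree */
		my_mgr = NULL;
	}
	return ret;
}

void my_manager_fini(void)
{
	kobject_put(&my_mgr->kobj);	/* drop the last ref; release() kfrees */
	my_mgr = NULL;
}

Note the error path: once kobject_init_and_add() has been called, the
kobject owns the allocation, so cleanup goes through kobject_put()
(which invokes the release callback) rather than a direct kfree().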
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	68
1 file changed, 33 insertions, 35 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 2f047577b1e3..b1d67dc973dc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -104,7 +104,6 @@ struct ttm_pool_opts {
 struct ttm_pool_manager {
 	struct kobject kobj;
 	struct shrinker mm_shrink;
-	atomic_t page_alloc_inited;
 	struct ttm_pool_opts options;
 
 	union {
@@ -142,7 +141,7 @@ static void ttm_pool_kobj_release(struct kobject *kobj)
 {
 	struct ttm_pool_manager *m =
 		container_of(kobj, struct ttm_pool_manager, kobj);
-	(void)m;
+	kfree(m);
 }
 
 static ssize_t ttm_pool_store(struct kobject *kobj,
@@ -214,9 +213,7 @@ static struct kobj_type ttm_pool_kobj_type = {
 	.default_attrs = ttm_pool_attrs,
 };
 
-static struct ttm_pool_manager _manager = {
-	.page_alloc_inited	= ATOMIC_INIT(0)
-};
+static struct ttm_pool_manager *_manager;
 
 #ifndef CONFIG_X86
 static int set_pages_array_wb(struct page **pages, int addrinarray)
@@ -271,7 +268,7 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
 	if (flags & TTM_PAGE_FLAG_DMA32)
 		pool_index |= 0x2;
 
-	return &_manager.pools[pool_index];
+	return &_manager->pools[pool_index];
 }
 
 /* set memory back to wb and free the pages. */
@@ -387,7 +384,7 @@ static int ttm_pool_get_num_unused_pages(void)
 	unsigned i;
 	int total = 0;
 	for (i = 0; i < NUM_POOLS; ++i)
-		total += _manager.pools[i].npages;
+		total += _manager->pools[i].npages;
 
 	return total;
 }
@@ -408,7 +405,7 @@ static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
 		unsigned nr_free = shrink_pages;
 		if (shrink_pages == 0)
 			break;
-		pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 		shrink_pages = ttm_page_pool_free(pool, nr_free);
 	}
 	/* return estimated number of unused pages in pool */
@@ -576,10 +573,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 
 	/* If allocation request is small and there is not enough
 	 * pages in pool we fill the pool first */
-	if (count < _manager.options.small
+	if (count < _manager->options.small
 		&& count > pool->npages) {
 		struct list_head new_pages;
-		unsigned alloc_size = _manager.options.alloc_size;
+		unsigned alloc_size = _manager->options.alloc_size;
 
 		/**
 		 * Can't change page caching if in irqsave context. We have to
@@ -759,8 +756,8 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
 	pool->npages += page_count;
 	/* Check that we don't go over the pool limit */
 	page_count = 0;
-	if (pool->npages > _manager.options.max_size) {
-		page_count = pool->npages - _manager.options.max_size;
+	if (pool->npages > _manager->options.max_size) {
+		page_count = pool->npages - _manager->options.max_size;
 		/* free at least NUM_PAGES_TO_ALLOC number of pages
 		 * to reduce calls to set_memory_wb */
 		if (page_count < NUM_PAGES_TO_ALLOC)
@@ -785,33 +782,36 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
 	int ret;
-	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
-		return 0;
+
+	WARN_ON(_manager);
 
 	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
 
-	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 
-	ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
 
-	ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
-				  "wc dma");
+	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
 
-	ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
-				  "uc dma");
+	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
+				  GFP_USER | GFP_DMA32, "wc dma");
 
-	_manager.options.max_size = max_pages;
-	_manager.options.small = SMALL_ALLOCATION;
-	_manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
+				  GFP_USER | GFP_DMA32, "uc dma");
 
-	kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
-	ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+	_manager->options.max_size = max_pages;
+	_manager->options.small = SMALL_ALLOCATION;
+	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+				   &glob->kobj, "pool");
 	if (unlikely(ret != 0)) {
-		kobject_put(&_manager.kobj);
+		kobject_put(&_manager->kobj);
+		_manager = NULL;
 		return ret;
 	}
 
-	ttm_pool_mm_shrink_init(&_manager);
+	ttm_pool_mm_shrink_init(_manager);
 
 	return 0;
 }
@@ -820,16 +820,14 @@ void ttm_page_alloc_fini()
 {
 	int i;
 
-	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
-		return;
-
 	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
-	ttm_pool_mm_shrink_fini(&_manager);
+	ttm_pool_mm_shrink_fini(_manager);
 
 	for (i = 0; i < NUM_POOLS; ++i)
-		ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
 
-	kobject_put(&_manager.kobj);
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
 }
 
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
@@ -837,14 +835,14 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 	struct ttm_page_pool *p;
 	unsigned i;
 	char *h[] = {"pool", "refills", "pages freed", "size"};
-	if (atomic_read(&_manager.page_alloc_inited) == 0) {
+	if (!_manager) {
 		seq_printf(m, "No pool allocator running.\n");
 		return 0;
 	}
 	seq_printf(m, "%6s %12s %13s %8s\n",
 			h[0], h[1], h[2], h[3]);
 	for (i = 0; i < NUM_POOLS; ++i) {
-		p = &_manager.pools[i];
+		p = &_manager->pools[i];
 
 		seq_printf(m, "%6s %12ld %13ld %8d\n",
 				p->name, p->nrefills,
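
To see why the statically allocated version broke module reloads,
here is a hypothetical reduction of the bug class (not TTM code; the
kernel's complaint is paraphrased): kobject_init() assumes it is
handed zeroed memory, but after a put/release cycle a static kobject
still carries its old state, so the next init logs an error and a
stack trace instead of starting cleanly:

#include <linux/kobject.h>

static struct kobject demo_kobj;	/* BAD: static storage, never re-zeroed */

static void demo_release(struct kobject *kobj)
{
	/* Nothing to free, and the static struct keeps its stale state. */
}

static struct kobj_type demo_ktype = {
	.release = demo_release,
};

static int demo_cycle_twice(struct kobject *parent)
{
	int ret;

	ret = kobject_init_and_add(&demo_kobj, &demo_ktype, parent, "demo");
	if (ret)
		return ret;
	kobject_put(&demo_kobj);	/* refcount hits zero, release() runs */

	/*
	 * Second cycle: kobject_init() finds the object still marked as
	 * initialized and complains (roughly, "tried to init an
	 * initialized object").
	 */
	return kobject_init_and_add(&demo_kobj, &demo_ktype, parent, "demo");
}

With the heap-allocated manager above, each ttm_page_alloc_init()/
ttm_page_alloc_fini() cycle starts from fresh kzalloc'ed memory,
which is exactly what a KMS module reload exercises.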