author     Paul Mundt <lethal@linux-sh.org>    2010-08-03 22:56:17 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2010-08-03 22:56:17 -0400
commit     701ec7a7b04a62c74ab1b83b59a3fd35c0ba5fdb (patch)
tree       c07aa954f48ec45c422641052d46008697a4a6b1 /drivers/gpu/drm/ttm/ttm_page_alloc.c
parent     285eba57db7bd7d7c3c5929fb8621fdcaaea1b00 (diff)
parent     3a09b1be53d23df780a0cd0e4087a05e2ca4a00c (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
arch/arm/configs/ap4evb_defconfig
arch/arm/configs/g3evm_defconfig
arch/arm/configs/g4evm_defconfig
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
-rw-r--r--   drivers/gpu/drm/ttm/ttm_page_alloc.c   74
1 file changed, 37 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 2f047577b1e3..ca904799f018 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -40,11 +40,13 @@
 #include <linux/slab.h>
 
 #include <asm/atomic.h>
-#include <asm/agp.h>
 
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_page_alloc.h"
 
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
 
 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION 16
@@ -104,7 +106,6 @@ struct ttm_pool_opts {
 struct ttm_pool_manager {
         struct kobject kobj;
         struct shrinker mm_shrink;
-        atomic_t page_alloc_inited;
         struct ttm_pool_opts options;
 
         union {
@@ -142,7 +143,7 @@ static void ttm_pool_kobj_release(struct kobject *kobj)
 {
         struct ttm_pool_manager *m =
                 container_of(kobj, struct ttm_pool_manager, kobj);
-        (void)m;
+        kfree(m);
 }
 
 static ssize_t ttm_pool_store(struct kobject *kobj,
@@ -214,9 +215,7 @@ static struct kobj_type ttm_pool_kobj_type = {
         .default_attrs = ttm_pool_attrs,
 };
 
-static struct ttm_pool_manager _manager = {
-        .page_alloc_inited = ATOMIC_INIT(0)
-};
+static struct ttm_pool_manager *_manager;
 
 #ifndef CONFIG_X86
 static int set_pages_array_wb(struct page **pages, int addrinarray)
@@ -271,7 +270,7 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
         if (flags & TTM_PAGE_FLAG_DMA32)
                 pool_index |= 0x2;
 
-        return &_manager.pools[pool_index];
+        return &_manager->pools[pool_index];
 }
 
 /* set memory back to wb and free the pages. */
@@ -387,7 +386,7 @@ static int ttm_pool_get_num_unused_pages(void)
         unsigned i;
         int total = 0;
         for (i = 0; i < NUM_POOLS; ++i)
-                total += _manager.pools[i].npages;
+                total += _manager->pools[i].npages;
 
         return total;
 }
@@ -395,7 +394,7 @@ static int ttm_pool_get_num_unused_pages(void)
 /**
  * Callback for mm to request pool to reduce number of page held.
  */
-static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
 {
         static atomic_t start_pool = ATOMIC_INIT(0);
         unsigned i;
@@ -408,7 +407,7 @@ static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
                 unsigned nr_free = shrink_pages;
                 if (shrink_pages == 0)
                         break;
-                pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                 shrink_pages = ttm_page_pool_free(pool, nr_free);
         }
         /* return estimated number of unused pages in pool */
@@ -576,10 +575,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 
         /* If allocation request is small and there is not enough
          * pages in pool we fill the pool first */
-        if (count < _manager.options.small
+        if (count < _manager->options.small
                 && count > pool->npages) {
                 struct list_head new_pages;
-                unsigned alloc_size = _manager.options.alloc_size;
+                unsigned alloc_size = _manager->options.alloc_size;
 
                 /**
                  * Can't change page caching if in irqsave context. We have to
@@ -759,8 +758,8 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
         pool->npages += page_count;
         /* Check that we don't go over the pool limit */
         page_count = 0;
-        if (pool->npages > _manager.options.max_size) {
-                page_count = pool->npages - _manager.options.max_size;
+        if (pool->npages > _manager->options.max_size) {
+                page_count = pool->npages - _manager->options.max_size;
                 /* free at least NUM_PAGES_TO_ALLOC number of pages
                  * to reduce calls to set_memory_wb */
                 if (page_count < NUM_PAGES_TO_ALLOC)
@@ -785,33 +784,36 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
         int ret;
-        if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
-                return 0;
+
+        WARN_ON(_manager);
 
         printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
 
-        ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+        _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+
+        ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
 
-        ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+        ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
 
-        ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
-                                  "wc dma");
+        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
+                                  GFP_USER | GFP_DMA32, "wc dma");
 
-        ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
-                                  "uc dma");
+        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
+                                  GFP_USER | GFP_DMA32, "uc dma");
 
-        _manager.options.max_size = max_pages;
-        _manager.options.small = SMALL_ALLOCATION;
-        _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+        _manager->options.max_size = max_pages;
+        _manager->options.small = SMALL_ALLOCATION;
+        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
 
-        kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
-        ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+        ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+                                   &glob->kobj, "pool");
         if (unlikely(ret != 0)) {
-                kobject_put(&_manager.kobj);
+                kobject_put(&_manager->kobj);
+                _manager = NULL;
                 return ret;
         }
 
-        ttm_pool_mm_shrink_init(&_manager);
+        ttm_pool_mm_shrink_init(_manager);
 
         return 0;
 }
@@ -820,16 +822,14 @@ void ttm_page_alloc_fini()
 {
         int i;
 
-        if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
-                return;
-
         printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
-        ttm_pool_mm_shrink_fini(&_manager);
+        ttm_pool_mm_shrink_fini(_manager);
 
         for (i = 0; i < NUM_POOLS; ++i)
-                ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
 
-        kobject_put(&_manager.kobj);
+        kobject_put(&_manager->kobj);
+        _manager = NULL;
 }
 
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
@@ -837,14 +837,14 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
         struct ttm_page_pool *p;
         unsigned i;
         char *h[] = {"pool", "refills", "pages freed", "size"};
-        if (atomic_read(&_manager.page_alloc_inited) == 0) {
+        if (!_manager) {
                 seq_printf(m, "No pool allocator running.\n");
                 return 0;
         }
         seq_printf(m, "%6s %12s %13s %8s\n",
                         h[0], h[1], h[2], h[3]);
         for (i = 0; i < NUM_POOLS; ++i) {
-                p = &_manager.pools[i];
+                p = &_manager->pools[i];
 
                 seq_printf(m, "%6s %12ld %13ld %8d\n",
                                 p->name, p->nrefills,
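
For reference, the following is a minimal, hypothetical sketch (not part of this patch) of the kobject lifetime pattern the pool allocator switches to above: the manager is allocated with kzalloc(), registered with kobject_init_and_add(), and freed only from the ktype release callback once the last reference is dropped by kobject_put(). The example_manager, example_release, example_init names are illustrative stand-ins; only the kobject/slab calls are real kernel APIs.

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct example_manager {
        struct kobject kobj;
        /* ... pools, options, shrinker ... */
};

/* Called by the kobject core when the last reference is dropped;
 * this is the only place the containing structure is freed. */
static void example_release(struct kobject *kobj)
{
        struct example_manager *m =
                container_of(kobj, struct example_manager, kobj);
        kfree(m);
}

static struct kobj_type example_ktype = {
        .release = example_release,
};

static struct example_manager *example_init(struct kobject *parent)
{
        struct example_manager *m;
        int ret;

        m = kzalloc(sizeof(*m), GFP_KERNEL);
        if (!m)
                return NULL;

        /* Initializes the embedded kobject and adds it under "parent";
         * on failure, kobject_put() still invokes example_release(),
         * which frees m. */
        ret = kobject_init_and_add(&m->kobj, &example_ktype, parent, "pool");
        if (ret) {
                kobject_put(&m->kobj);
                return NULL;
        }
        return m;
}

This mirrors the new ttm_pool_kobj_release()/ttm_page_alloc_init() pairing in the diff, where kfree(m) in the release callback replaces the old no-op (void)m on the formerly static _manager.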