author    Jerome Glisse <jglisse@redhat.com>    2011-11-10 18:24:09 -0500
committer Dave Airlie <airlied@redhat.com>      2011-12-06 05:39:11 -0500
commit    822c4d9ae0d55a4fcea9f0a462bc6406a06692e2 (patch)
tree      023576f530939ff497bb7fe44b62da0ce50221be /drivers/gpu/drm/ttm
parent    f9517e63ffae6a1062a0a2ac4eea60be49b9dfd4 (diff)
drm/ttm: page allocation use page array instead of list
Use the ttm_tt pages array for page allocations and move the list unwinding into the page allocation functions.

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
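The API change is easiest to see from the prototypes: both entry points now take an array of struct page pointers plus an explicit page count, instead of a list_head that callers had to build and unwind. The prototypes below are taken from the diff; the single-page caller fragment is only an illustrative sketch that mirrors __ttm_tt_get_page() further down, not new code introduced by this patch.

/* before: list based */
int  ttm_get_pages(struct list_head *pages, int flags,
		   enum ttm_caching_state cstate, unsigned count,
		   dma_addr_t *dma_address);
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
		   enum ttm_caching_state cstate, dma_addr_t *dma_address);

/* after: array based */
int  ttm_get_pages(struct page **pages, int flags,
		   enum ttm_caching_state cstate, unsigned npages,
		   dma_addr_t *dma_address);
void ttm_put_pages(struct page **pages, unsigned npages, int flags,
		   enum ttm_caching_state cstate, dma_addr_t *dma_address);

/*
 * Hypothetical single-page caller, mirroring __ttm_tt_get_page() below:
 * the allocated page lands directly in 'p', and unwinding of partially
 * satisfied requests now happens inside ttm_get_pages()/ttm_put_pages()
 * rather than in the caller.
 */
struct page *p = NULL;
int ret;

ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
		    &ttm->dma_address[index]);
if (ret != 0)
	return NULL;
/* ... and on a later error path the page goes back the same way ... */
ttm_put_pages(&p, 1, ttm->page_flags, ttm->caching_state,
	      &ttm->dma_address[index]);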
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c  85
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c          36
2 files changed, 59 insertions(+), 62 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 727e93daac3b..0f3e6d2395b3 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
  * @return count of pages still required to fulfill the request.
  */
 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-		struct list_head *pages, int ttm_flags,
-		enum ttm_caching_state cstate, unsigned count)
+					struct list_head *pages,
+					int ttm_flags,
+					enum ttm_caching_state cstate,
+					unsigned count)
 {
 	unsigned long irq_flags;
 	struct list_head *p;
@@ -664,13 +666,15 @@ out:
  * On success pages list will hold count number of correctly
  * cached pages.
  */
-int ttm_get_pages(struct list_head *pages, int flags,
-		  enum ttm_caching_state cstate, unsigned count,
+int ttm_get_pages(struct page **pages, int flags,
+		  enum ttm_caching_state cstate, unsigned npages,
 		  dma_addr_t *dma_address)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct list_head plist;
 	struct page *p = NULL;
 	gfp_t gfp_flags = GFP_USER;
+	unsigned count;
 	int r;
 
 	/* set zero flag for page allocation if required */
@@ -684,7 +688,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 	else
 		gfp_flags |= GFP_HIGHUSER;
 
-	for (r = 0; r < count; ++r) {
+	for (r = 0; r < npages; ++r) {
 		p = alloc_page(gfp_flags);
 		if (!p) {
 
@@ -693,85 +697,100 @@ int ttm_get_pages(struct list_head *pages, int flags,
 				return -ENOMEM;
 			}
 
-			list_add(&p->lru, pages);
+			pages[r] = p;
 		}
 		return 0;
 	}
 
-
 	/* combine zero flag to pool flags */
 	gfp_flags |= pool->gfp_flags;
 
 	/* First we take pages from the pool */
-	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+	INIT_LIST_HEAD(&plist);
+	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+	count = 0;
+	list_for_each_entry(p, &plist, lru) {
+		pages[count++] = p;
+	}
 
 	/* clear the pages coming from the pool if requested */
 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-		list_for_each_entry(p, pages, lru) {
+		list_for_each_entry(p, &plist, lru) {
 			clear_page(page_address(p));
 		}
 	}
 
 	/* If pool didn't have enough pages allocate new one. */
-	if (count > 0) {
+	if (npages > 0) {
 		/* ttm_alloc_new_pages doesn't reference pool so we can run
 		 * multiple requests in parallel.
 		 **/
-		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+		INIT_LIST_HEAD(&plist);
+		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
+		list_for_each_entry(p, &plist, lru) {
+			pages[count++] = p;
+		}
 		if (r) {
 			/* If there is any pages in the list put them back to
 			 * the pool. */
 			printk(KERN_ERR TTM_PFX
 			       "Failed to allocate extra pages "
 			       "for large request.");
-			ttm_put_pages(pages, 0, flags, cstate, NULL);
+			ttm_put_pages(pages, count, flags, cstate, NULL);
 			return r;
 		}
 	}
 
-
 	return 0;
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
 {
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-	struct page *p, *tmp;
+	unsigned i;
 
 	if (pool == NULL) {
 		/* No pool for this memory type so free the pages */
-
-		list_for_each_entry_safe(p, tmp, pages, lru) {
-			__free_page(p);
+		for (i = 0; i < npages; i++) {
+			if (pages[i]) {
+				if (page_count(pages[i]) != 1)
+					printk(KERN_ERR TTM_PFX
+					       "Erroneous page count. "
+					       "Leaking pages.\n");
+				__free_page(pages[i]);
+				pages[i] = NULL;
+			}
 		}
-		/* Make the pages list empty */
-		INIT_LIST_HEAD(pages);
 		return;
 	}
-	if (page_count == 0) {
-		list_for_each_entry_safe(p, tmp, pages, lru) {
-			++page_count;
-		}
-	}
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
-	list_splice_init(pages, &pool->list);
-	pool->npages += page_count;
+	for (i = 0; i < npages; i++) {
+		if (pages[i]) {
+			if (page_count(pages[i]) != 1)
+				printk(KERN_ERR TTM_PFX
+				       "Erroneous page count. "
+				       "Leaking pages.\n");
+			list_add_tail(&pages[i]->lru, &pool->list);
+			pages[i] = NULL;
+			pool->npages++;
+		}
+	}
 	/* Check that we don't go over the pool limit */
-	page_count = 0;
+	npages = 0;
 	if (pool->npages > _manager->options.max_size) {
-		page_count = pool->npages - _manager->options.max_size;
+		npages = pool->npages - _manager->options.max_size;
 		/* free at least NUM_PAGES_TO_ALLOC number of pages
 		 * to reduce calls to set_memory_wb */
-		if (page_count < NUM_PAGES_TO_ALLOC)
-			page_count = NUM_PAGES_TO_ALLOC;
+		if (npages < NUM_PAGES_TO_ALLOC)
+			npages = NUM_PAGES_TO_ALLOC;
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
-	if (page_count)
-		ttm_page_pool_free(pool, page_count);
+	if (npages)
+		ttm_page_pool_free(pool, npages);
 }
 
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 54bbbadba93c..6e079dedfc4f 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -66,22 +66,16 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
-	struct list_head h;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	if (NULL == (p = ttm->pages[index])) {
 
-		INIT_LIST_HEAD(&h);
-
-		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+		ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
 				    &ttm->dma_address[index]);
-
 		if (ret != 0)
 			return NULL;
 
-		p = list_first_entry(&h, struct page, lru);
-
 		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
 		if (unlikely(ret != 0))
 			goto out_err;
@@ -90,9 +84,7 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 	}
 	return p;
 out_err:
-	INIT_LIST_HEAD(&h);
-	list_add(&p->lru, &h);
-	ttm_put_pages(&h, 1, ttm->page_flags,
+	ttm_put_pages(&p, 1, ttm->page_flags,
 		      ttm->caching_state, &ttm->dma_address[index]);
 	return NULL;
 }
@@ -243,33 +235,19 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
-	int i;
-	unsigned count = 0;
-	struct list_head h;
-	struct page *cur_page;
 	struct ttm_backend *be = ttm->be;
-
-	INIT_LIST_HEAD(&h);
+	unsigned i;
 
 	if (be)
 		be->func->clear(be);
 	for (i = 0; i < ttm->num_pages; ++i) {
-
-		cur_page = ttm->pages[i];
-		ttm->pages[i] = NULL;
-		if (cur_page) {
-			if (page_count(cur_page) != 1)
-				printk(KERN_ERR TTM_PFX
-				       "Erroneous page count. "
-				       "Leaking pages.\n");
+		if (ttm->pages[i]) {
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 cur_page);
-			list_add(&cur_page->lru, &h);
-			count++;
+						 ttm->pages[i]);
+			ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags,
+				      ttm->caching_state, &ttm->dma_address[i]);
 		}
 	}
-	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
-		      ttm->dma_address);
 	ttm->state = tt_unpopulated;
 }
 