author     Jerome Glisse <jglisse@redhat.com>    2011-11-09 17:15:26 -0500
committer  Dave Airlie <airlied@redhat.com>      2011-12-06 05:40:02 -0500
commit     8e7e70522d760c4ccd4cd370ebfa0ba69e006c6e (patch)
tree       a2b0f931e513f3aeba174b974bd5e869685fe288 /drivers/gpu/drm/ttm/ttm_page_alloc.c
parent     3230cfc34fca9d17c1628cf0e4ac25199592a69a (diff)
drm/ttm: isolate dma data from ttm_tt V4
Move the dma data into a superset ttm_dma_tt structure which inherits from
ttm_tt. This allows drivers that don't use the dma functionality to avoid
wasting memory for it.

V2 Rebase on top of the no-memory-accounting changes (where/when is my
   delorean when i need it ?)
V3 Make sure the page list is initialized empty
V4 typo/syntax fixes

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
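For orientation, the superset structure described above embeds ttm_tt as its
first member, with the dma bookkeeping carried alongside it. The sketch below
is illustrative only: the actual definition is added in the ttm headers
elsewhere in this patch (not in ttm_page_alloc.c), so treat the exact field
names as assumptions.

        /* Sketch, not the authoritative definition: a DMA-aware superset of
         * ttm_tt. Embedding ttm_tt as the first member lets code convert
         * between the two views of the same object. */
        struct ttm_dma_tt {
                struct ttm_tt ttm;           /* base object, must stay first */
                dma_addr_t *dma_address;     /* one DMA address per page */
                struct list_head pages_list; /* kept initialized empty (see V3) */
        };

Drivers that never touch DMA keep allocating a plain ttm_tt and pay nothing
for the extra fields; DMA-capable paths allocate the larger structure instead.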
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 114
1 file changed, 56 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 8d6267e434ab..499debda791e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -662,13 +662,61 @@ out:
         return count;
 }
 
+/* Put all pages in pages list to correct pool to wait for reuse */
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+                          enum ttm_caching_state cstate)
+{
+        unsigned long irq_flags;
+        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+        unsigned i;
+
+        if (pool == NULL) {
+                /* No pool for this memory type so free the pages */
+                for (i = 0; i < npages; i++) {
+                        if (pages[i]) {
+                                if (page_count(pages[i]) != 1)
+                                        printk(KERN_ERR TTM_PFX
+                                               "Erroneous page count. "
+                                               "Leaking pages.\n");
+                                __free_page(pages[i]);
+                                pages[i] = NULL;
+                        }
+                }
+                return;
+        }
+
+        spin_lock_irqsave(&pool->lock, irq_flags);
+        for (i = 0; i < npages; i++) {
+                if (pages[i]) {
+                        if (page_count(pages[i]) != 1)
+                                printk(KERN_ERR TTM_PFX
+                                       "Erroneous page count. "
+                                       "Leaking pages.\n");
+                        list_add_tail(&pages[i]->lru, &pool->list);
+                        pages[i] = NULL;
+                        pool->npages++;
+                }
+        }
+        /* Check that we don't go over the pool limit */
+        npages = 0;
+        if (pool->npages > _manager->options.max_size) {
+                npages = pool->npages - _manager->options.max_size;
+                /* free at least NUM_PAGES_TO_ALLOC number of pages
+                 * to reduce calls to set_memory_wb */
+                if (npages < NUM_PAGES_TO_ALLOC)
+                        npages = NUM_PAGES_TO_ALLOC;
+        }
+        spin_unlock_irqrestore(&pool->lock, irq_flags);
+        if (npages)
+                ttm_page_pool_free(pool, npages);
+}
+
 /*
  * On success pages list will hold count number of correctly
  * cached pages.
  */
-int ttm_get_pages(struct page **pages, int flags,
-                  enum ttm_caching_state cstate, unsigned npages,
-                  dma_addr_t *dma_address)
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+                         enum ttm_caching_state cstate)
 {
         struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
         struct list_head plist;
@@ -736,7 +784,7 @@ int ttm_get_pages(struct page **pages, int flags,
                         printk(KERN_ERR TTM_PFX
                                "Failed to allocate extra pages "
                                "for large request.");
-                        ttm_put_pages(pages, count, flags, cstate, NULL);
+                        ttm_put_pages(pages, count, flags, cstate);
                         return r;
                 }
         }
@@ -744,55 +792,6 @@ int ttm_get_pages(struct page **pages, int flags,
         return 0;
 }
 
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct page **pages, unsigned npages, int flags,
-                   enum ttm_caching_state cstate, dma_addr_t *dma_address)
-{
-        unsigned long irq_flags;
-        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-        unsigned i;
-
-        if (pool == NULL) {
-                /* No pool for this memory type so free the pages */
-                for (i = 0; i < npages; i++) {
-                        if (pages[i]) {
-                                if (page_count(pages[i]) != 1)
-                                        printk(KERN_ERR TTM_PFX
-                                               "Erroneous page count. "
-                                               "Leaking pages.\n");
-                                __free_page(pages[i]);
-                                pages[i] = NULL;
-                        }
-                }
-                return;
-        }
-
-        spin_lock_irqsave(&pool->lock, irq_flags);
-        for (i = 0; i < npages; i++) {
-                if (pages[i]) {
-                        if (page_count(pages[i]) != 1)
-                                printk(KERN_ERR TTM_PFX
-                                       "Erroneous page count. "
-                                       "Leaking pages.\n");
-                        list_add_tail(&pages[i]->lru, &pool->list);
-                        pages[i] = NULL;
-                        pool->npages++;
-                }
-        }
-        /* Check that we don't go over the pool limit */
-        npages = 0;
-        if (pool->npages > _manager->options.max_size) {
-                npages = pool->npages - _manager->options.max_size;
-                /* free at least NUM_PAGES_TO_ALLOC number of pages
-                 * to reduce calls to set_memory_wb */
-                if (npages < NUM_PAGES_TO_ALLOC)
-                        npages = NUM_PAGES_TO_ALLOC;
-        }
-        spin_unlock_irqrestore(&pool->lock, irq_flags);
-        if (npages)
-                ttm_page_pool_free(pool, npages);
-}
-
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                                       char *name)
 {
@@ -865,9 +864,9 @@ int ttm_pool_populate(struct ttm_tt *ttm)
                 return 0;
 
         for (i = 0; i < ttm->num_pages; ++i) {
-                ret = ttm_get_pages(&ttm->pages[i], ttm->page_flags,
-                                    ttm->caching_state, 1,
-                                    &ttm->dma_address[i]);
+                ret = ttm_get_pages(&ttm->pages[i], 1,
+                                    ttm->page_flags,
+                                    ttm->caching_state);
                 if (ret != 0) {
                         ttm_pool_unpopulate(ttm);
                         return -ENOMEM;
@@ -904,8 +903,7 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
                                          ttm->pages[i]);
                         ttm_put_pages(&ttm->pages[i], 1,
                                       ttm->page_flags,
-                                      ttm->caching_state,
-                                      ttm->dma_address);
+                                      ttm->caching_state);
                 }
         }
         ttm->state = tt_unpopulated;
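With ttm_get_pages()/ttm_put_pages() made static by this patch, drivers reach
the pools through the exported ttm_pool_populate()/ttm_pool_unpopulate()
helpers touched in the last two hunks. A minimal, illustrative driver-side
pair of callbacks might look like the following; the foo_* names are
hypothetical and not part of this patch.

        /* Hypothetical driver glue: delegate page allocation and release for
         * a ttm_tt to the pool helpers exported by ttm_page_alloc.c. */
        static int foo_ttm_tt_populate(struct ttm_tt *ttm)
        {
                /* Fills ttm->pages[] from the per-caching-state pools. */
                return ttm_pool_populate(ttm);
        }

        static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
        {
                /* Hands the pages back to the pools, or frees them outright
                 * when the caching state has no pool. */
                ttm_pool_unpopulate(ttm);
        }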