diff options
author    Christian König <christian.koenig@amd.com>    2017-09-21 05:28:25 -0400
committer Alex Deucher <alexander.deucher@amd.com>      2017-10-19 15:27:02 -0400
commit    8593e9b85e0aa67db62ec395774021a139efc2cd (patch)
tree      45880c5e3d751df3fdd35f5a2d5b66d38774fcb6 /drivers/gpu
parent    0284f1ead87463bc17cf5e81a24fc65c052486f3 (diff)
drm/ttm: move more logic into ttm_page_pool_get_pages
Make it easier to add huge page pool.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 98 +++++++++++++++++++-------------
1 file changed, 52 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 1bc6053b4581..39747326bf3e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -627,19 +627,20 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 }
 
 /**
- * Cut 'count' number of pages from the pool and put them on the return list.
+ * Allocate pages from the pool and put them on the return list.
  *
- * @return count of pages still required to fulfill the request.
+ * @return zero for success or negative error code.
  */
-static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-					struct list_head *pages,
-					int ttm_flags,
-					enum ttm_caching_state cstate,
-					unsigned count)
+static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+				   struct list_head *pages,
+				   int ttm_flags,
+				   enum ttm_caching_state cstate,
+				   unsigned count)
 {
 	unsigned long irq_flags;
 	struct list_head *p;
 	unsigned i;
+	int r = 0;
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
 	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
@@ -672,7 +673,35 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 	count = 0;
 out:
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
-	return count;
+
+	/* clear the pages coming from the pool if requested */
+	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+		struct page *page;
+
+		list_for_each_entry(page, pages, lru) {
+			if (PageHighMem(page))
+				clear_highpage(page);
+			else
+				clear_page(page_address(page));
+		}
+	}
+
+	/* If pool didn't have enough pages allocate new one. */
+	if (count) {
+		gfp_t gfp_flags = pool->gfp_flags;
+
+		/* set zero flag for page allocation if required */
+		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+			gfp_flags |= __GFP_ZERO;
+
+		/* ttm_alloc_new_pages doesn't reference pool so we can run
+		 * multiple requests in parallel.
+		 **/
+		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
+					count);
+	}
+
+	return r;
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
@@ -742,18 +771,18 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct list_head plist;
 	struct page *p = NULL;
-	gfp_t gfp_flags = GFP_USER;
 	unsigned count;
 	int r;
 
-	/* set zero flag for page allocation if required */
-	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		gfp_flags |= __GFP_ZERO;
-
 	/* No pool for cached pages */
 	if (pool == NULL) {
+		gfp_t gfp_flags = GFP_USER;
 		unsigned i, j;
 
+		/* set zero flag for page allocation if required */
+		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+			gfp_flags |= __GFP_ZERO;
+
 		if (flags & TTM_PAGE_FLAG_DMA32)
 			gfp_flags |= GFP_DMA32;
 		else
@@ -792,44 +821,21 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		return 0;
 	}
 
-	/* combine zero flag to pool flags */
-	gfp_flags |= pool->gfp_flags;
-
 	/* First we take pages from the pool */
 	INIT_LIST_HEAD(&plist);
-	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+
 	count = 0;
-	list_for_each_entry(p, &plist, lru) {
+	list_for_each_entry(p, &plist, lru)
 		pages[count++] = p;
-	}
-
-	/* clear the pages coming from the pool if requested */
-	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-		list_for_each_entry(p, &plist, lru) {
-			if (PageHighMem(p))
-				clear_highpage(p);
-			else
-				clear_page(page_address(p));
-		}
-	}
 
-	/* If pool didn't have enough pages allocate new one. */
-	if (npages > 0) {
-		/* ttm_alloc_new_pages doesn't reference pool so we can run
-		 * multiple requests in parallel.
-		 **/
-		INIT_LIST_HEAD(&plist);
-		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
-		list_for_each_entry(p, &plist, lru) {
-			pages[count++] = p;
-		}
-		if (r) {
-			/* If there is any pages in the list put them back to
-			 * the pool. */
-			pr_err("Failed to allocate extra pages for large request\n");
-			ttm_put_pages(pages, count, flags, cstate);
-			return r;
-		}
-	}
+	if (r) {
+		/* If there is any pages in the list put them back to
+		 * the pool.
+		 */
+		pr_err("Failed to allocate extra pages for large request\n");
+		ttm_put_pages(pages, count, flags, cstate);
+		return r;
+	}
 
 	return 0;