diff options
author | Christian König <christian.koenig@amd.com> | 2017-09-20 09:06:12 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-10-19 15:27:01 -0400 |
commit | 0284f1ead87463bc17cf5e81a24fc65c052486f3 (patch) | |
tree | aa45c998153bef9ec1f18b5fed7eb843c7e5084c /drivers/gpu/drm/ttm/ttm_page_alloc.c | |
parent | f4c809914a7c3e4a59cf543da6c2a15d0f75ee38 (diff) |
drm/ttm: add transparent huge page support for cached allocations v2
Try to allocate huge pages when it makes sense.
v2: avoid compound pages for now
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_page_alloc.c | 50 |
1 file changed, 42 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 6c852e81660b..1bc6053b4581 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -685,12 +685,24 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, | |||
685 | 685 | ||
686 | if (pool == NULL) { | 686 | if (pool == NULL) { |
687 | /* No pool for this memory type so free the pages */ | 687 | /* No pool for this memory type so free the pages */ |
688 | for (i = 0; i < npages; i++) { | 688 | i = 0; |
689 | if (pages[i]) { | 689 | while (i < npages) { |
690 | if (page_count(pages[i]) != 1) | 690 | unsigned order; |
691 | pr_err("Erroneous page count. Leaking pages.\n"); | 691 | |
692 | __free_page(pages[i]); | 692 | if (!pages[i]) { |
693 | pages[i] = NULL; | 693 | ++i; |
694 | continue; | ||
695 | } | ||
696 | |||
697 | if (page_count(pages[i]) != 1) | ||
698 | pr_err("Erroneous page count. Leaking pages.\n"); | ||
699 | order = compound_order(pages[i]); | ||
700 | __free_pages(pages[i], order); | ||
701 | |||
702 | order = 1 << order; | ||
703 | while (order) { | ||
704 | pages[i++] = NULL; | ||
705 | --order; | ||
694 | } | 706 | } |
695 | } | 707 | } |
696 | return; | 708 | return; |
@@ -740,12 +752,33 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, | |||
740 | 752 | ||
741 | /* No pool for cached pages */ | 753 | /* No pool for cached pages */ |
742 | if (pool == NULL) { | 754 | if (pool == NULL) { |
755 | unsigned i, j; | ||
756 | |||
743 | if (flags & TTM_PAGE_FLAG_DMA32) | 757 | if (flags & TTM_PAGE_FLAG_DMA32) |
744 | gfp_flags |= GFP_DMA32; | 758 | gfp_flags |= GFP_DMA32; |
745 | else | 759 | else |
746 | gfp_flags |= GFP_HIGHUSER; | 760 | gfp_flags |= GFP_HIGHUSER; |
747 | 761 | ||
748 | for (r = 0; r < npages; ++r) { | 762 | i = 0; |
763 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
764 | while (npages >= HPAGE_PMD_NR) { | ||
765 | gfp_t huge_flags = gfp_flags; | ||
766 | |||
767 | huge_flags |= GFP_TRANSHUGE; | ||
768 | huge_flags &= ~__GFP_MOVABLE; | ||
769 | huge_flags &= ~__GFP_COMP; | ||
770 | p = alloc_pages(huge_flags, HPAGE_PMD_ORDER); | ||
771 | if (!p) | ||
772 | break; | ||
773 | |||
774 | for (j = 0; j < HPAGE_PMD_NR; ++j) | ||
775 | pages[i++] = p++; | ||
776 | |||
777 | npages -= HPAGE_PMD_NR; | ||
778 | } | ||
779 | #endif | ||
780 | |||
781 | while (npages) { | ||
749 | p = alloc_page(gfp_flags); | 782 | p = alloc_page(gfp_flags); |
750 | if (!p) { | 783 | if (!p) { |
751 | 784 | ||
@@ -753,7 +786,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, | |||
753 | return -ENOMEM; | 786 | return -ENOMEM; |
754 | } | 787 | } |
755 | 788 | ||
756 | pages[r] = p; | 789 | pages[i++] = p; |
790 | --npages; | ||
757 | } | 791 | } |
758 | return 0; | 792 | return 0; |
759 | } | 793 | } |