path: root/drivers/gpu/drm/ttm/ttm_page_alloc.c
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c  367
1 file changed, 268 insertions, 99 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index e11fd76e06f4..4d688c8d7853 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -95,7 +95,7 @@ struct ttm_pool_opts {
         unsigned small;
 };
 
-#define NUM_POOLS 4
+#define NUM_POOLS 6
 
 /**
  * struct ttm_pool_manager - Holds memory pools for fst allocation
@@ -122,6 +122,8 @@ struct ttm_pool_manager {
                         struct ttm_page_pool uc_pool;
                         struct ttm_page_pool wc_pool_dma32;
                         struct ttm_page_pool uc_pool_dma32;
+                        struct ttm_page_pool wc_pool_huge;
+                        struct ttm_page_pool uc_pool_huge;
                 } ;
         };
 };
@@ -256,8 +258,8 @@ static int set_pages_array_uc(struct page **pages, int addrinarray)
 
 /**
  * Select the right pool or requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags,
+static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
                                           enum ttm_caching_state cstate)
 {
         int pool_index;
 
@@ -269,9 +271,15 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
         else
                 pool_index = 0x1;
 
-        if (flags & TTM_PAGE_FLAG_DMA32)
+        if (flags & TTM_PAGE_FLAG_DMA32) {
+                if (huge)
+                        return NULL;
                 pool_index |= 0x2;
 
+        } else if (huge) {
+                pool_index |= 0x4;
+        }
+
         return &_manager->pools[pool_index];
 }
 
@@ -494,12 +502,14 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
  * pages returned in pages array.
  */
 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
-                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+                               int ttm_flags, enum ttm_caching_state cstate,
+                               unsigned count, unsigned order)
 {
         struct page **caching_array;
         struct page *p;
         int r = 0;
-        unsigned i, cpages;
+        unsigned i, j, cpages;
+        unsigned npages = 1 << order;
         unsigned max_cpages = min(count,
                         (unsigned)(PAGE_SIZE/sizeof(struct page *)));
 
@@ -512,7 +522,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
         }
 
         for (i = 0, cpages = 0; i < count; ++i) {
-                p = alloc_page(gfp_flags);
+                p = alloc_pages(gfp_flags, order);
 
                 if (!p) {
                         pr_err("Unable to get page %u\n", i);
@@ -531,14 +541,18 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                         goto out;
                 }
 
+                list_add(&p->lru, pages);
+
 #ifdef CONFIG_HIGHMEM
                 /* gfp flags of highmem page should never be dma32 so we
                  * we should be fine in such case
                  */
-                if (!PageHighMem(p))
+                if (PageHighMem(p))
+                        continue;
+
 #endif
-                {
-                        caching_array[cpages++] = p;
+                for (j = 0; j < npages; ++j) {
+                        caching_array[cpages++] = p++;
                         if (cpages == max_cpages) {
 
                                 r = ttm_set_pages_caching(caching_array,
@@ -552,8 +566,6 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                                 cpages = 0;
                         }
                 }
-
-                list_add(&p->lru, pages);
         }
 
         if (cpages) {
@@ -573,9 +585,9 @@ out:
  * Fill the given pool if there aren't enough pages and the requested number of
  * pages is small.
  */
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
-                int ttm_flags, enum ttm_caching_state cstate, unsigned count,
-                unsigned long *irq_flags)
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
+                                      enum ttm_caching_state cstate,
+                                      unsigned count, unsigned long *irq_flags)
 {
         struct page *p;
         int r;
@@ -605,7 +617,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 
                 INIT_LIST_HEAD(&new_pages);
                 r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
-                                        cstate, alloc_size);
+                                        cstate, alloc_size, 0);
                 spin_lock_irqsave(&pool->lock, *irq_flags);
 
                 if (!r) {
@@ -627,22 +639,25 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 }
 
 /**
- * Cut 'count' number of pages from the pool and put them on the return list.
+ * Allocate pages from the pool and put them on the return list.
  *
- * @return count of pages still required to fulfill the request.
+ * @return zero for success or negative error code.
  */
-static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-                                        struct list_head *pages,
-                                        int ttm_flags,
-                                        enum ttm_caching_state cstate,
-                                        unsigned count)
+static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+                                   struct list_head *pages,
+                                   int ttm_flags,
+                                   enum ttm_caching_state cstate,
+                                   unsigned count, unsigned order)
 {
         unsigned long irq_flags;
         struct list_head *p;
         unsigned i;
+        int r = 0;
 
         spin_lock_irqsave(&pool->lock, irq_flags);
-        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+        if (!order)
+                ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
+                                          &irq_flags);
 
         if (count >= pool->npages) {
                 /* take all pages from the pool */
@@ -672,32 +687,126 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
         count = 0;
 out:
         spin_unlock_irqrestore(&pool->lock, irq_flags);
-        return count;
+
+        /* clear the pages coming from the pool if requested */
+        if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+                struct page *page;
+
+                list_for_each_entry(page, pages, lru) {
+                        if (PageHighMem(page))
+                                clear_highpage(page);
+                        else
+                                clear_page(page_address(page));
+                }
+        }
+
+        /* If pool didn't have enough pages allocate new one. */
+        if (count) {
+                gfp_t gfp_flags = pool->gfp_flags;
+
+                /* set zero flag for page allocation if required */
+                if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+                        gfp_flags |= __GFP_ZERO;
+
+                /* ttm_alloc_new_pages doesn't reference pool so we can run
+                 * multiple requests in parallel.
+                 **/
+                r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
+                                        count, order);
+        }
+
+        return r;
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                           enum ttm_caching_state cstate)
 {
+        struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
+#endif
         unsigned long irq_flags;
-        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
         unsigned i;
 
         if (pool == NULL) {
                 /* No pool for this memory type so free the pages */
-                for (i = 0; i < npages; i++) {
-                        if (pages[i]) {
-                                if (page_count(pages[i]) != 1)
-                                        pr_err("Erroneous page count. Leaking pages.\n");
-                                __free_page(pages[i]);
-                                pages[i] = NULL;
+                i = 0;
+                while (i < npages) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                        struct page *p = pages[i];
+#endif
+                        unsigned order = 0, j;
+
+                        if (!pages[i]) {
+                                ++i;
+                                continue;
+                        }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                        for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                if (p++ != pages[i + j])
+                                        break;
+
+                        if (j == HPAGE_PMD_NR)
+                                order = HPAGE_PMD_ORDER;
+#endif
+
+                        if (page_count(pages[i]) != 1)
+                                pr_err("Erroneous page count. Leaking pages.\n");
+                        __free_pages(pages[i], order);
+
+                        j = 1 << order;
+                        while (j) {
+                                pages[i++] = NULL;
+                                --j;
                         }
                 }
                 return;
         }
 
+        i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        if (huge) {
+                unsigned max_size, n2free;
+
+                spin_lock_irqsave(&huge->lock, irq_flags);
+                while (i < npages) {
+                        struct page *p = pages[i];
+                        unsigned j;
+
+                        if (!p)
+                                break;
+
+                        for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                if (p++ != pages[i + j])
+                                        break;
+
+                        if (j != HPAGE_PMD_NR)
+                                break;
+
+                        list_add_tail(&pages[i]->lru, &huge->list);
+
+                        for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                pages[i++] = NULL;
+                        huge->npages++;
+                }
+
+                /* Check that we don't go over the pool limit */
+                max_size = _manager->options.max_size;
+                max_size /= HPAGE_PMD_NR;
+                if (huge->npages > max_size)
+                        n2free = huge->npages - max_size;
+                else
+                        n2free = 0;
+                spin_unlock_irqrestore(&huge->lock, irq_flags);
+                if (n2free)
+                        ttm_page_pool_free(huge, n2free, false);
+        }
+#endif
+
         spin_lock_irqsave(&pool->lock, irq_flags);
-        for (i = 0; i < npages; i++) {
+        while (i < npages) {
                 if (pages[i]) {
                         if (page_count(pages[i]) != 1)
                                 pr_err("Erroneous page count. Leaking pages.\n");
@@ -705,6 +814,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                         pages[i] = NULL;
                         pool->npages++;
                 }
+                ++i;
         }
         /* Check that we don't go over the pool limit */
         npages = 0;
@@ -727,25 +837,52 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                          enum ttm_caching_state cstate)
 {
-        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+        struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
+#endif
         struct list_head plist;
         struct page *p = NULL;
-        gfp_t gfp_flags = GFP_USER;
         unsigned count;
         int r;
 
-        /* set zero flag for page allocation if required */
-        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-                gfp_flags |= __GFP_ZERO;
-
         /* No pool for cached pages */
         if (pool == NULL) {
+                gfp_t gfp_flags = GFP_USER;
+                unsigned i;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                unsigned j;
+#endif
+
+                /* set zero flag for page allocation if required */
+                if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+                        gfp_flags |= __GFP_ZERO;
+
                 if (flags & TTM_PAGE_FLAG_DMA32)
                         gfp_flags |= GFP_DMA32;
                 else
                         gfp_flags |= GFP_HIGHUSER;
 
-                for (r = 0; r < npages; ++r) {
+                i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                while (npages >= HPAGE_PMD_NR) {
+                        gfp_t huge_flags = gfp_flags;
+
+                        huge_flags |= GFP_TRANSHUGE;
+                        huge_flags &= ~__GFP_MOVABLE;
+                        huge_flags &= ~__GFP_COMP;
+                        p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+                        if (!p)
+                                break;
+
+                        for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                pages[i++] = p++;
+
+                        npages -= HPAGE_PMD_NR;
+                }
+#endif
+
+                while (npages) {
                         p = alloc_page(gfp_flags);
                         if (!p) {
 
@@ -753,49 +890,44 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                                 return -ENOMEM;
                         }
 
-                        pages[r] = p;
+                        pages[i++] = p;
+                        --npages;
                 }
                 return 0;
         }
 
-        /* combine zero flag to pool flags */
-        gfp_flags |= pool->gfp_flags;
-
-        /* First we take pages from the pool */
-        INIT_LIST_HEAD(&plist);
-        npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
         count = 0;
-        list_for_each_entry(p, &plist, lru) {
-                pages[count++] = p;
-        }
 
-        /* clear the pages coming from the pool if requested */
-        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        if (huge && npages >= HPAGE_PMD_NR) {
+                INIT_LIST_HEAD(&plist);
+                ttm_page_pool_get_pages(huge, &plist, flags, cstate,
+                                        npages / HPAGE_PMD_NR,
+                                        HPAGE_PMD_ORDER);
+
                 list_for_each_entry(p, &plist, lru) {
-                        if (PageHighMem(p))
-                                clear_highpage(p);
-                        else
-                                clear_page(page_address(p));
+                        unsigned j;
+
+                        for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                pages[count++] = &p[j];
                 }
         }
+#endif
 
-        /* If pool didn't have enough pages allocate new one. */
-        if (npages > 0) {
-                /* ttm_alloc_new_pages doesn't reference pool so we can run
-                 * multiple requests in parallel.
-                 **/
-                INIT_LIST_HEAD(&plist);
-                r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
-                list_for_each_entry(p, &plist, lru) {
-                        pages[count++] = p;
-                }
-                if (r) {
-                        /* If there is any pages in the list put them back to
-                         * the pool. */
-                        pr_err("Failed to allocate extra pages for large request\n");
-                        ttm_put_pages(pages, count, flags, cstate);
-                        return r;
-                }
+        INIT_LIST_HEAD(&plist);
+        r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
+                                    npages - count, 0);
+
+        list_for_each_entry(p, &plist, lru)
+                pages[count++] = p;
+
+        if (r) {
+                /* If there is any pages in the list put them back to
+                 * the pool.
+                 */
+                pr_err("Failed to allocate extra pages for large request\n");
+                ttm_put_pages(pages, count, flags, cstate);
+                return r;
         }
 
         return 0;
@@ -832,6 +964,14 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
         ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
                                   GFP_USER | GFP_DMA32, "uc dma");
 
+        ttm_page_pool_init_locked(&_manager->wc_pool_huge,
+                                  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+                                  "wc huge");
+
+        ttm_page_pool_init_locked(&_manager->uc_pool_huge,
+                                  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+                                  , "uc huge");
+
         _manager->options.max_size = max_pages;
         _manager->options.small = SMALL_ALLOCATION;
         _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
@@ -873,15 +1013,14 @@ int ttm_pool_populate(struct ttm_tt *ttm)
         if (ttm->state != tt_unpopulated)
                 return 0;
 
-        for (i = 0; i < ttm->num_pages; ++i) {
-                ret = ttm_get_pages(&ttm->pages[i], 1,
-                                    ttm->page_flags,
-                                    ttm->caching_state);
-                if (ret != 0) {
-                        ttm_pool_unpopulate(ttm);
-                        return -ENOMEM;
-                }
+        ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+                            ttm->caching_state);
+        if (unlikely(ret != 0)) {
+                ttm_pool_unpopulate(ttm);
+                return ret;
+        }
 
+        for (i = 0; i < ttm->num_pages; ++i) {
                 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                 PAGE_SIZE);
                 if (unlikely(ret != 0)) {
@@ -908,14 +1047,14 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
         unsigned i;
 
         for (i = 0; i < ttm->num_pages; ++i) {
-                if (ttm->pages[i]) {
-                        ttm_mem_global_free_page(ttm->glob->mem_glob,
-                                                 ttm->pages[i], PAGE_SIZE);
-                        ttm_put_pages(&ttm->pages[i], 1,
-                                      ttm->page_flags,
-                                      ttm->caching_state);
-                }
+                if (!ttm->pages[i])
+                        continue;
+
+                ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i],
+                                         PAGE_SIZE);
         }
+        ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+                      ttm->caching_state);
         ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
@@ -923,16 +1062,26 @@ EXPORT_SYMBOL(ttm_pool_unpopulate);
 #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
 {
-        unsigned i;
+        unsigned i, j;
         int r;
 
         r = ttm_pool_populate(&tt->ttm);
         if (r)
                 return r;
 
-        for (i = 0; i < tt->ttm.num_pages; i++) {
+        for (i = 0; i < tt->ttm.num_pages; ++i) {
+                struct page *p = tt->ttm.pages[i];
+                size_t num_pages = 1;
+
+                for (j = i + 1; j < tt->ttm.num_pages; ++j) {
+                        if (++p != tt->ttm.pages[j])
+                                break;
+
+                        ++num_pages;
+                }
+
                 tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
-                                                  0, PAGE_SIZE,
+                                                  0, num_pages * PAGE_SIZE,
                                                   DMA_BIDIRECTIONAL);
                 if (dma_mapping_error(dev, tt->dma_address[i])) {
                         while (i--) {
@@ -943,6 +1092,11 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
                         ttm_pool_unpopulate(&tt->ttm);
                         return -EFAULT;
                 }
+
+                for (j = 1; j < num_pages; ++j) {
+                        tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
+                        ++i;
+                }
         }
         return 0;
 }
@@ -950,13 +1104,28 @@ EXPORT_SYMBOL(ttm_populate_and_map_pages);
 
 void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
 {
-        unsigned i;
+        unsigned i, j;
 
-        for (i = 0; i < tt->ttm.num_pages; i++) {
-                if (tt->dma_address[i]) {
-                        dma_unmap_page(dev, tt->dma_address[i],
-                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
+        for (i = 0; i < tt->ttm.num_pages;) {
+                struct page *p = tt->ttm.pages[i];
+                size_t num_pages = 1;
+
+                if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
+                        ++i;
+                        continue;
                 }
+
+                for (j = i + 1; j < tt->ttm.num_pages; ++j) {
+                        if (++p != tt->ttm.pages[j])
+                                break;
+
+                        ++num_pages;
+                }
+
+                dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
+                               DMA_BIDIRECTIONAL);
+
+                i += num_pages;
         }
         ttm_pool_unpopulate(&tt->ttm);
 }
@@ -972,12 +1141,12 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
                 seq_printf(m, "No pool allocator running.\n");
                 return 0;
         }
-        seq_printf(m, "%6s %12s %13s %8s\n",
+        seq_printf(m, "%7s %12s %13s %8s\n",
                         h[0], h[1], h[2], h[3]);
         for (i = 0; i < NUM_POOLS; ++i) {
                 p = &_manager->pools[i];
 
-                seq_printf(m, "%6s %12ld %13ld %8d\n",
+                seq_printf(m, "%7s %12ld %13ld %8d\n",
                                 p->name, p->nrefills,
                                 p->nfrees, p->npages);
         }