author     Jerome Glisse <jglisse@redhat.com>        2011-11-09 17:15:26 -0500
committer  Dave Airlie <airlied@redhat.com>          2011-12-06 05:40:02 -0500
commit     8e7e70522d760c4ccd4cd370ebfa0ba69e006c6e (patch)
tree       a2b0f931e513f3aeba174b974bd5e869685fe288 /drivers/gpu
parent     3230cfc34fca9d17c1628cf0e4ac25199592a69a (diff)
drm/ttm: isolate dma data from ttm_tt V4
Move the dma data into a superset ttm_dma_tt structure which inherits
from ttm_tt. This allows drivers that don't use the dma functionality
to avoid wasting memory on it.

V2 Rebase on top of no memory account changes (where/when is my
delorean when i need it ?)
V3 Make sure page list is initialized empty
V4 typo/syntax fixes

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c      |  18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c   |  22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c       |  43
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c      | 114
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  |  35
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c              |  60
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c    |   2
7 files changed, 172 insertions(+), 122 deletions(-)
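
The key idea of the patch is that struct ttm_dma_tt is a superset that embeds struct ttm_tt as its first member (see the comment added to struct nouveau_sgdma_be below): driver callbacks still receive a struct ttm_tt pointer and recover the DMA-aware container with a plain cast. A minimal standalone sketch of that layout trick follows; the field lists are trimmed and populate() is a hypothetical stand-in for callbacks such as nouveau_ttm_tt_populate(), not the kernel code itself.

/*
 * Simplified illustration only: struct ttm_dma_tt embeds struct ttm_tt as
 * its FIRST member, so a pointer to the embedded ttm_tt also points to the
 * containing ttm_dma_tt and can be recovered with a cast (C guarantees a
 * struct pointer points to its first member). Field lists are trimmed.
 */
#include <stdio.h>

struct ttm_tt {                  /* base type handed to driver callbacks */
	unsigned long num_pages;
};

struct ttm_dma_tt {              /* superset used only by DMA-aware drivers */
	struct ttm_tt ttm;       /* must stay the first field for the cast */
	unsigned long *dma_address;
};

/* hypothetical stand-in for a driver populate callback */
static void populate(struct ttm_tt *ttm)
{
	/* same cast pattern the patch uses in nouveau/radeon populate paths */
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)ttm;

	printf("pages=%lu dma_address=%p\n",
	       ttm->num_pages, (void *)ttm_dma->dma_address);
}

int main(void)
{
	struct ttm_dma_tt tt = { .ttm = { .num_pages = 4 }, .dma_address = NULL };

	populate(&tt.ttm);	/* callbacks only ever see the embedded ttm_tt */
	return 0;
}
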
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2dc0d8303cb7..d6326af9fcc0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1052,6 +1052,7 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 static int
 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
+	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct drm_nouveau_private *dev_priv;
 	struct drm_device *dev;
 	unsigned i;
@@ -1065,7 +1066,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate(ttm, dev->dev);
+		return ttm_dma_populate((void *)ttm, dev->dev);
 	}
 #endif
 
@@ -1075,14 +1076,14 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	for (i = 0; i < ttm->num_pages; i++) {
-		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						   0, PAGE_SIZE,
						   PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
+		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
 			while (--i) {
-				pci_unmap_page(dev->pdev, ttm->dma_address[i],
+				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-				ttm->dma_address[i] = 0;
+				ttm_dma->dma_address[i] = 0;
 			}
 			ttm_pool_unpopulate(ttm);
 			return -EFAULT;
@@ -1094,6 +1095,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 static void
 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
+	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct drm_nouveau_private *dev_priv;
 	struct drm_device *dev;
 	unsigned i;
@@ -1103,14 +1105,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate(ttm, dev->dev);
+		ttm_dma_unpopulate((void *)ttm, dev->dev);
 		return;
 	}
 #endif
 
 	for (i = 0; i < ttm->num_pages; i++) {
-		if (ttm->dma_address[i]) {
-			pci_unmap_page(dev->pdev, ttm->dma_address[i],
+		if (ttm_dma->dma_address[i]) {
+			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index ee1eb7cba798..47f245edf538 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,7 +8,10 @@
 #define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
 
 struct nouveau_sgdma_be {
-	struct ttm_tt ttm;
+	/* this has to be the first field so populate/unpopulated in
+	 * nouve_bo.c works properly, otherwise have to move them here
+	 */
+	struct ttm_dma_tt ttm;
 	struct drm_device *dev;
 	u64 offset;
 };
@@ -20,6 +23,7 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
 
 	if (ttm) {
 		NV_DEBUG(nvbe->dev, "\n");
+		ttm_dma_tt_fini(&nvbe->ttm);
 		kfree(nvbe);
 	}
 }
@@ -38,7 +42,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	nvbe->offset = mem->start << PAGE_SHIFT;
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < ttm->num_pages; i++) {
-		dma_addr_t dma_offset = ttm->dma_address[i];
+		dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
 
 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -97,7 +101,7 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = ttm->dma_address;
+	dma_addr_t *list = nvbe->ttm.dma_address;
 	u32 pte = mem->start << 2;
 	u32 cnt = ttm->num_pages;
 
@@ -206,7 +210,7 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = ttm->dma_address;
+	dma_addr_t *list = nvbe->ttm.dma_address;
 	u32 pte = mem->start << 2, tmp[4];
 	u32 cnt = ttm->num_pages;
 	int i;
@@ -282,10 +286,11 @@ static struct ttm_backend_func nv44_sgdma_backend = {
 static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct nouveau_mem *node = mem->mm_node;
 
 	/* noop: bound in move_notify() */
-	node->pages = ttm->dma_address;
+	node->pages = nvbe->ttm.dma_address;
 	return 0;
 }
 
@@ -316,12 +321,13 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
 		return NULL;
 
 	nvbe->dev = dev;
-	nvbe->ttm.func = dev_priv->gart_info.func;
+	nvbe->ttm.ttm.func = dev_priv->gart_info.func;
 
-	if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(nvbe);
 		return NULL;
 	}
-	return &nvbe->ttm;
+	return &nvbe->ttm.ttm;
 }
 
 int
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f499b2c69de5..e111a3812434 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -501,7 +501,7 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
  * TTM backend functions.
  */
 struct radeon_ttm_tt {
-	struct ttm_tt ttm;
+	struct ttm_dma_tt ttm;
 	struct radeon_device *rdev;
 	u64 offset;
 };
@@ -509,17 +509,16 @@ struct radeon_ttm_tt {
 static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 				   struct ttm_mem_reg *bo_mem)
 {
-	struct radeon_ttm_tt *gtt;
+	struct radeon_ttm_tt *gtt = (void*)ttm;
 	int r;
 
-	gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
 	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
 		     ttm->num_pages, bo_mem, ttm);
 	}
 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
-			     ttm->num_pages, ttm->pages, ttm->dma_address);
+			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
 	if (r) {
 		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
 			  ttm->num_pages, (unsigned)gtt->offset);
@@ -530,18 +529,17 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 
 static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
 {
-	struct radeon_ttm_tt *gtt;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
 
-	gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
 	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
 	return 0;
 }
 
 static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
 {
-	struct radeon_ttm_tt *gtt;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
 
-	gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
+	ttm_dma_tt_fini(&gtt->ttm);
 	kfree(gtt);
 }
 
@@ -570,17 +568,19 @@ struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
 	if (gtt == NULL) {
 		return NULL;
 	}
-	gtt->ttm.func = &radeon_backend_func;
+	gtt->ttm.ttm.func = &radeon_backend_func;
 	gtt->rdev = rdev;
-	if (ttm_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(gtt);
 		return NULL;
 	}
-	return &gtt->ttm;
+	return &gtt->ttm.ttm;
 }
 
 static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 {
 	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
 	unsigned i;
 	int r;
 
@@ -591,7 +591,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate(ttm, rdev->dev);
+		return ttm_dma_populate(&gtt->ttm, rdev->dev);
 	}
 #endif
 
@@ -601,14 +601,14 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	for (i = 0; i < ttm->num_pages; i++) {
-		ttm->dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						   0, PAGE_SIZE,
						   PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(rdev->pdev, ttm->dma_address[i])) {
+		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
 			while (--i) {
-				pci_unmap_page(rdev->pdev, ttm->dma_address[i],
+				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-				ttm->dma_address[i] = 0;
+				gtt->ttm.dma_address[i] = 0;
 			}
 			ttm_pool_unpopulate(ttm);
 			return -EFAULT;
@@ -620,20 +620,21 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
 	unsigned i;
 
 	rdev = radeon_get_rdev(ttm->bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate(ttm, rdev->dev);
+		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
 		return;
 	}
 #endif
 
 	for (i = 0; i < ttm->num_pages; i++) {
-		if (ttm->dma_address[i]) {
-			pci_unmap_page(rdev->pdev, ttm->dma_address[i],
+		if (gtt->ttm.dma_address[i]) {
+			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 8d6267e434ab..499debda791e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -662,13 +662,61 @@ out:
 	return count;
 }
 
+/* Put all pages in pages list to correct pool to wait for reuse */
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+			  enum ttm_caching_state cstate)
+{
+	unsigned long irq_flags;
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	unsigned i;
+
+	if (pool == NULL) {
+		/* No pool for this memory type so free the pages */
+		for (i = 0; i < npages; i++) {
+			if (pages[i]) {
+				if (page_count(pages[i]) != 1)
+					printk(KERN_ERR TTM_PFX
+					       "Erroneous page count. "
+					       "Leaking pages.\n");
+				__free_page(pages[i]);
+				pages[i] = NULL;
+			}
+		}
+		return;
+	}
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	for (i = 0; i < npages; i++) {
+		if (pages[i]) {
+			if (page_count(pages[i]) != 1)
+				printk(KERN_ERR TTM_PFX
+				       "Erroneous page count. "
+				       "Leaking pages.\n");
+			list_add_tail(&pages[i]->lru, &pool->list);
+			pages[i] = NULL;
+			pool->npages++;
+		}
+	}
+	/* Check that we don't go over the pool limit */
+	npages = 0;
+	if (pool->npages > _manager->options.max_size) {
+		npages = pool->npages - _manager->options.max_size;
+		/* free at least NUM_PAGES_TO_ALLOC number of pages
+		 * to reduce calls to set_memory_wb */
+		if (npages < NUM_PAGES_TO_ALLOC)
+			npages = NUM_PAGES_TO_ALLOC;
+	}
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	if (npages)
+		ttm_page_pool_free(pool, npages);
+}
+
 /*
  * On success pages list will hold count number of correctly
  * cached pages.
  */
-int ttm_get_pages(struct page **pages, int flags,
-		  enum ttm_caching_state cstate, unsigned npages,
-		  dma_addr_t *dma_address)
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+			 enum ttm_caching_state cstate)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct list_head plist;
@@ -736,7 +784,7 @@ int ttm_get_pages(struct page **pages, int flags,
 			printk(KERN_ERR TTM_PFX
 			       "Failed to allocate extra pages "
 			       "for large request.");
-			ttm_put_pages(pages, count, flags, cstate, NULL);
+			ttm_put_pages(pages, count, flags, cstate);
 			return r;
 		}
 	}
@@ -744,55 +792,6 @@ int ttm_get_pages(struct page **pages, int flags,
 	return 0;
 }
 
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct page **pages, unsigned npages, int flags,
-		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
-{
-	unsigned long irq_flags;
-	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-	unsigned i;
-
-	if (pool == NULL) {
-		/* No pool for this memory type so free the pages */
-		for (i = 0; i < npages; i++) {
-			if (pages[i]) {
-				if (page_count(pages[i]) != 1)
-					printk(KERN_ERR TTM_PFX
-					       "Erroneous page count. "
-					       "Leaking pages.\n");
-				__free_page(pages[i]);
-				pages[i] = NULL;
-			}
-		}
-		return;
-	}
-
-	spin_lock_irqsave(&pool->lock, irq_flags);
-	for (i = 0; i < npages; i++) {
-		if (pages[i]) {
-			if (page_count(pages[i]) != 1)
-				printk(KERN_ERR TTM_PFX
-				       "Erroneous page count. "
-				       "Leaking pages.\n");
-			list_add_tail(&pages[i]->lru, &pool->list);
-			pages[i] = NULL;
-			pool->npages++;
-		}
-	}
-	/* Check that we don't go over the pool limit */
-	npages = 0;
-	if (pool->npages > _manager->options.max_size) {
-		npages = pool->npages - _manager->options.max_size;
-		/* free at least NUM_PAGES_TO_ALLOC number of pages
-		 * to reduce calls to set_memory_wb */
-		if (npages < NUM_PAGES_TO_ALLOC)
-			npages = NUM_PAGES_TO_ALLOC;
-	}
-	spin_unlock_irqrestore(&pool->lock, irq_flags);
-	if (npages)
-		ttm_page_pool_free(pool, npages);
-}
-
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
 		char *name)
 {
@@ -865,9 +864,9 @@ int ttm_pool_populate(struct ttm_tt *ttm)
 		return 0;
 
 	for (i = 0; i < ttm->num_pages; ++i) {
-		ret = ttm_get_pages(&ttm->pages[i], ttm->page_flags,
-				    ttm->caching_state, 1,
-				    &ttm->dma_address[i]);
+		ret = ttm_get_pages(&ttm->pages[i], 1,
+				    ttm->page_flags,
+				    ttm->caching_state);
 		if (ret != 0) {
 			ttm_pool_unpopulate(ttm);
 			return -ENOMEM;
@@ -904,8 +903,7 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 					 ttm->pages[i]);
 			ttm_put_pages(&ttm->pages[i], 1,
 				      ttm->page_flags,
-				      ttm->caching_state,
-				      ttm->dma_address);
+				      ttm->caching_state);
 		}
 	}
 	ttm->state = tt_unpopulated;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7a4779304877..6678abca0d98 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -789,7 +789,7 @@ out:
 
 /*
  * @return count of pages still required to fulfill the request.
-*/
+ */
 static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
 					 unsigned long *irq_flags)
 {
@@ -838,10 +838,11 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
  * allocates one page at a time.
  */
 static int ttm_dma_pool_get_pages(struct dma_pool *pool,
-				  struct ttm_tt *ttm,
+				  struct ttm_dma_tt *ttm_dma,
 				  unsigned index)
 {
 	struct dma_page *d_page;
+	struct ttm_tt *ttm = &ttm_dma->ttm;
 	unsigned long irq_flags;
 	int count, r = -ENOMEM;
 
@@ -850,8 +851,8 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
 	if (count) {
 		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
 		ttm->pages[index] = d_page->p;
-		ttm->dma_address[index] = d_page->dma;
-		list_move_tail(&d_page->page_list, &ttm->alloc_list);
+		ttm_dma->dma_address[index] = d_page->dma;
+		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
 		r = 0;
 		pool->npages_in_use += 1;
 		pool->npages_free -= 1;
@@ -864,8 +865,9 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
  * On success pages list will hold count number of correctly
  * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
  */
-int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
+	struct ttm_tt *ttm = &ttm_dma->ttm;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	struct dma_pool *pool;
 	enum pool_type type;
@@ -892,18 +894,18 @@ int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
 		}
 	}
 
-	INIT_LIST_HEAD(&ttm->alloc_list);
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	for (i = 0; i < ttm->num_pages; ++i) {
-		ret = ttm_dma_pool_get_pages(pool, ttm, i);
+		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
 		if (ret != 0) {
-			ttm_dma_unpopulate(ttm, dev);
+			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
 		}
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						false, false);
 		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm, dev);
+			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
 		}
 	}
@@ -911,7 +913,7 @@ int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev)
 	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
 		ret = ttm_tt_swapin(ttm);
 		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm, dev);
+			ttm_dma_unpopulate(ttm_dma, dev);
 			return ret;
 		}
 	}
@@ -937,8 +939,9 @@ static int ttm_dma_pool_get_num_unused_pages(void)
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
+	struct ttm_tt *ttm = &ttm_dma->ttm;
 	struct dma_pool *pool;
 	struct dma_page *d_page, *next;
 	enum pool_type type;
@@ -956,7 +959,7 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
 		  ttm_to_type(ttm->page_flags, tt_cached)) == pool);
 
 	/* make sure pages array match list and count number of pages */
-	list_for_each_entry(d_page, &ttm->alloc_list, page_list) {
+	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
 		ttm->pages[count] = d_page->p;
 		count++;
 	}
@@ -967,7 +970,7 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
 		pool->nfrees += count;
 	} else {
 		pool->npages_free += count;
-		list_splice(&ttm->alloc_list, &pool->free_list);
+		list_splice(&ttm_dma->pages_list, &pool->free_list);
 		if (pool->npages_free > _manager->options.max_size) {
 			count = pool->npages_free - _manager->options.max_size;
 		}
@@ -975,7 +978,7 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 
 	if (is_cached) {
-		list_for_each_entry_safe(d_page, next, &ttm->alloc_list, page_list) {
+		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
 						 d_page->p);
 			ttm_dma_page_put(pool, d_page);
@@ -987,10 +990,10 @@ void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
 		}
 	}
 
-	INIT_LIST_HEAD(&ttm->alloc_list);
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	for (i = 0; i < ttm->num_pages; i++) {
 		ttm->pages[i] = NULL;
-		ttm->dma_address[i] = 0;
+		ttm_dma->dma_address[i] = 0;
 	}
 
 	/* shrink pool if necessary */
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 1625739b434b..58e1fa14fe3a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -48,17 +48,14 @@
  */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
-	ttm->dma_address = drm_calloc_large(ttm->num_pages,
-					    sizeof(*ttm->dma_address));
+	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
 }
 
-static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
-	drm_free_large(ttm->pages);
-	ttm->pages = NULL;
-	drm_free_large(ttm->dma_address);
-	ttm->dma_address = NULL;
+	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+					    sizeof(*ttm->dma_address));
 }
 
 #ifdef CONFIG_X86
@@ -173,7 +170,6 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 
 	if (likely(ttm->pages != NULL)) {
 		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
-		ttm_tt_free_page_directory(ttm);
 	}
 
 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
@@ -196,9 +192,8 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
 	ttm->dummy_read_page = dummy_read_page;
 	ttm->state = tt_unpopulated;
 
-	INIT_LIST_HEAD(&ttm->alloc_list);
 	ttm_tt_alloc_page_directory(ttm);
-	if (!ttm->pages || !ttm->dma_address) {
+	if (!ttm->pages) {
 		ttm_tt_destroy(ttm);
 		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
 		return -ENOMEM;
@@ -207,6 +202,49 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_tt_init);
 
+void ttm_tt_fini(struct ttm_tt *ttm)
+{
+	drm_free_large(ttm->pages);
+	ttm->pages = NULL;
+}
+EXPORT_SYMBOL(ttm_tt_fini);
+
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+		    unsigned long size, uint32_t page_flags,
+		    struct page *dummy_read_page)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+
+	ttm->bdev = bdev;
+	ttm->glob = bdev->glob;
+	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	ttm->caching_state = tt_cached;
+	ttm->page_flags = page_flags;
+	ttm->dummy_read_page = dummy_read_page;
+	ttm->state = tt_unpopulated;
+
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	ttm_dma_tt_alloc_page_directory(ttm_dma);
+	if (!ttm->pages || !ttm_dma->dma_address) {
+		ttm_tt_destroy(ttm);
+		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_dma_tt_init);
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+
+	drm_free_large(ttm->pages);
+	ttm->pages = NULL;
+	drm_free_large(ttm_dma->dma_address);
+	ttm_dma->dma_address = NULL;
+}
+EXPORT_SYMBOL(ttm_dma_tt_fini);
+
 void ttm_tt_unbind(struct ttm_tt *ttm)
 {
 	int ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 3986d7468232..1e2c0fb7f786 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -168,6 +168,7 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
+	ttm_tt_fini(ttm);
 	kfree(vmw_be);
 }
 
@@ -191,6 +192,7 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
 	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
 	if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(vmw_be);
 		return NULL;
 	}
 