author		Dave Airlie <airlied@redhat.com>	2011-02-22 21:06:39 -0500
committer	Dave Airlie <airlied@redhat.com>	2011-02-22 21:06:39 -0500
commit		de1e7cd63a8ec26a3bd3740708cfd72dd76509e2 (patch)
tree		52bc82a71f34e92895d22821543a2be011834505
parent		7811bddb6654337fd85837ef14c1a96a0c264745 (diff)
parent		5a893fc28f0393adb7c885a871b8c59e623fd528 (diff)
Merge branch 'stable/ttm.pci-api.v5' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen into drm-next
* 'stable/ttm.pci-api.v5' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  ttm: Include the 'struct dev' when using the DMA API.
  nouveau/ttm/PCIe: Use dma_addr if TTM has set it.
  radeon/ttm/PCIe: Use dma_addr if TTM has set it.
  ttm: Expand (*populate) to support an array of DMA addresses.
  ttm: Utilize the DMA API for pages that have TTM_PAGE_FLAG_DMA32 set.
  ttm: Introduce a placeholder for DMA (bus) addresses.
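The common thread of the series is the dma_addr_t array that TTM now hands
from struct ttm_tt down to every backend's (*populate) hook: slots that TTM
filled through the DMA API carry a valid bus address, all others hold
DMA_ERROR_CODE. A minimal sketch of a backend consuming that contract is
below; example_populate(), example_pdev and use_bus_address() are
hypothetical stand-ins for driver-specific code, mirroring the nouveau and
radeon hunks that follow.

static int example_populate(struct ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page,
			    dma_addr_t *dma_addrs)
{
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		if (dma_addrs[i] != DMA_ERROR_CODE) {
			/* TTM already mapped this page through the DMA API;
			 * the backend must use the address as-is and must
			 * not unmap it on teardown. */
			use_bus_address(dma_addrs[i]);	/* hypothetical */
		} else {
			/* Legacy path: the backend maps the page itself. */
			dma_addr_t addr = pci_map_page(example_pdev, pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(example_pdev, addr))
				return -EFAULT;
			use_bus_address(addr);		/* hypothetical */
		}
	}
	return 0;
}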
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_mem.c	1
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sgdma.c	31
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h	4
-rw-r--r--	drivers/gpu/drm/radeon/radeon_gart.c	36
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ttm.c	9
-rw-r--r--	drivers/gpu/drm/ttm/ttm_agp_backend.c	3
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	35
-rw-r--r--	drivers/gpu/drm/ttm/ttm_tt.c	12
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c	3
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.c	2
-rw-r--r--	include/drm/ttm/ttm_bo_driver.h	7
-rw-r--r--	include/drm/ttm/ttm_page_alloc.h	12
12 files changed, 119 insertions, 36 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 123969dd4f56..2b4e5e912110 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -409,6 +409,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	dev_priv->ttm.bdev.dev = dev->dev;
 	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
 				 dev_priv->ttm.bo_global_ref.ref.object,
 				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 9a250eb53098..07b115184b87 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
+	bool *ttm_alloced;
 	unsigned nr_pages;
 
 	u64 offset;
@@ -20,7 +21,8 @@ struct nouveau_sgdma_be {
 
 static int
 nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-		       struct page **pages, struct page *dummy_read_page)
+		       struct page **pages, struct page *dummy_read_page,
+		       dma_addr_t *dma_addrs)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_device *dev = nvbe->dev;
@@ -34,15 +36,25 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 	if (!nvbe->pages)
 		return -ENOMEM;
 
+	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+	if (!nvbe->ttm_alloced)
+		return -ENOMEM;
+
 	nvbe->nr_pages = 0;
 	while (num_pages--) {
-		nvbe->pages[nvbe->nr_pages] =
-			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+			nvbe->pages[nvbe->nr_pages] =
+					dma_addrs[nvbe->nr_pages];
+			nvbe->ttm_alloced[nvbe->nr_pages] = true;
+		} else {
+			nvbe->pages[nvbe->nr_pages] =
+				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
 				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			if (pci_dma_mapping_error(dev->pdev,
 						  nvbe->pages[nvbe->nr_pages])) {
 				be->func->clear(be);
 				return -EFAULT;
+			}
 		}
 
 		nvbe->nr_pages++;
@@ -65,11 +77,14 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 			be->func->unbind(be);
 
 		while (nvbe->nr_pages--) {
-			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+			if (!nvbe->ttm_alloced[nvbe->nr_pages])
+				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
 		kfree(nvbe->pages);
+		kfree(nvbe->ttm_alloced);
 		nvbe->pages = NULL;
+		nvbe->ttm_alloced = NULL;
 		nvbe->nr_pages = 0;
 	}
 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a4605362c528..82aa59941aa1 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -328,6 +328,7 @@ struct radeon_gart {
 	union radeon_gart_table		table;
 	struct page			**pages;
 	dma_addr_t			*pages_addr;
+	bool				*ttm_alloced;
 	bool				ready;
 };
 
@@ -340,7 +341,8 @@ void radeon_gart_fini(struct radeon_device *rdev);
 void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 			int pages);
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist);
+		     int pages, struct page **pagelist,
+		     dma_addr_t *dma_addr);
 
 
 /*
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 65016117d95f..a6b0fed7bae9 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
-			pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+			if (!rdev->gart.ttm_alloced[p])
+				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			page_base = rdev->gart.pages_addr[p];
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 }
 
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist)
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
 {
 	unsigned t;
 	unsigned p;
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		/* we need to support large memory configurations */
-		/* assume that unbind have already been call on the range */
-		rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+		/* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
+		 * is requested. */
+		if (dma_addr[i] != DMA_ERROR_CODE) {
+			rdev->gart.ttm_alloced[p] = true;
+			rdev->gart.pages_addr[p] = dma_addr[i];
+		} else {
+			/* we need to support large memory configurations */
+			/* assume that unbind have already been call on the range */
+			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
 							0, PAGE_SIZE,
 							PCI_DMA_BIDIRECTIONAL);
 			if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
 				/* FIXME: failed to map page (return -ENOMEM?) */
 				radeon_gart_unbind(rdev, offset, pages);
 				return -ENOMEM;
+			}
 		}
 		rdev->gart.pages[p] = pagelist[i];
 		page_base = rdev->gart.pages_addr[p];
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
+	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+					 rdev->gart.num_cpu_pages, GFP_KERNEL);
+	if (rdev->gart.ttm_alloced == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
 	/* set GART entry to point to the dummy page by default */
 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	rdev->gart.ready = false;
 	kfree(rdev->gart.pages);
 	kfree(rdev->gart.pages_addr);
+	kfree(rdev->gart.ttm_alloced);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
+	rdev->gart.ttm_alloced = NULL;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1272e4b6a1d4..c345e899e881 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -513,6 +513,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	if (r) {
 		return r;
 	}
+	rdev->mman.bdev.dev = rdev->dev;
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&rdev->mman.bdev,
 			       rdev->mman.bo_global_ref.ref.object,
@@ -647,6 +648,7 @@ struct radeon_ttm_backend {
 	unsigned long			num_pages;
 	struct page			**pages;
 	struct page			*dummy_read_page;
+	dma_addr_t			*dma_addrs;
 	bool				populated;
 	bool				bound;
 	unsigned			offset;
@@ -655,12 +657,14 @@ struct radeon_ttm_backend {
 static int radeon_ttm_backend_populate(struct ttm_backend *backend,
 				       unsigned long num_pages,
 				       struct page **pages,
-				       struct page *dummy_read_page)
+				       struct page *dummy_read_page,
+				       dma_addr_t *dma_addrs)
 {
 	struct radeon_ttm_backend *gtt;
 
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->pages = pages;
+	gtt->dma_addrs = dma_addrs;
 	gtt->num_pages = num_pages;
 	gtt->dummy_read_page = dummy_read_page;
 	gtt->populated = true;
@@ -673,6 +677,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend)
 
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->pages = NULL;
+	gtt->dma_addrs = NULL;
 	gtt->num_pages = 0;
 	gtt->dummy_read_page = NULL;
 	gtt->populated = false;
@@ -693,7 +698,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
 			gtt->num_pages, bo_mem, backend);
 	}
 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
-			     gtt->num_pages, gtt->pages);
+			     gtt->num_pages, gtt->pages, gtt->dma_addrs);
 	if (r) {
 		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
 			  gtt->num_pages, gtt->offset);
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index f999e36f30b4..1c4a72f681c1 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -47,7 +47,8 @@ struct ttm_agp_backend {
 
 static int ttm_agp_populate(struct ttm_backend *backend,
 			    unsigned long num_pages, struct page **pages,
-			    struct page *dummy_read_page)
+			    struct page *dummy_read_page,
+			    dma_addr_t *dma_addrs)
 {
 	struct ttm_agp_backend *agp_be =
 	    container_of(backend, struct ttm_agp_backend, backend);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b1e02fffd3cc..35849dbf3ab5 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -38,6 +38,7 @@
 #include <linux/mm.h>
 #include <linux/seq_file.h> /* for seq_printf */
 #include <linux/slab.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/atomic.h>
 
@@ -662,7 +663,8 @@ out:
  * cached pages.
  */
 int ttm_get_pages(struct list_head *pages, int flags,
-		  enum ttm_caching_state cstate, unsigned count)
+		  enum ttm_caching_state cstate, unsigned count,
+		  dma_addr_t *dma_address, struct device *dev)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p = NULL;
@@ -681,14 +683,22 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		gfp_flags |= GFP_HIGHUSER;
 
 	for (r = 0; r < count; ++r) {
-		p = alloc_page(gfp_flags);
+		if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+			void *addr;
+			addr = dma_alloc_coherent(dev, PAGE_SIZE,
+						  &dma_address[r],
+						  gfp_flags);
+			if (addr == NULL)
+				return -ENOMEM;
+			p = virt_to_page(addr);
+		} else
+			p = alloc_page(gfp_flags);
 		if (!p) {
 
 			printk(KERN_ERR TTM_PFX
 			       "Unable to allocate page.");
 			return -ENOMEM;
 		}
-
 		list_add(&p->lru, pages);
 	}
 	return 0;
@@ -720,7 +730,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 			printk(KERN_ERR TTM_PFX
 			       "Failed to allocate extra pages "
 			       "for large request.");
-			ttm_put_pages(pages, 0, flags, cstate);
+			ttm_put_pages(pages, 0, flags, cstate, NULL, NULL);
 			return r;
 		}
 	}
@@ -731,17 +741,30 @@ int ttm_get_pages(struct list_head *pages, int flags,
 
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
-		   enum ttm_caching_state cstate)
+		   enum ttm_caching_state cstate, dma_addr_t *dma_address,
+		   struct device *dev)
 {
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p, *tmp;
+	unsigned r;
 
 	if (pool == NULL) {
 		/* No pool for this memory type so free the pages */
 
+		r = page_count-1;
 		list_for_each_entry_safe(p, tmp, pages, lru) {
-			__free_page(p);
+			if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+				void *addr = page_address(p);
+				WARN_ON(!addr || !dma_address[r]);
+				if (addr)
+					dma_free_coherent(dev, PAGE_SIZE,
+							  addr,
+							  dma_address[r]);
+				dma_address[r] = 0;
+			} else
+				__free_page(p);
+			r--;
 		}
 		/* Make the pages list empty */
 		INIT_LIST_HEAD(pages);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index af789dc869b9..0f8fc9ff0c53 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -49,12 +49,16 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
 	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
+	ttm->dma_address = drm_calloc_large(ttm->num_pages,
+					    sizeof(*ttm->dma_address));
 }
 
 static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 {
 	drm_free_large(ttm->pages);
 	ttm->pages = NULL;
+	drm_free_large(ttm->dma_address);
+	ttm->dma_address = NULL;
 }
 
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
@@ -105,7 +109,8 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 
 	INIT_LIST_HEAD(&h);
 
-	ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+	ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+			    &ttm->dma_address[index], ttm->be->bdev->dev);
 
 	if (ret != 0)
 		return NULL;
@@ -164,7 +169,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	be->func->populate(be, ttm->num_pages, ttm->pages,
-			   ttm->dummy_read_page);
+			   ttm->dummy_read_page, ttm->dma_address);
 	ttm->state = tt_unbound;
 	return 0;
 }
@@ -298,7 +303,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 			count++;
 		}
 	}
-	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
+	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
+		      ttm->dma_address, ttm->be->bdev->dev);
 	ttm->state = tt_unpopulated;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 80bc37b274e7..87e43e0733bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -102,7 +102,8 @@ struct vmw_ttm_backend {
 
 static int vmw_ttm_populate(struct ttm_backend *backend,
 			    unsigned long num_pages, struct page **pages,
-			    struct page *dummy_read_page)
+			    struct page *dummy_read_page,
+			    dma_addr_t *dma_addrs)
 {
 	struct vmw_ttm_backend *vmw_be =
 	    container_of(backend, struct vmw_ttm_backend, backend);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 96949b93d920..df04661e2b93 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -322,7 +322,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
-
+	dev_priv->bdev.dev = dev->dev;
 	ret = ttm_bo_device_init(&dev_priv->bdev,
 				 dev_priv->bo_global_ref.ref.object,
 				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1da8af6ac884..38ff06822609 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -50,13 +50,15 @@ struct ttm_backend_func {
 	 * @pages: Array of pointers to ttm pages.
 	 * @dummy_read_page: Page to be used instead of NULL pages in the
 	 * array @pages.
+	 * @dma_addrs: Array of DMA (bus) addresses of the ttm pages.
 	 *
 	 * Populate the backend with ttm pages. Depending on the backend,
 	 * it may or may not copy the @pages array.
 	 */
 	int (*populate) (struct ttm_backend *backend,
 			 unsigned long num_pages, struct page **pages,
-			 struct page *dummy_read_page);
+			 struct page *dummy_read_page,
+			 dma_addr_t *dma_addrs);
 	/**
 	 * struct ttm_backend_func member clear
 	 *
@@ -149,6 +151,7 @@ enum ttm_caching_state {
  * @swap_storage: Pointer to shmem struct file for swap storage.
  * @caching_state: The current caching state of the pages.
  * @state: The current binding state of the pages.
+ * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
  *
  * This is a structure holding the pages, caching- and aperture binding
  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -173,6 +176,7 @@ struct ttm_tt {
 		tt_unbound,
 		tt_unpopulated,
 	} state;
+	dma_addr_t *dma_address;
 };
 
 #define TTM_MEMTYPE_FLAG_FIXED		(1 << 0) /* Fixed (on-card) PCI memory */
@@ -547,6 +551,7 @@ struct ttm_bo_device {
 	struct list_head device_list;
 	struct ttm_bo_global *glob;
 	struct ttm_bo_driver *driver;
+	struct device *dev;
 	rwlock_t vm_lock;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 	spinlock_t fence_lock;
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 116821448c38..ccb6b7a240e2 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -36,11 +36,15 @@
  * @flags: ttm flags for page allocation.
  * @cstate: ttm caching state for the page.
  * @count: number of pages to allocate.
+ * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * @dev: struct device for appropriate DMA accounting.
  */
 int ttm_get_pages(struct list_head *pages,
 		  int flags,
 		  enum ttm_caching_state cstate,
-		  unsigned count);
+		  unsigned count,
+		  dma_addr_t *dma_address,
+		  struct device *dev);
 /**
  * Put linked list of pages to pool.
  *
@@ -49,11 +53,15 @@ int ttm_get_pages(struct list_head *pages,
  * count.
  * @flags: ttm flags for page allocation.
  * @cstate: ttm caching state.
+ * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * @dev: struct device for appropriate DMA accounting.
  */
 void ttm_put_pages(struct list_head *pages,
 		   unsigned page_count,
 		   int flags,
-		   enum ttm_caching_state cstate);
+		   enum ttm_caching_state cstate,
+		   dma_addr_t *dma_address,
+		   struct device *dev);
 /**
  * Initialize pool allocator.
  */
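
For completeness, a hypothetical caller pairing the two signatures above
could look like this (example_use_dma32_pages() and its arguments are
illustrative, not part of the patch). The same dma_address array and struct
device must be passed to ttm_put_pages() so that pages obtained through
dma_alloc_coherent() are returned through dma_free_coherent() rather than
__free_page().

static int example_use_dma32_pages(struct device *dev, unsigned count,
				   dma_addr_t *dma_address)
{
	struct list_head pages;
	int ret;

	INIT_LIST_HEAD(&pages);

	/* dma_address[] receives one bus address per allocated page. */
	ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_DMA32, tt_cached, count,
			    dma_address, dev);
	if (ret != 0)
		return ret;

	/* ... use the pages ... */

	/* Hand the same array and device back so DMA-allocated pages are
	 * freed through the DMA API. */
	ttm_put_pages(&pages, count, TTM_PAGE_FLAG_DMA32, tt_cached,
		      dma_address, dev);
	return 0;
}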