-rw-r--r--	drivers/gpu/drm/radeon/radeon.h		 4
-rw-r--r--	drivers/gpu/drm/radeon/radeon_gart.c	36
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ttm.c	 5
3 files changed, 33 insertions(+), 12 deletions(-)
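This patch teaches radeon's GART code to reuse DMA mappings that TTM has already created instead of unconditionally calling pci_map_page() on every page. radeon_gart_bind() gains a dma_addr array argument; any entry other than DMA_ERROR_CODE is adopted directly as the page's bus address and noted in a new per-page ttm_alloced flag array, and radeon_gart_unbind() consults that flag so it never pci_unmap_page()s a mapping it does not own.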
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 73f600d39ad4..c9bbab921e61 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -317,6 +317,7 @@ struct radeon_gart {
 	union radeon_gart_table		table;
 	struct page			**pages;
 	dma_addr_t			*pages_addr;
+	bool				*ttm_alloced;
 	bool				ready;
 };
 
@@ -329,7 +330,8 @@ void radeon_gart_fini(struct radeon_device *rdev);
 void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 			int pages);
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist);
+		     int pages, struct page **pagelist,
+		     dma_addr_t *dma_addr);
 
 
 /*
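The contract implied by the new prototype: dma_addr parallels pagelist, one entry per CPU page, and a slot holding DMA_ERROR_CODE means "TTM did not map this page; radeon_gart_bind() must map it itself". A minimal caller-side sketch under that assumption (num_pages, pagelist, and the kcalloc bookkeeping here are hypothetical illustration; only radeon_gart_bind() and the DMA_ERROR_CODE sentinel come from the patch):

	dma_addr_t *dma_addr;
	int i, r;

	/* One slot per CPU page, parallel to pagelist[]. */
	dma_addr = kcalloc(num_pages, sizeof(dma_addr_t), GFP_KERNEL);
	if (!dma_addr)
		return -ENOMEM;
	/* Default every slot to "not mapped by TTM"; TTM overwrites the
	 * entries for pages it has already DMA-mapped. */
	for (i = 0; i < num_pages; i++)
		dma_addr[i] = DMA_ERROR_CODE;
	r = radeon_gart_bind(rdev, offset, num_pages, pagelist, dma_addr);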
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e65b90317fab..5214bc29d9b3 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
-			pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+			if (!rdev->gart.ttm_alloced[p])
+				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			page_base = rdev->gart.pages_addr[p];
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 }
 
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist)
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
 {
 	unsigned t;
 	unsigned p;
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		/* we need to support large memory configurations */
-		/* assume that unbind have already been call on the range */
-		rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+		/* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
+		 * is requested. */
+		if (dma_addr[i] != DMA_ERROR_CODE) {
+			rdev->gart.ttm_alloced[p] = true;
+			rdev->gart.pages_addr[p] = dma_addr[i];
+		} else {
+			/* we need to support large memory configurations */
+			/* assume that unbind have already been call on the range */
+			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
 							0, PAGE_SIZE,
 							PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-			/* FIXME: failed to map page (return -ENOMEM?) */
-			radeon_gart_unbind(rdev, offset, pages);
-			return -ENOMEM;
+			if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+				/* FIXME: failed to map page (return -ENOMEM?) */
+				radeon_gart_unbind(rdev, offset, pages);
+				return -ENOMEM;
+			}
 		}
 		rdev->gart.pages[p] = pagelist[i];
 		page_base = rdev->gart.pages_addr[p];
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
+	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+					 rdev->gart.num_cpu_pages, GFP_KERNEL);
+	if (rdev->gart.ttm_alloced == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
 	/* set GART entry to point to the dummy page by default */
 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	rdev->gart.ready = false;
 	kfree(rdev->gart.pages);
 	kfree(rdev->gart.pages_addr);
+	kfree(rdev->gart.ttm_alloced);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
+	rdev->gart.ttm_alloced = NULL;
 }
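A design note on the bookkeeping above: ttm_alloced is sized per CPU page, exactly like pages and pages_addr, so teardown stays symmetric; mappings radeon created are undone by radeon, while TTM-owned mappings are left for TTM to release. As an editorial aside, the kzalloc(sizeof(bool) * ...) allocation could equivalently use the overflow-checked kcalloc idiom (illustration only, not what the patch does):

	rdev->gart.ttm_alloced = kcalloc(rdev->gart.num_cpu_pages,
					 sizeof(bool), GFP_KERNEL);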
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6f156e9d3f31..ca045058e498 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -647,6 +647,7 @@ struct radeon_ttm_backend {
 	unsigned long			num_pages;
 	struct page			**pages;
 	struct page			*dummy_read_page;
+	dma_addr_t			*dma_addrs;
 	bool				populated;
 	bool				bound;
 	unsigned			offset;
@@ -662,6 +663,7 @@ static int radeon_ttm_backend_populate(struct ttm_backend *backend,
 
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->pages = pages;
+	gtt->dma_addrs = dma_addrs;
 	gtt->num_pages = num_pages;
 	gtt->dummy_read_page = dummy_read_page;
 	gtt->populated = true;
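The dma_addrs stored here is a parameter this hunk assumes radeon_ttm_backend_populate() now receives; it is threaded down from TTM core by a companion patch in the same series that widens the populate hook. Roughly (a sketch of that companion change, not part of this diff):

	struct ttm_backend_func {
		int (*populate)(struct ttm_backend *backend,
				unsigned long num_pages, struct page **pages,
				struct page *dummy_read_page,
				dma_addr_t *dma_addrs);	/* new argument */
		/* clear/bind/unbind/destroy hooks unchanged */
	};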
@@ -674,6 +676,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend)
 
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->pages = NULL;
+	gtt->dma_addrs = NULL;
 	gtt->num_pages = 0;
 	gtt->dummy_read_page = NULL;
 	gtt->populated = false;
@@ -694,7 +697,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
 			gtt->num_pages, bo_mem, backend);
 	}
 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
-			     gtt->num_pages, gtt->pages);
+			     gtt->num_pages, gtt->pages, gtt->dma_addrs);
 	if (r) {
 		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
 			  gtt->num_pages, gtt->offset);
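Taken together, the TTM-path flow becomes: populate() hands the backend a per-page DMA address array; bind() forwards it to radeon_gart_bind(), which adopts every address TTM already mapped (marking it ttm_alloced) and pci_map_page()s the rest; unbind() then unmaps only the pages radeon mapped itself.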