about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/radeon/radeon_gart.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gart.c')
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 44
1 file changed, 31 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e65b90317fab..a533f52fd163 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,9 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
78 int r; 78 int r;
79 79
80 if (rdev->gart.table.vram.robj == NULL) { 80 if (rdev->gart.table.vram.robj == NULL) {
81 r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, 81 r = radeon_bo_create(rdev, rdev->gart.table_size,
82 true, RADEON_GEM_DOMAIN_VRAM, 82 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
83 &rdev->gart.table.vram.robj); 83 &rdev->gart.table.vram.robj);
84 if (r) { 84 if (r) {
85 return r; 85 return r;
86 } 86 }
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
149 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); 149 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
150 for (i = 0; i < pages; i++, p++) { 150 for (i = 0; i < pages; i++, p++) {
151 if (rdev->gart.pages[p]) { 151 if (rdev->gart.pages[p]) {
152 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], 152 if (!rdev->gart.ttm_alloced[p])
153 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 153 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
154 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
154 rdev->gart.pages[p] = NULL; 155 rdev->gart.pages[p] = NULL;
155 rdev->gart.pages_addr[p] = rdev->dummy_page.addr; 156 rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
156 page_base = rdev->gart.pages_addr[p]; 157 page_base = rdev->gart.pages_addr[p];
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
165} 166}
166 167
167int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, 168int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
168 int pages, struct page **pagelist) 169 int pages, struct page **pagelist, dma_addr_t *dma_addr)
169{ 170{
170 unsigned t; 171 unsigned t;
171 unsigned p; 172 unsigned p;
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
180 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); 181 p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
181 182
182 for (i = 0; i < pages; i++, p++) { 183 for (i = 0; i < pages; i++, p++) {
183 /* we need to support large memory configurations */ 184 /* we reverted the patch using dma_addr in TTM for now but this
184 /* assume that unbind have already been call on the range */ 185 * code stops building on alpha so just comment it out for now */
185 rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i], 186 if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
187 rdev->gart.ttm_alloced[p] = true;
188 rdev->gart.pages_addr[p] = dma_addr[i];
189 } else {
190 /* we need to support large memory configurations */
191 /* assume that unbind have already been call on the range */
192 rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
186 0, PAGE_SIZE, 193 0, PAGE_SIZE,
187 PCI_DMA_BIDIRECTIONAL); 194 PCI_DMA_BIDIRECTIONAL);
188 if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) { 195 if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
189 /* FIXME: failed to map page (return -ENOMEM?) */ 196 /* FIXME: failed to map page (return -ENOMEM?) */
190 radeon_gart_unbind(rdev, offset, pages); 197 radeon_gart_unbind(rdev, offset, pages);
191 return -ENOMEM; 198 return -ENOMEM;
199 }
192 } 200 }
193 rdev->gart.pages[p] = pagelist[i]; 201 rdev->gart.pages[p] = pagelist[i];
194 page_base = rdev->gart.pages_addr[p]; 202 page_base = rdev->gart.pages_addr[p];
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
251 radeon_gart_fini(rdev); 259 radeon_gart_fini(rdev);
252 return -ENOMEM; 260 return -ENOMEM;
253 } 261 }
262 rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
263 rdev->gart.num_cpu_pages, GFP_KERNEL);
264 if (rdev->gart.ttm_alloced == NULL) {
265 radeon_gart_fini(rdev);
266 return -ENOMEM;
267 }
254 /* set GART entry to point to the dummy page by default */ 268 /* set GART entry to point to the dummy page by default */
255 for (i = 0; i < rdev->gart.num_cpu_pages; i++) { 269 for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
256 rdev->gart.pages_addr[i] = rdev->dummy_page.addr; 270 rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,10 @@ void radeon_gart_fini(struct radeon_device *rdev)
267 rdev->gart.ready = false; 281 rdev->gart.ready = false;
268 kfree(rdev->gart.pages); 282 kfree(rdev->gart.pages);
269 kfree(rdev->gart.pages_addr); 283 kfree(rdev->gart.pages_addr);
284 kfree(rdev->gart.ttm_alloced);
270 rdev->gart.pages = NULL; 285 rdev->gart.pages = NULL;
271 rdev->gart.pages_addr = NULL; 286 rdev->gart.pages_addr = NULL;
287 rdev->gart.ttm_alloced = NULL;
288
289 radeon_dummy_page_fini(rdev);
272} 290}