Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gart.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_gart.c	36
1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 65016117d95..a6b0fed7bae 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
-			pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+			if (!rdev->gart.ttm_alloced[p])
+				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			page_base = rdev->gart.pages_addr[p];
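The unbind path above only skips pci_unmap_page() for pages whose mapping was handed in by TTM; the per-page flag it checks is a new array hung off struct radeon_gart, whose definition lives in radeon.h and is not part of this file's diff. A trimmed, approximate view of the fields this file touches, for orientation only (layout and exact types are inferred from their use here, not quoted from the header):

/* Approximate, trimmed view of struct radeon_gart (real definition in
 * radeon.h, not shown in this diff); only the fields referenced by
 * radeon_gart.c are listed, and the types are inferred from their use. */
struct radeon_gart_view {
	unsigned	num_cpu_pages;	/* GART size in CPU pages */
	struct page	**pages;	/* backing pages, from the bind pagelist */
	dma_addr_t	*pages_addr;	/* bus address of each page */
	bool		*ttm_alloced;	/* true when pages_addr[i] came from TTM */
	bool		ready;
};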
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 }
 
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist)
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
 {
 	unsigned t;
 	unsigned p;
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		/* we need to support large memory configurations */
-		/* assume that unbind have already been call on the range */
-		rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+		/* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
+		 * is requested. */
+		if (dma_addr[i] != DMA_ERROR_CODE) {
+			rdev->gart.ttm_alloced[p] = true;
+			rdev->gart.pages_addr[p] = dma_addr[i];
+		} else {
+			/* we need to support large memory configurations */
+			/* assume that unbind have already been call on the range */
+			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
 							0, PAGE_SIZE,
 							PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-			/* FIXME: failed to map page (return -ENOMEM?) */
-			radeon_gart_unbind(rdev, offset, pages);
-			return -ENOMEM;
+			if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+				/* FIXME: failed to map page (return -ENOMEM?) */
+				radeon_gart_unbind(rdev, offset, pages);
+				return -ENOMEM;
+			}
 		}
 		rdev->gart.pages[p] = pagelist[i];
 		page_base = rdev->gart.pages_addr[p];
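With the new dma_addr parameter, DMA_ERROR_CODE doubles as a per-page sentinel meaning "TTM did not pre-map this page, fall back to pci_map_page()". A hedged sketch of what a caller without TTM-provided mappings would have to do; the variable names here are illustrative, not taken from this diff:

/* Hypothetical caller sketch: fill the dma_addr array with DMA_ERROR_CODE
 * so radeon_gart_bind() takes the pci_map_page() fallback for every page. */
dma_addr_t *dma_addrs;
int i, r;

dma_addrs = kmalloc(pages * sizeof(dma_addr_t), GFP_KERNEL);
if (dma_addrs == NULL)
	return -ENOMEM;
for (i = 0; i < pages; i++)
	dma_addrs[i] = DMA_ERROR_CODE;	/* "not mapped by TTM" sentinel */
r = radeon_gart_bind(rdev, offset, pages, pagelist, dma_addrs);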
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
+	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+					 rdev->gart.num_cpu_pages, GFP_KERNEL);
+	if (rdev->gart.ttm_alloced == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
 	/* set GART entry to point to the dummy page by default */
 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
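The ttm_alloced table is sized and zero-initialized exactly like the neighbouring pages/pages_addr allocations, so every page starts out as "not TTM-alloced". Purely as an illustrative alternative, and not what the patch does, the same allocation can be written with kcalloc() to avoid the open-coded multiplication:

/* Illustrative alternative only; the patch keeps the kzalloc(sizeof(bool) *
 * num_cpu_pages) form used by the surrounding allocations. */
rdev->gart.ttm_alloced = kcalloc(rdev->gart.num_cpu_pages,
				 sizeof(bool), GFP_KERNEL);
if (rdev->gart.ttm_alloced == NULL) {
	radeon_gart_fini(rdev);
	return -ENOMEM;
}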
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	rdev->gart.ready = false;
 	kfree(rdev->gart.pages);
 	kfree(rdev->gart.pages_addr);
+	kfree(rdev->gart.ttm_alloced);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
+	rdev->gart.ttm_alloced = NULL;
 }