Diffstat (limited to 'drivers/gpu/drm/tegra/gem.c')
-rw-r--r--  drivers/gpu/drm/tegra/gem.c  52
1 file changed, 40 insertions, 12 deletions
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index da32086cbeaf..8777b7f75791 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
 	}
 }
 
-static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
-			      size_t size)
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
 {
+	struct scatterlist *s;
+	struct sg_table *sgt;
+	unsigned int i;
+
 	bo->pages = drm_gem_get_pages(&bo->gem);
 	if (IS_ERR(bo->pages))
 		return PTR_ERR(bo->pages);
 
-	bo->num_pages = size >> PAGE_SHIFT;
+	bo->num_pages = bo->gem.size >> PAGE_SHIFT;
 
-	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
-	if (IS_ERR(bo->sgt)) {
-		drm_gem_put_pages(&bo->gem, bo->pages, false, false);
-		return PTR_ERR(bo->sgt);
+	sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+	if (IS_ERR(sgt))
+		goto put_pages;
+
+	/*
+	 * Fake up the SG table so that dma_map_sg() can be used to flush the
+	 * pages associated with it. Note that this relies on the fact that
+	 * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
+	 * only cache maintenance.
+	 *
+	 * TODO: Replace this by drm_clflash_sg() once it can be implemented
+	 * without relying on symbols that are not exported.
+	 */
+	for_each_sg(sgt->sgl, s, sgt->nents, i)
+		sg_dma_address(s) = sg_phys(s);
+
+	if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
+		sgt = ERR_PTR(-ENOMEM);
+		goto release_sgt;
 	}
 
+	bo->sgt = sgt;
+
 	return 0;
+
+release_sgt:
+	sg_free_table(sgt);
+	kfree(sgt);
+put_pages:
+	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+	return PTR_ERR(sgt);
 }
 
-static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
-			  size_t size)
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
 {
 	struct tegra_drm *tegra = drm->dev_private;
 	int err;
 
 	if (tegra->domain) {
-		err = tegra_bo_get_pages(drm, bo, size);
+		err = tegra_bo_get_pages(drm, bo);
 		if (err < 0)
 			return err;
 
@@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
 			return err;
 		}
 	} else {
+		size_t size = bo->gem.size;
+
 		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
 						   GFP_KERNEL | __GFP_NOWARN);
 		if (!bo->vaddr) {
@@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
 	if (IS_ERR(bo))
 		return bo;
 
-	err = tegra_bo_alloc(drm, bo, size);
+	err = tegra_bo_alloc(drm, bo);
 	if (err < 0)
 		goto release;
 
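
For reference, the cache-maintenance trick the new tegra_bo_get_pages() relies on can be shown with a small standalone sketch. This is not part of the patch: the helper name flush_pages_for_device() and the use of sg_alloc_table_from_pages() are illustrative assumptions. It shows the same idea as the hunk above: build an SG table over a set of pages, fake up the DMA addresses, and let dma_map_sg() perform the cache maintenance, which only works because the DMA API does not go through an IOMMU here.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Illustrative sketch only, not driver code: flush a set of pages to
 * memory by building a scatterlist and using dma_map_sg() purely for
 * cache maintenance, mirroring the trick in tegra_bo_get_pages().
 */
static int flush_pages_for_device(struct device *dev, struct page **pages,
				  unsigned int num_pages)
{
	struct sg_table sgt;
	struct scatterlist *s;
	unsigned int i;
	int err;

	err = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
					(unsigned long)num_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (err < 0)
		return err;

	/* fake up DMA addresses so the table is usable without an IOMMU */
	for_each_sg(sgt.sgl, s, sgt.nents, i)
		sg_dma_address(s) = sg_phys(s);

	/* dma_map_sg() returns 0 on failure; for DMA_TO_DEVICE it cleans the CPU caches */
	if (dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_TO_DEVICE) == 0) {
		err = -ENOMEM;
		goto free_table;
	}

	dma_unmap_sg(dev, sgt.sgl, sgt.nents, DMA_TO_DEVICE);

free_table:
	sg_free_table(&sgt);
	return err;
}

Unlike this throwaway helper, the patch keeps the mapped table in bo->sgt instead of unmapping and freeing it, since the buffer object's pages are still needed afterwards.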