author	Thierry Reding <treding@nvidia.com>	2014-12-16 10:35:26 -0500
committer	Thierry Reding <treding@nvidia.com>	2014-12-17 08:27:37 -0500
commit	a04251fc94b58ec25476e57986dfec727b812c22 (patch)
tree	b0f5e99c969ab2ce782030e918ac07e6ebf17ef9 /drivers/gpu
parent	6b59cc1c86e90cccf8fb0b4dee5fbc226bb82d3e (diff)
drm/tegra: gem: Flush buffer objects upon allocation
Buffers obtained via shmem may still have associated cachelines. If they
aren't properly flushed, they may cause framebuffer corruption if the cache
gets flushed after the application has drawn to it.

Signed-off-by: Thierry Reding <treding@nvidia.com>
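In isolation, the technique the patch applies can be sketched roughly as
follows. This is an illustrative fragment, not driver code: the helper name
example_flush_pages() is made up, and it assumes (as the patch's own comment
notes) that the DMA API is not backed by an IOMMU on this path, so mapping
amounts to cache maintenance only.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <drm/drmP.h>

/*
 * Illustrative sketch: build an SG table for already-allocated shmem
 * pages and call dma_map_sg() purely for its cache maintenance side
 * effect. example_flush_pages() is a hypothetical name, not part of
 * the driver.
 */
static struct sg_table *example_flush_pages(struct device *dev,
					    struct page **pages,
					    unsigned int num_pages)
{
	struct scatterlist *s;
	struct sg_table *sgt;
	unsigned int i;

	sgt = drm_prime_pages_to_sg(pages, num_pages);
	if (IS_ERR(sgt))
		return sgt;

	/* Without an IOMMU, the DMA address is just the physical address. */
	for_each_sg(sgt->sgl, s, sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	/* DMA_TO_DEVICE writes any dirty cachelines back to memory. */
	if (dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
		sg_free_table(sgt);
		kfree(sgt);
		return ERR_PTR(-ENOMEM);
	}

	return sgt;
}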
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/tegra/gem.c | 36
1 file changed, 32 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index da32086cbeaf..c888bed4036f 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -219,19 +219,47 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
 static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
 			      size_t size)
 {
+	struct scatterlist *s;
+	struct sg_table *sgt;
+	unsigned int i;
+
 	bo->pages = drm_gem_get_pages(&bo->gem);
 	if (IS_ERR(bo->pages))
 		return PTR_ERR(bo->pages);
 
 	bo->num_pages = size >> PAGE_SHIFT;
 
-	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
-	if (IS_ERR(bo->sgt)) {
-		drm_gem_put_pages(&bo->gem, bo->pages, false, false);
-		return PTR_ERR(bo->sgt);
+	sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+	if (IS_ERR(sgt))
+		goto put_pages;
+
+	/*
+	 * Fake up the SG table so that dma_map_sg() can be used to flush the
+	 * pages associated with it. Note that this relies on the fact that
+	 * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
+	 * only cache maintenance.
+	 *
+	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
+	 * without relying on symbols that are not exported.
+	 */
+	for_each_sg(sgt->sgl, s, sgt->nents, i)
+		sg_dma_address(s) = sg_phys(s);
+
+	if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
+		sgt = ERR_PTR(-ENOMEM);
+		goto release_sgt;
 	}
 
+	bo->sgt = sgt;
+
 	return 0;
+
+release_sgt:
+	sg_free_table(sgt);
+	kfree(sgt);
+put_pages:
+	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+	return PTR_ERR(sgt);
 }
 
 static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
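For symmetry, a teardown for a buffer set up this way would mirror the error
path above. The sketch below is illustrative only (the driver's actual free
path at this commit may differ, and example_put_pages() is a hypothetical
name); it passes the dirty/accessed flags as true since the pages may have
been written before release.

/*
 * Illustrative mirror of the error path above; not necessarily the
 * driver's actual free path at this commit.
 */
static void example_put_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	/* Undo the cache-maintenance-only mapping made at allocation time. */
	dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, DMA_TO_DEVICE);

	sg_free_table(bo->sgt);
	kfree(bo->sgt);

	/* Mark the pages dirty and accessed before returning them to shmem. */
	drm_gem_put_pages(&bo->gem, bo->pages, true, true);
}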