author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2014-09-04 03:47:23 -0400
committer  Dave Airlie <airlied@redhat.com>                    2014-09-23 01:00:26 -0400
commit     94318d50ffc84a1ebaf1a83a0a56bbbaf415bacf
tree       33615cd5feabb051b2f8f54614a297d8e4ccd2af /drivers/gpu/drm/ttm
parent     6bd3110ce6e50fb15c975b26f068d606f4434431
drm/ttm: Clean usage of ttm_io_prot() with TTM_PL_FLAG_CACHED
Today, most callers of ttm_io_prot() check TTM_PL_FLAG_CACHED before calling it, since on some archs it will unconditionally create non-cached mappings. But not all callers do, which is incorrect as far as I can tell. Instead, move that check inside ttm_io_prot() itself for all archs, and make powerpc use the same implementation as ia64 and arm.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>
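For illustration only (not part of this patch): a minimal caller sketch of what the change buys. The helper name example_map_prot() is hypothetical, and the headers are the usual TTM ones assumed to be available in this tree.

/*
 * Hypothetical caller sketch: with the TTM_PL_FLAG_CACHED check moved
 * inside ttm_io_prot(), a caller no longer needs to special-case
 * cached placements itself.
 */
#include <drm/ttm/ttm_bo_driver.h>	/* ttm_io_prot() */
#include <drm/ttm/ttm_placement.h>	/* TTM_PL_FLAG_* */

static pgprot_t example_map_prot(uint32_t placement, pgprot_t base)
{
	/*
	 * Before this patch a caller had to write:
	 *   prot = (placement & TTM_PL_FLAG_CACHED) ?
	 *          base : ttm_io_prot(placement, base);
	 * Now a single unconditional call suffices; cached placements
	 * are returned unchanged.
	 */
	return ttm_io_prot(placement, base);
}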
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  20
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c     5
2 files changed, 9 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 824af90cbe31..882cccdad272 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -480,28 +480,24 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 
 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 {
+	/* Cached mappings need no adjustment */
+	if (caching_flags & TTM_PL_FLAG_CACHED)
+		return tmp;
+
 #if defined(__i386__) || defined(__x86_64__)
 	if (caching_flags & TTM_PL_FLAG_WC)
 		tmp = pgprot_writecombine(tmp);
 	else if (boot_cpu_data.x86 > 3)
 		tmp = pgprot_noncached(tmp);
-
-#elif defined(__powerpc__)
-	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
-		pgprot_val(tmp) |= _PAGE_NO_CACHE;
-		if (caching_flags & TTM_PL_FLAG_UNCACHED)
-			pgprot_val(tmp) |= _PAGE_GUARDED;
-	}
 #endif
-#if defined(__ia64__) || defined(__arm__)
+#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
 	if (caching_flags & TTM_PL_FLAG_WC)
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
 #endif
 #if defined(__sparc__) || defined(__mips__)
-	if (!(caching_flags & TTM_PL_FLAG_CACHED))
-		tmp = pgprot_noncached(tmp);
+	tmp = pgprot_noncached(tmp);
 #endif
 	return tmp;
 }
@@ -560,9 +556,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 	 * We need to use vmap to get the desired page protection
 	 * or to make the buffer object look contiguous.
 	 */
-	prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-		PAGE_KERNEL :
-		ttm_io_prot(mem->placement, PAGE_KERNEL);
+	prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 	map->bo_kmap_type = ttm_bo_map_vmap;
 	map->virtual = vmap(ttm->pages + start_page, num_pages,
 			    0, prot);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index d05437f219e9..8fb7213277cc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -197,9 +197,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 						cvma.vm_page_prot);
 	} else {
 		ttm = bo->ttm;
-		if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
-			cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
-							cvma.vm_page_prot);
+		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+						cvma.vm_page_prot);
 
 		/* Allocate all page at once, most common usage */
 		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {