author		Alexandre Courbot <gnurou@gmail.com>	2016-07-13 02:29:35 -0400
committer	Ben Skeggs <bskeggs@redhat.com>		2016-07-13 21:55:18 -0400
commit		36a471baa851359b3ca5a96e752c7c1a6266aef0 (patch)
tree		664a3321a4bcaf3a533771e838dc8b327815b86e
parent		0e67bed2c765ff0fdaec62c963881f5416fe3692 (diff)
drm/nouveau/ttm: remove special handling of coherent objects
TTM-allocated coherent objects were populated using the DMA API and accessed
through the mapping it returned in order to work around coherency issues.
These issues seem to have been solved, so remove this special case and use
the regular kernel mapping functions.

Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
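As context for the change, a minimal sketch (not part of the patch) of the uniform access path the driver is left with once the DMA-API special case is gone: the buffer is mapped with ttm_bo_kmap(), the CPU address is taken from the kmap object, and indexed accesses become plain pointer offsets. The helper name example_bo_wr32() is illustrative, the exact headers are indicative, and reservation/locking is assumed to be handled by the caller.

#include <linux/io.h>			/* io accessors; exact header indicative */
#include <drm/ttm/ttm_bo_api.h>		/* ttm_bo_kmap(), ttm_kmap_obj_virtual() */

/*
 * Illustrative only: map a TTM buffer object and write one 32-bit word at
 * 'index'.  With the force_coherent path removed, the kmap virtual address
 * is the only CPU mapping, so indexing is a plain pointer offset.  The
 * caller is assumed to hold the buffer reservation.
 */
static int example_bo_wr32(struct ttm_buffer_object *bo, unsigned index, u32 val)
{
	struct ttm_bo_kmap_obj kmap;
	bool is_iomem;
	u32 *mem;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &kmap);
	if (ret)
		return ret;

	mem = ttm_kmap_obj_virtual(&kmap, &is_iomem);
	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;

	ttm_bo_kunmap(&kmap);
	return 0;
}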
 drivers/gpu/drm/nouveau/nouveau_bo.c | 61 +++++--------------------------------------------------------------
 1 file changed, 5 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index d836c44b6cf0..711592798fe8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -424,13 +424,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 	if (ret)
 		return ret;
 
-	/*
-	 * TTM buffers allocated using the DMA API already have a mapping, let's
-	 * use it instead.
-	 */
-	if (!nvbo->force_coherent)
-		ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
-				  &nvbo->kmap);
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
 
 	ttm_bo_unreserve(&nvbo->bo);
 	return ret;
@@ -442,12 +436,7 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
 	if (!nvbo)
 		return;
 
-	/*
-	 * TTM buffers allocated using the DMA API already had a coherent
-	 * mapping which we used, no need to unmap.
-	 */
-	if (!nvbo->force_coherent)
-		ttm_bo_kunmap(&nvbo->kmap);
+	ttm_bo_kunmap(&nvbo->kmap);
 }
 
 void
@@ -506,35 +495,13 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 	return 0;
 }
 
-static inline void *
-_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
-{
-	struct ttm_dma_tt *dma_tt;
-	u8 *m = mem;
-
-	index *= sz;
-
-	if (m) {
-		/* kmap'd address, return the corresponding offset */
-		m += index;
-	} else {
-		/* DMA-API mapping, lookup the right address */
-		dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
-		m = dma_tt->cpu_address[index / PAGE_SIZE];
-		m += index % PAGE_SIZE;
-	}
-
-	return m;
-}
-#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
-
 void
 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
 {
 	bool is_iomem;
 	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 
-	mem = nouveau_bo_mem_index(nvbo, index, mem);
+	mem += index;
 
 	if (is_iomem)
 		iowrite16_native(val, (void __force __iomem *)mem);
@@ -548,7 +515,7 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
 	bool is_iomem;
 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 
-	mem = nouveau_bo_mem_index(nvbo, index, mem);
+	mem += index;
 
 	if (is_iomem)
 		return ioread32_native((void __force __iomem *)mem);
@@ -562,7 +529,7 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 	bool is_iomem;
 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 
-	mem = nouveau_bo_mem_index(nvbo, index, mem);
+	mem += index;
 
 	if (is_iomem)
 		iowrite32_native(val, (void __force __iomem *)mem);
@@ -1492,14 +1459,6 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	dev = drm->dev;
 	pdev = device->dev;
 
-	/*
-	 * Objects matching this condition have been marked as force_coherent,
-	 * so use the DMA API for them.
-	 */
-	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
-	    ttm->caching_state == tt_uncached)
-		return ttm_dma_populate(ttm_dma, dev->dev);
-
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
 		return ttm_agp_tt_populate(ttm);
@@ -1557,16 +1516,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	dev = drm->dev;
 	pdev = device->dev;
 
-	/*
-	 * Objects matching this condition have been marked as force_coherent,
-	 * so use the DMA API for them.
-	 */
-	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
-	    ttm->caching_state == tt_uncached) {
-		ttm_dma_unpopulate(ttm_dma, dev->dev);
-		return;
-	}
-
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
 		ttm_agp_tt_unpopulate(ttm);