about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/nouveau/nouveau_fence.c
diff options
context:
space:
mode:
authorBen Skeggs <bskeggs@redhat.com>2011-02-01 22:21:57 -0500
committerBen Skeggs <bskeggs@redhat.com>2011-02-24 15:44:27 -0500
commite3b7ed5e9972dd4878a5390fd3147a973cbe2d05 (patch)
treed90da211e9d233b48a0f79368c4d07ec136a1c43 /drivers/gpu/drm/nouveau/nouveau_fence.c
parentfc772ec48d7d57ef5fb3fe171eae467d3d440bc4 (diff)
drm/nv84: use vm offsets for semaphores
We may well be making more use of semaphores in the future; having the entire VM available makes requiring DMA objects for each and every semaphore block unnecessary.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_fence.c')
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index d820ad29dfe1..7eef3a11aaa1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -348,6 +348,9 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
348 OUT_RING (chan, 1); 348 OUT_RING (chan, 1);
349 } else 349 } else
350 if (dev_priv->chipset < 0xc0) { 350 if (dev_priv->chipset < 0xc0) {
351 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
352 u64 offset = vma->offset + sema->mem->start;
353
351 /* 354 /*
352 * NV50 tries to be too smart and context-switch 355 * NV50 tries to be too smart and context-switch
353 * between semaphores instead of doing a "first come, 356 * between semaphores instead of doing a "first come,
@@ -372,8 +375,8 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
372 BEGIN_RING(chan, NvSubSw, 0x0080, 1); 375 BEGIN_RING(chan, NvSubSw, 0x0080, 1);
373 OUT_RING (chan, 0); 376 OUT_RING (chan, 0);
374 BEGIN_RING(chan, NvSubSw, 0x0010, 4); 377 BEGIN_RING(chan, NvSubSw, 0x0010, 4);
375 OUT_RING (chan, upper_32_bits(sema->mem->start)); 378 OUT_RING (chan, upper_32_bits(offset));
376 OUT_RING (chan, lower_32_bits(sema->mem->start)); 379 OUT_RING (chan, lower_32_bits(offset));
377 OUT_RING (chan, 1); 380 OUT_RING (chan, 1);
378 OUT_RING (chan, 1); /* ACQUIRE_EQ */ 381 OUT_RING (chan, 1); /* ACQUIRE_EQ */
379 } else { 382 } else {
@@ -424,6 +427,9 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
424 } 427 }
425 } else 428 } else
426 if (dev_priv->chipset < 0xc0) { 429 if (dev_priv->chipset < 0xc0) {
430 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
431 u64 offset = vma->offset + sema->mem->start;
432
427 /* 433 /*
428 * Emits release and forces the card to context switch right 434 * Emits release and forces the card to context switch right
429 * afterwards, there may be another channel waiting for the 435 * afterwards, there may be another channel waiting for the
@@ -435,8 +441,8 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
435 return ret; 441 return ret;
436 442
437 BEGIN_RING(chan, NvSubSw, 0x0010, 4); 443 BEGIN_RING(chan, NvSubSw, 0x0010, 4);
438 OUT_RING (chan, upper_32_bits(sema->mem->start)); 444 OUT_RING (chan, upper_32_bits(offset));
439 OUT_RING (chan, lower_32_bits(sema->mem->start)); 445 OUT_RING (chan, lower_32_bits(offset));
440 OUT_RING (chan, 1); 446 OUT_RING (chan, 1);
441 OUT_RING (chan, 2); /* RELEASE */ 447 OUT_RING (chan, 2); /* RELEASE */
442 BEGIN_RING(chan, NvSubSw, 0x0080, 1); 448 BEGIN_RING(chan, NvSubSw, 0x0080, 1);
@@ -545,7 +551,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
545 OUT_RING(chan, NvSw); 551 OUT_RING(chan, NvSw);
546 552
547 /* Create a DMA object for the shared cross-channel sync area. */ 553 /* Create a DMA object for the shared cross-channel sync area. */
548 if (USE_SEMA(dev)) { 554 if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
549 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem; 555 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
550 556
551 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 557 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
@@ -565,6 +571,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
565 return ret; 571 return ret;
566 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1); 572 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
567 OUT_RING(chan, NvSema); 573 OUT_RING(chan, NvSema);
574 } else {
575 ret = RING_SPACE(chan, 2);
576 if (ret)
577 return ret;
578 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
579 OUT_RING (chan, chan->vram_handle); /* whole VM */
568 } 580 }
569 581
570 FIRE_RING(chan); 582 FIRE_RING(chan);