author	Chris Wilson <chris@chris-wilson.co.uk>	2016-08-15 11:42:18 -0400
committer	Sumit Semwal <sumit.semwal@linaro.org>	2016-08-16 03:01:35 -0400
commit	ae4e46b14bd7a12fb7908425846be7ceb0853bbc (patch)
tree	d34ce6b16f6e83e97b7041874c97f17a5b167428
parent	90844f00049e9f42573fd31d7c32e8fd31d3fd07 (diff)
dma-buf: Wait on the reservation object when sync'ing before CPU access
Rendering operations to the dma-buf are tracked implicitly via the
reservation_object (dmabuf->resv). This is used to allow poll() to
wait upon outstanding rendering (or just query the current status of
rendering). The dma-buf sync ioctl allows userspace to prepare the
dma-buf for CPU access, which should include waiting upon rendering.
(Some drivers may need to do more work to ensure that the dma-buf mmap
is coherent as well as complete.)

v2: Always wait upon the reservation object implicitly. We choose to do
it after the native handler in case it can do so more efficiently.

Testcase: igt/prime_vgem
Testcase: igt/gem_concurrent_blit # *vgem*
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Eric Anholt <eric@anholt.net>
Cc: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: http://patchwork.freedesktop.org/patch/msgid/1471275738-31994-1-git-send-email-chris@chris-wilson.co.uk
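For context, "prepare the dma-buf for CPU access" refers to userspace issuing
DMA_BUF_IOCTL_SYNC on the dma-buf fd around its mmap'ed accesses. A minimal
userspace sketch of that flow follows; the helper name and error handling are
illustrative assumptions, not part of this patch. With this change, the
SYNC_START step also ends up waiting on any fences tracked in dmabuf->resv.

#include <linux/dma-buf.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

/* Illustrative helper: read back a device-rendered dma-buf from the CPU. */
static int read_back_dmabuf(int dmabuf_fd, void *dst, size_t len)
{
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
	};
	void *map;
	int ret;

	map = mmap(NULL, len, PROT_READ, MAP_SHARED, dmabuf_fd, 0);
	if (map == MAP_FAILED)
		return -1;

	/* Begin CPU access; with this patch this also waits on dmabuf->resv fences. */
	ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	if (ret == 0) {
		memcpy(dst, map, len);

		/* End CPU access so subsequent device access is coherent. */
		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	}

	munmap(map, len);
	return ret;
}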
-rw-r--r--	drivers/dma-buf/dma-buf.c	23
1 files changed, 23 insertions, 0 deletions
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ddaee60ae52a..cf04d249a6a4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -586,6 +586,22 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+				      enum dma_data_direction direction)
+{
+	bool write = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_TO_DEVICE);
+	struct reservation_object *resv = dmabuf->resv;
+	long ret;
+
+	/* Wait on any implicit rendering fences */
+	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+						  MAX_SCHEDULE_TIMEOUT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
 
 /**
  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
@@ -608,6 +624,13 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	if (dmabuf->ops->begin_cpu_access)
 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
+	/* Ensure that all fences are waited upon - but we first allow
+	 * the native handler the chance to do so more efficiently if it
+	 * chooses. A double invocation here will be reasonably cheap no-op.
+	 */
+	if (ret == 0)
+		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
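On the in-kernel side, any caller of dma_buf_begin_cpu_access() now gets the
same implicit fence wait before touching the buffer with the CPU. A minimal
sketch of such a caller is below; the function name and page-copy logic are
illustrative assumptions, not from this patch.

#include <linux/dma-buf.h>
#include <linux/string.h>

/* Hypothetical caller: copy the first page of a dma-buf after rendering. */
static int example_copy_first_page(struct dma_buf *dmabuf, void *out)
{
	void *vaddr;
	int ret;

	/* Waits on dmabuf->resv fences, after the exporter's own hook. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(dmabuf, 0);
	if (vaddr) {
		memcpy(out, vaddr, PAGE_SIZE);
		dma_buf_kunmap(dmabuf, 0, vaddr);
	}

	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}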