aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2016-03-18 16:02:39 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2016-03-19 06:03:49 -0400
commit18b862dcd57a3e23e34c8cd1e939f68548c1209a (patch)
tree2cae837505f27d327f0a51751c3d79e88148e8aa
parentb47ff7e6a534bc285eb98f90eadd2a56e2e54056 (diff)
dma-buf, drm, ion: Propagate error code from dma_buf_start_cpu_access()
Drivers, especially i915.ko, can fail during the initial migration of a dma-buf for CPU access. However, the error code from the driver was not being propagated back to ioctl and so userspace was blissfully ignorant of the failure. Rendering corruption ensues. Whilst fixing the ioctl to return the error code from dma_buf_start_cpu_access(), also do the same for dma_buf_end_cpu_access(). For most drivers, dma_buf_end_cpu_access() cannot fail. i915.ko however, as most drivers would, wants to avoid being uninterruptible (as would be required to guarantee no failure when flushing the buffer to the device). As userspace already has to handle errors from the SYNC_IOCTL, take advantage of this to be able to restart the syscall across signals. This fixes a coherency issue for i915.ko as well as reducing the uninterruptible hold upon its BKL, the struct_mutex. Fixes commit c11e391da2a8fe973c3c2398452000bed505851e Author: Daniel Vetter <daniel.vetter@ffwll.ch> Date: Thu Feb 11 20:04:51 2016 -0200 dma-buf: Add ioctls to allow userspace to flush Testcase: igt/gem_concurrent_blit/*dmabuf*interruptible Testcase: igt/prime_mmap_coherency/ioctl-errors Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Tiago Vignatti <tiago.vignatti@intel.com> Cc: Stéphane Marchesin <marcheu@chromium.org> Cc: David Herrmann <dh.herrmann@gmail.com> Cc: Sumit Semwal <sumit.semwal@linaro.org> Cc: Daniel Vetter <daniel.vetter@intel.com> CC: linux-media@vger.kernel.org Cc: dri-devel@lists.freedesktop.org Cc: linaro-mm-sig@lists.linaro.org Cc: intel-gfx@lists.freedesktop.org Cc: devel@driverdev.osuosl.org Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Link: http://patchwork.freedesktop.org/patch/msgid/1458331359-2634-1-git-send-email-chris@chris-wilson.co.uk
-rw-r--r--drivers/dma-buf/dma-buf.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c15
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c5
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c4
-rw-r--r--drivers/staging/android/ion/ion.c6
-rw-r--r--include/linux/dma-buf.h6
6 files changed, 28 insertions, 25 deletions
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 9810d1df0691..774a60f4309a 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -259,6 +259,7 @@ static long dma_buf_ioctl(struct file *file,
259 struct dma_buf *dmabuf; 259 struct dma_buf *dmabuf;
260 struct dma_buf_sync sync; 260 struct dma_buf_sync sync;
261 enum dma_data_direction direction; 261 enum dma_data_direction direction;
262 int ret;
262 263
263 dmabuf = file->private_data; 264 dmabuf = file->private_data;
264 265
@@ -285,11 +286,11 @@ static long dma_buf_ioctl(struct file *file,
285 } 286 }
286 287
287 if (sync.flags & DMA_BUF_SYNC_END) 288 if (sync.flags & DMA_BUF_SYNC_END)
288 dma_buf_end_cpu_access(dmabuf, direction); 289 ret = dma_buf_end_cpu_access(dmabuf, direction);
289 else 290 else
290 dma_buf_begin_cpu_access(dmabuf, direction); 291 ret = dma_buf_begin_cpu_access(dmabuf, direction);
291 292
292 return 0; 293 return ret;
293 default: 294 default:
294 return -ENOTTY; 295 return -ENOTTY;
295 } 296 }
@@ -613,13 +614,17 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
613 * 614 *
614 * This call must always succeed. 615 * This call must always succeed.
615 */ 616 */
616void dma_buf_end_cpu_access(struct dma_buf *dmabuf, 617int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
617 enum dma_data_direction direction) 618 enum dma_data_direction direction)
618{ 619{
620 int ret = 0;
621
619 WARN_ON(!dmabuf); 622 WARN_ON(!dmabuf);
620 623
621 if (dmabuf->ops->end_cpu_access) 624 if (dmabuf->ops->end_cpu_access)
622 dmabuf->ops->end_cpu_access(dmabuf, direction); 625 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
626
627 return ret;
623} 628}
624EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); 629EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
625 630
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 1f3eef6fb345..0506016e18e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -228,25 +228,20 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
228 return ret; 228 return ret;
229} 229}
230 230
231static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) 231static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
232{ 232{
233 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); 233 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
234 struct drm_device *dev = obj->base.dev; 234 struct drm_device *dev = obj->base.dev;
235 struct drm_i915_private *dev_priv = to_i915(dev);
236 bool was_interruptible;
237 int ret; 235 int ret;
238 236
239 mutex_lock(&dev->struct_mutex); 237 ret = i915_mutex_lock_interruptible(dev);
240 was_interruptible = dev_priv->mm.interruptible; 238 if (ret)
241 dev_priv->mm.interruptible = false; 239 return ret;
242 240
243 ret = i915_gem_object_set_to_gtt_domain(obj, false); 241 ret = i915_gem_object_set_to_gtt_domain(obj, false);
244
245 dev_priv->mm.interruptible = was_interruptible;
246 mutex_unlock(&dev->struct_mutex); 242 mutex_unlock(&dev->struct_mutex);
247 243
248 if (unlikely(ret)) 244 return ret;
249 DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
250} 245}
251 246
252static const struct dma_buf_ops i915_dmabuf_ops = { 247static const struct dma_buf_ops i915_dmabuf_ops = {
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index aebae1c2dab2..c75249de2984 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -93,11 +93,12 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
93 return omap_gem_get_pages(obj, &pages, true); 93 return omap_gem_get_pages(obj, &pages, true);
94} 94}
95 95
96static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, 96static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
97 enum dma_data_direction dir) 97 enum dma_data_direction dir)
98{ 98{
99 struct drm_gem_object *obj = buffer->priv; 99 struct drm_gem_object *obj = buffer->priv;
100 omap_gem_put_pages(obj); 100 omap_gem_put_pages(obj);
101 return 0;
101} 102}
102 103
103 104
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index c427499133d6..33239a2b264a 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -423,8 +423,8 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
423 } 423 }
424 424
425 if (ufb->obj->base.import_attach) { 425 if (ufb->obj->base.import_attach) {
426 dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, 426 ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
427 DMA_FROM_DEVICE); 427 DMA_FROM_DEVICE);
428 } 428 }
429 429
430 unlock: 430 unlock:
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 0754a37c9674..49436b4510f4 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1075,14 +1075,16 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1075 return PTR_ERR_OR_ZERO(vaddr); 1075 return PTR_ERR_OR_ZERO(vaddr);
1076} 1076}
1077 1077
1078static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, 1078static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1079 enum dma_data_direction direction) 1079 enum dma_data_direction direction)
1080{ 1080{
1081 struct ion_buffer *buffer = dmabuf->priv; 1081 struct ion_buffer *buffer = dmabuf->priv;
1082 1082
1083 mutex_lock(&buffer->lock); 1083 mutex_lock(&buffer->lock);
1084 ion_buffer_kmap_put(buffer); 1084 ion_buffer_kmap_put(buffer);
1085 mutex_unlock(&buffer->lock); 1085 mutex_unlock(&buffer->lock);
1086
1087 return 0;
1086} 1088}
1087 1089
1088static struct dma_buf_ops dma_buf_ops = { 1090static struct dma_buf_ops dma_buf_ops = {
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 532108ea0c1c..3fe90d494edb 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -94,7 +94,7 @@ struct dma_buf_ops {
94 void (*release)(struct dma_buf *); 94 void (*release)(struct dma_buf *);
95 95
96 int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); 96 int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
97 void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); 97 int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
98 void *(*kmap_atomic)(struct dma_buf *, unsigned long); 98 void *(*kmap_atomic)(struct dma_buf *, unsigned long);
99 void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); 99 void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
100 void *(*kmap)(struct dma_buf *, unsigned long); 100 void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,8 +224,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
224 enum dma_data_direction); 224 enum dma_data_direction);
225int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, 225int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
226 enum dma_data_direction dir); 226 enum dma_data_direction dir);
227void dma_buf_end_cpu_access(struct dma_buf *dma_buf, 227int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
228 enum dma_data_direction dir); 228 enum dma_data_direction dir);
229void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); 229void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
230void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); 230void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
231void *dma_buf_kmap(struct dma_buf *, unsigned long); 231void *dma_buf_kmap(struct dma_buf *, unsigned long);