diff options
| author | Dave Airlie <airlied@redhat.com> | 2016-03-23 18:41:59 -0400 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2016-03-23 18:41:59 -0400 |
| commit | 17efca93c8728445522dedafc033b3384a26a39d (patch) | |
| tree | 44c3b4a4e24a202077f4746fb240c850d86f8853 | |
| parent | 568d7c764ae01f3706085ac8f0d8a8ac7e826bd7 (diff) | |
| parent | b47bcb93bbf201e9c5af698945755efeb60c0bc8 (diff) | |
Merge tag 'topic/drm-misc-2016-03-22' of git://anongit.freedesktop.org/drm-intel into drm-next
Bunch of small fixups all over. Plus a dma-buf patch that Sumit asked me
to cherry-pick since that's the only one he had in his tree.
There's a sparse issue outstanding in the color mgr stuff, but Lionel is
still working on something that actually appeases sparse.
* tag 'topic/drm-misc-2016-03-22' of git://anongit.freedesktop.org/drm-intel:
dma-buf/fence: fix fence_is_later v2
dma-buf: Update docs for SYNC ioctl
drm: remove excess description
dma-buf, drm, ion: Propagate error code from dma_buf_start_cpu_access()
drm/atmel-hlcdc: use helper to get crtc state
drm/atomic: use helper to get crtc state
| -rw-r--r-- | Documentation/dma-buf-sharing.txt | 11 | ||||
| -rw-r--r-- | drivers/dma-buf/dma-buf.c | 19 | ||||
| -rw-r--r-- | drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 2 | ||||
| -rw-r--r-- | drivers/gpu/drm/drm_atomic.c | 1 | ||||
| -rw-r--r-- | drivers/gpu/drm/drm_atomic_helper.c | 14 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_gem_dmabuf.c | 15 | ||||
| -rw-r--r-- | drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 5 | ||||
| -rw-r--r-- | drivers/gpu/drm/udl/udl_fb.c | 4 | ||||
| -rw-r--r-- | drivers/staging/android/ion/ion.c | 6 | ||||
| -rw-r--r-- | include/linux/dma-buf.h | 6 | ||||
| -rw-r--r-- | include/linux/fence.h | 2 |
11 files changed, 45 insertions, 40 deletions
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt index 32ac32e773e1..ca44c5820585 100644 --- a/Documentation/dma-buf-sharing.txt +++ b/Documentation/dma-buf-sharing.txt | |||
| @@ -352,7 +352,8 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases: | |||
| 352 | 352 | ||
| 353 | No special interfaces, userspace simply calls mmap on the dma-buf fd, making | 353 | No special interfaces, userspace simply calls mmap on the dma-buf fd, making |
| 354 | sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always* | 354 | sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always* |
| 355 | used when the access happens. This is discussed next paragraphs. | 355 | used when the access happens. Note that DMA_BUF_IOCTL_SYNC can fail with |
| 356 | -EAGAIN or -EINTR, in which case it must be restarted. | ||
| 356 | 357 | ||
| 357 | Some systems might need some sort of cache coherency management e.g. when | 358 | Some systems might need some sort of cache coherency management e.g. when |
| 358 | CPU and GPU domains are being accessed through dma-buf at the same time. To | 359 | CPU and GPU domains are being accessed through dma-buf at the same time. To |
| @@ -366,10 +367,10 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases: | |||
| 366 | want (with the new data being consumed by the GPU or say scanout device) | 367 | want (with the new data being consumed by the GPU or say scanout device) |
| 367 | - munmap once you don't need the buffer any more | 368 | - munmap once you don't need the buffer any more |
| 368 | 369 | ||
| 369 | Therefore, for correctness and optimal performance, systems with the memory | 370 | For correctness and optimal performance, it is always required to use |
| 370 | cache shared by the GPU and CPU i.e. the "coherent" and also the | 371 | SYNC_START and SYNC_END before and after, respectively, when accessing the |
| 371 | "incoherent" are always required to use SYNC_START and SYNC_END before and | 372 | mapped address. Userspace cannot rely on coherent access, even when there |
| 372 | after, respectively, when accessing the mapped address. | 373 | are systems where it just works without calling these ioctls. |
| 373 | 374 | ||
| 374 | 2. Supporting existing mmap interfaces in importers | 375 | 2. Supporting existing mmap interfaces in importers |
| 375 | 376 | ||
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 9810d1df0691..4a2c07ee6677 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c | |||
| @@ -259,6 +259,7 @@ static long dma_buf_ioctl(struct file *file, | |||
| 259 | struct dma_buf *dmabuf; | 259 | struct dma_buf *dmabuf; |
| 260 | struct dma_buf_sync sync; | 260 | struct dma_buf_sync sync; |
| 261 | enum dma_data_direction direction; | 261 | enum dma_data_direction direction; |
| 262 | int ret; | ||
| 262 | 263 | ||
| 263 | dmabuf = file->private_data; | 264 | dmabuf = file->private_data; |
| 264 | 265 | ||
| @@ -285,11 +286,11 @@ static long dma_buf_ioctl(struct file *file, | |||
| 285 | } | 286 | } |
| 286 | 287 | ||
| 287 | if (sync.flags & DMA_BUF_SYNC_END) | 288 | if (sync.flags & DMA_BUF_SYNC_END) |
| 288 | dma_buf_end_cpu_access(dmabuf, direction); | 289 | ret = dma_buf_end_cpu_access(dmabuf, direction); |
| 289 | else | 290 | else |
| 290 | dma_buf_begin_cpu_access(dmabuf, direction); | 291 | ret = dma_buf_begin_cpu_access(dmabuf, direction); |
| 291 | 292 | ||
| 292 | return 0; | 293 | return ret; |
| 293 | default: | 294 | default: |
| 294 | return -ENOTTY; | 295 | return -ENOTTY; |
| 295 | } | 296 | } |
| @@ -611,15 +612,19 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); | |||
| 611 | * @dmabuf: [in] buffer to complete cpu access for. | 612 | * @dmabuf: [in] buffer to complete cpu access for. |
| 612 | * @direction: [in] length of range for cpu access. | 613 | * @direction: [in] length of range for cpu access. |
| 613 | * | 614 | * |
| 614 | * This call must always succeed. | 615 | * Can return negative error values, returns 0 on success. |
| 615 | */ | 616 | */ |
| 616 | void dma_buf_end_cpu_access(struct dma_buf *dmabuf, | 617 | int dma_buf_end_cpu_access(struct dma_buf *dmabuf, |
| 617 | enum dma_data_direction direction) | 618 | enum dma_data_direction direction) |
| 618 | { | 619 | { |
| 620 | int ret = 0; | ||
| 621 | |||
| 619 | WARN_ON(!dmabuf); | 622 | WARN_ON(!dmabuf); |
| 620 | 623 | ||
| 621 | if (dmabuf->ops->end_cpu_access) | 624 | if (dmabuf->ops->end_cpu_access) |
| 622 | dmabuf->ops->end_cpu_access(dmabuf, direction); | 625 | ret = dmabuf->ops->end_cpu_access(dmabuf, direction); |
| 626 | |||
| 627 | return ret; | ||
| 623 | } | 628 | } |
| 624 | EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); | 629 | EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); |
| 625 | 630 | ||
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 1ffe9c329c46..d65dcaee3832 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | |||
| @@ -558,7 +558,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, | |||
| 558 | if (!state->base.crtc || !fb) | 558 | if (!state->base.crtc || !fb) |
| 559 | return 0; | 559 | return 0; |
| 560 | 560 | ||
| 561 | crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)]; | 561 | crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc); |
| 562 | mode = &crtc_state->adjusted_mode; | 562 | mode = &crtc_state->adjusted_mode; |
| 563 | 563 | ||
| 564 | state->src_x = s->src_x; | 564 | state->src_x = s->src_x; |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index a2596eb803fc..8ee1db866e80 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
| @@ -380,7 +380,6 @@ EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc); | |||
| 380 | * drm_atomic_replace_property_blob - replace a blob property | 380 | * drm_atomic_replace_property_blob - replace a blob property |
| 381 | * @blob: a pointer to the member blob to be replaced | 381 | * @blob: a pointer to the member blob to be replaced |
| 382 | * @new_blob: the new blob to replace with | 382 | * @new_blob: the new blob to replace with |
| 383 | * @expected_size: the expected size of the new blob | ||
| 384 | * @replaced: whether the blob has been replaced | 383 | * @replaced: whether the blob has been replaced |
| 385 | * | 384 | * |
| 386 | * RETURNS: | 385 | * RETURNS: |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2bb90faa0ee2..4befe25c81c7 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -67,7 +67,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, | |||
| 67 | struct drm_crtc_state *crtc_state; | 67 | struct drm_crtc_state *crtc_state; |
| 68 | 68 | ||
| 69 | if (plane->state->crtc) { | 69 | if (plane->state->crtc) { |
| 70 | crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)]; | 70 | crtc_state = drm_atomic_get_existing_crtc_state(state, |
| 71 | plane->state->crtc); | ||
| 71 | 72 | ||
| 72 | if (WARN_ON(!crtc_state)) | 73 | if (WARN_ON(!crtc_state)) |
| 73 | return; | 74 | return; |
| @@ -76,8 +77,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, | |||
| 76 | } | 77 | } |
| 77 | 78 | ||
| 78 | if (plane_state->crtc) { | 79 | if (plane_state->crtc) { |
| 79 | crtc_state = | 80 | crtc_state = drm_atomic_get_existing_crtc_state(state, |
| 80 | state->crtc_states[drm_crtc_index(plane_state->crtc)]; | 81 | plane_state->crtc); |
| 81 | 82 | ||
| 82 | if (WARN_ON(!crtc_state)) | 83 | if (WARN_ON(!crtc_state)) |
| 83 | return; | 84 | return; |
| @@ -374,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state) | |||
| 374 | if (!conn_state->crtc || !conn_state->best_encoder) | 375 | if (!conn_state->crtc || !conn_state->best_encoder) |
| 375 | continue; | 376 | continue; |
| 376 | 377 | ||
| 377 | crtc_state = | 378 | crtc_state = drm_atomic_get_existing_crtc_state(state, |
| 378 | state->crtc_states[drm_crtc_index(conn_state->crtc)]; | 379 | conn_state->crtc); |
| 379 | 380 | ||
| 380 | /* | 381 | /* |
| 381 | * Each encoder has at most one connector (since we always steal | 382 | * Each encoder has at most one connector (since we always steal |
| @@ -679,7 +680,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) | |||
| 679 | if (!old_conn_state->crtc) | 680 | if (!old_conn_state->crtc) |
| 680 | continue; | 681 | continue; |
| 681 | 682 | ||
| 682 | old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)]; | 683 | old_crtc_state = drm_atomic_get_existing_crtc_state(old_state, |
| 684 | old_conn_state->crtc); | ||
| 683 | 685 | ||
| 684 | if (!old_crtc_state->active || | 686 | if (!old_crtc_state->active || |
| 685 | !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state)) | 687 | !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state)) |
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 1f3eef6fb345..0506016e18e0 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c | |||
| @@ -228,25 +228,20 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire | |||
| 228 | return ret; | 228 | return ret; |
| 229 | } | 229 | } |
| 230 | 230 | ||
| 231 | static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) | 231 | static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) |
| 232 | { | 232 | { |
| 233 | struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); | 233 | struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); |
| 234 | struct drm_device *dev = obj->base.dev; | 234 | struct drm_device *dev = obj->base.dev; |
| 235 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
| 236 | bool was_interruptible; | ||
| 237 | int ret; | 235 | int ret; |
| 238 | 236 | ||
| 239 | mutex_lock(&dev->struct_mutex); | 237 | ret = i915_mutex_lock_interruptible(dev); |
| 240 | was_interruptible = dev_priv->mm.interruptible; | 238 | if (ret) |
| 241 | dev_priv->mm.interruptible = false; | 239 | return ret; |
| 242 | 240 | ||
| 243 | ret = i915_gem_object_set_to_gtt_domain(obj, false); | 241 | ret = i915_gem_object_set_to_gtt_domain(obj, false); |
| 244 | |||
| 245 | dev_priv->mm.interruptible = was_interruptible; | ||
| 246 | mutex_unlock(&dev->struct_mutex); | 242 | mutex_unlock(&dev->struct_mutex); |
| 247 | 243 | ||
| 248 | if (unlikely(ret)) | 244 | return ret; |
| 249 | DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n"); | ||
| 250 | } | 245 | } |
| 251 | 246 | ||
| 252 | static const struct dma_buf_ops i915_dmabuf_ops = { | 247 | static const struct dma_buf_ops i915_dmabuf_ops = { |
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index 3cf8aab23a39..af267c35d813 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | |||
| @@ -97,11 +97,12 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, | |||
| 97 | return omap_gem_get_pages(obj, &pages, true); | 97 | return omap_gem_get_pages(obj, &pages, true); |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, | 100 | static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, |
| 101 | enum dma_data_direction dir) | 101 | enum dma_data_direction dir) |
| 102 | { | 102 | { |
| 103 | struct drm_gem_object *obj = buffer->priv; | 103 | struct drm_gem_object *obj = buffer->priv; |
| 104 | omap_gem_put_pages(obj); | 104 | omap_gem_put_pages(obj); |
| 105 | return 0; | ||
| 105 | } | 106 | } |
| 106 | 107 | ||
| 107 | 108 | ||
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index c427499133d6..33239a2b264a 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
| @@ -423,8 +423,8 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, | |||
| 423 | } | 423 | } |
| 424 | 424 | ||
| 425 | if (ufb->obj->base.import_attach) { | 425 | if (ufb->obj->base.import_attach) { |
| 426 | dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, | 426 | ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, |
| 427 | DMA_FROM_DEVICE); | 427 | DMA_FROM_DEVICE); |
| 428 | } | 428 | } |
| 429 | 429 | ||
| 430 | unlock: | 430 | unlock: |
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 0754a37c9674..49436b4510f4 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c | |||
| @@ -1075,14 +1075,16 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, | |||
| 1075 | return PTR_ERR_OR_ZERO(vaddr); | 1075 | return PTR_ERR_OR_ZERO(vaddr); |
| 1076 | } | 1076 | } |
| 1077 | 1077 | ||
| 1078 | static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, | 1078 | static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, |
| 1079 | enum dma_data_direction direction) | 1079 | enum dma_data_direction direction) |
| 1080 | { | 1080 | { |
| 1081 | struct ion_buffer *buffer = dmabuf->priv; | 1081 | struct ion_buffer *buffer = dmabuf->priv; |
| 1082 | 1082 | ||
| 1083 | mutex_lock(&buffer->lock); | 1083 | mutex_lock(&buffer->lock); |
| 1084 | ion_buffer_kmap_put(buffer); | 1084 | ion_buffer_kmap_put(buffer); |
| 1085 | mutex_unlock(&buffer->lock); | 1085 | mutex_unlock(&buffer->lock); |
| 1086 | |||
| 1087 | return 0; | ||
| 1086 | } | 1088 | } |
| 1087 | 1089 | ||
| 1088 | static struct dma_buf_ops dma_buf_ops = { | 1090 | static struct dma_buf_ops dma_buf_ops = { |
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 532108ea0c1c..3fe90d494edb 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h | |||
| @@ -94,7 +94,7 @@ struct dma_buf_ops { | |||
| 94 | void (*release)(struct dma_buf *); | 94 | void (*release)(struct dma_buf *); |
| 95 | 95 | ||
| 96 | int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); | 96 | int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); |
| 97 | void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); | 97 | int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); |
| 98 | void *(*kmap_atomic)(struct dma_buf *, unsigned long); | 98 | void *(*kmap_atomic)(struct dma_buf *, unsigned long); |
| 99 | void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); | 99 | void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); |
| 100 | void *(*kmap)(struct dma_buf *, unsigned long); | 100 | void *(*kmap)(struct dma_buf *, unsigned long); |
| @@ -224,8 +224,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, | |||
| 224 | enum dma_data_direction); | 224 | enum dma_data_direction); |
| 225 | int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, | 225 | int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, |
| 226 | enum dma_data_direction dir); | 226 | enum dma_data_direction dir); |
| 227 | void dma_buf_end_cpu_access(struct dma_buf *dma_buf, | 227 | int dma_buf_end_cpu_access(struct dma_buf *dma_buf, |
| 228 | enum dma_data_direction dir); | 228 | enum dma_data_direction dir); |
| 229 | void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); | 229 | void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); |
| 230 | void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); | 230 | void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); |
| 231 | void *dma_buf_kmap(struct dma_buf *, unsigned long); | 231 | void *dma_buf_kmap(struct dma_buf *, unsigned long); |
diff --git a/include/linux/fence.h b/include/linux/fence.h index bb522011383b..5aa95eb886f7 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h | |||
| @@ -292,7 +292,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2) | |||
| 292 | if (WARN_ON(f1->context != f2->context)) | 292 | if (WARN_ON(f1->context != f2->context)) |
| 293 | return false; | 293 | return false; |
| 294 | 294 | ||
| 295 | return f1->seqno - f2->seqno < INT_MAX; | 295 | return (int)(f1->seqno - f2->seqno) > 0; |
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | /** | 298 | /** |
