Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a1da3028a949..71834741bd87 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,14 +26,18 @@
  *
  */
 
+#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/uaccess.h>
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
-#include <linux/dma_remapping.h>
-#include <linux/uaccess.h>
 
 #define __EXEC_OBJECT_HAS_PIN (1<<31)
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
@@ -1205,6 +1209,28 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 		list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+			    struct drm_i915_gem_request *req,
+			    unsigned int flags)
+{
+	struct reservation_object *resv;
+
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (!resv)
+		return;
+
+	/* Ignore errors from failing to allocate the new fence, we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
+	 */
+	ww_mutex_lock(&resv->lock, NULL);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &req->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &req->fence);
+	ww_mutex_unlock(&resv->lock);
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
@@ -1224,6 +1250,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		obj->base.read_domains = obj->base.pending_read_domains;
 
 		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+		eb_export_fence(obj, req, vma->exec_entry->flags);
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
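
For context, and not part of the patch above: eb_export_fence() attaches the request's fence to the buffer's dma-buf reservation object (exclusively for writes, shared for reads), so other drivers importing the same dma-buf can synchronise against i915 rendering. A minimal consumer-side sketch of what that enables, assuming the hypothetical helper name example_wait_for_render() and the reservation_object_wait_timeout_rcu() API of this kernel era:

#include <linux/jiffies.h>
#include <linux/reservation.h>

/*
 * Hypothetical sketch, not part of this patch: wait for all fences
 * (shared readers and the exclusive writer) that eb_export_fence()
 * attached to the imported buffer's reservation object before using it.
 */
static long example_wait_for_render(struct reservation_object *resv)
{
	/* wait_all = true, interruptible, 1 second timeout */
	return reservation_object_wait_timeout_rcu(resv, true, true,
						   msecs_to_jiffies(1000));
}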