 drivers/gpu/drm/i915/i915_gem_dmabuf.c     | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 31 +++++++++++++++++++++++++++++--
 2 files changed, 84 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 3a00ab3ad06e..c60a8d5bbad0 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -23,9 +23,13 @@
  * Authors:
  *	Dave Airlie <airlied@redhat.com>
  */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
 #include <drm/drmP.h>
+
 #include "i915_drv.h"
-#include <linux/dma-buf.h>
 
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
@@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.end_cpu_access = i915_gem_end_cpu_access,
 };
 
+static void export_fences(struct drm_i915_gem_object *obj,
+			  struct dma_buf *dma_buf)
+{
+	struct reservation_object *resv = dma_buf->resv;
+	struct drm_i915_gem_request *req;
+	unsigned long active;
+	int idx;
+
+	active = __I915_BO_ACTIVE(obj);
+	if (!active)
+		return;
+
+	/* Serialise with execbuf to prevent concurrent fence-loops */
+	mutex_lock(&obj->base.dev->struct_mutex);
+
+	/* Mark the object for future fences before racily adding old fences */
+	obj->base.dma_buf = dma_buf;
+
+	ww_mutex_lock(&resv->lock, NULL);
+
+	for_each_active(active, idx) {
+		req = i915_gem_active_get(&obj->last_read[idx],
+					  &obj->base.dev->struct_mutex);
+		if (!req)
+			continue;
+
+		if (reservation_object_reserve_shared(resv) == 0)
+			reservation_object_add_shared_fence(resv, &req->fence);
+
+		i915_gem_request_put(req);
+	}
+
+	req = i915_gem_active_get(&obj->last_write,
+				  &obj->base.dev->struct_mutex);
+	if (req) {
+		reservation_object_add_excl_fence(resv, &req->fence);
+		i915_gem_request_put(req);
+	}
+
+	ww_mutex_unlock(&resv->lock);
+	mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dma_buf;
 
 	exp_info.ops = &i915_dmabuf_ops;
 	exp_info.size = gem_obj->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem_obj;
 
-
 	if (obj->ops->dmabuf_export) {
 		int ret = obj->ops->dmabuf_export(obj);
 		if (ret)
 			return ERR_PTR(ret);
 	}
 
-	return dma_buf_export(&exp_info);
+	dma_buf = dma_buf_export(&exp_info);
+	if (IS_ERR(dma_buf))
+		return dma_buf;
+
+	export_fences(obj, dma_buf);
+	return dma_buf;
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
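
Aside (not part of the patch): export_fences() above publishes the object's outstanding read/write requests on the dma-buf's reservation object at export time, so other devices importing the buffer can synchronise against them. How an importer consumes those fences is outside this diff; the sketch below is purely illustrative. The function name is hypothetical, while reservation_object_wait_timeout_rcu() is the generic fence-wait helper of this kernel generation.

/* Illustrative only -- not part of the patch. */
#include <linux/dma-buf.h>
#include <linux/jiffies.h>
#include <linux/reservation.h>

static int example_wait_for_exported_fences(struct dma_buf *dma_buf)
{
	struct reservation_object *resv = dma_buf->resv;
	long ret;

	/* Wait for the exclusive (write) fence and all shared (read) fences
	 * that export_fences() attached to the buffer at export time.
	 */
	ret = reservation_object_wait_timeout_rcu(resv, true, true,
						  msecs_to_jiffies(100));
	if (ret < 0)
		return ret;		/* interrupted or other error */

	return ret ? 0 : -ETIMEDOUT;	/* 0 means the wait timed out */
}
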
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a1da3028a949..71834741bd87 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,14 +26,18 @@
  *
  */
 
+#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/uaccess.h>
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
-#include <linux/dma_remapping.h>
-#include <linux/uaccess.h>
 
 #define __EXEC_OBJECT_HAS_PIN (1<<31)
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
@@ -1205,6 +1209,28 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+			    struct drm_i915_gem_request *req,
+			    unsigned int flags)
+{
+	struct reservation_object *resv;
+
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (!resv)
+		return;
+
+	/* Ignore errors from failing to allocate the new fence, we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
+	 */
+	ww_mutex_lock(&resv->lock, NULL);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &req->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &req->fence);
+	ww_mutex_unlock(&resv->lock);
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
@@ -1224,6 +1250,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		obj->base.read_domains = obj->base.pending_read_domains;
 
 		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+		eb_export_fence(obj, req, vma->exec_entry->flags);
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
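
Aside (not part of the patch): eb_export_fence() relies on i915_gem_object_get_dmabuf_resv(), pulled in through the new "i915_gem_dmabuf.h" include; its definition is not shown in this diff. The sketch below is one plausible minimal form, assumed rather than taken from the header, and consistent with export_fences() storing the exported dma-buf in obj->base.dma_buf.

/* Sketch of the helper assumed by eb_export_fence() -- the real definition
 * lives in i915_gem_dmabuf.h and is not part of this diff.
 */
static inline struct reservation_object *
i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
{
	struct dma_buf *dma_buf;

	if (obj->base.dma_buf)			/* exported by export_fences() */
		dma_buf = obj->base.dma_buf;
	else if (obj->base.import_attach)	/* imported from another device */
		dma_buf = obj->base.import_attach->dmabuf;
	else
		return NULL;			/* never shared: nothing to fence */

	return dma_buf->resv;
}

With such a helper, objects that have never been exported or imported return NULL and eb_export_fence() becomes a no-op, so the extra work in execbuf is only paid for buffers that are actually shared across devices.
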