author    Robert Foss <robert.foss@collabora.com>    2018-11-12 11:51:54 -0500
committer Gerd Hoffmann <kraxel@redhat.com>          2018-11-14 08:21:01 -0500
commit    9fdd90c0f42440b7f1e4a3f7262d222eee4b4cdf
tree      3790a9c569e93d98f8603742d85bf23ae5958dd5 /drivers
parent    2ae7f165c0b022a8ed1732f8ab2c11b2c173146e
drm/virtio: add virtio_gpu_alloc_fence()
Refactor fence creation, add fences to the relevant GPU operations, and add
cursor helper functions. This removes the potential for allocation failures
in the cmd_submit and atomic_commit paths: a fence is now allocated first,
and only once that succeeds do we proceed with the rest of the execution.

Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.com>
Signed-off-by: Robert Foss <robert.foss@collabora.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20181112165157.32765-2-robert.foss@collabora.com
Suggested-by: Rob Herring <robh@kernel.org>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
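The key ordering change recurs throughout the hunks below: previously the
fence was kmalloc'ed inside virtio_gpu_fence_emit(), after command
construction was already under way, so an allocation failure surfaced
mid-operation; now callers allocate up front and can bail out cleanly. A
minimal userspace sketch of that allocate-then-commit pattern (hypothetical
stand-in types and names, plain C; GFP_ATOMIC, locking, and the dma_fence
machinery are elided):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the kernel structures (hypothetical, for illustration). */
struct fence { unsigned long seq; };
struct device_state { unsigned long sync_seq; };

/* Step 1: allocation happens up front and can fail cleanly. */
static struct fence *fence_alloc(void)
{
        return calloc(1, sizeof(struct fence));
}

/* Step 2: emit can no longer fail -- it only stamps the sequence number. */
static void fence_emit(struct device_state *dev, struct fence *f)
{
        f->seq = ++dev->sync_seq;
}

static int submit(struct device_state *dev)
{
        /* Allocate first; if it fails, no command state has been touched. */
        struct fence *f = fence_alloc();
        if (!f)
                return -1;      /* the kernel paths return -ENOMEM here */

        /* ... build and queue the command buffer ... */
        fence_emit(dev, f);
        printf("submitted with fence seq %lu\n", f->seq);
        free(f);
        return 0;
}

int main(void)
{
        struct device_state dev = { 0 };
        return submit(&dev) ? EXIT_FAILURE : EXIT_SUCCESS;
}

Run as an ordinary C program this just prints the fence sequence; the point
it models is that the only failure mode occurs before any device-visible
state has changed.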
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_drv.h    |  4
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_fence.c  | 29
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_ioctl.c  | 30
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_plane.c  | 46
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_vq.c     |  2
5 files changed, 96 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 6474e83cbf3d..acd130c58e33 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -131,6 +131,7 @@ struct virtio_gpu_framebuffer {
 	int x1, y1, x2, y2; /* dirty rect */
 	spinlock_t dirty_lock;
 	uint32_t hw_res_handle;
+	struct virtio_gpu_fence *fence;
 };
 #define to_virtio_gpu_framebuffer(x) \
 	container_of(x, struct virtio_gpu_framebuffer, base)
@@ -349,6 +350,9 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
 int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /* virtio_gpu_fence.c */
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(
+	struct virtio_gpu_device *vgdev);
+void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
 			  struct virtio_gpu_fence **fence);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 00c742a441bf..6b5d92215cfb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -67,6 +67,28 @@ static const struct dma_fence_ops virtio_fence_ops = {
 	.timeline_value_str = virtio_timeline_value_str,
 };
 
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+{
+	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+	struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
+						 GFP_ATOMIC);
+	if (!fence)
+		return fence;
+
+	fence->drv = drv;
+	dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
+
+	return fence;
+}
+
+void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
+{
+	if (!fence)
+		return;
+
+	dma_fence_put(&fence->f);
+}
+
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
 			  struct virtio_gpu_fence **fence)
@@ -74,15 +96,8 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
 	unsigned long irq_flags;
 
-	*fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
-	if ((*fence) == NULL)
-		return -ENOMEM;
-
 	spin_lock_irqsave(&drv->lock, irq_flags);
-	(*fence)->drv = drv;
 	(*fence)->seq = ++drv->sync_seq;
-	dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
-		       drv->context, (*fence)->seq);
 	dma_fence_get(&(*fence)->f);
 	list_add_tail(&(*fence)->node, &drv->fences);
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
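One detail worth noting in the hunk above: dma_fence_init() in
virtio_gpu_fence_alloc() starts the fence at one reference,
virtio_gpu_fence_emit() takes a second reference for the driver's
pending-fence list, and virtio_gpu_fence_cleanup() is simply a
dma_fence_put() of the caller's reference — which is why it is safe on the
error paths added below in virtgpu_ioctl.c. A toy refcount model of that
lifecycle (hypothetical names, plain C, standing in for struct dma_fence):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted fence, modeling dma_fence's kref (illustrative only). */
struct toy_fence { int refs; };

static struct toy_fence *toy_fence_alloc(void)
{
        struct toy_fence *f = calloc(1, sizeof(*f));
        if (f)
                f->refs = 1;    /* dma_fence_init() also starts at one */
        return f;
}

static void toy_fence_get(struct toy_fence *f) { f->refs++; }

static void toy_fence_put(struct toy_fence *f)
{
        if (--f->refs == 0)
                free(f);
}

int main(void)
{
        /* Error path: alloc then cleanup drops the only reference. */
        struct toy_fence *f = toy_fence_alloc();
        assert(f && f->refs == 1);
        toy_fence_put(f);       /* virtio_gpu_fence_cleanup() */

        /* Success path: emit grabs a second ref for the pending list. */
        f = toy_fence_alloc();
        assert(f);
        toy_fence_get(f);       /* dma_fence_get() in fence_emit() */
        toy_fence_put(f);       /* caller drops its ref when done */
        toy_fence_put(f);       /* host completion drops the list ref */
        printf("lifecycle balanced\n");
        return 0;
}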
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index bc5afa4f906e..d69fc356df0a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -168,6 +168,13 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 		ret = PTR_ERR(buf);
 		goto out_unresv;
 	}
+
+	fence = virtio_gpu_fence_alloc(vgdev);
+	if (!fence) {
+		kfree(buf);
+		ret = -ENOMEM;
+		goto out_unresv;
+	}
 	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
 			      vfpriv->ctx_id, &fence);
 
@@ -283,11 +290,17 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
 		rc_3d.flags = cpu_to_le32(rc->flags);
 
+		fence = virtio_gpu_fence_alloc(vgdev);
+		if (!fence) {
+			ret = -ENOMEM;
+			goto fail_backoff;
+		}
+
 		virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL);
 		ret = virtio_gpu_object_attach(vgdev, qobj, &fence);
 		if (ret) {
-			ttm_eu_backoff_reservation(&ticket, &validate_list);
-			goto fail_unref;
+			virtio_gpu_fence_cleanup(fence);
+			goto fail_backoff;
 		}
 		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
 	}
@@ -312,6 +325,8 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		dma_fence_put(&fence->f);
 	}
 	return 0;
+fail_backoff:
+	ttm_eu_backoff_reservation(&ticket, &validate_list);
 fail_unref:
 	if (vgdev->has_virgl_3d) {
 		virtio_gpu_unref_list(&validate_list);
@@ -374,6 +389,12 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 		goto out_unres;
 
 	convert_to_hw_box(&box, &args->box);
+
+	fence = virtio_gpu_fence_alloc(vgdev);
+	if (!fence) {
+		ret = -ENOMEM;
+		goto out_unres;
+	}
 	virtio_gpu_cmd_transfer_from_host_3d
 		(vgdev, qobj->hw_res_handle,
 		 vfpriv->ctx_id, offset, args->level,
@@ -423,6 +444,11 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			(vgdev, qobj, offset,
 			 box.w, box.h, box.x, box.y, NULL);
 	} else {
+		fence = virtio_gpu_fence_alloc(vgdev);
+		if (!fence) {
+			ret = -ENOMEM;
+			goto out_unres;
+		}
 		virtio_gpu_cmd_transfer_to_host_3d
 			(vgdev, qobj,
 			 vfpriv ? vfpriv->ctx_id : 0, offset,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a9f4ae7d4483..b84ac8c25856 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -137,6 +137,41 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 			  plane->state->src_h >> 16);
 }
 
+static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
+					struct drm_plane_state *new_state)
+{
+	struct drm_device *dev = plane->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_object *bo;
+
+	if (!new_state->fb)
+		return 0;
+
+	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
+		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+		if (!vgfb->fence)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
+					 struct drm_plane_state *old_state)
+{
+	struct virtio_gpu_framebuffer *vgfb;
+
+	if (!plane->state->fb)
+		return;
+
+	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+	if (vgfb->fence)
+		virtio_gpu_fence_cleanup(vgfb->fence);
+}
+
 static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 					   struct drm_plane_state *old_state)
 {
@@ -144,7 +179,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_output *output = NULL;
 	struct virtio_gpu_framebuffer *vgfb;
-	struct virtio_gpu_fence *fence = NULL;
 	struct virtio_gpu_object *bo = NULL;
 	uint32_t handle;
 	int ret = 0;
@@ -170,13 +204,13 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			(vgdev, bo, 0,
 			 cpu_to_le32(plane->state->crtc_w),
 			 cpu_to_le32(plane->state->crtc_h),
-			 0, 0, &fence);
+			 0, 0, &vgfb->fence);
 		ret = virtio_gpu_object_reserve(bo, false);
 		if (!ret) {
 			reservation_object_add_excl_fence(bo->tbo.resv,
-							  &fence->f);
-			dma_fence_put(&fence->f);
-			fence = NULL;
+							  &vgfb->fence->f);
+			dma_fence_put(&vgfb->fence->f);
+			vgfb->fence = NULL;
 			virtio_gpu_object_unreserve(bo);
 			virtio_gpu_object_wait(bo, false);
 		}
@@ -218,6 +252,8 @@ static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
 };
 
 static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
+	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
+	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
 	.atomic_check		= virtio_gpu_plane_atomic_check,
 	.atomic_update		= virtio_gpu_cursor_plane_update,
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 51bef1775e47..93f2c3a51ee8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -896,9 +896,9 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_object *obj)
 {
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-	struct virtio_gpu_fence *fence;
 
 	if (use_dma_api && obj->mapped) {
+		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
 		/* detach backing and wait for the host process it ... */
 		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
 		dma_fence_wait(&fence->f, true);