aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGerd Hoffmann <kraxel@redhat.com>2015-08-19 17:44:15 -0400
committerGerd Hoffmann <kraxel@redhat.com>2015-10-16 04:44:00 -0400
commitec2f0577c7b1fccc7a5d7ee8002a4f162061498f (patch)
tree218790a9de25b9f7f415d94f51d4114c9f730ca5
parent9c73f4782642c785569ad50e01324002d160bd09 (diff)
virtio-gpu: add & use virtio_gpu_queue_fenced_ctrl_buffer
Add helper function to handle the submission of fenced control requests. Make sure we initialize the fence while holding the virtqueue lock, so requests can't be reordered. Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c40
2 files changed, 35 insertions, 7 deletions
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 1da632631dac..793ad9f631fd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -81,7 +81,7 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
81 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; 81 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
82 unsigned long irq_flags; 82 unsigned long irq_flags;
83 83
84 *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_KERNEL); 84 *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
85 if ((*fence) == NULL) 85 if ((*fence) == NULL)
86 return -ENOMEM; 86 return -ENOMEM;
87 87
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5b9bc242890f..ee25e9a4ae03 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -347,6 +347,38 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
347 return rc; 347 return rc;
348} 348}
349 349
/*
 * Queue a control request, optionally attaching a fence to it.
 *
 * The fence id is emitted and the buffer is queued while ctrlq.qlock is
 * held, so two fenced requests cannot interleave: fence ids reach the
 * host in the same order as the requests they belong to.
 *
 * NOTE(review): because virtio_gpu_fence_emit() is called under this
 * spinlock, the fence allocation it performs must not sleep (hence the
 * GFP_ATOMIC change in virtgpu_fence.c in this same commit).
 */
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence **fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue. If not
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	/*
	 * 3 is presumably the worst-case descriptor count for one
	 * request (cmd + data + response) — TODO confirm against the
	 * sg setup in virtio_gpu_queue_ctrl_buffer_locked().
	 */
	if (vq->num_free < 3) {
		/*
		 * Drop the lock before sleeping; ack_queue is woken by
		 * the ctrl virtqueue completion path. Re-check the
		 * condition from the top afterwards, since another
		 * waiter may have consumed the freed slots.
		 */
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	/* Emit the fence (if requested) and queue the buffer atomically
	 * with respect to other submitters — both under qlock. */
	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}
381
350static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, 382static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
351 struct virtio_gpu_vbuffer *vbuf) 383 struct virtio_gpu_vbuffer *vbuf)
352{ 384{
@@ -499,9 +531,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
499 cmd_p->r.x = x; 531 cmd_p->r.x = x;
500 cmd_p->r.y = y; 532 cmd_p->r.y = y;
501 533
502 if (fence) 534 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
503 virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
504 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
505} 535}
506 536
507static void 537static void
@@ -524,9 +554,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
524 vbuf->data_buf = ents; 554 vbuf->data_buf = ents;
525 vbuf->data_size = sizeof(*ents) * nents; 555 vbuf->data_size = sizeof(*ents) * nents;
526 556
527 if (fence) 557 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
528 virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
529 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
530} 558}
531 559
532static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev, 560static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,