author	Gerd Hoffmann <kraxel@redhat.com>	2018-08-29 08:20:26 -0400
committer	Gerd Hoffmann <kraxel@redhat.com>	2018-09-05 02:26:26 -0400
commit	a3b815f09bb846255c458c181b8a5b1cc66891b4 (patch)
tree	53aceaede9167dbb6abb93ad51a5620fff6d7d18
parent	b3f13ec958a77497da76cc7a89d60b741b79ba22 (diff)
drm/virtio: add iommu support.
Use the dma mapping api and properly add iommu mappings for
objects, unless virtio is in iommu quirk mode.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180829122026.27012-3-kraxel@redhat.com
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_drv.h	1
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_vq.c	46
2 files changed, 38 insertions, 9 deletions
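The heart of the change is the choice in virtio_gpu_object_attach() and virtio_gpu_object_detach() between dma-mapped addresses and guest physical addresses. As a standalone illustration of that decision (not part of the patch), the sketch below maps an object's backing pages the same way; the helper name example_map_backing and its return convention are assumptions made for the example, while virtio_has_iommu_quirk(), dma_map_sg(), sg_dma_address() and sg_phys() are the interfaces the patch itself uses.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

/*
 * Hypothetical helper, for illustration only: map an object's backing
 * pages the way virtio_gpu_object_attach() does after this patch.
 * Returns the number of entries to report to the host, or -ENOMEM.
 */
static int example_map_backing(struct virtio_device *vdev,
			       struct sg_table *pages, u64 *first_addr)
{
	/* in iommu quirk mode the device expects guest physical addresses */
	bool use_dma_api = !virtio_has_iommu_quirk(vdev);
	int nents;

	if (use_dma_api) {
		/* create iommu mappings and hand out dma (bus) addresses */
		nents = dma_map_sg(vdev->dev.parent, pages->sgl,
				   pages->nents, DMA_TO_DEVICE);
		if (!nents)
			return -ENOMEM;
		*first_addr = sg_dma_address(pages->sgl);
	} else {
		/* no iommu translation: physical addresses go on the ring */
		nents = pages->nents;
		*first_addr = sg_phys(pages->sgl);
	}
	return nents;
}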
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 4fda0da6d949..f8f4a40dd1b8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -57,6 +57,7 @@ struct virtio_gpu_object {
 	uint32_t hw_res_handle;
 
 	struct sg_table *pages;
+	uint32_t mapped;
 	void *vmap;
 	bool dumb;
 	struct ttm_place placement_code;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index e23c1927722f..5784c3ea8767 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -424,7 +424,8 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 }
 
 static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-						   uint32_t resource_id)
+						   uint32_t resource_id,
+						   struct virtio_gpu_fence **fence)
 {
 	struct virtio_gpu_resource_detach_backing *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -435,7 +436,7 @@ static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgde
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
 	cmd_p->resource_id = cpu_to_le32(resource_id);
 
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -849,9 +850,10 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     uint32_t resource_id,
 			     struct virtio_gpu_fence **fence)
 {
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_mem_entry *ents;
 	struct scatterlist *sg;
-	int si;
+	int si, nents;
 
 	if (!obj->pages) {
 		int ret;
@@ -861,23 +863,33 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			return ret;
 	}
 
+	if (use_dma_api) {
+		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+					 obj->pages->sgl, obj->pages->nents,
+					 DMA_TO_DEVICE);
+		nents = obj->mapped;
+	} else {
+		nents = obj->pages->nents;
+	}
+
 	/* gets freed when the ring has consumed it */
-	ents = kmalloc_array(obj->pages->nents,
-			     sizeof(struct virtio_gpu_mem_entry),
+	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
 			     GFP_KERNEL);
 	if (!ents) {
 		DRM_ERROR("failed to allocate ent list\n");
 		return -ENOMEM;
 	}
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
-		ents[si].addr = cpu_to_le64(sg_phys(sg));
+	for_each_sg(obj->pages->sgl, sg, nents, si) {
+		ents[si].addr = cpu_to_le64(use_dma_api
+					    ? sg_dma_address(sg)
+					    : sg_phys(sg));
 		ents[si].length = cpu_to_le32(sg->length);
 		ents[si].padding = 0;
 	}
 
 	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
-					       ents, obj->pages->nents,
+					       ents, nents,
 					       fence);
 	obj->hw_res_handle = resource_id;
 	return 0;
@@ -886,7 +898,23 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_object *obj)
 {
-	virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle);
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct virtio_gpu_fence *fence;
+
+	if (use_dma_api && obj->mapped) {
+		/* detach backing and wait for the host process it ... */
+		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+		dma_fence_wait(&fence->f, true);
+		dma_fence_put(&fence->f);
+
+		/* ... then tear down iommu mappings */
+		dma_unmap_sg(vgdev->vdev->dev.parent,
+			     obj->pages->sgl, obj->mapped,
+			     DMA_TO_DEVICE);
+		obj->mapped = 0;
+	} else {
+		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
+	}
 }
 
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,