author		Gerd Hoffmann <kraxel@redhat.com>	2014-10-28 07:48:00 -0400
committer	Gerd Hoffmann <kraxel@redhat.com>	2015-10-16 04:44:02 -0400
commit		62fb7a5e10962ac6ae2a2d2dbd3aedcb2a3e3257
tree		1998adc2960bd46f3a8ce8adb0be17ba13fca722
parent		7552ed8a1a810552e16664d7020d61d2b01b9199
virtio-gpu: add 3d/virgl support
Add the bits needed for opengl rendering support: query
capabilities, new virtio commands, drm ioctls.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
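For orientation, a minimal userspace sketch (not part of this patch) of probing
the 3D feature through the GETPARAM ioctl the patch adds below; the device node
path and the include path for the new uapi header are assumptions:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>	/* the uapi header introduced by this commit */

int main(void)
{
	int have_3d = 0;	/* the kernel copies back sizeof(int) */
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	struct drm_virtgpu_getparam gp = {
		.param = VIRTGPU_PARAM_3D_FEATURES,
		.value = (uint64_t)(uintptr_t)&have_3d,
	};

	if (fd < 0)
		return 1;
	/* fails with -EINVAL on kernels without this patch */
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0)
		printf("virgl 3d: %s\n", have_3d ? "yes" : "no");
	return 0;
}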
-rw-r--r--  drivers/gpu/drm/virtio/Makefile         |   3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c    |  15
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h    |  60
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c    |  41
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c  | 573
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c    | 133
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c    |   1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c     | 265
-rw-r--r--  include/uapi/drm/Kbuild                 |   1
-rw-r--r--  include/uapi/drm/virtgpu_drm.h          | 167
-rw-r--r--  include/uapi/linux/virtio_gpu.h         | 112
11 files changed, 1368 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 2ee1602d77d4..da7bf192e462 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -6,6 +6,7 @@ ccflags-y := -Iinclude/drm
 
 virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \
 	virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
-	virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o
+	virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
+	virtgpu_ioctl.o
 
 obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 7d9610aaeff9..c27d3a30df0a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -73,6 +73,14 @@ static struct virtio_device_id id_table[] = {
 };
 
 static unsigned int features[] = {
+#ifdef __LITTLE_ENDIAN
+	/*
+	 * Gallium command stream sent by virgl is native endian.
+	 * Because of that we only support little endian guests on
+	 * little endian hosts.
+	 */
+	VIRTIO_GPU_F_VIRGL,
+#endif
 };
 static struct virtio_driver virtio_gpu_driver = {
 	.feature_table = features,
@@ -114,6 +122,8 @@ static struct drm_driver driver = {
 	.set_busid = drm_virtio_set_busid,
 	.load = virtio_gpu_driver_load,
 	.unload = virtio_gpu_driver_unload,
+	.open = virtio_gpu_driver_open,
+	.postclose = virtio_gpu_driver_postclose,
 
 	.dumb_create = virtio_gpu_mode_dumb_create,
 	.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
@@ -125,8 +135,13 @@ static struct drm_driver driver = {
 #endif
 
 	.gem_free_object = virtio_gpu_gem_free_object,
+	.gem_open_object = virtio_gpu_gem_object_open,
+	.gem_close_object = virtio_gpu_gem_object_close,
 	.fops = &virtio_gpu_driver_fops,
 
+	.ioctls = virtio_gpu_ioctls,
+	.num_ioctls = DRM_VIRTIO_NUM_IOCTLS,
+
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 6d4db2dba90b..27191088a701 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -146,6 +146,21 @@ struct virtio_gpu_queue {
 	struct work_struct dequeue_work;
 };
 
+struct virtio_gpu_drv_capset {
+	uint32_t id;
+	uint32_t max_version;
+	uint32_t max_size;
+};
+
+struct virtio_gpu_drv_cap_cache {
+	struct list_head head;
+	void *caps_cache;
+	uint32_t id;
+	uint32_t version;
+	uint32_t size;
+	atomic_t is_valid;
+};
+
 struct virtio_gpu_device {
 	struct device *dev;
 	struct drm_device *ddev;
@@ -179,7 +194,13 @@ struct virtio_gpu_device {
 	struct idr ctx_id_idr;
 	spinlock_t ctx_id_idr_lock;
 
+	bool has_virgl_3d;
+
 	struct work_struct config_changed_work;
+
+	struct virtio_gpu_drv_capset *capsets;
+	uint32_t num_capsets;
+	struct list_head cap_cache;
 };
 
 struct virtio_gpu_fpriv {
@@ -193,6 +214,8 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 /* virtio_kms.c */
 int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
 int virtio_gpu_driver_unload(struct drm_device *dev);
+int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
+void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
 
 /* virtio_gem.c */
 void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
@@ -203,6 +226,10 @@ int virtio_gpu_gem_create(struct drm_file *file,
 			  uint64_t size,
 			  struct drm_gem_object **obj_p,
 			  uint32_t *handle_p);
+int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
+			       struct drm_file *file);
+void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
+				 struct drm_file *file);
 struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
 						  size_t size, bool kernel,
 						  bool pinned);
@@ -260,10 +287,43 @@ void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
 					   uint32_t resource_id);
+int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
+int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
+			      int idx, int version,
+			      struct virtio_gpu_drv_cap_cache **cache_p);
+void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
+				   uint32_t nlen, const char *name);
+void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
+				    uint32_t id);
+void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
+					    uint32_t ctx_id,
+					    uint32_t resource_id);
+void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
+					    uint32_t ctx_id,
+					    uint32_t resource_id);
+void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
+			   void *data, uint32_t data_size,
+			   uint32_t ctx_id, struct virtio_gpu_fence **fence);
+void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
+					  uint32_t resource_id, uint32_t ctx_id,
+					  uint64_t offset, uint32_t level,
+					  struct virtio_gpu_box *box,
+					  struct virtio_gpu_fence **fence);
+void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+					uint32_t resource_id, uint32_t ctx_id,
+					uint64_t offset, uint32_t level,
+					struct virtio_gpu_box *box,
+					struct virtio_gpu_fence **fence);
+void
+virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
+				  struct virtio_gpu_resource_create_3d *rc_3d,
+				  struct virtio_gpu_fence **fence);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
+void virtio_gpu_fence_ack(struct virtqueue *vq);
 void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
+void virtio_gpu_dequeue_fence_func(struct work_struct *work);
 
 /* virtio_gpu_display.c */
 int virtio_gpu_framebuffer_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index cfa0d27150bd..1feb7cee3f0d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -138,3 +138,44 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
 	drm_gem_object_unreference_unlocked(gobj);
 	return 0;
 }
+
+int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
+			       struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
+	int r;
+
+	if (!vgdev->has_virgl_3d)
+		return 0;
+
+	r = virtio_gpu_object_reserve(qobj, false);
+	if (r)
+		return r;
+
+	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
+					       qobj->hw_res_handle);
+	virtio_gpu_object_unreserve(qobj);
+	return 0;
+}
+
+void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
+				 struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
+	int r;
+
+	if (!vgdev->has_virgl_3d)
+		return;
+
+	r = virtio_gpu_object_reserve(qobj, false);
+	if (r)
+		return;
+
+	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
+					       qobj->hw_res_handle);
+	virtio_gpu_object_unreserve(qobj);
+}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
new file mode 100644
index 000000000000..4ef672b314c6
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ *    Dave Airlie
+ *    Alon Levy
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+#include <drm/virtgpu_drm.h>
+#include "ttm/ttm_execbuf_util.h"
+
+static void convert_to_hw_box(struct virtio_gpu_box *dst,
+			      const struct drm_virtgpu_3d_box *src)
+{
+	dst->x = cpu_to_le32(src->x);
+	dst->y = cpu_to_le32(src->y);
+	dst->z = cpu_to_le32(src->z);
+	dst->w = cpu_to_le32(src->w);
+	dst->h = cpu_to_le32(src->h);
+	dst->d = cpu_to_le32(src->d);
+}
+
+static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_virtgpu_map *virtio_gpu_map = data;
+
+	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
+					 virtio_gpu_map->handle,
+					 &virtio_gpu_map->offset);
+}
+
+static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
+					   struct list_head *head)
+{
+	struct ttm_validate_buffer *buf;
+	struct ttm_buffer_object *bo;
+	struct virtio_gpu_object *qobj;
+	int ret;
+
+	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
+	if (ret != 0)
+		return ret;
+
+	list_for_each_entry(buf, head, head) {
+		bo = buf->bo;
+		qobj = container_of(bo, struct virtio_gpu_object, tbo);
+		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
+		if (ret) {
+			ttm_eu_backoff_reservation(ticket, head);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static void virtio_gpu_unref_list(struct list_head *head)
+{
+	struct ttm_validate_buffer *buf;
+	struct ttm_buffer_object *bo;
+	struct virtio_gpu_object *qobj;
+	list_for_each_entry(buf, head, head) {
+		bo = buf->bo;
+		qobj = container_of(bo, struct virtio_gpu_object, tbo);
+
+		drm_gem_object_unreference_unlocked(&qobj->gem_base);
+	}
+}
+
+static int virtio_gpu_execbuffer(struct drm_device *dev,
+				 struct drm_virtgpu_execbuffer *exbuf,
+				 struct drm_file *drm_file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+	struct drm_gem_object *gobj;
+	struct virtio_gpu_fence *fence;
+	struct virtio_gpu_object *qobj;
+	int ret;
+	uint32_t *bo_handles = NULL;
+	void __user *user_bo_handles = NULL;
+	struct list_head validate_list;
+	struct ttm_validate_buffer *buflist = NULL;
+	int i;
+	struct ww_acquire_ctx ticket;
+	void *buf;
+
+	if (vgdev->has_virgl_3d == false)
+		return -ENOSYS;
+
+	INIT_LIST_HEAD(&validate_list);
+	if (exbuf->num_bo_handles) {
+
+		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
+					   sizeof(uint32_t));
+		buflist = drm_calloc_large(exbuf->num_bo_handles,
+					   sizeof(struct ttm_validate_buffer));
+		if (!bo_handles || !buflist) {
+			drm_free_large(bo_handles);
+			drm_free_large(buflist);
+			return -ENOMEM;
+		}
+
+		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
+		if (copy_from_user(bo_handles, user_bo_handles,
+				   exbuf->num_bo_handles * sizeof(uint32_t))) {
+			ret = -EFAULT;
+			drm_free_large(bo_handles);
+			drm_free_large(buflist);
+			return ret;
+		}
+
+		for (i = 0; i < exbuf->num_bo_handles; i++) {
+			gobj = drm_gem_object_lookup(dev,
+						     drm_file, bo_handles[i]);
+			if (!gobj) {
+				drm_free_large(bo_handles);
+				drm_free_large(buflist);
+				return -ENOENT;
+			}
+
+			qobj = gem_to_virtio_gpu_obj(gobj);
+			buflist[i].bo = &qobj->tbo;
+
+			list_add(&buflist[i].head, &validate_list);
+		}
+		drm_free_large(bo_handles);
+	}
+
+	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
+	if (ret)
+		goto out_free;
+
+	buf = kmalloc(exbuf->size, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto out_unresv;
+	}
+	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
+			   exbuf->size)) {
+		kfree(buf);
+		ret = -EFAULT;
+		goto out_unresv;
+	}
+	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
+			      vfpriv->ctx_id, &fence);
+
+	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+
+	/* fence the command bo */
+	virtio_gpu_unref_list(&validate_list);
+	drm_free_large(buflist);
+	fence_put(&fence->f);
+	return 0;
+
+out_unresv:
+	ttm_eu_backoff_reservation(&ticket, &validate_list);
+out_free:
+	virtio_gpu_unref_list(&validate_list);
+	drm_free_large(buflist);
+	return ret;
+}
+
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full VIRTIO_GPUDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
+ */
+static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv)
+{
+	struct drm_virtgpu_execbuffer *execbuffer = data;
+	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
+}
+
+
+static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_virtgpu_getparam *param = data;
+	int value;
+
+	switch (param->param) {
+	case VIRTGPU_PARAM_3D_FEATURES:
+		value = vgdev->has_virgl_3d == true ? 1 : 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (copy_to_user((void __user *)(unsigned long)param->value,
+			 &value, sizeof(int))) {
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
+					    struct drm_file *file_priv)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_virtgpu_resource_create *rc = data;
+	int ret;
+	uint32_t res_id;
+	struct virtio_gpu_object *qobj;
+	struct drm_gem_object *obj;
+	uint32_t handle = 0;
+	uint32_t size;
+	struct list_head validate_list;
+	struct ttm_validate_buffer mainbuf;
+	struct virtio_gpu_fence *fence = NULL;
+	struct ww_acquire_ctx ticket;
+	struct virtio_gpu_resource_create_3d rc_3d;
+
+	if (vgdev->has_virgl_3d == false) {
+		if (rc->depth > 1)
+			return -EINVAL;
+		if (rc->nr_samples > 1)
+			return -EINVAL;
+		if (rc->last_level > 1)
+			return -EINVAL;
+		if (rc->target != 2)
+			return -EINVAL;
+		if (rc->array_size > 1)
+			return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&validate_list);
+	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
+
+	virtio_gpu_resource_id_get(vgdev, &res_id);
+
+	size = rc->size;
+
+	/* allocate a single page size object */
+	if (size == 0)
+		size = PAGE_SIZE;
+
+	qobj = virtio_gpu_alloc_object(dev, size, false, false);
+	if (IS_ERR(qobj)) {
+		ret = PTR_ERR(qobj);
+		goto fail_id;
+	}
+	obj = &qobj->gem_base;
+
+	if (!vgdev->has_virgl_3d) {
+		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
+					       rc->width, rc->height);
+
+		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
+	} else {
+		/* use a gem reference since unref list undoes them */
+		drm_gem_object_reference(&qobj->gem_base);
+		mainbuf.bo = &qobj->tbo;
+		list_add(&mainbuf.head, &validate_list);
+
+		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
+		if (ret) {
+			DRM_DEBUG("failed to validate\n");
+			goto fail_unref;
+		}
+
+		rc_3d.resource_id = cpu_to_le32(res_id);
+		rc_3d.target = cpu_to_le32(rc->target);
+		rc_3d.format = cpu_to_le32(rc->format);
+		rc_3d.bind = cpu_to_le32(rc->bind);
+		rc_3d.width = cpu_to_le32(rc->width);
+		rc_3d.height = cpu_to_le32(rc->height);
+		rc_3d.depth = cpu_to_le32(rc->depth);
+		rc_3d.array_size = cpu_to_le32(rc->array_size);
+		rc_3d.last_level = cpu_to_le32(rc->last_level);
+		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
+		rc_3d.flags = cpu_to_le32(rc->flags);
+
+		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
+		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
+		if (ret) {
+			ttm_eu_backoff_reservation(&ticket, &validate_list);
+			goto fail_unref;
+		}
+		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+	}
+
+	qobj->hw_res_handle = res_id;
+
+	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	if (ret) {
+
+		drm_gem_object_release(obj);
+		if (vgdev->has_virgl_3d) {
+			virtio_gpu_unref_list(&validate_list);
+			fence_put(&fence->f);
+		}
+		return ret;
+	}
+	drm_gem_object_unreference_unlocked(obj);
+
+	rc->res_handle = res_id; /* similar to a VM address */
+	rc->bo_handle = handle;
+
+	if (vgdev->has_virgl_3d) {
+		virtio_gpu_unref_list(&validate_list);
+		fence_put(&fence->f);
+	}
+	return 0;
+fail_unref:
+	if (vgdev->has_virgl_3d) {
+		virtio_gpu_unref_list(&validate_list);
+		fence_put(&fence->f);
+	}
+//fail_obj:
+//	drm_gem_object_handle_unreference_unlocked(obj);
+fail_id:
+	virtio_gpu_resource_id_put(vgdev, res_id);
+	return ret;
+}
+
+static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
+					  struct drm_file *file_priv)
+{
+	struct drm_virtgpu_resource_info *ri = data;
+	struct drm_gem_object *gobj = NULL;
+	struct virtio_gpu_object *qobj = NULL;
+
+	gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_virtio_gpu_obj(gobj);
+
+	ri->size = qobj->gem_base.size;
+	ri->res_handle = qobj->hw_res_handle;
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
+static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
+					       void *data,
+					       struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct drm_virtgpu_3d_transfer_from_host *args = data;
+	struct drm_gem_object *gobj = NULL;
+	struct virtio_gpu_object *qobj = NULL;
+	struct virtio_gpu_fence *fence;
+	int ret;
+	u32 offset = args->offset;
+	struct virtio_gpu_box box;
+
+	if (vgdev->has_virgl_3d == false)
+		return -ENOSYS;
+
+	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_virtio_gpu_obj(gobj);
+
+	ret = virtio_gpu_object_reserve(qobj, false);
+	if (ret)
+		goto out;
+
+	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+			      true, false);
+	if (unlikely(ret))
+		goto out_unres;
+
+	convert_to_hw_box(&box, &args->box);
+	virtio_gpu_cmd_transfer_from_host_3d
+		(vgdev, qobj->hw_res_handle,
+		 vfpriv->ctx_id, offset, args->level,
+		 &box, &fence);
+	reservation_object_add_excl_fence(qobj->tbo.resv,
+					  &fence->f);
+
+	fence_put(&fence->f);
+out_unres:
+	virtio_gpu_object_unreserve(qobj);
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return ret;
+}
+
+static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct drm_virtgpu_3d_transfer_to_host *args = data;
+	struct drm_gem_object *gobj = NULL;
+	struct virtio_gpu_object *qobj = NULL;
+	struct virtio_gpu_fence *fence;
+	struct virtio_gpu_box box;
+	int ret;
+	u32 offset = args->offset;
+
+	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_virtio_gpu_obj(gobj);
+
+	ret = virtio_gpu_object_reserve(qobj, false);
+	if (ret)
+		goto out;
+
+	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+			      true, false);
+	if (unlikely(ret))
+		goto out_unres;
+
+	convert_to_hw_box(&box, &args->box);
+	if (!vgdev->has_virgl_3d) {
+		virtio_gpu_cmd_transfer_to_host_2d
+			(vgdev, qobj->hw_res_handle, offset,
+			 box.w, box.h, box.x, box.y, NULL);
+	} else {
+		virtio_gpu_cmd_transfer_to_host_3d
+			(vgdev, qobj->hw_res_handle,
+			 vfpriv ? vfpriv->ctx_id : 0, offset,
+			 args->level, &box, &fence);
+		reservation_object_add_excl_fence(qobj->tbo.resv,
+						  &fence->f);
+		fence_put(&fence->f);
+	}
+
+out_unres:
+	virtio_gpu_object_unreserve(qobj);
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return ret;
+}
+
+static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file)
+{
+	struct drm_virtgpu_3d_wait *args = data;
+	struct drm_gem_object *gobj = NULL;
+	struct virtio_gpu_object *qobj = NULL;
+	int ret;
+	bool nowait = false;
+
+	gobj = drm_gem_object_lookup(dev, file, args->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_virtio_gpu_obj(gobj);
+
+	if (args->flags & VIRTGPU_WAIT_NOWAIT)
+		nowait = true;
+	ret = virtio_gpu_object_wait(qobj, nowait);
+
+	drm_gem_object_unreference_unlocked(gobj);
+	return ret;
+}
+
+static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
+				     void *data, struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_virtgpu_get_caps *args = data;
+	int size;
+	int i;
+	int found_valid = -1;
+	int ret;
+	struct virtio_gpu_drv_cap_cache *cache_ent;
+	void *ptr;
+	if (vgdev->num_capsets == 0)
+		return -ENOSYS;
+
+	spin_lock(&vgdev->display_info_lock);
+	for (i = 0; i < vgdev->num_capsets; i++) {
+		if (vgdev->capsets[i].id == args->cap_set_id) {
+			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
+				found_valid = i;
+				break;
+			}
+		}
+	}
+
+	if (found_valid == -1) {
+		spin_unlock(&vgdev->display_info_lock);
+		return -EINVAL;
+	}
+
+	size = vgdev->capsets[found_valid].max_size;
+	if (args->size > size) {
+		spin_unlock(&vgdev->display_info_lock);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
+		if (cache_ent->id == args->cap_set_id &&
+		    cache_ent->version == args->cap_set_ver) {
+			ptr = cache_ent->caps_cache;
+			spin_unlock(&vgdev->display_info_lock);
+			goto copy_exit;
+		}
+	}
+	spin_unlock(&vgdev->display_info_lock);
+
+	/* not in cache - need to talk to hw */
+	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
+				  &cache_ent);
+
+	ret = wait_event_timeout(vgdev->resp_wq,
+				 atomic_read(&cache_ent->is_valid), 5 * HZ);
+
+	ptr = cache_ent->caps_cache;
+
+copy_exit:
+	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
+		return -EFAULT;
+
+	return 0;
+}
+
+struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
+	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
+			  virtio_gpu_resource_create_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	/* make transfer async to the main ring? - not sure, can we
+	   thread these in the underlying GL */
+	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
+			  virtio_gpu_transfer_from_host_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
+			  virtio_gpu_transfer_to_host_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+};
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 782766c00d70..06496a128162 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -52,6 +52,41 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 		     events_clear, &events_clear);
 }
 
+static void virtio_gpu_ctx_id_get(struct virtio_gpu_device *vgdev,
+				  uint32_t *resid)
+{
+	int handle;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&vgdev->ctx_id_idr_lock);
+	handle = idr_alloc(&vgdev->ctx_id_idr, NULL, 1, 0, 0);
+	spin_unlock(&vgdev->ctx_id_idr_lock);
+	idr_preload_end();
+	*resid = handle;
+}
+
+static void virtio_gpu_ctx_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
+{
+	spin_lock(&vgdev->ctx_id_idr_lock);
+	idr_remove(&vgdev->ctx_id_idr, id);
+	spin_unlock(&vgdev->ctx_id_idr_lock);
+}
+
+static void virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
+				      uint32_t nlen, const char *name,
+				      uint32_t *ctx_id)
+{
+	virtio_gpu_ctx_id_get(vgdev, ctx_id);
+	virtio_gpu_cmd_context_create(vgdev, *ctx_id, nlen, name);
+}
+
+static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
+				       uint32_t ctx_id)
+{
+	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
+	virtio_gpu_ctx_id_put(vgdev, ctx_id);
+}
+
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
 			       void (*work_func)(struct work_struct *work))
 {
@@ -60,6 +95,36 @@ static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
 	INIT_WORK(&vgvq->dequeue_work, work_func);
 }
 
+static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
+				   int num_capsets)
+{
+	int i, ret;
+
+	vgdev->capsets = kcalloc(num_capsets,
+				 sizeof(struct virtio_gpu_drv_capset),
+				 GFP_KERNEL);
+	if (!vgdev->capsets) {
+		DRM_ERROR("failed to allocate cap sets\n");
+		return;
+	}
+	for (i = 0; i < num_capsets; i++) {
+		virtio_gpu_cmd_get_capset_info(vgdev, i);
+		ret = wait_event_timeout(vgdev->resp_wq,
+					 vgdev->capsets[i].id > 0, 5 * HZ);
+		if (ret == 0) {
+			DRM_ERROR("timed out waiting for cap set %d\n", i);
+			kfree(vgdev->capsets);
+			vgdev->capsets = NULL;
+			return;
+		}
+		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
+			 i, vgdev->capsets[i].id,
+			 vgdev->capsets[i].max_version,
+			 vgdev->capsets[i].max_size);
+	}
+	vgdev->num_capsets = num_capsets;
+}
+
 int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	static vq_callback_t *callbacks[] = {
@@ -70,7 +135,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	struct virtio_gpu_device *vgdev;
 	/* this will expand later */
 	struct virtqueue *vqs[2];
-	u32 num_scanouts;
+	u32 num_scanouts, num_capsets;
 	int ret;
 
 	if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1))
@@ -96,9 +161,15 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&vgdev->fence_drv.lock);
 	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
+	INIT_LIST_HEAD(&vgdev->cap_cache);
 	INIT_WORK(&vgdev->config_changed_work,
 		  virtio_gpu_config_changed_work_func);
 
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
+		vgdev->has_virgl_3d = true;
+	DRM_INFO("virgl 3d acceleration %s\n",
+		 vgdev->has_virgl_3d ? "enabled" : "not available");
+
 	ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
 					    callbacks, names);
 	if (ret) {
@@ -129,6 +200,11 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 		ret = -EINVAL;
 		goto err_scanouts;
 	}
+	DRM_INFO("number of scanouts: %d\n", num_scanouts);
+
+	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
+		     num_capsets, &num_capsets);
+	DRM_INFO("number of cap sets: %d\n", num_capsets);
 
 	ret = virtio_gpu_modeset_init(vgdev);
 	if (ret)
@@ -137,6 +213,8 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	virtio_device_ready(vgdev->vdev);
 	vgdev->vqs_ready = true;
 
+	if (num_capsets)
+		virtio_gpu_get_capsets(vgdev, num_capsets);
 	virtio_gpu_cmd_get_display_info(vgdev);
 	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
 			   5 * HZ);
@@ -157,6 +235,16 @@ err_vqs:
 	return ret;
 }
 
+static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
+{
+	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;
+
+	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
+		kfree(cache_ent->caps_cache);
+		kfree(cache_ent);
+	}
+}
+
 int virtio_gpu_driver_unload(struct drm_device *dev)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -170,6 +258,49 @@ int virtio_gpu_driver_unload(struct drm_device *dev)
 	virtio_gpu_modeset_fini(vgdev);
 	virtio_gpu_ttm_fini(vgdev);
 	virtio_gpu_free_vbufs(vgdev);
+	virtio_gpu_cleanup_cap_cache(vgdev);
+	kfree(vgdev->capsets);
 	kfree(vgdev);
 	return 0;
 }
+
+int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv;
+	uint32_t id;
+	char dbgname[64], tmpname[TASK_COMM_LEN];
+
+	/* can't create contexts without 3d renderer */
+	if (!vgdev->has_virgl_3d)
+		return 0;
+
+	get_task_comm(tmpname, current);
+	snprintf(dbgname, sizeof(dbgname), "%s", tmpname);
+	dbgname[63] = 0;
+	/* allocate a virt GPU context for this opener */
+	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
+	if (!vfpriv)
+		return -ENOMEM;
+
+	virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);
+
+	vfpriv->ctx_id = id;
+	file->driver_priv = vfpriv;
+	return 0;
+}
+
+void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv;
+
+	if (!vgdev->has_virgl_3d)
+		return;
+
+	vfpriv = file->driver_priv;
+
+	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+	kfree(vfpriv);
+	file->driver_priv = NULL;
+}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index b092d7b9a292..9fd924cd2b7f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -32,6 +32,7 @@
 #include <ttm/ttm_module.h>
 #include <drm/drmP.h>
 #include <drm/drm.h>
+#include <drm/virtgpu_drm.h>
 #include "virtgpu_drv.h"
 
 #include <linux/delay.h>
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index ee25e9a4ae03..5a0f8a745b9d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -586,6 +586,47 @@ static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
 	drm_kms_helper_hotplug_event(vgdev->ddev);
 }
 
+static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
+					      struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_get_capset_info *cmd =
+		(struct virtio_gpu_get_capset_info *)vbuf->buf;
+	struct virtio_gpu_resp_capset_info *resp =
+		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
+	int i = le32_to_cpu(cmd->capset_index);
+
+	spin_lock(&vgdev->display_info_lock);
+	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
+	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
+	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+	spin_unlock(&vgdev->display_info_lock);
+	wake_up(&vgdev->resp_wq);
+}
+
+static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
+				     struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_get_capset *cmd =
+		(struct virtio_gpu_get_capset *)vbuf->buf;
+	struct virtio_gpu_resp_capset *resp =
+		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
+	struct virtio_gpu_drv_cap_cache *cache_ent;
+
+	spin_lock(&vgdev->display_info_lock);
+	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
+		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
+		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
+			memcpy(cache_ent->caps_cache, resp->capset_data,
+			       cache_ent->size);
+			atomic_set(&cache_ent->is_valid, 1);
+			break;
+		}
+	}
+	spin_unlock(&vgdev->display_info_lock);
+	wake_up(&vgdev->resp_wq);
+}
+
+
 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 {
 	struct virtio_gpu_ctrl_hdr *cmd_p;
@@ -609,6 +650,230 @@ int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 	return 0;
 }
 
+int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
+{
+	struct virtio_gpu_get_capset_info *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	void *resp_buf;
+
+	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
+			   GFP_KERNEL);
+	if (!resp_buf)
+		return -ENOMEM;
+
+	cmd_p = virtio_gpu_alloc_cmd_resp
+		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
+		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
+		 resp_buf);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
+	cmd_p->capset_index = cpu_to_le32(idx);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	return 0;
+}
+
+int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
+			      int idx, int version,
+			      struct virtio_gpu_drv_cap_cache **cache_p)
+{
+	struct virtio_gpu_get_capset *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	int max_size = vgdev->capsets[idx].max_size;
+	struct virtio_gpu_drv_cap_cache *cache_ent;
+	void *resp_buf;
+
+	if (idx > vgdev->num_capsets)
+		return -EINVAL;
+
+	if (version > vgdev->capsets[idx].max_version)
+		return -EINVAL;
+
+	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
+	if (!cache_ent)
+		return -ENOMEM;
+
+	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
+	if (!cache_ent->caps_cache) {
+		kfree(cache_ent);
+		return -ENOMEM;
+	}
+
+	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
+			   GFP_KERNEL);
+	if (!resp_buf) {
+		kfree(cache_ent->caps_cache);
+		kfree(cache_ent);
+		return -ENOMEM;
+	}
+
+	cache_ent->version = version;
+	cache_ent->id = vgdev->capsets[idx].id;
+	atomic_set(&cache_ent->is_valid, 0);
+	cache_ent->size = max_size;
+	spin_lock(&vgdev->display_info_lock);
+	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
+	spin_unlock(&vgdev->display_info_lock);
+
+	cmd_p = virtio_gpu_alloc_cmd_resp
+		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
+		 sizeof(struct virtio_gpu_resp_capset) + max_size,
+		 resp_buf);
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
+	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
+	cmd_p->capset_version = cpu_to_le32(version);
+	*cache_p = cache_ent;
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+
+	return 0;
+}
+
+void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
+				   uint32_t nlen, const char *name)
+{
+	struct virtio_gpu_ctx_create *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
+	cmd_p->hdr.ctx_id = cpu_to_le32(id);
+	cmd_p->nlen = cpu_to_le32(nlen);
+	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
+	cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
+				    uint32_t id)
+{
+	struct virtio_gpu_ctx_destroy *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
+	cmd_p->hdr.ctx_id = cpu_to_le32(id);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
+					    uint32_t ctx_id,
+					    uint32_t resource_id)
+{
+	struct virtio_gpu_ctx_resource *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
+	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+
+}
+
+void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
+					    uint32_t ctx_id,
+					    uint32_t resource_id)
+{
+	struct virtio_gpu_ctx_resource *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
+	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void
+virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
+				  struct virtio_gpu_resource_create_3d *rc_3d,
+				  struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_resource_create_3d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	*cmd_p = *rc_3d;
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
+	cmd_p->hdr.flags = 0;
+
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+					uint32_t resource_id, uint32_t ctx_id,
+					uint64_t offset, uint32_t level,
+					struct virtio_gpu_box *box,
+					struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_transfer_host_3d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
+	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->box = *box;
+	cmd_p->offset = cpu_to_le64(offset);
+	cmd_p->level = cpu_to_le32(level);
+
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
+					  uint32_t resource_id, uint32_t ctx_id,
+					  uint64_t offset, uint32_t level,
+					  struct virtio_gpu_box *box,
+					  struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_transfer_host_3d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
+	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->box = *box;
+	cmd_p->offset = cpu_to_le64(offset);
+	cmd_p->level = cpu_to_le32(level);
+
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
+			   void *data, uint32_t data_size,
+			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_cmd_submit *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	vbuf->data_buf = data;
+	vbuf->data_size = data_size;
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
+	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+	cmd_p->size = cpu_to_le32(data_size);
+
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
 			     uint32_t resource_id,
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 2d9a25daab05..38d437096c35 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -17,3 +17,4 @@ header-y += tegra_drm.h
 header-y += via_drm.h
 header-y += vmwgfx_drm.h
 header-y += msm_drm.h
+header-y += virtgpu_drm.h
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
new file mode 100644
index 000000000000..fc9e2d6e5e2f
--- /dev/null
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRTGPU_DRM_H
+#define VIRTGPU_DRM_H
+
+#include <stddef.h>
+#include "drm/drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel
+ * compatibility. Keep fields aligned to their size.
+ */
+
+#define DRM_VIRTGPU_MAP 0x01
+#define DRM_VIRTGPU_EXECBUFFER 0x02
+#define DRM_VIRTGPU_GETPARAM 0x03
+#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
+#define DRM_VIRTGPU_RESOURCE_INFO 0x05
+#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
+#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
+#define DRM_VIRTGPU_WAIT 0x08
+#define DRM_VIRTGPU_GET_CAPS 0x09
+
+struct drm_virtgpu_map {
+	uint64_t offset; /* use for mmap system call */
+	uint32_t handle;
+	uint32_t pad;
+};
+
+struct drm_virtgpu_execbuffer {
+	uint32_t flags;		/* for future use */
+	uint32_t size;
+	uint64_t command;	/* void* */
+	uint64_t bo_handles;
+	uint32_t num_bo_handles;
+	uint32_t pad;
+};
+
+#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+
+struct drm_virtgpu_getparam {
+	uint64_t param;
+	uint64_t value;
+};
+
+/* NO_BO flags? NO resource flag? */
+/* resource flag for y_0_top */
+struct drm_virtgpu_resource_create {
+	uint32_t target;
+	uint32_t format;
+	uint32_t bind;
+	uint32_t width;
+	uint32_t height;
+	uint32_t depth;
+	uint32_t array_size;
+	uint32_t last_level;
+	uint32_t nr_samples;
+	uint32_t flags;
+	uint32_t bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
+	uint32_t res_handle;  /* returned by kernel */
+	uint32_t size;        /* validate transfer in the host */
+	uint32_t stride;      /* validate transfer in the host */
+};
+
+struct drm_virtgpu_resource_info {
+	uint32_t bo_handle;
+	uint32_t res_handle;
+	uint32_t size;
+	uint32_t stride;
+};
+
+struct drm_virtgpu_3d_box {
+	uint32_t x;
+	uint32_t y;
+	uint32_t z;
+	uint32_t w;
+	uint32_t h;
+	uint32_t d;
+};
+
+struct drm_virtgpu_3d_transfer_to_host {
+	uint32_t bo_handle;
+	struct drm_virtgpu_3d_box box;
+	uint32_t level;
+	uint32_t offset;
+};
+
+struct drm_virtgpu_3d_transfer_from_host {
+	uint32_t bo_handle;
+	struct drm_virtgpu_3d_box box;
+	uint32_t level;
+	uint32_t offset;
+};
+
+#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
+struct drm_virtgpu_3d_wait {
+	uint32_t handle; /* 0 is an invalid handle */
+	uint32_t flags;
+};
+
+struct drm_virtgpu_get_caps {
+	uint32_t cap_set_id;
+	uint32_t cap_set_ver;
+	uint64_t addr;
+	uint32_t size;
+	uint32_t pad;
+};
+
+#define DRM_IOCTL_VIRTGPU_MAP \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
+
+#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+		struct drm_virtgpu_execbuffer)
+
+#define DRM_IOCTL_VIRTGPU_GETPARAM \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
+		struct drm_virtgpu_getparam)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
+		struct drm_virtgpu_resource_create)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
+		 struct drm_virtgpu_resource_info)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
+		struct drm_virtgpu_3d_transfer_from_host)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
+		struct drm_virtgpu_3d_transfer_to_host)
+
+#define DRM_IOCTL_VIRTGPU_WAIT \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
+		struct drm_virtgpu_3d_wait)
+
+#define DRM_IOCTL_VIRTGPU_GET_CAPS \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
+	struct drm_virtgpu_get_caps)
+
+#endif
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 478be5270e26..7a63faa9065c 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -40,6 +40,8 @@
 
 #include <linux/types.h>
 
+#define VIRTIO_GPU_F_VIRGL 0
+
 enum virtio_gpu_ctrl_type {
 	VIRTIO_GPU_UNDEFINED = 0,
 
@@ -52,6 +54,18 @@ enum virtio_gpu_ctrl_type {
 	VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
 	VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
 	VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
+	VIRTIO_GPU_CMD_GET_CAPSET_INFO,
+	VIRTIO_GPU_CMD_GET_CAPSET,
+
+	/* 3d commands */
+	VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
+	VIRTIO_GPU_CMD_CTX_DESTROY,
+	VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE,
+	VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE,
+	VIRTIO_GPU_CMD_RESOURCE_CREATE_3D,
+	VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
+	VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
+	VIRTIO_GPU_CMD_SUBMIT_3D,
 
 	/* cursor commands */
 	VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
@@ -60,6 +74,8 @@ enum virtio_gpu_ctrl_type {
 	/* success responses */
 	VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
 	VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
+	VIRTIO_GPU_RESP_OK_CAPSET_INFO,
+	VIRTIO_GPU_RESP_OK_CAPSET,
 
 	/* error responses */
 	VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -180,13 +196,107 @@ struct virtio_gpu_resp_display_info {
 	} pmodes[VIRTIO_GPU_MAX_SCANOUTS];
 };
 
+/* data passed in the control vq, 3d related */
+
+struct virtio_gpu_box {
+	__le32 x, y, z;
+	__le32 w, h, d;
+};
+
+/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */
+struct virtio_gpu_transfer_host_3d {
+	struct virtio_gpu_ctrl_hdr hdr;
+	struct virtio_gpu_box box;
+	__le64 offset;
+	__le32 resource_id;
+	__le32 level;
+	__le32 stride;
+	__le32 layer_stride;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_3D */
+#define VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP (1 << 0)
+struct virtio_gpu_resource_create_3d {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 resource_id;
+	__le32 target;
+	__le32 format;
+	__le32 bind;
+	__le32 width;
+	__le32 height;
+	__le32 depth;
+	__le32 array_size;
+	__le32 last_level;
+	__le32 nr_samples;
+	__le32 flags;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_CTX_CREATE */
+struct virtio_gpu_ctx_create {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 nlen;
+	__le32 padding;
+	char debug_name[64];
+};
+
+/* VIRTIO_GPU_CMD_CTX_DESTROY */
+struct virtio_gpu_ctx_destroy {
+	struct virtio_gpu_ctrl_hdr hdr;
+};
+
+/* VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE */
+struct virtio_gpu_ctx_resource {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 resource_id;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_SUBMIT_3D */
+struct virtio_gpu_cmd_submit {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 size;
+	__le32 padding;
+};
+
+#define VIRTIO_GPU_CAPSET_VIRGL 1
+
+/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
+struct virtio_gpu_get_capset_info {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 capset_index;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_CAPSET_INFO */
+struct virtio_gpu_resp_capset_info {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 capset_id;
+	__le32 capset_max_version;
+	__le32 capset_max_size;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_GET_CAPSET */
+struct virtio_gpu_get_capset {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 capset_id;
+	__le32 capset_version;
+};
+
+/* VIRTIO_GPU_RESP_OK_CAPSET */
+struct virtio_gpu_resp_capset {
+	struct virtio_gpu_ctrl_hdr hdr;
+	uint8_t capset_data[];
+};
+
 #define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
 
 struct virtio_gpu_config {
 	__u32 events_read;
 	__u32 events_clear;
 	__u32 num_scanouts;
-	__u32 reserved;
+	__u32 num_capsets;
 };
 
 /* simple formats for fbcon/X use */
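As a closing illustration (not part of the commit), a hedged userspace sketch of
fetching the virgl capability set through the VIRTGPU_GET_CAPS path added above;
the capset id matches VIRTIO_GPU_CAPSET_VIRGL, the size must come from the
capset info the device reported, and everything else is an assumption:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

/* Query the virgl capset (cap_set_id 1, i.e. VIRTIO_GPU_CAPSET_VIRGL).
 * size must not exceed the capset_max_size the device reported, or the
 * ioctl returns -EINVAL; the blob layout itself is defined by the host
 * renderer, not by this kernel interface. */
static void *query_virgl_caps(int fd, uint32_t size)
{
	void *buf = calloc(1, size);
	struct drm_virtgpu_get_caps args = {
		.cap_set_id = 1,
		.cap_set_ver = 1,
		.addr = (uint64_t)(uintptr_t)buf,
		.size = size,
	};

	if (!buf)
		return NULL;
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args)) {
		free(buf);
		return NULL;
	}
	return buf;	/* caller frees */
}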