author		Thomas Hellstrom <thellstrom@vmware.com>	2009-12-22 10:53:41 -0500
committer	Dave Airlie <airlied@redhat.com>	2009-12-22 19:06:24 -0500
commit		7a73ba7469cbea631050094fd14f73acebb97cf9 (patch)
tree		2c1bc2b28a395578967343e14a3a4b90c66e55f5 /drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
parent		3d3a5b3290043618e8409f3fb68a63de6156fdd4 (diff)
drm/vmwgfx: Use TTM handles instead of SIDs as user-space surface handles.
Improve the command verifier to catch all occurrences of surface handles,
and translate them to SIDs. This way DMA buffers and 3D surfaces share a
common handle space, which makes it possible for the kms code to
differentiate between them.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
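The pattern at the heart of the patch: the verifier walks every command,
finds each surface reference, looks the user-space TTM handle up once, and
rewrites the command in place with the device SID, keeping a one-entry
cache because consecutive commands tend to reference the same surface.
Below is a minimal, self-contained sketch of that caching translation, not
the driver code itself: lookup_surface_id() is a hypothetical stand-in for
vmw_surface_check(), and the context struct is cut down to the three
fields the cache needs.

/* Sketch only: translate a user-space surface handle (*sid) to a
 * device SID in place, caching the last translation.
 * lookup_surface_id() is a hypothetical stand-in for
 * vmw_surface_check(); the id mapping exists only for the demo. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SVGA3D_INVALID_ID ((uint32_t) -1)

struct sw_context {
	bool     sid_valid;       /* cache holds a valid translation */
	uint32_t last_sid;        /* last user-space handle seen */
	uint32_t sid_translation; /* device SID it translated to */
};

/* Hypothetical lookup; returns 0 on success and fills *real_id. */
static int lookup_surface_id(uint32_t handle, uint32_t *real_id)
{
	*real_id = handle + 0x1000; /* fake handle -> SID mapping */
	return 0;
}

static int sid_check(struct sw_context *ctx, uint32_t *sid)
{
	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (!ctx->sid_valid || *sid != ctx->last_sid) {
		uint32_t real_id;
		int ret = lookup_surface_id(*sid, &real_id);

		if (ret != 0)
			return ret; /* unknown handle: reject the stream */

		ctx->last_sid = *sid;
		ctx->sid_valid = true;
		ctx->sid_translation = real_id;
		*sid = real_id; /* patch the command stream in place */
	} else {
		*sid = ctx->sid_translation; /* cache hit */
	}
	return 0;
}

int main(void)
{
	struct sw_context ctx = { 0 };
	uint32_t cmd_sid = 42; /* handle embedded in a 3D command */

	if (sid_check(&ctx, &cmd_sid) == 0)
		printf("patched SID: 0x%08x\n", cmd_sid);
	return 0;
}

The single-entry cache mirrors sw_context->last_sid/sid_translation in the
patch below; a full handle lookup only happens when the stream switches
surfaces.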
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c	153
1 file changed, 129 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7e73cf51e298..2e92da567403 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -73,21 +73,32 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 
 static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
-			     uint32_t sid)
+			     uint32_t *sid)
 {
-	if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) &&
-		     sid != SVGA3D_INVALID_ID)) {
-		int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid);
+	if (*sid == SVGA3D_INVALID_ID)
+		return 0;
+
+	if (unlikely((!sw_context->sid_valid ||
+		      *sid != sw_context->last_sid))) {
+		int real_id;
+		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
+					    *sid, &real_id);
 
 		if (unlikely(ret != 0)) {
-			DRM_ERROR("Could ot find or use surface %u\n",
-				  (unsigned) sid);
+			DRM_ERROR("Could ot find or use surface 0x%08x "
+				  "address 0x%08lx\n",
+				  (unsigned int) *sid,
+				  (unsigned long) sid);
 			return ret;
 		}
 
-		sw_context->last_sid = sid;
+		sw_context->last_sid = *sid;
 		sw_context->sid_valid = true;
-	}
+		*sid = real_id;
+		sw_context->sid_translation = real_id;
+	} else
+		*sid = sw_context->sid_translation;
+
 	return 0;
 }
 
@@ -107,7 +118,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		return ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid);
+	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+	return ret;
 }
 
 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -121,10 +133,10 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
+	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
 	if (unlikely(ret != 0))
 		return ret;
-	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
+	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
 }
 
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -138,10 +150,10 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
+	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
 	if (unlikely(ret != 0))
 		return ret;
-	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
+	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
 }
 
 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -154,7 +166,7 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 	} *cmd;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid);
+	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
 }
 
 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -167,7 +179,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 	} *cmd;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid);
+	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }
 
 static int vmw_cmd_dma(struct vmw_private *dev_priv,
@@ -187,12 +199,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	uint32_t cur_validate_node;
 	struct ttm_validate_buffer *val_buf;
 
-
 	cmd = container_of(header, struct vmw_dma_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
-	if (unlikely(ret != 0))
-		return ret;
-
 	handle = cmd->dma.guest.ptr.gmrId;
 	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
@@ -228,14 +235,23 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		++sw_context->cur_val_buf;
 	}
 
-	ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile,
-				      cmd->dma.host.sid, &srf);
+	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+					     cmd->dma.host.sid, &srf);
 	if (ret) {
 		DRM_ERROR("could not find surface\n");
 		goto out_no_reloc;
 	}
 
+	/**
+	 * Patch command stream with device SID.
+	 */
+
+	cmd->dma.host.sid = srf->res.id;
 	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
+	/**
+	 * FIXME: May deadlock here when called from the
+	 * command parsing code.
+	 */
 	vmw_surface_unreference(&srf);
 
 out_no_reloc:
@@ -243,6 +259,90 @@ out_no_reloc:
 	return ret;
 }
 
+static int vmw_cmd_draw(struct vmw_private *dev_priv,
+			struct vmw_sw_context *sw_context,
+			SVGA3dCmdHeader *header)
+{
+	struct vmw_draw_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDrawPrimitives body;
+	} *cmd;
+	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
+		(unsigned long)header + sizeof(*cmd));
+	SVGA3dPrimitiveRange *range;
+	uint32_t i;
+	uint32_t maxnum;
+	int ret;
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	cmd = container_of(header, struct vmw_draw_cmd, header);
+	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
+
+	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
+		DRM_ERROR("Illegal number of vertex declarations.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
+		ret = vmw_cmd_sid_check(dev_priv, sw_context,
+					&decl->array.surfaceId);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	maxnum = (header->size - sizeof(cmd->body) -
+		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
+	if (unlikely(cmd->body.numRanges > maxnum)) {
+		DRM_ERROR("Illegal number of index ranges.\n");
+		return -EINVAL;
+	}
+
+	range = (SVGA3dPrimitiveRange *) decl;
+	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
+		ret = vmw_cmd_sid_check(dev_priv, sw_context,
+					&range->indexArray.surfaceId);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+
+static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_tex_state_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetTextureState state;
+	};
+
+	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+		((unsigned long) header + header->size + sizeof(header));
+	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+	int ret;
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	for (; cur_state < last_state; ++cur_state) {
+		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
+			continue;
+
+		ret = vmw_cmd_sid_check(dev_priv, sw_context,
+					&cur_state->value);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	return 0;
+}
+
 
 typedef int (*vmw_cmd_func) (struct vmw_private *,
 			     struct vmw_sw_context *,
@@ -264,7 +364,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
 		    &vmw_cmd_set_render_target_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
@@ -276,7 +376,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
@@ -291,6 +391,7 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 		      void *buf, uint32_t *size)
 {
 	uint32_t cmd_id;
+	uint32_t size_remaining = *size;
 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
 	int ret;
 
@@ -304,6 +405,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
 
 	cmd_id -= SVGA_3D_CMD_BASE;
+	if (unlikely(*size > size_remaining))
+		goto out_err;
+
 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
 		goto out_err;
 
@@ -326,6 +430,7 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 	int ret;
 
 	while (cur_size > 0) {
+		size = cur_size;
 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
 		if (unlikely(ret != 0))
 			return ret;
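The last three hunks combine into a bounds check on the parser itself:
vmw_cmd_check_all() now tells vmw_cmd_check() how many bytes remain in the
stream on each iteration, and vmw_cmd_check() rejects any command whose
declared size would overrun that. A minimal standalone sketch of the same
loop under invented names (cmd_check/cmd_check_all mirror the driver
functions but are not them):

/* Sketch only: a bounded command-stream parse loop. Each iteration
 * passes the number of bytes remaining, and the checker rejects any
 * header whose declared size overruns that. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cmd_header {
	uint32_t id;
	uint32_t size; /* body size in bytes, excluding the header */
};

static int cmd_check(const uint8_t *buf, uint32_t *size)
{
	struct cmd_header header;
	uint32_t size_remaining = *size;

	memcpy(&header, buf, sizeof(header));
	*size = header.size + sizeof(header);

	/* The fix from the patch: never trust the declared size. */
	if (*size > size_remaining)
		return -1;

	/* ... dispatch to a per-command verifier here ... */
	return 0;
}

static int cmd_check_all(const uint8_t *buf, uint32_t cur_size)
{
	while (cur_size > 0) {
		uint32_t size = cur_size; /* bytes left in the stream */

		if (cur_size < sizeof(struct cmd_header))
			return -1; /* truncated header */
		if (cmd_check(buf, &size) != 0)
			return -1;
		buf += size;
		cur_size -= size;
	}
	return 0;
}

int main(void)
{
	/* One well-formed zero-length command followed by nothing. */
	struct cmd_header h = { .id = 1, .size = 0 };
	uint8_t stream[sizeof(h)];

	memcpy(stream, &h, sizeof(h));
	printf("stream %s\n",
	       cmd_check_all(stream, sizeof(stream)) == 0 ? "ok" : "rejected");
	return 0;
}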