author    Thomas Hellstrom <thellstrom@vmware.com>	2009-12-22 10:53:41 -0500
committer Dave Airlie <airlied@redhat.com>	2009-12-22 19:06:24 -0500
commit    7a73ba7469cbea631050094fd14f73acebb97cf9 (patch)
tree      2c1bc2b28a395578967343e14a3a4b90c66e55f5 /drivers
parent    3d3a5b3290043618e8409f3fb68a63de6156fdd4 (diff)
drm/vmwgfx: Use TTM handles instead of SIDs as user-space surface handles.
Improve the command verifier to catch all occurrences of surface handles,
and translate them to SIDs. This way DMA buffers and 3D surfaces share a
common handle space, which makes it possible for the kms code to
differentiate between them.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
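
In outline: user space now refers to surfaces by TTM object handle, and the
command verifier resolves each handle to the device surface ID (SID) once,
caches the translation, and patches the command stream in place before
submission. A minimal stand-alone C sketch of that pattern (invented types
and a stub resolver, not the driver's API; the real lookup goes through TTM
base objects):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define INVALID_ID 0xffffffffu

/* One-entry translation cache, mirroring the last_sid/sid_translation
 * fields added to struct vmw_sw_context. */
struct verifier_ctx {
	uint32_t last_handle;	/* last user handle we resolved */
	uint32_t translation;	/* device SID it translated to  */
	bool valid;		/* cache holds a valid entry    */
};

/* Stub handle->SID resolver; a real driver would consult the TTM
 * object table here. The mapping is arbitrary for illustration. */
static int resolve_handle(uint32_t handle, uint32_t *sid)
{
	if (handle == INVALID_ID)
		return -1;
	*sid = handle ^ 0x1000;
	return 0;
}

/* Patch one surface reference in the command stream in place. */
static int patch_sid(struct verifier_ctx *ctx, uint32_t *sid)
{
	uint32_t real_id;

	if (*sid == INVALID_ID)
		return 0;		/* nothing to translate */

	if (!ctx->valid || *sid != ctx->last_handle) {
		if (resolve_handle(*sid, &real_id))
			return -1;	/* unknown or unusable handle */
		ctx->last_handle = *sid;
		ctx->translation = real_id;
		ctx->valid = true;
		*sid = real_id;
	} else {
		*sid = ctx->translation;	/* cache hit */
	}
	return 0;
}

int main(void)
{
	struct verifier_ctx ctx = { .valid = false };
	uint32_t stream[] = { 42, 42, INVALID_ID };

	for (unsigned int i = 0; i < 3; ++i)
		if (patch_sid(&ctx, &stream[i]))
			return 1;
	printf("%u %u %u\n", (unsigned int) stream[0],
	       (unsigned int) stream[1], (unsigned int) stream[2]);
	return 0;
}
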
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  | 153
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c      |   8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 149
4 files changed, 209 insertions, 111 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 43546d09d1b0..e61bd85b6975 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -123,6 +123,7 @@ struct vmw_sw_context{
 	uint32_t last_cid;
 	bool cid_valid;
 	uint32_t last_sid;
+	uint32_t sid_translation;
 	bool sid_valid;
 	struct ttm_object_file *tfile;
 	struct list_head validate_nodes;
@@ -317,9 +318,10 @@ extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_init(struct vmw_private *dev_priv,
 			    struct vmw_surface *srf,
 			    void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup(struct vmw_private *dev_priv,
-				   struct ttm_object_file *tfile,
-				   int sid, struct vmw_surface **out);
+extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+					  struct ttm_object_file *tfile,
+					  uint32_t handle,
+					  struct vmw_surface **out);
 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -328,7 +330,7 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 				       struct drm_file *file_priv);
 extern int vmw_surface_check(struct vmw_private *dev_priv,
 			     struct ttm_object_file *tfile,
-			     int id);
+			     uint32_t handle, int *id);
 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
 extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
 			   struct vmw_dma_buffer *vmw_bo,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7e73cf51e298..2e92da567403 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -73,21 +73,32 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 
 static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
-			     uint32_t sid)
+			     uint32_t *sid)
 {
-	if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) &&
-		     sid != SVGA3D_INVALID_ID)) {
-		int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid);
+	if (*sid == SVGA3D_INVALID_ID)
+		return 0;
+
+	if (unlikely((!sw_context->sid_valid ||
+		      *sid != sw_context->last_sid))) {
+		int real_id;
+		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
+					    *sid, &real_id);
 
 		if (unlikely(ret != 0)) {
-			DRM_ERROR("Could not find or use surface %u\n",
-				  (unsigned) sid);
+			DRM_ERROR("Could not find or use surface 0x%08x "
+				  "address 0x%08lx\n",
+				  (unsigned int) *sid,
+				  (unsigned long) sid);
 			return ret;
 		}
 
-		sw_context->last_sid = sid;
+		sw_context->last_sid = *sid;
 		sw_context->sid_valid = true;
-	}
+		*sid = real_id;
+		sw_context->sid_translation = real_id;
+	} else
+		*sid = sw_context->sid_translation;
+
 	return 0;
 }
 
@@ -107,7 +118,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		return ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid);
+	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+	return ret;
 }
 
 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -121,10 +133,10 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
+	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
 	if (unlikely(ret != 0))
 		return ret;
-	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
+	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
 }
 
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -138,10 +150,10 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
+	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
 	if (unlikely(ret != 0))
 		return ret;
-	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
+	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
 }
 
 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -154,7 +166,7 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 	} *cmd;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid);
+	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
 }
 
 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -167,7 +179,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 	} *cmd;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
-	return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid);
+	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }
 
 static int vmw_cmd_dma(struct vmw_private *dev_priv,
@@ -187,12 +199,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	uint32_t cur_validate_node;
 	struct ttm_validate_buffer *val_buf;
 
-
 	cmd = container_of(header, struct vmw_dma_cmd, header);
-	ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
-	if (unlikely(ret != 0))
-		return ret;
-
 	handle = cmd->dma.guest.ptr.gmrId;
 	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
@@ -228,14 +235,23 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		++sw_context->cur_val_buf;
 	}
 
-	ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile,
-				      cmd->dma.host.sid, &srf);
+	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+					     cmd->dma.host.sid, &srf);
 	if (ret) {
 		DRM_ERROR("could not find surface\n");
 		goto out_no_reloc;
 	}
 
+	/**
+	 * Patch command stream with device SID.
+	 */
+
+	cmd->dma.host.sid = srf->res.id;
 	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
+	/**
+	 * FIXME: May deadlock here when called from the
+	 * command parsing code.
+	 */
 	vmw_surface_unreference(&srf);
 
 out_no_reloc:
@@ -243,6 +259,90 @@ out_no_reloc:
 	return ret;
 }
 
+static int vmw_cmd_draw(struct vmw_private *dev_priv,
+			struct vmw_sw_context *sw_context,
+			SVGA3dCmdHeader *header)
+{
+	struct vmw_draw_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDrawPrimitives body;
+	} *cmd;
+	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
+		(unsigned long)header + sizeof(*cmd));
+	SVGA3dPrimitiveRange *range;
+	uint32_t i;
+	uint32_t maxnum;
+	int ret;
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	cmd = container_of(header, struct vmw_draw_cmd, header);
+	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
+
+	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
+		DRM_ERROR("Illegal number of vertex declarations.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
+		ret = vmw_cmd_sid_check(dev_priv, sw_context,
+					&decl->array.surfaceId);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	maxnum = (header->size - sizeof(cmd->body) -
+		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
+	if (unlikely(cmd->body.numRanges > maxnum)) {
+		DRM_ERROR("Illegal number of index ranges.\n");
+		return -EINVAL;
+	}
+
+	range = (SVGA3dPrimitiveRange *) decl;
+	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
+		ret = vmw_cmd_sid_check(dev_priv, sw_context,
+					&range->indexArray.surfaceId);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+
+static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_tex_state_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetTextureState state;
+	};
+
+	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+	  ((unsigned long) header + header->size + sizeof(header));
+	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+	int ret;
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	for (; cur_state < last_state; ++cur_state) {
+		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
+			continue;
+
+		ret = vmw_cmd_sid_check(dev_priv, sw_context,
+					&cur_state->value);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	return 0;
+}
+
 
 typedef int (*vmw_cmd_func) (struct vmw_private *,
 			     struct vmw_sw_context *,
@@ -264,7 +364,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
 		    &vmw_cmd_set_render_target_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
@@ -276,7 +376,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
@@ -291,6 +391,7 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 		  void *buf, uint32_t *size)
 {
 	uint32_t cmd_id;
+	uint32_t size_remaining = *size;
 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
 	int ret;
 
@@ -304,6 +405,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
 
 	cmd_id -= SVGA_3D_CMD_BASE;
+	if (unlikely(*size > size_remaining))
+		goto out_err;
+
 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
 		goto out_err;
 
@@ -326,6 +430,7 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 	int ret;
 
 	while (cur_size > 0) {
+		size = cur_size;
 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
 		if (unlikely(ret != 0))
 			return ret;
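
A note on the size_remaining check added to vmw_cmd_check() above: a
malformed command header must never claim more bytes than remain in the
submitted buffer, or the parser would walk past the end of it. A
stand-alone C sketch of that validation loop, assuming a hypothetical
fixed-size header rather than the SVGA wire format:

#include <stdint.h>
#include <stddef.h>
#include <errno.h>

/* Hypothetical fixed-size command header for illustration. */
struct cmd_header {
	uint32_t id;
	uint32_t size;	/* payload bytes following the header */
};

/* Validate one command against the bytes remaining in the buffer.
 * Returns the total size consumed, or -EINVAL on overflow. */
static long check_one(const uint8_t *buf, size_t remaining)
{
	const struct cmd_header *header;

	if (remaining < sizeof(*header))
		return -EINVAL;
	header = (const struct cmd_header *)buf;

	/* The rule this patch enforces, written to avoid integer
	 * overflow: the claimed payload may not exceed what is left
	 * after the header itself. */
	if (header->size > remaining - sizeof(*header))
		return -EINVAL;

	return (long)(sizeof(*header) + header->size);
}

int check_all(const uint8_t *buf, size_t size)
{
	/* consumed is always >= sizeof(struct cmd_header), so the
	 * loop makes progress and terminates. */
	while (size > 0) {
		long consumed = check_one(buf, size);
		if (consumed < 0)
			return (int)consumed;
		buf += consumed;
		size -= (size_t)consumed;
	}
	return 0;
}
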
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e9403be446fe..b1af76e371c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -106,8 +106,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	int ret;
 
 	if (handle) {
-		ret = vmw_user_surface_lookup(dev_priv, tfile,
-					      handle, &surface);
+		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+						     handle, &surface);
 		if (!ret) {
 			if (!surface->snooper.image) {
 				DRM_ERROR("surface not suitable for cursor\n");
@@ -704,8 +704,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	struct vmw_dma_buffer *bo = NULL;
 	int ret;
 
-	ret = vmw_user_surface_lookup(dev_priv, tfile,
-				      mode_cmd->handle, &surface);
+	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+					     mode_cmd->handle, &surface);
 	if (ret)
 		goto try_dmabuf;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a1ceed0c8e07..c012d5927f65 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -488,28 +488,44 @@ static void vmw_user_surface_free(struct vmw_resource *res)
 	kfree(user_srf);
 }
 
-int vmw_user_surface_lookup(struct vmw_private *dev_priv,
-			    struct ttm_object_file *tfile,
-			    int sid, struct vmw_surface **out)
+int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+				   struct ttm_object_file *tfile,
+				   uint32_t handle, struct vmw_surface **out)
 {
 	struct vmw_resource *res;
 	struct vmw_surface *srf;
 	struct vmw_user_surface *user_srf;
+	struct ttm_base_object *base;
+	int ret = -EINVAL;
 
-	res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid);
-	if (unlikely(res == NULL))
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL))
 		return -EINVAL;
 
-	if (res->res_free != &vmw_user_surface_free)
-		return -EINVAL;
+	if (unlikely(base->object_type != VMW_RES_SURFACE))
+		goto out_bad_resource;
 
-	srf = container_of(res, struct vmw_surface, res);
-	user_srf = container_of(srf, struct vmw_user_surface, srf);
-	if (user_srf->base.tfile != tfile && !user_srf->base.shareable)
-		return -EPERM;
+	user_srf = container_of(base, struct vmw_user_surface, base);
+	srf = &user_srf->srf;
+	res = &srf->res;
+
+	read_lock(&dev_priv->resource_lock);
+
+	if (!res->avail || res->res_free != &vmw_user_surface_free) {
+		read_unlock(&dev_priv->resource_lock);
+		goto out_bad_resource;
+	}
+
+	kref_get(&res->kref);
+	read_unlock(&dev_priv->resource_lock);
 
 	*out = srf;
-	return 0;
+	ret = 0;
+
+out_bad_resource:
+	ttm_base_object_unref(&base);
+
+	return ret;
 }
 
 static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
@@ -526,35 +542,10 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_resource *res;
-	struct vmw_surface *srf;
-	struct vmw_user_surface *user_srf;
 	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	int ret = 0;
-
-	res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid);
-	if (unlikely(res == NULL))
-		return -EINVAL;
-
-	if (res->res_free != &vmw_user_surface_free) {
-		ret = -EINVAL;
-		goto out;
-	}
 
-	srf = container_of(res, struct vmw_surface, res);
-	user_srf = container_of(srf, struct vmw_user_surface, srf);
-	if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
-		ret = -EPERM;
-		goto out;
-	}
-
-	ttm_ref_object_base_unref(tfile, user_srf->base.hash.key,
-				  TTM_REF_USAGE);
-out:
-	vmw_resource_unreference(&res);
-	return ret;
+	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
 }
 
 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -649,7 +640,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	}
 	srf->snooper.crtc = NULL;
 
-	rep->sid = res->id;
+	rep->sid = user_srf->base.hash.key;
+	if (rep->sid == SVGA3D_INVALID_ID)
+		DRM_ERROR("Created bad Surface ID.\n");
+
 	vmw_resource_unreference(&res);
 	return 0;
 out_err1:
@@ -662,39 +656,33 @@ out_err0:
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = vmw_priv(dev);
 	union drm_vmw_surface_reference_arg *arg =
 	    (union drm_vmw_surface_reference_arg *)data;
 	struct drm_vmw_surface_arg *req = &arg->req;
 	struct drm_vmw_surface_create_req *rep = &arg->rep;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_resource *res;
 	struct vmw_surface *srf;
 	struct vmw_user_surface *user_srf;
 	struct drm_vmw_size __user *user_sizes;
-	int ret;
+	struct ttm_base_object *base;
+	int ret = -EINVAL;
 
-	res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid);
-	if (unlikely(res == NULL))
+	base = ttm_base_object_lookup(tfile, req->sid);
+	if (unlikely(base == NULL)) {
+		DRM_ERROR("Could not find surface to reference.\n");
 		return -EINVAL;
-
-	if (res->res_free != &vmw_user_surface_free) {
-		ret = -EINVAL;
-		goto out;
 	}
 
-	srf = container_of(res, struct vmw_surface, res);
-	user_srf = container_of(srf, struct vmw_user_surface, srf);
-	if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
-		DRM_ERROR("Tried to reference none shareable surface\n");
-		ret = -EPERM;
-		goto out;
-	}
+	if (unlikely(base->object_type != VMW_RES_SURFACE))
+		goto out_bad_resource;
+
+	user_srf = container_of(base, struct vmw_user_surface, base);
+	srf = &user_srf->srf;
 
 	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not add a reference to a surface.\n");
-		goto out;
+		goto out_no_reference;
 	}
 
 	rep->flags = srf->flags;
@@ -706,40 +694,43 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	if (user_sizes)
 		ret = copy_to_user(user_sizes, srf->sizes,
 				   srf->num_sizes * sizeof(*srf->sizes));
-	if (unlikely(ret != 0)) {
+	if (unlikely(ret != 0))
 		DRM_ERROR("copy_to_user failed %p %u\n",
 			  user_sizes, srf->num_sizes);
-		/**
-		 * FIXME: Unreference surface here?
-		 */
-		goto out;
-	}
-out:
-	vmw_resource_unreference(&res);
+out_bad_resource:
+out_no_reference:
+	ttm_base_object_unref(&base);
+
 	return ret;
 }
 
 int vmw_surface_check(struct vmw_private *dev_priv,
 		      struct ttm_object_file *tfile,
-		      int id)
+		      uint32_t handle, int *id)
 {
-	struct vmw_resource *res;
-	int ret = 0;
+	struct ttm_base_object *base;
+	struct vmw_user_surface *user_srf;
 
-	read_lock(&dev_priv->resource_lock);
-	res = idr_find(&dev_priv->surface_idr, id);
-	if (res && res->avail) {
-		struct vmw_surface *srf =
-			container_of(res, struct vmw_surface, res);
-		struct vmw_user_surface *usrf =
-			container_of(srf, struct vmw_user_surface, srf);
+	int ret = -EPERM;
 
-		if (usrf->base.tfile != tfile && !usrf->base.shareable)
-			ret = -EPERM;
-	} else
-		ret = -EINVAL;
-	read_unlock(&dev_priv->resource_lock);
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL))
+		return -EINVAL;
+
+	if (unlikely(base->object_type != VMW_RES_SURFACE))
+		goto out_bad_surface;
 
+	user_srf = container_of(base, struct vmw_user_surface, base);
+	*id = user_srf->srf.res.id;
+	ret = 0;
+
+out_bad_surface:
+	/**
+	 * FIXME: May deadlock here when called from the
+	 * command parsing code.
+	 */
+
+	ttm_base_object_unref(&base);
 	return ret;
 }
 
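
A note on the lookup pattern used throughout vmwgfx_resource.c above: the
handle is first resolved to a TTM base object, and the resource reference
is then taken under dev_priv->resource_lock, so a concurrent destroy cannot
race the kref_get(). A simplified userspace sketch of that idea, with a
pthread rwlock and a C11 atomic standing in for the kernel's rwlock and
kref (invented types, not the driver's API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct resource {
	atomic_int refcount;	/* kref stand-in: atomic, so a read
				 * lock is enough to take a reference */
	bool avail;		/* cleared once teardown has begun */
	int id;			/* device-visible ID */
};

static pthread_rwlock_t resource_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Take a reference only while the resource is still marked available;
 * mirrors the read_lock()/kref_get() pairing in
 * vmw_user_surface_lookup_handle(). Returns NULL if the resource is
 * going away. */
static struct resource *resource_get(struct resource *res)
{
	struct resource *out = NULL;

	pthread_rwlock_rdlock(&resource_lock);
	if (res->avail) {
		atomic_fetch_add(&res->refcount, 1);
		out = res;
	}
	pthread_rwlock_unlock(&resource_lock);
	return out;
}

/* Teardown side: mark the resource unavailable under the write lock,
 * so no new references can be taken after this returns. */
static void resource_make_unavailable(struct resource *res)
{
	pthread_rwlock_wrlock(&resource_lock);
	res->avail = false;
	pthread_rwlock_unlock(&resource_lock);
}
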