diff options
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 108 |
1 file changed, 92 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index d69caf92ffe7..0897359b3e4e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
| 182 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); | 182 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | 185 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
| 186 | struct vmw_sw_context *sw_context, | 186 | struct vmw_sw_context *sw_context, |
| 187 | SVGA3dCmdHeader *header) | 187 | SVGAGuestPtr *ptr, |
| 188 | struct vmw_dma_buffer **vmw_bo_p) | ||
| 188 | { | 189 | { |
| 189 | uint32_t handle; | ||
| 190 | struct vmw_dma_buffer *vmw_bo = NULL; | 190 | struct vmw_dma_buffer *vmw_bo = NULL; |
| 191 | struct ttm_buffer_object *bo; | 191 | struct ttm_buffer_object *bo; |
| 192 | struct vmw_surface *srf = NULL; | 192 | uint32_t handle = ptr->gmrId; |
| 193 | struct vmw_dma_cmd { | ||
| 194 | SVGA3dCmdHeader header; | ||
| 195 | SVGA3dCmdSurfaceDMA dma; | ||
| 196 | } *cmd; | ||
| 197 | struct vmw_relocation *reloc; | 193 | struct vmw_relocation *reloc; |
| 198 | int ret; | ||
| 199 | uint32_t cur_validate_node; | 194 | uint32_t cur_validate_node; |
| 200 | struct ttm_validate_buffer *val_buf; | 195 | struct ttm_validate_buffer *val_buf; |
| 196 | int ret; | ||
| 201 | 197 | ||
| 202 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
| 203 | handle = cmd->dma.guest.ptr.gmrId; | ||
| 204 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 198 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); |
| 205 | if (unlikely(ret != 0)) { | 199 | if (unlikely(ret != 0)) { |
| 206 | DRM_ERROR("Could not find or use GMR region.\n"); | 200 | DRM_ERROR("Could not find or use GMR region.\n"); |
| @@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
| 209 | bo = &vmw_bo->base; | 203 | bo = &vmw_bo->base; |
| 210 | 204 | ||
| 211 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | 205 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
| 212 | DRM_ERROR("Max number of DMA commands per submission" | 206 | DRM_ERROR("Max number relocations per submission" |
| 213 | " exceeded\n"); | 207 | " exceeded\n"); |
| 214 | ret = -EINVAL; | 208 | ret = -EINVAL; |
| 215 | goto out_no_reloc; | 209 | goto out_no_reloc; |
| 216 | } | 210 | } |
| 217 | 211 | ||
| 218 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 212 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
| 219 | reloc->location = &cmd->dma.guest.ptr; | 213 | reloc->location = ptr; |
| 220 | 214 | ||
| 221 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | 215 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); |
| 222 | if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { | 216 | if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { |
| @@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
| 234 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | 228 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
| 235 | ++sw_context->cur_val_buf; | 229 | ++sw_context->cur_val_buf; |
| 236 | } | 230 | } |
| 231 | *vmw_bo_p = vmw_bo; | ||
| 232 | return 0; | ||
| 233 | |||
| 234 | out_no_reloc: | ||
| 235 | vmw_dmabuf_unreference(&vmw_bo); | ||
| 236 | vmw_bo_p = NULL; | ||
| 237 | return ret; | ||
| 238 | } | ||
| 239 | |||
| 240 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, | ||
| 241 | struct vmw_sw_context *sw_context, | ||
| 242 | SVGA3dCmdHeader *header) | ||
| 243 | { | ||
| 244 | struct vmw_dma_buffer *vmw_bo; | ||
| 245 | struct vmw_query_cmd { | ||
| 246 | SVGA3dCmdHeader header; | ||
| 247 | SVGA3dCmdEndQuery q; | ||
| 248 | } *cmd; | ||
| 249 | int ret; | ||
| 250 | |||
| 251 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
| 252 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
| 253 | if (unlikely(ret != 0)) | ||
| 254 | return ret; | ||
| 255 | |||
| 256 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
| 257 | &cmd->q.guestResult, | ||
| 258 | &vmw_bo); | ||
| 259 | if (unlikely(ret != 0)) | ||
| 260 | return ret; | ||
| 261 | |||
| 262 | vmw_dmabuf_unreference(&vmw_bo); | ||
| 263 | return 0; | ||
| 264 | } | ||
| 237 | 265 | ||
| 266 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | ||
| 267 | struct vmw_sw_context *sw_context, | ||
| 268 | SVGA3dCmdHeader *header) | ||
| 269 | { | ||
| 270 | struct vmw_dma_buffer *vmw_bo; | ||
| 271 | struct vmw_query_cmd { | ||
| 272 | SVGA3dCmdHeader header; | ||
| 273 | SVGA3dCmdWaitForQuery q; | ||
| 274 | } *cmd; | ||
| 275 | int ret; | ||
| 276 | |||
| 277 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
| 278 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
| 279 | if (unlikely(ret != 0)) | ||
| 280 | return ret; | ||
| 281 | |||
| 282 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
| 283 | &cmd->q.guestResult, | ||
| 284 | &vmw_bo); | ||
| 285 | if (unlikely(ret != 0)) | ||
| 286 | return ret; | ||
| 287 | |||
| 288 | vmw_dmabuf_unreference(&vmw_bo); | ||
| 289 | return 0; | ||
| 290 | } | ||
| 291 | |||
| 292 | |||
| 293 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | ||
| 294 | struct vmw_sw_context *sw_context, | ||
| 295 | SVGA3dCmdHeader *header) | ||
| 296 | { | ||
| 297 | struct vmw_dma_buffer *vmw_bo = NULL; | ||
| 298 | struct ttm_buffer_object *bo; | ||
| 299 | struct vmw_surface *srf = NULL; | ||
| 300 | struct vmw_dma_cmd { | ||
| 301 | SVGA3dCmdHeader header; | ||
| 302 | SVGA3dCmdSurfaceDMA dma; | ||
| 303 | } *cmd; | ||
| 304 | int ret; | ||
| 305 | |||
| 306 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
| 307 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
| 308 | &cmd->dma.guest.ptr, | ||
| 309 | &vmw_bo); | ||
| 310 | if (unlikely(ret != 0)) | ||
| 311 | return ret; | ||
| 312 | |||
| 313 | bo = &vmw_bo->base; | ||
| 238 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, | 314 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, |
| 239 | cmd->dma.host.sid, &srf); | 315 | cmd->dma.host.sid, &srf); |
| 240 | if (ret) { | 316 | if (ret) { |
| @@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | |||
| 379 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), | 455 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), |
| 380 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | 456 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), |
| 381 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), | 457 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), |
| 382 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), | 458 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), |
| 383 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check), | 459 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), |
| 384 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), | 460 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), |
| 385 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | 461 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
| 386 | &vmw_cmd_blt_surf_screen_check) | 462 | &vmw_cmd_blt_surf_screen_check) |
