aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Hellstrom <thellstrom@vmware.com>2014-01-31 04:12:10 -0500
committerThomas Hellstrom <thellstrom@vmware.com>2014-02-05 02:39:19 -0500
commitd5bde956630b86462ee22055f5816a04290aed57 (patch)
treee5a3fbc34a11695fb722e63e780ca2462eeaf94a
parentc1a21373d2cb94a7808161a8c237b249cd799ce7 (diff)
drm/vmwgfx: Emulate legacy shaders on guest-backed devices v2
Command stream legacy shader creation and destruction is replaced by NOPs in the command stream, and instead guest-backed shaders are created and destroyed as part of the command validation process. v2: Removed some stray debug messages. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c197
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c465
4 files changed, 620 insertions, 78 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9893328f8fdc..3bdc0adc656d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
941 drm_master_put(&vmw_fp->locked_master); 941 drm_master_put(&vmw_fp->locked_master);
942 } 942 }
943 943
944 vmw_compat_shader_man_destroy(vmw_fp->shman);
944 ttm_object_file_release(&vmw_fp->tfile); 945 ttm_object_file_release(&vmw_fp->tfile);
945 kfree(vmw_fp); 946 kfree(vmw_fp);
946} 947}
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
960 if (unlikely(vmw_fp->tfile == NULL)) 961 if (unlikely(vmw_fp->tfile == NULL))
961 goto out_no_tfile; 962 goto out_no_tfile;
962 963
964 vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
965 if (IS_ERR(vmw_fp->shman))
966 goto out_no_shman;
967
963 file_priv->driver_priv = vmw_fp; 968 file_priv->driver_priv = vmw_fp;
964 dev_priv->bdev.dev_mapping = dev->dev_mapping; 969 dev_priv->bdev.dev_mapping = dev->dev_mapping;
965 970
966 return 0; 971 return 0;
967 972
973out_no_shman:
974 ttm_object_file_release(&vmw_fp->tfile);
968out_no_tfile: 975out_no_tfile:
969 kfree(vmw_fp); 976 kfree(vmw_fp);
970 return ret; 977 return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 554e7fa33082..cef0ff7ac738 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -75,10 +75,14 @@
75#define VMW_RES_FENCE ttm_driver_type3 75#define VMW_RES_FENCE ttm_driver_type3
76#define VMW_RES_SHADER ttm_driver_type4 76#define VMW_RES_SHADER ttm_driver_type4
77 77
78struct vmw_compat_shader_manager;
79
78struct vmw_fpriv { 80struct vmw_fpriv {
79 struct drm_master *locked_master; 81 struct drm_master *locked_master;
80 struct ttm_object_file *tfile; 82 struct ttm_object_file *tfile;
81 struct list_head fence_events; 83 struct list_head fence_events;
84 bool gb_aware;
85 struct vmw_compat_shader_manager *shman;
82}; 86};
83 87
84struct vmw_dma_buffer { 88struct vmw_dma_buffer {
@@ -318,7 +322,7 @@ struct vmw_sw_context{
318 struct drm_open_hash res_ht; 322 struct drm_open_hash res_ht;
319 bool res_ht_initialized; 323 bool res_ht_initialized;
320 bool kernel; /**< is the call made from the kernel */ 324 bool kernel; /**< is the call made from the kernel */
321 struct ttm_object_file *tfile; 325 struct vmw_fpriv *fp;
322 struct list_head validate_nodes; 326 struct list_head validate_nodes;
323 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; 327 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
324 uint32_t cur_reloc; 328 uint32_t cur_reloc;
@@ -336,6 +340,7 @@ struct vmw_sw_context{
336 bool needs_post_query_barrier; 340 bool needs_post_query_barrier;
337 struct vmw_resource *error_resource; 341 struct vmw_resource *error_resource;
338 struct vmw_ctx_binding_state staged_bindings; 342 struct vmw_ctx_binding_state staged_bindings;
343 struct list_head staged_shaders;
339}; 344};
340 345
341struct vmw_legacy_display; 346struct vmw_legacy_display;
@@ -991,6 +996,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
991 struct drm_file *file_priv); 996 struct drm_file *file_priv);
992extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, 997extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
993 struct drm_file *file_priv); 998 struct drm_file *file_priv);
999extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
1000 SVGA3dShaderType shader_type,
1001 u32 *user_key);
1002extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
1003 struct list_head *list);
1004extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
1005 struct list_head *list);
1006extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
1007 u32 user_key,
1008 SVGA3dShaderType shader_type,
1009 struct list_head *list);
1010extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
1011 u32 user_key, const void *bytecode,
1012 SVGA3dShaderType shader_type,
1013 size_t size,
1014 struct ttm_object_file *tfile,
1015 struct list_head *list);
1016extern struct vmw_compat_shader_manager *
1017vmw_compat_shader_man_create(struct vmw_private *dev_priv);
1018extern void
1019vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
1020
994 1021
995/** 1022/**
996 * Inline helper functions 1023 * Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 9441825c7860..352224b9d667 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -235,8 +235,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
235{ 235{
236 struct vmw_resource_relocation *rel; 236 struct vmw_resource_relocation *rel;
237 237
238 list_for_each_entry(rel, list, head) 238 list_for_each_entry(rel, list, head) {
239 cb[rel->offset] = rel->res->id; 239 if (likely(rel->res != NULL))
240 cb[rel->offset] = rel->res->id;
241 else
242 cb[rel->offset] = SVGA_3D_CMD_NOP;
243 }
240} 244}
241 245
242static int vmw_cmd_invalid(struct vmw_private *dev_priv, 246static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -381,22 +385,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
381} 385}
382 386
383/** 387/**
384 * vmw_cmd_res_check - Check that a resource is present and if so, put it 388 * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
385 * on the resource validate list unless it's already there. 389 * on the resource validate list unless it's already there.
386 * 390 *
387 * @dev_priv: Pointer to a device private structure. 391 * @dev_priv: Pointer to a device private structure.
388 * @sw_context: Pointer to the software context. 392 * @sw_context: Pointer to the software context.
389 * @res_type: Resource type. 393 * @res_type: Resource type.
390 * @converter: User-space visible type specific information. 394 * @converter: User-space visible type specific information.
391 * @id: Pointer to the location in the command buffer currently being 395 * @id: user-space resource id handle.
396 * @id_loc: Pointer to the location in the command buffer currently being
392 * parsed from where the user-space resource id handle is located. 397 * parsed from where the user-space resource id handle is located.
398 * @p_val: Pointer to pointer to resource validation node. Populated
399 * on exit.
393 */ 400 */
394static int vmw_cmd_res_check(struct vmw_private *dev_priv, 401static int
395 struct vmw_sw_context *sw_context, 402vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
396 enum vmw_res_type res_type, 403 struct vmw_sw_context *sw_context,
397 const struct vmw_user_resource_conv *converter, 404 enum vmw_res_type res_type,
398 uint32_t *id, 405 const struct vmw_user_resource_conv *converter,
399 struct vmw_resource_val_node **p_val) 406 uint32_t id,
407 uint32_t *id_loc,
408 struct vmw_resource_val_node **p_val)
400{ 409{
401 struct vmw_res_cache_entry *rcache = 410 struct vmw_res_cache_entry *rcache =
402 &sw_context->res_cache[res_type]; 411 &sw_context->res_cache[res_type];
@@ -404,7 +413,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
404 struct vmw_resource_val_node *node; 413 struct vmw_resource_val_node *node;
405 int ret; 414 int ret;
406 415
407 if (*id == SVGA3D_INVALID_ID) { 416 if (id == SVGA3D_INVALID_ID) {
408 if (p_val) 417 if (p_val)
409 *p_val = NULL; 418 *p_val = NULL;
410 if (res_type == vmw_res_context) { 419 if (res_type == vmw_res_context) {
@@ -419,7 +428,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
419 * resource 428 * resource
420 */ 429 */
421 430
422 if (likely(rcache->valid && *id == rcache->handle)) { 431 if (likely(rcache->valid && id == rcache->handle)) {
423 const struct vmw_resource *res = rcache->res; 432 const struct vmw_resource *res = rcache->res;
424 433
425 rcache->node->first_usage = false; 434 rcache->node->first_usage = false;
@@ -428,28 +437,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
428 437
429 return vmw_resource_relocation_add 438 return vmw_resource_relocation_add
430 (&sw_context->res_relocations, res, 439 (&sw_context->res_relocations, res,
431 id - sw_context->buf_start); 440 id_loc - sw_context->buf_start);
432 } 441 }
433 442
434 ret = vmw_user_resource_lookup_handle(dev_priv, 443 ret = vmw_user_resource_lookup_handle(dev_priv,
435 sw_context->tfile, 444 sw_context->fp->tfile,
436 *id, 445 id,
437 converter, 446 converter,
438 &res); 447 &res);
439 if (unlikely(ret != 0)) { 448 if (unlikely(ret != 0)) {
440 DRM_ERROR("Could not find or use resource 0x%08x.\n", 449 DRM_ERROR("Could not find or use resource 0x%08x.\n",
441 (unsigned) *id); 450 (unsigned) id);
442 dump_stack(); 451 dump_stack();
443 return ret; 452 return ret;
444 } 453 }
445 454
446 rcache->valid = true; 455 rcache->valid = true;
447 rcache->res = res; 456 rcache->res = res;
448 rcache->handle = *id; 457 rcache->handle = id;
449 458
450 ret = vmw_resource_relocation_add(&sw_context->res_relocations, 459 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
451 res, 460 res,
452 id - sw_context->buf_start); 461 id_loc - sw_context->buf_start);
453 if (unlikely(ret != 0)) 462 if (unlikely(ret != 0))
454 goto out_no_reloc; 463 goto out_no_reloc;
455 464
@@ -483,6 +492,31 @@ out_no_reloc:
483} 492}
484 493
485/** 494/**
495 * vmw_cmd_res_check - Check that a resource is present and if so, put it
496 * on the resource validate list unless it's already there.
497 *
498 * @dev_priv: Pointer to a device private structure.
499 * @sw_context: Pointer to the software context.
500 * @res_type: Resource type.
501 * @converter: User-space visible type specific information.
502 * @id_loc: Pointer to the location in the command buffer currently being
503 * parsed from where the user-space resource id handle is located.
504 * @p_val: Pointer to pointer to resource validation node. Populated
505 * on exit.
506 */
507static int
508vmw_cmd_res_check(struct vmw_private *dev_priv,
509 struct vmw_sw_context *sw_context,
510 enum vmw_res_type res_type,
511 const struct vmw_user_resource_conv *converter,
512 uint32_t *id_loc,
513 struct vmw_resource_val_node **p_val)
514{
515 return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
516 converter, *id_loc, id_loc, p_val);
517}
518
519/**
486 * vmw_cmd_cid_check - Check a command header for valid context information. 520 * vmw_cmd_cid_check - Check a command header for valid context information.
487 * 521 *
488 * @dev_priv: Pointer to a device private structure. 522 * @dev_priv: Pointer to a device private structure.
@@ -769,7 +803,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
769 struct vmw_relocation *reloc; 803 struct vmw_relocation *reloc;
770 int ret; 804 int ret;
771 805
772 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 806 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
773 if (unlikely(ret != 0)) { 807 if (unlikely(ret != 0)) {
774 DRM_ERROR("Could not find or use MOB buffer.\n"); 808 DRM_ERROR("Could not find or use MOB buffer.\n");
775 return -EINVAL; 809 return -EINVAL;
@@ -830,7 +864,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
830 struct vmw_relocation *reloc; 864 struct vmw_relocation *reloc;
831 int ret; 865 int ret;
832 866
833 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 867 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
834 if (unlikely(ret != 0)) { 868 if (unlikely(ret != 0)) {
835 DRM_ERROR("Could not find or use GMR region.\n"); 869 DRM_ERROR("Could not find or use GMR region.\n");
836 return -EINVAL; 870 return -EINVAL;
@@ -1129,7 +1163,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
1129 1163
1130 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); 1164 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1131 1165
1132 vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); 1166 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1167 header);
1133 1168
1134out_no_surface: 1169out_no_surface:
1135 vmw_dmabuf_unreference(&vmw_bo); 1170 vmw_dmabuf_unreference(&vmw_bo);
@@ -1480,6 +1515,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1480 &cmd->body.sid, NULL); 1515 &cmd->body.sid, NULL);
1481} 1516}
1482 1517
1518
1519/**
1520 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1521 * command
1522 *
1523 * @dev_priv: Pointer to a device private struct.
1524 * @sw_context: The software context being used for this batch.
1525 * @header: Pointer to the command header in the command stream.
1526 */
1527static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1528 struct vmw_sw_context *sw_context,
1529 SVGA3dCmdHeader *header)
1530{
1531 struct vmw_shader_define_cmd {
1532 SVGA3dCmdHeader header;
1533 SVGA3dCmdDefineShader body;
1534 } *cmd;
1535 int ret;
1536 size_t size;
1537
1538 cmd = container_of(header, struct vmw_shader_define_cmd,
1539 header);
1540
1541 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1542 user_context_converter, &cmd->body.cid,
1543 NULL);
1544 if (unlikely(ret != 0))
1545 return ret;
1546
1547 if (unlikely(!dev_priv->has_mob))
1548 return 0;
1549
1550 size = cmd->header.size - sizeof(cmd->body);
1551 ret = vmw_compat_shader_add(sw_context->fp->shman,
1552 cmd->body.shid, cmd + 1,
1553 cmd->body.type, size,
1554 sw_context->fp->tfile,
1555 &sw_context->staged_shaders);
1556 if (unlikely(ret != 0))
1557 return ret;
1558
1559 return vmw_resource_relocation_add(&sw_context->res_relocations,
1560 NULL, &cmd->header.id -
1561 sw_context->buf_start);
1562
1563 return 0;
1564}
1565
1566/**
1567 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1568 * command
1569 *
1570 * @dev_priv: Pointer to a device private struct.
1571 * @sw_context: The software context being used for this batch.
1572 * @header: Pointer to the command header in the command stream.
1573 */
1574static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1575 struct vmw_sw_context *sw_context,
1576 SVGA3dCmdHeader *header)
1577{
1578 struct vmw_shader_destroy_cmd {
1579 SVGA3dCmdHeader header;
1580 SVGA3dCmdDestroyShader body;
1581 } *cmd;
1582 int ret;
1583
1584 cmd = container_of(header, struct vmw_shader_destroy_cmd,
1585 header);
1586
1587 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1588 user_context_converter, &cmd->body.cid,
1589 NULL);
1590 if (unlikely(ret != 0))
1591 return ret;
1592
1593 if (unlikely(!dev_priv->has_mob))
1594 return 0;
1595
1596 ret = vmw_compat_shader_remove(sw_context->fp->shman,
1597 cmd->body.shid,
1598 cmd->body.type,
1599 &sw_context->staged_shaders);
1600 if (unlikely(ret != 0))
1601 return ret;
1602
1603 return vmw_resource_relocation_add(&sw_context->res_relocations,
1604 NULL, &cmd->header.id -
1605 sw_context->buf_start);
1606
1607 return 0;
1608}
1609
1483/** 1610/**
1484 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER 1611 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1485 * command 1612 * command
@@ -1511,10 +1638,17 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1511 if (dev_priv->has_mob) { 1638 if (dev_priv->has_mob) {
1512 struct vmw_ctx_bindinfo bi; 1639 struct vmw_ctx_bindinfo bi;
1513 struct vmw_resource_val_node *res_node; 1640 struct vmw_resource_val_node *res_node;
1641 u32 shid = cmd->body.shid;
1642
1643 (void) vmw_compat_shader_lookup(sw_context->fp->shman,
1644 cmd->body.type,
1645 &shid);
1514 1646
1515 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, 1647 ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
1516 user_shader_converter, 1648 vmw_res_shader,
1517 &cmd->body.shid, &res_node); 1649 user_shader_converter,
1650 shid,
1651 &cmd->body.shid, &res_node);
1518 if (unlikely(ret != 0)) 1652 if (unlikely(ret != 0))
1519 return ret; 1653 return ret;
1520 1654
@@ -1669,10 +1803,10 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1669 true, false, false), 1803 true, false, false),
1670 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, 1804 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1671 false, false, false), 1805 false, false, false),
1672 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, 1806 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1673 true, true, false), 1807 true, false, false),
1674 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, 1808 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1675 true, true, false), 1809 true, false, false),
1676 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, 1810 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1677 true, false, false), 1811 true, false, false),
1678 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, 1812 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
@@ -2206,7 +2340,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2206 } else 2340 } else
2207 sw_context->kernel = true; 2341 sw_context->kernel = true;
2208 2342
2209 sw_context->tfile = vmw_fpriv(file_priv)->tfile; 2343 sw_context->fp = vmw_fpriv(file_priv);
2210 sw_context->cur_reloc = 0; 2344 sw_context->cur_reloc = 0;
2211 sw_context->cur_val_buf = 0; 2345 sw_context->cur_val_buf = 0;
2212 sw_context->fence_flags = 0; 2346 sw_context->fence_flags = 0;
@@ -2223,6 +2357,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2223 goto out_unlock; 2357 goto out_unlock;
2224 sw_context->res_ht_initialized = true; 2358 sw_context->res_ht_initialized = true;
2225 } 2359 }
2360 INIT_LIST_HEAD(&sw_context->staged_shaders);
2226 2361
2227 INIT_LIST_HEAD(&resource_list); 2362 INIT_LIST_HEAD(&resource_list);
2228 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 2363 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
@@ -2311,6 +2446,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2311 } 2446 }
2312 2447
2313 list_splice_init(&sw_context->resource_list, &resource_list); 2448 list_splice_init(&sw_context->resource_list, &resource_list);
2449 vmw_compat_shaders_commit(sw_context->fp->shman,
2450 &sw_context->staged_shaders);
2314 mutex_unlock(&dev_priv->cmdbuf_mutex); 2451 mutex_unlock(&dev_priv->cmdbuf_mutex);
2315 2452
2316 /* 2453 /*
@@ -2337,6 +2474,8 @@ out_unlock:
2337 list_splice_init(&sw_context->resource_list, &resource_list); 2474 list_splice_init(&sw_context->resource_list, &resource_list);
2338 error_resource = sw_context->error_resource; 2475 error_resource = sw_context->error_resource;
2339 sw_context->error_resource = NULL; 2476 sw_context->error_resource = NULL;
2477 vmw_compat_shaders_revert(sw_context->fp->shman,
2478 &sw_context->staged_shaders);
2340 mutex_unlock(&dev_priv->cmdbuf_mutex); 2479 mutex_unlock(&dev_priv->cmdbuf_mutex);
2341 2480
2342 /* 2481 /*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 1457ec4b7125..be85d7fdfb5b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,6 +29,8 @@
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "ttm/ttm_placement.h" 30#include "ttm/ttm_placement.h"
31 31
32#define VMW_COMPAT_SHADER_HT_ORDER 12
33
32struct vmw_shader { 34struct vmw_shader {
33 struct vmw_resource res; 35 struct vmw_resource res;
34 SVGA3dShaderType type; 36 SVGA3dShaderType type;
@@ -40,6 +42,50 @@ struct vmw_user_shader {
40 struct vmw_shader shader; 42 struct vmw_shader shader;
41}; 43};
42 44
45/**
46 * enum vmw_compat_shader_state - Staging state for compat shaders
47 */
48enum vmw_compat_shader_state {
49 VMW_COMPAT_COMMITED,
50 VMW_COMPAT_ADD,
51 VMW_COMPAT_DEL
52};
53
54/**
55 * struct vmw_compat_shader - Metadata for compat shaders.
56 *
57 * @handle: The TTM handle of the guest backed shader.
58 * @tfile: The struct ttm_object_file the guest backed shader is registered
59 * with.
60 * @hash: Hash item for lookup.
61 * @head: List head for staging lists or the compat shader manager list.
62 * @state: Staging state.
63 *
64 * The structure is protected by the cmdbuf lock.
65 */
66struct vmw_compat_shader {
67 u32 handle;
68 struct ttm_object_file *tfile;
69 struct drm_hash_item hash;
70 struct list_head head;
71 enum vmw_compat_shader_state state;
72};
73
74/**
75 * struct vmw_compat_shader_manager - Compat shader manager.
76 *
77 * @shaders: Hash table containing staged and committed compat shaders
78 * @list: List of committed shaders.
79 * @dev_priv: Pointer to a device private structure.
80 *
81 * @shaders and @list are protected by the cmdbuf mutex for now.
82 */
83struct vmw_compat_shader_manager {
84 struct drm_open_hash shaders;
85 struct list_head list;
86 struct vmw_private *dev_priv;
87};
88
43static void vmw_user_shader_free(struct vmw_resource *res); 89static void vmw_user_shader_free(struct vmw_resource *res);
44static struct vmw_resource * 90static struct vmw_resource *
45vmw_user_shader_base_to_res(struct ttm_base_object *base); 91vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
325 TTM_REF_USAGE); 371 TTM_REF_USAGE);
326} 372}
327 373
374int vmw_shader_alloc(struct vmw_private *dev_priv,
375 struct vmw_dma_buffer *buffer,
376 size_t shader_size,
377 size_t offset,
378 SVGA3dShaderType shader_type,
379 struct ttm_object_file *tfile,
380 u32 *handle)
381{
382 struct vmw_user_shader *ushader;
383 struct vmw_resource *res, *tmp;
384 int ret;
385
386 /*
387 * Approximate idr memory usage with 128 bytes. It will be limited
388 * by maximum number_of shaders anyway.
389 */
390 if (unlikely(vmw_user_shader_size == 0))
391 vmw_user_shader_size =
392 ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
393
394 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
395 vmw_user_shader_size,
396 false, true);
397 if (unlikely(ret != 0)) {
398 if (ret != -ERESTARTSYS)
399 DRM_ERROR("Out of graphics memory for shader "
400 "creation.\n");
401 goto out;
402 }
403
404 ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
405 if (unlikely(ushader == NULL)) {
406 ttm_mem_global_free(vmw_mem_glob(dev_priv),
407 vmw_user_shader_size);
408 ret = -ENOMEM;
409 goto out;
410 }
411
412 res = &ushader->shader.res;
413 ushader->base.shareable = false;
414 ushader->base.tfile = NULL;
415
416 /*
417 * From here on, the destructor takes over resource freeing.
418 */
419
420 ret = vmw_gb_shader_init(dev_priv, res, shader_size,
421 offset, shader_type, buffer,
422 vmw_user_shader_free);
423 if (unlikely(ret != 0))
424 goto out;
425
426 tmp = vmw_resource_reference(res);
427 ret = ttm_base_object_init(tfile, &ushader->base, false,
428 VMW_RES_SHADER,
429 &vmw_user_shader_base_release, NULL);
430
431 if (unlikely(ret != 0)) {
432 vmw_resource_unreference(&tmp);
433 goto out_err;
434 }
435
436 if (handle)
437 *handle = ushader->base.hash.key;
438out_err:
439 vmw_resource_unreference(&res);
440out:
441 return ret;
442}
443
444
328int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 445int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
329 struct drm_file *file_priv) 446 struct drm_file *file_priv)
330{ 447{
331 struct vmw_private *dev_priv = vmw_priv(dev); 448 struct vmw_private *dev_priv = vmw_priv(dev);
332 struct vmw_user_shader *ushader;
333 struct vmw_resource *res;
334 struct vmw_resource *tmp;
335 struct drm_vmw_shader_create_arg *arg = 449 struct drm_vmw_shader_create_arg *arg =
336 (struct drm_vmw_shader_create_arg *)data; 450 (struct drm_vmw_shader_create_arg *)data;
337 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 451 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
373 goto out_bad_arg; 487 goto out_bad_arg;
374 } 488 }
375 489
376 /* 490 ret = ttm_read_lock(&vmaster->lock, true);
377 * Approximate idr memory usage with 128 bytes. It will be limited 491 if (unlikely(ret != 0))
378 * by maximum number_of shaders anyway. 492 goto out_bad_arg;
379 */
380 493
381 if (unlikely(vmw_user_shader_size == 0)) 494 ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
382 vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) 495 shader_type, tfile, &arg->shader_handle);
383 + 128;
384 496
385 ret = ttm_read_lock(&vmaster->lock, true); 497 ttm_read_unlock(&vmaster->lock);
498out_bad_arg:
499 vmw_dmabuf_unreference(&buffer);
500 return ret;
501}
502
503/**
504 * vmw_compat_shader_lookup - Look up a compat shader
505 *
506 * @man: Pointer to the compat shader manager.
507 * @shader_type: The shader type, that combined with the user_key identifies
508 * the shader.
509 * @user_key: On entry, this should be a pointer to the user_key.
510 * On successful exit, it will contain the guest-backed shader's TTM handle.
511 *
512 * Returns 0 on success. Non-zero on failure, in which case the value pointed
513 * to by @user_key is unmodified.
514 */
515int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
516 SVGA3dShaderType shader_type,
517 u32 *user_key)
518{
519 struct drm_hash_item *hash;
520 int ret;
521 unsigned long key = *user_key | (shader_type << 24);
522
523 ret = drm_ht_find_item(&man->shaders, key, &hash);
386 if (unlikely(ret != 0)) 524 if (unlikely(ret != 0))
387 return ret; 525 return ret;
388 526
389 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 527 *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
390 vmw_user_shader_size, 528 hash)->handle;
391 false, true); 529
392 if (unlikely(ret != 0)) { 530 return 0;
393 if (ret != -ERESTARTSYS) 531}
394 DRM_ERROR("Out of graphics memory for shader" 532
395 " creation.\n"); 533/**
396 goto out_unlock; 534 * vmw_compat_shader_free - Free a compat shader.
535 *
536 * @man: Pointer to the compat shader manager.
537 * @entry: Pointer to a struct vmw_compat_shader.
538 *
539 * Frees a struct vmw_compat_shader entry and drops its reference to the
540 * guest backed shader.
541 */
542static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
543 struct vmw_compat_shader *entry)
544{
545 list_del(&entry->head);
546 WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
547 WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
548 TTM_REF_USAGE));
549 kfree(entry);
550}
551
552/**
553 * vmw_compat_shaders_commit - Commit a list of compat shader actions.
554 *
555 * @man: Pointer to the compat shader manager.
556 * @list: Caller's list of compat shader actions.
557 *
558 * This function commits a list of compat shader additions or removals.
559 * It is typically called when the execbuf ioctl call triggering these
560 * actions has committed the fifo contents to the device.
561 */
562void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
563 struct list_head *list)
564{
565 struct vmw_compat_shader *entry, *next;
566
567 list_for_each_entry_safe(entry, next, list, head) {
568 list_del(&entry->head);
569 switch (entry->state) {
570 case VMW_COMPAT_ADD:
571 entry->state = VMW_COMPAT_COMMITED;
572 list_add_tail(&entry->head, &man->list);
573 break;
574 case VMW_COMPAT_DEL:
575 ttm_ref_object_base_unref(entry->tfile, entry->handle,
576 TTM_REF_USAGE);
577 kfree(entry);
578 break;
579 default:
580 BUG();
581 break;
582 }
397 } 583 }
584}
398 585
399 ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); 586/**
400 if (unlikely(ushader == NULL)) { 587 * vmw_compat_shaders_revert - Revert a list of compat shader actions
401 ttm_mem_global_free(vmw_mem_glob(dev_priv), 588 *
402 vmw_user_shader_size); 589 * @man: Pointer to the compat shader manager.
403 ret = -ENOMEM; 590 * @list: Caller's list of compat shader actions.
404 goto out_unlock; 591 *
592 * This function reverts a list of compat shader additions or removals.
593 * It is typically called when the execbuf ioctl call triggering these
594 * actions failed for some reason, and the command stream was never
595 * submitted.
596 */
597void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
598 struct list_head *list)
599{
600 struct vmw_compat_shader *entry, *next;
601 int ret;
602
603 list_for_each_entry_safe(entry, next, list, head) {
604 switch (entry->state) {
605 case VMW_COMPAT_ADD:
606 vmw_compat_shader_free(man, entry);
607 break;
608 case VMW_COMPAT_DEL:
609 ret = drm_ht_insert_item(&man->shaders, &entry->hash);
610 list_del(&entry->head);
611 list_add_tail(&entry->head, &man->list);
612 entry->state = VMW_COMPAT_COMMITED;
613 break;
614 default:
615 BUG();
616 break;
617 }
405 } 618 }
619}
406 620
407 res = &ushader->shader.res; 621/**
408 ushader->base.shareable = false; 622 * vmw_compat_shader_remove - Stage a compat shader for removal.
409 ushader->base.tfile = NULL; 623 *
624 * @man: Pointer to the compat shader manager
625 * @user_key: The key that is used to identify the shader. The key is
626 * unique to the shader type.
627 * @shader_type: Shader type.
628 * @list: Caller's list of staged shader actions.
629 *
630 * This function stages a compat shader for removal and removes the key from
631 * the shader manager's hash table. If the shader was previously only staged
632 * for addition it is completely removed (But the execbuf code may keep a
633 * reference if it was bound to a context between addition and removal). If
634 * it was previously commited to the manager, it is staged for removal.
635 */
636int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
637 u32 user_key, SVGA3dShaderType shader_type,
638 struct list_head *list)
639{
640 struct vmw_compat_shader *entry;
641 struct drm_hash_item *hash;
642 int ret;
410 643
411 /* 644 ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
412 * From here on, the destructor takes over resource freeing. 645 &hash);
413 */ 646 if (likely(ret != 0))
647 return -EINVAL;
414 648
415 ret = vmw_gb_shader_init(dev_priv, res, arg->size, 649 entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
416 arg->offset, shader_type, buffer, 650
417 vmw_user_shader_free); 651 switch (entry->state) {
652 case VMW_COMPAT_ADD:
653 vmw_compat_shader_free(man, entry);
654 break;
655 case VMW_COMPAT_COMMITED:
656 (void) drm_ht_remove_item(&man->shaders, &entry->hash);
657 list_del(&entry->head);
658 entry->state = VMW_COMPAT_DEL;
659 list_add_tail(&entry->head, list);
660 break;
661 default:
662 BUG();
663 break;
664 }
665
666 return 0;
667}
668
669/**
670 * vmw_compat_shader_add - Create a compat shader and add the
671 * key to the manager
672 *
673 * @man: Pointer to the compat shader manager
674 * @user_key: The key that is used to identify the shader. The key is
675 * unique to the shader type.
676 * @bytecode: Pointer to the bytecode of the shader.
677 * @shader_type: Shader type.
678 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
679 * to be created with.
680 * @list: Caller's list of staged shader actions.
681 *
682 * Note that only the key is added to the shader manager's hash table.
683 * The shader is not yet added to the shader manager's list of shaders.
684 */
685int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
686 u32 user_key, const void *bytecode,
687 SVGA3dShaderType shader_type,
688 size_t size,
689 struct ttm_object_file *tfile,
690 struct list_head *list)
691{
692 struct vmw_dma_buffer *buf;
693 struct ttm_bo_kmap_obj map;
694 bool is_iomem;
695 struct vmw_compat_shader *compat;
696 u32 handle;
697 int ret;
698
699 if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
700 return -EINVAL;
701
702 /* Allocate and pin a DMA buffer */
703 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
704 if (unlikely(buf == NULL))
705 return -ENOMEM;
706
707 ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
708 true, vmw_dmabuf_bo_free);
418 if (unlikely(ret != 0)) 709 if (unlikely(ret != 0))
419 goto out_unlock; 710 goto out;
420 711
421 tmp = vmw_resource_reference(res); 712 ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
422 ret = ttm_base_object_init(tfile, &ushader->base, false, 713 if (unlikely(ret != 0))
423 VMW_RES_SHADER, 714 goto no_reserve;
424 &vmw_user_shader_base_release, NULL);
425 715
716 /* Map and copy shader bytecode. */
717 ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
718 &map);
426 if (unlikely(ret != 0)) { 719 if (unlikely(ret != 0)) {
427 vmw_resource_unreference(&tmp); 720 ttm_bo_unreserve(&buf->base);
428 goto out_err; 721 goto no_reserve;
429 } 722 }
430 723
431 arg->shader_handle = ushader->base.hash.key; 724 memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
432out_err: 725 WARN_ON(is_iomem);
433 vmw_resource_unreference(&res); 726
434out_unlock: 727 ttm_bo_kunmap(&map);
435 ttm_read_unlock(&vmaster->lock); 728 ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
436out_bad_arg: 729 WARN_ON(ret != 0);
437 vmw_dmabuf_unreference(&buffer); 730 ttm_bo_unreserve(&buf->base);
731
732 /* Create a guest-backed shader container backed by the dma buffer */
733 ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
734 tfile, &handle);
735 vmw_dmabuf_unreference(&buf);
736 if (unlikely(ret != 0))
737 goto no_reserve;
738 /*
739 * Create a compat shader structure and stage it for insertion
740 * in the manager
741 */
742 compat = kzalloc(sizeof(*compat), GFP_KERNEL);
743 if (compat == NULL)
744 goto no_compat;
745
746 compat->hash.key = user_key | (shader_type << 24);
747 ret = drm_ht_insert_item(&man->shaders, &compat->hash);
748 if (unlikely(ret != 0))
749 goto out_invalid_key;
750
751 compat->state = VMW_COMPAT_ADD;
752 compat->handle = handle;
753 compat->tfile = tfile;
754 list_add_tail(&compat->head, list);
438 755
756 return 0;
757
758out_invalid_key:
759 kfree(compat);
760no_compat:
761 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
762no_reserve:
763out:
439 return ret; 764 return ret;
765}
766
767/**
768 * vmw_compat_shader_man_create - Create a compat shader manager
769 *
770 * @dev_priv: Pointer to a device private structure.
771 *
772 * Typically done at file open time. If successful returns a pointer to a
773 * compat shader manager. Otherwise returns an error pointer.
774 */
775struct vmw_compat_shader_manager *
776vmw_compat_shader_man_create(struct vmw_private *dev_priv)
777{
778 struct vmw_compat_shader_manager *man;
779 int ret;
780
781 man = kzalloc(sizeof(*man), GFP_KERNEL);
782
783 man->dev_priv = dev_priv;
784 INIT_LIST_HEAD(&man->list);
785 ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
786 if (ret == 0)
787 return man;
788
789 kfree(man);
790 return ERR_PTR(ret);
791}
792
793/**
794 * vmw_compat_shader_man_destroy - Destroy a compat shader manager
795 *
796 * @man: Pointer to the shader manager to destroy.
797 *
798 * Typically done at file close time.
799 */
800void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
801{
802 struct vmw_compat_shader *entry, *next;
803
804 mutex_lock(&man->dev_priv->cmdbuf_mutex);
805 list_for_each_entry_safe(entry, next, &man->list, head)
806 vmw_compat_shader_free(man, entry);
440 807
808 mutex_unlock(&man->dev_priv->cmdbuf_mutex);
809 kfree(man);
441} 810}