author	Thomas Hellstrom <thellstrom@vmware.com>	2011-10-04 14:13:30 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-10-05 05:17:22 -0400
commit	e2fa3a76839ada0d788549607263a036aa654243 (patch)
tree	24883fcf9f80483aed537661a49ed389d0dff671 /drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
parent	e93daed8e2fd5ce3dc98efe9938426127a534ccc (diff)
vmwgfx: Fix up query processing
Previously, query results could be placed in any buffer object, but since we didn't allow pinned buffer objects, query results could be written when that buffer was evicted, corrupting data in other buffers.

Now, require that buffers holding query results are no more than two pages large, and allow a single such pinned buffer. When the command submission code encounters query result structures in other buffers, the queries in the pinned buffer are finished using a query barrier for the last hardware context using the buffer. If the command submission code detects that a new hardware context is used for queries, all queries of the previous hardware context are also flushed. Currently we wait for a no-op occlusion query as the query barrier for a specific context.

The query buffer is also flushed and unpinned on context destruction, on master drops, and before scanout bo placement.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
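As a rough illustration of how the barrier is intended to be used (not part of this patch), the command submission code could emit the dummy query for the context being retired and then fence it. The helper name vmw_flush_queries_for_context() below is hypothetical; vmw_fifo_emit_dummy_query() and vmw_fifo_send_fence() are the functions visible in this file:

static int vmw_flush_queries_for_context(struct vmw_private *dev_priv,
					 uint32_t cid)
{
	uint32_t seqno;
	int ret;

	/*
	 * A no-op occlusion query wait acts as a barrier: once it has
	 * completed, all earlier queries for this context have
	 * completed as well.
	 */
	ret = vmw_fifo_emit_dummy_query(dev_priv, cid);
	if (unlikely(ret != 0))
		return ret;

	/* Fence the barrier so the caller can wait for or track it. */
	return vmw_fifo_send_fence(dev_priv, &seqno);
}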
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c	| 57
1 files changed, 57 insertions, 0 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index d7ed33e732a0..62d6377b8ee8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -505,3 +505,60 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 out_err:
 	return ret;
 }
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A Query results structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object. And that buffer object
+ * must also be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+			      uint32_t cid)
+{
+	/*
+	 * A query wait without a preceding query end will
+	 * actually finish all queries for this cid
+	 * without writing to the query result structure.
+	 */
+
+	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForQuery body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of fifo space for dummy query.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = cid;
+	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
+		cmd->body.guestResult.offset = bo->offset;
+	} else {
+		cmd->body.guestResult.gmrId = bo->mem.start;
+		cmd->body.guestResult.offset = 0;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
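A note on the guest result address in the hunk above: when the dummy query bo currently resides in VRAM, the result location is expressed as an offset into the special SVGA_GMR_FRAMEBUFFER GMR; otherwise the bo's own GMR id (bo->mem.start) is used with a zero offset, presumably because the buffer then maps to a GMR of its own.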