path: root/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
author     Thomas Hellstrom <thellstrom@vmware.com>   2011-10-04 14:13:30 -0400
committer  Dave Airlie <airlied@redhat.com>           2011-10-05 05:17:22 -0400
commit     e2fa3a76839ada0d788549607263a036aa654243 (patch)
tree       24883fcf9f80483aed537661a49ed389d0dff671 /drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
parent     e93daed8e2fd5ce3dc98efe9938426127a534ccc (diff)
vmwgfx: Fix up query processing
Previously, query results could be placed in any buffer object, but since we didn't allow pinned buffer objects, query results could be written when that buffer was evicted, corrupting data in other buffers.

Now, require that buffers holding query results are no more than two pages large, and allow a single such pinned buffer. When the command submission code encounters query result structures in other buffers, the queries in the pinned buffer will be finished using a query barrier for the last hardware context using the buffer. Also, if the command submission code detects that a new hardware context is used for queries, all queries of the previous hardware context are flushed as well.

Currently we use waiting for a no-op occlusion query as the query barrier for a specific context.

The query buffer is also flushed and unpinned on context destruction, on master drops, and before scanout bo placement.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
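For reference, the "query barrier" mentioned above amounts to making the device wait on a no-op occlusion query for the context in question, which forces all of that context's outstanding queries to complete (and their results to land in the pinned buffer) first. Below is a minimal sketch of what emitting such a barrier could look like using the driver's FIFO reserve/commit helpers; the function name is made up for illustration and the guest-result setup is omitted, so this is not the patch's actual code.

/*
 * Illustrative sketch only (not part of this patch): emit a wait on a
 * no-op occlusion query for context @cid so that all of that context's
 * pending queries are finished before the device processes further
 * commands.  Assumes the usual vmwgfx/SVGA headers are available.
 */
static int vmw_emit_query_barrier_sketch(struct vmw_private *dev_priv,
					 uint32_t cid)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	/* cmd->body.guestResult would point into the single pinned query bo. */

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	return 0;
}

In the patch, a barrier of this kind is issued by the command submission code when it sees query results targeted at a buffer other than the pinned one, or when a different hardware context starts issuing queries.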
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c1b6ffd4ce7b..36c9d033220a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -126,7 +126,7 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
 	res->idr = idr;
 	res->avail = false;
 	res->dev_priv = dev_priv;
-
+	INIT_LIST_HEAD(&res->query_head);
 	do {
 		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
 			return -ENOMEM;
@@ -194,8 +194,12 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyContext body;
-	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	} *cmd;
 
+
+	vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "destruction.\n");