aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
diff options
context:
space:
mode:
authorThomas Hellstrom <thellstrom@vmware.com>2011-10-04 14:13:30 -0400
committerDave Airlie <airlied@redhat.com>2011-10-05 05:17:22 -0400
commite2fa3a76839ada0d788549607263a036aa654243 (patch)
tree24883fcf9f80483aed537661a49ed389d0dff671 /drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
parente93daed8e2fd5ce3dc98efe9938426127a534ccc (diff)
vmwgfx: Fix up query processing
Previously, query results could be placed in any buffer object, but since we didn't allow pinned buffer objects, query results could be written when that buffer was evicted, corrupting data in other buffers. Now, require that buffers holding query results are no more than two pages large, and allow one single pinned such buffer. When the command submission code encounters query result structures in other buffers, the queries in the pinned buffer will be finished using a query barrier for the last hardware context using the buffer. Also, if the command submission code detects that a new hardware context is used for queries, all queries of the previous hardware context are also flushed. Currently we use waiting for a no-op occlusion query as a query barrier for a specific context. The query buffer is also flushed and unpinned on context destruction, master drops and before scanout bo placement. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Reviewed-by: Jakob Bornecrantz <jakob@vmware.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c')
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c44
1 file changed, 44 insertions, 0 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 7f744a82892a..3fa884db08ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -42,6 +42,7 @@
42 * May only be called by the current master since it assumes that the 42 * May only be called by the current master since it assumes that the
43 * master lock is the current master's lock. 43 * master lock is the current master's lock.
44 * This function takes the master's lock in write mode. 44 * This function takes the master's lock in write mode.
45 * Flushes and unpins the query bo to avoid failures.
45 * 46 *
46 * Returns 47 * Returns
47 * -ERESTARTSYS if interrupted by a signal. 48 * -ERESTARTSYS if interrupted by a signal.
@@ -59,6 +60,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
59 if (unlikely(ret != 0)) 60 if (unlikely(ret != 0))
60 return ret; 61 return ret;
61 62
63 vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
64
62 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 65 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
63 if (unlikely(ret != 0)) 66 if (unlikely(ret != 0))
64 goto err; 67 goto err;
@@ -78,6 +81,7 @@ err:
78 * May only be called by the current master since it assumes that the 81 * May only be called by the current master since it assumes that the
79 * master lock is the current master's lock. 82 * master lock is the current master's lock.
80 * This function takes the master's lock in write mode. 83 * This function takes the master's lock in write mode.
84 * Flushes and unpins the query bo if @pin == true to avoid failures.
81 * 85 *
82 * @dev_priv: Driver private. 86 * @dev_priv: Driver private.
83 * @buf: DMA buffer to move. 87 * @buf: DMA buffer to move.
@@ -100,6 +104,9 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
100 if (unlikely(ret != 0)) 104 if (unlikely(ret != 0))
101 return ret; 105 return ret;
102 106
107 if (pin)
108 vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
109
103 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 110 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
104 if (unlikely(ret != 0)) 111 if (unlikely(ret != 0))
105 goto err; 112 goto err;
@@ -177,6 +184,7 @@ int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
177 * May only be called by the current master since it assumes that the 184 * May only be called by the current master since it assumes that the
178 * master lock is the current master's lock. 185 * master lock is the current master's lock.
179 * This function takes the master's lock in write mode. 186 * This function takes the master's lock in write mode.
187 * Flushes and unpins the query bo if @pin == true to avoid failures.
180 * 188 *
181 * @dev_priv: Driver private. 189 * @dev_priv: Driver private.
182 * @buf: DMA buffer to move. 190 * @buf: DMA buffer to move.
@@ -205,6 +213,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
205 if (unlikely(ret != 0)) 213 if (unlikely(ret != 0))
206 return ret; 214 return ret;
207 215
216 if (pin)
217 vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
218
208 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 219 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
209 if (unlikely(ret != 0)) 220 if (unlikely(ret != 0))
210 goto err_unlock; 221 goto err_unlock;
@@ -276,3 +287,36 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
276 ptr->offset = 0; 287 ptr->offset = 0;
277 } 288 }
278} 289}
290
291
292/**
293 * vmw_bo_pin - Pin or unpin a buffer object without moving it.
294 *
295 * @bo: The buffer object. Must be reserved, and present either in VRAM
296 * or GMR memory.
297 * @pin: Whether to pin or unpin.
298 *
299 */
300void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
301{
302 uint32_t pl_flags;
303 struct ttm_placement placement;
304 uint32_t old_mem_type = bo->mem.mem_type;
305 int ret;
306
307 BUG_ON(!atomic_read(&bo->reserved));
308 BUG_ON(old_mem_type != TTM_PL_VRAM &&
309 old_mem_type != VMW_PL_FLAG_GMR);
310
311 pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
312 if (pin)
313 pl_flags |= TTM_PL_FLAG_NO_EVICT;
314
315 memset(&placement, 0, sizeof(placement));
316 placement.num_placement = 1;
317 placement.placement = &pl_flags;
318
319 ret = ttm_bo_validate(bo, &placement, false, true, true);
320
321 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
322}