diff options
author | Thomas Hellstrom <thellstrom@vmware.com> | 2011-09-01 16:18:44 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2011-09-06 06:51:11 -0400 |
commit | ae2a104058e217548215bfe6c6c8a98752139c29 (patch) | |
tree | 9f3fccec1c8f4e78b67b44fc6b4cdda10f3291da /drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |
parent | 4f73a96bd76914009682432842ac04a32ab9115b (diff) |
vmwgfx: Implement fence objects
Will be needed for queries and drm event-driven throttling.
As a benefit, they help avoid stale user-space fence handles.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 26 |
1 file changed, 13 insertions, 13 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 72d95617bc5..5d665ce8cbe 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
@@ -274,39 +274,39 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
274 | 274 | ||
275 | static void *vmw_sync_obj_ref(void *sync_obj) | 275 | static void *vmw_sync_obj_ref(void *sync_obj) |
276 | { | 276 | { |
277 | return sync_obj; | 277 | |
278 | return (void *) | ||
279 | vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj); | ||
278 | } | 280 | } |
279 | 281 | ||
280 | static void vmw_sync_obj_unref(void **sync_obj) | 282 | static void vmw_sync_obj_unref(void **sync_obj) |
281 | { | 283 | { |
282 | *sync_obj = NULL; | 284 | vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj); |
283 | } | 285 | } |
284 | 286 | ||
285 | static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg) | 287 | static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg) |
286 | { | 288 | { |
287 | struct vmw_private *dev_priv = (struct vmw_private *)sync_arg; | 289 | vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj); |
288 | |||
289 | mutex_lock(&dev_priv->hw_mutex); | ||
290 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); | ||
291 | mutex_unlock(&dev_priv->hw_mutex); | ||
292 | return 0; | 290 | return 0; |
293 | } | 291 | } |
294 | 292 | ||
295 | static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg) | 293 | static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg) |
296 | { | 294 | { |
297 | struct vmw_private *dev_priv = (struct vmw_private *)sync_arg; | 295 | unsigned long flags = (unsigned long) sync_arg; |
298 | uint32_t seqno = (unsigned long) sync_obj; | 296 | return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj, |
297 | (uint32_t) flags); | ||
299 | 298 | ||
300 | return vmw_seqno_passed(dev_priv, seqno); | ||
301 | } | 299 | } |
302 | 300 | ||
303 | static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg, | 301 | static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg, |
304 | bool lazy, bool interruptible) | 302 | bool lazy, bool interruptible) |
305 | { | 303 | { |
306 | struct vmw_private *dev_priv = (struct vmw_private *)sync_arg; | 304 | unsigned long flags = (unsigned long) sync_arg; |
307 | uint32_t seqno = (unsigned long) sync_obj; | ||
308 | 305 | ||
309 | return vmw_wait_seqno(dev_priv, false, seqno, false, 3*HZ); | 306 | return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj, |
307 | (uint32_t) flags, | ||
308 | lazy, interruptible, | ||
309 | VMW_FENCE_WAIT_TIMEOUT); | ||
310 | } | 310 | } |
311 | 311 | ||
312 | struct ttm_bo_driver vmw_bo_driver = { | 312 | struct ttm_bo_driver vmw_bo_driver = { |