aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorMaarten Lankhorst <maarten.lankhorst@canonical.com>2012-10-12 11:01:43 -0400
committerDave Airlie <airlied@redhat.com>2012-11-20 01:09:35 -0500
commitbe013367fd6fbab52ddf6f76c243f4109090c890 (patch)
tree1fcfd1146c1e5e6d4299e754c179c03d2d3df6d0 /drivers/gpu
parent0355cf3a0f49e26f4b84d9da7189b2324cf1df6d (diff)
drm/vmwgfx: remove use of fence_obj_args
It's always hardcoded to the same value.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-By: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
3 files changed, 4 insertions, 22 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 9826fbc88154..da12922b6313 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -318,19 +318,16 @@ static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
 
 static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
 {
-	unsigned long flags = (unsigned long) sync_arg;
 	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
-				      (uint32_t) flags);
+				      DRM_VMW_FENCE_FLAG_EXEC);
 
 }
 
 static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
 			     bool lazy, bool interruptible)
 {
-	unsigned long flags = (unsigned long) sync_arg;
-
 	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
-				  (uint32_t) flags,
+				  DRM_VMW_FENCE_FLAG_EXEC,
 				  lazy, interruptible,
 				  VMW_FENCE_WAIT_TIMEOUT);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 30654b4cc972..e5775a0db495 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -71,7 +71,6 @@ static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 				   struct ttm_buffer_object *bo,
-				   uint32_t fence_flags,
 				   uint32_t *p_val_node)
 {
 	uint32_t val_node;
@@ -87,15 +86,12 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 
 	val_buf = &sw_context->val_bufs[val_node];
 	if (unlikely(val_node == sw_context->cur_val_buf)) {
-		val_buf->new_sync_obj_arg = NULL;
 		val_buf->bo = ttm_bo_reference(bo);
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		++sw_context->cur_val_buf;
 	}
 
-	val_buf->new_sync_obj_arg = (void *)
-		((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
-	sw_context->fence_flags |= fence_flags;
+	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
 
 	if (p_val_node)
 		*p_val_node = val_node;
@@ -313,7 +309,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 		cid_to_add = sw_context->cur_query_cid;
 		ret = vmw_bo_to_validate_list(sw_context,
 					      sw_context->cur_query_bo,
-					      DRM_VMW_FENCE_FLAG_EXEC,
 					      NULL);
 		if (unlikely(ret != 0))
 			return ret;
@@ -322,7 +317,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 	ret = vmw_bo_to_validate_list(sw_context,
 				      dev_priv->dummy_query_bo,
-				      DRM_VMW_FENCE_FLAG_EXEC,
 				      NULL);
 	if (unlikely(ret != 0))
 		return ret;
@@ -346,7 +340,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 					  &sw_context->query_list);
 		ret = vmw_bo_to_validate_list(sw_context,
 					      dev_priv->dummy_query_bo,
-					      DRM_VMW_FENCE_FLAG_EXEC,
 					      NULL);
 		if (unlikely(ret != 0))
 			return ret;
@@ -465,8 +458,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
-				      &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -1290,12 +1282,9 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 
 	INIT_LIST_HEAD(&validate_list);
 
-	pinned_val.new_sync_obj_arg = (void *)(unsigned long)
-		DRM_VMW_FENCE_FLAG_EXEC;
 	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
 	list_add_tail(&pinned_val.head, &validate_list);
 
-	query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
 	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
 	list_add_tail(&query_val.head, &validate_list);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index be87124a2769..596cef3c9189 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -833,8 +833,6 @@ int vmw_surface_do_validate(struct vmw_private *dev_priv,
 	if (srf->backup) {
 		INIT_LIST_HEAD(&val_list);
 		val_buf.bo = ttm_bo_reference(srf->backup);
-		val_buf.new_sync_obj_arg = (void *)((unsigned long)
-			DRM_VMW_FENCE_FLAG_EXEC);
 		list_add_tail(&val_buf.head, &val_list);
 		ret = ttm_eu_reserve_buffers(&val_list);
 		if (unlikely(ret != 0))
@@ -966,8 +964,6 @@ int vmw_surface_evict(struct vmw_private *dev_priv,
 
 	INIT_LIST_HEAD(&val_list);
 	val_buf.bo = ttm_bo_reference(srf->backup);
-	val_buf.new_sync_obj_arg = (void *)(unsigned long)
-		DRM_VMW_FENCE_FLAG_EXEC;
 	list_add_tail(&val_buf.head, &val_list);
 	ret = ttm_eu_reserve_buffers(&val_list);
 	if (unlikely(ret != 0))