author     Thomas Hellstrom <thellstrom@vmware.com>    2012-11-21 05:19:53 -0500
committer  Thomas Hellstrom <thellstrom@vmware.com>    2014-01-17 01:52:22 -0500
commit     96c5f0df22aaf1f20075bc6ad3bdd7656e49cf4d (patch)
tree       862783760c332324bd7e75c3e7cbebf450822042 /drivers/gpu/drm/vmwgfx
parent     afb0e50fae3aaeef2ca58e27cf650cb388846f19 (diff)
drm/vmwgfx: Add the possibility to validate a buffer as a MOB
Also do basic consistency checking.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Zack Rusin <zackr@vmware.com>
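Reader's note (not part of the patch): the core of the change is that each buffer on the validation list now records whether it must be validated as a MOB, and re-adding the same buffer with a conflicting flag is rejected with -EINVAL. The stand-alone C sketch below mirrors only that consistency rule; the struct and function names (validate_node, add_to_validate_list) are simplified stand-ins, not the vmwgfx symbols used in the diff.

/*
 * Minimal user-space sketch of the consistency rule this patch enforces:
 * a buffer may sit on the validation list either as a MOB or as a regular
 * buffer, but not both within one submission.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct validate_node {
	bool on_list;          /* already on the validation list? */
	bool validate_as_mob;  /* how it was first added */
};

static int add_to_validate_list(struct validate_node *node, bool validate_as_mob)
{
	if (node->on_list) {
		/* Reject mixed usage of one buffer, as the patch does. */
		if (node->validate_as_mob != validate_as_mob) {
			fprintf(stderr, "Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		return 0;
	}
	node->on_list = true;
	node->validate_as_mob = validate_as_mob;
	return 0;
}

int main(void)
{
	struct validate_node node = { false, false };

	add_to_validate_list(&node, true);             /* first add: as a MOB */
	int ret = add_to_validate_list(&node, false);  /* conflicting re-add */
	printf("second add returned %d\n", ret);       /* prints -22 (-EINVAL) */
	return 0;
}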
Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  | 26
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  2
3 files changed, 23 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6ce733df200a..a9a0d6949ca2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -94,6 +94,7 @@ struct vmw_dma_buffer {
 struct vmw_validate_buffer {
 	struct ttm_validate_buffer base;
 	struct drm_hash_item hash;
+	bool validate_as_mob;
 };
 
 struct vmw_res_func;
@@ -645,6 +646,7 @@ extern struct ttm_placement vmw_sys_placement;
 extern struct ttm_placement vmw_sys_ne_placement;
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
+extern struct ttm_placement vmw_mob_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
 extern void vmw_piter_start(struct vmw_piter *viter,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 599f6469a1eb..9d7e49d3801b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -224,6 +224,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  *
  * @sw_context: The software context used for this command submission batch.
  * @bo: The buffer object to add.
+ * @validate_as_mob: Validate this buffer as a MOB.
  * @p_val_node: If non-NULL Will be updated with the validate node number
  * on return.
  *
@@ -232,6 +233,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 				   struct ttm_buffer_object *bo,
+				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
 	uint32_t val_node;
@@ -244,6 +246,10 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 				    &hash) == 0)) {
 		vval_buf = container_of(hash, struct vmw_validate_buffer,
 					hash);
+		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
+			DRM_ERROR("Inconsistent buffer usage.\n");
+			return -EINVAL;
+		}
 		val_buf = &vval_buf->base;
 		val_node = vval_buf - sw_context->val_bufs;
 	} else {
@@ -266,6 +272,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 		val_buf->bo = ttm_bo_reference(bo);
 		val_buf->reserved = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+		vval_buf->validate_as_mob = validate_as_mob;
 	}
 
 	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
@@ -302,7 +309,8 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 			struct ttm_buffer_object *bo = &res->backup->base;
 
 			ret = vmw_bo_to_validate_list
-				(sw_context, bo, NULL);
+				(sw_context, bo,
+				 vmw_resource_needs_backup(res), NULL);
 
 			if (unlikely(ret != 0))
 				return ret;
@@ -586,7 +594,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 			sw_context->needs_post_query_barrier = true;
 			ret = vmw_bo_to_validate_list(sw_context,
 						      sw_context->cur_query_bo,
-						      NULL);
+						      dev_priv->has_mob, NULL);
 			if (unlikely(ret != 0))
 				return ret;
 		}
@@ -594,7 +602,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 		ret = vmw_bo_to_validate_list(sw_context,
 					      dev_priv->dummy_query_bo,
-					      NULL);
+					      dev_priv->has_mob, NULL);
 		if (unlikely(ret != 0))
 			return ret;
 
@@ -718,7 +726,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -1224,7 +1232,8 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 }
 
 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-				      struct ttm_buffer_object *bo)
+				      struct ttm_buffer_object *bo,
+				      bool validate_as_mob)
 {
 	int ret;
 
@@ -1238,6 +1247,9 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 			 dev_priv->dummy_query_bo_pinned))
 		return 0;
 
+	if (validate_as_mob)
+		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+
 	/**
 	 * Put BO in VRAM if there is space, otherwise as a GMR.
 	 * If there is no space in VRAM and GMR ids are all used up,
@@ -1259,7 +1271,6 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	return ret;
 }
 
-
 static int vmw_validate_buffers(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context)
 {
@@ -1267,7 +1278,8 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
 	int ret;
 
 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
-		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
+		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+						 entry->validate_as_mob);
 		if (unlikely(ret != 0))
 			return ret;
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9b5ea2ac7ddf..1a62eedb0fea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -471,6 +471,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 	}
 
 	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+			      (dev_priv->has_mob) ?
+			      &vmw_sys_placement :
 			      &vmw_vram_sys_placement, true,
 			      &vmw_user_dmabuf_destroy);
 	if (unlikely(ret != 0))