author     Thomas Hellstrom <thellstrom@vmware.com>    2018-06-19 09:02:16 -0400
committer  Thomas Hellstrom <thellstrom@vmware.com>    2018-07-03 14:33:30 -0400
commit     f1d34bfd70b1b4543a139ea28bad4c001c5f413d (patch)
tree       0d3fb3ee166a2d81f4f7e7e2338dd3c625929554
parent     07c13bb78c8b8a9cb6ee169659528945038d5e85 (diff)
drm/vmwgfx: Replace vmw_dma_buffer with vmw_buffer_object
Initially vmware buffer objects were only used as DMA buffers, so the name
DMA buffer was a natural one. However, currently they are used also as
dumb buffers and MOBs backing guest backed objects so renaming them to
buffer objects is logical. Particularly since there is a dmabuf subsystem
in the kernel where a dma buffer means something completely different.

This also renames user-space api structures and IOCTL names
correspondingly, but the old names remain defined for now and the ABI
hasn't changed. There are a couple of minor style changes to make
checkpatch happy.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
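The user-space compatibility noted above (old names kept defined, ABI unchanged) is the usual pattern of aliasing the legacy identifiers to the new ones in the uapi header. A minimal sketch of what such aliases can look like follows; the exact macro and struct names here are illustrative only and are not quoted from this patch:

    /*
     * Hypothetical compatibility aliases: the old DMABUF-flavoured uapi
     * names simply resolve to the new buffer-object names, so existing
     * user space keeps compiling and the ioctl ABI stays unchanged.
     */
    #define DRM_VMW_ALLOC_DMABUF       DRM_VMW_ALLOC_BO
    #define DRM_VMW_UNREF_DMABUF       DRM_VMW_UNREF_BO
    #define drm_vmw_alloc_dmabuf_req   drm_vmw_alloc_bo_req
    #define drm_vmw_dmabuf_rep         drm_vmw_bo_rep
    #define drm_vmw_unref_dmabuf_arg   drm_vmw_unref_bo_arg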
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c (renamed from drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c)  58
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h  129
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  86
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c  16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  203
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h  58
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c  24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  280
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c  100
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c  22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c  64
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  37
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c (renamed from drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c)  2
-rw-r--r--  include/uapi/drm/vmwgfx_drm.h  61
20 files changed, 589 insertions, 607 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 794cc9d5c9b0..09b2aa08363e 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
-	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+	    vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
 	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
 	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index d59d9dd16ebc..f26f658cccdb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -32,7 +32,7 @@
 
 
 /**
- * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
+ * vmw_bo_pin_in_placement - Validate a buffer to placement.
  *
  * @dev_priv: Driver private.
  * @buf: DMA buffer to move.
@@ -42,10 +42,10 @@
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
-				struct vmw_dma_buffer *buf,
+int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+			    struct vmw_buffer_object *buf,
 			    struct ttm_placement *placement,
 			    bool interruptible)
 {
 	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
@@ -79,7 +79,7 @@ err:
 }
 
 /**
- * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+ * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
  *
  * This function takes the reservation_sem in write mode.
  * Flushes and unpins the query bo to avoid failures.
@@ -92,9 +92,9 @@ err:
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-				  struct vmw_dma_buffer *buf,
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+			      struct vmw_buffer_object *buf,
 			      bool interruptible)
 {
 	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
@@ -134,7 +134,7 @@ err:
 }
 
 /**
- * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
+ * vmw_bo_pin_in_vram - Move a buffer to vram.
  *
  * This function takes the reservation_sem in write mode.
  * Flushes and unpins the query bo to avoid failures.
@@ -146,16 +146,16 @@ err:
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-			   struct vmw_dma_buffer *buf,
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+		       struct vmw_buffer_object *buf,
 		       bool interruptible)
 {
-	return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
 				       interruptible);
 }
 
 /**
- * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
+ * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
  *
  * This function takes the reservation_sem in write mode.
  * Flushes and unpins the query bo to avoid failures.
@@ -167,9 +167,9 @@ int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
-				    struct vmw_dma_buffer *buf,
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+				struct vmw_buffer_object *buf,
 				bool interruptible)
 {
 	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
@@ -226,7 +226,7 @@ err_unlock:
 }
 
 /**
- * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
+ * vmw_bo_unpin - Unpin the buffer given buffer, does not move the buffer.
  *
  * This function takes the reservation_sem in write mode.
  *
@@ -237,9 +237,9 @@ err_unlock:
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
-		     struct vmw_dma_buffer *buf,
+int vmw_bo_unpin(struct vmw_private *dev_priv,
+		 struct vmw_buffer_object *buf,
 		 bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
@@ -288,7 +288,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
  * @pin: Whether to pin or unpin.
  *
  */
-void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 {
 	struct ttm_operation_ctx ctx = { false, true };
 	struct ttm_place pl;
@@ -326,14 +326,14 @@ void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 
 
 /*
- * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
+ * vmw_buffer_object_unmap - Tear down a cached buffer object map.
  *
  * @vbo: The buffer object whose map we are tearing down.
  *
  * This function tears down a cached map set up using
- * vmw_dma_buffer_map_and_cache().
+ * vmw_buffer_object_map_and_cache().
  */
-void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
+void vmw_buffer_object_unmap(struct vmw_buffer_object *vbo)
 {
 	if (vbo->map.bo == NULL)
 		return;
@@ -343,7 +343,7 @@ void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
 
 
 /*
- * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
+ * vmw_buffer_object_map_and_cache - Map a buffer object and cache the map
  *
  * @vbo: The buffer object to map
  * Return: A kernel virtual address or NULL if mapping failed.
@@ -357,7 +357,7 @@ void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
  * 3) Buffer object destruction
  *
  */
-void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
+void *vmw_buffer_object_map_and_cache(struct vmw_buffer_object *vbo)
 {
 	struct ttm_buffer_object *bo = &vbo->base;
 	bool not_used;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 3767ac335aca..ff8acc74786c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -38,7 +38,7 @@ struct vmw_user_context {
 	struct vmw_cmdbuf_res_manager *man;
 	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
 	spinlock_t cotable_lock;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 };
 
 static void vmw_user_context_free(struct vmw_resource *res);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
  * specified in the parameter. 0 otherwise.
  */
 int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-			      struct vmw_dma_buffer *mob)
+			      struct vmw_buffer_object *mob)
 {
 	struct vmw_user_context *uctx =
 		container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 	if (mob == NULL) {
 		if (uctx->dx_query_mob) {
 			uctx->dx_query_mob->dx_query_ctx = NULL;
-			vmw_dmabuf_unreference(&uctx->dx_query_mob);
+			vmw_bo_unreference(&uctx->dx_query_mob);
 			uctx->dx_query_mob = NULL;
 		}
 
@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 	mob->dx_query_ctx = ctx_res;
 
 	if (!uctx->dx_query_mob)
-		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+		uctx->dx_query_mob = vmw_bo_reference(mob);
 
 	return 0;
 }
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
  *
  * @ctx_res: The context resource
  */
-struct vmw_dma_buffer *
+struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
 {
 	struct vmw_user_context *uctx =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index cbf54ea7b4c0..1052cd3cb700 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	struct ttm_operation_ctx ctx = { false, false };
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_cotable *vcotbl = vmw_cotable(res);
-	struct vmw_dma_buffer *buf, *old_buf = res->backup;
+	struct vmw_buffer_object *buf, *old_buf = res->backup;
 	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
 	size_t old_size = res->backup_size;
 	size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	if (!buf)
 		return -ENOMEM;
 
-	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
-			      true, vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+			  true, vmw_bo_bo_free);
 	if (ret) {
 		DRM_ERROR("Failed initializing new cotable MOB.\n");
 		return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	/* Let go of the old mob. */
 	list_del(&res->mob_head);
 	list_add_tail(&res->mob_head, &buf->res_list);
-	vmw_dmabuf_unreference(&old_buf);
+	vmw_bo_unreference(&old_buf);
 	res->id = vcotbl->type;
 
 	return 0;
@@ -491,7 +491,7 @@ out_map_new:
 	ttm_bo_kunmap(&old_map);
 out_wait:
 	ttm_bo_unreserve(bo);
-	vmw_dmabuf_unreference(&buf);
+	vmw_bo_unreference(&buf);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 09cc721160c4..4f18304226bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -153,9 +153,9 @@
 static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 		      vmw_kms_cursor_bypass_ioctl,
@@ -219,7 +219,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      vmw_gb_surface_reference_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
-		      vmw_user_dmabuf_synccpu_ioctl,
+		      vmw_user_bo_synccpu_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
 		      vmw_extended_context_define_ioctl,
@@ -321,7 +321,7 @@ static void vmw_print_capabilities(uint32_t capabilities)
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
 	int ret;
-	struct vmw_dma_buffer *vbo;
+	struct vmw_buffer_object *vbo;
 	struct ttm_bo_kmap_obj map;
 	volatile SVGA3dQueryResult *result;
 	bool dummy;
@@ -335,9 +335,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 	if (!vbo)
 		return -ENOMEM;
 
-	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
 			  &vmw_sys_ne_placement, false,
-			      &vmw_dmabuf_bo_free);
+			  &vmw_bo_bo_free);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -358,7 +358,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Dummy query buffer map failed.\n");
-		vmw_dmabuf_unreference(&vbo);
+		vmw_bo_unreference(&vbo);
 	} else
 		dev_priv->dummy_query_bo = vbo;
 
@@ -460,7 +460,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
 
 	BUG_ON(dev_priv->pinned_bo != NULL);
 
-	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+	vmw_bo_unreference(&dev_priv->dummy_query_bo);
 	if (dev_priv->cman)
 		vmw_cmdbuf_remove_pool(dev_priv->cman);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5fcbe1620d50..25c2f668ad6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -86,7 +86,7 @@ struct vmw_fpriv {
 	bool gb_aware;
 };
 
-struct vmw_dma_buffer {
+struct vmw_buffer_object {
 	struct ttm_buffer_object base;
 	struct list_head res_list;
 	s32 pin_count;
@@ -120,7 +120,7 @@ struct vmw_resource {
 	unsigned long backup_size;
 	bool res_dirty; /* Protected by backup buffer reserved */
 	bool backup_dirty; /* Protected by backup buffer reserved */
-	struct vmw_dma_buffer *backup;
+	struct vmw_buffer_object *backup;
 	unsigned long backup_offset;
 	unsigned long pin_count; /* Protected by resource reserved */
 	const struct vmw_res_func *func;
@@ -304,7 +304,7 @@ struct vmw_sw_context{
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
 	struct list_head ctx_resource_list; /* For contexts and cotables */
-	struct vmw_dma_buffer *cur_query_bo;
+	struct vmw_buffer_object *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -315,7 +315,7 @@ struct vmw_sw_context{
 	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
 	struct vmw_resource_val_node *dx_ctx_node;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct vmw_resource *dx_query_ctx;
 	struct vmw_cmdbuf_res_manager *man;
 };
@@ -513,8 +513,8 @@ struct vmw_private {
 	 * are protected by the cmdbuf mutex.
 	 */
 
-	struct vmw_dma_buffer *dummy_query_bo;
-	struct vmw_dma_buffer *pinned_bo;
+	struct vmw_buffer_object *dummy_query_bo;
+	struct vmw_buffer_object *pinned_bo;
 	uint32_t query_cid;
 	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
@@ -623,43 +623,43 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
 				  uint32_t handle,
 				  struct vmw_surface **out_surf,
-				  struct vmw_dma_buffer **out_buf);
+				  struct vmw_buffer_object **out_buf);
 extern int vmw_user_resource_lookup_handle(
 	struct vmw_private *dev_priv,
 	struct ttm_object_file *tfile,
 	uint32_t handle,
 	const struct vmw_user_resource_conv *converter,
 	struct vmw_resource **p_res);
-extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
-			   struct vmw_dma_buffer *vmw_bo,
+extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_init(struct vmw_private *dev_priv,
+		       struct vmw_buffer_object *vmw_bo,
 		       size_t size, struct ttm_placement *placement,
 		       bool interuptable,
-			   void (*bo_free) (struct ttm_buffer_object *bo));
-extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+		       void (*bo_free)(struct ttm_buffer_object *bo));
+extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
 				     struct ttm_object_file *tfile);
-extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
 			     struct ttm_object_file *tfile,
 			     uint32_t size,
 			     bool shareable,
 			     uint32_t *handle,
-				 struct vmw_dma_buffer **p_dma_buf,
+			     struct vmw_buffer_object **p_dma_buf,
 			     struct ttm_base_object **p_base);
-extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-				     struct vmw_dma_buffer *dma_buf,
+extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
+				 struct vmw_buffer_object *dma_buf,
 				 uint32_t *handle);
-extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
-extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
+extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
-extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
-extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
+extern uint32_t vmw_bo_validate_node(struct ttm_buffer_object *bo,
 				     uint32_t cur_validate_node);
-extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
-extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-				  uint32_t id, struct vmw_dma_buffer **out,
+extern void vmw_bo_validate_clear(struct ttm_buffer_object *bo);
+extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+			      uint32_t id, struct vmw_buffer_object **out,
 			      struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -670,43 +670,43 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 				  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
 				   bool switch_backup,
-				   struct vmw_dma_buffer *new_backup,
+				   struct vmw_buffer_object *new_backup,
 				   unsigned long new_backup_offset);
 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 				     struct ttm_mem_reg *mem);
 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem);
 extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
-extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
+extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 				struct vmw_fence_obj *fence);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 
 
 /**
- * DMA buffer helper routines - vmwgfx_dmabuf.c
+ * Buffer object helper functions - vmwgfx_bo.c
  */
-extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
-				       struct vmw_dma_buffer *bo,
+extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
+				   struct vmw_buffer_object *bo,
 				   struct ttm_placement *placement,
+				   bool interruptible);
+extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+			      struct vmw_buffer_object *buf,
+			      bool interruptible);
+extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+				     struct vmw_buffer_object *buf,
+				     bool interruptible);
+extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+					struct vmw_buffer_object *bo,
 					bool interruptible);
-extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-				  struct vmw_dma_buffer *buf,
+extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
+			struct vmw_buffer_object *bo,
 			bool interruptible);
-extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-					 struct vmw_dma_buffer *buf,
-					 bool interruptible);
-extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
-					    struct vmw_dma_buffer *bo,
-					    bool interruptible);
-extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
-			    struct vmw_dma_buffer *bo,
-			    bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
 				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
-extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
-extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
+extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
+extern void *vmw_buffer_object_map_and_cache(struct vmw_buffer_object *vbo);
+extern void vmw_buffer_object_unmap(struct vmw_buffer_object *vbo);
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -758,7 +758,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /**
- * TTM buffer object driver - vmwgfx_buffer.c
+ * TTM buffer object driver - vmwgfx_ttm_buffer.c
  */
 
 extern const size_t vmw_tt_size;
@@ -1041,8 +1041,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
 extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
 					  bool readback);
 extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-				     struct vmw_dma_buffer *mob);
-extern struct vmw_dma_buffer *
+				     struct vmw_buffer_object *mob);
+extern struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
 
 
@@ -1243,9 +1243,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
 	return srf;
 }
 
-static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
 {
-	struct vmw_dma_buffer *tmp_buf = *buf;
+	struct vmw_buffer_object *tmp_buf = *buf;
 
 	*buf = NULL;
 	if (tmp_buf != NULL) {
@@ -1255,7 +1255,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
 	}
 }
 
-static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+static inline struct vmw_buffer_object *
+vmw_bo_reference(struct vmw_buffer_object *buf)
 {
 	if (ttm_bo_reference(&buf->base))
 		return buf;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c9d5cc237124..a8b194655c40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
 	struct list_head head;
 	struct drm_hash_item hash;
 	struct vmw_resource *res;
-	struct vmw_dma_buffer *new_backup;
+	struct vmw_buffer_object *new_backup;
 	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
 	u32 first_usage : 1;
@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGAMobId *id,
-				 struct vmw_dma_buffer **vmw_bo_p);
+				 struct vmw_buffer_object **vmw_bo_p);
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_dma_buffer *vbo,
+				   struct vmw_buffer_object *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node);
 /**
@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
 		}
 		vmw_resource_unreserve(res, switch_backup, val->new_backup,
 				       val->new_backup_offset);
-		vmw_dmabuf_unreference(&val->new_backup);
+		vmw_bo_unreference(&val->new_backup);
 	}
 }
 
@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 	}
 
 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
-		struct vmw_dma_buffer *dx_query_mob;
+		struct vmw_buffer_object *dx_query_mob;
 
 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 		if (dx_query_mob)
@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_dma_buffer *vbo,
+				   struct vmw_buffer_object *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 			return ret;
 
 		if (res->backup) {
-			struct vmw_dma_buffer *vbo = res->backup;
+			struct vmw_buffer_object *vbo = res->backup;
 
 			ret = vmw_bo_to_validate_list
 				(sw_context, vbo,
@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 	}
 
 	if (sw_context->dx_query_mob) {
-		struct vmw_dma_buffer *expected_dx_query_mob;
+		struct vmw_buffer_object *expected_dx_query_mob;
 
 		expected_dx_query_mob =
 			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
-		struct vmw_dma_buffer *backup = res->backup;
+		struct vmw_buffer_object *backup = res->backup;
 
 		ret = vmw_resource_validate(res);
 		if (unlikely(ret != 0)) {
@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
 		/* Check if the resource switched backup buffer */
 		if (backup && res->backup && (backup != res->backup)) {
-			struct vmw_dma_buffer *vbo = res->backup;
+			struct vmw_buffer_object *vbo = res->backup;
 
 			ret = vmw_bo_to_validate_list
 				(sw_context, vbo,
@@ -821,7 +821,7 @@ out_no_reloc:
 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 {
 	struct vmw_private *dev_priv = ctx_res->dev_priv;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDXBindAllQuery body;
@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       struct vmw_dma_buffer *new_query_bo,
+				       struct vmw_buffer_object *new_query_bo,
 				       struct vmw_sw_context *sw_context)
 {
 	struct vmw_res_cache_entry *ctx_entry =
@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 		if (dev_priv->pinned_bo) {
 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
-			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+			vmw_bo_unreference(&dev_priv->pinned_bo);
 		}
 
 		if (!sw_context->needs_post_query_barrier) {
@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 			dev_priv->query_cid = sw_context->last_query_ctx->id;
 			dev_priv->query_cid_valid = true;
 			dev_priv->pinned_bo =
-				vmw_dmabuf_reference(sw_context->cur_query_bo);
+				vmw_bo_reference(sw_context->cur_query_bo);
 		}
 	}
 }
@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGAMobId *id,
-				 struct vmw_dma_buffer **vmw_bo_p)
+				 struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-				     NULL);
+	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		ret = -EINVAL;
@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	return 0;
 
 out_no_reloc:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -1343,15 +1342,14 @@ out_no_reloc:
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 				   struct vmw_sw_context *sw_context,
 				   SVGAGuestPtr *ptr,
-				   struct vmw_dma_buffer **vmw_bo_p)
+				   struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-				     NULL);
+	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		ret = -EINVAL;
@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	return 0;
 
 out_no_reloc:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 		SVGA3dCmdDXBindQuery q;
 	} *cmd;
 
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;
 
 
@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 	sw_context->dx_query_mob = vmw_bo;
 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 
 	return ret;
 }
@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdEndGBQuery q;
@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 
 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
 
@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdEndQuery q;
@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 
 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
 
@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForGBQuery q;
@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return 0;
 }
 
@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 			      struct vmw_sw_context *sw_context,
 			      SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForQuery q;
@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return 0;
 }
 
@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		       struct vmw_sw_context *sw_context,
 		       SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	struct vmw_surface *srf = NULL;
 	struct vmw_dma_cmd {
 		SVGA3dCmdHeader header;
@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 			     header);
 
 out_no_surface:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
 
@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 				      struct vmw_sw_context *sw_context,
 				      void *buf)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;
 
 	struct {
@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 
 	return ret;
 }
@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
 				     uint32_t *buf_id,
 				     unsigned long backup_offset)
 {
-	struct vmw_dma_buffer *dma_buf;
+	struct vmw_buffer_object *dma_buf;
 	int ret;
 
 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
 	if (val_node->first_usage)
 		val_node->no_buffer_needed = true;
 
-	vmw_dmabuf_unreference(&val_node->new_backup);
+	vmw_bo_unreference(&val_node->new_backup);
 	val_node->new_backup = dma_buf;
 	val_node->new_backup_offset = backup_offset;
 
@@ -3701,8 +3699,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 			       bool interruptible,
 			       bool validate_as_mob)
 {
-	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
-						  base);
+	struct vmw_buffer_object *vbo =
+		container_of(bo, struct vmw_buffer_object, base);
 	struct ttm_operation_ctx ctx = { interruptible, true };
 	int ret;
 
@@ -4423,7 +4421,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
 out_unlock:
 	return;
 
@@ -4432,7 +4430,7 @@ out_no_emit:
 out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 9b7e0aca5f84..dcde4985c574 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -42,7 +42,7 @@ struct vmw_fb_par {
 	void *vmalloc;
 
 	struct mutex bo_mutex;
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	unsigned bo_size;
 	struct drm_framebuffer *set_fb;
 	struct drm_display_mode *set_mode;
@@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
 	struct drm_clip_rect clip;
 	struct drm_framebuffer *cur_fb;
 	u8 *src_ptr, *dst_ptr;
-	struct vmw_dma_buffer *vbo = par->vmw_bo;
+	struct vmw_buffer_object *vbo = par->vmw_bo;
 	void *virtual;
 
 	if (!READ_ONCE(par->dirty.active))
@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
 
 	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
 	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
-	virtual = vmw_dma_buffer_map_and_cache(vbo);
+	virtual = vmw_buffer_object_map_and_cache(vbo);
 	if (!virtual)
 		goto out_unreserve;
 
@@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
  */
 
 static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
-			    size_t size, struct vmw_dma_buffer **out)
+			    size_t size, struct vmw_buffer_object **out)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;
 
 	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
@@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
 		goto err_unlock;
 	}
 
-	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+	ret = vmw_bo_init(vmw_priv, vmw_bo, size,
 			  &vmw_sys_placement,
 			  false,
-			      &vmw_dmabuf_bo_free);
+			  &vmw_bo_bo_free);
 	if (unlikely(ret != 0))
 		goto err_unlock; /* init frees the buffer on failure */
 
@@ -491,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
 	}
 
 	if (par->vmw_bo && detach_bo && unref_bo)
-		vmw_dmabuf_unreference(&par->vmw_bo);
+		vmw_bo_unreference(&par->vmw_bo);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c5e8eae0dbe2..5e0c8f775c92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -377,8 +377,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 	}
 
 	vfb = vmw_framebuffer_to_vfb(fb);
-	if (!vfb->dmabuf) {
-		DRM_ERROR("Framebuffer not dmabuf backed.\n");
+	if (!vfb->bo) {
+		DRM_ERROR("Framebuffer not buffer backed.\n");
 		ret = -EINVAL;
 		goto out_no_ttm_lock;
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ef96ba7432ad..7a32be0cef14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -85,10 +85,10 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
85 return 0; 85 return 0;
86} 86}
87 87
88static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, 88static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
89 struct vmw_dma_buffer *dmabuf, 89 struct vmw_buffer_object *bo,
90 u32 width, u32 height, 90 u32 width, u32 height,
91 u32 hotspotX, u32 hotspotY) 91 u32 hotspotX, u32 hotspotY)
92{ 92{
93 struct ttm_bo_kmap_obj map; 93 struct ttm_bo_kmap_obj map;
94 unsigned long kmap_offset; 94 unsigned long kmap_offset;
@@ -100,13 +100,13 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
100 kmap_offset = 0; 100 kmap_offset = 0;
101 kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; 101 kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
102 102
103 ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL); 103 ret = ttm_bo_reserve(&bo->base, true, false, NULL);
104 if (unlikely(ret != 0)) { 104 if (unlikely(ret != 0)) {
105 DRM_ERROR("reserve failed\n"); 105 DRM_ERROR("reserve failed\n");
106 return -EINVAL; 106 return -EINVAL;
107 } 107 }
108 108
109 ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map); 109 ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
110 if (unlikely(ret != 0)) 110 if (unlikely(ret != 0))
111 goto err_unreserve; 111 goto err_unreserve;
112 112
@@ -116,7 +116,7 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
116 116
117 ttm_bo_kunmap(&map); 117 ttm_bo_kunmap(&map);
118err_unreserve: 118err_unreserve:
119 ttm_bo_unreserve(&dmabuf->base); 119 ttm_bo_unreserve(&bo->base);
120 120
121 return ret; 121 return ret;
122} 122}
@@ -352,13 +352,13 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
352 if (vps->surf) 352 if (vps->surf)
353 vmw_surface_unreference(&vps->surf); 353 vmw_surface_unreference(&vps->surf);
354 354
355 if (vps->dmabuf) 355 if (vps->bo)
356 vmw_dmabuf_unreference(&vps->dmabuf); 356 vmw_bo_unreference(&vps->bo);
357 357
358 if (fb) { 358 if (fb) {
359 if (vmw_framebuffer_to_vfb(fb)->dmabuf) { 359 if (vmw_framebuffer_to_vfb(fb)->bo) {
360 vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer; 360 vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
361 vmw_dmabuf_reference(vps->dmabuf); 361 vmw_bo_reference(vps->bo);
362 } else { 362 } else {
363 vps->surf = vmw_framebuffer_to_vfbs(fb)->surface; 363 vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
364 vmw_surface_reference(vps->surf); 364 vmw_surface_reference(vps->surf);
@@ -390,7 +390,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
390 } 390 }
391 391
392 du->cursor_surface = vps->surf; 392 du->cursor_surface = vps->surf;
393 du->cursor_dmabuf = vps->dmabuf; 393 du->cursor_bo = vps->bo;
394 394
395 if (vps->surf) { 395 if (vps->surf) {
396 du->cursor_age = du->cursor_surface->snooper.age; 396 du->cursor_age = du->cursor_surface->snooper.age;
@@ -399,11 +399,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
399 vps->surf->snooper.image, 399 vps->surf->snooper.image,
400 64, 64, hotspot_x, 400 64, 64, hotspot_x,
401 hotspot_y); 401 hotspot_y);
402 } else if (vps->dmabuf) { 402 } else if (vps->bo) {
403 ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf, 403 ret = vmw_cursor_update_bo(dev_priv, vps->bo,
404 plane->state->crtc_w, 404 plane->state->crtc_w,
405 plane->state->crtc_h, 405 plane->state->crtc_h,
406 hotspot_x, hotspot_y); 406 hotspot_x, hotspot_y);
407 } else { 407 } else {
408 vmw_cursor_update_position(dev_priv, false, 0, 0); 408 vmw_cursor_update_position(dev_priv, false, 0, 0);
409 return; 409 return;
@@ -519,7 +519,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
519 ret = -EINVAL; 519 ret = -EINVAL;
520 } 520 }
521 521
522 if (!vmw_framebuffer_to_vfb(fb)->dmabuf) 522 if (!vmw_framebuffer_to_vfb(fb)->bo)
523 surface = vmw_framebuffer_to_vfbs(fb)->surface; 523 surface = vmw_framebuffer_to_vfbs(fb)->surface;
524 524
525 if (surface && !surface->snooper.image) { 525 if (surface && !surface->snooper.image) {
@@ -687,8 +687,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
687 if (vps->surf) 687 if (vps->surf)
688 (void) vmw_surface_reference(vps->surf); 688 (void) vmw_surface_reference(vps->surf);
689 689
690 if (vps->dmabuf) 690 if (vps->bo)
691 (void) vmw_dmabuf_reference(vps->dmabuf); 691 (void) vmw_bo_reference(vps->bo);
692 692
693 state = &vps->base; 693 state = &vps->base;
694 694
@@ -745,8 +745,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
745 if (vps->surf) 745 if (vps->surf)
746 vmw_surface_unreference(&vps->surf); 746 vmw_surface_unreference(&vps->surf);
747 747
748 if (vps->dmabuf) 748 if (vps->bo)
749 vmw_dmabuf_unreference(&vps->dmabuf); 749 vmw_bo_unreference(&vps->bo);
750 750
751 drm_atomic_helper_plane_destroy_state(plane, state); 751 drm_atomic_helper_plane_destroy_state(plane, state);
752} 752}
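The three plane-state hooks touched above share one reference discipline after the rename: a vmw_plane_state holds exactly one reference on whichever backing object it carries, surface or buffer object, taken on prepare/duplicate and dropped on cleanup/destroy. A small illustrative helper (the name is hypothetical, the calls are the ones in the hunks):

/* Illustrative only: the unreference half of the pattern used by both
 * vmw_du_cursor_plane_prepare_fb() and vmw_du_plane_destroy_state(). */
static void vmw_plane_state_drop_refs(struct vmw_plane_state *vps)
{
        if (vps->surf)
                vmw_surface_unreference(&vps->surf);

        if (vps->bo)
                vmw_bo_unreference(&vps->bo);
}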
@@ -902,12 +902,12 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
902 902
903/** 903/**
904 * vmw_kms_readback - Perform a readback from the screen system to 904 * vmw_kms_readback - Perform a readback from the screen system to
905 * a dma-buffer backed framebuffer. 905 * a buffer-object backed framebuffer.
906 * 906 *
907 * @dev_priv: Pointer to the device private structure. 907 * @dev_priv: Pointer to the device private structure.
908 * @file_priv: Pointer to a struct drm_file identifying the caller. 908 * @file_priv: Pointer to a struct drm_file identifying the caller.
909 * Must be set to NULL if @user_fence_rep is NULL. 909 * Must be set to NULL if @user_fence_rep is NULL.
910 * @vfb: Pointer to the dma-buffer backed framebuffer. 910 * @vfb: Pointer to the buffer-object backed framebuffer.
911 * @user_fence_rep: User-space provided structure for fence information. 911 * @user_fence_rep: User-space provided structure for fence information.
912 * Must be set to non-NULL if @file_priv is non-NULL. 912 * Must be set to non-NULL if @file_priv is non-NULL.
913 * @vclips: Array of clip rects. 913 * @vclips: Array of clip rects.
@@ -951,7 +951,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
951 struct vmw_framebuffer **out, 951 struct vmw_framebuffer **out,
952 const struct drm_mode_fb_cmd2 952 const struct drm_mode_fb_cmd2
953 *mode_cmd, 953 *mode_cmd,
954 bool is_dmabuf_proxy) 954 bool is_bo_proxy)
955 955
956{ 956{
957 struct drm_device *dev = dev_priv->dev; 957 struct drm_device *dev = dev_priv->dev;
@@ -1019,7 +1019,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1019 drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd); 1019 drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1020 vfbs->surface = vmw_surface_reference(surface); 1020 vfbs->surface = vmw_surface_reference(surface);
1021 vfbs->base.user_handle = mode_cmd->handles[0]; 1021 vfbs->base.user_handle = mode_cmd->handles[0];
1022 vfbs->is_dmabuf_proxy = is_dmabuf_proxy; 1022 vfbs->is_bo_proxy = is_bo_proxy;
1023 1023
1024 *out = &vfbs->base; 1024 *out = &vfbs->base;
1025 1025
@@ -1038,30 +1038,30 @@ out_err1:
1038} 1038}
1039 1039
1040/* 1040/*
1041 * Dmabuf framebuffer code 1041 * Buffer-object framebuffer code
1042 */ 1042 */
1043 1043
1044static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) 1044static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1045{ 1045{
1046 struct vmw_framebuffer_dmabuf *vfbd = 1046 struct vmw_framebuffer_bo *vfbd =
1047 vmw_framebuffer_to_vfbd(framebuffer); 1047 vmw_framebuffer_to_vfbd(framebuffer);
1048 1048
1049 drm_framebuffer_cleanup(framebuffer); 1049 drm_framebuffer_cleanup(framebuffer);
1050 vmw_dmabuf_unreference(&vfbd->buffer); 1050 vmw_bo_unreference(&vfbd->buffer);
1051 if (vfbd->base.user_obj) 1051 if (vfbd->base.user_obj)
1052 ttm_base_object_unref(&vfbd->base.user_obj); 1052 ttm_base_object_unref(&vfbd->base.user_obj);
1053 1053
1054 kfree(vfbd); 1054 kfree(vfbd);
1055} 1055}
1056 1056
1057static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, 1057static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
1058 struct drm_file *file_priv, 1058 struct drm_file *file_priv,
1059 unsigned flags, unsigned color, 1059 unsigned int flags, unsigned int color,
1060 struct drm_clip_rect *clips, 1060 struct drm_clip_rect *clips,
1061 unsigned num_clips) 1061 unsigned int num_clips)
1062{ 1062{
1063 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); 1063 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1064 struct vmw_framebuffer_dmabuf *vfbd = 1064 struct vmw_framebuffer_bo *vfbd =
1065 vmw_framebuffer_to_vfbd(framebuffer); 1065 vmw_framebuffer_to_vfbd(framebuffer);
1066 struct drm_clip_rect norect; 1066 struct drm_clip_rect norect;
1067 int ret, increment = 1; 1067 int ret, increment = 1;
@@ -1092,13 +1092,13 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
1092 true, true, NULL); 1092 true, true, NULL);
1093 break; 1093 break;
1094 case vmw_du_screen_object: 1094 case vmw_du_screen_object:
1095 ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base, 1095 ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
1096 clips, NULL, num_clips, 1096 clips, NULL, num_clips,
1097 increment, true, NULL, NULL); 1097 increment, true, NULL, NULL);
1098 break; 1098 break;
1099 case vmw_du_legacy: 1099 case vmw_du_legacy:
1100 ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0, 1100 ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
1101 clips, num_clips, increment); 1101 clips, num_clips, increment);
1102 break; 1102 break;
1103 default: 1103 default:
1104 ret = -EINVAL; 1104 ret = -EINVAL;
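Because the side-by-side rendering interleaves old and new names, here is the dirty-dispatch switch of vmw_framebuffer_bo_dirty() collected as post-rename code; the vmw_du_screen_target branch is omitted since its call is only partially visible in this hunk.

/* Post-rename reconstruction of the dispatch (screen-target case elided). */
switch (dev_priv->active_display_unit) {
case vmw_du_screen_object:
        ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
                                      clips, NULL, num_clips,
                                      increment, true, NULL, NULL);
        break;
case vmw_du_legacy:
        ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
                                      clips, num_clips, increment);
        break;
default:
        ret = -EINVAL;
        break;
}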
@@ -1114,23 +1114,23 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
1114 return ret; 1114 return ret;
1115} 1115}
1116 1116
1117static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { 1117static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1118 .destroy = vmw_framebuffer_dmabuf_destroy, 1118 .destroy = vmw_framebuffer_bo_destroy,
1119 .dirty = vmw_framebuffer_dmabuf_dirty, 1119 .dirty = vmw_framebuffer_bo_dirty,
1120}; 1120};
1121 1121
1122/** 1122/**
 1123 * Pin the dmabuffer in a location suitable for access by the 1123 * Pin the buffer object in a location suitable for access by the
1124 * display system. 1124 * display system.
1125 */ 1125 */
1126static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) 1126static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1127{ 1127{
1128 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 1128 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1129 struct vmw_dma_buffer *buf; 1129 struct vmw_buffer_object *buf;
1130 struct ttm_placement *placement; 1130 struct ttm_placement *placement;
1131 int ret; 1131 int ret;
1132 1132
1133 buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : 1133 buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1134 vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; 1134 vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1135 1135
1136 if (!buf) 1136 if (!buf)
@@ -1139,12 +1139,12 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1139 switch (dev_priv->active_display_unit) { 1139 switch (dev_priv->active_display_unit) {
1140 case vmw_du_legacy: 1140 case vmw_du_legacy:
1141 vmw_overlay_pause_all(dev_priv); 1141 vmw_overlay_pause_all(dev_priv);
1142 ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false); 1142 ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
1143 vmw_overlay_resume_all(dev_priv); 1143 vmw_overlay_resume_all(dev_priv);
1144 break; 1144 break;
1145 case vmw_du_screen_object: 1145 case vmw_du_screen_object:
1146 case vmw_du_screen_target: 1146 case vmw_du_screen_target:
1147 if (vfb->dmabuf) { 1147 if (vfb->bo) {
1148 if (dev_priv->capabilities & SVGA_CAP_3D) { 1148 if (dev_priv->capabilities & SVGA_CAP_3D) {
1149 /* 1149 /*
1150 * Use surface DMA to get content to 1150 * Use surface DMA to get content to
@@ -1160,8 +1160,7 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1160 placement = &vmw_mob_placement; 1160 placement = &vmw_mob_placement;
1161 } 1161 }
1162 1162
1163 return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement, 1163 return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
1164 false);
1165 default: 1164 default:
1166 return -EINVAL; 1165 return -EINVAL;
1167 } 1166 }
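The pin path above chooses a different pin helper per display unit; a hedged sketch of that choice using only the renamed calls visible in the two hunks (the SVGA_CAP_3D placement selection for screen object/target is abbreviated to a comment).

/* Sketch: placement choice in vmw_framebuffer_pin(), abbreviated. */
switch (dev_priv->active_display_unit) {
case vmw_du_legacy:
        vmw_overlay_pause_all(dev_priv);
        ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
        vmw_overlay_resume_all(dev_priv);
        break;
case vmw_du_screen_object:
case vmw_du_screen_target:
        /* placement is &vmw_mob_placement or a VRAM placement,
         * depending on SVGA_CAP_3D (selection elided here). */
        return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
default:
        return -EINVAL;
}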
@@ -1172,36 +1171,36 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1172static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb) 1171static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
1173{ 1172{
1174 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 1173 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1175 struct vmw_dma_buffer *buf; 1174 struct vmw_buffer_object *buf;
1176 1175
1177 buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : 1176 buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1178 vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; 1177 vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1179 1178
1180 if (WARN_ON(!buf)) 1179 if (WARN_ON(!buf))
1181 return 0; 1180 return 0;
1182 1181
1183 return vmw_dmabuf_unpin(dev_priv, buf, false); 1182 return vmw_bo_unpin(dev_priv, buf, false);
1184} 1183}
1185 1184
1186/** 1185/**
1187 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf 1186 * vmw_create_bo_proxy - create a proxy surface for the buffer object
1188 * 1187 *
1189 * @dev: DRM device 1188 * @dev: DRM device
1190 * @mode_cmd: parameters for the new surface 1189 * @mode_cmd: parameters for the new surface
1191 * @dmabuf_mob: MOB backing the DMA buf 1190 * @bo_mob: MOB backing the buffer object
1192 * @srf_out: newly created surface 1191 * @srf_out: newly created surface
1193 * 1192 *
1194 * When the content FB is a DMA buf, we create a surface as a proxy to the 1193 * When the content FB is a buffer object, we create a surface as a proxy to the
1195 * same buffer. This way we can do a surface copy rather than a surface DMA. 1194 * same buffer. This way we can do a surface copy rather than a surface DMA.
1196 * This is a more efficient approach 1195 * This is a more efficient approach
1197 * 1196 *
1198 * RETURNS: 1197 * RETURNS:
1199 * 0 on success, error code otherwise 1198 * 0 on success, error code otherwise
1200 */ 1199 */
1201static int vmw_create_dmabuf_proxy(struct drm_device *dev, 1200static int vmw_create_bo_proxy(struct drm_device *dev,
1202 const struct drm_mode_fb_cmd2 *mode_cmd, 1201 const struct drm_mode_fb_cmd2 *mode_cmd,
1203 struct vmw_dma_buffer *dmabuf_mob, 1202 struct vmw_buffer_object *bo_mob,
1204 struct vmw_surface **srf_out) 1203 struct vmw_surface **srf_out)
1205{ 1204{
1206 uint32_t format; 1205 uint32_t format;
1207 struct drm_vmw_size content_base_size = {0}; 1206 struct drm_vmw_size content_base_size = {0};
@@ -1258,8 +1257,8 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
1258 /* Reserve and switch the backing mob. */ 1257 /* Reserve and switch the backing mob. */
1259 mutex_lock(&res->dev_priv->cmdbuf_mutex); 1258 mutex_lock(&res->dev_priv->cmdbuf_mutex);
1260 (void) vmw_resource_reserve(res, false, true); 1259 (void) vmw_resource_reserve(res, false, true);
1261 vmw_dmabuf_unreference(&res->backup); 1260 vmw_bo_unreference(&res->backup);
1262 res->backup = vmw_dmabuf_reference(dmabuf_mob); 1261 res->backup = vmw_bo_reference(bo_mob);
1263 res->backup_offset = 0; 1262 res->backup_offset = 0;
1264 vmw_resource_unreserve(res, false, NULL, 0); 1263 vmw_resource_unreserve(res, false, NULL, 0);
1265 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 1264 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
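The proxy trick described in the comment above amounts to swapping the surface resource's backup buffer for the caller's buffer object under the command-buffer mutex; collected from the hunk as post-rename code:

/* Post-rename reconstruction: reserve the resource and switch its backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true);
vmw_bo_unreference(&res->backup);
res->backup = vmw_bo_reference(bo_mob);
res->backup_offset = 0;
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);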
@@ -1269,21 +1268,21 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
1269 1268
1270 1269
1271 1270
1272static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, 1271static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1273 struct vmw_dma_buffer *dmabuf, 1272 struct vmw_buffer_object *bo,
1274 struct vmw_framebuffer **out, 1273 struct vmw_framebuffer **out,
1275 const struct drm_mode_fb_cmd2 1274 const struct drm_mode_fb_cmd2
1276 *mode_cmd) 1275 *mode_cmd)
1277 1276
1278{ 1277{
1279 struct drm_device *dev = dev_priv->dev; 1278 struct drm_device *dev = dev_priv->dev;
1280 struct vmw_framebuffer_dmabuf *vfbd; 1279 struct vmw_framebuffer_bo *vfbd;
1281 unsigned int requested_size; 1280 unsigned int requested_size;
1282 struct drm_format_name_buf format_name; 1281 struct drm_format_name_buf format_name;
1283 int ret; 1282 int ret;
1284 1283
1285 requested_size = mode_cmd->height * mode_cmd->pitches[0]; 1284 requested_size = mode_cmd->height * mode_cmd->pitches[0];
1286 if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) { 1285 if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
1287 DRM_ERROR("Screen buffer object size is too small " 1286 DRM_ERROR("Screen buffer object size is too small "
1288 "for requested mode.\n"); 1287 "for requested mode.\n");
1289 return -EINVAL; 1288 return -EINVAL;
@@ -1312,20 +1311,20 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1312 } 1311 }
1313 1312
1314 drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); 1313 drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1315 vfbd->base.dmabuf = true; 1314 vfbd->base.bo = true;
1316 vfbd->buffer = vmw_dmabuf_reference(dmabuf); 1315 vfbd->buffer = vmw_bo_reference(bo);
1317 vfbd->base.user_handle = mode_cmd->handles[0]; 1316 vfbd->base.user_handle = mode_cmd->handles[0];
1318 *out = &vfbd->base; 1317 *out = &vfbd->base;
1319 1318
1320 ret = drm_framebuffer_init(dev, &vfbd->base.base, 1319 ret = drm_framebuffer_init(dev, &vfbd->base.base,
1321 &vmw_framebuffer_dmabuf_funcs); 1320 &vmw_framebuffer_bo_funcs);
1322 if (ret) 1321 if (ret)
1323 goto out_err2; 1322 goto out_err2;
1324 1323
1325 return 0; 1324 return 0;
1326 1325
1327out_err2: 1326out_err2:
1328 vmw_dmabuf_unreference(&dmabuf); 1327 vmw_bo_unreference(&bo);
1329 kfree(vfbd); 1328 kfree(vfbd);
1330out_err1: 1329out_err1:
1331 return ret; 1330 return ret;
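Before wiring up a buffer-object framebuffer, the constructor only has to verify the object is big enough for the mode; restated with the renamed parameter for clarity:

/* A mode needs height * pitch bytes; reject undersized buffer objects. */
requested_size = mode_cmd->height * mode_cmd->pitches[0];
if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
        DRM_ERROR("Screen buffer object size is too small "
                  "for requested mode.\n");
        return -EINVAL;
}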
@@ -1354,57 +1353,57 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1354 * vmw_kms_new_framebuffer - Create a new framebuffer. 1353 * vmw_kms_new_framebuffer - Create a new framebuffer.
1355 * 1354 *
1356 * @dev_priv: Pointer to device private struct. 1355 * @dev_priv: Pointer to device private struct.
1357 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around. 1356 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1358 * Either @dmabuf or @surface must be NULL. 1357 * Either @bo or @surface must be NULL.
1359 * @surface: Pointer to a surface to wrap the kms framebuffer around. 1358 * @surface: Pointer to a surface to wrap the kms framebuffer around.
1360 * Either @dmabuf or @surface must be NULL. 1359 * Either @bo or @surface must be NULL.
1361 * @only_2d: No presents will occur to this dma buffer based framebuffer. This 1360 * @only_2d: No presents will occur to this buffer object based framebuffer.
1362 * Helps the code to do some important optimizations. 1361 * This helps the code to do some important optimizations.
1363 * @mode_cmd: Frame-buffer metadata. 1362 * @mode_cmd: Frame-buffer metadata.
1364 */ 1363 */
1365struct vmw_framebuffer * 1364struct vmw_framebuffer *
1366vmw_kms_new_framebuffer(struct vmw_private *dev_priv, 1365vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1367 struct vmw_dma_buffer *dmabuf, 1366 struct vmw_buffer_object *bo,
1368 struct vmw_surface *surface, 1367 struct vmw_surface *surface,
1369 bool only_2d, 1368 bool only_2d,
1370 const struct drm_mode_fb_cmd2 *mode_cmd) 1369 const struct drm_mode_fb_cmd2 *mode_cmd)
1371{ 1370{
1372 struct vmw_framebuffer *vfb = NULL; 1371 struct vmw_framebuffer *vfb = NULL;
1373 bool is_dmabuf_proxy = false; 1372 bool is_bo_proxy = false;
1374 int ret; 1373 int ret;
1375 1374
1376 /* 1375 /*
 1377 * We cannot use the SurfaceDMA command in a non-accelerated VM, 1376 * We cannot use the SurfaceDMA command in a non-accelerated VM,
1378 * therefore, wrap the DMA buf in a surface so we can use the 1377 * therefore, wrap the buffer object in a surface so we can use the
1379 * SurfaceCopy command. 1378 * SurfaceCopy command.
1380 */ 1379 */
1381 if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) && 1380 if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
1382 dmabuf && only_2d && 1381 bo && only_2d &&
1383 mode_cmd->width > 64 && /* Don't create a proxy for cursor */ 1382 mode_cmd->width > 64 && /* Don't create a proxy for cursor */
1384 dev_priv->active_display_unit == vmw_du_screen_target) { 1383 dev_priv->active_display_unit == vmw_du_screen_target) {
1385 ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd, 1384 ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
1386 dmabuf, &surface); 1385 bo, &surface);
1387 if (ret) 1386 if (ret)
1388 return ERR_PTR(ret); 1387 return ERR_PTR(ret);
1389 1388
1390 is_dmabuf_proxy = true; 1389 is_bo_proxy = true;
1391 } 1390 }
1392 1391
 1393 /* Create the new framebuffer depending on what we have */ 1392 /* Create the new framebuffer depending on what we have */
1394 if (surface) { 1393 if (surface) {
1395 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, 1394 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1396 mode_cmd, 1395 mode_cmd,
1397 is_dmabuf_proxy); 1396 is_bo_proxy);
1398 1397
1399 /* 1398 /*
1400 * vmw_create_dmabuf_proxy() adds a reference that is no longer 1399 * vmw_create_bo_proxy() adds a reference that is no longer
1401 * needed 1400 * needed
1402 */ 1401 */
1403 if (is_dmabuf_proxy) 1402 if (is_bo_proxy)
1404 vmw_surface_unreference(&surface); 1403 vmw_surface_unreference(&surface);
1405 } else if (dmabuf) { 1404 } else if (bo) {
1406 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb, 1405 ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1407 mode_cmd); 1406 mode_cmd);
1408 } else { 1407 } else {
1409 BUG(); 1408 BUG();
1410 } 1409 }
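Collected from the hunk above, the post-rename decision flow of vmw_kms_new_framebuffer(): optionally wrap the buffer object in a proxy surface for screen targets, then build either a surface-backed or a buffer-object-backed framebuffer (error handling trimmed).

/* Post-rename reconstruction; error handling trimmed for brevity. */
if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
    bo && only_2d &&
    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
    dev_priv->active_display_unit == vmw_du_screen_target) {
        ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd, bo, &surface);
        if (ret)
                return ERR_PTR(ret);

        is_bo_proxy = true;
}

if (surface) {
        ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
                                              mode_cmd, is_bo_proxy);
        /* vmw_create_bo_proxy() adds a reference that is no longer needed. */
        if (is_bo_proxy)
                vmw_surface_unreference(&surface);
} else if (bo) {
        ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb, mode_cmd);
} else {
        BUG();
}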
@@ -1430,7 +1429,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1430 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1429 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1431 struct vmw_framebuffer *vfb = NULL; 1430 struct vmw_framebuffer *vfb = NULL;
1432 struct vmw_surface *surface = NULL; 1431 struct vmw_surface *surface = NULL;
1433 struct vmw_dma_buffer *bo = NULL; 1432 struct vmw_buffer_object *bo = NULL;
1434 struct ttm_base_object *user_obj; 1433 struct ttm_base_object *user_obj;
1435 int ret; 1434 int ret;
1436 1435
@@ -1466,7 +1465,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1466 * End conditioned code. 1465 * End conditioned code.
1467 */ 1466 */
1468 1467
1469 /* returns either a dmabuf or surface */ 1468 /* returns either a bo or surface */
1470 ret = vmw_user_lookup_handle(dev_priv, tfile, 1469 ret = vmw_user_lookup_handle(dev_priv, tfile,
1471 mode_cmd->handles[0], 1470 mode_cmd->handles[0],
1472 &surface, &bo); 1471 &surface, &bo);
@@ -1494,7 +1493,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1494err_out: 1493err_out:
1495 /* vmw_user_lookup_handle takes one ref so does new_fb */ 1494 /* vmw_user_lookup_handle takes one ref so does new_fb */
1496 if (bo) 1495 if (bo)
1497 vmw_dmabuf_unreference(&bo); 1496 vmw_bo_unreference(&bo);
1498 if (surface) 1497 if (surface)
1499 vmw_surface_unreference(&surface); 1498 vmw_surface_unreference(&surface);
1500 1499
@@ -2427,7 +2426,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2427 * interrupted by a signal. 2426 * interrupted by a signal.
2428 */ 2427 */
2429int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, 2428int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
2430 struct vmw_dma_buffer *buf, 2429 struct vmw_buffer_object *buf,
2431 bool interruptible, 2430 bool interruptible,
2432 bool validate_as_mob, 2431 bool validate_as_mob,
2433 bool for_cpu_blit) 2432 bool for_cpu_blit)
@@ -2459,7 +2458,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
2459 * Helper to be used if an error forces the caller to undo the actions of 2458 * Helper to be used if an error forces the caller to undo the actions of
2460 * vmw_kms_helper_buffer_prepare. 2459 * vmw_kms_helper_buffer_prepare.
2461 */ 2460 */
2462void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf) 2461void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
2463{ 2462{
2464 if (buf) 2463 if (buf)
2465 ttm_bo_unreserve(&buf->base); 2464 ttm_bo_unreserve(&buf->base);
@@ -2482,7 +2481,7 @@ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
2482 */ 2481 */
2483void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, 2482void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
2484 struct drm_file *file_priv, 2483 struct drm_file *file_priv,
2485 struct vmw_dma_buffer *buf, 2484 struct vmw_buffer_object *buf,
2486 struct vmw_fence_obj **out_fence, 2485 struct vmw_fence_obj **out_fence,
2487 struct drm_vmw_fence_rep __user * 2486 struct drm_vmw_fence_rep __user *
2488 user_fence_rep) 2487 user_fence_rep)
@@ -2522,7 +2521,7 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
2522 struct vmw_resource *res = ctx->res; 2521 struct vmw_resource *res = ctx->res;
2523 2522
2524 vmw_kms_helper_buffer_revert(ctx->buf); 2523 vmw_kms_helper_buffer_revert(ctx->buf);
2525 vmw_dmabuf_unreference(&ctx->buf); 2524 vmw_bo_unreference(&ctx->buf);
2526 vmw_resource_unreserve(res, false, NULL, 0); 2525 vmw_resource_unreserve(res, false, NULL, 0);
2527 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 2526 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2528} 2527}
@@ -2567,7 +2566,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
2567 if (ret) 2566 if (ret)
2568 goto out_unreserve; 2567 goto out_unreserve;
2569 2568
2570 ctx->buf = vmw_dmabuf_reference(res->backup); 2569 ctx->buf = vmw_bo_reference(res->backup);
2571 } 2570 }
2572 ret = vmw_resource_validate(res); 2571 ret = vmw_resource_validate(res);
2573 if (ret) 2572 if (ret)
@@ -2600,7 +2599,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
2600 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, 2599 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
2601 out_fence, NULL); 2600 out_fence, NULL);
2602 2601
2603 vmw_dmabuf_unreference(&ctx->buf); 2602 vmw_bo_unreference(&ctx->buf);
2604 vmw_resource_unreserve(res, false, NULL, 0); 2603 vmw_resource_unreserve(res, false, NULL, 0);
2605 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 2604 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2606} 2605}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6b7c012719f1..ff1caed38f94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -90,7 +90,7 @@ struct vmw_kms_dirty {
90#define vmw_framebuffer_to_vfbs(x) \ 90#define vmw_framebuffer_to_vfbs(x) \
91 container_of(x, struct vmw_framebuffer_surface, base.base) 91 container_of(x, struct vmw_framebuffer_surface, base.base)
92#define vmw_framebuffer_to_vfbd(x) \ 92#define vmw_framebuffer_to_vfbd(x) \
93 container_of(x, struct vmw_framebuffer_dmabuf, base.base) 93 container_of(x, struct vmw_framebuffer_bo, base.base)
94 94
95/** 95/**
96 * Base class for framebuffers 96 * Base class for framebuffers
@@ -102,7 +102,7 @@ struct vmw_framebuffer {
102 struct drm_framebuffer base; 102 struct drm_framebuffer base;
103 int (*pin)(struct vmw_framebuffer *fb); 103 int (*pin)(struct vmw_framebuffer *fb);
104 int (*unpin)(struct vmw_framebuffer *fb); 104 int (*unpin)(struct vmw_framebuffer *fb);
105 bool dmabuf; 105 bool bo;
106 struct ttm_base_object *user_obj; 106 struct ttm_base_object *user_obj;
107 uint32_t user_handle; 107 uint32_t user_handle;
108}; 108};
@@ -117,15 +117,15 @@ struct vmw_clip_rect {
117struct vmw_framebuffer_surface { 117struct vmw_framebuffer_surface {
118 struct vmw_framebuffer base; 118 struct vmw_framebuffer base;
119 struct vmw_surface *surface; 119 struct vmw_surface *surface;
120 struct vmw_dma_buffer *buffer; 120 struct vmw_buffer_object *buffer;
121 struct list_head head; 121 struct list_head head;
122 bool is_dmabuf_proxy; /* true if this is proxy surface for DMA buf */ 122 bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
123}; 123};
124 124
125 125
126struct vmw_framebuffer_dmabuf { 126struct vmw_framebuffer_bo {
127 struct vmw_framebuffer base; 127 struct vmw_framebuffer base;
128 struct vmw_dma_buffer *buffer; 128 struct vmw_buffer_object *buffer;
129}; 129};
130 130
131 131
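With the struct rename in place, going from a core struct drm_framebuffer back to its buffer-object wrapper still uses the container_of macro above; a short illustrative use (variable names are only examples):

/* Illustrative: recover the renamed wrapper and its backing object. */
struct vmw_framebuffer_bo *vfbd = vmw_framebuffer_to_vfbd(framebuffer);
struct vmw_buffer_object *buf = vfbd->buffer;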
@@ -161,18 +161,18 @@ struct vmw_crtc_state {
161 * 161 *
162 * @base DRM plane object 162 * @base DRM plane object
163 * @surf Display surface for STDU 163 * @surf Display surface for STDU
164 * @dmabuf display dmabuf for SOU 164 * @bo display bo for SOU
165 * @content_fb_type Used by STDU. 165 * @content_fb_type Used by STDU.
166 * @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit 166 * @bo_size Size of the bo, used by Screen Object Display Unit
167 * @pinned pin count for STDU display surface 167 * @pinned pin count for STDU display surface
168 */ 168 */
169struct vmw_plane_state { 169struct vmw_plane_state {
170 struct drm_plane_state base; 170 struct drm_plane_state base;
171 struct vmw_surface *surf; 171 struct vmw_surface *surf;
172 struct vmw_dma_buffer *dmabuf; 172 struct vmw_buffer_object *bo;
173 173
174 int content_fb_type; 174 int content_fb_type;
175 unsigned long dmabuf_size; 175 unsigned long bo_size;
176 176
177 int pinned; 177 int pinned;
178 178
@@ -209,7 +209,7 @@ struct vmw_display_unit {
209 struct drm_plane cursor; 209 struct drm_plane cursor;
210 210
211 struct vmw_surface *cursor_surface; 211 struct vmw_surface *cursor_surface;
212 struct vmw_dma_buffer *cursor_dmabuf; 212 struct vmw_buffer_object *cursor_bo;
213 size_t cursor_age; 213 size_t cursor_age;
214 214
215 int cursor_x; 215 int cursor_x;
@@ -243,7 +243,7 @@ struct vmw_display_unit {
243 243
244struct vmw_validation_ctx { 244struct vmw_validation_ctx {
245 struct vmw_resource *res; 245 struct vmw_resource *res;
246 struct vmw_dma_buffer *buf; 246 struct vmw_buffer_object *buf;
247}; 247};
248 248
249#define vmw_crtc_to_du(x) \ 249#define vmw_crtc_to_du(x) \
@@ -291,14 +291,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
291 struct vmw_kms_dirty *dirty); 291 struct vmw_kms_dirty *dirty);
292 292
293int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, 293int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
294 struct vmw_dma_buffer *buf, 294 struct vmw_buffer_object *buf,
295 bool interruptible, 295 bool interruptible,
296 bool validate_as_mob, 296 bool validate_as_mob,
297 bool for_cpu_blit); 297 bool for_cpu_blit);
298void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf); 298void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
299void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, 299void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
300 struct drm_file *file_priv, 300 struct drm_file *file_priv,
301 struct vmw_dma_buffer *buf, 301 struct vmw_buffer_object *buf,
302 struct vmw_fence_obj **out_fence, 302 struct vmw_fence_obj **out_fence,
303 struct drm_vmw_fence_rep __user * 303 struct drm_vmw_fence_rep __user *
304 user_fence_rep); 304 user_fence_rep);
@@ -316,7 +316,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
316 uint32_t num_clips); 316 uint32_t num_clips);
317struct vmw_framebuffer * 317struct vmw_framebuffer *
318vmw_kms_new_framebuffer(struct vmw_private *dev_priv, 318vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
319 struct vmw_dma_buffer *dmabuf, 319 struct vmw_buffer_object *bo,
320 struct vmw_surface *surface, 320 struct vmw_surface *surface,
321 bool only_2d, 321 bool only_2d,
322 const struct drm_mode_fb_cmd2 *mode_cmd); 322 const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -384,11 +384,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector,
384 */ 384 */
385int vmw_kms_ldu_init_display(struct vmw_private *dev_priv); 385int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
386int vmw_kms_ldu_close_display(struct vmw_private *dev_priv); 386int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
387int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv, 387int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
388 struct vmw_framebuffer *framebuffer, 388 struct vmw_framebuffer *framebuffer,
389 unsigned flags, unsigned color, 389 unsigned int flags, unsigned int color,
390 struct drm_clip_rect *clips, 390 struct drm_clip_rect *clips,
391 unsigned num_clips, int increment); 391 unsigned int num_clips, int increment);
392int vmw_kms_update_proxy(struct vmw_resource *res, 392int vmw_kms_update_proxy(struct vmw_resource *res,
393 const struct drm_clip_rect *clips, 393 const struct drm_clip_rect *clips,
394 unsigned num_clips, 394 unsigned num_clips,
@@ -408,14 +408,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
408 unsigned num_clips, int inc, 408 unsigned num_clips, int inc,
409 struct vmw_fence_obj **out_fence, 409 struct vmw_fence_obj **out_fence,
410 struct drm_crtc *crtc); 410 struct drm_crtc *crtc);
411int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, 411int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
412 struct vmw_framebuffer *framebuffer, 412 struct vmw_framebuffer *framebuffer,
413 struct drm_clip_rect *clips, 413 struct drm_clip_rect *clips,
414 struct drm_vmw_rect *vclips, 414 struct drm_vmw_rect *vclips,
415 unsigned num_clips, int increment, 415 unsigned int num_clips, int increment,
416 bool interruptible, 416 bool interruptible,
417 struct vmw_fence_obj **out_fence, 417 struct vmw_fence_obj **out_fence,
418 struct drm_crtc *crtc); 418 struct drm_crtc *crtc);
419int vmw_kms_sou_readback(struct vmw_private *dev_priv, 419int vmw_kms_sou_readback(struct vmw_private *dev_priv,
420 struct drm_file *file_priv, 420 struct drm_file *file_priv,
421 struct vmw_framebuffer *vfb, 421 struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 4a5907e3f560..a2dd9a829219 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
547} 547}
548 548
549 549
550int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv, 550int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
551 struct vmw_framebuffer *framebuffer, 551 struct vmw_framebuffer *framebuffer,
552 unsigned flags, unsigned color, 552 unsigned int flags, unsigned int color,
553 struct drm_clip_rect *clips, 553 struct drm_clip_rect *clips,
554 unsigned num_clips, int increment) 554 unsigned int num_clips, int increment)
555{ 555{
556 size_t fifo_size; 556 size_t fifo_size;
557 int i; 557 int i;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 222c9c2123a1..09420ef19ecb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -38,7 +38,7 @@
38#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE) 38#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
39 39
40struct vmw_stream { 40struct vmw_stream {
41 struct vmw_dma_buffer *buf; 41 struct vmw_buffer_object *buf;
42 bool claimed; 42 bool claimed;
43 bool paused; 43 bool paused;
44 struct drm_vmw_control_stream_arg saved; 44 struct drm_vmw_control_stream_arg saved;
@@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
94 * -ERESTARTSYS if interrupted by a signal. 94 * -ERESTARTSYS if interrupted by a signal.
95 */ 95 */
96static int vmw_overlay_send_put(struct vmw_private *dev_priv, 96static int vmw_overlay_send_put(struct vmw_private *dev_priv,
97 struct vmw_dma_buffer *buf, 97 struct vmw_buffer_object *buf,
98 struct drm_vmw_control_stream_arg *arg, 98 struct drm_vmw_control_stream_arg *arg,
99 bool interruptible) 99 bool interruptible)
100{ 100{
@@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
225 * used with GMRs instead of being locked to vram. 225 * used with GMRs instead of being locked to vram.
226 */ 226 */
227static int vmw_overlay_move_buffer(struct vmw_private *dev_priv, 227static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
228 struct vmw_dma_buffer *buf, 228 struct vmw_buffer_object *buf,
229 bool pin, bool inter) 229 bool pin, bool inter)
230{ 230{
231 if (!pin) 231 if (!pin)
232 return vmw_dmabuf_unpin(dev_priv, buf, inter); 232 return vmw_bo_unpin(dev_priv, buf, inter);
233 233
234 if (dev_priv->active_display_unit == vmw_du_legacy) 234 if (dev_priv->active_display_unit == vmw_du_legacy)
235 return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter); 235 return vmw_bo_pin_in_vram(dev_priv, buf, inter);
236 236
237 return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter); 237 return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
238} 238}
239 239
240/** 240/**
@@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
278 } 278 }
279 279
280 if (!pause) { 280 if (!pause) {
281 vmw_dmabuf_unreference(&stream->buf); 281 vmw_bo_unreference(&stream->buf);
282 stream->paused = false; 282 stream->paused = false;
283 } else { 283 } else {
284 stream->paused = true; 284 stream->paused = true;
@@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
297 * -ERESTARTSYS if interrupted. 297 * -ERESTARTSYS if interrupted.
298 */ 298 */
299static int vmw_overlay_update_stream(struct vmw_private *dev_priv, 299static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
300 struct vmw_dma_buffer *buf, 300 struct vmw_buffer_object *buf,
301 struct drm_vmw_control_stream_arg *arg, 301 struct drm_vmw_control_stream_arg *arg,
302 bool interruptible) 302 bool interruptible)
303{ 303{
@@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
347 } 347 }
348 348
349 if (stream->buf != buf) 349 if (stream->buf != buf)
350 stream->buf = vmw_dmabuf_reference(buf); 350 stream->buf = vmw_bo_reference(buf);
351 stream->saved = *arg; 351 stream->saved = *arg;
352 /* stream is no longer stopped/paused */ 352 /* stream is no longer stopped/paused */
353 stream->paused = false; 353 stream->paused = false;
@@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
466 struct vmw_overlay *overlay = dev_priv->overlay_priv; 466 struct vmw_overlay *overlay = dev_priv->overlay_priv;
467 struct drm_vmw_control_stream_arg *arg = 467 struct drm_vmw_control_stream_arg *arg =
468 (struct drm_vmw_control_stream_arg *)data; 468 (struct drm_vmw_control_stream_arg *)data;
469 struct vmw_dma_buffer *buf; 469 struct vmw_buffer_object *buf;
470 struct vmw_resource *res; 470 struct vmw_resource *res;
471 int ret; 471 int ret;
472 472
@@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
484 goto out_unlock; 484 goto out_unlock;
485 } 485 }
486 486
487 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL); 487 ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
488 if (ret) 488 if (ret)
489 goto out_unlock; 489 goto out_unlock;
490 490
491 ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); 491 ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
492 492
493 vmw_dmabuf_unreference(&buf); 493 vmw_bo_unreference(&buf);
494 494
495out_unlock: 495out_unlock:
496 mutex_unlock(&overlay->mutex); 496 mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6b3a942b18df..5aaf9ac65cba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,9 +35,9 @@
35 35
36#define VMW_RES_EVICT_ERR_COUNT 10 36#define VMW_RES_EVICT_ERR_COUNT 10
37 37
38struct vmw_user_dma_buffer { 38struct vmw_user_buffer_object {
39 struct ttm_prime_object prime; 39 struct ttm_prime_object prime;
40 struct vmw_dma_buffer dma; 40 struct vmw_buffer_object vbo;
41}; 41};
42 42
43struct vmw_bo_user_rep { 43struct vmw_bo_user_rep {
@@ -45,17 +45,18 @@ struct vmw_bo_user_rep {
45 uint64_t map_handle; 45 uint64_t map_handle;
46}; 46};
47 47
48static inline struct vmw_dma_buffer * 48static inline struct vmw_buffer_object *
49vmw_dma_buffer(struct ttm_buffer_object *bo) 49vmw_buffer_object(struct ttm_buffer_object *bo)
50{ 50{
51 return container_of(bo, struct vmw_dma_buffer, base); 51 return container_of(bo, struct vmw_buffer_object, base);
52} 52}
53 53
54static inline struct vmw_user_dma_buffer * 54static inline struct vmw_user_buffer_object *
55vmw_user_dma_buffer(struct ttm_buffer_object *bo) 55vmw_user_buffer_object(struct ttm_buffer_object *bo)
56{ 56{
57 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 57 struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
58 return container_of(vmw_bo, struct vmw_user_dma_buffer, dma); 58
59 return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
59} 60}
60 61
61struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) 62struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
@@ -116,7 +117,7 @@ static void vmw_resource_release(struct kref *kref)
116 res->backup_dirty = false; 117 res->backup_dirty = false;
117 list_del_init(&res->mob_head); 118 list_del_init(&res->mob_head);
118 ttm_bo_unreserve(bo); 119 ttm_bo_unreserve(bo);
119 vmw_dmabuf_unreference(&res->backup); 120 vmw_bo_unreference(&res->backup);
120 } 121 }
121 122
122 if (likely(res->hw_destroy != NULL)) { 123 if (likely(res->hw_destroy != NULL)) {
@@ -287,7 +288,7 @@ out_bad_resource:
287} 288}
288 289
289/** 290/**
290 * Helper function that looks either a surface or dmabuf. 291 * Helper function that looks either a surface or bo.
291 * 292 *
 292 * The pointers pointed at by out_surf and out_buf need to be NULL. 293 * The pointers pointed at by out_surf and out_buf need to be NULL.
293 */ 294 */
@@ -295,7 +296,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
295 struct ttm_object_file *tfile, 296 struct ttm_object_file *tfile,
296 uint32_t handle, 297 uint32_t handle,
297 struct vmw_surface **out_surf, 298 struct vmw_surface **out_surf,
298 struct vmw_dma_buffer **out_buf) 299 struct vmw_buffer_object **out_buf)
299{ 300{
300 struct vmw_resource *res; 301 struct vmw_resource *res;
301 int ret; 302 int ret;
@@ -311,7 +312,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
311 } 312 }
312 313
313 *out_surf = NULL; 314 *out_surf = NULL;
314 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL); 315 ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
315 return ret; 316 return ret;
316} 317}
317 318
@@ -320,14 +321,14 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
320 */ 321 */
321 322
322/** 323/**
323 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers 324 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
324 * 325 *
325 * @dev_priv: Pointer to a struct vmw_private identifying the device. 326 * @dev_priv: Pointer to a struct vmw_private identifying the device.
326 * @size: The requested buffer size. 327 * @size: The requested buffer size.
327 * @user: Whether this is an ordinary dma buffer or a user dma buffer. 328 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
328 */ 329 */
329static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size, 330static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
330 bool user) 331 bool user)
331{ 332{
332 static size_t struct_size, user_struct_size; 333 static size_t struct_size, user_struct_size;
333 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 334 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -337,9 +338,9 @@ static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
337 size_t backend_size = ttm_round_pot(vmw_tt_size); 338 size_t backend_size = ttm_round_pot(vmw_tt_size);
338 339
339 struct_size = backend_size + 340 struct_size = backend_size +
340 ttm_round_pot(sizeof(struct vmw_dma_buffer)); 341 ttm_round_pot(sizeof(struct vmw_buffer_object));
341 user_struct_size = backend_size + 342 user_struct_size = backend_size +
342 ttm_round_pot(sizeof(struct vmw_user_dma_buffer)); 343 ttm_round_pot(sizeof(struct vmw_user_buffer_object));
343 } 344 }
344 345
345 if (dev_priv->map_mode == vmw_dma_alloc_coherent) 346 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -350,36 +351,36 @@ static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
350 page_array_size; 351 page_array_size;
351} 352}
352 353
353void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 354void vmw_bo_bo_free(struct ttm_buffer_object *bo)
354{ 355{
355 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 356 struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
356 357
357 vmw_dma_buffer_unmap(vmw_bo); 358 vmw_buffer_object_unmap(vmw_bo);
358 kfree(vmw_bo); 359 kfree(vmw_bo);
359} 360}
360 361
361static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) 362static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
362{ 363{
363 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 364 struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
364 365
365 vmw_dma_buffer_unmap(&vmw_user_bo->dma); 366 vmw_buffer_object_unmap(&vmw_user_bo->vbo);
366 ttm_prime_object_kfree(vmw_user_bo, prime); 367 ttm_prime_object_kfree(vmw_user_bo, prime);
367} 368}
368 369
369int vmw_dmabuf_init(struct vmw_private *dev_priv, 370int vmw_bo_init(struct vmw_private *dev_priv,
370 struct vmw_dma_buffer *vmw_bo, 371 struct vmw_buffer_object *vmw_bo,
371 size_t size, struct ttm_placement *placement, 372 size_t size, struct ttm_placement *placement,
372 bool interruptible, 373 bool interruptible,
373 void (*bo_free) (struct ttm_buffer_object *bo)) 374 void (*bo_free)(struct ttm_buffer_object *bo))
374{ 375{
375 struct ttm_bo_device *bdev = &dev_priv->bdev; 376 struct ttm_bo_device *bdev = &dev_priv->bdev;
376 size_t acc_size; 377 size_t acc_size;
377 int ret; 378 int ret;
378 bool user = (bo_free == &vmw_user_dmabuf_destroy); 379 bool user = (bo_free == &vmw_user_bo_destroy);
379 380
380 BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free))); 381 WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
381 382
382 acc_size = vmw_dmabuf_acc_size(dev_priv, size, user); 383 acc_size = vmw_bo_acc_size(dev_priv, size, user);
383 memset(vmw_bo, 0, sizeof(*vmw_bo)); 384 memset(vmw_bo, 0, sizeof(*vmw_bo));
384 385
385 INIT_LIST_HEAD(&vmw_bo->res_list); 386 INIT_LIST_HEAD(&vmw_bo->res_list);
@@ -391,9 +392,9 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
391 return ret; 392 return ret;
392} 393}
393 394
394static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) 395static void vmw_user_bo_release(struct ttm_base_object **p_base)
395{ 396{
396 struct vmw_user_dma_buffer *vmw_user_bo; 397 struct vmw_user_buffer_object *vmw_user_bo;
397 struct ttm_base_object *base = *p_base; 398 struct ttm_base_object *base = *p_base;
398 struct ttm_buffer_object *bo; 399 struct ttm_buffer_object *bo;
399 400
@@ -402,21 +403,22 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
402 if (unlikely(base == NULL)) 403 if (unlikely(base == NULL))
403 return; 404 return;
404 405
405 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, 406 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
406 prime.base); 407 prime.base);
407 bo = &vmw_user_bo->dma.base; 408 bo = &vmw_user_bo->vbo.base;
408 ttm_bo_unref(&bo); 409 ttm_bo_unref(&bo);
409} 410}
410 411
411static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base, 412static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
412 enum ttm_ref_type ref_type) 413 enum ttm_ref_type ref_type)
413{ 414{
414 struct vmw_user_dma_buffer *user_bo; 415 struct vmw_user_buffer_object *user_bo;
415 user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base); 416
417 user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
416 418
417 switch (ref_type) { 419 switch (ref_type) {
418 case TTM_REF_SYNCCPU_WRITE: 420 case TTM_REF_SYNCCPU_WRITE:
419 ttm_bo_synccpu_write_release(&user_bo->dma.base); 421 ttm_bo_synccpu_write_release(&user_bo->vbo.base);
420 break; 422 break;
421 default: 423 default:
422 BUG(); 424 BUG();
@@ -424,7 +426,7 @@ static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
424} 426}
425 427
426/** 428/**
427 * vmw_user_dmabuf_alloc - Allocate a user dma buffer 429 * vmw_user_bo_alloc - Allocate a user dma buffer
428 * 430 *
429 * @dev_priv: Pointer to a struct device private. 431 * @dev_priv: Pointer to a struct device private.
430 * @tfile: Pointer to a struct ttm_object_file on which to register the user 432 * @tfile: Pointer to a struct ttm_object_file on which to register the user
@@ -432,18 +434,18 @@ static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
432 * @size: Size of the dma buffer. 434 * @size: Size of the dma buffer.
433 * @shareable: Boolean whether the buffer is shareable with other open files. 435 * @shareable: Boolean whether the buffer is shareable with other open files.
434 * @handle: Pointer to where the handle value should be assigned. 436 * @handle: Pointer to where the handle value should be assigned.
435 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer 437 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
436 * should be assigned. 438 * should be assigned.
437 */ 439 */
438int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, 440int vmw_user_bo_alloc(struct vmw_private *dev_priv,
439 struct ttm_object_file *tfile, 441 struct ttm_object_file *tfile,
440 uint32_t size, 442 uint32_t size,
441 bool shareable, 443 bool shareable,
442 uint32_t *handle, 444 uint32_t *handle,
443 struct vmw_dma_buffer **p_dma_buf, 445 struct vmw_buffer_object **p_vbo,
444 struct ttm_base_object **p_base) 446 struct ttm_base_object **p_base)
445{ 447{
446 struct vmw_user_dma_buffer *user_bo; 448 struct vmw_user_buffer_object *user_bo;
447 struct ttm_buffer_object *tmp; 449 struct ttm_buffer_object *tmp;
448 int ret; 450 int ret;
449 451
@@ -453,28 +455,28 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
453 return -ENOMEM; 455 return -ENOMEM;
454 } 456 }
455 457
456 ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, 458 ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
457 (dev_priv->has_mob) ? 459 (dev_priv->has_mob) ?
458 &vmw_sys_placement : 460 &vmw_sys_placement :
459 &vmw_vram_sys_placement, true, 461 &vmw_vram_sys_placement, true,
460 &vmw_user_dmabuf_destroy); 462 &vmw_user_bo_destroy);
461 if (unlikely(ret != 0)) 463 if (unlikely(ret != 0))
462 return ret; 464 return ret;
463 465
464 tmp = ttm_bo_reference(&user_bo->dma.base); 466 tmp = ttm_bo_reference(&user_bo->vbo.base);
465 ret = ttm_prime_object_init(tfile, 467 ret = ttm_prime_object_init(tfile,
466 size, 468 size,
467 &user_bo->prime, 469 &user_bo->prime,
468 shareable, 470 shareable,
469 ttm_buffer_type, 471 ttm_buffer_type,
470 &vmw_user_dmabuf_release, 472 &vmw_user_bo_release,
471 &vmw_user_dmabuf_ref_obj_release); 473 &vmw_user_bo_ref_obj_release);
472 if (unlikely(ret != 0)) { 474 if (unlikely(ret != 0)) {
473 ttm_bo_unref(&tmp); 475 ttm_bo_unref(&tmp);
474 goto out_no_base_object; 476 goto out_no_base_object;
475 } 477 }
476 478
477 *p_dma_buf = &user_bo->dma; 479 *p_vbo = &user_bo->vbo;
478 if (p_base) { 480 if (p_base) {
479 *p_base = &user_bo->prime.base; 481 *p_base = &user_bo->prime.base;
480 kref_get(&(*p_base)->refcount); 482 kref_get(&(*p_base)->refcount);
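The core of the renamed allocation path, collected from the hunk above: initialize the embedded vmw_buffer_object with the user destructor, take a TTM reference for the prime object, and register it with the object file (cleanup labels abbreviated).

/* Post-rename reconstruction of the heart of vmw_user_bo_alloc(). */
ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
                  (dev_priv->has_mob) ?
                  &vmw_sys_placement :
                  &vmw_vram_sys_placement, true,
                  &vmw_user_bo_destroy);
if (unlikely(ret != 0))
        return ret;

tmp = ttm_bo_reference(&user_bo->vbo.base);
ret = ttm_prime_object_init(tfile, size, &user_bo->prime, shareable,
                            ttm_buffer_type,
                            &vmw_user_bo_release,
                            &vmw_user_bo_ref_obj_release);
if (unlikely(ret != 0)) {
        ttm_bo_unref(&tmp);
        goto out_no_base_object;
}

*p_vbo = &user_bo->vbo;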
@@ -486,21 +488,21 @@ out_no_base_object:
486} 488}
487 489
488/** 490/**
489 * vmw_user_dmabuf_verify_access - verify access permissions on this 491 * vmw_user_bo_verify_access - verify access permissions on this
490 * buffer object. 492 * buffer object.
491 * 493 *
492 * @bo: Pointer to the buffer object being accessed 494 * @bo: Pointer to the buffer object being accessed
493 * @tfile: Identifying the caller. 495 * @tfile: Identifying the caller.
494 */ 496 */
495int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, 497int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
496 struct ttm_object_file *tfile) 498 struct ttm_object_file *tfile)
497{ 499{
498 struct vmw_user_dma_buffer *vmw_user_bo; 500 struct vmw_user_buffer_object *vmw_user_bo;
499 501
500 if (unlikely(bo->destroy != vmw_user_dmabuf_destroy)) 502 if (unlikely(bo->destroy != vmw_user_bo_destroy))
501 return -EPERM; 503 return -EPERM;
502 504
503 vmw_user_bo = vmw_user_dma_buffer(bo); 505 vmw_user_bo = vmw_user_buffer_object(bo);
504 506
505 /* Check that the caller has opened the object. */ 507 /* Check that the caller has opened the object. */
506 if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base))) 508 if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
@@ -511,7 +513,7 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
511} 513}
512 514
513/** 515/**
514 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu 516 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
515 * access, idling previous GPU operations on the buffer and optionally 517 * access, idling previous GPU operations on the buffer and optionally
516 * blocking it for further command submissions. 518 * blocking it for further command submissions.
517 * 519 *
@@ -521,11 +523,11 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
521 * 523 *
522 * A blocking grab will be automatically released when @tfile is closed. 524 * A blocking grab will be automatically released when @tfile is closed.
523 */ 525 */
524static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, 526static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
525 struct ttm_object_file *tfile, 527 struct ttm_object_file *tfile,
526 uint32_t flags) 528 uint32_t flags)
527{ 529{
528 struct ttm_buffer_object *bo = &user_bo->dma.base; 530 struct ttm_buffer_object *bo = &user_bo->vbo.base;
529 bool existed; 531 bool existed;
530 int ret; 532 int ret;
531 533
@@ -550,20 +552,20 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
550 ret = ttm_ref_object_add(tfile, &user_bo->prime.base, 552 ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
551 TTM_REF_SYNCCPU_WRITE, &existed, false); 553 TTM_REF_SYNCCPU_WRITE, &existed, false);
552 if (ret != 0 || existed) 554 if (ret != 0 || existed)
553 ttm_bo_synccpu_write_release(&user_bo->dma.base); 555 ttm_bo_synccpu_write_release(&user_bo->vbo.base);
554 556
555 return ret; 557 return ret;
556} 558}
557 559
558/** 560/**
559 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access, 561 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
560 * and unblock command submission on the buffer if blocked. 562 * and unblock command submission on the buffer if blocked.
561 * 563 *
562 * @handle: Handle identifying the buffer object. 564 * @handle: Handle identifying the buffer object.
563 * @tfile: Identifying the caller. 565 * @tfile: Identifying the caller.
564 * @flags: Flags indicating the type of release. 566 * @flags: Flags indicating the type of release.
565 */ 567 */
566static int vmw_user_dmabuf_synccpu_release(uint32_t handle, 568static int vmw_user_bo_synccpu_release(uint32_t handle,
567 struct ttm_object_file *tfile, 569 struct ttm_object_file *tfile,
568 uint32_t flags) 570 uint32_t flags)
569{ 571{
@@ -575,7 +577,7 @@ static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
575} 577}
576 578
577/** 579/**
 578 * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu 580 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
579 * functionality. 581 * functionality.
580 * 582 *
581 * @dev: Identifies the drm device. 583 * @dev: Identifies the drm device.
@@ -585,13 +587,13 @@ static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
585 * This function checks the ioctl arguments for validity and calls the 587 * This function checks the ioctl arguments for validity and calls the
586 * relevant synccpu functions. 588 * relevant synccpu functions.
587 */ 589 */
588int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, 590int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
589 struct drm_file *file_priv) 591 struct drm_file *file_priv)
590{ 592{
591 struct drm_vmw_synccpu_arg *arg = 593 struct drm_vmw_synccpu_arg *arg =
592 (struct drm_vmw_synccpu_arg *) data; 594 (struct drm_vmw_synccpu_arg *) data;
593 struct vmw_dma_buffer *dma_buf; 595 struct vmw_buffer_object *vbo;
594 struct vmw_user_dma_buffer *user_bo; 596 struct vmw_user_buffer_object *user_bo;
595 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 597 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
596 struct ttm_base_object *buffer_base; 598 struct ttm_base_object *buffer_base;
597 int ret; 599 int ret;
@@ -606,15 +608,15 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
606 608
607 switch (arg->op) { 609 switch (arg->op) {
608 case drm_vmw_synccpu_grab: 610 case drm_vmw_synccpu_grab:
609 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf, 611 ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
610 &buffer_base); 612 &buffer_base);
611 if (unlikely(ret != 0)) 613 if (unlikely(ret != 0))
612 return ret; 614 return ret;
613 615
614 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, 616 user_bo = container_of(vbo, struct vmw_user_buffer_object,
615 dma); 617 vbo);
616 ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); 618 ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
617 vmw_dmabuf_unreference(&dma_buf); 619 vmw_bo_unreference(&vbo);
618 ttm_base_object_unref(&buffer_base); 620 ttm_base_object_unref(&buffer_base);
619 if (unlikely(ret != 0 && ret != -ERESTARTSYS && 621 if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
620 ret != -EBUSY)) { 622 ret != -EBUSY)) {
@@ -624,8 +626,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
624 } 626 }
625 break; 627 break;
626 case drm_vmw_synccpu_release: 628 case drm_vmw_synccpu_release:
627 ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile, 629 ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
628 arg->flags); 630 arg->flags);
629 if (unlikely(ret != 0)) { 631 if (unlikely(ret != 0)) {
630 DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", 632 DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
631 (unsigned int) arg->handle); 633 (unsigned int) arg->handle);
@@ -640,15 +642,15 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
640 return 0; 642 return 0;
641} 643}
642 644
643int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, 645int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
644 struct drm_file *file_priv) 646 struct drm_file *file_priv)
645{ 647{
646 struct vmw_private *dev_priv = vmw_priv(dev); 648 struct vmw_private *dev_priv = vmw_priv(dev);
647 union drm_vmw_alloc_dmabuf_arg *arg = 649 union drm_vmw_alloc_dmabuf_arg *arg =
648 (union drm_vmw_alloc_dmabuf_arg *)data; 650 (union drm_vmw_alloc_dmabuf_arg *)data;
649 struct drm_vmw_alloc_dmabuf_req *req = &arg->req; 651 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
650 struct drm_vmw_dmabuf_rep *rep = &arg->rep; 652 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
651 struct vmw_dma_buffer *dma_buf; 653 struct vmw_buffer_object *vbo;
652 uint32_t handle; 654 uint32_t handle;
653 int ret; 655 int ret;
654 656
@@ -656,27 +658,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
656 if (unlikely(ret != 0)) 658 if (unlikely(ret != 0))
657 return ret; 659 return ret;
658 660
659 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, 661 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
660 req->size, false, &handle, &dma_buf, 662 req->size, false, &handle, &vbo,
661 NULL); 663 NULL);
662 if (unlikely(ret != 0)) 664 if (unlikely(ret != 0))
663 goto out_no_dmabuf; 665 goto out_no_bo;
664 666
665 rep->handle = handle; 667 rep->handle = handle;
666 rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node); 668 rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
667 rep->cur_gmr_id = handle; 669 rep->cur_gmr_id = handle;
668 rep->cur_gmr_offset = 0; 670 rep->cur_gmr_offset = 0;
669 671
670 vmw_dmabuf_unreference(&dma_buf); 672 vmw_bo_unreference(&vbo);
671 673
672out_no_dmabuf: 674out_no_bo:
673 ttm_read_unlock(&dev_priv->reservation_sem); 675 ttm_read_unlock(&dev_priv->reservation_sem);
674 676
675 return ret; 677 return ret;
676} 678}
677 679
678int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, 680int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
679 struct drm_file *file_priv) 681 struct drm_file *file_priv)
680{ 682{
681 struct drm_vmw_unref_dmabuf_arg *arg = 683 struct drm_vmw_unref_dmabuf_arg *arg =
682 (struct drm_vmw_unref_dmabuf_arg *)data; 684 (struct drm_vmw_unref_dmabuf_arg *)data;
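Seen from user space, the allocation path above boils down to one ioctl plus an mmap() of the returned map offset on the DRM fd. A minimal sketch against the renamed uapi further down; the DRM_IOCTL_VMW_ALLOC_BO request macro is again composed the usual libdrm way and is an assumption, while the struct and field names come from the header diff below:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/drm.h>
    #include <drm/vmwgfx_drm.h>

    /* Assumption: request macro composed as libdrm does for driver-private ioctls. */
    #define DRM_IOCTL_VMW_ALLOC_BO \
            DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_BO, union drm_vmw_alloc_bo_arg)

    static void *bo_create_and_map(int drm_fd, uint32_t size, uint32_t *handle)
    {
            union drm_vmw_alloc_bo_arg arg = { .req = { .size = size } };

            if (ioctl(drm_fd, DRM_IOCTL_VMW_ALLOC_BO, &arg))
                    return MAP_FAILED;

            *handle = arg.rep.handle;   /* doubles as cur_gmr_id for the command stream */
            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        drm_fd, arg.rep.map_handle);
    }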
@@ -686,11 +688,11 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
686 TTM_REF_USAGE); 688 TTM_REF_USAGE);
687} 689}
688 690
689int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, 691int vmw_user_bo_lookup(struct ttm_object_file *tfile,
690 uint32_t handle, struct vmw_dma_buffer **out, 692 uint32_t handle, struct vmw_buffer_object **out,
691 struct ttm_base_object **p_base) 693 struct ttm_base_object **p_base)
692{ 694{
693 struct vmw_user_dma_buffer *vmw_user_bo; 695 struct vmw_user_buffer_object *vmw_user_bo;
694 struct ttm_base_object *base; 696 struct ttm_base_object *base;
695 697
696 base = ttm_base_object_lookup(tfile, handle); 698 base = ttm_base_object_lookup(tfile, handle);
@@ -707,28 +709,28 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
707 return -EINVAL; 709 return -EINVAL;
708 } 710 }
709 711
710 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, 712 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
711 prime.base); 713 prime.base);
712 (void)ttm_bo_reference(&vmw_user_bo->dma.base); 714 (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
713 if (p_base) 715 if (p_base)
714 *p_base = base; 716 *p_base = base;
715 else 717 else
716 ttm_base_object_unref(&base); 718 ttm_base_object_unref(&base);
717 *out = &vmw_user_bo->dma; 719 *out = &vmw_user_bo->vbo;
718 720
719 return 0; 721 return 0;
720} 722}
721 723
722int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, 724int vmw_user_bo_reference(struct ttm_object_file *tfile,
723 struct vmw_dma_buffer *dma_buf, 725 struct vmw_buffer_object *vbo,
724 uint32_t *handle) 726 uint32_t *handle)
725{ 727{
726 struct vmw_user_dma_buffer *user_bo; 728 struct vmw_user_buffer_object *user_bo;
727 729
728 if (dma_buf->base.destroy != vmw_user_dmabuf_destroy) 730 if (vbo->base.destroy != vmw_user_bo_destroy)
729 return -EINVAL; 731 return -EINVAL;
730 732
731 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); 733 user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
732 734
733 *handle = user_bo->prime.base.hash.key; 735 *handle = user_bo->prime.base.hash.key;
734 return ttm_ref_object_add(tfile, &user_bo->prime.base, 736 return ttm_ref_object_add(tfile, &user_bo->prime.base,
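The container_of() chains above rely on the embedding that the rename introduces in vmwgfx_drv.h, which this hunk does not show. Roughly, with unrelated members elided, the layering assumed by the lookup and reference helpers is:

    struct vmw_buffer_object {
            struct ttm_buffer_object base;   /* ->destroy tells kernel BOs from user BOs */
            /* ... res_list, pin_count, cached map, ... */
    };

    struct vmw_user_buffer_object {
            struct ttm_prime_object prime;   /* prime.base is the handle-looked-up ttm_base_object */
            struct vmw_buffer_object vbo;    /* was: struct vmw_dma_buffer dma; */
    };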
@@ -743,7 +745,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
743 * @args: Pointer to a struct drm_mode_create_dumb structure 745 * @args: Pointer to a struct drm_mode_create_dumb structure
744 * 746 *
745 * This is a driver callback for the core drm create_dumb functionality. 747 * This is a driver callback for the core drm create_dumb functionality.
746 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except 748 * Note that this is very similar to the vmw_bo_alloc ioctl, except
747 * that the arguments have a different format. 749 * that the arguments have a different format.
748 */ 750 */
749int vmw_dumb_create(struct drm_file *file_priv, 751int vmw_dumb_create(struct drm_file *file_priv,
@@ -751,7 +753,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
751 struct drm_mode_create_dumb *args) 753 struct drm_mode_create_dumb *args)
752{ 754{
753 struct vmw_private *dev_priv = vmw_priv(dev); 755 struct vmw_private *dev_priv = vmw_priv(dev);
754 struct vmw_dma_buffer *dma_buf; 756 struct vmw_buffer_object *vbo;
755 int ret; 757 int ret;
756 758
757 args->pitch = args->width * ((args->bpp + 7) / 8); 759 args->pitch = args->width * ((args->bpp + 7) / 8);
@@ -761,14 +763,14 @@ int vmw_dumb_create(struct drm_file *file_priv,
761 if (unlikely(ret != 0)) 763 if (unlikely(ret != 0))
762 return ret; 764 return ret;
763 765
764 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, 766 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
765 args->size, false, &args->handle, 767 args->size, false, &args->handle,
766 &dma_buf, NULL); 768 &vbo, NULL);
767 if (unlikely(ret != 0)) 769 if (unlikely(ret != 0))
768 goto out_no_dmabuf; 770 goto out_no_bo;
769 771
770 vmw_dmabuf_unreference(&dma_buf); 772 vmw_bo_unreference(&vbo);
771out_no_dmabuf: 773out_no_bo:
772 ttm_read_unlock(&dev_priv->reservation_sem); 774 ttm_read_unlock(&dev_priv->reservation_sem);
773 return ret; 775 return ret;
774} 776}
@@ -788,15 +790,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
788 uint64_t *offset) 790 uint64_t *offset)
789{ 791{
790 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 792 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
791 struct vmw_dma_buffer *out_buf; 793 struct vmw_buffer_object *out_buf;
792 int ret; 794 int ret;
793 795
794 ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL); 796 ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
795 if (ret != 0) 797 if (ret != 0)
796 return -EINVAL; 798 return -EINVAL;
797 799
798 *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node); 800 *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
799 vmw_dmabuf_unreference(&out_buf); 801 vmw_bo_unreference(&out_buf);
800 return 0; 802 return 0;
801} 803}
802 804
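vmw_dumb_create() and vmw_dumb_map_offset() sit behind the generic DRM dumb-buffer ioctls, so the usual driver-agnostic sequence keeps working; the pitch computed above is width times whole bytes per pixel, e.g. bpp = 32 gives (32 + 7) / 8 = 4 bytes and a 1920-pixel-wide buffer a 7680-byte pitch. A minimal sketch using only core DRM uapi:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/drm.h>
    #include <drm/drm_mode.h>

    static void *create_dumb_and_map(int drm_fd, uint32_t w, uint32_t h, uint32_t *pitch)
    {
            struct drm_mode_create_dumb create = { .width = w, .height = h, .bpp = 32 };
            struct drm_mode_map_dumb map = { 0 };

            if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                    return MAP_FAILED;

            map.handle = create.handle;
            if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                    return MAP_FAILED;

            *pitch = create.pitch;
            return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        drm_fd, map.offset);
    }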
@@ -829,7 +831,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
829{ 831{
830 unsigned long size = 832 unsigned long size =
831 (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK; 833 (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
832 struct vmw_dma_buffer *backup; 834 struct vmw_buffer_object *backup;
833 int ret; 835 int ret;
834 836
835 if (likely(res->backup)) { 837 if (likely(res->backup)) {
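The size computation above just rounds the backup size up to a whole page: with 4 KiB pages, PAGE_MASK is ~(PAGE_SIZE - 1), so a 10000-byte backup_size becomes (10000 + 4095) & ~4095 = 12288 bytes before it reaches vmw_bo_init().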
@@ -841,16 +843,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
841 if (unlikely(!backup)) 843 if (unlikely(!backup))
842 return -ENOMEM; 844 return -ENOMEM;
843 845
844 ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, 846 ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
845 res->func->backup_placement, 847 res->func->backup_placement,
846 interruptible, 848 interruptible,
847 &vmw_dmabuf_bo_free); 849 &vmw_bo_bo_free);
848 if (unlikely(ret != 0)) 850 if (unlikely(ret != 0))
849 goto out_no_dmabuf; 851 goto out_no_bo;
850 852
851 res->backup = backup; 853 res->backup = backup;
852 854
853out_no_dmabuf: 855out_no_bo:
854 return ret; 856 return ret;
855} 857}
856 858
@@ -919,7 +921,7 @@ out_bind_failed:
919 */ 921 */
920void vmw_resource_unreserve(struct vmw_resource *res, 922void vmw_resource_unreserve(struct vmw_resource *res,
921 bool switch_backup, 923 bool switch_backup,
922 struct vmw_dma_buffer *new_backup, 924 struct vmw_buffer_object *new_backup,
923 unsigned long new_backup_offset) 925 unsigned long new_backup_offset)
924{ 926{
925 struct vmw_private *dev_priv = res->dev_priv; 927 struct vmw_private *dev_priv = res->dev_priv;
@@ -931,11 +933,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
931 if (res->backup) { 933 if (res->backup) {
932 lockdep_assert_held(&res->backup->base.resv->lock.base); 934 lockdep_assert_held(&res->backup->base.resv->lock.base);
933 list_del_init(&res->mob_head); 935 list_del_init(&res->mob_head);
934 vmw_dmabuf_unreference(&res->backup); 936 vmw_bo_unreference(&res->backup);
935 } 937 }
936 938
937 if (new_backup) { 939 if (new_backup) {
938 res->backup = vmw_dmabuf_reference(new_backup); 940 res->backup = vmw_bo_reference(new_backup);
939 lockdep_assert_held(&new_backup->base.resv->lock.base); 941 lockdep_assert_held(&new_backup->base.resv->lock.base);
940 list_add_tail(&res->mob_head, &new_backup->res_list); 942 list_add_tail(&res->mob_head, &new_backup->res_list);
941 } else { 943 } else {
@@ -1007,7 +1009,7 @@ out_no_validate:
1007out_no_reserve: 1009out_no_reserve:
1008 ttm_bo_unref(&val_buf->bo); 1010 ttm_bo_unref(&val_buf->bo);
1009 if (backup_dirty) 1011 if (backup_dirty)
1010 vmw_dmabuf_unreference(&res->backup); 1012 vmw_bo_unreference(&res->backup);
1011 1013
1012 return ret; 1014 return ret;
1013} 1015}
@@ -1171,7 +1173,7 @@ int vmw_resource_validate(struct vmw_resource *res)
1171 goto out_no_validate; 1173 goto out_no_validate;
1172 else if (!res->func->needs_backup && res->backup) { 1174 else if (!res->func->needs_backup && res->backup) {
1173 list_del_init(&res->mob_head); 1175 list_del_init(&res->mob_head);
1174 vmw_dmabuf_unreference(&res->backup); 1176 vmw_bo_unreference(&res->backup);
1175 } 1177 }
1176 1178
1177 return 0; 1179 return 0;
@@ -1230,22 +1232,22 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1230void vmw_resource_move_notify(struct ttm_buffer_object *bo, 1232void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1231 struct ttm_mem_reg *mem) 1233 struct ttm_mem_reg *mem)
1232{ 1234{
1233 struct vmw_dma_buffer *dma_buf; 1235 struct vmw_buffer_object *vbo;
1234 1236
1235 if (mem == NULL) 1237 if (mem == NULL)
1236 return; 1238 return;
1237 1239
1238 if (bo->destroy != vmw_dmabuf_bo_free && 1240 if (bo->destroy != vmw_bo_bo_free &&
1239 bo->destroy != vmw_user_dmabuf_destroy) 1241 bo->destroy != vmw_user_bo_destroy)
1240 return; 1242 return;
1241 1243
1242 dma_buf = container_of(bo, struct vmw_dma_buffer, base); 1244 vbo = container_of(bo, struct vmw_buffer_object, base);
1243 1245
1244 /* 1246 /*
1245 * Kill any cached kernel maps before move. An optimization could 1247 * Kill any cached kernel maps before move. An optimization could
1246 * be to do this iff source or destination memory type is VRAM. 1248 * be to do this iff source or destination memory type is VRAM.
1247 */ 1249 */
1248 vmw_dma_buffer_unmap(dma_buf); 1250 vmw_buffer_object_unmap(vbo);
1249 1251
1250 if (mem->mem_type != VMW_PL_MOB) { 1252 if (mem->mem_type != VMW_PL_MOB) {
1251 struct vmw_resource *res, *n; 1253 struct vmw_resource *res, *n;
@@ -1254,7 +1256,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1254 val_buf.bo = bo; 1256 val_buf.bo = bo;
1255 val_buf.shared = false; 1257 val_buf.shared = false;
1256 1258
1257 list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) { 1259 list_for_each_entry_safe(res, n, &vbo->res_list, mob_head) {
1258 1260
1259 if (unlikely(res->func->unbind == NULL)) 1261 if (unlikely(res->func->unbind == NULL))
1260 continue; 1262 continue;
@@ -1277,12 +1279,12 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1277 */ 1279 */
1278void vmw_resource_swap_notify(struct ttm_buffer_object *bo) 1280void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
1279{ 1281{
1280 if (bo->destroy != vmw_dmabuf_bo_free && 1282 if (bo->destroy != vmw_bo_bo_free &&
1281 bo->destroy != vmw_user_dmabuf_destroy) 1283 bo->destroy != vmw_user_bo_destroy)
1282 return; 1284 return;
1283 1285
1284 /* Kill any cached kernel maps before swapout */ 1286 /* Kill any cached kernel maps before swapout */
1285 vmw_dma_buffer_unmap(vmw_dma_buffer(bo)); 1287 vmw_buffer_object_unmap(vmw_buffer_object(bo));
1286} 1288}
1287 1289
1288 1290
@@ -1294,7 +1296,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
1294 * Read back cached states from the device if they exist. This function 1296 * Read back cached states from the device if they exist. This function
1295 * assumes binding_mutex is held. 1297 * assumes binding_mutex is held.
1296 */ 1298 */
1297int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob) 1299int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
1298{ 1300{
1299 struct vmw_resource *dx_query_ctx; 1301 struct vmw_resource *dx_query_ctx;
1300 struct vmw_private *dev_priv; 1302 struct vmw_private *dev_priv;
@@ -1344,7 +1346,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
1344void vmw_query_move_notify(struct ttm_buffer_object *bo, 1346void vmw_query_move_notify(struct ttm_buffer_object *bo,
1345 struct ttm_mem_reg *mem) 1347 struct ttm_mem_reg *mem)
1346{ 1348{
1347 struct vmw_dma_buffer *dx_query_mob; 1349 struct vmw_buffer_object *dx_query_mob;
1348 struct ttm_bo_device *bdev = bo->bdev; 1350 struct ttm_bo_device *bdev = bo->bdev;
1349 struct vmw_private *dev_priv; 1351 struct vmw_private *dev_priv;
1350 1352
@@ -1353,7 +1355,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
1353 1355
1354 mutex_lock(&dev_priv->binding_mutex); 1356 mutex_lock(&dev_priv->binding_mutex);
1355 1357
1356 dx_query_mob = container_of(bo, struct vmw_dma_buffer, base); 1358 dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
1357 if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) { 1359 if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
1358 mutex_unlock(&dev_priv->binding_mutex); 1360 mutex_unlock(&dev_priv->binding_mutex);
1359 return; 1361 return;
@@ -1481,7 +1483,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
1481 goto out_no_reserve; 1483 goto out_no_reserve;
1482 1484
1483 if (res->pin_count == 0) { 1485 if (res->pin_count == 0) {
1484 struct vmw_dma_buffer *vbo = NULL; 1486 struct vmw_buffer_object *vbo = NULL;
1485 1487
1486 if (res->backup) { 1488 if (res->backup) {
1487 vbo = res->backup; 1489 vbo = res->backup;
@@ -1539,7 +1541,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
1539 1541
1540 WARN_ON(res->pin_count == 0); 1542 WARN_ON(res->pin_count == 0);
1541 if (--res->pin_count == 0 && res->backup) { 1543 if (--res->pin_count == 0 && res->backup) {
1542 struct vmw_dma_buffer *vbo = res->backup; 1544 struct vmw_buffer_object *vbo = res->backup;
1543 1545
1544 (void) ttm_bo_reserve(&vbo->base, false, false, NULL); 1546 (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
1545 vmw_bo_pin_reserved(vbo, false); 1547 vmw_bo_pin_reserved(vbo, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 9798640cbfcd..74dfd4621b7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit {
66 SVGAFifoCmdBlitScreenToGMRFB body; 66 SVGAFifoCmdBlitScreenToGMRFB body;
67}; 67};
68 68
69struct vmw_kms_sou_dmabuf_blit { 69struct vmw_kms_sou_bo_blit {
70 uint32 header; 70 uint32 header;
71 SVGAFifoCmdBlitGMRFBToScreen body; 71 SVGAFifoCmdBlitGMRFBToScreen body;
72}; 72};
@@ -83,7 +83,7 @@ struct vmw_screen_object_unit {
83 struct vmw_display_unit base; 83 struct vmw_display_unit base;
84 84
85 unsigned long buffer_size; /**< Size of allocated buffer */ 85 unsigned long buffer_size; /**< Size of allocated buffer */
86 struct vmw_dma_buffer *buffer; /**< Backing store buffer */ 86 struct vmw_buffer_object *buffer; /**< Backing store buffer */
87 87
88 bool defined; 88 bool defined;
89}; 89};
@@ -240,8 +240,8 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
240 } 240 }
241 241
242 if (vfb) { 242 if (vfb) {
243 sou->buffer = vps->dmabuf; 243 sou->buffer = vps->bo;
244 sou->buffer_size = vps->dmabuf_size; 244 sou->buffer_size = vps->bo_size;
245 245
246 ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y, 246 ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
247 &crtc->mode); 247 &crtc->mode);
@@ -408,10 +408,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
408 struct drm_crtc *crtc = plane->state->crtc ? 408 struct drm_crtc *crtc = plane->state->crtc ?
409 plane->state->crtc : old_state->crtc; 409 plane->state->crtc : old_state->crtc;
410 410
411 if (vps->dmabuf) 411 if (vps->bo)
412 vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false); 412 vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
413 vmw_dmabuf_unreference(&vps->dmabuf); 413 vmw_bo_unreference(&vps->bo);
414 vps->dmabuf_size = 0; 414 vps->bo_size = 0;
415 415
416 vmw_du_plane_cleanup_fb(plane, old_state); 416 vmw_du_plane_cleanup_fb(plane, old_state);
417} 417}
@@ -440,8 +440,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
440 440
441 441
442 if (!new_fb) { 442 if (!new_fb) {
443 vmw_dmabuf_unreference(&vps->dmabuf); 443 vmw_bo_unreference(&vps->bo);
444 vps->dmabuf_size = 0; 444 vps->bo_size = 0;
445 445
446 return 0; 446 return 0;
447 } 447 }
@@ -449,22 +449,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
449 size = new_state->crtc_w * new_state->crtc_h * 4; 449 size = new_state->crtc_w * new_state->crtc_h * 4;
450 dev_priv = vmw_priv(crtc->dev); 450 dev_priv = vmw_priv(crtc->dev);
451 451
452 if (vps->dmabuf) { 452 if (vps->bo) {
453 if (vps->dmabuf_size == size) { 453 if (vps->bo_size == size) {
454 /* 454 /*
455 * Note that this might temporarily up the pin-count 455 * Note that this might temporarily up the pin-count
456 * to 2, until cleanup_fb() is called. 456 * to 2, until cleanup_fb() is called.
457 */ 457 */
458 return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, 458 return vmw_bo_pin_in_vram(dev_priv, vps->bo,
459 true); 459 true);
460 } 460 }
461 461
462 vmw_dmabuf_unreference(&vps->dmabuf); 462 vmw_bo_unreference(&vps->bo);
463 vps->dmabuf_size = 0; 463 vps->bo_size = 0;
464 } 464 }
465 465
466 vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL); 466 vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
467 if (!vps->dmabuf) 467 if (!vps->bo)
468 return -ENOMEM; 468 return -ENOMEM;
469 469
470 vmw_svga_enable(dev_priv); 470 vmw_svga_enable(dev_priv);
@@ -473,22 +473,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
473 * resume the overlays, this is preferred to failing to alloc. 473 * resume the overlays, this is preferred to failing to alloc.
474 */ 474 */
475 vmw_overlay_pause_all(dev_priv); 475 vmw_overlay_pause_all(dev_priv);
476 ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size, 476 ret = vmw_bo_init(dev_priv, vps->bo, size,
477 &vmw_vram_ne_placement, 477 &vmw_vram_ne_placement,
478 false, &vmw_dmabuf_bo_free); 478 false, &vmw_bo_bo_free);
479 vmw_overlay_resume_all(dev_priv); 479 vmw_overlay_resume_all(dev_priv);
480 if (ret) { 480 if (ret) {
481 vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */ 481 vps->bo = NULL; /* vmw_bo_init frees on error */
482 return ret; 482 return ret;
483 } 483 }
484 484
485 vps->dmabuf_size = size; 485 vps->bo_size = size;
486 486
487 /* 487 /*
488 * TTM already thinks the buffer is pinned, but make sure the 488 * TTM already thinks the buffer is pinned, but make sure the
489 * pin_count is upped. 489 * pin_count is upped.
490 */ 490 */
491 return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true); 491 return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
492} 492}
493 493
494 494
@@ -512,10 +512,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
512 vclips.w = crtc->mode.hdisplay; 512 vclips.w = crtc->mode.hdisplay;
513 vclips.h = crtc->mode.vdisplay; 513 vclips.h = crtc->mode.vdisplay;
514 514
515 if (vfb->dmabuf) 515 if (vfb->bo)
516 ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL, 516 ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
517 &vclips, 1, 1, true, 517 &vclips, 1, 1, true,
518 &fence, crtc); 518 &fence, crtc);
519 else 519 else
520 ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, 520 ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
521 &vclips, NULL, 0, 0, 521 &vclips, NULL, 0, 0,
@@ -775,11 +775,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
775 return 0; 775 return 0;
776} 776}
777 777
778static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv, 778static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
779 struct vmw_framebuffer *framebuffer) 779 struct vmw_framebuffer *framebuffer)
780{ 780{
781 struct vmw_dma_buffer *buf = 781 struct vmw_buffer_object *buf =
782 container_of(framebuffer, struct vmw_framebuffer_dmabuf, 782 container_of(framebuffer, struct vmw_framebuffer_bo,
783 base)->buffer; 783 base)->buffer;
784 int depth = framebuffer->base.format->depth; 784 int depth = framebuffer->base.format->depth;
785 struct { 785 struct {
@@ -970,13 +970,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
970} 970}
971 971
972/** 972/**
973 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips. 973 * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips.
974 * 974 *
975 * @dirty: The closure structure. 975 * @dirty: The closure structure.
976 * 976 *
977 * Commits a previously built command buffer of readback clips. 977 * Commits a previously built command buffer of readback clips.
978 */ 978 */
979static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) 979static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
980{ 980{
981 if (!dirty->num_hits) { 981 if (!dirty->num_hits) {
982 vmw_fifo_commit(dirty->dev_priv, 0); 982 vmw_fifo_commit(dirty->dev_priv, 0);
@@ -984,20 +984,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
984 } 984 }
985 985
986 vmw_fifo_commit(dirty->dev_priv, 986 vmw_fifo_commit(dirty->dev_priv,
987 sizeof(struct vmw_kms_sou_dmabuf_blit) * 987 sizeof(struct vmw_kms_sou_bo_blit) *
988 dirty->num_hits); 988 dirty->num_hits);
989} 989}
990 990
991/** 991/**
992 * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect. 992 * vmw_sou_bo_clip - Callback to encode a readback cliprect.
993 * 993 *
994 * @dirty: The closure structure 994 * @dirty: The closure structure
995 * 995 *
996 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect. 996 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
997 */ 997 */
998static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty) 998static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
999{ 999{
1000 struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd; 1000 struct vmw_kms_sou_bo_blit *blit = dirty->cmd;
1001 1001
1002 blit += dirty->num_hits; 1002 blit += dirty->num_hits;
1003 blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; 1003 blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
@@ -1012,10 +1012,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
1012} 1012}
1013 1013
1014/** 1014/**
1015 * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer 1015 * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
1016 * 1016 *
1017 * @dev_priv: Pointer to the device private structure. 1017 * @dev_priv: Pointer to the device private structure.
1018 * @framebuffer: Pointer to the dma-buffer backed framebuffer. 1018 * @framebuffer: Pointer to the buffer-object backed framebuffer.
1019 * @clips: Array of clip rects. 1019 * @clips: Array of clip rects.
1020 * @vclips: Alternate array of clip rects. Either @clips or @vclips must 1020 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
1021 * be NULL. 1021 * be NULL.
@@ -1025,12 +1025,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
1025 * @out_fence: If non-NULL, will return a ref-counted pointer to a 1025 * @out_fence: If non-NULL, will return a ref-counted pointer to a
1026 * struct vmw_fence_obj. The returned fence pointer may be NULL in which 1026 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
1027 * case the device has already synchronized. 1027 * case the device has already synchronized.
1028 * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only. 1028 * @crtc: If crtc is passed, perform bo dirty on that crtc only.
1029 * 1029 *
1030 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if 1030 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1031 * interrupted. 1031 * interrupted.
1032 */ 1032 */
1033int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv, 1033int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
1034 struct vmw_framebuffer *framebuffer, 1034 struct vmw_framebuffer *framebuffer,
1035 struct drm_clip_rect *clips, 1035 struct drm_clip_rect *clips,
1036 struct drm_vmw_rect *vclips, 1036 struct drm_vmw_rect *vclips,
@@ -1039,8 +1039,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
1039 struct vmw_fence_obj **out_fence, 1039 struct vmw_fence_obj **out_fence,
1040 struct drm_crtc *crtc) 1040 struct drm_crtc *crtc)
1041{ 1041{
1042 struct vmw_dma_buffer *buf = 1042 struct vmw_buffer_object *buf =
1043 container_of(framebuffer, struct vmw_framebuffer_dmabuf, 1043 container_of(framebuffer, struct vmw_framebuffer_bo,
1044 base)->buffer; 1044 base)->buffer;
1045 struct vmw_kms_dirty dirty; 1045 struct vmw_kms_dirty dirty;
1046 int ret; 1046 int ret;
@@ -1050,14 +1050,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
1050 if (ret) 1050 if (ret)
1051 return ret; 1051 return ret;
1052 1052
1053 ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer); 1053 ret = do_bo_define_gmrfb(dev_priv, framebuffer);
1054 if (unlikely(ret != 0)) 1054 if (unlikely(ret != 0))
1055 goto out_revert; 1055 goto out_revert;
1056 1056
1057 dirty.crtc = crtc; 1057 dirty.crtc = crtc;
1058 dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit; 1058 dirty.fifo_commit = vmw_sou_bo_fifo_commit;
1059 dirty.clip = vmw_sou_dmabuf_clip; 1059 dirty.clip = vmw_sou_bo_clip;
1060 dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) * 1060 dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
1061 num_clips; 1061 num_clips;
1062 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, 1062 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
1063 0, 0, num_clips, increment, &dirty); 1063 0, 0, num_clips, increment, &dirty);
@@ -1116,12 +1116,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
1116 1116
1117/** 1117/**
1118 * vmw_kms_sou_readback - Perform a readback from the screen object system to 1118 * vmw_kms_sou_readback - Perform a readback from the screen object system to
1119 * a dma-buffer backed framebuffer. 1119 * a buffer-object backed framebuffer.
1120 * 1120 *
1121 * @dev_priv: Pointer to the device private structure. 1121 * @dev_priv: Pointer to the device private structure.
1122 * @file_priv: Pointer to a struct drm_file identifying the caller. 1122 * @file_priv: Pointer to a struct drm_file identifying the caller.
1123 * Must be set to NULL if @user_fence_rep is NULL. 1123 * Must be set to NULL if @user_fence_rep is NULL.
1124 * @vfb: Pointer to the dma-buffer backed framebuffer. 1124 * @vfb: Pointer to the buffer-object backed framebuffer.
1125 * @user_fence_rep: User-space provided structure for fence information. 1125 * @user_fence_rep: User-space provided structure for fence information.
1126 * Must be set to non-NULL if @file_priv is non-NULL. 1126 * Must be set to non-NULL if @file_priv is non-NULL.
1127 * @vclips: Array of clip rects. 1127 * @vclips: Array of clip rects.
@@ -1139,8 +1139,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
1139 uint32_t num_clips, 1139 uint32_t num_clips,
1140 struct drm_crtc *crtc) 1140 struct drm_crtc *crtc)
1141{ 1141{
1142 struct vmw_dma_buffer *buf = 1142 struct vmw_buffer_object *buf =
1143 container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer; 1143 container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
1144 struct vmw_kms_dirty dirty; 1144 struct vmw_kms_dirty dirty;
1145 int ret; 1145 int ret;
1146 1146
@@ -1149,7 +1149,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
1149 if (ret) 1149 if (ret)
1150 return ret; 1150 return ret;
1151 1151
1152 ret = do_dmabuf_define_gmrfb(dev_priv, vfb); 1152 ret = do_bo_define_gmrfb(dev_priv, vfb);
1153 if (unlikely(ret != 0)) 1153 if (unlikely(ret != 0))
1154 goto out_revert; 1154 goto out_revert;
1155 1155
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 73b8e9a16368..f6c939f3ff5e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -159,7 +159,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
159 SVGA3dShaderType type, 159 SVGA3dShaderType type,
160 uint8_t num_input_sig, 160 uint8_t num_input_sig,
161 uint8_t num_output_sig, 161 uint8_t num_output_sig,
162 struct vmw_dma_buffer *byte_code, 162 struct vmw_buffer_object *byte_code,
163 void (*res_free) (struct vmw_resource *res)) 163 void (*res_free) (struct vmw_resource *res))
164{ 164{
165 struct vmw_shader *shader = vmw_res_to_shader(res); 165 struct vmw_shader *shader = vmw_res_to_shader(res);
@@ -178,7 +178,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
178 178
179 res->backup_size = size; 179 res->backup_size = size;
180 if (byte_code) { 180 if (byte_code) {
181 res->backup = vmw_dmabuf_reference(byte_code); 181 res->backup = vmw_bo_reference(byte_code);
182 res->backup_offset = offset; 182 res->backup_offset = offset;
183 } 183 }
184 shader->size = size; 184 shader->size = size;
@@ -723,7 +723,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
723} 723}
724 724
725static int vmw_user_shader_alloc(struct vmw_private *dev_priv, 725static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
726 struct vmw_dma_buffer *buffer, 726 struct vmw_buffer_object *buffer,
727 size_t shader_size, 727 size_t shader_size,
728 size_t offset, 728 size_t offset,
729 SVGA3dShaderType shader_type, 729 SVGA3dShaderType shader_type,
@@ -801,7 +801,7 @@ out:
801 801
802 802
803static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, 803static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
804 struct vmw_dma_buffer *buffer, 804 struct vmw_buffer_object *buffer,
805 size_t shader_size, 805 size_t shader_size,
806 size_t offset, 806 size_t offset,
807 SVGA3dShaderType shader_type) 807 SVGA3dShaderType shader_type)
@@ -862,12 +862,12 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
862{ 862{
863 struct vmw_private *dev_priv = vmw_priv(dev); 863 struct vmw_private *dev_priv = vmw_priv(dev);
864 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 864 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
865 struct vmw_dma_buffer *buffer = NULL; 865 struct vmw_buffer_object *buffer = NULL;
866 SVGA3dShaderType shader_type; 866 SVGA3dShaderType shader_type;
867 int ret; 867 int ret;
868 868
869 if (buffer_handle != SVGA3D_INVALID_ID) { 869 if (buffer_handle != SVGA3D_INVALID_ID) {
870 ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, 870 ret = vmw_user_bo_lookup(tfile, buffer_handle,
871 &buffer, NULL); 871 &buffer, NULL);
872 if (unlikely(ret != 0)) { 872 if (unlikely(ret != 0)) {
873 DRM_ERROR("Could not find buffer for shader " 873 DRM_ERROR("Could not find buffer for shader "
@@ -906,7 +906,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
906 906
907 ttm_read_unlock(&dev_priv->reservation_sem); 907 ttm_read_unlock(&dev_priv->reservation_sem);
908out_bad_arg: 908out_bad_arg:
909 vmw_dmabuf_unreference(&buffer); 909 vmw_bo_unreference(&buffer);
910 return ret; 910 return ret;
911} 911}
912 912
@@ -983,7 +983,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
983 struct list_head *list) 983 struct list_head *list)
984{ 984{
985 struct ttm_operation_ctx ctx = { false, true }; 985 struct ttm_operation_ctx ctx = { false, true };
986 struct vmw_dma_buffer *buf; 986 struct vmw_buffer_object *buf;
987 struct ttm_bo_kmap_obj map; 987 struct ttm_bo_kmap_obj map;
988 bool is_iomem; 988 bool is_iomem;
989 int ret; 989 int ret;
@@ -997,8 +997,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
997 if (unlikely(!buf)) 997 if (unlikely(!buf))
998 return -ENOMEM; 998 return -ENOMEM;
999 999
1000 ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, 1000 ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
1001 true, vmw_dmabuf_bo_free); 1001 true, vmw_bo_bo_free);
1002 if (unlikely(ret != 0)) 1002 if (unlikely(ret != 0))
1003 goto out; 1003 goto out;
1004 1004
@@ -1031,7 +1031,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
1031 res, list); 1031 res, list);
1032 vmw_resource_unreference(&res); 1032 vmw_resource_unreference(&res);
1033no_reserve: 1033no_reserve:
1034 vmw_dmabuf_unreference(&buf); 1034 vmw_bo_unreference(&buf);
1035out: 1035out:
1036 return ret; 1036 return ret;
1037} 1037}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 152e96cb1c01..537df9034008 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -44,7 +44,7 @@
44enum stdu_content_type { 44enum stdu_content_type {
45 SAME_AS_DISPLAY = 0, 45 SAME_AS_DISPLAY = 0,
46 SEPARATE_SURFACE, 46 SEPARATE_SURFACE,
47 SEPARATE_DMA 47 SEPARATE_BO
48}; 48};
49 49
50/** 50/**
@@ -58,7 +58,7 @@ enum stdu_content_type {
58 * @bottom: Bottom side of bounding box. 58 * @bottom: Bottom side of bounding box.
59 * @fb_left: Left side of the framebuffer/content bounding box 59 * @fb_left: Left side of the framebuffer/content bounding box
60 * @fb_top: Top of the framebuffer/content bounding box 60 * @fb_top: Top of the framebuffer/content bounding box
61 * @buf: DMA buffer when DMA-ing between buffer and screen targets. 61 * @buf: buffer object when DMA-ing between buffer and screen targets.
62 * @sid: Surface ID when copying between surface and screen targets. 62 * @sid: Surface ID when copying between surface and screen targets.
63 */ 63 */
64struct vmw_stdu_dirty { 64struct vmw_stdu_dirty {
@@ -68,7 +68,7 @@ struct vmw_stdu_dirty {
68 s32 fb_left, fb_top; 68 s32 fb_left, fb_top;
69 u32 pitch; 69 u32 pitch;
70 union { 70 union {
71 struct vmw_dma_buffer *buf; 71 struct vmw_buffer_object *buf;
72 u32 sid; 72 u32 sid;
73 }; 73 };
74}; 74};
@@ -508,14 +508,14 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
508 508
509 509
510/** 510/**
511 * vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect 511 * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
512 * 512 *
513 * @dirty: The closure structure. 513 * @dirty: The closure structure.
514 * 514 *
515 * Encodes a surface DMA command cliprect and updates the bounding box 515 * Encodes a surface DMA command cliprect and updates the bounding box
516 * for the DMA. 516 * for the DMA.
517 */ 517 */
518static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty) 518static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
519{ 519{
520 struct vmw_stdu_dirty *ddirty = 520 struct vmw_stdu_dirty *ddirty =
521 container_of(dirty, struct vmw_stdu_dirty, base); 521 container_of(dirty, struct vmw_stdu_dirty, base);
@@ -543,14 +543,14 @@ static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
543} 543}
544 544
545/** 545/**
546 * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command. 546 * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
547 * 547 *
548 * @dirty: The closure structure. 548 * @dirty: The closure structure.
549 * 549 *
550 * Fills in the missing fields in a DMA command, and optionally encodes 550 * Fills in the missing fields in a DMA command, and optionally encodes
551 * a screen target update command, depending on transfer direction. 551 * a screen target update command, depending on transfer direction.
552 */ 552 */
553static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty) 553static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
554{ 554{
555 struct vmw_stdu_dirty *ddirty = 555 struct vmw_stdu_dirty *ddirty =
556 container_of(dirty, struct vmw_stdu_dirty, base); 556 container_of(dirty, struct vmw_stdu_dirty, base);
@@ -594,13 +594,13 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
594 594
595 595
596/** 596/**
597 * vmw_stdu_dmabuf_cpu_clip - Callback to encode a CPU blit 597 * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
598 * 598 *
599 * @dirty: The closure structure. 599 * @dirty: The closure structure.
600 * 600 *
601 * This function calculates the bounding box for all the incoming clips. 601 * This function calculates the bounding box for all the incoming clips.
602 */ 602 */
603static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) 603static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty)
604{ 604{
605 struct vmw_stdu_dirty *ddirty = 605 struct vmw_stdu_dirty *ddirty =
606 container_of(dirty, struct vmw_stdu_dirty, base); 606 container_of(dirty, struct vmw_stdu_dirty, base);
@@ -624,14 +624,14 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
624 624
625 625
626/** 626/**
627 * vmw_stdu_dmabuf_cpu_commit - Callback to do a CPU blit from DMAbuf 627 * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from buffer object
628 * 628 *
629 * @dirty: The closure structure. 629 * @dirty: The closure structure.
630 * 630 *
631 * For the special case when we cannot create a proxy surface in a 631 * For the special case when we cannot create a proxy surface in a
632 * 2D VM, we have to do a CPU blit ourselves. 632 * 2D VM, we have to do a CPU blit ourselves.
633 */ 633 */
634static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) 634static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
635{ 635{
636 struct vmw_stdu_dirty *ddirty = 636 struct vmw_stdu_dirty *ddirty =
637 container_of(dirty, struct vmw_stdu_dirty, base); 637 container_of(dirty, struct vmw_stdu_dirty, base);
@@ -652,7 +652,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
652 if (width == 0 || height == 0) 652 if (width == 0 || height == 0)
653 return; 653 return;
654 654
655 /* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */ 655 /* Assume we are blitting from Guest (bo) to Host (display_srf) */
656 dst_pitch = stdu->display_srf->base_size.width * stdu->cpp; 656 dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
657 dst_bo = &stdu->display_srf->res.backup->base; 657 dst_bo = &stdu->display_srf->res.backup->base;
658 dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp; 658 dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
@@ -712,13 +712,13 @@ out_cleanup:
712} 712}
713 713
714/** 714/**
715 * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed 715 * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
716 * framebuffer and the screen target system. 716 * framebuffer and the screen target system.
717 * 717 *
718 * @dev_priv: Pointer to the device private structure. 718 * @dev_priv: Pointer to the device private structure.
719 * @file_priv: Pointer to a struct drm-file identifying the caller. May be 719 * @file_priv: Pointer to a struct drm-file identifying the caller. May be
720 * set to NULL, but then @user_fence_rep must also be set to NULL. 720 * set to NULL, but then @user_fence_rep must also be set to NULL.
721 * @vfb: Pointer to the dma-buffer backed framebuffer. 721 * @vfb: Pointer to the buffer-object backed framebuffer.
722 * @clips: Array of clip rects. Either @clips or @vclips must be NULL. 722 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
723 * @vclips: Alternate array of clip rects. Either @clips or @vclips must 723 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
724 * be NULL. 724 * be NULL.
@@ -747,8 +747,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
747 bool interruptible, 747 bool interruptible,
748 struct drm_crtc *crtc) 748 struct drm_crtc *crtc)
749{ 749{
750 struct vmw_dma_buffer *buf = 750 struct vmw_buffer_object *buf =
751 container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer; 751 container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
752 struct vmw_stdu_dirty ddirty; 752 struct vmw_stdu_dirty ddirty;
753 int ret; 753 int ret;
754 bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D); 754 bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
@@ -770,8 +770,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
770 ddirty.fb_left = ddirty.fb_top = S32_MAX; 770 ddirty.fb_left = ddirty.fb_top = S32_MAX;
771 ddirty.pitch = vfb->base.pitches[0]; 771 ddirty.pitch = vfb->base.pitches[0];
772 ddirty.buf = buf; 772 ddirty.buf = buf;
773 ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit; 773 ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
774 ddirty.base.clip = vmw_stdu_dmabuf_clip; 774 ddirty.base.clip = vmw_stdu_bo_clip;
775 ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) + 775 ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
776 num_clips * sizeof(SVGA3dCopyBox) + 776 num_clips * sizeof(SVGA3dCopyBox) +
777 sizeof(SVGA3dCmdSurfaceDMASuffix); 777 sizeof(SVGA3dCmdSurfaceDMASuffix);
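The reservation above sizes the FIFO for one surface-DMA command, one SVGA3dCopyBox per clip rect and the trailing suffix, so e.g. four clips reserve sizeof(struct vmw_stdu_dma) + 4 * sizeof(SVGA3dCopyBox) + sizeof(SVGA3dCmdSurfaceDMASuffix) bytes; the cpu_blit branch just below sets the reservation to zero because no device command is emitted there.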
@@ -780,8 +780,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
780 780
781 781
782 if (cpu_blit) { 782 if (cpu_blit) {
783 ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit; 783 ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
784 ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip; 784 ddirty.base.clip = vmw_stdu_bo_cpu_clip;
785 ddirty.base.fifo_reserve_size = 0; 785 ddirty.base.fifo_reserve_size = 0;
786 } 786 }
787 787
@@ -927,7 +927,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
927 if (ret) 927 if (ret)
928 return ret; 928 return ret;
929 929
930 if (vfbs->is_dmabuf_proxy) { 930 if (vfbs->is_bo_proxy) {
931 ret = vmw_kms_update_proxy(srf, clips, num_clips, inc); 931 ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
932 if (ret) 932 if (ret)
933 goto out_finish; 933 goto out_finish;
@@ -1075,7 +1075,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
1075 * @new_state: info on the new plane state, including the FB 1075 * @new_state: info on the new plane state, including the FB
1076 * 1076 *
1077 * This function allocates a new display surface if the content is 1077 * This function allocates a new display surface if the content is
1078 * backed by a DMA. The display surface is pinned here, and it'll 1078 * backed by a buffer object. The display surface is pinned here, and it'll
1079 * be unpinned in .cleanup_fb() 1079 * be unpinned in .cleanup_fb()
1080 * 1080 *
1081 * Returns 0 on success 1081 * Returns 0 on success
@@ -1105,13 +1105,13 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
1105 } 1105 }
1106 1106
1107 vfb = vmw_framebuffer_to_vfb(new_fb); 1107 vfb = vmw_framebuffer_to_vfb(new_fb);
1108 new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb); 1108 new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
1109 1109
1110 if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay && 1110 if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
1111 new_vfbs->surface->base_size.height == vdisplay) 1111 new_vfbs->surface->base_size.height == vdisplay)
1112 new_content_type = SAME_AS_DISPLAY; 1112 new_content_type = SAME_AS_DISPLAY;
1113 else if (vfb->dmabuf) 1113 else if (vfb->bo)
1114 new_content_type = SEPARATE_DMA; 1114 new_content_type = SEPARATE_BO;
1115 else 1115 else
1116 new_content_type = SEPARATE_SURFACE; 1116 new_content_type = SEPARATE_SURFACE;
1117 1117
@@ -1124,10 +1124,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
1124 display_base_size.depth = 1; 1124 display_base_size.depth = 1;
1125 1125
1126 /* 1126 /*
1127 * If content buffer is a DMA buf, then we have to construct 1127 * If content buffer is a buffer object, then we have to
1128 * surface info 1128 * construct surface info
1129 */ 1129 */
1130 if (new_content_type == SEPARATE_DMA) { 1130 if (new_content_type == SEPARATE_BO) {
1131 1131
1132 switch (new_fb->format->cpp[0]*8) { 1132 switch (new_fb->format->cpp[0]*8) {
1133 case 32: 1133 case 32:
@@ -1212,12 +1212,12 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
1212 vps->content_fb_type = new_content_type; 1212 vps->content_fb_type = new_content_type;
1213 1213
1214 /* 1214 /*
1215 * This should only happen if the DMA buf is too large to create a 1215 * This should only happen if the buffer object is too large to create a
1216 * proxy surface for. 1216 * proxy surface for.
1217 * If we are a 2D VM with a DMA buffer then we have to use CPU blit 1217 * If we are a 2D VM with a buffer object then we have to use CPU blit
1218 * so cache these mappings 1218 * so cache these mappings
1219 */ 1219 */
1220 if (vps->content_fb_type == SEPARATE_DMA && 1220 if (vps->content_fb_type == SEPARATE_BO &&
1221 !(dev_priv->capabilities & SVGA_CAP_3D)) 1221 !(dev_priv->capabilities & SVGA_CAP_3D))
1222 vps->cpp = new_fb->pitches[0] / new_fb->width; 1222 vps->cpp = new_fb->pitches[0] / new_fb->width;
1223 1223
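As a concrete example of the fallback above: a 1920x1080 XRGB8888 framebuffer typically has pitches[0] = 1920 * 4 = 7680, so vps->cpp comes out to 4 and the CPU blit copies four bytes per pixel.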
@@ -1276,7 +1276,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
1276 if (ret) 1276 if (ret)
1277 DRM_ERROR("Failed to bind surface to STDU.\n"); 1277 DRM_ERROR("Failed to bind surface to STDU.\n");
1278 1278
1279 if (vfb->dmabuf) 1279 if (vfb->bo)
1280 ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL, 1280 ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
1281 &vclips, 1, 1, true, false, 1281 &vclips, 1, 1, true, false,
1282 crtc); 1282 crtc);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b236c48bf265..2b2e8aa7114a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -842,12 +842,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
842 if (dev_priv->has_mob && req->shareable) { 842 if (dev_priv->has_mob && req->shareable) {
843 uint32_t backup_handle; 843 uint32_t backup_handle;
844 844
845 ret = vmw_user_dmabuf_alloc(dev_priv, tfile, 845 ret = vmw_user_bo_alloc(dev_priv, tfile,
846 res->backup_size, 846 res->backup_size,
847 true, 847 true,
848 &backup_handle, 848 &backup_handle,
849 &res->backup, 849 &res->backup,
850 &user_srf->backup_base); 850 &user_srf->backup_base);
851 if (unlikely(ret != 0)) { 851 if (unlikely(ret != 0)) {
852 vmw_resource_unreference(&res); 852 vmw_resource_unreference(&res);
853 goto out_unlock; 853 goto out_unlock;
@@ -1317,14 +1317,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1317 1317
1318 1318
1319 if (req->buffer_handle != SVGA3D_INVALID_ID) { 1319 if (req->buffer_handle != SVGA3D_INVALID_ID) {
1320 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, 1320 ret = vmw_user_bo_lookup(tfile, req->buffer_handle,
1321 &res->backup, 1321 &res->backup,
1322 &user_srf->backup_base); 1322 &user_srf->backup_base);
1323 if (ret == 0) { 1323 if (ret == 0) {
1324 if (res->backup->base.num_pages * PAGE_SIZE < 1324 if (res->backup->base.num_pages * PAGE_SIZE <
1325 res->backup_size) { 1325 res->backup_size) {
1326 DRM_ERROR("Surface backup buffer is too small.\n"); 1326 DRM_ERROR("Surface backup buffer is too small.\n");
1327 vmw_dmabuf_unreference(&res->backup); 1327 vmw_bo_unreference(&res->backup);
1328 ret = -EINVAL; 1328 ret = -EINVAL;
1329 goto out_unlock; 1329 goto out_unlock;
1330 } else { 1330 } else {
@@ -1332,13 +1332,13 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1332 } 1332 }
1333 } 1333 }
1334 } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) 1334 } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
1335 ret = vmw_user_dmabuf_alloc(dev_priv, tfile, 1335 ret = vmw_user_bo_alloc(dev_priv, tfile,
1336 res->backup_size, 1336 res->backup_size,
1337 req->drm_surface_flags & 1337 req->drm_surface_flags &
1338 drm_vmw_surface_flag_shareable, 1338 drm_vmw_surface_flag_shareable,
1339 &backup_handle, 1339 &backup_handle,
1340 &res->backup, 1340 &res->backup,
1341 &user_srf->backup_base); 1341 &user_srf->backup_base);
1342 1342
1343 if (unlikely(ret != 0)) { 1343 if (unlikely(ret != 0)) {
1344 vmw_resource_unreference(&res); 1344 vmw_resource_unreference(&res);
@@ -1414,8 +1414,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1414 } 1414 }
1415 1415
1416 mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ 1416 mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
1417 ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, 1417 ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
1418 &backup_handle);
1419 mutex_unlock(&dev_priv->cmdbuf_mutex); 1418 mutex_unlock(&dev_priv->cmdbuf_mutex);
1420 1419
1421 if (unlikely(ret != 0)) { 1420 if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 21111fd091f9..0931f43913b1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -798,7 +798,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
798 struct ttm_object_file *tfile = 798 struct ttm_object_file *tfile =
799 vmw_fpriv((struct drm_file *)filp->private_data)->tfile; 799 vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
800 800
801 return vmw_user_dmabuf_verify_access(bo, tfile); 801 return vmw_user_bo_verify_access(bo, tfile);
802} 802}
803 803
804static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 804static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 0bc784f5e0db..57115a5fe61a 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -40,6 +40,7 @@ extern "C" {
40 40
41#define DRM_VMW_GET_PARAM 0 41#define DRM_VMW_GET_PARAM 0
42#define DRM_VMW_ALLOC_DMABUF 1 42#define DRM_VMW_ALLOC_DMABUF 1
43#define DRM_VMW_ALLOC_BO 1
43#define DRM_VMW_UNREF_DMABUF 2 44#define DRM_VMW_UNREF_DMABUF 2
44#define DRM_VMW_HANDLE_CLOSE 2 45#define DRM_VMW_HANDLE_CLOSE 2
45#define DRM_VMW_CURSOR_BYPASS 3 46#define DRM_VMW_CURSOR_BYPASS 3
@@ -356,9 +357,9 @@ struct drm_vmw_fence_rep {
356 357
357/*************************************************************************/ 358/*************************************************************************/
358/** 359/**
359 * DRM_VMW_ALLOC_DMABUF 360 * DRM_VMW_ALLOC_BO
360 * 361 *
361 * Allocate a DMA buffer that is visible also to the host. 362 * Allocate a buffer object that is visible also to the host.
362 * NOTE: The buffer is 363 * NOTE: The buffer is
363 * identified by a handle and an offset, which are private to the guest, but 364 * identified by a handle and an offset, which are private to the guest, but
364 * useable in the command stream. The guest kernel may translate these 365 * useable in the command stream. The guest kernel may translate these
@@ -366,27 +367,28 @@ struct drm_vmw_fence_rep {
366 * be zero at all times, or it may disappear from the interface before it is 367 * be zero at all times, or it may disappear from the interface before it is
367 * fixed. 368 * fixed.
368 * 369 *
369 * The DMA buffer may stay user-space mapped in the guest at all times, 370 * The buffer object may stay user-space mapped in the guest at all times,
370 * and is thus suitable for sub-allocation. 371 * and is thus suitable for sub-allocation.
371 * 372 *
372 * DMA buffers are mapped using the mmap() syscall on the drm device. 373 * Buffer objects are mapped using the mmap() syscall on the drm device.
373 */ 374 */
374 375
375/** 376/**
376 * struct drm_vmw_alloc_dmabuf_req 377 * struct drm_vmw_alloc_bo_req
377 * 378 *
378 * @size: Required minimum size of the buffer. 379 * @size: Required minimum size of the buffer.
379 * 380 *
380 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl. 381 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
381 */ 382 */
382 383
383struct drm_vmw_alloc_dmabuf_req { 384struct drm_vmw_alloc_bo_req {
384 __u32 size; 385 __u32 size;
385 __u32 pad64; 386 __u32 pad64;
386}; 387};
388#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req
387 389
388/** 390/**
389 * struct drm_vmw_dmabuf_rep 391 * struct drm_vmw_bo_rep
390 * 392 *
391 * @map_handle: Offset to use in the mmap() call used to map the buffer. 393 * @map_handle: Offset to use in the mmap() call used to map the buffer.
392 * @handle: Handle unique to this buffer. Used for unreferencing. 394 * @handle: Handle unique to this buffer. Used for unreferencing.
@@ -395,50 +397,32 @@ struct drm_vmw_alloc_dmabuf_req {
395 * @cur_gmr_offset: Offset to use in the command stream when this buffer is 397 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
396 * referenced. See note above. 398 * referenced. See note above.
397 * 399 *
398 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl. 400 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
399 */ 401 */
400 402
401struct drm_vmw_dmabuf_rep { 403struct drm_vmw_bo_rep {
402 __u64 map_handle; 404 __u64 map_handle;
403 __u32 handle; 405 __u32 handle;
404 __u32 cur_gmr_id; 406 __u32 cur_gmr_id;
405 __u32 cur_gmr_offset; 407 __u32 cur_gmr_offset;
406 __u32 pad64; 408 __u32 pad64;
407}; 409};
410#define drm_vmw_dmabuf_rep drm_vmw_bo_rep
408 411
409/** 412/**
410 * union drm_vmw_dmabuf_arg 413 * union drm_vmw_alloc_bo_arg
411 * 414 *
412 * @req: Input data as described above. 415 * @req: Input data as described above.
413 * @rep: Output data as described above. 416 * @rep: Output data as described above.
414 * 417 *
415 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl. 418 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
416 */ 419 */
417 420
418union drm_vmw_alloc_dmabuf_arg { 421union drm_vmw_alloc_bo_arg {
419 struct drm_vmw_alloc_dmabuf_req req; 422 struct drm_vmw_alloc_bo_req req;
420 struct drm_vmw_dmabuf_rep rep; 423 struct drm_vmw_bo_rep rep;
421};
422
423/*************************************************************************/
424/**
425 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
426 *
427 */
428
429/**
430 * struct drm_vmw_unref_dmabuf_arg
431 *
432 * @handle: Handle indicating what buffer to free. Obtained from the
433 * DRM_VMW_ALLOC_DMABUF Ioctl.
434 *
435 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
436 */
437
438struct drm_vmw_unref_dmabuf_arg {
439 __u32 handle;
440 __u32 pad64;
441}; 424};
425#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
442 426
443/*************************************************************************/ 427/*************************************************************************/
444/** 428/**
@@ -1103,9 +1087,8 @@ union drm_vmw_extended_context_arg {
1103 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its 1087 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
1104 * underlying resource. 1088 * underlying resource.
1105 * 1089 *
1106 * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl. 1090 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
1107 * The ioctl arguments therefore need to be identical in layout. 1091 * Ioctl.
1108 *
1109 */ 1092 */
1110 1093
1111/** 1094/**
@@ -1119,7 +1102,7 @@ struct drm_vmw_handle_close_arg {
1119 __u32 handle; 1102 __u32 handle;
1120 __u32 pad64; 1103 __u32 pad64;
1121}; 1104};
1122 1105#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
1123 1106
1124#if defined(__cplusplus) 1107#if defined(__cplusplus)
1125} 1108}
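Because the old uapi names survive only as #define aliases, existing user space keeps building without source changes; the preprocessor rewrites the legacy spellings to the renamed types. A tiny illustration (a hypothetical legacy snippet, not part of the patch):

    /* Legacy declarations, unchanged: the aliases expand to the new structs. */
    union drm_vmw_alloc_dmabuf_arg alloc_arg = { 0 };   /* -> union drm_vmw_alloc_bo_arg */
    struct drm_vmw_unref_dmabuf_arg close_arg = { 0 };  /* -> struct drm_vmw_handle_close_arg */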