path: root/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
author     Thomas Hellstrom <thellstrom@vmware.com>  2018-06-19 09:02:16 -0400
committer  Thomas Hellstrom <thellstrom@vmware.com>  2018-07-03 14:33:30 -0400
commit     f1d34bfd70b1b4543a139ea28bad4c001c5f413d (patch)
tree       0d3fb3ee166a2d81f4f7e7e2338dd3c625929554 /drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
parent     07c13bb78c8b8a9cb6ee169659528945038d5e85 (diff)
drm/vmwgfx: Replace vmw_dma_buffer with vmw_buffer_object
Initially vmware buffer objects were only used as DMA buffers, so the name
DMA buffer was a natural one. However, currently they are used also as dumb
buffers and MOBs backing guest backed objects, so renaming them to buffer
objects is logical, particularly since there is a dmabuf subsystem in the
kernel where a DMA buffer means something completely different.

This also renames user-space API structures and IOCTL names correspondingly,
but the old names remain defined for now and the ABI hasn't changed.

There are a couple of minor style changes to make checkpatch happy.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
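The user-space header change mentioned above is not part of this file's diff. As a rough sketch of how "the old names remain defined" can keep the ABI untouched (the identifiers below are hypothetical placeholders, not the actual include/uapi/drm/vmwgfx_drm.h contents of this commit), the old struct and ioctl spellings can simply alias the new ones, so layouts and ioctl numbers stay identical:

```c
#include <linux/types.h>

/*
 * Illustrative sketch only; these identifiers are hypothetical and do not
 * reproduce the real vmwgfx_drm.h change made by this commit.
 */

/* New, preferred spelling of an ioctl index and its reply struct. */
#define DRM_VMW_EXAMPLE_ALLOC_BO	1

struct drm_vmw_example_bo_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};

/*
 * Old spellings kept as aliases: existing user space keeps compiling
 * against the same ioctl number and struct layout, so the ABI does not
 * change even though the kernel-internal code now talks about buffer
 * objects instead of DMA buffers.
 */
#define DRM_VMW_EXAMPLE_ALLOC_DMABUF	DRM_VMW_EXAMPLE_ALLOC_BO
#define drm_vmw_example_dmabuf_rep	drm_vmw_example_bo_rep
```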
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  280
1 file changed, 141 insertions, 139 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6b3a942b18df..5aaf9ac65cba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,9 +35,9 @@
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
-struct vmw_user_dma_buffer {
+struct vmw_user_buffer_object {
 	struct ttm_prime_object prime;
-	struct vmw_dma_buffer dma;
+	struct vmw_buffer_object vbo;
 };
 
 struct vmw_bo_user_rep {
@@ -45,17 +45,18 @@ struct vmw_bo_user_rep {
 	uint64_t map_handle;
 };
 
-static inline struct vmw_dma_buffer *
-vmw_dma_buffer(struct ttm_buffer_object *bo)
+static inline struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
 {
-	return container_of(bo, struct vmw_dma_buffer, base);
+	return container_of(bo, struct vmw_buffer_object, base);
 }
 
-static inline struct vmw_user_dma_buffer *
-vmw_user_dma_buffer(struct ttm_buffer_object *bo)
+static inline struct vmw_user_buffer_object *
+vmw_user_buffer_object(struct ttm_buffer_object *bo)
 {
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
+	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
 }
 
 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
@@ -116,7 +117,7 @@ static void vmw_resource_release(struct kref *kref)
 		res->backup_dirty = false;
 		list_del_init(&res->mob_head);
 		ttm_bo_unreserve(bo);
-		vmw_dmabuf_unreference(&res->backup);
+		vmw_bo_unreference(&res->backup);
 	}
 
 	if (likely(res->hw_destroy != NULL)) {
@@ -287,7 +288,7 @@ out_bad_resource:
 }
 
 /**
- * Helper function that looks either a surface or dmabuf.
+ * Helper function that looks either a surface or bo.
  *
  * The pointer this pointed at by out_surf and out_buf needs to be null.
  */
@@ -295,7 +296,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 			   struct ttm_object_file *tfile,
 			   uint32_t handle,
 			   struct vmw_surface **out_surf,
-			   struct vmw_dma_buffer **out_buf)
+			   struct vmw_buffer_object **out_buf)
 {
 	struct vmw_resource *res;
 	int ret;
@@ -311,7 +312,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 	}
 
 	*out_surf = NULL;
-	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
+	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
 	return ret;
 }
 
@@ -320,14 +321,14 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
  */
 
 /**
- * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
  *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
  */
-static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
-				  bool user)
+static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+			      bool user)
 {
 	static size_t struct_size, user_struct_size;
 	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -337,9 +338,9 @@ static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
 	size_t backend_size = ttm_round_pot(vmw_tt_size);
 
 		struct_size = backend_size +
-			ttm_round_pot(sizeof(struct vmw_dma_buffer));
+			ttm_round_pot(sizeof(struct vmw_buffer_object));
 		user_struct_size = backend_size +
-			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+			ttm_round_pot(sizeof(struct vmw_user_buffer_object));
 	}
 
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -350,36 +351,36 @@ static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
 		page_array_size;
 }
 
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+void vmw_bo_bo_free(struct ttm_buffer_object *bo)
 {
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
 
-	vmw_dma_buffer_unmap(vmw_bo);
+	vmw_buffer_object_unmap(vmw_bo);
 	kfree(vmw_bo);
 }
 
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
 {
-	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
 
-	vmw_dma_buffer_unmap(&vmw_user_bo->dma);
+	vmw_buffer_object_unmap(&vmw_user_bo->vbo);
 	ttm_prime_object_kfree(vmw_user_bo, prime);
 }
 
-int vmw_dmabuf_init(struct vmw_private *dev_priv,
-		    struct vmw_dma_buffer *vmw_bo,
-		    size_t size, struct ttm_placement *placement,
-		    bool interruptible,
-		    void (*bo_free) (struct ttm_buffer_object *bo))
+int vmw_bo_init(struct vmw_private *dev_priv,
+		struct vmw_buffer_object *vmw_bo,
+		size_t size, struct ttm_placement *placement,
+		bool interruptible,
+		void (*bo_free)(struct ttm_buffer_object *bo))
 {
 	struct ttm_bo_device *bdev = &dev_priv->bdev;
 	size_t acc_size;
 	int ret;
-	bool user = (bo_free == &vmw_user_dmabuf_destroy);
+	bool user = (bo_free == &vmw_user_bo_destroy);
 
-	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
+	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
 
-	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
+	acc_size = vmw_bo_acc_size(dev_priv, size, user);
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 
 	INIT_LIST_HEAD(&vmw_bo->res_list);
@@ -391,9 +392,9 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	return ret;
 }
 
-static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
+static void vmw_user_bo_release(struct ttm_base_object **p_base)
 {
-	struct vmw_user_dma_buffer *vmw_user_bo;
+	struct vmw_user_buffer_object *vmw_user_bo;
 	struct ttm_base_object *base = *p_base;
 	struct ttm_buffer_object *bo;
 
@@ -402,21 +403,22 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 	if (unlikely(base == NULL))
 		return;
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 				   prime.base);
-	bo = &vmw_user_bo->dma.base;
+	bo = &vmw_user_bo->vbo.base;
 	ttm_bo_unref(&bo);
 }
 
-static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
-					    enum ttm_ref_type ref_type)
+static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+					enum ttm_ref_type ref_type)
 {
-	struct vmw_user_dma_buffer *user_bo;
-	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+	struct vmw_user_buffer_object *user_bo;
+
+	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
 
 	switch (ref_type) {
 	case TTM_REF_SYNCCPU_WRITE:
-		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
 		break;
 	default:
 		BUG();
@@ -424,7 +426,7 @@ static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
 }
 
 /**
- * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ * vmw_user_bo_alloc - Allocate a user dma buffer
  *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
@@ -432,18 +434,18 @@ static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
- * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
  */
-int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
-			  struct ttm_object_file *tfile,
-			  uint32_t size,
-			  bool shareable,
-			  uint32_t *handle,
-			  struct vmw_dma_buffer **p_dma_buf,
-			  struct ttm_base_object **p_base)
+int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+		      struct ttm_object_file *tfile,
+		      uint32_t size,
+		      bool shareable,
+		      uint32_t *handle,
+		      struct vmw_buffer_object **p_vbo,
+		      struct ttm_base_object **p_base)
 {
-	struct vmw_user_dma_buffer *user_bo;
+	struct vmw_user_buffer_object *user_bo;
 	struct ttm_buffer_object *tmp;
 	int ret;
 
@@ -453,28 +455,28 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 		return -ENOMEM;
 	}
 
-	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
-			      (dev_priv->has_mob) ?
-			      &vmw_sys_placement :
-			      &vmw_vram_sys_placement, true,
-			      &vmw_user_dmabuf_destroy);
+	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+			  (dev_priv->has_mob) ?
+			  &vmw_sys_placement :
+			  &vmw_vram_sys_placement, true,
+			  &vmw_user_bo_destroy);
 	if (unlikely(ret != 0))
 		return ret;
 
-	tmp = ttm_bo_reference(&user_bo->dma.base);
+	tmp = ttm_bo_reference(&user_bo->vbo.base);
 	ret = ttm_prime_object_init(tfile,
 				    size,
 				    &user_bo->prime,
 				    shareable,
 				    ttm_buffer_type,
-				    &vmw_user_dmabuf_release,
-				    &vmw_user_dmabuf_ref_obj_release);
+				    &vmw_user_bo_release,
+				    &vmw_user_bo_ref_obj_release);
 	if (unlikely(ret != 0)) {
 		ttm_bo_unref(&tmp);
 		goto out_no_base_object;
 	}
 
-	*p_dma_buf = &user_bo->dma;
+	*p_vbo = &user_bo->vbo;
 	if (p_base) {
 		*p_base = &user_bo->prime.base;
 		kref_get(&(*p_base)->refcount);
@@ -486,21 +488,21 @@ out_no_base_object:
 }
 
 /**
- * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
  *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
  */
-int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
-				  struct ttm_object_file *tfile)
+int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+			      struct ttm_object_file *tfile)
 {
-	struct vmw_user_dma_buffer *vmw_user_bo;
+	struct vmw_user_buffer_object *vmw_user_bo;
 
-	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+	if (unlikely(bo->destroy != vmw_user_bo_destroy))
 		return -EPERM;
 
-	vmw_user_bo = vmw_user_dma_buffer(bo);
+	vmw_user_bo = vmw_user_buffer_object(bo);
 
 	/* Check that the caller has opened the object. */
 	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
@@ -511,7 +513,7 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 }
 
 /**
- * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
  *
@@ -521,11 +523,11 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 *
 * A blocking grab will be automatically released when @tfile is closed.
  */
-static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
-					struct ttm_object_file *tfile,
-					uint32_t flags)
+static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
+				    struct ttm_object_file *tfile,
+				    uint32_t flags)
 {
-	struct ttm_buffer_object *bo = &user_bo->dma.base;
+	struct ttm_buffer_object *bo = &user_bo->vbo.base;
 	bool existed;
 	int ret;
 
@@ -550,20 +552,20 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 	if (ret != 0 || existed)
-		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
 
 	return ret;
 }
 
 /**
- * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
  *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
  */
-static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
-					   struct ttm_object_file *tfile,
-					   uint32_t flags)
+static int vmw_user_bo_synccpu_release(uint32_t handle,
+				       struct ttm_object_file *tfile,
+				       uint32_t flags)
 {
@@ -575,7 +577,7 @@ static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
 }
 
 /**
- * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
+ * vmw_user_bo_synccpu_release - ioctl function implementing the synccpu
 * functionality.
  *
 * @dev: Identifies the drm device.
@@ -585,13 +587,13 @@ static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
  */
-int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file_priv)
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
 {
 	struct drm_vmw_synccpu_arg *arg =
 		(struct drm_vmw_synccpu_arg *) data;
-	struct vmw_dma_buffer *dma_buf;
-	struct vmw_user_dma_buffer *user_bo;
+	struct vmw_buffer_object *vbo;
+	struct vmw_user_buffer_object *user_bo;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct ttm_base_object *buffer_base;
 	int ret;
@@ -606,15 +608,15 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 
 	switch (arg->op) {
 	case drm_vmw_synccpu_grab:
-		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
-					     &buffer_base);
+		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
+					 &buffer_base);
 		if (unlikely(ret != 0))
 			return ret;
 
-		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
-				       dma);
-		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
-		vmw_dmabuf_unreference(&dma_buf);
+		user_bo = container_of(vbo, struct vmw_user_buffer_object,
+				       vbo);
+		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+		vmw_bo_unreference(&vbo);
 		ttm_base_object_unref(&buffer_base);
 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 			     ret != -EBUSY)) {
@@ -624,8 +626,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 		}
 		break;
 	case drm_vmw_synccpu_release:
-		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
-						      arg->flags);
+		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+						  arg->flags);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
 				  (unsigned int) arg->handle);
@@ -640,15 +642,15 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv)
+int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	union drm_vmw_alloc_dmabuf_arg *arg =
 		(union drm_vmw_alloc_dmabuf_arg *)data;
 	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-	struct vmw_dma_buffer *dma_buf;
+	struct vmw_buffer_object *vbo;
 	uint32_t handle;
 	int ret;
 
@@ -656,27 +658,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				    req->size, false, &handle, &dma_buf,
-				    NULL);
+	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				req->size, false, &handle, &vbo,
+				NULL);
 	if (unlikely(ret != 0))
-		goto out_no_dmabuf;
+		goto out_no_bo;
 
 	rep->handle = handle;
-	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
+	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
 	rep->cur_gmr_id = handle;
 	rep->cur_gmr_offset = 0;
 
-	vmw_dmabuf_unreference(&dma_buf);
+	vmw_bo_unreference(&vbo);
 
-out_no_dmabuf:
+out_no_bo:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	return ret;
 }
 
-int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv)
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
 {
 	struct drm_vmw_unref_dmabuf_arg *arg =
 		(struct drm_vmw_unref_dmabuf_arg *)data;
@@ -686,11 +688,11 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 				  TTM_REF_USAGE);
 }
 
-int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-			   uint32_t handle, struct vmw_dma_buffer **out,
-			   struct ttm_base_object **p_base)
+int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+		       uint32_t handle, struct vmw_buffer_object **out,
+		       struct ttm_base_object **p_base)
 {
-	struct vmw_user_dma_buffer *vmw_user_bo;
+	struct vmw_user_buffer_object *vmw_user_bo;
 	struct ttm_base_object *base;
 
 	base = ttm_base_object_lookup(tfile, handle);
@@ -707,28 +709,28 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 		return -EINVAL;
 	}
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 				   prime.base);
-	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
+	(void)ttm_bo_reference(&vmw_user_bo->vbo.base);
 	if (p_base)
 		*p_base = base;
 	else
 		ttm_base_object_unref(&base);
-	*out = &vmw_user_bo->dma;
+	*out = &vmw_user_bo->vbo;
 
 	return 0;
 }
 
-int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-			      struct vmw_dma_buffer *dma_buf,
-			      uint32_t *handle)
+int vmw_user_bo_reference(struct ttm_object_file *tfile,
+			  struct vmw_buffer_object *vbo,
+			  uint32_t *handle)
 {
-	struct vmw_user_dma_buffer *user_bo;
+	struct vmw_user_buffer_object *user_bo;
 
-	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+	if (vbo->base.destroy != vmw_user_bo_destroy)
 		return -EINVAL;
 
-	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
 
 	*handle = user_bo->prime.base.hash.key;
 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
@@ -743,7 +745,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
  */
 int vmw_dumb_create(struct drm_file *file_priv,
@@ -751,7 +753,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 		    struct drm_mode_create_dumb *args)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_dma_buffer *dma_buf;
+	struct vmw_buffer_object *vbo;
 	int ret;
 
 	args->pitch = args->width * ((args->bpp + 7) / 8);
@@ -761,14 +763,14 @@ int vmw_dumb_create(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				    args->size, false, &args->handle,
-				    &dma_buf, NULL);
+	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				args->size, false, &args->handle,
+				&vbo, NULL);
 	if (unlikely(ret != 0))
-		goto out_no_dmabuf;
+		goto out_no_bo;
 
-	vmw_dmabuf_unreference(&dma_buf);
-out_no_dmabuf:
+	vmw_bo_unreference(&vbo);
+out_no_bo:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
@@ -788,15 +790,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 			uint64_t *offset)
 {
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_dma_buffer *out_buf;
+	struct vmw_buffer_object *out_buf;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
+	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
 	if (ret != 0)
 		return -EINVAL;
 
 	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
-	vmw_dmabuf_unreference(&out_buf);
+	vmw_bo_unreference(&out_buf);
 	return 0;
 }
 
@@ -829,7 +831,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 {
 	unsigned long size =
 		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
-	struct vmw_dma_buffer *backup;
+	struct vmw_buffer_object *backup;
 	int ret;
 
 	if (likely(res->backup)) {
@@ -841,16 +843,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 	if (unlikely(!backup))
 		return -ENOMEM;
 
-	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
-			      res->func->backup_placement,
-			      interruptible,
-			      &vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
+			  res->func->backup_placement,
+			  interruptible,
+			  &vmw_bo_bo_free);
 	if (unlikely(ret != 0))
-		goto out_no_dmabuf;
+		goto out_no_bo;
 
 	res->backup = backup;
 
-out_no_dmabuf:
+out_no_bo:
 	return ret;
 }
 
@@ -919,7 +921,7 @@ out_bind_failed:
  */
 void vmw_resource_unreserve(struct vmw_resource *res,
 			    bool switch_backup,
-			    struct vmw_dma_buffer *new_backup,
+			    struct vmw_buffer_object *new_backup,
 			    unsigned long new_backup_offset)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
@@ -931,11 +933,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 		if (res->backup) {
 			lockdep_assert_held(&res->backup->base.resv->lock.base);
 			list_del_init(&res->mob_head);
-			vmw_dmabuf_unreference(&res->backup);
+			vmw_bo_unreference(&res->backup);
 		}
 
 		if (new_backup) {
-			res->backup = vmw_dmabuf_reference(new_backup);
+			res->backup = vmw_bo_reference(new_backup);
 			lockdep_assert_held(&new_backup->base.resv->lock.base);
 			list_add_tail(&res->mob_head, &new_backup->res_list);
 		} else {
@@ -1007,7 +1009,7 @@ out_no_validate:
 out_no_reserve:
 	ttm_bo_unref(&val_buf->bo);
 	if (backup_dirty)
-		vmw_dmabuf_unreference(&res->backup);
+		vmw_bo_unreference(&res->backup);
 
 	return ret;
 }
@@ -1171,7 +1173,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 		goto out_no_validate;
 	else if (!res->func->needs_backup && res->backup) {
 		list_del_init(&res->mob_head);
-		vmw_dmabuf_unreference(&res->backup);
+		vmw_bo_unreference(&res->backup);
 	}
 
 	return 0;
@@ -1230,22 +1232,22 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 			      struct ttm_mem_reg *mem)
 {
-	struct vmw_dma_buffer *dma_buf;
+	struct vmw_buffer_object *vbo;
 
 	if (mem == NULL)
 		return;
 
-	if (bo->destroy != vmw_dmabuf_bo_free &&
-	    bo->destroy != vmw_user_dmabuf_destroy)
+	if (bo->destroy != vmw_bo_bo_free &&
+	    bo->destroy != vmw_user_bo_destroy)
 		return;
 
-	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+	vbo = container_of(bo, struct vmw_buffer_object, base);
 
 	/*
 	 * Kill any cached kernel maps before move. An optimization could
 	 * be to do this iff source or destination memory type is VRAM.
 	 */
-	vmw_dma_buffer_unmap(dma_buf);
+	vmw_buffer_object_unmap(vbo);
 
 	if (mem->mem_type != VMW_PL_MOB) {
 		struct vmw_resource *res, *n;
@@ -1254,7 +1256,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 		val_buf.bo = bo;
 		val_buf.shared = false;
 
-		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+		list_for_each_entry_safe(res, n, &vbo->res_list, mob_head) {
 
 			if (unlikely(res->func->unbind == NULL))
 				continue;
@@ -1277,12 +1279,12 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
  */
 void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
 {
-	if (bo->destroy != vmw_dmabuf_bo_free &&
-	    bo->destroy != vmw_user_dmabuf_destroy)
+	if (bo->destroy != vmw_bo_bo_free &&
+	    bo->destroy != vmw_user_bo_destroy)
 		return;
 
 	/* Kill any cached kernel maps before swapout */
-	vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+	vmw_buffer_object_unmap(vmw_buffer_object(bo));
 }
 
 
@@ -1294,7 +1296,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
 * Read back cached states from the device if they exist. This function
 * assumings binding_mutex is held.
  */
-int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 {
 	struct vmw_resource *dx_query_ctx;
 	struct vmw_private *dev_priv;
@@ -1344,7 +1346,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
 void vmw_query_move_notify(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *mem)
 {
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct vmw_private *dev_priv;
 
@@ -1353,7 +1355,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
 
 	mutex_lock(&dev_priv->binding_mutex);
 
-	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
 	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
 		mutex_unlock(&dev_priv->binding_mutex);
 		return;
@@ -1481,7 +1483,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 		goto out_no_reserve;
 
 	if (res->pin_count == 0) {
-		struct vmw_dma_buffer *vbo = NULL;
+		struct vmw_buffer_object *vbo = NULL;
 
 		if (res->backup) {
 			vbo = res->backup;
@@ -1539,7 +1541,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
 
 	WARN_ON(res->pin_count == 0);
 	if (--res->pin_count == 0 && res->backup) {
-		struct vmw_dma_buffer *vbo = res->backup;
+		struct vmw_buffer_object *vbo = res->backup;
 
 		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
 		vmw_bo_pin_reserved(vbo, false);