author		Dave Airlie <airlied@redhat.com>	2013-11-21 03:46:26 -0500
committer	Dave Airlie <airlied@redhat.com>	2013-11-21 03:46:26 -0500
commit		28adb3026d01da3f6acded3cec817e1a3ba37f44 (patch)
tree		26df1bf50f9ccf6fd282f3b22d103cb3b30ecaa3
parent		cf969677945e6e19810d616873617320da002e32 (diff)
parent		c486d4f894d7c7d0e4148426360aa354384f6dc8 (diff)
Merge branch 'vmwgfx-fixes-3.13' of git://people.freedesktop.org/~thomash/linux into drm-fixes
Below is a fix for a false lockdep warning, and the vmwgfx prime implementation.

* 'vmwgfx-fixes-3.13' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Make vmwgfx dma buffers prime aware
  drm/vmwgfx: Make surfaces prime-aware
  drm/vmwgfx: Hook up the prime ioctls
  drm/ttm: Add a minimal prime implementation for ttm base objects
  drm/vmwgfx: Fix false lockdep warning
  drm/ttm: Allow execbuf util reserves without ticket
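For context, once DRIVER_PRIME is set (see the vmwgfx_drv.c hunk below), the new ioctls are reachable through libdrm's generic PRIME helpers. A minimal userspace sketch, illustrative only and not part of this merge; the buffer-allocation ioctls and error handling are omitted, and the function names here are hypothetical:

#include <stdint.h>
#include <xf86drm.h>	/* libdrm PRIME helpers */

/* Export a buffer handle as a dma-buf fd; on vmwgfx this ends up in
 * vmw_prime_handle_to_fd() -> ttm_prime_handle_to_fd() below. */
int export_handle(int drm_fd, uint32_t handle, int *prime_fd)
{
	return drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, prime_fd);
}

/* Import a dma-buf fd back into a handle; this ends up in
 * vmw_prime_fd_to_handle() -> ttm_prime_fd_to_handle() below. */
int import_fd(int drm_fd, int prime_fd, uint32_t *handle)
{
	return drmPrimeFDToHandle(drm_fd, prime_fd, handle);
}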
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c		 32
-rw-r--r--	drivers/gpu/drm/ttm/ttm_object.c		254
-rw-r--r--	drivers/gpu/drm/vmwgfx/Makefile			  2
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.c		  7
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.h		 14
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_prime.c		137
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_resource.c	 63
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_surface.c		 30
-rw-r--r--	include/drm/ttm/ttm_execbuf_util.h		  3
-rw-r--r--	include/drm/ttm/ttm_object.h			 61
10 files changed, 533 insertions, 70 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 6c911789ae5c..479e9418e3d7 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,8 +32,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list,
-					      struct ww_acquire_ctx *ticket)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 
@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list, ticket);
-	ww_acquire_fini(ticket);
+	ttm_eu_backoff_reservation_locked(list);
+	if (ticket)
+		ww_acquire_fini(ticket);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-	ww_acquire_init(ticket, &reservation_ww_class);
+	if (ticket)
+		ww_acquire_init(ticket, &reservation_ww_class);
 retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
@@ -139,16 +140,17 @@ retry:
 		if (entry->reserved)
 			continue;
 
-
-		ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
+		ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
+					   ticket);
 
 		if (ret == -EDEADLK) {
 			/* uh oh, we lost out, drop every reservation and try
 			 * to only reserve this buffer, then start over if
 			 * this succeeds.
 			 */
+			BUG_ON(ticket == NULL);
 			spin_lock(&glob->lru_lock);
-			ttm_eu_backoff_reservation_locked(list, ticket);
+			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
 			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -175,7 +177,8 @@ retry:
 		}
 	}
 
-	ww_acquire_done(ticket);
+	if (ticket)
+		ww_acquire_done(ticket);
 	spin_lock(&glob->lru_lock);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
@@ -184,12 +187,14 @@ retry:
 
 err:
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list, ticket);
+	ttm_eu_backoff_reservation_locked(list);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
 err_fini:
-	ww_acquire_done(ticket);
-	ww_acquire_fini(ticket);
+	if (ticket) {
+		ww_acquire_done(ticket);
+		ww_acquire_fini(ticket);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	}
 	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
-	ww_acquire_fini(ticket);
+	if (ticket)
+		ww_acquire_fini(ticket);
 
 	list_for_each_entry(entry, list, head) {
 		if (entry->old_sync_obj)
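The net effect of the ttm_execbuf_util changes above: a NULL ticket now selects trylock-only reserves with no ww_acquire_ctx bookkeeping, which is what lets lockdep stay quiet on the single-buffer eviction path. A sketch of the new calling convention, under the assumption of a caller shaped like the vmw_resource_check_buffer() conversion further down in this merge:

/* Hedged sketch of the NULL-ticket convention; res and its backup
 * buffer are assumed to come from the surrounding driver code. */
struct ttm_validate_buffer val_buf;
struct list_head val_list;
int ret;

INIT_LIST_HEAD(&val_list);
val_buf.bo = ttm_bo_reference(&res->backup->base);
list_add_tail(&val_buf.head, &val_list);

/* NULL ticket: ttm_bo_reserve_nolru() runs with no_wait == true, so
 * the reserve can fail but can never deadlock, and no ctx is taken. */
ret = ttm_eu_reserve_buffers(NULL, &val_list);
if (ret == 0) {
	/* ... validate buffers ... */
	ttm_eu_backoff_reservation(NULL, &val_list);	/* ticket stays NULL */
}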
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index a868176c258a..6fe7b92a82d1 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,12 @@
  **************************************************************************/
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * While no substantial code is shared, the prime code is inspired by
+ * drm_prime.c, with
+ * Authors:
+ *      Dave Airlie <airlied@redhat.com>
+ *      Rob Clark <rob.clark@linaro.org>
  */
 /** @file ttm_ref_object.c
  *
@@ -34,6 +40,7 @@
  * and release on file close.
  */
 
+
 /**
  * struct ttm_object_file
  *
@@ -84,6 +91,9 @@ struct ttm_object_device {
 	struct drm_open_hash object_hash;
 	atomic_t object_count;
 	struct ttm_mem_global *mem_glob;
+	struct dma_buf_ops ops;
+	void (*dmabuf_release)(struct dma_buf *dma_buf);
+	size_t dma_buf_size;
 };
 
 /**
@@ -116,6 +126,8 @@ struct ttm_ref_object {
 	struct ttm_object_file *tfile;
 };
 
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
+
 static inline struct ttm_object_file *
 ttm_object_file_ref(struct ttm_object_file *tfile)
 {
@@ -416,9 +428,10 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_object_file_init);
 
-struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
-						 *mem_glob,
-						 unsigned int hash_order)
+struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+		       unsigned int hash_order,
+		       const struct dma_buf_ops *ops)
 {
 	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
 	int ret;
@@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
 	spin_lock_init(&tdev->object_lock);
 	atomic_set(&tdev->object_count, 0);
 	ret = drm_ht_create(&tdev->object_hash, hash_order);
+	if (ret != 0)
+		goto out_no_object_hash;
 
-	if (likely(ret == 0))
-		return tdev;
+	tdev->ops = *ops;
+	tdev->dmabuf_release = tdev->ops.release;
+	tdev->ops.release = ttm_prime_dmabuf_release;
+	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
+		ttm_round_pot(sizeof(struct file));
+	return tdev;
 
+out_no_object_hash:
 	kfree(tdev);
 	return NULL;
 }
@@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 	kfree(tdev);
 }
 EXPORT_SYMBOL(ttm_object_device_release);
+
+/**
+ * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
+ *
+ * @dma_buf: Non-refcounted pointer to a struct dma-buf.
+ *
+ * Obtain a file reference from a lookup structure that doesn't refcount
+ * the file, but synchronizes with its release method to make sure it has
+ * not been freed yet. See for example kref_get_unless_zero documentation.
+ * Returns true if refcounting succeeds, false otherwise.
+ *
+ * Nobody really wants this as a public API yet, so let it mature here
+ * for some time...
+ */
+static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
+{
+	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+}
+
+/**
+ * ttm_prime_refcount_release - refcount release method for a prime object.
+ *
+ * @p_base: Pointer to ttm_base_object pointer.
+ *
+ * This is a wrapper that calls the refcount_release function of the
+ * underlying object. At the same time it cleans up the prime object.
+ * This function is called when all references to the base object we
+ * derive from are gone.
+ */
+static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct ttm_prime_object *prime;
+
+	*p_base = NULL;
+	prime = container_of(base, struct ttm_prime_object, base);
+	BUG_ON(prime->dma_buf != NULL);
+	mutex_destroy(&prime->mutex);
+	if (prime->refcount_release)
+		prime->refcount_release(&base);
+}
+
+/**
+ * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
+ *
+ * @dma_buf:
+ *
+ * This function first calls the dma_buf release method the driver
+ * provides. Then it cleans up our dma_buf pointer used for lookup,
+ * and finally releases the reference the dma_buf has on our base
+ * object.
+ */
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
+{
+	struct ttm_prime_object *prime =
+		(struct ttm_prime_object *) dma_buf->priv;
+	struct ttm_base_object *base = &prime->base;
+	struct ttm_object_device *tdev = base->tfile->tdev;
+
+	if (tdev->dmabuf_release)
+		tdev->dmabuf_release(dma_buf);
+	mutex_lock(&prime->mutex);
+	if (prime->dma_buf == dma_buf)
+		prime->dma_buf = NULL;
+	mutex_unlock(&prime->mutex);
+	ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
+	ttm_base_object_unref(&base);
+}
+
+/**
+ * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @fd: The prime / dmabuf fd.
+ * @handle: The returned handle.
+ *
+ * This function returns a handle to an object that previously exported
+ * a dma-buf. Note that we don't handle imports yet, because we simply
+ * have no consumers of that implementation.
+ */
+int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+			   int fd, u32 *handle)
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	struct dma_buf *dma_buf;
+	struct ttm_prime_object *prime;
+	struct ttm_base_object *base;
+	int ret;
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR(dma_buf))
+		return PTR_ERR(dma_buf);
+
+	if (dma_buf->ops != &tdev->ops)
+		return -ENOSYS;
+
+	prime = (struct ttm_prime_object *) dma_buf->priv;
+	base = &prime->base;
+	*handle = base->hash.key;
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+
+	dma_buf_put(dma_buf);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
+
+/**
+ * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
+ *
+ * @tfile: Struct ttm_object_file identifying the caller.
+ * @handle: Handle to the object we're exporting from.
+ * @flags: flags for dma-buf creation. We just pass them on.
+ * @prime_fd: The returned file descriptor.
+ *
+ */
+int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+			   uint32_t handle, uint32_t flags,
+			   int *prime_fd)
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	struct ttm_base_object *base;
+	struct dma_buf *dma_buf;
+	struct ttm_prime_object *prime;
+	int ret;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL ||
+		     base->object_type != ttm_prime_type)) {
+		ret = -ENOENT;
+		goto out_unref;
+	}
+
+	prime = container_of(base, struct ttm_prime_object, base);
+	if (unlikely(!base->shareable)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+
+	ret = mutex_lock_interruptible(&prime->mutex);
+	if (unlikely(ret != 0)) {
+		ret = -ERESTARTSYS;
+		goto out_unref;
+	}
+
+	dma_buf = prime->dma_buf;
+	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
+
+		/*
+		 * Need to create a new dma_buf, with memory accounting.
+		 */
+		ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
+					   false, true);
+		if (unlikely(ret != 0)) {
+			mutex_unlock(&prime->mutex);
+			goto out_unref;
+		}
+
+		dma_buf = dma_buf_export(prime, &tdev->ops,
+					 prime->size, flags);
+		if (IS_ERR(dma_buf)) {
+			ret = PTR_ERR(dma_buf);
+			ttm_mem_global_free(tdev->mem_glob,
+					    tdev->dma_buf_size);
+			mutex_unlock(&prime->mutex);
+			goto out_unref;
+		}
+
+		/*
+		 * dma_buf has taken the base object reference
+		 */
+		base = NULL;
+		prime->dma_buf = dma_buf;
+	}
+	mutex_unlock(&prime->mutex);
+
+	ret = dma_buf_fd(dma_buf, flags);
+	if (ret >= 0) {
+		*prime_fd = ret;
+		ret = 0;
+	} else
+		dma_buf_put(dma_buf);
+
+out_unref:
+	if (base)
+		ttm_base_object_unref(&base);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
+
+/**
+ * ttm_prime_object_init - Initialize a ttm_prime_object
+ *
+ * @tfile: struct ttm_object_file identifying the caller
+ * @size: The size of the dma_bufs we export.
+ * @prime: The object to be initialized.
+ * @shareable: See ttm_base_object_init
+ * @type: See ttm_base_object_init
+ * @refcount_release: See ttm_base_object_init
+ * @ref_obj_release: See ttm_base_object_init
+ *
+ * Initializes an object which is compatible with the drm_prime model
+ * for data sharing between processes and devices.
+ */
+int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
+			  struct ttm_prime_object *prime, bool shareable,
+			  enum ttm_object_type type,
+			  void (*refcount_release) (struct ttm_base_object **),
+			  void (*ref_obj_release) (struct ttm_base_object *,
+						   enum ttm_ref_type ref_type))
+{
+	mutex_init(&prime->mutex);
+	prime->size = PAGE_ALIGN(size);
+	prime->real_type = type;
+	prime->dma_buf = NULL;
+	prime->refcount_release = refcount_release;
+	return ttm_base_object_init(tfile, &prime->base, shareable,
+				    ttm_prime_type,
+				    ttm_prime_refcount_release,
+				    ref_obj_release);
+}
+EXPORT_SYMBOL(ttm_prime_object_init);
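To summarize the new TTM API before the vmwgfx conversions below: a driver embeds struct ttm_prime_object where it used to embed struct ttm_base_object, initializes it with ttm_prime_object_init(), and type-checks lookups through ttm_base_object_type(), since the wrapped object now registers as ttm_prime_type while its real type lives in prime.real_type. A condensed sketch of that pattern, with names taken from the vmwgfx diffs that follow and the wrapper function itself hypothetical:

/* Condensed from the vmwgfx_resource.c changes below; assumes the
 * surrounding vmwgfx types and a valid tfile. */
struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;	/* was: struct ttm_base_object base */
	struct vmw_dma_buffer dma;
};

static int my_user_bo_init(struct ttm_object_file *tfile,
			   struct vmw_user_dma_buffer *user_bo,
			   size_t size, bool shareable)
{
	/* Registers as ttm_prime_type; the real type (ttm_buffer_type)
	 * is kept in prime.real_type for later dispatch. */
	return ttm_prime_object_init(tfile, size, &user_bo->prime,
				     shareable, ttm_buffer_type,
				     &vmw_user_dmabuf_release, NULL);
}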
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 2cc6cd91ac11..9f8b690bcf52 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
-	    vmwgfx_surface.o
+	    vmwgfx_surface.o vmwgfx_prime.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 20d5485eaf98..c7a549694e59 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -677,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	}
 
 	dev_priv->tdev = ttm_object_device_init
-		(dev_priv->mem_global_ref.object, 12);
+		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
 
 	if (unlikely(dev_priv->tdev == NULL)) {
 		DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -1210,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = {
 
 static struct drm_driver driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-	DRIVER_MODESET,
+	DRIVER_MODESET | DRIVER_PRIME,
 	.load = vmw_driver_load,
 	.unload = vmw_driver_unload,
 	.lastclose = vmw_lastclose,
@@ -1235,6 +1235,9 @@ static struct drm_driver driver = {
 	.dumb_map_offset = vmw_dumb_map_offset,
 	.dumb_destroy = vmw_dumb_destroy,
 
+	.prime_fd_to_handle = vmw_prime_fd_to_handle,
+	.prime_handle_to_fd = vmw_prime_handle_to_fd,
+
 	.fops = &vmwgfx_driver_fops,
 	.name = VMWGFX_DRIVER_NAME,
 	.desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e401d5dbcb96..db85985c7086 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -819,6 +819,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
 extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
 
 /**
+ * Prime - vmwgfx_prime.c
+ */
+
+extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
+extern int vmw_prime_fd_to_handle(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  int fd, u32 *handle);
+extern int vmw_prime_handle_to_fd(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  uint32_t handle, uint32_t flags,
+				  int *prime_fd);
+
+
+/**
  * Inline helper functions
  */
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
new file mode 100644
index 000000000000..31fe32d8d65a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ *     Thomas Hellstrom <thellstrom@vmware.com>
+ *
+ */
+
+#include "vmwgfx_drv.h"
+#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_object.h>
+
+/*
+ * DMA-BUF attach- and mapping methods. No need to implement
+ * these until we have other virtual devices use them.
+ */
+
+static int vmw_prime_map_attach(struct dma_buf *dma_buf,
+				struct device *target_dev,
+				struct dma_buf_attachment *attach)
+{
+	return -ENOSYS;
+}
+
+static void vmw_prime_map_detach(struct dma_buf *dma_buf,
+				 struct dma_buf_attachment *attach)
+{
+}
+
+static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
+					      enum dma_data_direction dir)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+				    struct sg_table *sgb,
+				    enum dma_data_direction dir)
+{
+}
+
+static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	return NULL;
+}
+
+static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+}
+
+static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+					  unsigned long page_num)
+{
+	return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+					   unsigned long page_num, void *addr)
+{
+
+}
+static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
+				   unsigned long page_num)
+{
+	return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
+				    unsigned long page_num, void *addr)
+{
+
+}
+
+static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
+				 struct vm_area_struct *vma)
+{
+	WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
+	return -ENOSYS;
+}
+
+const struct dma_buf_ops vmw_prime_dmabuf_ops = {
+	.attach = vmw_prime_map_attach,
+	.detach = vmw_prime_map_detach,
+	.map_dma_buf = vmw_prime_map_dma_buf,
+	.unmap_dma_buf = vmw_prime_unmap_dma_buf,
+	.release = NULL,
+	.kmap = vmw_prime_dmabuf_kmap,
+	.kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
+	.kunmap = vmw_prime_dmabuf_kunmap,
+	.kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
+	.mmap = vmw_prime_dmabuf_mmap,
+	.vmap = vmw_prime_dmabuf_vmap,
+	.vunmap = vmw_prime_dmabuf_vunmap,
+};
+
+int vmw_prime_fd_to_handle(struct drm_device *dev,
+			   struct drm_file *file_priv,
+			   int fd, u32 *handle)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_prime_fd_to_handle(tfile, fd, handle);
+}
+
+int vmw_prime_handle_to_fd(struct drm_device *dev,
+			   struct drm_file *file_priv,
+			   uint32_t handle, uint32_t flags,
+			   int *prime_fd)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 252501a54def..efe2b74c5eb1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,7 +35,7 @@
 #define VMW_RES_EVICT_ERR_COUNT 10
 
 struct vmw_user_dma_buffer {
-	struct ttm_base_object base;
+	struct ttm_prime_object prime;
 	struct vmw_dma_buffer dma;
 };
 
@@ -297,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 	if (unlikely(base == NULL))
 		return -EINVAL;
 
-	if (unlikely(base->object_type != converter->object_type))
+	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 		goto out_bad_resource;
 
 	res = converter->base_obj_to_res(base);
@@ -387,7 +387,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 
-	ttm_base_object_kfree(vmw_user_bo, base);
+	ttm_prime_object_kfree(vmw_user_bo, prime);
 }
 
 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
393static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) 393static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -401,7 +401,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
401 if (unlikely(base == NULL)) 401 if (unlikely(base == NULL))
402 return; 402 return;
403 403
404 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); 404 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
405 prime.base);
405 bo = &vmw_user_bo->dma.base; 406 bo = &vmw_user_bo->dma.base;
406 ttm_bo_unref(&bo); 407 ttm_bo_unref(&bo);
407} 408}
@@ -442,18 +443,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 		return ret;
 
 	tmp = ttm_bo_reference(&user_bo->dma.base);
-	ret = ttm_base_object_init(tfile,
-				   &user_bo->base,
-				   shareable,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
+	ret = ttm_prime_object_init(tfile,
+				    size,
+				    &user_bo->prime,
+				    shareable,
+				    ttm_buffer_type,
+				    &vmw_user_dmabuf_release, NULL);
 	if (unlikely(ret != 0)) {
 		ttm_bo_unref(&tmp);
 		goto out_no_base_object;
 	}
 
 	*p_dma_buf = &user_bo->dma;
-	*handle = user_bo->base.hash.key;
+	*handle = user_bo->prime.base.hash.key;
 
 out_no_base_object:
 	return ret;
@@ -475,8 +477,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 		return -EPERM;
 
 	vmw_user_bo = vmw_user_dma_buffer(bo);
-	return (vmw_user_bo->base.tfile == tfile ||
-		vmw_user_bo->base.shareable) ? 0 : -EPERM;
+	return (vmw_user_bo->prime.base.tfile == tfile ||
+		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -538,14 +540,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 		return -ESRCH;
 	}
 
-	if (unlikely(base->object_type != ttm_buffer_type)) {
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 		ttm_base_object_unref(&base);
 		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 		       (unsigned long)handle);
 		return -EINVAL;
 	}
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+				   prime.base);
 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 	ttm_base_object_unref(&base);
 	*out = &vmw_user_bo->dma;
@@ -562,7 +565,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 		return -EINVAL;
 
 	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
-	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+	return ttm_ref_object_add(tfile, &user_bo->prime.base,
+				  TTM_REF_USAGE, NULL);
 }
 
 /*
@@ -807,15 +811,16 @@ int vmw_dumb_create(struct drm_file *file_priv,
 		goto out_no_dmabuf;
 
 	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-				   &vmw_user_bo->base,
-				   false,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
+	ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
+				    args->size,
+				    &vmw_user_bo->prime,
+				    false,
+				    ttm_buffer_type,
+				    &vmw_user_dmabuf_release, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_base_object;
 
-	args->handle = vmw_user_bo->base.hash.key;
+	args->handle = vmw_user_bo->prime.base.hash.key;
 
 out_no_base_object:
 	ttm_bo_unref(&tmp);
@@ -994,7 +999,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  */
 static int
 vmw_resource_check_buffer(struct vmw_resource *res,
-			  struct ww_acquire_ctx *ticket,
 			  bool interruptible,
 			  struct ttm_validate_buffer *val_buf)
 {
@@ -1011,7 +1015,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(ticket, &val_list);
+	ret = ttm_eu_reserve_buffers(NULL, &val_list);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
@@ -1029,7 +1033,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	return 0;
 
 out_no_validate:
-	ttm_eu_backoff_reservation(ticket, &val_list);
+	ttm_eu_backoff_reservation(NULL, &val_list);
 out_no_reserve:
 	ttm_bo_unref(&val_buf->bo);
 	if (backup_dirty)
@@ -1074,8 +1078,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
  * @val_buf: Backup buffer information.
  */
 static void
-vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
-				 struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
 {
 	struct list_head val_list;
 
@@ -1084,7 +1087,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 
 	INIT_LIST_HEAD(&val_list);
 	list_add_tail(&val_buf->head, &val_list);
-	ttm_eu_backoff_reservation(ticket, &val_list);
+	ttm_eu_backoff_reservation(NULL, &val_list);
 	ttm_bo_unref(&val_buf->bo);
 }
 
@@ -1099,14 +1102,12 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
-	struct ww_acquire_ctx ticket;
 	int ret;
 
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
-	ret = vmw_resource_check_buffer(res, &ticket, interruptible,
-					&val_buf);
+	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1121,7 +1122,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 	res->backup_dirty = true;
 	res->res_dirty = false;
 out_no_unbind:
-	vmw_resource_backoff_reservation(&ticket, &val_buf);
+	vmw_resource_backoff_reservation(&val_buf);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 582814339748..7de2ea8bd553 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -38,7 +38,7 @@
  * @size: TTM accounting size for the surface.
  */
 struct vmw_user_surface {
-	struct ttm_base_object base;
+	struct ttm_prime_object prime;
 	struct vmw_surface srf;
 	uint32_t size;
 	uint32_t backup_handle;
@@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 static struct vmw_resource *
 vmw_user_surface_base_to_res(struct ttm_base_object *base)
 {
-	return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+	return &(container_of(base, struct vmw_user_surface,
+			      prime.base)->srf.res);
 }
 
 /**
@@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
 	kfree(srf->offsets);
 	kfree(srf->sizes);
 	kfree(srf->snooper.image);
-	ttm_base_object_kfree(user_srf, base);
+	ttm_prime_object_kfree(user_srf, prime);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 }
 
@@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 {
 	struct ttm_base_object *base = *p_base;
 	struct vmw_user_surface *user_srf =
-	    container_of(base, struct vmw_user_surface, base);
+	    container_of(base, struct vmw_user_surface, prime.base);
 	struct vmw_resource *res = &user_srf->srf.res;
 
 	*p_base = NULL;
@@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	}
 	srf->snooper.crtc = NULL;
 
-	user_srf->base.shareable = false;
-	user_srf->base.tfile = NULL;
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile = NULL;
 
 	/**
 	 * From this point, the generic resource management functions
@@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 
 	tmp = vmw_resource_reference(&srf->res);
-	ret = ttm_base_object_init(tfile, &user_srf->base,
-				   req->shareable, VMW_RES_SURFACE,
-				   &vmw_user_surface_base_release, NULL);
+	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+				    req->shareable, VMW_RES_SURFACE,
+				    &vmw_user_surface_base_release, NULL);
 
 	if (unlikely(ret != 0)) {
 		vmw_resource_unreference(&tmp);
@@ -813,7 +814,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	rep->sid = user_srf->base.hash.key;
+	rep->sid = user_srf->prime.base.hash.key;
 	vmw_resource_unreference(&res);
 
 	ttm_read_unlock(&vmaster->lock);
@@ -823,7 +824,7 @@ out_no_copy:
 out_no_offsets:
 	kfree(srf->sizes);
 out_no_sizes:
-	ttm_base_object_kfree(user_srf, base);
+	ttm_prime_object_kfree(user_srf, prime);
 out_no_user_srf:
 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
@@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	if (unlikely(base->object_type != VMW_RES_SURFACE))
+	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
 		goto out_bad_resource;
 
-	user_srf = container_of(base, struct vmw_user_surface, base);
+	user_srf = container_of(base, struct vmw_user_surface, prime.base);
 	srf = &user_srf->srf;
 
-	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+	ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+				 TTM_REF_USAGE, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not add a reference to a surface.\n");
 		goto out_no_reference;
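One recurring pattern in the two vmwgfx conversions above is worth spelling out: the base object is now nested one struct deeper, so every container_of() walks through the embedded prime member. A sketch of the layout, using the surface case:

/* Layout after the conversion (sketch):
 *
 *   struct vmw_user_surface
 *     -> struct ttm_prime_object prime
 *          -> struct ttm_base_object base
 *
 * so a looked-up ttm_base_object pointer maps back through the
 * two-level member path: */
struct vmw_user_surface *user_srf =
	container_of(base, struct vmw_user_surface, prime.base);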
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index ec8a1d306510..16db7d01a336 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 /**
  * function ttm_eu_reserve_buffers
  *
- * @ticket:  [out] ww_acquire_ctx returned by call.
+ * @ticket:  [out] ww_acquire_ctx filled in by call, or NULL if only
+ *           non-blocking reserves should be tried.
  * @list:    thread private list of ttm_validate_buffer structs.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index fc0cf0649901..58b029894eb3 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -41,6 +41,7 @@
 #include <drm/drm_hashtab.h>
 #include <linux/kref.h>
 #include <linux/rcupdate.h>
+#include <linux/dma-buf.h>
 #include <ttm/ttm_memory.h>
 
 /**
46/** 47/**
@@ -77,6 +78,7 @@ enum ttm_object_type {
77 ttm_fence_type, 78 ttm_fence_type,
78 ttm_buffer_type, 79 ttm_buffer_type,
79 ttm_lock_type, 80 ttm_lock_type,
81 ttm_prime_type,
80 ttm_driver_type0 = 256, 82 ttm_driver_type0 = 256,
81 ttm_driver_type1, 83 ttm_driver_type1,
82 ttm_driver_type2, 84 ttm_driver_type2,
@@ -132,6 +134,30 @@ struct ttm_base_object {
 			     enum ttm_ref_type ref_type);
 };
 
+
+/**
+ * struct ttm_prime_object - Modified base object that is prime-aware
+ *
+ * @base: struct ttm_base_object that we derive from
+ * @mutex: Mutex protecting the @dma_buf member.
+ * @size: Size of the dma_buf associated with this object
+ * @real_type: Type of the underlying object. Needed since we're setting
+ * the value of @base::object_type to ttm_prime_type
+ * @dma_buf: Non ref-counted pointer to a struct dma_buf created from this
+ * object.
+ * @refcount_release: The underlying object's release method. Needed since
+ * we set @base::refcount_release to our own release method.
+ */
+
+struct ttm_prime_object {
+	struct ttm_base_object base;
+	struct mutex mutex;
+	size_t size;
+	enum ttm_object_type real_type;
+	struct dma_buf *dma_buf;
+	void (*refcount_release) (struct ttm_base_object **);
+};
+
 /**
  * ttm_base_object_init
  *
@@ -248,14 +274,18 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
 /**
  * ttm_object device init - initialize a struct ttm_object_device
  *
+ * @mem_glob: struct ttm_mem_global for memory accounting.
  * @hash_order: Order of hash table used to hash the base objects.
+ * @ops: DMA buf ops for prime objects of this device.
  *
  * This function is typically called on device initialization to prepare
  * data structures needed for ttm base and ref objects.
  */
 
-extern struct ttm_object_device *ttm_object_device_init
-	(struct ttm_mem_global *mem_glob, unsigned int hash_order);
+extern struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+		       unsigned int hash_order,
+		       const struct dma_buf_ops *ops);
 
 /**
  * ttm_object_device_release - release data held by a ttm_object_device
@@ -272,4 +302,31 @@ extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
 
 #define ttm_base_object_kfree(__object, __base)\
 	kfree_rcu(__object, __base.rhead)
+
+extern int ttm_prime_object_init(struct ttm_object_file *tfile,
+				 size_t size,
+				 struct ttm_prime_object *prime,
+				 bool shareable,
+				 enum ttm_object_type type,
+				 void (*refcount_release)
+				 (struct ttm_base_object **),
+				 void (*ref_obj_release)
+				 (struct ttm_base_object *,
+				  enum ttm_ref_type ref_type));
+
+static inline enum ttm_object_type
+ttm_base_object_type(struct ttm_base_object *base)
+{
+	return (base->object_type == ttm_prime_type) ?
+		container_of(base, struct ttm_prime_object, base)->real_type :
+		base->object_type;
+}
+extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+				  int fd, u32 *handle);
+extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+				  uint32_t handle, uint32_t flags,
+				  int *prime_fd);
+
+#define ttm_prime_object_kfree(__obj, __prime) \
+	kfree_rcu(__obj, __prime.base.rhead)
 #endif
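Finally, a usage note on the ttm_base_object_type() helper added here: any pre-existing code that compares base->object_type directly will now see ttm_prime_type for wrapped objects and mis-classify them, which is why the vmwgfx lookups above were converted. A hedged sketch of a correct check, assuming a valid tfile and handle from the surrounding driver code:

/* Sketch: type-checking a looked-up object once prime wrapping exists. */
struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

if (base != NULL && ttm_base_object_type(base) == ttm_buffer_type) {
	/* works for both plain and prime-wrapped buffer objects */
} else if (base != NULL) {
	ttm_base_object_unref(&base);	/* drop the lookup reference */
}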