author		Ingo Molnar <mingo@kernel.org>	2013-12-17 09:27:08 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-12-17 09:27:08 -0500
commit		bb799d3b980eb803ca2da4a4eefbd9308f8d988a (patch)
tree		69fbe0cd6d47b23a50f5e1d87bf7489532fae149 /drivers/gpu/drm/ttm
parent		919fc6e34831d1c2b58bfb5ae261dc3facc9b269 (diff)
parent		319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
Merge tag 'v3.13-rc4' into core/locking
Merge Linux 3.13-rc4, to refresh this rather old tree with the latest fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c		 35
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c	  7
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_vm.c		 26
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	 32
-rw-r--r--	drivers/gpu/drm/ttm/ttm_object.c	254
5 files changed, 326 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d5a646ebe6a..07e02c4bf5a8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	atomic_dec(&bo->glob->bo_count);
 	if (bo->resv == &bo->ttm_resv)
 		reservation_object_fini(&bo->ttm_resv);
-
+	mutex_destroy(&bo->wu_mutex);
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
@@ -1123,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	INIT_LIST_HEAD(&bo->io_reserve_lru);
+	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1704,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 	;
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+/**
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
+ * unreserved
+ *
+ * @bo: Pointer to buffer
+ */
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
+{
+	int ret;
+
+	/*
+	 * In the absence of a wait_unlocked API,
+	 * use the bo::wu_mutex to avoid triggering livelocks due to
+	 * concurrent use of this function. Note that this use of
+	 * bo::wu_mutex can go away if we change locking order to
+	 * mmap_sem -> bo::reserve.
+	 */
+	ret = mutex_lock_interruptible(&bo->wu_mutex);
+	if (unlikely(ret != 0))
+		return -ERESTARTSYS;
+	if (!ww_mutex_is_locked(&bo->resv->lock))
+		goto out_unlock;
+	ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+	ww_mutex_unlock(&bo->resv->lock);
+
+out_unlock:
+	mutex_unlock(&bo->wu_mutex);
+	return ret;
+}
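The hunk above introduces ttm_bo_wait_unreserved(): an interruptible helper that lets a caller holding no buffer locks sleep until a contended reservation is released, with bo::wu_mutex serializing concurrent waiters so they cannot livelock each other. A minimal sketch of the intended calling pattern follows; the wrapper name is hypothetical, while ttm_bo_reserve() and ttm_bo_wait_unreserved() are the TTM calls shown in the diff.

/* Sketch only, not part of the patch: trylock the reservation and, on
 * contention, wait for it to become free before asking the caller to retry. */
static int my_trylock_or_wait(struct ttm_buffer_object *bo)
{
	/* interruptible = true, no_wait = true: a non-blocking reserve */
	int ret = ttm_bo_reserve(bo, true, true, false, NULL);

	if (ret != -EBUSY)
		return ret;			/* 0 on success, or a real error */

	/* Drop any caller-held outer locks before this point, then sleep
	 * until the current holder releases the reservation. */
	ret = ttm_bo_wait_unreserved(bo);	/* may return -ERESTARTSYS */
	return ret ? ret : -EAGAIN;		/* ask the caller to retry */
}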
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4834c463c38b..15b86a94949d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -350,10 +350,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		goto out2;
 
 	/*
-	 * Move nonexistent data. NOP.
+	 * Don't move nonexistent data. Clear destination instead.
 	 */
-	if (old_iomap == NULL && ttm == NULL)
+	if (old_iomap == NULL &&
+	    (ttm == NULL || ttm->state == tt_unpopulated)) {
+		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
+	}
 
 	/*
 	 * TTM might be null for moves within the same region.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ac617f3ecd0c..b249ab9b1eb2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -107,13 +107,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	/*
 	 * Work around locking order reversal in fault / nopfn
 	 * between mmap_sem and bo_reserve: Perform a trylock operation
-	 * for reserve, and if it fails, retry the fault after scheduling.
+	 * for reserve, and if it fails, retry the fault after waiting
+	 * for the buffer to become unreserved.
 	 */
-
-	ret = ttm_bo_reserve(bo, true, true, false, 0);
+	ret = ttm_bo_reserve(bo, true, true, false, NULL);
 	if (unlikely(ret != 0)) {
-		if (ret == -EBUSY)
-			set_need_resched();
+		if (ret != -EBUSY)
+			return VM_FAULT_NOPAGE;
+
+		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				up_read(&vma->vm_mm->mmap_sem);
+				(void) ttm_bo_wait_unreserved(bo);
+			}
+
+			return VM_FAULT_RETRY;
+		}
+
+		/*
+		 * If we'd want to change locking order to
+		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
+		 * instead of retrying the fault...
+		 */
 		return VM_FAULT_NOPAGE;
 	}
 
@@ -123,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	case 0:
 		break;
 	case -EBUSY:
-		set_need_resched();
 	case -ERESTARTSYS:
 		retval = VM_FAULT_NOPAGE;
 		goto out_unlock;
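With set_need_resched() gone, the fault handler follows the regular mmap_sem retry protocol: on a contended reservation it drops mmap_sem (unless FAULT_FLAG_RETRY_NOWAIT forbids it), waits via ttm_bo_wait_unreserved(), and returns VM_FAULT_RETRY so the core MM re-takes mmap_sem and re-issues the fault. In the second hunk, -EBUSY now simply falls through to the -ERESTARTSYS handling; a sketch of the resulting error switch, with the default case reconstructed from context and therefore only assumed:

	switch (ret) {
	case 0:
		break;
	case -EBUSY:		/* falls through: treated like an interrupted wait */
	case -ERESTARTSYS:
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	default:
		retval = VM_FAULT_SIGBUS;	/* assumed: unexpected errors kill the fault */
		goto out_unlock;
	}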
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 6c911789ae5c..479e9418e3d7 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,8 +32,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list,
-					      struct ww_acquire_ctx *ticket)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 
@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list, ticket);
-	ww_acquire_fini(ticket);
+	ttm_eu_backoff_reservation_locked(list);
+	if (ticket)
+		ww_acquire_fini(ticket);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-	ww_acquire_init(ticket, &reservation_ww_class);
+	if (ticket)
+		ww_acquire_init(ticket, &reservation_ww_class);
 retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
@@ -139,16 +140,17 @@ retry:
 		if (entry->reserved)
 			continue;
 
-
-		ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
+		ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
+					   ticket);
 
 		if (ret == -EDEADLK) {
 			/* uh oh, we lost out, drop every reservation and try
 			 * to only reserve this buffer, then start over if
 			 * this succeeds.
 			 */
+			BUG_ON(ticket == NULL);
 			spin_lock(&glob->lru_lock);
-			ttm_eu_backoff_reservation_locked(list, ticket);
+			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
 			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -175,7 +177,8 @@ retry:
 		}
 	}
 
-	ww_acquire_done(ticket);
+	if (ticket)
+		ww_acquire_done(ticket);
 	spin_lock(&glob->lru_lock);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
@@ -184,12 +187,14 @@ retry:
 
 err:
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list, ticket);
+	ttm_eu_backoff_reservation_locked(list);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
 err_fini:
-	ww_acquire_done(ticket);
-	ww_acquire_fini(ticket);
+	if (ticket) {
+		ww_acquire_done(ticket);
+		ww_acquire_fini(ticket);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	}
 	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
-	ww_acquire_fini(ticket);
+	if (ticket)
+		ww_acquire_fini(ticket);
 
 	list_for_each_entry(entry, list, head) {
 		if (entry->old_sync_obj)
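The execbuf helpers now tolerate a NULL ww_acquire_ctx: without a ticket the per-buffer reserve becomes non-blocking (no_wait is (ticket == NULL)) and no ww_acquire bookkeeping is performed, while the -EDEADLK backoff-and-slow-lock path remains ticket-only, hence the BUG_ON. A hedged sketch of the two calling conventions, assuming the 3.13-era signatures; validate_list and sync_obj are placeholder driver state:

	struct ww_acquire_ctx ticket;
	struct list_head validate_list;		/* of struct ttm_validate_buffer */
	void *sync_obj = NULL;			/* driver fence object, placeholder */
	int ret;

	/* Ticketed path: full wait/wound deadlock avoidance across the list. */
	ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
	if (ret == 0)
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, sync_obj);

	/* New ticket-less path: per-buffer reserves do not block, so the call
	 * fails (typically -EBUSY) on contention instead of waiting, and the
	 * helpers skip ww_acquire_init/done/fini entirely. */
	ret = ttm_eu_reserve_buffers(NULL, &validate_list);
	if (ret == 0)
		ttm_eu_fence_buffer_objects(NULL, &validate_list, sync_obj);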
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index a868176c258a..6fe7b92a82d1 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,12 @@
  **************************************************************************/
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * While no substantial code is shared, the prime code is inspired by
+ * drm_prime.c, with
+ * Authors:
+ *      Dave Airlie <airlied@redhat.com>
+ *      Rob Clark <rob.clark@linaro.org>
  */
 /** @file ttm_ref_object.c
  *
@@ -34,6 +40,7 @@
  * and release on file close.
  */
 
+
 /**
  * struct ttm_object_file
  *
@@ -84,6 +91,9 @@ struct ttm_object_device {
 	struct drm_open_hash object_hash;
 	atomic_t object_count;
 	struct ttm_mem_global *mem_glob;
+	struct dma_buf_ops ops;
+	void (*dmabuf_release)(struct dma_buf *dma_buf);
+	size_t dma_buf_size;
 };
 
 /**
@@ -116,6 +126,8 @@ struct ttm_ref_object {
 	struct ttm_object_file *tfile;
 };
 
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
+
 static inline struct ttm_object_file *
 ttm_object_file_ref(struct ttm_object_file *tfile)
 {
@@ -416,9 +428,10 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_object_file_init);
 
-struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
-						 *mem_glob,
-						 unsigned int hash_order)
+struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+		       unsigned int hash_order,
+		       const struct dma_buf_ops *ops)
 {
 	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
 	int ret;
@@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
 	spin_lock_init(&tdev->object_lock);
 	atomic_set(&tdev->object_count, 0);
 	ret = drm_ht_create(&tdev->object_hash, hash_order);
+	if (ret != 0)
+		goto out_no_object_hash;
 
-	if (likely(ret == 0))
-		return tdev;
+	tdev->ops = *ops;
+	tdev->dmabuf_release = tdev->ops.release;
+	tdev->ops.release = ttm_prime_dmabuf_release;
+	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
+		ttm_round_pot(sizeof(struct file));
+	return tdev;
 
+out_no_object_hash:
 	kfree(tdev);
 	return NULL;
 }
@@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 	kfree(tdev);
 }
 EXPORT_SYMBOL(ttm_object_device_release);
+
+/**
+ * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
+ *
+ * @dma_buf: Non-refcounted pointer to a struct dma-buf.
+ *
+ * Obtain a file reference from a lookup structure that doesn't refcount
+ * the file, but synchronizes with its release method to make sure it has
+ * not been freed yet. See for example kref_get_unless_zero documentation.
+ * Returns true if refcounting succeeds, false otherwise.
+ *
+ * Nobody really wants this as a public API yet, so let it mature here
+ * for some time...
+ */
+static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
+{
+	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+}
+
+/**
+ * ttm_prime_refcount_release - refcount release method for a prime object.
+ *
+ * @p_base: Pointer to ttm_base_object pointer.
+ *
+ * This is a wrapper that calls the refcount_release function of the
+ * underlying object. At the same time it cleans up the prime object.
+ * This function is called when all references to the base object we
+ * derive from are gone.
+ */
+static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct ttm_prime_object *prime;
+
+	*p_base = NULL;
+	prime = container_of(base, struct ttm_prime_object, base);
+	BUG_ON(prime->dma_buf != NULL);
+	mutex_destroy(&prime->mutex);
+	if (prime->refcount_release)
+		prime->refcount_release(&base);
+}
+
+/**
+ * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
+ *
+ * @dma_buf: The struct dma_buf being released.
+ *
+ * This function first calls the dma_buf release method the driver
+ * provides. Then it cleans up our dma_buf pointer used for lookup,
+ * and finally releases the reference the dma_buf has on our base
+ * object.
+ */
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
+{
+	struct ttm_prime_object *prime =
+		(struct ttm_prime_object *) dma_buf->priv;
+	struct ttm_base_object *base = &prime->base;
+	struct ttm_object_device *tdev = base->tfile->tdev;
+
+	if (tdev->dmabuf_release)
+		tdev->dmabuf_release(dma_buf);
+	mutex_lock(&prime->mutex);
+	if (prime->dma_buf == dma_buf)
+		prime->dma_buf = NULL;
+	mutex_unlock(&prime->mutex);
+	ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
+	ttm_base_object_unref(&base);
+}
+
+/**
+ * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @fd: The prime / dmabuf fd.
+ * @handle: The returned handle.
+ *
+ * This function returns a handle to an object that previously exported
+ * a dma-buf. Note that we don't handle imports yet, because we simply
+ * have no consumers of that implementation.
+ */
+int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+			   int fd, u32 *handle)
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	struct dma_buf *dma_buf;
+	struct ttm_prime_object *prime;
+	struct ttm_base_object *base;
+	int ret;
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR(dma_buf))
+		return PTR_ERR(dma_buf);
+
+	if (dma_buf->ops != &tdev->ops)
+		return -ENOSYS;
+
+	prime = (struct ttm_prime_object *) dma_buf->priv;
+	base = &prime->base;
+	*handle = base->hash.key;
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+
+	dma_buf_put(dma_buf);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
+
+/**
+ * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
+ *
+ * @tfile: Struct ttm_object_file identifying the caller.
+ * @handle: Handle to the object we're exporting from.
+ * @flags: flags for dma-buf creation. We just pass them on.
+ * @prime_fd: The returned file descriptor.
+ *
+ */
+int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+			   uint32_t handle, uint32_t flags,
+			   int *prime_fd)
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	struct ttm_base_object *base;
+	struct dma_buf *dma_buf;
+	struct ttm_prime_object *prime;
+	int ret;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL ||
+		     base->object_type != ttm_prime_type)) {
+		ret = -ENOENT;
+		goto out_unref;
+	}
+
+	prime = container_of(base, struct ttm_prime_object, base);
+	if (unlikely(!base->shareable)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+
+	ret = mutex_lock_interruptible(&prime->mutex);
+	if (unlikely(ret != 0)) {
+		ret = -ERESTARTSYS;
+		goto out_unref;
+	}
+
+	dma_buf = prime->dma_buf;
+	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
+
+		/*
+		 * Need to create a new dma_buf, with memory accounting.
+		 */
+		ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
+					   false, true);
+		if (unlikely(ret != 0)) {
+			mutex_unlock(&prime->mutex);
+			goto out_unref;
+		}
+
+		dma_buf = dma_buf_export(prime, &tdev->ops,
+					 prime->size, flags);
+		if (IS_ERR(dma_buf)) {
+			ret = PTR_ERR(dma_buf);
+			ttm_mem_global_free(tdev->mem_glob,
+					    tdev->dma_buf_size);
+			mutex_unlock(&prime->mutex);
+			goto out_unref;
+		}
+
+		/*
+		 * dma_buf has taken the base object reference
+		 */
+		base = NULL;
+		prime->dma_buf = dma_buf;
+	}
+	mutex_unlock(&prime->mutex);
+
+	ret = dma_buf_fd(dma_buf, flags);
+	if (ret >= 0) {
+		*prime_fd = ret;
+		ret = 0;
+	} else
+		dma_buf_put(dma_buf);
+
+out_unref:
+	if (base)
+		ttm_base_object_unref(&base);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
+
+/**
+ * ttm_prime_object_init - Initialize a ttm_prime_object
+ *
+ * @tfile: struct ttm_object_file identifying the caller
+ * @size: The size of the dma_bufs we export.
+ * @prime: The object to be initialized.
+ * @shareable: See ttm_base_object_init
+ * @type: See ttm_base_object_init
+ * @refcount_release: See ttm_base_object_init
+ * @ref_obj_release: See ttm_base_object_init
+ *
+ * Initializes an object which is compatible with the drm_prime model
+ * for data sharing between processes and devices.
+ */
+int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
+			  struct ttm_prime_object *prime, bool shareable,
+			  enum ttm_object_type type,
+			  void (*refcount_release) (struct ttm_base_object **),
+			  void (*ref_obj_release) (struct ttm_base_object *,
+						   enum ttm_ref_type ref_type))
+{
+	mutex_init(&prime->mutex);
+	prime->size = PAGE_ALIGN(size);
+	prime->real_type = type;
+	prime->dma_buf = NULL;
+	prime->refcount_release = refcount_release;
+	return ttm_base_object_init(tfile, &prime->base, shareable,
+				    ttm_prime_type,
+				    ttm_prime_refcount_release,
+				    ref_obj_release);
+}
+EXPORT_SYMBOL(ttm_prime_object_init);
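Taken together, the ttm_object.c additions give drivers a dma-buf based prime implementation: ttm_object_device_init() now takes the driver's dma_buf_ops (whose release hook TTM wraps for lookup cleanup), ttm_prime_object_init() makes a base object exportable, and ttm_prime_handle_to_fd() / ttm_prime_fd_to_handle() back the DRM prime ioctls. A hedged sketch of how a driver might wire this up; every name prefixed my_, plus tdev, tfile, mem_glob, obj_size, handle, flags and prime_fd, is hypothetical driver state, while the ttm_* calls are the ones added above:

	/* Device setup: TTM copies the ops and chains its own release hook
	 * (ttm_prime_dmabuf_release) in front of the driver's. */
	static const struct dma_buf_ops my_prime_dmabuf_ops = {
		/* driver map/unmap/mmap callbacks, optional .release ... */
	};

	tdev = ttm_object_device_init(mem_glob, 12 /* hash_order */,
				      &my_prime_dmabuf_ops);

	/* Per-object setup: a shareable base object that can be exported. */
	ret = ttm_prime_object_init(tfile, obj_size, &my_obj->prime,
				    true /* shareable */, my_object_type,
				    my_refcount_release, NULL);

	/* Backends for the DRM prime handle<->fd ioctls. */
	ret = ttm_prime_handle_to_fd(tfile, handle, flags, &prime_fd);
	ret = ttm_prime_fd_to_handle(tfile, prime_fd, &handle);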