aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/radeon/radeon_object.h
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.h')
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h151
1 file changed, 139 insertions, 12 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 10e8af6bb456..7ab43de1e244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -28,19 +28,146 @@
28#ifndef __RADEON_OBJECT_H__ 28#ifndef __RADEON_OBJECT_H__
29#define __RADEON_OBJECT_H__ 29#define __RADEON_OBJECT_H__
30 30
31#include <ttm/ttm_bo_api.h> 31#include <drm/radeon_drm.h>
32#include <ttm/ttm_bo_driver.h> 32#include "radeon.h"
33#include <ttm/ttm_placement.h>
34#include <ttm/ttm_module.h>
35 33
36/* 34/**
37 * TTM. 35 * radeon_mem_type_to_domain - return domain corresponding to mem_type
36 * @mem_type: ttm memory type
37 *
38 * Returns corresponding domain of the ttm mem_type
39 */
40static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
41{
42 switch (mem_type) {
43 case TTM_PL_VRAM:
44 return RADEON_GEM_DOMAIN_VRAM;
45 case TTM_PL_TT:
46 return RADEON_GEM_DOMAIN_GTT;
47 case TTM_PL_SYSTEM:
48 return RADEON_GEM_DOMAIN_CPU;
49 default:
50 break;
51 }
52 return 0;
53}
54
55/**
56 * radeon_bo_reserve - reserve bo
57 * @bo: bo structure
58 * @no_wait: don't sleep while trying to reserve (return -EBUSY)
59 *
60 * Returns:
61 * -EBUSY: buffer is busy and @no_wait is true
62 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
63 * a signal. Release all buffer reservations and return to user-space.
38 */ 64 */
39struct radeon_mman { 65static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
40 struct ttm_bo_global_ref bo_global_ref; 66{
41 struct ttm_global_reference mem_global_ref; 67 int r;
42 bool mem_global_referenced; 68
43 struct ttm_bo_device bdev; 69 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
44}; 70 if (unlikely(r != 0)) {
71 if (r != -ERESTARTSYS)
72 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
73 return r;
74 }
75 return 0;
76}
77
78static inline void radeon_bo_unreserve(struct radeon_bo *bo)
79{
80 ttm_bo_unreserve(&bo->tbo);
81}
82
83/**
84 * radeon_bo_gpu_offset - return GPU offset of bo
85 * @bo: radeon object for which we query the offset
86 *
87 * Returns current GPU offset of the object.
88 *
89 * Note: object should either be pinned or reserved when calling this
90 * function, it might be usefull to add check for this for debugging.
91 */
92static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
93{
94 return bo->tbo.offset;
95}
96
97static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
98{
99 return bo->tbo.num_pages << PAGE_SHIFT;
100}
101
102static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
103{
104 return !!atomic_read(&bo->tbo.reserved);
105}
106
107/**
108 * radeon_bo_mmap_offset - return mmap offset of bo
109 * @bo: radeon object for which we query the offset
110 *
111 * Returns mmap offset of the object.
112 *
113 * Note: addr_space_offset is constant after ttm bo init thus isn't protected
114 * by any lock.
115 */
116static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
117{
118 return bo->tbo.addr_space_offset;
119}
120
121static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
122 bool no_wait)
123{
124 int r;
125
126 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
127 if (unlikely(r != 0)) {
128 if (r != -ERESTARTSYS)
129 dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
130 return r;
131 }
132 spin_lock(&bo->tbo.lock);
133 if (mem_type)
134 *mem_type = bo->tbo.mem.mem_type;
135 if (bo->tbo.sync_obj)
136 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
137 spin_unlock(&bo->tbo.lock);
138 ttm_bo_unreserve(&bo->tbo);
139 return r;
140}
45 141
142extern int radeon_bo_create(struct radeon_device *rdev,
143 struct drm_gem_object *gobj, unsigned long size,
144 bool kernel, u32 domain,
145 struct radeon_bo **bo_ptr);
146extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
147extern void radeon_bo_kunmap(struct radeon_bo *bo);
148extern void radeon_bo_unref(struct radeon_bo **bo);
149extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
150extern int radeon_bo_unpin(struct radeon_bo *bo);
151extern int radeon_bo_evict_vram(struct radeon_device *rdev);
152extern void radeon_bo_force_delete(struct radeon_device *rdev);
153extern int radeon_bo_init(struct radeon_device *rdev);
154extern void radeon_bo_fini(struct radeon_device *rdev);
155extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
156 struct list_head *head);
157extern int radeon_bo_list_reserve(struct list_head *head);
158extern void radeon_bo_list_unreserve(struct list_head *head);
159extern int radeon_bo_list_validate(struct list_head *head);
160extern void radeon_bo_list_fence(struct list_head *head, void *fence);
161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
162 struct vm_area_struct *vma);
163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
164 u32 tiling_flags, u32 pitch);
165extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
166 u32 *tiling_flags, u32 *pitch);
167extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
168 bool force_drop);
169extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
170 struct ttm_mem_reg *mem);
171extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
172extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
46#endif 173#endif