Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.h')
 -rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 157
 1 file changed, 145 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 10e8af6bb456..e9da13077e2f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -28,19 +28,152 @@
 #ifndef __RADEON_OBJECT_H__
 #define __RADEON_OBJECT_H__
 
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
 
-/*
- * TTM.
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type: ttm memory type
+ *
+ * Returns corresponding domain of the ttm mem_type
+ */
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+        switch (mem_type) {
+        case TTM_PL_VRAM:
+                return RADEON_GEM_DOMAIN_VRAM;
+        case TTM_PL_TT:
+                return RADEON_GEM_DOMAIN_GTT;
+        case TTM_PL_SYSTEM:
+                return RADEON_GEM_DOMAIN_CPU;
+        default:
+                break;
+        }
+        return 0;
+}
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo: bo structure
+ * @no_wait: don't sleep while trying to reserve (return -EBUSY)
+ *
+ * Returns:
+ * -EBUSY: buffer is busy and @no_wait is true
+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+{
+        int r;
+
+retry:
+        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+        if (unlikely(r != 0)) {
+                if (r == -ERESTART)
+                        goto retry;
+                dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+                return r;
+        }
+        return 0;
+}
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+        ttm_bo_unreserve(&bo->tbo);
+}
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: object should either be pinned or reserved when calling this
+ * function, it might be usefull to add check for this for debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+        return bo->tbo.offset;
+}
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+        return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+        return !!atomic_read(&bo->tbo.reserved);
+}
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ *
+ * Note: addr_space_offset is constant after ttm bo init thus isn't protected
+ * by any lock.
  */
-struct radeon_mman {
-        struct ttm_bo_global_ref        bo_global_ref;
-        struct ttm_global_reference     mem_global_ref;
-        bool                            mem_global_referenced;
-        struct ttm_bo_device            bdev;
-};
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+        return bo->tbo.addr_space_offset;
+}
+
+static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+                                 bool no_wait)
+{
+        int r;
+
+retry:
+        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+        if (unlikely(r != 0)) {
+                if (r == -ERESTART)
+                        goto retry;
+                dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+                return r;
+        }
+        spin_lock(&bo->tbo.lock);
+        if (mem_type)
+                *mem_type = bo->tbo.mem.mem_type;
+        if (bo->tbo.sync_obj)
+                r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+        spin_unlock(&bo->tbo.lock);
+        ttm_bo_unreserve(&bo->tbo);
+        if (unlikely(r == -ERESTART))
+                goto retry;
+        return r;
+}
+
+extern int radeon_bo_create(struct radeon_device *rdev,
+                            struct drm_gem_object *gobj, unsigned long size,
+                            bool kernel, u32 domain,
+                            struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+                                      struct list_head *head);
+extern int radeon_bo_list_reserve(struct list_head *head);
+extern void radeon_bo_list_unreserve(struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, void *fence);
+extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+                                struct vm_area_struct *vma);
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+                                      u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+                                       u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+                                  bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+                                  struct ttm_mem_reg *mem);
+extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 
 #endif
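
For reference, the sketch below is not part of the patch; it is a hypothetical caller showing how the radeon_bo_* API declared in this header might be driven: create a buffer object, reserve it, pin it to obtain a GPU address, then drop the reservation. The function name, the scratch-buffer purpose, and the error-handling details are invented for illustration; only the radeon_bo_* prototypes come from the header above, and the usual kernel convention of returning 0 on success and a negative errno on failure is assumed.

/* Hypothetical usage sketch -- not part of this patch. */
#include "radeon.h"
#include "radeon_object.h"

/* Allocate a page-sized VRAM buffer, pin it and return its GPU address. */
static int radeon_example_scratch_create(struct radeon_device *rdev,
                                         struct radeon_bo **bo_ptr,
                                         u64 *gpu_addr)
{
        struct radeon_bo *bo;
        int r;

        /* kernel == true is assumed to mean a driver-internal object
         * with no GEM handle attached (gobj == NULL). */
        r = radeon_bo_create(rdev, NULL, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, &bo);
        if (r)
                return r;

        /* Pin under the reservation taken with the helper above. */
        r = radeon_bo_reserve(bo, false);
        if (r) {
                radeon_bo_unref(&bo);
                return r;
        }

        r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
        radeon_bo_unreserve(bo);
        if (r) {
                radeon_bo_unref(&bo);
                return r;
        }

        *bo_ptr = bo;
        return 0;
}

Teardown would mirror this pattern with radeon_bo_reserve(), radeon_bo_unpin() and radeon_bo_unreserve(), followed by radeon_bo_unref().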