path: root/drivers/gpu/drm/radeon/radeon_object.c
author	Jerome Glisse <jglisse@redhat.com>	2009-11-20 08:29:23 -0500
committer	Dave Airlie <airlied@redhat.com>	2009-12-01 23:00:18 -0500
commit	4c7886791264f03428d5424befb1b96f08fc90f4 (patch)
tree	2c644931001b06969fb3038e7beb68db436c4872 /drivers/gpu/drm/radeon/radeon_object.c
parent	1614f8b17b8cc3ad143541d41569623d30dbc9ec (diff)
drm/radeon/kms: Rework radeon object handling
The locking & protection of radeon objects was somewhat messy. This patch completely reworks it to use ttm reserve as the protection for the radeon object structure members. It also shrinks the various radeon object structures by removing fields that were redundant with the ttm information. Finally, it converts a few simple functions to inlines, which should help performance.

airlied: rebase on top of r600 and other changes.

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
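To illustrate the protection scheme the message describes (callers take the TTM reservation before touching radeon_bo members), here is a minimal sketch of the small inline helpers this rework introduces in radeon_object.h. That header is not part of this file's diff, so the names, exact signatures and the -ERESTART retry are reconstructed from their call sites below and should be treated as assumptions:

/* Sketch only: approximates the inline helpers added to radeon_object.h,
 * reconstructed from how the diff below uses them. */
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
	int r;

retry:
	/* ttm_bo_reserve() is the lock that now protects radeon_bo members */
	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r == -ERESTART)
			goto retry;
		dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
{
	/* replaces the removed radeon_object_gpu_addr()/gpu_addr field:
	 * TTM already tracks the offset of the validated placement
	 * (assumed to be bo->tbo.offset here) */
	return bo->tbo.offset;
}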
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.c	539
1 file changed, 213 insertions(+), 326 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 98835f51e35e..bec494384825 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,74 +34,32 @@
 #include "radeon_drm.h"
 #include "radeon.h"
 
-struct radeon_object {
-	struct ttm_buffer_object tobj;
-	struct list_head list;
-	struct radeon_device *rdev;
-	struct drm_gem_object *gobj;
-	struct ttm_bo_kmap_obj kmap;
-	unsigned pin_count;
-	uint64_t gpu_addr;
-	void *kptr;
-	bool is_iomem;
-	uint32_t tiling_flags;
-	uint32_t pitch;
-	int surface_reg;
-};
 
 int radeon_ttm_init(struct radeon_device *rdev);
 void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
 
 /*
  * To exclude mutual BO access we rely on bo_reserve exclusion, as all
  * function are calling it.
  */
 
-static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
-	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
-}
+	struct radeon_bo *bo;
 
-static void radeon_object_unreserve(struct radeon_object *robj)
-{
-	ttm_bo_unreserve(&robj->tobj);
+	bo = container_of(tbo, struct radeon_bo, tbo);
+	mutex_lock(&bo->rdev->gem.mutex);
+	list_del_init(&bo->list);
+	mutex_unlock(&bo->rdev->gem.mutex);
+	radeon_bo_clear_surface_reg(bo);
+	kfree(bo);
 }
 
-static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
+static inline u32 radeon_ttm_flags_from_domain(u32 domain)
 {
-	struct radeon_object *robj;
-
-	robj = container_of(tobj, struct radeon_object, tobj);
-	list_del_init(&robj->list);
-	radeon_object_clear_surface_reg(robj);
-	kfree(robj);
-}
-
-static inline void radeon_object_gpu_addr(struct radeon_object *robj)
-{
-	/* Default gpu address */
-	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
-	if (robj->tobj.mem.mm_node == NULL) {
-		return;
-	}
-	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
-	switch (robj->tobj.mem.mem_type) {
-	case TTM_PL_VRAM:
-		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
-		break;
-	case TTM_PL_TT:
-		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
-		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
-		return;
-	}
-}
+	u32 flags = 0;
 
-static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
-{
-	uint32_t flags = 0;
 	if (domain & RADEON_GEM_DOMAIN_VRAM) {
 		flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
 	}
@@ -117,17 +75,13 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
 	return flags;
 }
 
-int radeon_object_create(struct radeon_device *rdev,
-			 struct drm_gem_object *gobj,
-			 unsigned long size,
-			 bool kernel,
-			 uint32_t domain,
-			 bool interruptible,
-			 struct radeon_object **robj_ptr)
+int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+		     unsigned long size, bool kernel, u32 domain,
+		     struct radeon_bo **bo_ptr)
 {
-	struct radeon_object *robj;
+	struct radeon_bo *bo;
 	enum ttm_bo_type type;
-	uint32_t flags;
+	u32 flags;
 	int r;
 
 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,207 +92,140 @@ int radeon_object_create(struct radeon_device *rdev,
 	} else {
 		type = ttm_bo_type_device;
 	}
-	*robj_ptr = NULL;
-	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
-	if (robj == NULL) {
+	*bo_ptr = NULL;
+	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+	if (bo == NULL)
 		return -ENOMEM;
-	}
-	robj->rdev = rdev;
-	robj->gobj = gobj;
-	robj->surface_reg = -1;
-	INIT_LIST_HEAD(&robj->list);
-
-	flags = radeon_object_flags_from_domain(domain);
-	r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
-				   0, 0, false, NULL, size,
-				   &radeon_ttm_object_object_destroy);
+	bo->rdev = rdev;
+	bo->gobj = gobj;
+	bo->surface_reg = -1;
+	INIT_LIST_HEAD(&bo->list);
+
+	flags = radeon_ttm_flags_from_domain(domain);
+retry:
+	r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
+				   flags, 0, 0, true, NULL, size,
+				   &radeon_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
+		if (r == -ERESTART)
+			goto retry;
 		/* ttm call radeon_ttm_object_object_destroy if error happen */
-		DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
-			  size, flags, 0);
+		dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n",
+			size, flags);
 		return r;
 	}
-	*robj_ptr = robj;
+	*bo_ptr = bo;
 	if (gobj) {
-		list_add_tail(&robj->list, &rdev->gem.objects);
+		mutex_lock(&bo->rdev->gem.mutex);
+		list_add_tail(&bo->list, &rdev->gem.objects);
+		mutex_unlock(&bo->rdev->gem.mutex);
 	}
 	return 0;
 }
 
-int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 {
+	bool is_iomem;
 	int r;
 
-	spin_lock(&robj->tobj.lock);
-	if (robj->kptr) {
+	if (bo->kptr) {
 		if (ptr) {
-			*ptr = robj->kptr;
+			*ptr = bo->kptr;
 		}
-		spin_unlock(&robj->tobj.lock);
 		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
 	if (r) {
 		return r;
 	}
-	spin_lock(&robj->tobj.lock);
-	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
-	spin_unlock(&robj->tobj.lock);
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 	if (ptr) {
-		*ptr = robj->kptr;
+		*ptr = bo->kptr;
 	}
-	radeon_object_check_tiling(robj, 0, 0);
+	radeon_bo_check_tiling(bo, 0, 0);
 	return 0;
 }
 
-void radeon_object_kunmap(struct radeon_object *robj)
+void radeon_bo_kunmap(struct radeon_bo *bo)
 {
-	spin_lock(&robj->tobj.lock);
-	if (robj->kptr == NULL) {
-		spin_unlock(&robj->tobj.lock);
+	if (bo->kptr == NULL)
 		return;
-	}
-	robj->kptr = NULL;
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_check_tiling(robj, 0, 0);
-	ttm_bo_kunmap(&robj->kmap);
+	bo->kptr = NULL;
+	radeon_bo_check_tiling(bo, 0, 0);
+	ttm_bo_kunmap(&bo->kmap);
 }
 
-void radeon_object_unref(struct radeon_object **robj)
+void radeon_bo_unref(struct radeon_bo **bo)
 {
-	struct ttm_buffer_object *tobj;
+	struct ttm_buffer_object *tbo;
 
-	if ((*robj) == NULL) {
+	if ((*bo) == NULL)
 		return;
-	}
-	tobj = &((*robj)->tobj);
-	ttm_bo_unref(&tobj);
-	if (tobj == NULL) {
-		*robj = NULL;
-	}
-}
-
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
-{
-	*offset = robj->tobj.addr_space_offset;
-	return 0;
+	tbo = &((*bo)->tbo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
 }
 
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
-		      uint64_t *gpu_addr)
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 {
-	uint32_t flags;
-	uint32_t tmp;
+	u32 flags;
+	u32 tmp;
 	int r;
 
-	flags = radeon_object_flags_from_domain(domain);
-	spin_lock(&robj->tobj.lock);
-	if (robj->pin_count) {
-		robj->pin_count++;
-		if (gpu_addr != NULL) {
-			*gpu_addr = robj->gpu_addr;
-		}
-		spin_unlock(&robj->tobj.lock);
+	flags = radeon_ttm_flags_from_domain(domain);
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
 		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	r = radeon_object_reserve(robj, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
-		return r;
-	}
-	tmp = robj->tobj.mem.placement;
+	tmp = bo->tbo.mem.placement;
 	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
-	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
-	r = ttm_buffer_object_validate(&robj->tobj,
-				       robj->tobj.proposed_placement,
-				       false, false);
-	radeon_object_gpu_addr(robj);
-	if (gpu_addr != NULL) {
-		*gpu_addr = robj->gpu_addr;
+	bo->tbo.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT |
+					TTM_PL_MASK_CACHING;
+retry:
+	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
+					true, false);
+	if (likely(r == 0)) {
+		bo->pin_count = 1;
+		if (gpu_addr != NULL)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
 	}
-	robj->pin_count = 1;
 	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to pin object.\n");
+		if (r == -ERESTART)
+			goto retry;
+		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
 	}
-	radeon_object_unreserve(robj);
 	return r;
 }
 
-void radeon_object_unpin(struct radeon_object *robj)
+int radeon_bo_unpin(struct radeon_bo *bo)
 {
-	uint32_t flags;
 	int r;
 
-	spin_lock(&robj->tobj.lock);
-	if (!robj->pin_count) {
-		spin_unlock(&robj->tobj.lock);
-		printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
-		return;
-	}
-	robj->pin_count--;
-	if (robj->pin_count) {
-		spin_unlock(&robj->tobj.lock);
-		return;
-	}
-	spin_unlock(&robj->tobj.lock);
-	r = radeon_object_reserve(robj, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
-		return;
-	}
-	flags = robj->tobj.mem.placement;
-	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_buffer_object_validate(&robj->tobj,
-				       robj->tobj.proposed_placement,
-				       false, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to unpin buffer.\n");
-	}
-	radeon_object_unreserve(robj);
-}
-
-int radeon_object_wait(struct radeon_object *robj)
-{
-	int r = 0;
-
-	/* FIXME: should use block reservation instead */
-	r = radeon_object_reserve(robj, true);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
-		return r;
-	}
-	spin_lock(&robj->tobj.lock);
-	if (robj->tobj.sync_obj) {
-		r = ttm_bo_wait(&robj->tobj, true, true, false);
+	if (!bo->pin_count) {
+		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_unreserve(robj);
-	radeon_hdp_flush(robj->rdev);
-	return r;
-}
-
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
-{
-	int r = 0;
-
-	r = radeon_object_reserve(robj, true);
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	bo->tbo.proposed_placement = bo->tbo.mem.placement &
+					~TTM_PL_FLAG_NO_EVICT;
+retry:
+	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
+					true, false);
 	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
+		if (r == -ERESTART)
+			goto retry;
+		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 		return r;
 	}
-	spin_lock(&robj->tobj.lock);
-	*cur_placement = robj->tobj.mem.mem_type;
-	if (robj->tobj.sync_obj) {
-		r = ttm_bo_wait(&robj->tobj, true, true, true);
-	}
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_unreserve(robj);
-	return r;
+	return 0;
 }
 
-int radeon_object_evict_vram(struct radeon_device *rdev)
+int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
 	if (rdev->flags & RADEON_IS_IGP) {
 		/* Useless to evict on IGP chips */
@@ -347,30 +234,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev)
 	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 }
 
-void radeon_object_force_delete(struct radeon_device *rdev)
+void radeon_bo_force_delete(struct radeon_device *rdev)
 {
-	struct radeon_object *robj, *n;
+	struct radeon_bo *bo, *n;
 	struct drm_gem_object *gobj;
 
 	if (list_empty(&rdev->gem.objects)) {
 		return;
 	}
-	DRM_ERROR("Userspace still has active objects !\n");
-	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+	dev_err(rdev->dev, "Userspace still has active objects !\n");
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 		mutex_lock(&rdev->ddev->struct_mutex);
-		gobj = robj->gobj;
-		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
-			  gobj, robj, (unsigned long)gobj->size,
+		gobj = bo->gobj;
+		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+			gobj, bo, (unsigned long)gobj->size,
 			*((unsigned long *)&gobj->refcount));
-		list_del_init(&robj->list);
-		radeon_object_unref(&robj);
+		mutex_lock(&bo->rdev->gem.mutex);
+		list_del_init(&bo->list);
+		mutex_unlock(&bo->rdev->gem.mutex);
+		radeon_bo_unref(&bo);
 		gobj->driver_private = NULL;
 		drm_gem_object_unreference(gobj);
 		mutex_unlock(&rdev->ddev->struct_mutex);
 	}
 }
 
-int radeon_object_init(struct radeon_device *rdev)
+int radeon_bo_init(struct radeon_device *rdev)
 {
 	/* Add an MTRR for the VRAM */
 	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -383,13 +272,13 @@ int radeon_object_init(struct radeon_device *rdev)
 	return radeon_ttm_init(rdev);
 }
 
-void radeon_object_fini(struct radeon_device *rdev)
+void radeon_bo_fini(struct radeon_device *rdev)
 {
 	radeon_ttm_fini(rdev);
 }
 
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				struct list_head *head)
 {
 	if (lobj->wdomain) {
 		list_add(&lobj->list, head);
@@ -398,72 +287,67 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
 	}
 }
 
-int radeon_object_list_reserve(struct list_head *head)
+int radeon_bo_list_reserve(struct list_head *head)
 {
-	struct radeon_object_list *lobj;
+	struct radeon_bo_list *lobj;
 	int r;
 
 	list_for_each_entry(lobj, head, list){
-		if (!lobj->robj->pin_count) {
-			r = radeon_object_reserve(lobj->robj, true);
-			if (unlikely(r != 0)) {
-				DRM_ERROR("radeon: failed to reserve object.\n");
-				return r;
-			}
-		} else {
-		}
+		r = radeon_bo_reserve(lobj->bo, false);
+		if (unlikely(r != 0))
+			return r;
 	}
 	return 0;
 }
 
-void radeon_object_list_unreserve(struct list_head *head)
+void radeon_bo_list_unreserve(struct list_head *head)
 {
-	struct radeon_object_list *lobj;
+	struct radeon_bo_list *lobj;
 
 	list_for_each_entry(lobj, head, list) {
-		if (!lobj->robj->pin_count) {
-			radeon_object_unreserve(lobj->robj);
-		}
+		/* only unreserve object we successfully reserved */
+		if (radeon_bo_is_reserved(lobj->bo))
+			radeon_bo_unreserve(lobj->bo);
 	}
 }
 
-int radeon_object_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head, void *fence)
 {
-	struct radeon_object_list *lobj;
-	struct radeon_object *robj;
+	struct radeon_bo_list *lobj;
+	struct radeon_bo *bo;
 	struct radeon_fence *old_fence = NULL;
 	int r;
 
-	r = radeon_object_list_reserve(head);
+	r = radeon_bo_list_reserve(head);
 	if (unlikely(r != 0)) {
-		radeon_object_list_unreserve(head);
 		return r;
 	}
 	list_for_each_entry(lobj, head, list) {
-		robj = lobj->robj;
-		if (!robj->pin_count) {
+		bo = lobj->bo;
+		if (!bo->pin_count) {
 			if (lobj->wdomain) {
-				robj->tobj.proposed_placement =
-					radeon_object_flags_from_domain(lobj->wdomain);
+				bo->tbo.proposed_placement =
+					radeon_ttm_flags_from_domain(lobj->wdomain);
 			} else {
-				robj->tobj.proposed_placement =
-					radeon_object_flags_from_domain(lobj->rdomain);
+				bo->tbo.proposed_placement =
+					radeon_ttm_flags_from_domain(lobj->rdomain);
 			}
-			r = ttm_buffer_object_validate(&robj->tobj,
-						       robj->tobj.proposed_placement,
-						       true, false);
+retry:
+			r = ttm_buffer_object_validate(&bo->tbo,
+						bo->tbo.proposed_placement,
+						true, false);
 			if (unlikely(r)) {
-				DRM_ERROR("radeon: failed to validate.\n");
+				if (r == -ERESTART)
+					goto retry;
 				return r;
 			}
-			radeon_object_gpu_addr(robj);
 		}
-		lobj->gpu_offset = robj->gpu_addr;
-		lobj->tiling_flags = robj->tiling_flags;
+		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+		lobj->tiling_flags = bo->tiling_flags;
 		if (fence) {
-			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
-			robj->tobj.sync_obj = radeon_fence_ref(fence);
-			robj->tobj.sync_obj_arg = NULL;
+			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+			bo->tbo.sync_obj = radeon_fence_ref(fence);
+			bo->tbo.sync_obj_arg = NULL;
 		}
 		if (old_fence) {
 			radeon_fence_unref(&old_fence);
@@ -472,51 +356,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
 	return 0;
 }
 
-void radeon_object_list_unvalidate(struct list_head *head)
+void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
 {
-	struct radeon_object_list *lobj;
-	struct radeon_fence *old_fence = NULL;
+	struct radeon_bo_list *lobj;
+	struct radeon_fence *old_fence;
 
-	list_for_each_entry(lobj, head, list) {
-		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
-		lobj->robj->tobj.sync_obj = NULL;
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
+	if (fence)
+		list_for_each_entry(lobj, head, list) {
+			old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
+			if (old_fence == fence) {
+				lobj->bo->tbo.sync_obj = NULL;
+				radeon_fence_unref(&old_fence);
+			}
 		}
-	}
-	radeon_object_list_unreserve(head);
-}
-
-void radeon_object_list_clean(struct list_head *head)
-{
-	radeon_object_list_unreserve(head);
+	radeon_bo_list_unreserve(head);
 }
 
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 			 struct vm_area_struct *vma)
 {
-	return ttm_fbdev_mmap(vma, &robj->tobj);
+	return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
-unsigned long radeon_object_size(struct radeon_object *robj)
+static int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 {
-	return robj->tobj.num_pages << PAGE_SHIFT;
-}
-
-int radeon_object_get_surface_reg(struct radeon_object *robj)
-{
-	struct radeon_device *rdev = robj->rdev;
+	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;
-	struct radeon_object *old_object;
+	struct radeon_bo *old_object;
 	int steal;
 	int i;
 
-	if (!robj->tiling_flags)
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+	if (!bo->tiling_flags)
 		return 0;
 
-	if (robj->surface_reg >= 0) {
-		reg = &rdev->surface_regs[robj->surface_reg];
-		i = robj->surface_reg;
+	if (bo->surface_reg >= 0) {
+		reg = &rdev->surface_regs[bo->surface_reg];
+		i = bo->surface_reg;
 		goto out;
 	}
 
@@ -524,10 +401,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
 	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
 
 		reg = &rdev->surface_regs[i];
-		if (!reg->robj)
+		if (!reg->bo)
 			break;
 
-		old_object = reg->robj;
+		old_object = reg->bo;
 		if (old_object->pin_count == 0)
 			steal = i;
 	}
@@ -538,91 +415,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
 			return -ENOMEM;
 		/* find someone with a surface reg and nuke their BO */
 		reg = &rdev->surface_regs[steal];
-		old_object = reg->robj;
+		old_object = reg->bo;
 		/* blow away the mapping */
 		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
-		ttm_bo_unmap_virtual(&old_object->tobj);
+		ttm_bo_unmap_virtual(&old_object->tbo);
 		old_object->surface_reg = -1;
 		i = steal;
 	}
 
-	robj->surface_reg = i;
-	reg->robj = robj;
+	bo->surface_reg = i;
+	reg->bo = bo;
 
 out:
-	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
-			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
-			       robj->tobj.num_pages << PAGE_SHIFT);
+	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+			       bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+			       bo->tbo.num_pages << PAGE_SHIFT);
 	return 0;
 }
 
-void radeon_object_clear_surface_reg(struct radeon_object *robj)
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
 {
-	struct radeon_device *rdev = robj->rdev;
+	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;
 
-	if (robj->surface_reg == -1)
+	if (bo->surface_reg == -1)
 		return;
 
-	reg = &rdev->surface_regs[robj->surface_reg];
-	radeon_clear_surface_reg(rdev, robj->surface_reg);
+	reg = &rdev->surface_regs[bo->surface_reg];
+	radeon_clear_surface_reg(rdev, bo->surface_reg);
 
-	reg->robj = NULL;
-	robj->surface_reg = -1;
+	reg->bo = NULL;
+	bo->surface_reg = -1;
 }
 
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
 				uint32_t tiling_flags, uint32_t pitch)
 {
-	robj->tiling_flags = tiling_flags;
-	robj->pitch = pitch;
+	int r;
+
+	r = radeon_bo_reserve(bo, false);
+	if (unlikely(r != 0))
+		return r;
+	bo->tiling_flags = tiling_flags;
+	bo->pitch = pitch;
+	radeon_bo_unreserve(bo);
+	return 0;
 }
 
-void radeon_object_get_tiling_flags(struct radeon_object *robj,
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 				uint32_t *tiling_flags,
 				uint32_t *pitch)
 {
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
 	if (tiling_flags)
-		*tiling_flags = robj->tiling_flags;
+		*tiling_flags = bo->tiling_flags;
 	if (pitch)
-		*pitch = robj->pitch;
+		*pitch = bo->pitch;
 }
 
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop)
 {
-	if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;
 
 	if (force_drop) {
-		radeon_object_clear_surface_reg(robj);
+		radeon_bo_clear_surface_reg(bo);
 		return 0;
 	}
 
-	if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
+	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
 		if (!has_moved)
 			return 0;
 
-		if (robj->surface_reg >= 0)
-			radeon_object_clear_surface_reg(robj);
+		if (bo->surface_reg >= 0)
+			radeon_bo_clear_surface_reg(bo);
 		return 0;
 	}
 
-	if ((robj->surface_reg >= 0) && !has_moved)
+	if ((bo->surface_reg >= 0) && !has_moved)
 		return 0;
 
-	return radeon_object_get_surface_reg(robj);
+	return radeon_bo_get_surface_reg(bo);
 }
 
 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 				struct ttm_mem_reg *mem)
 {
-	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
-	radeon_object_check_tiling(robj, 0, 1);
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 1);
 }
 
 void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
-	radeon_object_check_tiling(robj, 0, 0);
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 0);
 }
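Usage note (illustrative only, not part of the patch): with the reservation now protecting the object, callers are expected to bracket operations such as pin/unpin or tiling updates with radeon_bo_reserve()/radeon_bo_unreserve(). A hypothetical caller following that convention might look like this:

/* Hypothetical caller (not from this patch) showing the new convention:
 * take the TTM reservation first, operate on the radeon_bo, then drop it. */
static int example_pin_in_vram(struct radeon_bo *bo, u64 *gpu_addr)
{
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
	radeon_bo_unreserve(bo);
	return r;
}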