author     Dave Airlie <airlied@redhat.com>    2011-02-22 21:07:27 -0500
committer  Dave Airlie <airlied@redhat.com>    2011-02-22 21:07:27 -0500
commit     63871f89d158e3f3e469dde00dd15763d474cb3c (patch)
tree       0872202dbe52d6a8160215487ae0386c8e475a44 /drivers/gpu
parent     de1e7cd63a8ec26a3bd3740708cfd72dd76509e2 (diff)
parent     4546b2c1d6e256c716e5240f5d6198a078fd7a22 (diff)
Merge branch 'drm-mm-cleanup' into drm-next
* drm-mm-cleanup:
  radeon: move blit functions to radeon_asic.h
  radeon: kill decls for inline functions
  radeon: consolidate asic-specific function decls for r600 & later
  drm/radeon: kill radeon_bo->gobj pointer
  drm/radeon: introduce gem_to_radeon_bo helper
  drm/radeon: embed struct drm_gem_object
  drm: mm: add helper to unwind scan state
  drm: mm: add api for embedding struct drm_mm_node
  drm: mm: extract node insert helper functions
  drm: mm: track free areas implicitly
  drm/nouveau: don't munge in drm_mm internals
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_mm.c                     | 570
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c     |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c       |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_instmem.c       |   2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c       |   8
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c  |   2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c                |   2
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c          |   1
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c       |   2
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c           |   1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h              |  77
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h         |  77
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c    |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c           |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c       |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c      |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c           |  10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c         |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c          |  43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c  |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c       |  28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h       |   7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c         |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c         |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c          |   2
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c               |   2
26 files changed, 456 insertions, 412 deletions
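
The core of this merge is the drm_mm rework: instead of tracking free space with separate free nodes on a free_stack, each allocated node now carries a hole_follows flag, and drivers may embed struct drm_mm_node directly in their own objects. A minimal usage sketch of the embeddable-node API this series adds (see the drm_mm.c diff below); my_vram_buffer and its helpers are hypothetical, not taken from this diff:

struct my_vram_buffer {
        struct drm_mm_node node;        /* embedded; must be zeroed before insert */
        /* ... driver-private state ... */
};

static int my_vram_buffer_bind(struct drm_mm *mm, struct my_vram_buffer *buf,
                               unsigned long size, unsigned alignment)
{
        /* searches for a hole and fills in the preallocated node;
         * returns -ENOSPC if no suitable free area is available */
        return drm_mm_insert_node(mm, &buf->node, size, alignment);
}

static void my_vram_buffer_unbind(struct my_vram_buffer *buf)
{
        /* no kfree: the embedding object owns the node's storage */
        drm_mm_remove_node(&buf->node);
}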
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index c59515ba7e69..add1737dae0d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
 	else {
 		child =
 		    list_entry(mm->unused_nodes.next,
-			       struct drm_mm_node, free_stack);
-		list_del(&child->free_stack);
+			       struct drm_mm_node, node_list);
+		list_del(&child->node_list);
 		--mm->num_unused;
 	}
 	spin_unlock(&mm->unused_lock);
@@ -94,195 +94,242 @@ int drm_mm_pre_get(struct drm_mm *mm)
 			return ret;
 		}
 		++mm->num_unused;
-		list_add_tail(&node->free_stack, &mm->unused_nodes);
+		list_add_tail(&node->node_list, &mm->unused_nodes);
 	}
 	spin_unlock(&mm->unused_lock);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_pre_get);
 
-static int drm_mm_create_tail_node(struct drm_mm *mm,
-				   unsigned long start,
-				   unsigned long size, int atomic)
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
-	struct drm_mm_node *child;
-
-	child = drm_mm_kmalloc(mm, atomic);
-	if (unlikely(child == NULL))
-		return -ENOMEM;
-
-	child->free = 1;
-	child->size = size;
-	child->start = start;
-	child->mm = mm;
+	return hole_node->start + hole_node->size;
+}
 
-	list_add_tail(&child->node_list, &mm->node_list);
-	list_add_tail(&child->free_stack, &mm->free_stack);
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	struct drm_mm_node *next_node =
+		list_entry(hole_node->node_list.next, struct drm_mm_node,
+			   node_list);
 
-	return 0;
+	return next_node->start;
 }
 
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
-						 unsigned long size,
-						 int atomic)
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+				 struct drm_mm_node *node,
+				 unsigned long size, unsigned alignment)
 {
-	struct drm_mm_node *child;
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long tmp = 0, wasted = 0;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
-	child = drm_mm_kmalloc(parent->mm, atomic);
-	if (unlikely(child == NULL))
-		return NULL;
+	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	INIT_LIST_HEAD(&child->free_stack);
+	if (alignment)
+		tmp = hole_start % alignment;
 
-	child->size = size;
-	child->start = parent->start;
-	child->mm = parent->mm;
+	if (!tmp) {
+		hole_node->hole_follows = 0;
+		list_del_init(&hole_node->hole_stack);
+	} else
+		wasted = alignment - tmp;
 
-	list_add_tail(&child->node_list, &parent->node_list);
-	INIT_LIST_HEAD(&child->free_stack);
+	node->start = hole_start + wasted;
+	node->size = size;
+	node->mm = mm;
+	node->allocated = 1;
 
-	parent->size -= size;
-	parent->start += size;
-	return child;
-}
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
 
+	BUG_ON(node->start + node->size > hole_end);
 
+	if (node->start + node->size < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
+	} else {
+		node->hole_follows = 0;
+	}
+}
 
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
 					     unsigned alignment,
 					     int atomic)
 {
+	struct drm_mm_node *node;
 
-	struct drm_mm_node *align_splitoff = NULL;
-	unsigned tmp = 0;
+	node = drm_mm_kmalloc(hole_node->mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
 
-	if (alignment)
-		tmp = node->start % alignment;
+	drm_mm_insert_helper(hole_node, node, size, alignment);
 
-	if (tmp) {
-		align_splitoff =
-		    drm_mm_split_at_start(node, alignment - tmp, atomic);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
-	}
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
 
-	if (node->size == size) {
-		list_del_init(&node->free_stack);
-		node->free = 0;
-	} else {
-		node = drm_mm_split_at_start(node, size, atomic);
-	}
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+		       unsigned long size, unsigned alignment)
+{
+	struct drm_mm_node *hole_node;
 
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
+	hole_node = drm_mm_search_free(mm, size, alignment, 0);
+	if (!hole_node)
+		return -ENOSPC;
 
-	return node;
+	drm_mm_insert_helper(hole_node, node, size, alignment);
+
+	return 0;
 }
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_insert_node);
 
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
-						   unsigned long size,
-						   unsigned alignment,
-						   unsigned long start,
-						   unsigned long end,
-						   int atomic)
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+				       struct drm_mm_node *node,
+				       unsigned long size, unsigned alignment,
+				       unsigned long start, unsigned long end)
 {
-	struct drm_mm_node *align_splitoff = NULL;
-	unsigned tmp = 0;
-	unsigned wasted = 0;
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long tmp = 0, wasted = 0;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	if (node->start < start)
-		wasted += start - node->start;
+	if (hole_start < start)
+		wasted += start - hole_start;
 	if (alignment)
-		tmp = ((node->start + wasted) % alignment);
+		tmp = (hole_start + wasted) % alignment;
 
 	if (tmp)
 		wasted += alignment - tmp;
-	if (wasted) {
-		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
+
+	if (!wasted) {
+		hole_node->hole_follows = 0;
+		list_del_init(&hole_node->hole_stack);
 	}
 
-	if (node->size == size) {
-		list_del_init(&node->free_stack);
-		node->free = 0;
+	node->start = hole_start + wasted;
+	node->size = size;
+	node->mm = mm;
+	node->allocated = 1;
+
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
+
+	BUG_ON(node->start + node->size > hole_end);
+	BUG_ON(node->start + node->size > end);
+
+	if (node->start + node->size < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
 	} else {
-		node = drm_mm_split_at_start(node, size, atomic);
+		node->hole_follows = 0;
 	}
+}
 
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+						   unsigned long size,
+						   unsigned alignment,
+						   unsigned long start,
+						   unsigned long end,
+						   int atomic)
+{
+	struct drm_mm_node *node;
+
+	node = drm_mm_kmalloc(hole_node->mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
+
+	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+				   start, end);
 
 	return node;
 }
 EXPORT_SYMBOL(drm_mm_get_block_range_generic);
 
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
  */
-
-void drm_mm_put_block(struct drm_mm_node *cur)
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+				unsigned long size, unsigned alignment,
+				unsigned long start, unsigned long end)
 {
+	struct drm_mm_node *hole_node;
 
-	struct drm_mm *mm = cur->mm;
-	struct list_head *cur_head = &cur->node_list;
-	struct list_head *root_head = &mm->node_list;
-	struct drm_mm_node *prev_node = NULL;
-	struct drm_mm_node *next_node;
+	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+						start, end, 0);
+	if (!hole_node)
+		return -ENOSPC;
 
-	int merged = 0;
+	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+				   start, end);
 
-	BUG_ON(cur->scanned_block || cur->scanned_prev_free
-	       || cur->scanned_next_free);
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 
-	if (cur_head->prev != root_head) {
-		prev_node =
-		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
-		if (prev_node->free) {
-			prev_node->size += cur->size;
-			merged = 1;
-		}
-	}
-	if (cur_head->next != root_head) {
-		next_node =
-		    list_entry(cur_head->next, struct drm_mm_node, node_list);
-		if (next_node->free) {
-			if (merged) {
-				prev_node->size += next_node->size;
-				list_del(&next_node->node_list);
-				list_del(&next_node->free_stack);
-				spin_lock(&mm->unused_lock);
-				if (mm->num_unused < MM_UNUSED_TARGET) {
-					list_add(&next_node->free_stack,
-						 &mm->unused_nodes);
-					++mm->num_unused;
-				} else
-					kfree(next_node);
-				spin_unlock(&mm->unused_lock);
-			} else {
-				next_node->size += cur->size;
-				next_node->start = cur->start;
-				merged = 1;
-			}
-		}
-	}
-	if (!merged) {
-		cur->free = 1;
-		list_add(&cur->free_stack, &mm->free_stack);
-	} else {
-		list_del(&cur->node_list);
-		spin_lock(&mm->unused_lock);
-		if (mm->num_unused < MM_UNUSED_TARGET) {
-			list_add(&cur->free_stack, &mm->unused_nodes);
-			++mm->num_unused;
-		} else
-			kfree(cur);
-		spin_unlock(&mm->unused_lock);
-	}
+/**
+ * Remove a memory node from the allocator.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node;
+
+	BUG_ON(node->scanned_block || node->scanned_prev_free
+	       || node->scanned_next_free);
+
+	prev_node =
+	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+	if (node->hole_follows) {
+		BUG_ON(drm_mm_hole_node_start(node)
+				== drm_mm_hole_node_end(node));
+		list_del(&node->hole_stack);
+	} else
+		BUG_ON(drm_mm_hole_node_start(node)
+				!= drm_mm_hole_node_end(node));
+
+	if (!prev_node->hole_follows) {
+		prev_node->hole_follows = 1;
+		list_add(&prev_node->hole_stack, &mm->hole_stack);
+	} else
+		list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+	list_del(&node->node_list);
+	node->allocated = 0;
+}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
+{
 
+	struct drm_mm *mm = node->mm;
+
+	drm_mm_remove_node(node);
+
+	spin_lock(&mm->unused_lock);
+	if (mm->num_unused < MM_UNUSED_TARGET) {
+		list_add(&node->node_list, &mm->unused_nodes);
+		++mm->num_unused;
+	} else
+		kfree(node);
+	spin_unlock(&mm->unused_lock);
 }
 EXPORT_SYMBOL(drm_mm_put_block);
 
 static int check_free_hole(unsigned long start, unsigned long end,
288static int check_free_hole(unsigned long start, unsigned long end, 335static int check_free_hole(unsigned long start, unsigned long end,
@@ -319,8 +366,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->free_stack, free_stack) {
-		if (!check_free_hole(entry->start, entry->start + entry->size,
+	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+		BUG_ON(!entry->hole_follows);
+		if (!check_free_hole(drm_mm_hole_node_start(entry),
+				     drm_mm_hole_node_end(entry),
 				     size, alignment))
 			continue;
 
@@ -353,12 +402,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->free_stack, free_stack) {
-		unsigned long adj_start = entry->start < start ?
-			start : entry->start;
-		unsigned long adj_end = entry->start + entry->size > end ?
-			end : entry->start + entry->size;
+	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+			start : drm_mm_hole_node_start(entry);
+		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+			end : drm_mm_hole_node_end(entry);
 
+		BUG_ON(!entry->hole_follows);
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
@@ -376,6 +426,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 EXPORT_SYMBOL(drm_mm_search_free_in_range);
 
 /**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+	list_replace(&old->node_list, &new->node_list);
+	list_replace(&old->node_list, &new->hole_stack);
+	new->hole_follows = old->hole_follows;
+	new->mm = old->mm;
+	new->start = old->start;
+	new->size = old->size;
+
+	old->allocated = 0;
+	new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
+
+/**
  * Initializa lru scanning.
  *
  * This simply sets up the scanning routines with the parameters for the desired
@@ -393,6 +460,7 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
 	mm->scan_hit_start = 0;
 	mm->scan_hit_size = 0;
 	mm->scan_check_range = 0;
+	mm->prev_scanned_node = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init_scan);
 
@@ -418,6 +486,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
 	mm->scan_start = start;
 	mm->scan_end = end;
 	mm->scan_check_range = 1;
+	mm->prev_scanned_node = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 
@@ -430,70 +499,42 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 int drm_mm_scan_add_block(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
-	struct list_head *prev_free, *next_free;
-	struct drm_mm_node *prev_node, *next_node;
+	struct drm_mm_node *prev_node;
+	unsigned long hole_start, hole_end;
 	unsigned long adj_start;
 	unsigned long adj_end;
 
 	mm->scanned_blocks++;
 
-	prev_free = next_free = NULL;
-
-	BUG_ON(node->free);
+	BUG_ON(node->scanned_block);
 	node->scanned_block = 1;
-	node->free = 1;
-
-	if (node->node_list.prev != &mm->node_list) {
-		prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
-				       node_list);
-
-		if (prev_node->free) {
-			list_del(&prev_node->node_list);
-
-			node->start = prev_node->start;
-			node->size += prev_node->size;
-
-			prev_node->scanned_prev_free = 1;
-
-			prev_free = &prev_node->free_stack;
-		}
-	}
-
-	if (node->node_list.next != &mm->node_list) {
-		next_node = list_entry(node->node_list.next, struct drm_mm_node,
-				       node_list);
-
-		if (next_node->free) {
-			list_del(&next_node->node_list);
-
-			node->size += next_node->size;
-
-			next_node->scanned_next_free = 1;
-
-			next_free = &next_node->free_stack;
-		}
-	}
-
-	/* The free_stack list is not used for allocated objects, so these two
-	 * pointers can be abused (as long as no allocations in this memory
-	 * manager happens). */
-	node->free_stack.prev = prev_free;
-	node->free_stack.next = next_free;
+	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+			       node_list);
 
+	node->scanned_preceeds_hole = prev_node->hole_follows;
+	prev_node->hole_follows = 1;
+	list_del(&node->node_list);
+	node->node_list.prev = &prev_node->node_list;
+	node->node_list.next = &mm->prev_scanned_node->node_list;
+	mm->prev_scanned_node = node;
+
+	hole_start = drm_mm_hole_node_start(prev_node);
+	hole_end = drm_mm_hole_node_end(prev_node);
 	if (mm->scan_check_range) {
-		adj_start = node->start < mm->scan_start ?
-			mm->scan_start : node->start;
-		adj_end = node->start + node->size > mm->scan_end ?
-			mm->scan_end : node->start + node->size;
+		adj_start = hole_start < mm->scan_start ?
+			mm->scan_start : hole_start;
+		adj_end = hole_end > mm->scan_end ?
+			mm->scan_end : hole_end;
 	} else {
-		adj_start = node->start;
-		adj_end = node->start + node->size;
+		adj_start = hole_start;
+		adj_end = hole_end;
 	}
 
 	if (check_free_hole(adj_start , adj_end,
 			    mm->scan_size, mm->scan_alignment)) {
-		mm->scan_hit_start = node->start;
-		mm->scan_hit_size = node->size;
+		mm->scan_hit_start = hole_start;
+		mm->scan_hit_size = hole_end;
 
 		return 1;
 	}
@@ -519,39 +560,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
 int drm_mm_scan_remove_block(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
-	struct drm_mm_node *prev_node, *next_node;
+	struct drm_mm_node *prev_node;
 
 	mm->scanned_blocks--;
 
 	BUG_ON(!node->scanned_block);
 	node->scanned_block = 0;
-	node->free = 0;
-
-	prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
-			       free_stack);
-	next_node = list_entry(node->free_stack.next, struct drm_mm_node,
-			       free_stack);
 
-	if (prev_node) {
-		BUG_ON(!prev_node->scanned_prev_free);
-		prev_node->scanned_prev_free = 0;
-
-		list_add_tail(&prev_node->node_list, &node->node_list);
-
-		node->start = prev_node->start + prev_node->size;
-		node->size -= prev_node->size;
-	}
+	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+			       node_list);
 
-	if (next_node) {
-		BUG_ON(!next_node->scanned_next_free);
-		next_node->scanned_next_free = 0;
-
-		list_add(&next_node->node_list, &node->node_list);
-
-		node->size -= next_node->size;
-	}
-
-	INIT_LIST_HEAD(&node->free_stack);
+	prev_node->hole_follows = node->scanned_preceeds_hole;
+	INIT_LIST_HEAD(&node->node_list);
+	list_add(&node->node_list, &prev_node->node_list);
 
 	/* Only need to check for containement because start&size for the
 	 * complete resulting free block (not just the desired part) is
@@ -568,7 +589,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
 int drm_mm_clean(struct drm_mm * mm)
 {
-	struct list_head *head = &mm->node_list;
+	struct list_head *head = &mm->head_node.node_list;
 
 	return (head->next->next == head);
 }
@@ -576,38 +597,40 @@ EXPORT_SYMBOL(drm_mm_clean);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
-	INIT_LIST_HEAD(&mm->node_list);
-	INIT_LIST_HEAD(&mm->free_stack);
+	INIT_LIST_HEAD(&mm->hole_stack);
 	INIT_LIST_HEAD(&mm->unused_nodes);
 	mm->num_unused = 0;
 	mm->scanned_blocks = 0;
 	spin_lock_init(&mm->unused_lock);
 
-	return drm_mm_create_tail_node(mm, start, size, 0);
+	/* Clever trick to avoid a special case in the free hole tracking. */
+	INIT_LIST_HEAD(&mm->head_node.node_list);
+	INIT_LIST_HEAD(&mm->head_node.hole_stack);
+	mm->head_node.hole_follows = 1;
+	mm->head_node.scanned_block = 0;
+	mm->head_node.scanned_prev_free = 0;
+	mm->head_node.scanned_next_free = 0;
+	mm->head_node.mm = mm;
+	mm->head_node.start = start + size;
+	mm->head_node.size = start - mm->head_node.start;
+	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
-	struct list_head *bnode = mm->free_stack.next;
-	struct drm_mm_node *entry;
-	struct drm_mm_node *next;
+	struct drm_mm_node *entry, *next;
 
-	entry = list_entry(bnode, struct drm_mm_node, free_stack);
-
-	if (entry->node_list.next != &mm->node_list ||
-	    entry->free_stack.next != &mm->free_stack) {
+	if (!list_empty(&mm->head_node.node_list)) {
 		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
 		return;
 	}
 
-	list_del(&entry->free_stack);
-	list_del(&entry->node_list);
-	kfree(entry);
-
 	spin_lock(&mm->unused_lock);
-	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
-		list_del(&entry->free_stack);
+	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+		list_del(&entry->node_list);
 		kfree(entry);
 		--mm->num_unused;
 	}
@@ -620,19 +643,37 @@ EXPORT_SYMBOL(drm_mm_takedown);
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
 	struct drm_mm_node *entry;
-	int total_used = 0, total_free = 0, total = 0;
-
-	list_for_each_entry(entry, &mm->node_list, node_list) {
-		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+	unsigned long total_used = 0, total_free = 0, total = 0;
+	unsigned long hole_start, hole_end, hole_size;
+
+	hole_start = drm_mm_hole_node_start(&mm->head_node);
+	hole_end = drm_mm_hole_node_end(&mm->head_node);
+	hole_size = hole_end - hole_start;
+	if (hole_size)
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+			prefix, hole_start, hole_end,
+			hole_size);
+	total_free += hole_size;
+
+	drm_mm_for_each_node(entry, mm) {
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
 		       prefix, entry->start, entry->start + entry->size,
-		       entry->size, entry->free ? "free" : "used");
-		total += entry->size;
-		if (entry->free)
-			total_free += entry->size;
-		else
-			total_used += entry->size;
+		       entry->size);
+		total_used += entry->size;
+
+		if (entry->hole_follows) {
+			hole_start = drm_mm_hole_node_start(entry);
+			hole_end = drm_mm_hole_node_end(entry);
+			hole_size = hole_end - hole_start;
+			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+				prefix, hole_start, hole_end,
+				hole_size);
+			total_free += hole_size;
+		}
 	}
-	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+	total = total_free + total_used;
+
+	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
 	       total_used, total_free);
 }
 EXPORT_SYMBOL(drm_mm_debug_table);
@@ -641,17 +682,34 @@ EXPORT_SYMBOL(drm_mm_debug_table);
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
 	struct drm_mm_node *entry;
-	int total_used = 0, total_free = 0, total = 0;
-
-	list_for_each_entry(entry, &mm->node_list, node_list) {
-		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
-		total += entry->size;
-		if (entry->free)
-			total_free += entry->size;
-		else
-			total_used += entry->size;
+	unsigned long total_used = 0, total_free = 0, total = 0;
+	unsigned long hole_start, hole_end, hole_size;
+
+	hole_start = drm_mm_hole_node_start(&mm->head_node);
+	hole_end = drm_mm_hole_node_end(&mm->head_node);
+	hole_size = hole_end - hole_start;
+	if (hole_size)
+		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+				hole_start, hole_end, hole_size);
+	total_free += hole_size;
+
+	drm_mm_for_each_node(entry, mm) {
+		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+				entry->start, entry->start + entry->size,
+				entry->size);
+		total_used += entry->size;
+		if (entry->hole_follows) {
+			hole_start = drm_mm_hole_node_start(&mm->head_node);
+			hole_end = drm_mm_hole_node_end(&mm->head_node);
+			hole_size = hole_end - hole_start;
+			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+					hole_start, hole_end, hole_size);
+			total_free += hole_size;
+		}
 	}
-	seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+	total = total_free + total_used;
+
+	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
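
For context, the scan helpers reworked above (drm_mm_init_scan, drm_mm_scan_add_block, drm_mm_scan_remove_block) are meant to be driven by an eviction loop in the caller. The following is an illustrative sketch, with hypothetical my_obj/my_evict names, and is not part of this diff; every block fed to the scanner must be removed again before the drm_mm is used for anything else, newest-first:

	struct my_obj *obj, *tmp;
	LIST_HEAD(scan_list);

	drm_mm_init_scan(mm, size, alignment);

	/* grow a candidate hole by feeding evictable blocks to the scanner */
	list_for_each_entry(obj, &lru, lru_link) {
		list_add(&obj->scan_link, &scan_list); /* LIFO = newest-first unwind */
		if (drm_mm_scan_add_block(&obj->node))
			break;	/* returns 1 once the hole is big enough */
	}

	/* unwind: scan_remove_block restores each node and returns 1 for
	 * blocks that overlap the found hole and so really must be evicted */
	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
		if (drm_mm_scan_remove_block(&obj->node))
			my_evict(obj);
	}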
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 30b6544467ca..03adfe4c7665 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -909,7 +909,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
 
-	if (chan->ramin_heap.free_stack.next)
+	if (drm_mm_initialized(&chan->ramin_heap))
 		drm_mm_takedown(&chan->ramin_heap);
 	nouveau_gpuobj_ref(NULL, &chan->ramin);
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index ea0041810ae3..300285ae8e9e 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -56,7 +56,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
 	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-	if (chan->ramin_heap.free_stack.next)
+	if (drm_mm_initialized(&chan->ramin_heap))
 		drm_mm_takedown(&chan->ramin_heap);
 	nouveau_gpuobj_ref(NULL, &chan->ramin);
 	kfree(chan);
@@ -259,7 +259,7 @@ nv50_instmem_takedown(struct drm_device *dev)
 	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
 	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
 
-	if (dev_priv->ramin_heap.free_stack.next)
+	if (drm_mm_initialized(&dev_priv->ramin_heap))
 		drm_mm_takedown(&dev_priv->ramin_heap);
 
 	dev_priv->engine.instmem.priv = NULL;
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index c09091749054..82357d2df1f4 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -67,7 +67,7 @@ nvc0_channel_del(struct nouveau_channel **pchan)
 		return;
 
 	nouveau_vm_ref(NULL, &chan->vm, NULL);
-	if (chan->ramin_heap.free_stack.next)
+	if (drm_mm_initialized(&chan->ramin_heap))
 		drm_mm_takedown(&chan->ramin_heap);
 	nouveau_gpuobj_ref(NULL, &chan->ramin);
 	kfree(chan);
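
The nouveau hunks above stop peeking at drm_mm internals (the old free_stack list head) and use an accessor instead. drm_mm_initialized() lives in include/drm/drm_mm.h, which is outside this diffstat (limited to drivers/gpu); a plausible definition, based on the new hole_stack field, would be:

static inline bool drm_mm_initialized(struct drm_mm *mm)
{
	return mm->hole_stack.next;	/* non-NULL once drm_mm_init() has run */
}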
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b1537000a104..d56f08d3cbdc 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1030,7 +1030,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = radeon_fb->obj;
-	rbo = obj->driver_private;
+	rbo = gem_to_radeon_bo(obj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
 		return r;
@@ -1145,7 +1145,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = radeon_fb->obj->driver_private;
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
@@ -1191,7 +1191,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 	}
 
 	obj = radeon_fb->obj;
-	rbo = obj->driver_private;
+	rbo = gem_to_radeon_bo(obj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
 		return r;
@@ -1308,7 +1308,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = radeon_fb->obj->driver_private;
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index a1ba4b3053d0..2ed930e02f3a 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -572,7 +572,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	obj_size += evergreen_ps_size * 4;
 	obj_size = ALIGN(obj_size, 256);
 
-	r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+	r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
 			     &rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("evergreen failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 650672a0f5ad..be780a6b9b1d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2728,7 +2728,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
 
 	/* Allocate ring buffer */
 	if (rdev->ih.ring_obj == NULL) {
-		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+		r = radeon_bo_create(rdev, rdev->ih.ring_size,
 				     PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT,
 				     &rdev->ih.ring_obj);
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index b5443fe1c1d1..846fae576399 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -26,6 +26,7 @@
 #include "drmP.h"
 #include "radeon.h"
 #include "radeon_reg.h"
+#include "radeon_asic.h"
 #include "atom.h"
 
 #define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 86e5aa07f0db..16e211a614d7 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev)
 	obj_size += r6xx_ps_size * 4;
 	obj_size = ALIGN(obj_size, 256);
 
-	r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+	r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
 			     &rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("r600 failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e6a58ed48dcf..50db6d62eec2 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -26,6 +26,7 @@
 #include "drmP.h"
 #include "radeon_drm.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 
 /*
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 82aa59941aa1..55fefe763965 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -258,8 +258,9 @@ struct radeon_bo {
 	int				surface_reg;
 	/* Constant after initialization */
 	struct radeon_device		*rdev;
-	struct drm_gem_object		*gobj;
+	struct drm_gem_object		gem_base;
 };
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
 struct radeon_bo_list {
 	struct ttm_validate_buffer tv;
@@ -1197,19 +1198,6 @@ int radeon_device_init(struct radeon_device *rdev,
 void radeon_device_fini(struct radeon_device *rdev);
 int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
 
-/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void r600_kms_blit_copy(struct radeon_device *rdev,
-			u64 src_gpu_addr, u64 dst_gpu_addr,
-			int size_bytes);
-/* evergreen blit */
-int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void evergreen_kms_blit_copy(struct radeon_device *rdev,
-			     u64 src_gpu_addr, u64 dst_gpu_addr,
-			     int size_bytes);
-
 static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
 {
 	if (reg < rdev->rmmio_size)
@@ -1460,59 +1448,12 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
 extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 
-/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
-extern bool r600_card_posted(struct radeon_device *rdev);
-extern void r600_cp_stop(struct radeon_device *rdev);
-extern int r600_cp_start(struct radeon_device *rdev);
-extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
-extern int r600_cp_resume(struct radeon_device *rdev);
-extern void r600_cp_fini(struct radeon_device *rdev);
-extern int r600_count_pipe_bits(uint32_t val);
-extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
-extern int r600_pcie_gart_init(struct radeon_device *rdev);
-extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
-extern int r600_ib_test(struct radeon_device *rdev);
-extern int r600_ring_test(struct radeon_device *rdev);
-extern void r600_scratch_init(struct radeon_device *rdev);
-extern int r600_blit_init(struct radeon_device *rdev);
-extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_asic_reset(struct radeon_device *rdev);
-/* r600 irq */
-extern int r600_irq_init(struct radeon_device *rdev);
-extern void r600_irq_fini(struct radeon_device *rdev);
-extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
-extern int r600_irq_set(struct radeon_device *rdev);
-extern void r600_irq_suspend(struct radeon_device *rdev);
-extern void r600_disable_interrupts(struct radeon_device *rdev);
-extern void r600_rlc_stop(struct radeon_device *rdev);
-/* r600 audio */
-extern int r600_audio_init(struct radeon_device *rdev);
-extern int r600_audio_tmds_index(struct drm_encoder *encoder);
-extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
-extern int r600_audio_channels(struct radeon_device *rdev);
-extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
-extern int r600_audio_rate(struct radeon_device *rdev);
-extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
-extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
-extern void r600_audio_schedule_polling(struct radeon_device *rdev);
-extern void r600_audio_enable_polling(struct drm_encoder *encoder);
-extern void r600_audio_disable_polling(struct drm_encoder *encoder);
-extern void r600_audio_fini(struct radeon_device *rdev);
-extern void r600_hdmi_init(struct drm_encoder *encoder);
+/*
+ * r600 functions used by radeon_encoder.c
+ */
 extern void r600_hdmi_enable(struct drm_encoder *encoder);
 extern void r600_hdmi_disable(struct drm_encoder *encoder);
 extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
-
-extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern void r700_cp_stop(struct radeon_device *rdev);
-extern void r700_cp_fini(struct radeon_device *rdev);
-extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
-extern int evergreen_irq_set(struct radeon_device *rdev);
-extern int evergreen_blit_init(struct radeon_device *rdev);
-extern void evergreen_blit_fini(struct radeon_device *rdev);
 
 extern int ni_init_microcode(struct radeon_device *rdev);
 extern int btc_mc_load_microcode(struct radeon_device *rdev);
@@ -1524,14 +1465,6 @@ extern int radeon_acpi_init(struct radeon_device *rdev);
 static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
 #endif
 
-/* evergreen */
-struct evergreen_mc_save {
-	u32 vga_control[6];
-	u32 vga_render_control;
-	u32 vga_hdp_control;
-	u32 crtc_control[6];
-};
-
 #include "radeon_object.h"
 
 #endif
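
With struct drm_gem_object embedded in struct radeon_bo (first radeon.h hunk above), gem_to_radeon_bo() is plain container_of() pointer arithmetic: it costs nothing, cannot fail for objects that really embed gem_base, and makes both the gobj->driver_private indirection and the radeon_bo->gobj back-pointer unnecessary. Illustrative call-site change (variable names hypothetical):

	struct radeon_bo *rbo;

	rbo = gobj->driver_private;	/* before: opaque void * back-pointer */
	rbo = gem_to_radeon_bo(gobj);	/* after: typed container_of() lookup */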
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c59bd98a2029..1c7317e3aa8c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -57,8 +57,6 @@ int r100_init(struct radeon_device *rdev);
 void r100_fini(struct radeon_device *rdev);
 int r100_suspend(struct radeon_device *rdev);
 int r100_resume(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
 bool r100_gpu_is_lockup(struct radeon_device *rdev);
 int r100_asic_reset(struct radeon_device *rdev);
@@ -164,8 +162,6 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
 extern int r300_cs_parse(struct radeon_cs_parser *p);
 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
 extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,7 +204,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev);
 void rs400_gart_disable(struct radeon_device *rdev);
 void rs400_gart_fini(struct radeon_device *rdev);
 
-
 /*
  * rs600.
  */
@@ -270,8 +265,6 @@ void rv515_fini(struct radeon_device *rdev);
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rv515_ring_start(struct radeon_device *rdev);
-uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rv515_bandwidth_update(struct radeon_device *rdev);
 int rv515_resume(struct radeon_device *rdev);
 int rv515_suspend(struct radeon_device *rdev);
@@ -307,14 +300,13 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int r600_cs_parse(struct radeon_cs_parser *p);
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence);
-int r600_irq_process(struct radeon_device *rdev);
-int r600_irq_set(struct radeon_device *rdev);
 bool r600_gpu_is_lockup(struct radeon_device *rdev);
 int r600_asic_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
 void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+int r600_ib_test(struct radeon_device *rdev);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
@@ -333,6 +325,50 @@ extern void rs780_pm_init_profile(struct radeon_device *rdev);
 extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
 extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int r600_get_pcie_lanes(struct radeon_device *rdev);
+bool r600_card_posted(struct radeon_device *rdev);
+void r600_cp_stop(struct radeon_device *rdev);
+int r600_cp_start(struct radeon_device *rdev);
+void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_cp_resume(struct radeon_device *rdev);
+void r600_cp_fini(struct radeon_device *rdev);
+int r600_count_pipe_bits(uint32_t val);
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+int r600_pcie_gart_init(struct radeon_device *rdev);
+void r600_scratch_init(struct radeon_device *rdev);
+int r600_blit_init(struct radeon_device *rdev);
+void r600_blit_fini(struct radeon_device *rdev);
+int r600_init_microcode(struct radeon_device *rdev);
+/* r600 irq */
+int r600_irq_process(struct radeon_device *rdev);
+int r600_irq_init(struct radeon_device *rdev);
+void r600_irq_fini(struct radeon_device *rdev);
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_irq_set(struct radeon_device *rdev);
+void r600_irq_suspend(struct radeon_device *rdev);
+void r600_disable_interrupts(struct radeon_device *rdev);
+void r600_rlc_stop(struct radeon_device *rdev);
+/* r600 audio */
+int r600_audio_init(struct radeon_device *rdev);
+int r600_audio_tmds_index(struct drm_encoder *encoder);
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+int r600_audio_channels(struct radeon_device *rdev);
+int r600_audio_bits_per_sample(struct radeon_device *rdev);
+int r600_audio_rate(struct radeon_device *rdev);
+uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+uint8_t r600_audio_category_code(struct radeon_device *rdev);
+void r600_audio_schedule_polling(struct radeon_device *rdev);
+void r600_audio_enable_polling(struct drm_encoder *encoder);
+void r600_audio_disable_polling(struct drm_encoder *encoder);
+void r600_audio_fini(struct radeon_device *rdev);
+void r600_hdmi_init(struct drm_encoder *encoder);
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+/* r600 blit */
+int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void r600_kms_blit_copy(struct radeon_device *rdev,
+			u64 src_gpu_addr, u64 dst_gpu_addr,
+			int size_bytes);
 
 /*
  * rv770,rv730,rv710,rv740
@@ -341,12 +377,21 @@ int rv770_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
 int rv770_suspend(struct radeon_device *rdev);
 int rv770_resume(struct radeon_device *rdev);
-extern void rv770_pm_misc(struct radeon_device *rdev);
-extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void rv770_pm_misc(struct radeon_device *rdev);
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+void r700_cp_stop(struct radeon_device *rdev);
+void r700_cp_fini(struct radeon_device *rdev);
 
 /*
  * evergreen
  */
+struct evergreen_mc_save {
+	u32 vga_control[6];
+	u32 vga_render_control;
+	u32 vga_hdp_control;
+	u32 crtc_control[6];
+};
 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
@@ -374,5 +419,15 @@ extern void evergreen_pm_finish(struct radeon_device *rdev);
 extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
 extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
 extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+int evergreen_blit_init(struct radeon_device *rdev);
+void evergreen_blit_fini(struct radeon_device *rdev);
+/* evergreen blit */
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+			     u64 src_gpu_addr, u64 dst_gpu_addr,
+			     int size_bytes);
+
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index c558685cc637..10191d9372d8 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
41 41
42 size = bsize; 42 size = bsize;
43 n = 1024; 43 n = 1024;
44 r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj); 44 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
45 if (r) { 45 if (r) {
46 goto out_cleanup; 46 goto out_cleanup;
47 } 47 }
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 35b5eb8fbe2a..8c1916941871 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -75,7 +75,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 			return -ENOENT;
 		}
 		p->relocs_ptr[i] = &p->relocs[i];
-		p->relocs[i].robj = p->relocs[i].gobj->driver_private;
+		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
 		p->relocs[i].lobj.bo = p->relocs[i].robj;
 		p->relocs[i].lobj.wdomain = r->write_domain;
 		p->relocs[i].lobj.rdomain = r->read_domains;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0d478932b1a9..7c0a3f26ab5e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -184,7 +184,7 @@ int radeon_wb_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@ -860,7 +860,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
-		robj = rfb->obj->driver_private;
+		robj = gem_to_radeon_bo(rfb->obj);
 		/* don't unpin kernel fb objects */
 		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
 			r = radeon_bo_reserve(robj, false);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 2eff98cfd728..4409975a363c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -371,7 +371,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	new_radeon_fb = to_radeon_framebuffer(fb);
 	/* schedule unpin of the old buffer */
 	obj = old_radeon_fb->obj;
-	rbo = obj->driver_private;
+	rbo = gem_to_radeon_bo(obj);
 	work->old_rbo = rbo;
 	INIT_WORK(&work->work, radeon_unpin_work_func);
 
@@ -391,7 +391,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 
 	/* pin the new buffer */
 	obj = new_radeon_fb->obj;
-	rbo = obj->driver_private;
+	rbo = gem_to_radeon_bo(obj);
 
 	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
 			 work->old_rbo, rbo);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cb968f997ce7..28431e78ab56 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -90,7 +90,7 @@ int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tile
 
 static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
 {
-	struct radeon_bo *rbo = gobj->driver_private;
+	struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
 	int ret;
 
 	ret = radeon_bo_reserve(rbo, false);
@@ -128,7 +128,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 			aligned_size);
 		return -ENOMEM;
 	}
-	rbo = gobj->driver_private;
+	rbo = gem_to_radeon_bo(gobj);
 
 	if (fb_tiled)
 		tiling_flags = RADEON_TILING_MACRO;
@@ -202,7 +202,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
 	mode_cmd.depth = sizes->surface_depth;
 
 	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
-	rbo = gobj->driver_private;
+	rbo = gem_to_radeon_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
 	info = framebuffer_alloc(0, device);
@@ -403,14 +403,14 @@ int radeon_fbdev_total_size(struct radeon_device *rdev)
 	struct radeon_bo *robj;
 	int size = 0;
 
-	robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+	robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
 	size += radeon_bo_size(robj);
 	return size;
 }
 
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
 {
-	if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
 		return true;
 	return false;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a6b0fed7bae9..f0534ef2f331 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,7 +78,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj == NULL) {
-		r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+		r = radeon_bo_create(rdev, rdev->gart.table_size,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
 				     &rdev->gart.table.vram.robj);
 		if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ede5dccdf79f..a419b67d8401 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -32,21 +32,18 @@
 
 int radeon_gem_object_init(struct drm_gem_object *obj)
 {
-	/* we do nothings here */
+	BUG();
+
 	return 0;
 }
 
 void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
-	struct radeon_bo *robj = gobj->driver_private;
+	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
 
-	gobj->driver_private = NULL;
 	if (robj) {
 		radeon_bo_unref(&robj);
 	}
-
-	drm_gem_object_release(gobj);
-	kfree(gobj);
 }
 
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -54,36 +51,34 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 			     bool discardable, bool kernel,
 			     struct drm_gem_object **obj)
 {
-	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
 	int r;
 
 	*obj = NULL;
-	gobj = drm_gem_object_alloc(rdev->ddev, size);
-	if (!gobj) {
-		return -ENOMEM;
-	}
 	/* At least align on page size */
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
-	r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
+	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
 	if (r) {
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
 				  size, initial_domain, alignment, r);
-		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
-	gobj->driver_private = robj;
-	*obj = gobj;
+	*obj = &robj->gem_base;
+
+	mutex_lock(&rdev->gem.mutex);
+	list_add_tail(&robj->list, &rdev->gem.objects);
+	mutex_unlock(&rdev->gem.mutex);
+
 	return 0;
 }
 
 int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
 			  uint64_t *gpu_addr)
 {
-	struct radeon_bo *robj = obj->driver_private;
+	struct radeon_bo *robj = gem_to_radeon_bo(obj);
 	int r;
 
 	r = radeon_bo_reserve(robj, false);
@@ -96,7 +91,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
 
 void radeon_gem_object_unpin(struct drm_gem_object *obj)
 {
-	struct radeon_bo *robj = obj->driver_private;
+	struct radeon_bo *robj = gem_to_radeon_bo(obj);
 	int r;
 
 	r = radeon_bo_reserve(robj, false);
@@ -114,7 +109,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	int r;
 
 	/* FIXME: reeimplement */
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 	/* work out where to validate the buffer to */
 	domain = wdomain;
 	if (!domain) {
@@ -228,7 +223,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 
 	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
@@ -247,7 +242,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 	*offset_p = radeon_bo_mmap_offset(robj);
 	drm_gem_object_unreference_unlocked(gobj);
 	return 0;
@@ -274,7 +269,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_wait(robj, &cur_placement, true);
 	switch (cur_placement) {
 	case TTM_PL_VRAM:
@@ -304,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_wait(robj, NULL, false);
 	/* callback hw specific functions if any */
 	if (robj->rdev->asic->ioctl_wait_idle)
@@ -325,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
@@ -343,7 +338,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	rbo = gobj->driver_private;
+	rbo = gem_to_radeon_bo(gobj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
 		goto out;
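
Taken together, the radeon_gem.c hunks invert the old ownership: the GEM object no longer allocates and points at the bo; the bo embeds the GEM object. drm_gem_object_init() happens inside radeon_bo_create() and drm_gem_object_release() inside the TTM destroy callback (see the radeon_object.c hunks below), so radeon_gem_object_free() only drops the bo reference. A lifetime sketch under those assumptions (hypothetical sketch_* names, error paths trimmed):

	/* Create side: allocate the bo, then initialize the embedded GEM object. */
	static int sketch_create(struct radeon_device *rdev, unsigned long size,
				 struct radeon_bo **bo_ptr)
	{
		struct radeon_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
		if (!bo)
			return -ENOMEM;
		if (drm_gem_object_init(rdev->ddev, &bo->gem_base, size)) {
			kfree(bo);
			return -ENOMEM;
		}
		*bo_ptr = bo;
		return 0;
	}

	/* Destroy side: release the embedded GEM object, then free the bo. */
	static void sketch_destroy(struct radeon_bo *bo)
	{
		drm_gem_object_release(&bo->gem_base);
		kfree(bo);
	}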
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cf0638c3b7c7..9ae599eb2e6d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -415,7 +415,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 
 	/* Pin framebuffer & get tilling informations */
 	obj = radeon_fb->obj;
-	rbo = obj->driver_private;
+	rbo = gem_to_radeon_bo(obj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
 		return r;
@@ -520,7 +520,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = radeon_fb->obj->driver_private;
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d6b8e88f746..8758d02cca1a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -55,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	list_del_init(&bo->list);
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
+	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }
 
@@ -86,7 +87,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	rbo->placement.num_busy_placement = c;
 }
 
-int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+int radeon_bo_create(struct radeon_device *rdev,
 		     unsigned long size, int byte_align, bool kernel, u32 domain,
 		     struct radeon_bo **bo_ptr)
 {
@@ -96,6 +97,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	unsigned long max_size = 0;
 	int r;
 
+	size = ALIGN(size, PAGE_SIZE);
+
 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
 		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
 	}
@@ -118,8 +121,13 @@ retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
+	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
+	if (unlikely(r)) {
+		kfree(bo);
+		return r;
+	}
 	bo->rdev = rdev;
-	bo->gobj = gobj;
+	bo->gem_base.driver_private = NULL;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
 	radeon_ttm_placement_from_domain(bo, domain);
@@ -142,12 +150,9 @@ retry:
 		return r;
 	}
 	*bo_ptr = bo;
-	if (gobj) {
-		mutex_lock(&bo->rdev->gem.mutex);
-		list_add_tail(&bo->list, &rdev->gem.objects);
-		mutex_unlock(&bo->rdev->gem.mutex);
-	}
+
 	trace_radeon_bo_create(bo);
+
 	return 0;
 }
 
@@ -260,7 +265,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
 void radeon_bo_force_delete(struct radeon_device *rdev)
 {
 	struct radeon_bo *bo, *n;
-	struct drm_gem_object *gobj;
 
 	if (list_empty(&rdev->gem.objects)) {
 		return;
@@ -268,16 +272,14 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
 	dev_err(rdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 		mutex_lock(&rdev->ddev->struct_mutex);
-		gobj = bo->gobj;
 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
-			gobj, bo, (unsigned long)gobj->size,
-			*((unsigned long *)&gobj->refcount));
+			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+			*((unsigned long *)&bo->gem_base.refcount));
 		mutex_lock(&bo->rdev->gem.mutex);
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->rdev->gem.mutex);
 		radeon_bo_unref(&bo);
-		gobj->driver_private = NULL;
-		drm_gem_object_unreference(gobj);
+		drm_gem_object_unreference(&bo->gem_base);
 		mutex_unlock(&rdev->ddev->struct_mutex);
 	}
 }
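
For reference, the embedding that makes gem_to_radeon_bo() work, sketched from the fields this series touches (the real struct radeon_bo carries many more TTM and placement members, omitted here):

	/* Partial, assumed layout of struct radeon_bo after this series; only
	 * fields visible in the hunks above are shown. */
	struct radeon_bo {
		struct ttm_buffer_object	tbo;		/* TTM base object */
		struct list_head		list;		/* rdev->gem.objects linkage */
		int				surface_reg;
		struct radeon_device		*rdev;
		struct drm_gem_object		gem_base;	/* embedded; replaces ->gobj */
	};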
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 22d4c237dea5..7f8e778dba46 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -137,10 +137,9 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
 }
 
 extern int radeon_bo_create(struct radeon_device *rdev,
-			    struct drm_gem_object *gobj, unsigned long size,
-			    int byte_align,
-			    bool kernel, u32 domain,
-			    struct radeon_bo **bo_ptr);
+			    unsigned long size, int byte_align,
+			    bool kernel, u32 domain,
+			    struct radeon_bo **bo_ptr);
 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
 extern void radeon_bo_kunmap(struct radeon_bo *bo);
 extern void radeon_bo_unref(struct radeon_bo **bo);
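
With the prototype trimmed above, callers pass the size directly instead of a pre-allocated GEM object; every radeon_bo_create() call site in this diff follows the same pattern. A minimal use of the new signature (size and domain values are illustrative):

	/* Typical call under the new five-argument signature. */
	struct radeon_bo *bo;
	int r = radeon_bo_create(rdev, 64 * 1024, PAGE_SIZE, true,
				 RADEON_GEM_DOMAIN_GTT, &bo);
	if (r)
		return r;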
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 06e79822a2bf..992d99d13fc5 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -175,7 +175,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		return 0;
 	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
 	/* Allocate 1M object buffer */
-	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
 			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
 			     &rdev->ib_pool.robj);
 	if (r) {
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rdev->cp.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->cp.ring_obj == NULL) {
-		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
+		r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT,
 				     &rdev->cp.ring_obj);
 		if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 5b44f652145c..dee4a0c1b4b2 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 		goto out_cleanup;
 	}
 
-	r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
 			     &vram_obj);
 	if (r) {
 		DRM_ERROR("Failed to create VRAM object\n");
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
 
-		r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
+		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c345e899e881..177adc884b74 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -530,7 +530,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
-	r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
+	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_VRAM,
 			     &rdev->stollen_vga_memory);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 2211a323db41..3a95999d2fef 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -999,7 +999,7 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
 	u64 gpu_addr;
 
 	if (rdev->vram_scratch.robj == NULL) {
-		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
 				     &rdev->vram_scratch.robj);
 		if (r) {