author     Jerome Glisse <jglisse@redhat.com>   2009-11-20 08:29:23 -0500
committer  Dave Airlie <airlied@redhat.com>     2009-12-01 23:00:18 -0500
commit     4c7886791264f03428d5424befb1b96f08fc90f4
tree       2c644931001b06969fb3038e7beb68db436c4872
parent     1614f8b17b8cc3ad143541d41569623d30dbc9ec
drm/radeon/kms: Rework radeon object handling
The locking & protection of radeon objects was somewhat messy. This patch
completely reworks it to use the ttm reserve as the protection for the radeon
object structure members. It also shrinks the various radeon object structures
by removing fields that were redundant with the ttm information. Lastly, it
converts a few simple functions to inline, which should help performance.

airlied: rebase on top of r600 and other changes.

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
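For readers scanning the diff: the recurring shape of the rework is a reserve/unreserve bracket around every pin, unpin, kmap, and kunmap call. Below is a minimal sketch of that usage, assuming the radeon_bo_reserve/radeon_bo_pin/radeon_bo_unreserve helpers this patch introduces; the wrapper function itself is hypothetical, for illustration only, not code from the patch:

    /* Hypothetical illustration of the locking pattern used throughout the diff. */
    static int example_pin_to_vram(struct radeon_bo *rbo, u64 *gpu_addr)
    {
            int r;

            r = radeon_bo_reserve(rbo, false);      /* take the ttm reservation */
            if (unlikely(r != 0))
                    return r;                       /* bo left untouched on failure */
            r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
            radeon_bo_unreserve(rbo);               /* drop it before returning */
            return r;
    }

Teardown paths in the patch use the same bracket with likely(r == 0), so a failed reserve skips the kunmap/unpin cleanup instead of touching an unprotected object.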
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c       30
-rw-r--r--  drivers/gpu/drm/radeon/r100.c                90
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h          10
-rw-r--r--  drivers/gpu/drm/radeon/r300.c                15
-rw-r--r--  drivers/gpu/drm/radeon/r420.c                 4
-rw-r--r--  drivers/gpu/drm/radeon/r520.c                 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c               100
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c       30
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h             113
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c    36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c           13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c       16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c           63
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c         42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c          98
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c  27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c      539
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h      157
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c         67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c         55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c          36
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c                4
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c               15
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c                4
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c                6
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c               30
26 files changed, 901 insertions(+), 701 deletions(-)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index f5987afcd48d..7c489d1b3514 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -574,9 +574,10 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_framebuffer *radeon_fb;
 	struct drm_gem_object *obj;
-	struct drm_radeon_gem_object *obj_priv;
+	struct radeon_bo *rbo;
 	uint64_t fb_location;
 	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	int r;
 
 	/* no fb bound */
 	if (!crtc->fb) {
@@ -586,12 +587,21 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
 	radeon_fb = to_radeon_framebuffer(crtc->fb);
 
+	/* Pin framebuffer & get tilling informations */
 	obj = radeon_fb->obj;
-	obj_priv = obj->driver_private;
-
-	if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
+	rbo = obj->driver_private;
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
 		return -EINVAL;
 	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+	if (tiling_flags & RADEON_TILING_MACRO)
+		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
 
 	switch (crtc->fb->bits_per_pixel) {
 	case 8:
@@ -621,11 +631,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 		return -EINVAL;
 	}
 
-	radeon_object_get_tiling_flags(obj->driver_private,
-				       &tiling_flags, NULL);
-	if (tiling_flags & RADEON_TILING_MACRO)
-		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
-
 	if (tiling_flags & RADEON_TILING_MICRO)
 		fb_format |= AVIVO_D1GRPH_TILED;
 
@@ -677,7 +682,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
 	if (old_fb && old_fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(old_fb);
-		radeon_gem_object_unpin(radeon_fb->obj);
+		rbo = radeon_fb->obj->driver_private;
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
 	}
 
 	/* Bytes per pixel may have changed */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 04d4b4ca0ef3..9b2ac9d69c0f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -261,24 +261,27 @@ int r100_wb_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
-					 true,
-					 RADEON_GEM_DOMAIN_GTT,
-					 false, &rdev->wb.wb_obj);
+		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+					RADEON_GEM_DOMAIN_GTT,
+					&rdev->wb.wb_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+			dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->wb.wb_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-				      &rdev->wb.gpu_addr);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+					&rdev->wb.gpu_addr);
 		if (r) {
-			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+			dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+			dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
 			return r;
 		}
 	}
@@ -296,11 +299,19 @@ void r100_wb_disable(struct radeon_device *rdev)
 
 void r100_wb_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	r100_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
-		radeon_object_kunmap(rdev->wb.wb_obj);
-		radeon_object_unpin(rdev->wb.wb_obj);
-		radeon_object_unref(&rdev->wb.wb_obj);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			dev_err(rdev->dev, "(%d) can't finish WB\n", r);
+			return;
+		}
+		radeon_bo_kunmap(rdev->wb.wb_obj);
+		radeon_bo_unpin(rdev->wb.wb_obj);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
+		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
 	}
@@ -1294,17 +1305,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 
 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 					 struct radeon_cs_packet *pkt,
-					 struct radeon_object *robj)
+					 struct radeon_bo *robj)
 {
 	unsigned idx;
 	u32 value;
 	idx = pkt->idx + 1;
 	value = radeon_get_ib_value(p, idx + 2);
-	if ((value + 1) > radeon_object_size(robj)) {
+	if ((value + 1) > radeon_bo_size(robj)) {
 		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
 			  "(need %u have %lu) !\n",
 			  value + 1,
-			  radeon_object_size(robj));
+			  radeon_bo_size(robj));
 		return -EINVAL;
 	}
 	return 0;
@@ -2608,7 +2619,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
 			      struct r100_cs_track *track, unsigned idx)
 {
 	unsigned face, w, h;
-	struct radeon_object *cube_robj;
+	struct radeon_bo *cube_robj;
 	unsigned long size;
 
 	for (face = 0; face < 5; face++) {
@@ -2621,9 +2632,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
 
 		size += track->textures[idx].cube_info[face].offset;
 
-		if (size > radeon_object_size(cube_robj)) {
+		if (size > radeon_bo_size(cube_robj)) {
 			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
-				  size, radeon_object_size(cube_robj));
+				  size, radeon_bo_size(cube_robj));
 			r100_cs_track_texture_print(&track->textures[idx]);
 			return -1;
 		}
@@ -2634,7 +2645,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
 static int r100_cs_track_texture_check(struct radeon_device *rdev,
 				       struct r100_cs_track *track)
 {
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	unsigned long size;
 	unsigned u, i, w, h;
 	int ret;
@@ -2690,9 +2701,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
 				  "%u\n", track->textures[u].tex_coord_type, u);
 			return -EINVAL;
 		}
-		if (size > radeon_object_size(robj)) {
+		if (size > radeon_bo_size(robj)) {
 			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
-				  "%lu\n", u, size, radeon_object_size(robj));
+				  "%lu\n", u, size, radeon_bo_size(robj));
 			r100_cs_track_texture_print(&track->textures[u]);
 			return -EINVAL;
 		}
@@ -2714,10 +2725,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 		}
 		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
 		size += track->cb[i].offset;
-		if (size > radeon_object_size(track->cb[i].robj)) {
+		if (size > radeon_bo_size(track->cb[i].robj)) {
 			DRM_ERROR("[drm] Buffer too small for color buffer %d "
 				  "(need %lu have %lu) !\n", i, size,
-				  radeon_object_size(track->cb[i].robj));
+				  radeon_bo_size(track->cb[i].robj));
 			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
 				  i, track->cb[i].pitch, track->cb[i].cpp,
 				  track->cb[i].offset, track->maxy);
@@ -2731,10 +2742,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 		}
 		size = track->zb.pitch * track->zb.cpp * track->maxy;
 		size += track->zb.offset;
-		if (size > radeon_object_size(track->zb.robj)) {
+		if (size > radeon_bo_size(track->zb.robj)) {
 			DRM_ERROR("[drm] Buffer too small for z buffer "
 				  "(need %lu have %lu) !\n", size,
-				  radeon_object_size(track->zb.robj));
+				  radeon_bo_size(track->zb.robj));
 			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
 				  track->zb.pitch, track->zb.cpp,
 				  track->zb.offset, track->maxy);
@@ -2752,11 +2763,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 				  "bound\n", prim_walk, i);
 			return -EINVAL;
 		}
-		if (size > radeon_object_size(track->arrays[i].robj)) {
-			DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-				  "have %lu dwords\n", prim_walk, i,
-				  size >> 2,
-				  radeon_object_size(track->arrays[i].robj) >> 2);
+		if (size > radeon_bo_size(track->arrays[i].robj)) {
+			dev_err(rdev->dev, "(PW %u) Vertex array %u "
+				"need %lu dwords have %lu dwords\n",
+				prim_walk, i, size >> 2,
+				radeon_bo_size(track->arrays[i].robj)
+				>> 2);
 			DRM_ERROR("Max indices %u\n", track->max_indx);
 			return -EINVAL;
 		}
@@ -2770,10 +2782,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 				  "bound\n", prim_walk, i);
 			return -EINVAL;
 		}
-		if (size > radeon_object_size(track->arrays[i].robj)) {
-			DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-				  "have %lu dwords\n", prim_walk, i, size >> 2,
-				  radeon_object_size(track->arrays[i].robj) >> 2);
+		if (size > radeon_bo_size(track->arrays[i].robj)) {
+			dev_err(rdev->dev, "(PW %u) Vertex array %u "
+				"need %lu dwords have %lu dwords\n",
+				prim_walk, i, size >> 2,
+				radeon_bo_size(track->arrays[i].robj)
+				>> 2);
 			return -EINVAL;
 		}
 	}
@@ -3188,7 +3202,7 @@ void r100_fini(struct radeon_device *rdev)
 	r100_pci_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -3276,7 +3290,7 @@ int r100_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	if (rdev->flags & RADEON_IS_PCI) {
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 0daf0d76a891..ca50903dd2bb 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -10,26 +10,26 @@
  * CS functions
  */
 struct r100_cs_track_cb {
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	unsigned pitch;
 	unsigned cpp;
 	unsigned offset;
 };
 
 struct r100_cs_track_array {
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	unsigned esize;
 };
 
 struct r100_cs_cube_info {
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	unsigned offset;
 	unsigned width;
 	unsigned height;
 };
 
 struct r100_cs_track_texture {
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
 	unsigned pitch;
 	unsigned width;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 6be3acdc9e7d..b3d1d8b9df92 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -137,14 +137,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 
 void rv370_pcie_gart_disable(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u32 tmp;
+	int r;
 
 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -1270,7 +1275,7 @@ void r300_fini(struct radeon_device *rdev)
 	r100_pci_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -1328,7 +1333,7 @@ int r300_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	if (rdev->flags & RADEON_IS_PCIE) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 885610f8dd85..d72f0439b2fa 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -257,7 +257,7 @@ void r420_fini(struct radeon_device *rdev)
 	radeon_agp_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	if (rdev->is_atom_bios) {
 		radeon_atombios_fini(rdev);
 	} else {
@@ -325,7 +325,7 @@ int r420_init(struct radeon_device *rdev)
 		return r;
 	}
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r) {
 		return r;
 	}
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 92fbc982b889..788eef5c2a08 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -279,7 +279,7 @@ int r520_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rv370_pcie_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 5966027aa967..26947e8dadcb 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -184,7 +184,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
 void r600_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i;
+	int i, r;
 
 	/* Disable all tables */
 	for (i = 0; i < 7; i++)
@@ -212,8 +212,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -1436,10 +1440,16 @@ int r600_ring_test(struct radeon_device *rdev)
 
 void r600_wb_disable(struct radeon_device *rdev)
 {
+	int r;
+
 	WREG32(SCRATCH_UMSK, 0);
 	if (rdev->wb.wb_obj) {
-		radeon_object_kunmap(rdev->wb.wb_obj);
-		radeon_object_unpin(rdev->wb.wb_obj);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0))
+			return;
+		radeon_bo_kunmap(rdev->wb.wb_obj);
+		radeon_bo_unpin(rdev->wb.wb_obj);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
 	}
 }
 
@@ -1447,7 +1457,7 @@ void r600_wb_fini(struct radeon_device *rdev)
 {
 	r600_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
-		radeon_object_unref(&rdev->wb.wb_obj);
+		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
 	}
@@ -1458,22 +1468,29 @@ int r600_wb_enable(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
-				RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
+		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
 		if (r) {
-			dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
+			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			r600_wb_fini(rdev);
 			return r;
 		}
-		r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
 				&rdev->wb.gpu_addr);
 		if (r) {
-			dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
 			r600_wb_fini(rdev);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
 		if (r) {
-			dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
 			r600_wb_fini(rdev);
 			return r;
 		}
@@ -1563,10 +1580,14 @@ int r600_startup(struct radeon_device *rdev)
 	}
 	r600_gpu_init(rdev);
 
-	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-			      &rdev->r600_blit.shader_gpu_addr);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+			&rdev->r600_blit.shader_gpu_addr);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	if (r) {
-		DRM_ERROR("failed to pin blit object %d\n", r);
+		dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
 		return r;
 	}
 
@@ -1639,13 +1660,19 @@ int r600_resume(struct radeon_device *rdev)
 
 int r600_suspend(struct radeon_device *rdev)
 {
+	int r;
+
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
 	rdev->cp.ready = false;
 	r600_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
-	radeon_object_unpin(rdev->r600_blit.shader_obj);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	radeon_bo_unpin(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	return 0;
 }
 
@@ -1710,7 +1737,7 @@ int r600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 
@@ -1782,7 +1809,7 @@ void r600_fini(struct radeon_device *rdev)
 	radeon_clocks_fini(rdev);
 	if (rdev->flags & RADEON_IS_AGP)
 		radeon_agp_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -1897,24 +1924,28 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
 	rdev->ih.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->ih.ring_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, rdev->ih.ring_size,
+		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
 				     true,
 				     RADEON_GEM_DOMAIN_GTT,
-				     false,
 				     &rdev->ih.ring_obj);
 		if (r) {
 			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->ih.ring_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-				      &rdev->ih.gpu_addr);
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->ih.ring_obj,
+				  RADEON_GEM_DOMAIN_GTT,
+				  &rdev->ih.gpu_addr);
 		if (r) {
+			radeon_bo_unreserve(rdev->ih.ring_obj);
 			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->ih.ring_obj,
+		r = radeon_bo_kmap(rdev->ih.ring_obj,
 				   (void **)&rdev->ih.ring);
+		radeon_bo_unreserve(rdev->ih.ring_obj);
 		if (r) {
 			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
 			return r;
@@ -1928,10 +1959,15 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
 
 static void r600_ih_ring_fini(struct radeon_device *rdev)
 {
+	int r;
 	if (rdev->ih.ring_obj) {
-		radeon_object_kunmap(rdev->ih.ring_obj);
-		radeon_object_unpin(rdev->ih.ring_obj);
-		radeon_object_unref(&rdev->ih.ring_obj);
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ih.ring_obj);
+			radeon_bo_unpin(rdev->ih.ring_obj);
+			radeon_bo_unreserve(rdev->ih.ring_obj);
+		}
+		radeon_bo_unref(&rdev->ih.ring_obj);
 		rdev->ih.ring = NULL;
 		rdev->ih.ring_obj = NULL;
 	}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index c20909c34e8a..9aecafb51b66 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -473,9 +473,8 @@ int r600_blit_init(struct radeon_device *rdev)
 	obj_size += r6xx_ps_size * 4;
 	obj_size = ALIGN(obj_size, 256);
 
-	r = radeon_object_create(rdev, NULL, obj_size,
-				 true, RADEON_GEM_DOMAIN_VRAM,
-				 false, &rdev->r600_blit.shader_obj);
+	r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+				&rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("r600 failed to allocate shader\n");
 		return r;
@@ -485,12 +484,14 @@ int r600_blit_init(struct radeon_device *rdev)
 		  obj_size,
 		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
 
-	r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
 	if (r) {
 		DRM_ERROR("failed to map blit object %d\n", r);
 		return r;
 	}
-
 	if (rdev->family >= CHIP_RV770)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset,
 			    r7xx_default_state, rdev->r600_blit.state_len * 4);
@@ -500,19 +501,26 @@ int r600_blit_init(struct radeon_device *rdev)
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-
-
 	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
 	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
-
-	radeon_object_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	return 0;
 }
 
 void r600_blit_fini(struct radeon_device *rdev)
 {
-	radeon_object_unpin(rdev->r600_blit.shader_obj);
-	radeon_object_unref(&rdev->r600_blit.shader_obj);
+	int r;
+
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0)) {
+		dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
+		goto out_unref;
+	}
+	radeon_bo_unpin(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+out_unref:
+	radeon_bo_unref(&rdev->r600_blit.shader_obj);
 }
 
 int r600_vb_ib_get(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index bdad153953e6..57416d2b9650 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -28,8 +28,6 @@
 #ifndef __RADEON_H__
 #define __RADEON_H__
 
-#include "radeon_object.h"
-
 /* TODO: Here are things that needs to be done :
  * - surface allocator & initializer : (bit like scratch reg) should
  *   initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
@@ -67,6 +65,11 @@
 #include <linux/list.h>
 #include <linux/kref.h>
 
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
 #include "radeon_family.h"
 #include "radeon_mode.h"
 #include "radeon_reg.h"
@@ -186,76 +189,60 @@ void radeon_fence_unref(struct radeon_fence **fence);
  * Tiling registers
  */
 struct radeon_surface_reg {
-	struct radeon_object *robj;
+	struct radeon_bo *bo;
 };
 
 #define RADEON_GEM_MAX_SURFACES 8
 
 /*
- * Radeon buffer.
+ * TTM.
  */
-struct radeon_object;
+struct radeon_mman {
+	struct ttm_bo_global_ref bo_global_ref;
+	struct ttm_global_reference mem_global_ref;
+	bool mem_global_referenced;
+	struct ttm_bo_device bdev;
+};
+
+struct radeon_bo {
+	/* Protected by gem.mutex */
+	struct list_head list;
+	/* Protected by tbo.reserved */
+	struct ttm_buffer_object tbo;
+	struct ttm_bo_kmap_obj kmap;
+	unsigned pin_count;
+	void *kptr;
+	u32 tiling_flags;
+	u32 pitch;
+	int surface_reg;
+	/* Constant after initialization */
+	struct radeon_device *rdev;
+	struct drm_gem_object *gobj;
+};
 
-struct radeon_object_list {
+struct radeon_bo_list {
 	struct list_head list;
-	struct radeon_object *robj;
+	struct radeon_bo *bo;
 	uint64_t gpu_offset;
 	unsigned rdomain;
 	unsigned wdomain;
-	uint32_t tiling_flags;
+	u32 tiling_flags;
 };
 
-int radeon_object_init(struct radeon_device *rdev);
-void radeon_object_fini(struct radeon_device *rdev);
-int radeon_object_create(struct radeon_device *rdev,
-			 struct drm_gem_object *gobj,
-			 unsigned long size,
-			 bool kernel,
-			 uint32_t domain,
-			 bool interruptible,
-			 struct radeon_object **robj_ptr);
-int radeon_object_kmap(struct radeon_object *robj, void **ptr);
-void radeon_object_kunmap(struct radeon_object *robj);
-void radeon_object_unref(struct radeon_object **robj);
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
-		      uint64_t *gpu_addr);
-void radeon_object_unpin(struct radeon_object *robj);
-int radeon_object_wait(struct radeon_object *robj);
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
-int radeon_object_evict_vram(struct radeon_device *rdev);
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
-void radeon_object_force_delete(struct radeon_device *rdev);
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
-				   struct list_head *head);
-int radeon_object_list_validate(struct list_head *head, void *fence);
-void radeon_object_list_unvalidate(struct list_head *head);
-void radeon_object_list_clean(struct list_head *head);
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
-			     struct vm_area_struct *vma);
-unsigned long radeon_object_size(struct radeon_object *robj);
-void radeon_object_clear_surface_reg(struct radeon_object *robj);
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
-			       bool force_drop);
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
-				    uint32_t tiling_flags, uint32_t pitch);
-void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   struct ttm_mem_reg *mem);
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 /*
  * GEM objects.
  */
 struct radeon_gem {
+	struct mutex mutex;
 	struct list_head objects;
 };
 
 int radeon_gem_init(struct radeon_device *rdev);
 void radeon_gem_fini(struct radeon_device *rdev);
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
 			     int alignment, int initial_domain,
 			     bool discardable, bool kernel,
-			     bool interruptible,
-			     struct drm_gem_object **obj);
+			     struct drm_gem_object **obj);
 int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
 			  uint64_t *gpu_addr);
 void radeon_gem_object_unpin(struct drm_gem_object *obj);
@@ -271,7 +258,7 @@ struct radeon_gart_table_ram {
 };
 
 struct radeon_gart_table_vram {
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	volatile uint32_t *ptr;
 };
 
@@ -379,7 +366,7 @@ struct radeon_ib {
  */
 struct radeon_ib_pool {
 	struct mutex mutex;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	struct list_head scheduled_ibs;
 	struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
 	bool ready;
@@ -387,7 +374,7 @@ struct radeon_ib_pool {
 };
 
 struct radeon_cp {
-	struct radeon_object *ring_obj;
+	struct radeon_bo *ring_obj;
 	volatile uint32_t *ring;
 	unsigned rptr;
 	unsigned wptr;
@@ -406,7 +393,7 @@ struct radeon_cp {
  * R6xx+ IH ring
  */
 struct r600_ih {
-	struct radeon_object *ring_obj;
+	struct radeon_bo *ring_obj;
 	volatile uint32_t *ring;
 	unsigned rptr;
 	unsigned wptr;
@@ -420,7 +407,7 @@ struct r600_ih {
 };
 
 struct r600_blit {
-	struct radeon_object *shader_obj;
+	struct radeon_bo *shader_obj;
 	u64 shader_gpu_addr;
 	u32 vs_offset, ps_offset;
 	u32 state_offset;
@@ -450,8 +437,8 @@ void radeon_ring_fini(struct radeon_device *rdev);
  */
 struct radeon_cs_reloc {
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
-	struct radeon_object_list lobj;
+	struct radeon_bo *robj;
+	struct radeon_bo_list lobj;
 	uint32_t handle;
 	uint32_t flags;
 };
@@ -547,7 +534,7 @@ void radeon_agp_fini(struct radeon_device *rdev);
  * Writeback
  */
 struct radeon_wb {
-	struct radeon_object *wb_obj;
+	struct radeon_bo *wb_obj;
 	volatile uint32_t *wb;
 	uint64_t gpu_addr;
 };
@@ -772,9 +759,9 @@ struct radeon_device {
 	uint8_t *bios;
 	bool is_atom_bios;
 	uint16_t bios_header_start;
-	struct radeon_object *stollen_vga_memory;
+	struct radeon_bo *stollen_vga_memory;
 	struct fb_info *fbdev_info;
-	struct radeon_object *fbdev_robj;
+	struct radeon_bo *fbdev_rbo;
 	struct radeon_framebuffer *fbdev_rfb;
 	/* Register mmio */
 	resource_size_t rmmio_base;
@@ -852,6 +839,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
 	}
 }
 
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
 
 /*
  * Registers read & write functions.
@@ -1046,7 +1037,7 @@ extern int r100_cp_reset(struct radeon_device *rdev);
 extern void r100_vga_render_disable(struct radeon_device *rdev);
 extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 						struct radeon_cs_packet *pkt,
-						struct radeon_object *robj);
+						struct radeon_bo *robj);
 extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 				 struct radeon_cs_packet *pkt,
 				 const unsigned *auth, unsigned n,
@@ -1138,4 +1129,6 @@ extern void r600_irq_fini(struct radeon_device *rdev);
 extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_irq_set(struct radeon_device *rdev);
 
+#include "radeon_object.h"
+
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 10bd50a7db87..4ddfd4b5bc51 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -29,8 +29,8 @@
 void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 			   unsigned sdomain, unsigned ddomain)
 {
-	struct radeon_object *dobj = NULL;
-	struct radeon_object *sobj = NULL;
+	struct radeon_bo *dobj = NULL;
+	struct radeon_bo *sobj = NULL;
 	struct radeon_fence *fence = NULL;
 	uint64_t saddr, daddr;
 	unsigned long start_jiffies;
@@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 
 	size = bsize;
 	n = 1024;
-	r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
+	r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_pin(sobj, sdomain, &saddr);
+	r = radeon_bo_reserve(sobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(sobj, sdomain, &saddr);
+	radeon_bo_unreserve(sobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
+	r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_pin(dobj, ddomain, &daddr);
+	r = radeon_bo_reserve(dobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(dobj, ddomain, &daddr);
+	radeon_bo_unreserve(dobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 	}
 out_cleanup:
 	if (sobj) {
-		radeon_object_unpin(sobj);
-		radeon_object_unref(&sobj);
+		r = radeon_bo_reserve(sobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(sobj);
+			radeon_bo_unreserve(sobj);
+		}
+		radeon_bo_unref(&sobj);
 	}
 	if (dobj) {
-		radeon_object_unpin(dobj);
-		radeon_object_unref(&dobj);
+		r = radeon_bo_reserve(dobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(dobj);
+			radeon_bo_unreserve(dobj);
+		}
+		radeon_bo_unref(&dobj);
 	}
 	if (fence) {
 		radeon_fence_unref(&fence);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5ab2cf96a264..65590a0f1d93 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		}
 		p->relocs_ptr[i] = &p->relocs[i];
 		p->relocs[i].robj = p->relocs[i].gobj->driver_private;
-		p->relocs[i].lobj.robj = p->relocs[i].robj;
+		p->relocs[i].lobj.bo = p->relocs[i].robj;
 		p->relocs[i].lobj.rdomain = r->read_domains;
 		p->relocs[i].lobj.wdomain = r->write_domain;
 		p->relocs[i].handle = r->handle;
 		p->relocs[i].flags = r->flags;
 		INIT_LIST_HEAD(&p->relocs[i].lobj.list);
-		radeon_object_list_add_object(&p->relocs[i].lobj,
-					      &p->validated);
+		radeon_bo_list_add_object(&p->relocs[i].lobj,
+					  &p->validated);
 		}
 	}
-	return radeon_object_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated, p->ib->fence);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -190,9 +190,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	unsigned i;
 
 	if (error) {
-		radeon_object_list_unvalidate(&parser->validated);
+		radeon_bo_list_unvalidate(&parser->validated,
+					  parser->ib->fence);
 	} else {
-		radeon_object_list_clean(&parser->validated);
+		radeon_bo_list_unreserve(&parser->validated);
 	}
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c962f34c92af..a014ba4cc97c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -564,6 +564,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	mutex_init(&rdev->cp.mutex);
 	if (rdev->family >= CHIP_R600)
 		spin_lock_init(&rdev->ih.lock);
+	mutex_init(&rdev->gem.mutex);
 	rwlock_init(&rdev->fence_drv.lock);
 	INIT_LIST_HEAD(&rdev->gem.objects);
 
@@ -653,6 +654,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_crtc *crtc;
+	int r;
 
 	if (dev == NULL || rdev == NULL) {
 		return -ENODEV;
@@ -663,18 +665,22 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
-		struct radeon_object *robj;
+		struct radeon_bo *robj;
 
 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
 		robj = rfb->obj->driver_private;
-		if (robj != rdev->fbdev_robj) {
-			radeon_object_unpin(robj);
+		if (robj != rdev->fbdev_rbo) {
+			r = radeon_bo_reserve(robj, false);
+			if (unlikely(r == 0)) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
 		}
 	}
 	/* evict vram memory */
-	radeon_object_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev);
 	/* wait for gpu to finish processing current batch */
 	radeon_fence_wait_last(rdev);
 
@@ -682,7 +688,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 
 	radeon_suspend(rdev);
 	/* evict remaining vram memory */
-	radeon_object_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev);
 
 	pci_save_state(dev->pdev);
 	if (state.event == PM_EVENT_SUSPEND) {
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cb2f16a0b8ff..66055b3d8668 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev,
 	struct radeon_framebuffer *rfb;
 	struct drm_mode_fb_cmd mode_cmd;
 	struct drm_gem_object *gobj = NULL;
-	struct radeon_object *robj = NULL;
+	struct radeon_bo *rbo = NULL;
 	struct device *device = &rdev->pdev->dev;
 	int size, aligned_size, ret;
 	u64 fb_gpuaddr;
@@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev,
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
 				       RADEON_GEM_DOMAIN_VRAM,
 				       false, ttm_bo_type_kernel,
-				       false, &gobj);
+				       &gobj);
 	if (ret) {
 		printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
 		       surface_width, surface_height);
 		ret = -ENOMEM;
 		goto out;
 	}
-	robj = gobj->driver_private;
+	rbo = gobj->driver_private;
 
 	if (fb_tiled)
 		tiling_flags = RADEON_TILING_MACRO;
@@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev,
 	}
 #endif
 
-	if (tiling_flags)
-		radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch);
+	if (tiling_flags) {
+		ret = radeon_bo_set_tiling_flags(rbo,
+					tiling_flags | RADEON_TILING_SURFACE,
+					mode_cmd.pitch);
+		if (ret)
+			dev_err(rdev->dev, "FB failed to set tiling flags\n");
+	}
 	mutex_lock(&rdev->ddev->struct_mutex);
 	fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
 	if (fb == NULL) {
@@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev,
 		ret = -ENOMEM;
 		goto out_unref;
 	}
-	ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+	ret = radeon_bo_reserve(rbo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+	ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+	if (ret) {
+		radeon_bo_unreserve(rbo);
+		goto out_unref;
+	}
+	if (fb_tiled)
+		radeon_bo_check_tiling(rbo, 0, 0);
+	ret = radeon_bo_kmap(rbo, &fbptr);
+	radeon_bo_unreserve(rbo);
 	if (ret) {
-		printk(KERN_ERR "failed to pin framebuffer\n");
-		ret = -ENOMEM;
 		goto out_unref;
 	}
 
@@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev,
 	*fb_p = fb;
 	rfb = to_radeon_framebuffer(fb);
 	rdev->fbdev_rfb = rfb;
-	rdev->fbdev_robj = robj;
+	rdev->fbdev_rbo = rbo;
 
 	info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
 	if (info == NULL) {
@@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev,
 	if (ret)
 		goto out_unref;
 
-	if (fb_tiled)
-		radeon_object_check_tiling(robj, 0, 0);
-
-	ret = radeon_object_kmap(robj, &fbptr);
-	if (ret) {
-		goto out_unref;
-	}
-
-	memset_io(fbptr, 0, aligned_size);
+	memset_io(fbptr, 0xff, aligned_size);
 
 	strcpy(info->fix.id, "radeondrmfb");
 
@@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev,
 	return 0;
 
 out_unref:
-	if (robj) {
-		radeon_object_kunmap(robj);
+	if (rbo) {
+		ret = radeon_bo_reserve(rbo, false);
+		if (likely(ret == 0)) {
+			radeon_bo_kunmap(rbo);
+			radeon_bo_unreserve(rbo);
+		}
 	}
 	if (fb && ret) {
 		list_del(&fb->filp_head);
@@ -335,7 +345,8 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 {
 	struct fb_info *info;
 	struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
-	struct radeon_object *robj;
+	struct radeon_bo *rbo;
+	int r;
 
 	if (!fb) {
 		return -EINVAL;
@@ -343,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 	info = fb->fbdev;
 	if (info) {
 		struct radeon_fb_device *rfbdev = info->par;
-		robj = rfb->obj->driver_private;
+		rbo = rfb->obj->driver_private;
 		unregister_framebuffer(info);
-		radeon_object_kunmap(robj);
-		radeon_object_unpin(robj);
+		r = radeon_bo_reserve(rbo, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rbo);
+			radeon_bo_unpin(rbo);
+			radeon_bo_unreserve(rbo);
+		}
 		drm_fb_helper_free(&rfbdev->helper);
 		framebuffer_release(info);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a68d7566178c..e73d56e83fa6 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj == NULL) {
-		r = radeon_object_create(rdev, NULL,
-					 rdev->gart.table_size,
-					 true,
-					 RADEON_GEM_DOMAIN_VRAM,
-					 false, &rdev->gart.table.vram.robj);
+		r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+					true, RADEON_GEM_DOMAIN_VRAM,
+					&rdev->gart.table.vram.robj);
 		if (r) {
 			return r;
 		}
@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
95 uint64_t gpu_addr; 93 uint64_t gpu_addr;
96 int r; 94 int r;
97 95
98 r = radeon_object_pin(rdev->gart.table.vram.robj, 96 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
99 RADEON_GEM_DOMAIN_VRAM, &gpu_addr); 97 if (unlikely(r != 0))
100 if (r) {
101 radeon_object_unref(&rdev->gart.table.vram.robj);
102 return r; 98 return r;
103 } 99 r = radeon_bo_pin(rdev->gart.table.vram.robj,
104 r = radeon_object_kmap(rdev->gart.table.vram.robj, 100 RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
105 (void **)&rdev->gart.table.vram.ptr);
106 if (r) { 101 if (r) {
107 radeon_object_unpin(rdev->gart.table.vram.robj); 102 radeon_bo_unreserve(rdev->gart.table.vram.robj);
108 radeon_object_unref(&rdev->gart.table.vram.robj);
109 DRM_ERROR("radeon: failed to map gart vram table.\n");
110 return r; 103 return r;
111 } 104 }
105 r = radeon_bo_kmap(rdev->gart.table.vram.robj,
106 (void **)&rdev->gart.table.vram.ptr);
107 if (r)
108 radeon_bo_unpin(rdev->gart.table.vram.robj);
109 radeon_bo_unreserve(rdev->gart.table.vram.robj);
112 rdev->gart.table_addr = gpu_addr; 110 rdev->gart.table_addr = gpu_addr;
113 return 0; 111 return r;
114} 112}
115 113
116void radeon_gart_table_vram_free(struct radeon_device *rdev) 114void radeon_gart_table_vram_free(struct radeon_device *rdev)
117{ 115{
116 int r;
117
118 if (rdev->gart.table.vram.robj == NULL) { 118 if (rdev->gart.table.vram.robj == NULL) {
119 return; 119 return;
120 } 120 }
121 radeon_object_kunmap(rdev->gart.table.vram.robj); 121 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
122 radeon_object_unpin(rdev->gart.table.vram.robj); 122 if (likely(r == 0)) {
123 radeon_object_unref(&rdev->gart.table.vram.robj); 123 radeon_bo_kunmap(rdev->gart.table.vram.robj);
124 radeon_bo_unpin(rdev->gart.table.vram.robj);
125 radeon_bo_unreserve(rdev->gart.table.vram.robj);
126 }
127 radeon_bo_unref(&rdev->gart.table.vram.robj);
124} 128}
125 129
126 130
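/*
 * Sketch (editor's illustration, not part of the patch): the GART-table
 * hunks show the canonical bring-up order under the new API: reserve once,
 * then pin and kmap while the reservation is held, taking the GPU address
 * from radeon_bo_pin() instead of a separate gpu_addr field. A condensed
 * sketch under those assumptions; the function name is hypothetical.
 */
#include "radeon.h"

static int radeon_sketch_table_pin(struct radeon_bo *bo, void **cpu_ptr,
				   u64 *table_addr)
{
	u64 gpu_addr;
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r == 0) {
		r = radeon_bo_kmap(bo, cpu_ptr);
		if (r)
			radeon_bo_unpin(bo);	/* kmap failed: undo the pin */
	}
	radeon_bo_unreserve(bo);
	if (r == 0)
		*table_addr = gpu_addr;
	return r;
}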
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 9c4f895a026e..e927f998f76f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
38 38
39void radeon_gem_object_free(struct drm_gem_object *gobj) 39void radeon_gem_object_free(struct drm_gem_object *gobj)
40{ 40{
41 struct radeon_object *robj = gobj->driver_private; 41 struct radeon_bo *robj = gobj->driver_private;
42 42
43 gobj->driver_private = NULL; 43 gobj->driver_private = NULL;
44 if (robj) { 44 if (robj) {
45 radeon_object_unref(&robj); 45 radeon_bo_unref(&robj);
46 } 46 }
47} 47}
48 48
49int radeon_gem_object_create(struct radeon_device *rdev, int size, 49int radeon_gem_object_create(struct radeon_device *rdev, int size,
50 int alignment, int initial_domain, 50 int alignment, int initial_domain,
51 bool discardable, bool kernel, 51 bool discardable, bool kernel,
52 bool interruptible, 52 struct drm_gem_object **obj)
53 struct drm_gem_object **obj)
54{ 53{
55 struct drm_gem_object *gobj; 54 struct drm_gem_object *gobj;
56 struct radeon_object *robj; 55 struct radeon_bo *robj;
57 int r; 56 int r;
58 57
59 *obj = NULL; 58 *obj = NULL;
@@ -65,8 +64,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
65 if (alignment < PAGE_SIZE) { 64 if (alignment < PAGE_SIZE) {
66 alignment = PAGE_SIZE; 65 alignment = PAGE_SIZE;
67 } 66 }
68 r = radeon_object_create(rdev, gobj, size, kernel, initial_domain, 67 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
69 interruptible, &robj);
70 if (r) { 68 if (r) {
71 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", 69 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
72 size, initial_domain, alignment); 70 size, initial_domain, alignment);
@@ -83,33 +81,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
83int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, 81int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
84 uint64_t *gpu_addr) 82 uint64_t *gpu_addr)
85{ 83{
86 struct radeon_object *robj = obj->driver_private; 84 struct radeon_bo *robj = obj->driver_private;
87 uint32_t flags; 85 int r;
88 86
89 switch (pin_domain) { 87 r = radeon_bo_reserve(robj, false);
90 case RADEON_GEM_DOMAIN_VRAM: 88 if (unlikely(r != 0))
91 flags = TTM_PL_FLAG_VRAM; 89 return r;
92 break; 90 r = radeon_bo_pin(robj, pin_domain, gpu_addr);
93 case RADEON_GEM_DOMAIN_GTT: 91 radeon_bo_unreserve(robj);
94 flags = TTM_PL_FLAG_TT; 92 return r;
95 break;
96 default:
97 flags = TTM_PL_FLAG_SYSTEM;
98 break;
99 }
100 return radeon_object_pin(robj, flags, gpu_addr);
101} 93}
102 94
103void radeon_gem_object_unpin(struct drm_gem_object *obj) 95void radeon_gem_object_unpin(struct drm_gem_object *obj)
104{ 96{
105 struct radeon_object *robj = obj->driver_private; 97 struct radeon_bo *robj = obj->driver_private;
106 radeon_object_unpin(robj); 98 int r;
99
100 r = radeon_bo_reserve(robj, false);
101 if (likely(r == 0)) {
102 radeon_bo_unpin(robj);
103 radeon_bo_unreserve(robj);
104 }
107} 105}
108 106
109int radeon_gem_set_domain(struct drm_gem_object *gobj, 107int radeon_gem_set_domain(struct drm_gem_object *gobj,
110 uint32_t rdomain, uint32_t wdomain) 108 uint32_t rdomain, uint32_t wdomain)
111{ 109{
112 struct radeon_object *robj; 110 struct radeon_bo *robj;
113 uint32_t domain; 111 uint32_t domain;
114 int r; 112 int r;
115 113
@@ -127,11 +125,12 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
127 } 125 }
128 if (domain == RADEON_GEM_DOMAIN_CPU) { 126 if (domain == RADEON_GEM_DOMAIN_CPU) {
129 /* Asking for cpu access: wait for the object to go idle */ 127 /* Asking for cpu access: wait for the object to go idle */
130 r = radeon_object_wait(robj); 128 r = radeon_bo_wait(robj, NULL, false);
131 if (r) { 129 if (r) {
132 printk(KERN_ERR "Failed to wait for object !\n"); 130 printk(KERN_ERR "Failed to wait for object !\n");
133 return r; 131 return r;
134 } 132 }
133 radeon_hdp_flush(robj->rdev);
135 } 134 }
136 return 0; 135 return 0;
137} 136}
@@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev)
144 143
145void radeon_gem_fini(struct radeon_device *rdev) 144void radeon_gem_fini(struct radeon_device *rdev)
146{ 145{
147 radeon_object_force_delete(rdev); 146 radeon_bo_force_delete(rdev);
148} 147}
149 148
150 149
@@ -160,9 +159,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
160 args->vram_size = rdev->mc.real_vram_size; 159 args->vram_size = rdev->mc.real_vram_size;
161 args->vram_visible = rdev->mc.real_vram_size; 160 args->vram_visible = rdev->mc.real_vram_size;
162 if (rdev->stollen_vga_memory) 161 if (rdev->stollen_vga_memory)
163 args->vram_visible -= radeon_object_size(rdev->stollen_vga_memory); 162 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
164 if (rdev->fbdev_robj) 163 if (rdev->fbdev_rbo)
165 args->vram_visible -= radeon_object_size(rdev->fbdev_robj); 164 args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
166 args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 - 165 args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
167 RADEON_IB_POOL_SIZE*64*1024; 166 RADEON_IB_POOL_SIZE*64*1024;
168 return 0; 167 return 0;
@@ -196,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
196 /* create a gem object to contain this object in */ 195 /* create a gem object to contain this object in */
197 args->size = roundup(args->size, PAGE_SIZE); 196 args->size = roundup(args->size, PAGE_SIZE);
198 r = radeon_gem_object_create(rdev, args->size, args->alignment, 197 r = radeon_gem_object_create(rdev, args->size, args->alignment,
199 args->initial_domain, false, 198 args->initial_domain, false,
200 false, true, &gobj); 199 false, &gobj);
201 if (r) { 200 if (r) {
202 return r; 201 return r;
203 } 202 }
@@ -222,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
222 * just validate the BO into a certain domain */ 221 * just validate the BO into a certain domain */
223 struct drm_radeon_gem_set_domain *args = data; 222 struct drm_radeon_gem_set_domain *args = data;
224 struct drm_gem_object *gobj; 223 struct drm_gem_object *gobj;
225 struct radeon_object *robj; 224 struct radeon_bo *robj;
226 int r; 225 int r;
227 226
228 /* for now if someone requests domain CPU - 227 /* for now if someone requests domain CPU -
@@ -248,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
248{ 247{
249 struct drm_radeon_gem_mmap *args = data; 248 struct drm_radeon_gem_mmap *args = data;
250 struct drm_gem_object *gobj; 249 struct drm_gem_object *gobj;
251 struct radeon_object *robj; 250 struct radeon_bo *robj;
252 int r;
253 251
254 gobj = drm_gem_object_lookup(dev, filp, args->handle); 252 gobj = drm_gem_object_lookup(dev, filp, args->handle);
255 if (gobj == NULL) { 253 if (gobj == NULL) {
256 return -EINVAL; 254 return -EINVAL;
257 } 255 }
258 robj = gobj->driver_private; 256 robj = gobj->driver_private;
259 r = radeon_object_mmap(robj, &args->addr_ptr); 257 args->addr_ptr = radeon_bo_mmap_offset(robj);
260 mutex_lock(&dev->struct_mutex); 258 mutex_lock(&dev->struct_mutex);
261 drm_gem_object_unreference(gobj); 259 drm_gem_object_unreference(gobj);
262 mutex_unlock(&dev->struct_mutex); 260 mutex_unlock(&dev->struct_mutex);
263 return r; 261 return 0;
264} 262}
265 263
266int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 264int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -268,7 +266,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
268{ 266{
269 struct drm_radeon_gem_busy *args = data; 267 struct drm_radeon_gem_busy *args = data;
270 struct drm_gem_object *gobj; 268 struct drm_gem_object *gobj;
271 struct radeon_object *robj; 269 struct radeon_bo *robj;
272 int r; 270 int r;
273 uint32_t cur_placement; 271 uint32_t cur_placement;
274 272
@@ -277,7 +275,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
277 return -EINVAL; 275 return -EINVAL;
278 } 276 }
279 robj = gobj->driver_private; 277 robj = gobj->driver_private;
280 r = radeon_object_busy_domain(robj, &cur_placement); 278 r = radeon_bo_wait(robj, &cur_placement, true);
281 switch (cur_placement) { 279 switch (cur_placement) {
282 case TTM_PL_VRAM: 280 case TTM_PL_VRAM:
283 args->domain = RADEON_GEM_DOMAIN_VRAM; 281 args->domain = RADEON_GEM_DOMAIN_VRAM;
@@ -301,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
301{ 299{
302 struct drm_radeon_gem_wait_idle *args = data; 300 struct drm_radeon_gem_wait_idle *args = data;
303 struct drm_gem_object *gobj; 301 struct drm_gem_object *gobj;
304 struct radeon_object *robj; 302 struct radeon_bo *robj;
305 int r; 303 int r;
306 304
307 gobj = drm_gem_object_lookup(dev, filp, args->handle); 305 gobj = drm_gem_object_lookup(dev, filp, args->handle);
@@ -309,10 +307,11 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
309 return -EINVAL; 307 return -EINVAL;
310 } 308 }
311 robj = gobj->driver_private; 309 robj = gobj->driver_private;
312 r = radeon_object_wait(robj); 310 r = radeon_bo_wait(robj, NULL, false);
313 mutex_lock(&dev->struct_mutex); 311 mutex_lock(&dev->struct_mutex);
314 drm_gem_object_unreference(gobj); 312 drm_gem_object_unreference(gobj);
315 mutex_unlock(&dev->struct_mutex); 313 mutex_unlock(&dev->struct_mutex);
314 radeon_hdp_flush(robj->rdev);
316 return r; 315 return r;
317} 316}
318 317
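/*
 * Sketch (editor's illustration, not part of the patch):
 * radeon_object_wait() and radeon_object_busy_domain() collapse into one
 * helper, radeon_bo_wait(bo, mem_type, no_wait): a NULL mem_type skips the
 * placement query, and no_wait=true turns the wait into a busy poll as in
 * the busy ioctl above, while the HDP flush moves to the call sites. Two
 * short uses, assuming the signatures shown in this patch:
 */
#include "radeon.h"

static int radeon_sketch_wait_idle(struct radeon_bo *bo)
{
	int r = radeon_bo_wait(bo, NULL, false);	/* block until idle */

	radeon_hdp_flush(bo->rdev);	/* flush HDP cache for CPU access */
	return r;
}

static int radeon_sketch_query_busy(struct radeon_bo *bo, u32 *placement)
{
	/* returns non-zero (typically -EBUSY) while the GPU still uses it */
	return radeon_bo_wait(bo, placement, true);
}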
@@ -321,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
321{ 320{
322 struct drm_radeon_gem_set_tiling *args = data; 321 struct drm_radeon_gem_set_tiling *args = data;
323 struct drm_gem_object *gobj; 322 struct drm_gem_object *gobj;
324 struct radeon_object *robj; 323 struct radeon_bo *robj;
325 int r = 0; 324 int r = 0;
326 325
327 DRM_DEBUG("%d \n", args->handle); 326 DRM_DEBUG("%d \n", args->handle);
@@ -329,7 +328,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
329 if (gobj == NULL) 328 if (gobj == NULL)
330 return -EINVAL; 329 return -EINVAL;
331 robj = gobj->driver_private; 330 robj = gobj->driver_private;
332 radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); 331 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
333 mutex_lock(&dev->struct_mutex); 332 mutex_lock(&dev->struct_mutex);
334 drm_gem_object_unreference(gobj); 333 drm_gem_object_unreference(gobj);
335 mutex_unlock(&dev->struct_mutex); 334 mutex_unlock(&dev->struct_mutex);
@@ -341,16 +340,19 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
341{ 340{
342 struct drm_radeon_gem_get_tiling *args = data; 341 struct drm_radeon_gem_get_tiling *args = data;
343 struct drm_gem_object *gobj; 342 struct drm_gem_object *gobj;
344 struct radeon_object *robj; 343 struct radeon_bo *rbo;
345 int r = 0; 344 int r = 0;
346 345
347 DRM_DEBUG("\n"); 346 DRM_DEBUG("\n");
348 gobj = drm_gem_object_lookup(dev, filp, args->handle); 347 gobj = drm_gem_object_lookup(dev, filp, args->handle);
349 if (gobj == NULL) 348 if (gobj == NULL)
350 return -EINVAL; 349 return -EINVAL;
351 robj = gobj->driver_private; 350 rbo = gobj->driver_private;
352 radeon_object_get_tiling_flags(robj, &args->tiling_flags, 351 r = radeon_bo_reserve(rbo, false);
353 &args->pitch); 352 if (unlikely(r != 0))
353 return r;
354 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
355 radeon_bo_unreserve(rbo);
354 mutex_lock(&dev->struct_mutex); 356 mutex_lock(&dev->struct_mutex);
355 drm_gem_object_unreference(gobj); 357 drm_gem_object_unreference(gobj);
356 mutex_unlock(&dev->struct_mutex); 358 mutex_unlock(&dev->struct_mutex);
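/*
 * Sketch (editor's illustration, not part of the patch): the GEM layer no
 * longer maps domains to TTM placement flags itself;
 * radeon_gem_object_pin()/unpin() become thin reserve-bracketed wrappers
 * around radeon_bo_pin()/unpin(). A sketch of that wrapper shape, using
 * only calls from this patch; the function name is made up.
 */
#include "radeon.h"

static int radeon_sketch_gem_pin(struct drm_gem_object *gobj,
				 u32 domain, u64 *gpu_addr)
{
	struct radeon_bo *rbo = gobj->driver_private;
	int r;

	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rbo, domain, gpu_addr);	/* takes a GEM domain */
	radeon_bo_unreserve(rbo);
	return r;
}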
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index c5c5c022e8c0..c1e1706d06b4 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -400,12 +400,14 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
400 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 400 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
401 struct radeon_framebuffer *radeon_fb; 401 struct radeon_framebuffer *radeon_fb;
402 struct drm_gem_object *obj; 402 struct drm_gem_object *obj;
403 struct radeon_bo *rbo;
403 uint64_t base; 404 uint64_t base;
404 uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; 405 uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
405 uint32_t crtc_pitch, pitch_pixels; 406 uint32_t crtc_pitch, pitch_pixels;
406 uint32_t tiling_flags; 407 uint32_t tiling_flags;
407 int format; 408 int format;
408 uint32_t gen_cntl_reg, gen_cntl_val; 409 uint32_t gen_cntl_reg, gen_cntl_val;
410 int r;
409 411
410 DRM_DEBUG("\n"); 412 DRM_DEBUG("\n");
411 /* no fb bound */ 413 /* no fb bound */
@@ -436,10 +438,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
436 return false; 438 return false;
437 } 439 }
438 440
441 /* Pin framebuffer & get tiling information */
439 obj = radeon_fb->obj; 442 obj = radeon_fb->obj;
440 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { 443 rbo = obj->driver_private;
444 r = radeon_bo_reserve(rbo, false);
445 if (unlikely(r != 0))
446 return r;
447 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
448 if (unlikely(r != 0)) {
449 radeon_bo_unreserve(rbo);
441 return -EINVAL; 450 return -EINVAL;
442 } 451 }
452 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
453 radeon_bo_unreserve(rbo);
454 if (tiling_flags & RADEON_TILING_MICRO)
455 DRM_ERROR("trying to scanout microtiled buffer\n");
456
443 /* if scanout was in GTT this really wouldn't work */ 457 /* if scanout was in GTT this really wouldn't work */
444 /* crtc offset is from display base addr not FB location */ 458 /* crtc offset is from display base addr not FB location */
445 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; 459 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
@@ -454,10 +468,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
454 (crtc->fb->bits_per_pixel * 8)); 468 (crtc->fb->bits_per_pixel * 8));
455 crtc_pitch |= crtc_pitch << 16; 469 crtc_pitch |= crtc_pitch << 16;
456 470
457 radeon_object_get_tiling_flags(obj->driver_private,
458 &tiling_flags, NULL);
459 if (tiling_flags & RADEON_TILING_MICRO)
460 DRM_ERROR("trying to scanout microtiled buffer\n");
461 471
462 if (tiling_flags & RADEON_TILING_MACRO) { 472 if (tiling_flags & RADEON_TILING_MACRO) {
463 if (ASIC_IS_R300(rdev)) 473 if (ASIC_IS_R300(rdev))
@@ -535,7 +545,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
535 545
536 if (old_fb && old_fb != crtc->fb) { 546 if (old_fb && old_fb != crtc->fb) {
537 radeon_fb = to_radeon_framebuffer(old_fb); 547 radeon_fb = to_radeon_framebuffer(old_fb);
538 radeon_gem_object_unpin(radeon_fb->obj); 548 rbo = radeon_fb->obj->driver_private;
549 r = radeon_bo_reserve(rbo, false);
550 if (unlikely(r != 0))
551 return r;
552 radeon_bo_unpin(rbo);
553 radeon_bo_unreserve(rbo);
539 } 554 }
540 555
541 /* Bytes per pixel may have changed */ 556 /* Bytes per pixel may have changed */
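/*
 * Sketch (editor's illustration, not part of the patch): both CRTC
 * set_base paths now pin the scanout buffer and read its tiling flags
 * under one reservation, since radeon_bo_get_tiling_flags() asserts that
 * the bo is reserved. A sketch of that sequence; the helper name is
 * illustrative.
 */
#include "radeon.h"

static int radeon_sketch_pin_scanout(struct radeon_bo *rbo, u64 *base,
				     u32 *tiling_flags)
{
	int r;

	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, base);
	if (likely(r == 0))
		radeon_bo_get_tiling_flags(rbo, tiling_flags, NULL);
	radeon_bo_unreserve(rbo);
	return r;
}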
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 98835f51e35e..bec494384825 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,74 +34,32 @@
34#include "radeon_drm.h" 34#include "radeon_drm.h"
35#include "radeon.h" 35#include "radeon.h"
36 36
37struct radeon_object {
38 struct ttm_buffer_object tobj;
39 struct list_head list;
40 struct radeon_device *rdev;
41 struct drm_gem_object *gobj;
42 struct ttm_bo_kmap_obj kmap;
43 unsigned pin_count;
44 uint64_t gpu_addr;
45 void *kptr;
46 bool is_iomem;
47 uint32_t tiling_flags;
48 uint32_t pitch;
49 int surface_reg;
50};
51 37
52int radeon_ttm_init(struct radeon_device *rdev); 38int radeon_ttm_init(struct radeon_device *rdev);
53void radeon_ttm_fini(struct radeon_device *rdev); 39void radeon_ttm_fini(struct radeon_device *rdev);
40static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
54 41
55/* 42/*
56 * To exclude mutual BO access we rely on bo_reserve exclusion, as all 43 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
57 * functions call it. 44 * functions call it.
58 */ 45 */
59 46
60static int radeon_object_reserve(struct radeon_object *robj, bool interruptible) 47static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
61{ 48{
62 return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0); 49 struct radeon_bo *bo;
63}
64 50
65static void radeon_object_unreserve(struct radeon_object *robj) 51 bo = container_of(tbo, struct radeon_bo, tbo);
66{ 52 mutex_lock(&bo->rdev->gem.mutex);
67 ttm_bo_unreserve(&robj->tobj); 53 list_del_init(&bo->list);
54 mutex_unlock(&bo->rdev->gem.mutex);
55 radeon_bo_clear_surface_reg(bo);
56 kfree(bo);
68} 57}
69 58
70static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) 59static inline u32 radeon_ttm_flags_from_domain(u32 domain)
71{ 60{
72 struct radeon_object *robj; 61 u32 flags = 0;
73
74 robj = container_of(tobj, struct radeon_object, tobj);
75 list_del_init(&robj->list);
76 radeon_object_clear_surface_reg(robj);
77 kfree(robj);
78}
79
80static inline void radeon_object_gpu_addr(struct radeon_object *robj)
81{
82 /* Default gpu address */
83 robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
84 if (robj->tobj.mem.mm_node == NULL) {
85 return;
86 }
87 robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
88 switch (robj->tobj.mem.mem_type) {
89 case TTM_PL_VRAM:
90 robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
91 break;
92 case TTM_PL_TT:
93 robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
94 break;
95 default:
96 DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
97 robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
98 return;
99 }
100}
101 62
102static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
103{
104 uint32_t flags = 0;
105 if (domain & RADEON_GEM_DOMAIN_VRAM) { 63 if (domain & RADEON_GEM_DOMAIN_VRAM) {
106 flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; 64 flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
107 } 65 }
@@ -117,17 +75,13 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
117 return flags; 75 return flags;
118} 76}
119 77
120int radeon_object_create(struct radeon_device *rdev, 78int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
121 struct drm_gem_object *gobj, 79 unsigned long size, bool kernel, u32 domain,
122 unsigned long size, 80 struct radeon_bo **bo_ptr)
123 bool kernel,
124 uint32_t domain,
125 bool interruptible,
126 struct radeon_object **robj_ptr)
127{ 81{
128 struct radeon_object *robj; 82 struct radeon_bo *bo;
129 enum ttm_bo_type type; 83 enum ttm_bo_type type;
130 uint32_t flags; 84 u32 flags;
131 int r; 85 int r;
132 86
133 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { 87 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,207 +92,140 @@ int radeon_object_create(struct radeon_device *rdev,
138 } else { 92 } else {
139 type = ttm_bo_type_device; 93 type = ttm_bo_type_device;
140 } 94 }
141 *robj_ptr = NULL; 95 *bo_ptr = NULL;
142 robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); 96 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
143 if (robj == NULL) { 97 if (bo == NULL)
144 return -ENOMEM; 98 return -ENOMEM;
145 } 99 bo->rdev = rdev;
146 robj->rdev = rdev; 100 bo->gobj = gobj;
147 robj->gobj = gobj; 101 bo->surface_reg = -1;
148 robj->surface_reg = -1; 102 INIT_LIST_HEAD(&bo->list);
149 INIT_LIST_HEAD(&robj->list); 103
150 104 flags = radeon_ttm_flags_from_domain(domain);
151 flags = radeon_object_flags_from_domain(domain); 105retry:
152 r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags, 106 r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
153 0, 0, false, NULL, size, 107 flags, 0, 0, true, NULL, size,
154 &radeon_ttm_object_object_destroy); 108 &radeon_ttm_bo_destroy);
155 if (unlikely(r != 0)) { 109 if (unlikely(r != 0)) {
110 if (r == -ERESTART)
111 goto retry;
156 /* ttm calls radeon_ttm_object_object_destroy if an error happens */ 112 /* ttm calls radeon_ttm_object_object_destroy if an error happens */
157 DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n", 113 dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n",
158 size, flags, 0); 114 size, flags);
159 return r; 115 return r;
160 } 116 }
161 *robj_ptr = robj; 117 *bo_ptr = bo;
162 if (gobj) { 118 if (gobj) {
163 list_add_tail(&robj->list, &rdev->gem.objects); 119 mutex_lock(&bo->rdev->gem.mutex);
120 list_add_tail(&bo->list, &rdev->gem.objects);
121 mutex_unlock(&bo->rdev->gem.mutex);
164 } 122 }
165 return 0; 123 return 0;
166} 124}
167 125
168int radeon_object_kmap(struct radeon_object *robj, void **ptr) 126int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
169{ 127{
128 bool is_iomem;
170 int r; 129 int r;
171 130
172 spin_lock(&robj->tobj.lock); 131 if (bo->kptr) {
173 if (robj->kptr) {
174 if (ptr) { 132 if (ptr) {
175 *ptr = robj->kptr; 133 *ptr = bo->kptr;
176 } 134 }
177 spin_unlock(&robj->tobj.lock);
178 return 0; 135 return 0;
179 } 136 }
180 spin_unlock(&robj->tobj.lock); 137 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
181 r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
182 if (r) { 138 if (r) {
183 return r; 139 return r;
184 } 140 }
185 spin_lock(&robj->tobj.lock); 141 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
186 robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
187 spin_unlock(&robj->tobj.lock);
188 if (ptr) { 142 if (ptr) {
189 *ptr = robj->kptr; 143 *ptr = bo->kptr;
190 } 144 }
191 radeon_object_check_tiling(robj, 0, 0); 145 radeon_bo_check_tiling(bo, 0, 0);
192 return 0; 146 return 0;
193} 147}
194 148
195void radeon_object_kunmap(struct radeon_object *robj) 149void radeon_bo_kunmap(struct radeon_bo *bo)
196{ 150{
197 spin_lock(&robj->tobj.lock); 151 if (bo->kptr == NULL)
198 if (robj->kptr == NULL) {
199 spin_unlock(&robj->tobj.lock);
200 return; 152 return;
201 } 153 bo->kptr = NULL;
202 robj->kptr = NULL; 154 radeon_bo_check_tiling(bo, 0, 0);
203 spin_unlock(&robj->tobj.lock); 155 ttm_bo_kunmap(&bo->kmap);
204 radeon_object_check_tiling(robj, 0, 0);
205 ttm_bo_kunmap(&robj->kmap);
206} 156}
207 157
208void radeon_object_unref(struct radeon_object **robj) 158void radeon_bo_unref(struct radeon_bo **bo)
209{ 159{
210 struct ttm_buffer_object *tobj; 160 struct ttm_buffer_object *tbo;
211 161
212 if ((*robj) == NULL) { 162 if ((*bo) == NULL)
213 return; 163 return;
214 } 164 tbo = &((*bo)->tbo);
215 tobj = &((*robj)->tobj); 165 ttm_bo_unref(&tbo);
216 ttm_bo_unref(&tobj); 166 if (tbo == NULL)
217 if (tobj == NULL) { 167 *bo = NULL;
218 *robj = NULL;
219 }
220}
221
222int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
223{
224 *offset = robj->tobj.addr_space_offset;
225 return 0;
226} 168}
227 169
228int radeon_object_pin(struct radeon_object *robj, uint32_t domain, 170int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
229 uint64_t *gpu_addr)
230{ 171{
231 uint32_t flags; 172 u32 flags;
232 uint32_t tmp; 173 u32 tmp;
233 int r; 174 int r;
234 175
235 flags = radeon_object_flags_from_domain(domain); 176 flags = radeon_ttm_flags_from_domain(domain);
236 spin_lock(&robj->tobj.lock); 177 if (bo->pin_count) {
237 if (robj->pin_count) { 178 bo->pin_count++;
238 robj->pin_count++; 179 if (gpu_addr)
239 if (gpu_addr != NULL) { 180 *gpu_addr = radeon_bo_gpu_offset(bo);
240 *gpu_addr = robj->gpu_addr;
241 }
242 spin_unlock(&robj->tobj.lock);
243 return 0; 181 return 0;
244 } 182 }
245 spin_unlock(&robj->tobj.lock); 183 tmp = bo->tbo.mem.placement;
246 r = radeon_object_reserve(robj, false);
247 if (unlikely(r != 0)) {
248 DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
249 return r;
250 }
251 tmp = robj->tobj.mem.placement;
252 ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); 184 ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
253 robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; 185 bo->tbo.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT |
254 r = ttm_buffer_object_validate(&robj->tobj, 186 TTM_PL_MASK_CACHING;
255 robj->tobj.proposed_placement, 187retry:
256 false, false); 188 r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
257 radeon_object_gpu_addr(robj); 189 true, false);
258 if (gpu_addr != NULL) { 190 if (likely(r == 0)) {
259 *gpu_addr = robj->gpu_addr; 191 bo->pin_count = 1;
192 if (gpu_addr != NULL)
193 *gpu_addr = radeon_bo_gpu_offset(bo);
260 } 194 }
261 robj->pin_count = 1;
262 if (unlikely(r != 0)) { 195 if (unlikely(r != 0)) {
263 DRM_ERROR("radeon: failed to pin object.\n"); 196 if (r == -ERESTART)
197 goto retry;
198 dev_err(bo->rdev->dev, "%p pin failed\n", bo);
264 } 199 }
265 radeon_object_unreserve(robj);
266 return r; 200 return r;
267} 201}
268 202
269void radeon_object_unpin(struct radeon_object *robj) 203int radeon_bo_unpin(struct radeon_bo *bo)
270{ 204{
271 uint32_t flags;
272 int r; 205 int r;
273 206
274 spin_lock(&robj->tobj.lock); 207 if (!bo->pin_count) {
275 if (!robj->pin_count) { 208 dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
276 spin_unlock(&robj->tobj.lock); 209 return 0;
277 printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
278 return;
279 }
280 robj->pin_count--;
281 if (robj->pin_count) {
282 spin_unlock(&robj->tobj.lock);
283 return;
284 }
285 spin_unlock(&robj->tobj.lock);
286 r = radeon_object_reserve(robj, false);
287 if (unlikely(r != 0)) {
288 DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
289 return;
290 }
291 flags = robj->tobj.mem.placement;
292 robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
293 r = ttm_buffer_object_validate(&robj->tobj,
294 robj->tobj.proposed_placement,
295 false, false);
296 if (unlikely(r != 0)) {
297 DRM_ERROR("radeon: failed to unpin buffer.\n");
298 }
299 radeon_object_unreserve(robj);
300}
301
302int radeon_object_wait(struct radeon_object *robj)
303{
304 int r = 0;
305
306 /* FIXME: should use block reservation instead */
307 r = radeon_object_reserve(robj, true);
308 if (unlikely(r != 0)) {
309 DRM_ERROR("radeon: failed to reserve object for waiting.\n");
310 return r;
311 }
312 spin_lock(&robj->tobj.lock);
313 if (robj->tobj.sync_obj) {
314 r = ttm_bo_wait(&robj->tobj, true, true, false);
315 } 210 }
316 spin_unlock(&robj->tobj.lock); 211 bo->pin_count--;
317 radeon_object_unreserve(robj); 212 if (bo->pin_count)
318 radeon_hdp_flush(robj->rdev); 213 return 0;
319 return r; 214 bo->tbo.proposed_placement = bo->tbo.mem.placement &
320} 215 ~TTM_PL_FLAG_NO_EVICT;
321 216retry:
322int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement) 217 r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
323{ 218 true, false);
324 int r = 0;
325
326 r = radeon_object_reserve(robj, true);
327 if (unlikely(r != 0)) { 219 if (unlikely(r != 0)) {
328 DRM_ERROR("radeon: failed to reserve object for waiting.\n"); 220 if (r == -ERESTART)
221 goto retry;
222 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
329 return r; 223 return r;
330 } 224 }
331 spin_lock(&robj->tobj.lock); 225 return 0;
332 *cur_placement = robj->tobj.mem.mem_type;
333 if (robj->tobj.sync_obj) {
334 r = ttm_bo_wait(&robj->tobj, true, true, true);
335 }
336 spin_unlock(&robj->tobj.lock);
337 radeon_object_unreserve(robj);
338 return r;
339} 226}
340 227
341int radeon_object_evict_vram(struct radeon_device *rdev) 228int radeon_bo_evict_vram(struct radeon_device *rdev)
342{ 229{
343 if (rdev->flags & RADEON_IS_IGP) { 230 if (rdev->flags & RADEON_IS_IGP) {
344 /* Useless to evict on IGP chips */ 231 /* Useless to evict on IGP chips */
@@ -347,30 +234,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev)
347 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); 234 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
348} 235}
349 236
350void radeon_object_force_delete(struct radeon_device *rdev) 237void radeon_bo_force_delete(struct radeon_device *rdev)
351{ 238{
352 struct radeon_object *robj, *n; 239 struct radeon_bo *bo, *n;
353 struct drm_gem_object *gobj; 240 struct drm_gem_object *gobj;
354 241
355 if (list_empty(&rdev->gem.objects)) { 242 if (list_empty(&rdev->gem.objects)) {
356 return; 243 return;
357 } 244 }
358 DRM_ERROR("Userspace still has active objects !\n"); 245 dev_err(rdev->dev, "Userspace still has active objects !\n");
359 list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) { 246 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
360 mutex_lock(&rdev->ddev->struct_mutex); 247 mutex_lock(&rdev->ddev->struct_mutex);
361 gobj = robj->gobj; 248 gobj = bo->gobj;
362 DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n", 249 dev_err(rdev->dev, "%p %p %lu %lu force free\n",
363 gobj, robj, (unsigned long)gobj->size, 250 gobj, bo, (unsigned long)gobj->size,
364 *((unsigned long *)&gobj->refcount)); 251 *((unsigned long *)&gobj->refcount));
365 list_del_init(&robj->list); 252 mutex_lock(&bo->rdev->gem.mutex);
366 radeon_object_unref(&robj); 253 list_del_init(&bo->list);
254 mutex_unlock(&bo->rdev->gem.mutex);
255 radeon_bo_unref(&bo);
367 gobj->driver_private = NULL; 256 gobj->driver_private = NULL;
368 drm_gem_object_unreference(gobj); 257 drm_gem_object_unreference(gobj);
369 mutex_unlock(&rdev->ddev->struct_mutex); 258 mutex_unlock(&rdev->ddev->struct_mutex);
370 } 259 }
371} 260}
372 261
373int radeon_object_init(struct radeon_device *rdev) 262int radeon_bo_init(struct radeon_device *rdev)
374{ 263{
375 /* Add an MTRR for the VRAM */ 264 /* Add an MTRR for the VRAM */
376 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, 265 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -383,13 +272,13 @@ int radeon_object_init(struct radeon_device *rdev)
383 return radeon_ttm_init(rdev); 272 return radeon_ttm_init(rdev);
384} 273}
385 274
386void radeon_object_fini(struct radeon_device *rdev) 275void radeon_bo_fini(struct radeon_device *rdev)
387{ 276{
388 radeon_ttm_fini(rdev); 277 radeon_ttm_fini(rdev);
389} 278}
390 279
391void radeon_object_list_add_object(struct radeon_object_list *lobj, 280void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
392 struct list_head *head) 281 struct list_head *head)
393{ 282{
394 if (lobj->wdomain) { 283 if (lobj->wdomain) {
395 list_add(&lobj->list, head); 284 list_add(&lobj->list, head);
@@ -398,72 +287,67 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
398 } 287 }
399} 288}
400 289
401int radeon_object_list_reserve(struct list_head *head) 290int radeon_bo_list_reserve(struct list_head *head)
402{ 291{
403 struct radeon_object_list *lobj; 292 struct radeon_bo_list *lobj;
404 int r; 293 int r;
405 294
406 list_for_each_entry(lobj, head, list){ 295 list_for_each_entry(lobj, head, list){
407 if (!lobj->robj->pin_count) { 296 r = radeon_bo_reserve(lobj->bo, false);
408 r = radeon_object_reserve(lobj->robj, true); 297 if (unlikely(r != 0))
409 if (unlikely(r != 0)) { 298 return r;
410 DRM_ERROR("radeon: failed to reserve object.\n");
411 return r;
412 }
413 } else {
414 }
415 } 299 }
416 return 0; 300 return 0;
417} 301}
418 302
419void radeon_object_list_unreserve(struct list_head *head) 303void radeon_bo_list_unreserve(struct list_head *head)
420{ 304{
421 struct radeon_object_list *lobj; 305 struct radeon_bo_list *lobj;
422 306
423 list_for_each_entry(lobj, head, list) { 307 list_for_each_entry(lobj, head, list) {
424 if (!lobj->robj->pin_count) { 308 /* only unreserve objects we successfully reserved */
425 radeon_object_unreserve(lobj->robj); 309 if (radeon_bo_is_reserved(lobj->bo))
426 } 310 radeon_bo_unreserve(lobj->bo);
427 } 311 }
428} 312}
429 313
430int radeon_object_list_validate(struct list_head *head, void *fence) 314int radeon_bo_list_validate(struct list_head *head, void *fence)
431{ 315{
432 struct radeon_object_list *lobj; 316 struct radeon_bo_list *lobj;
433 struct radeon_object *robj; 317 struct radeon_bo *bo;
434 struct radeon_fence *old_fence = NULL; 318 struct radeon_fence *old_fence = NULL;
435 int r; 319 int r;
436 320
437 r = radeon_object_list_reserve(head); 321 r = radeon_bo_list_reserve(head);
438 if (unlikely(r != 0)) { 322 if (unlikely(r != 0)) {
439 radeon_object_list_unreserve(head);
440 return r; 323 return r;
441 } 324 }
442 list_for_each_entry(lobj, head, list) { 325 list_for_each_entry(lobj, head, list) {
443 robj = lobj->robj; 326 bo = lobj->bo;
444 if (!robj->pin_count) { 327 if (!bo->pin_count) {
445 if (lobj->wdomain) { 328 if (lobj->wdomain) {
446 robj->tobj.proposed_placement = 329 bo->tbo.proposed_placement =
447 radeon_object_flags_from_domain(lobj->wdomain); 330 radeon_ttm_flags_from_domain(lobj->wdomain);
448 } else { 331 } else {
449 robj->tobj.proposed_placement = 332 bo->tbo.proposed_placement =
450 radeon_object_flags_from_domain(lobj->rdomain); 333 radeon_ttm_flags_from_domain(lobj->rdomain);
451 } 334 }
452 r = ttm_buffer_object_validate(&robj->tobj, 335retry:
453 robj->tobj.proposed_placement, 336 r = ttm_buffer_object_validate(&bo->tbo,
454 true, false); 337 bo->tbo.proposed_placement,
338 true, false);
455 if (unlikely(r)) { 339 if (unlikely(r)) {
456 DRM_ERROR("radeon: failed to validate.\n"); 340 if (r == -ERESTART)
341 goto retry;
457 return r; 342 return r;
458 } 343 }
459 radeon_object_gpu_addr(robj);
460 } 344 }
461 lobj->gpu_offset = robj->gpu_addr; 345 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
462 lobj->tiling_flags = robj->tiling_flags; 346 lobj->tiling_flags = bo->tiling_flags;
463 if (fence) { 347 if (fence) {
464 old_fence = (struct radeon_fence *)robj->tobj.sync_obj; 348 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
465 robj->tobj.sync_obj = radeon_fence_ref(fence); 349 bo->tbo.sync_obj = radeon_fence_ref(fence);
466 robj->tobj.sync_obj_arg = NULL; 350 bo->tbo.sync_obj_arg = NULL;
467 } 351 }
468 if (old_fence) { 352 if (old_fence) {
469 radeon_fence_unref(&old_fence); 353 radeon_fence_unref(&old_fence);
@@ -472,51 +356,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
472 return 0; 356 return 0;
473} 357}
474 358
475void radeon_object_list_unvalidate(struct list_head *head) 359void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
476{ 360{
477 struct radeon_object_list *lobj; 361 struct radeon_bo_list *lobj;
478 struct radeon_fence *old_fence = NULL; 362 struct radeon_fence *old_fence;
479 363
480 list_for_each_entry(lobj, head, list) { 364 if (fence)
481 old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; 365 list_for_each_entry(lobj, head, list) {
482 lobj->robj->tobj.sync_obj = NULL; 366 old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
483 if (old_fence) { 367 if (old_fence == fence) {
484 radeon_fence_unref(&old_fence); 368 lobj->bo->tbo.sync_obj = NULL;
369 radeon_fence_unref(&old_fence);
370 }
485 } 371 }
486 } 372 radeon_bo_list_unreserve(head);
487 radeon_object_list_unreserve(head);
488}
489
490void radeon_object_list_clean(struct list_head *head)
491{
492 radeon_object_list_unreserve(head);
493} 373}
494 374
495int radeon_object_fbdev_mmap(struct radeon_object *robj, 375int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
496 struct vm_area_struct *vma) 376 struct vm_area_struct *vma)
497{ 377{
498 return ttm_fbdev_mmap(vma, &robj->tobj); 378 return ttm_fbdev_mmap(vma, &bo->tbo);
499} 379}
500 380
501unsigned long radeon_object_size(struct radeon_object *robj) 381static int radeon_bo_get_surface_reg(struct radeon_bo *bo)
502{ 382{
503 return robj->tobj.num_pages << PAGE_SHIFT; 383 struct radeon_device *rdev = bo->rdev;
504}
505
506int radeon_object_get_surface_reg(struct radeon_object *robj)
507{
508 struct radeon_device *rdev = robj->rdev;
509 struct radeon_surface_reg *reg; 384 struct radeon_surface_reg *reg;
510 struct radeon_object *old_object; 385 struct radeon_bo *old_object;
511 int steal; 386 int steal;
512 int i; 387 int i;
513 388
514 if (!robj->tiling_flags) 389 BUG_ON(!atomic_read(&bo->tbo.reserved));
390
391 if (!bo->tiling_flags)
515 return 0; 392 return 0;
516 393
517 if (robj->surface_reg >= 0) { 394 if (bo->surface_reg >= 0) {
518 reg = &rdev->surface_regs[robj->surface_reg]; 395 reg = &rdev->surface_regs[bo->surface_reg];
519 i = robj->surface_reg; 396 i = bo->surface_reg;
520 goto out; 397 goto out;
521 } 398 }
522 399
@@ -524,10 +401,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
524 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 401 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
525 402
526 reg = &rdev->surface_regs[i]; 403 reg = &rdev->surface_regs[i];
527 if (!reg->robj) 404 if (!reg->bo)
528 break; 405 break;
529 406
530 old_object = reg->robj; 407 old_object = reg->bo;
531 if (old_object->pin_count == 0) 408 if (old_object->pin_count == 0)
532 steal = i; 409 steal = i;
533 } 410 }
@@ -538,91 +415,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
538 return -ENOMEM; 415 return -ENOMEM;
539 /* find someone with a surface reg and nuke their BO */ 416 /* find someone with a surface reg and nuke their BO */
540 reg = &rdev->surface_regs[steal]; 417 reg = &rdev->surface_regs[steal];
541 old_object = reg->robj; 418 old_object = reg->bo;
542 /* blow away the mapping */ 419 /* blow away the mapping */
543 DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); 420 DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
544 ttm_bo_unmap_virtual(&old_object->tobj); 421 ttm_bo_unmap_virtual(&old_object->tbo);
545 old_object->surface_reg = -1; 422 old_object->surface_reg = -1;
546 i = steal; 423 i = steal;
547 } 424 }
548 425
549 robj->surface_reg = i; 426 bo->surface_reg = i;
550 reg->robj = robj; 427 reg->bo = bo;
551 428
552out: 429out:
553 radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, 430 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
554 robj->tobj.mem.mm_node->start << PAGE_SHIFT, 431 bo->tbo.mem.mm_node->start << PAGE_SHIFT,
555 robj->tobj.num_pages << PAGE_SHIFT); 432 bo->tbo.num_pages << PAGE_SHIFT);
556 return 0; 433 return 0;
557} 434}
558 435
559void radeon_object_clear_surface_reg(struct radeon_object *robj) 436static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
560{ 437{
561 struct radeon_device *rdev = robj->rdev; 438 struct radeon_device *rdev = bo->rdev;
562 struct radeon_surface_reg *reg; 439 struct radeon_surface_reg *reg;
563 440
564 if (robj->surface_reg == -1) 441 if (bo->surface_reg == -1)
565 return; 442 return;
566 443
567 reg = &rdev->surface_regs[robj->surface_reg]; 444 reg = &rdev->surface_regs[bo->surface_reg];
568 radeon_clear_surface_reg(rdev, robj->surface_reg); 445 radeon_clear_surface_reg(rdev, bo->surface_reg);
569 446
570 reg->robj = NULL; 447 reg->bo = NULL;
571 robj->surface_reg = -1; 448 bo->surface_reg = -1;
572} 449}
573 450
574void radeon_object_set_tiling_flags(struct radeon_object *robj, 451int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
575 uint32_t tiling_flags, uint32_t pitch) 452 uint32_t tiling_flags, uint32_t pitch)
576{ 453{
577 robj->tiling_flags = tiling_flags; 454 int r;
578 robj->pitch = pitch; 455
456 r = radeon_bo_reserve(bo, false);
457 if (unlikely(r != 0))
458 return r;
459 bo->tiling_flags = tiling_flags;
460 bo->pitch = pitch;
461 radeon_bo_unreserve(bo);
462 return 0;
579} 463}
580 464
581void radeon_object_get_tiling_flags(struct radeon_object *robj, 465void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
582 uint32_t *tiling_flags, 466 uint32_t *tiling_flags,
583 uint32_t *pitch) 467 uint32_t *pitch)
584{ 468{
469 BUG_ON(!atomic_read(&bo->tbo.reserved));
585 if (tiling_flags) 470 if (tiling_flags)
586 *tiling_flags = robj->tiling_flags; 471 *tiling_flags = bo->tiling_flags;
587 if (pitch) 472 if (pitch)
588 *pitch = robj->pitch; 473 *pitch = bo->pitch;
589} 474}
590 475
591int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, 476int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
592 bool force_drop) 477 bool force_drop)
593{ 478{
594 if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) 479 BUG_ON(!atomic_read(&bo->tbo.reserved));
480
481 if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
595 return 0; 482 return 0;
596 483
597 if (force_drop) { 484 if (force_drop) {
598 radeon_object_clear_surface_reg(robj); 485 radeon_bo_clear_surface_reg(bo);
599 return 0; 486 return 0;
600 } 487 }
601 488
602 if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { 489 if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
603 if (!has_moved) 490 if (!has_moved)
604 return 0; 491 return 0;
605 492
606 if (robj->surface_reg >= 0) 493 if (bo->surface_reg >= 0)
607 radeon_object_clear_surface_reg(robj); 494 radeon_bo_clear_surface_reg(bo);
608 return 0; 495 return 0;
609 } 496 }
610 497
611 if ((robj->surface_reg >= 0) && !has_moved) 498 if ((bo->surface_reg >= 0) && !has_moved)
612 return 0; 499 return 0;
613 500
614 return radeon_object_get_surface_reg(robj); 501 return radeon_bo_get_surface_reg(bo);
615} 502}
616 503
617void radeon_bo_move_notify(struct ttm_buffer_object *bo, 504void radeon_bo_move_notify(struct ttm_buffer_object *bo,
618 struct ttm_mem_reg *mem) 505 struct ttm_mem_reg *mem)
619{ 506{
620 struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); 507 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
621 radeon_object_check_tiling(robj, 0, 1); 508 radeon_bo_check_tiling(rbo, 0, 1);
622} 509}
623 510
624void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 511void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
625{ 512{
626 struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); 513 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
627 radeon_object_check_tiling(robj, 0, 0); 514 radeon_bo_check_tiling(rbo, 0, 0);
628} 515}
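/*
 * Sketch (editor's illustration, not part of the patch): radeon_object.c
 * adopts one recurring idiom: TTM calls are made interruptible (the 'true'
 * argument to ttm_buffer_object_init()/ttm_buffer_object_validate()) and
 * -ERESTART, TTM's signal-interrupted return value at the time, is retried
 * locally. A stripped-down sketch of the idiom, assuming
 * ttm_buffer_object_validate() as used in this patch:
 */
#include "radeon.h"

static int radeon_sketch_validate(struct radeon_bo *bo)
{
	int r;

retry:
	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
				       true, false);	/* interruptible */
	if (unlikely(r != 0)) {
		if (r == -ERESTART)
			goto retry;	/* interrupted by a signal: try again */
		dev_err(bo->rdev->dev, "%p validate failed\n", bo);
	}
	return r;
}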
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 10e8af6bb456..e9da13077e2f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -28,19 +28,152 @@
28#ifndef __RADEON_OBJECT_H__ 28#ifndef __RADEON_OBJECT_H__
29#define __RADEON_OBJECT_H__ 29#define __RADEON_OBJECT_H__
30 30
31#include <ttm/ttm_bo_api.h> 31#include <drm/radeon_drm.h>
32#include <ttm/ttm_bo_driver.h> 32#include "radeon.h"
33#include <ttm/ttm_placement.h>
34#include <ttm/ttm_module.h>
35 33
36/* 34/**
37 * TTM. 35 * radeon_mem_type_to_domain - return domain corresponding to mem_type
36 * @mem_type: ttm memory type
37 *
38 * Returns corresponding domain of the ttm mem_type
39 */
40static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
41{
42 switch (mem_type) {
43 case TTM_PL_VRAM:
44 return RADEON_GEM_DOMAIN_VRAM;
45 case TTM_PL_TT:
46 return RADEON_GEM_DOMAIN_GTT;
47 case TTM_PL_SYSTEM:
48 return RADEON_GEM_DOMAIN_CPU;
49 default:
50 break;
51 }
52 return 0;
53}
54
55/**
56 * radeon_bo_reserve - reserve bo
57 * @bo: bo structure
58 * @no_wait: don't sleep while trying to reserve (return -EBUSY)
59 *
60 * Returns:
61 * -EBUSY: buffer is busy and @no_wait is true
62 * -ERESTART: A wait for the buffer to become unreserved was interrupted by
63 * a signal. Release all buffer reservations and return to user-space.
64 */
65static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
66{
67 int r;
68
69retry:
70 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
71 if (unlikely(r != 0)) {
72 if (r == -ERESTART)
73 goto retry;
74 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
75 return r;
76 }
77 return 0;
78}
79
80static inline void radeon_bo_unreserve(struct radeon_bo *bo)
81{
82 ttm_bo_unreserve(&bo->tbo);
83}
84
85/**
86 * radeon_bo_gpu_offset - return GPU offset of bo
87 * @bo: radeon object for which we query the offset
88 *
89 * Returns current GPU offset of the object.
90 *
91 * Note: the object should either be pinned or reserved when calling this
92 * function; it might be useful to add a check for this for debugging.
93 */
94static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
95{
96 return bo->tbo.offset;
97}
98
99static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
100{
101 return bo->tbo.num_pages << PAGE_SHIFT;
102}
103
104static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
105{
106 return !!atomic_read(&bo->tbo.reserved);
107}
108
109/**
110 * radeon_bo_mmap_offset - return mmap offset of bo
111 * @bo: radeon object for which we query the offset
112 *
113 * Returns mmap offset of the object.
114 *
115 * Note: addr_space_offset is constant after ttm bo init thus isn't protected
116 * by any lock.
38 */ 117 */
39struct radeon_mman { 118static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
40 struct ttm_bo_global_ref bo_global_ref; 119{
41 struct ttm_global_reference mem_global_ref; 120 return bo->tbo.addr_space_offset;
42 bool mem_global_referenced; 121}
43 struct ttm_bo_device bdev; 122
44}; 123static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
124 bool no_wait)
125{
126 int r;
127
128retry:
129 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
130 if (unlikely(r != 0)) {
131 if (r == -ERESTART)
132 goto retry;
133 dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
134 return r;
135 }
136 spin_lock(&bo->tbo.lock);
137 if (mem_type)
138 *mem_type = bo->tbo.mem.mem_type;
139 if (bo->tbo.sync_obj)
140 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
141 spin_unlock(&bo->tbo.lock);
142 ttm_bo_unreserve(&bo->tbo);
143 if (unlikely(r == -ERESTART))
144 goto retry;
145 return r;
146}
147
148extern int radeon_bo_create(struct radeon_device *rdev,
149 struct drm_gem_object *gobj, unsigned long size,
150 bool kernel, u32 domain,
151 struct radeon_bo **bo_ptr);
152extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
153extern void radeon_bo_kunmap(struct radeon_bo *bo);
154extern void radeon_bo_unref(struct radeon_bo **bo);
155extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
156extern int radeon_bo_unpin(struct radeon_bo *bo);
157extern int radeon_bo_evict_vram(struct radeon_device *rdev);
158extern void radeon_bo_force_delete(struct radeon_device *rdev);
159extern int radeon_bo_init(struct radeon_device *rdev);
160extern void radeon_bo_fini(struct radeon_device *rdev);
161extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
162 struct list_head *head);
163extern int radeon_bo_list_reserve(struct list_head *head);
164extern void radeon_bo_list_unreserve(struct list_head *head);
165extern int radeon_bo_list_validate(struct list_head *head, void *fence);
166extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
167extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
168 struct vm_area_struct *vma);
169extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
170 u32 tiling_flags, u32 pitch);
171extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
172 u32 *tiling_flags, u32 *pitch);
173extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
174 bool force_drop);
175extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
176 struct ttm_mem_reg *mem);
177extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
45 178
46#endif 179#endif
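/*
 * Sketch (editor's illustration, not part of the patch): the rewritten
 * header keeps the trivial accessors inline: the GPU offset comes straight
 * from tbo.offset, the size from tbo.num_pages, and the mmap offset from
 * addr_space_offset, so none of them cost a call into radeon_object.c. A
 * short usage sketch of the inline helpers declared above; the function
 * name is made up.
 */
#include "radeon_object.h"

static void radeon_sketch_describe(struct radeon_bo *bo)
{
	unsigned long size = radeon_bo_size(bo);	/* num_pages << PAGE_SHIFT */
	u64 mmap_off = radeon_bo_mmap_offset(bo);	/* constant after bo init */

	/* radeon_bo_gpu_offset() expects the bo pinned or reserved */
	if (radeon_bo_is_reserved(bo))
		DRM_DEBUG("bo %p: %lu bytes, gpu 0x%llx, mmap 0x%llx\n",
			  bo, size,
			  (unsigned long long)radeon_bo_gpu_offset(bo),
			  (unsigned long long)mmap_off);
}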
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 747b4bffb84b..4d12b2d17b4d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
165 return 0; 165 return 0;
166 /* Allocate 1M object buffer */ 166 /* Allocate 1M object buffer */
167 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); 167 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
168 r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 168 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
169 true, RADEON_GEM_DOMAIN_GTT, 169 true, RADEON_GEM_DOMAIN_GTT,
170 false, &rdev->ib_pool.robj); 170 &rdev->ib_pool.robj);
171 if (r) { 171 if (r) {
172 DRM_ERROR("radeon: failed to ib pool (%d).\n", r); 172 DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
173 return r; 173 return r;
174 } 174 }
175 r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr); 175 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
176 if (unlikely(r != 0))
177 return r;
178 r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
176 if (r) { 179 if (r) {
180 radeon_bo_unreserve(rdev->ib_pool.robj);
177 DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r); 181 DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
178 return r; 182 return r;
179 } 183 }
180 r = radeon_object_kmap(rdev->ib_pool.robj, &ptr); 184 r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
185 radeon_bo_unreserve(rdev->ib_pool.robj);
181 if (r) { 186 if (r) {
182 DRM_ERROR("radeon: failed to map ib pool (%d).\n", r); 187 DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
183 return r; 188 return r;
@@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
203 208
204void radeon_ib_pool_fini(struct radeon_device *rdev) 209void radeon_ib_pool_fini(struct radeon_device *rdev)
205{ 210{
211 int r;
212
206 if (!rdev->ib_pool.ready) { 213 if (!rdev->ib_pool.ready) {
207 return; 214 return;
208 } 215 }
209 mutex_lock(&rdev->ib_pool.mutex); 216 mutex_lock(&rdev->ib_pool.mutex);
210 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 217 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
211 if (rdev->ib_pool.robj) { 218 if (rdev->ib_pool.robj) {
212 radeon_object_kunmap(rdev->ib_pool.robj); 219 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
213 radeon_object_unref(&rdev->ib_pool.robj); 220 if (likely(r == 0)) {
221 radeon_bo_kunmap(rdev->ib_pool.robj);
222 radeon_bo_unpin(rdev->ib_pool.robj);
223 radeon_bo_unreserve(rdev->ib_pool.robj);
224 }
225 radeon_bo_unref(&rdev->ib_pool.robj);
214 rdev->ib_pool.robj = NULL; 226 rdev->ib_pool.robj = NULL;
215 } 227 }
216 mutex_unlock(&rdev->ib_pool.mutex); 228 mutex_unlock(&rdev->ib_pool.mutex);
@@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
288 rdev->cp.ring_size = ring_size; 300 rdev->cp.ring_size = ring_size;
289 /* Allocate ring buffer */ 301 /* Allocate ring buffer */
290 if (rdev->cp.ring_obj == NULL) { 302 if (rdev->cp.ring_obj == NULL) {
291 r = radeon_object_create(rdev, NULL, rdev->cp.ring_size, 303 r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
292 true, 304 RADEON_GEM_DOMAIN_GTT,
293 RADEON_GEM_DOMAIN_GTT, 305 &rdev->cp.ring_obj);
294 false,
295 &rdev->cp.ring_obj);
296 if (r) { 306 if (r) {
297 DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r); 307 dev_err(rdev->dev, "(%d) ring create failed\n", r);
298 mutex_unlock(&rdev->cp.mutex);
299 return r; 308 return r;
300 } 309 }
301 r = radeon_object_pin(rdev->cp.ring_obj, 310 r = radeon_bo_reserve(rdev->cp.ring_obj, false);
302 RADEON_GEM_DOMAIN_GTT, 311 if (unlikely(r != 0))
303 &rdev->cp.gpu_addr); 312 return r;
313 r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
314 &rdev->cp.gpu_addr);
304 if (r) { 315 if (r) {
305 DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r); 316 radeon_bo_unreserve(rdev->cp.ring_obj);
306 mutex_unlock(&rdev->cp.mutex); 317 dev_err(rdev->dev, "(%d) ring pin failed\n", r);
307 return r; 318 return r;
308 } 319 }
309 r = radeon_object_kmap(rdev->cp.ring_obj, 320 r = radeon_bo_kmap(rdev->cp.ring_obj,
310 (void **)&rdev->cp.ring); 321 (void **)&rdev->cp.ring);
322 radeon_bo_unreserve(rdev->cp.ring_obj);
311 if (r) { 323 if (r) {
312 DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r); 324 dev_err(rdev->dev, "(%d) ring map failed\n", r);
313 mutex_unlock(&rdev->cp.mutex);
314 return r; 325 return r;
315 } 326 }
316 } 327 }
@@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
321 332
322void radeon_ring_fini(struct radeon_device *rdev) 333void radeon_ring_fini(struct radeon_device *rdev)
323{ 334{
335 int r;
336
324 mutex_lock(&rdev->cp.mutex); 337 mutex_lock(&rdev->cp.mutex);
325 if (rdev->cp.ring_obj) { 338 if (rdev->cp.ring_obj) {
326 radeon_object_kunmap(rdev->cp.ring_obj); 339 r = radeon_bo_reserve(rdev->cp.ring_obj, false);
327 radeon_object_unpin(rdev->cp.ring_obj); 340 if (likely(r == 0)) {
328 radeon_object_unref(&rdev->cp.ring_obj); 341 radeon_bo_kunmap(rdev->cp.ring_obj);
342 radeon_bo_unpin(rdev->cp.ring_obj);
343 radeon_bo_unreserve(rdev->cp.ring_obj);
344 }
345 radeon_bo_unref(&rdev->cp.ring_obj);
329 rdev->cp.ring = NULL; 346 rdev->cp.ring = NULL;
330 rdev->cp.ring_obj = NULL; 347 rdev->cp.ring_obj = NULL;
331 } 348 }
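/*
 * Sketch (editor's illustration, not part of the patch): the ring and
 * IB-pool changes repeat the same setup shape as the GART table: create,
 * reserve, pin into GTT, kmap, unreserve. One condensed sketch covers
 * both, using only the calls this patch introduces; the function name is
 * made up and size stands in for ring_size or RADEON_IB_POOL_SIZE*64*1024.
 */
#include "radeon.h"

static int radeon_sketch_alloc_mapped(struct radeon_device *rdev,
				      unsigned long size,
				      struct radeon_bo **bo, void **ptr,
				      u64 *gpu_addr)
{
	int r;

	r = radeon_bo_create(rdev, NULL, size, true /* kernel bo */,
			     RADEON_GEM_DOMAIN_GTT, bo);
	if (r)
		return r;
	r = radeon_bo_reserve(*bo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(*bo, RADEON_GEM_DOMAIN_GTT, gpu_addr);
	if (r == 0)
		r = radeon_bo_kmap(*bo, ptr);	/* map for CPU writes */
	radeon_bo_unreserve(*bo);
	return r;
}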
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f8a465d9a1cf..391c973ec4db 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -30,8 +30,8 @@
30/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ 30/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
31void radeon_test_moves(struct radeon_device *rdev) 31void radeon_test_moves(struct radeon_device *rdev)
32{ 32{
33 struct radeon_object *vram_obj = NULL; 33 struct radeon_bo *vram_obj = NULL;
34 struct radeon_object **gtt_obj = NULL; 34 struct radeon_bo **gtt_obj = NULL;
35 struct radeon_fence *fence = NULL; 35 struct radeon_fence *fence = NULL;
36 uint64_t gtt_addr, vram_addr; 36 uint64_t gtt_addr, vram_addr;
37 unsigned i, n, size; 37 unsigned i, n, size;
@@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev)
 		goto out_cleanup;
 	}
 
-	r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
-				 false, &vram_obj);
+	r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+			     &vram_obj);
 	if (r) {
 		DRM_ERROR("Failed to create VRAM object\n");
 		goto out_cleanup;
 	}
-
-	r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+	r = radeon_bo_reserve(vram_obj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
 	if (r) {
 		DRM_ERROR("Failed to pin VRAM object\n");
 		goto out_cleanup;
 	}
-
 	for (i = 0; i < n; i++) {
 		void *gtt_map, *vram_map;
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
 
-		r = radeon_object_create(rdev, NULL, size, true,
-					 RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
+		r = radeon_bo_create(rdev, NULL, size, true,
+				     RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_cleanup;
 		}
 
-		r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+		r = radeon_bo_reserve(gtt_obj[i], false);
+		if (unlikely(r != 0))
+			goto out_cleanup;
+		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
 		if (r) {
 			DRM_ERROR("Failed to pin GTT object %d\n", i);
 			goto out_cleanup;
 		}
 
-		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object %d\n", i);
 			goto out_cleanup;
@@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 		     gtt_start++)
 			*gtt_start = gtt_start;
 
-		radeon_object_kunmap(gtt_obj[i]);
+		radeon_bo_kunmap(gtt_obj[i]);
 
 		r = radeon_fence_create(rdev, &fence);
 		if (r) {
@@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 		radeon_fence_unref(&fence);
 
-		r = radeon_object_kmap(vram_obj, &vram_map);
+		r = radeon_bo_kmap(vram_obj, &vram_map);
 		if (r) {
 			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
 			goto out_cleanup;
@@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev)
131 "expected 0x%p (GTT map 0x%p-0x%p)\n", 135 "expected 0x%p (GTT map 0x%p-0x%p)\n",
132 i, *vram_start, gtt_start, gtt_map, 136 i, *vram_start, gtt_start, gtt_map,
133 gtt_end); 137 gtt_end);
134 radeon_object_kunmap(vram_obj); 138 radeon_bo_kunmap(vram_obj);
135 goto out_cleanup; 139 goto out_cleanup;
136 } 140 }
137 *vram_start = vram_start; 141 *vram_start = vram_start;
138 } 142 }
139 143
140 radeon_object_kunmap(vram_obj); 144 radeon_bo_kunmap(vram_obj);
141 145
142 r = radeon_fence_create(rdev, &fence); 146 r = radeon_fence_create(rdev, &fence);
143 if (r) { 147 if (r) {
@@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 		radeon_fence_unref(&fence);
 
-		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
 			goto out_cleanup;
@@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev)
174 "expected 0x%p (VRAM map 0x%p-0x%p)\n", 178 "expected 0x%p (VRAM map 0x%p-0x%p)\n",
175 i, *gtt_start, vram_start, vram_map, 179 i, *gtt_start, vram_start, vram_map,
176 vram_end); 180 vram_end);
177 radeon_object_kunmap(gtt_obj[i]); 181 radeon_bo_kunmap(gtt_obj[i]);
178 goto out_cleanup; 182 goto out_cleanup;
179 } 183 }
180 } 184 }
181 185
182 radeon_object_kunmap(gtt_obj[i]); 186 radeon_bo_kunmap(gtt_obj[i]);
183 187
184 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", 188 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
185 gtt_addr - rdev->mc.gtt_location); 189 gtt_addr - rdev->mc.gtt_location);
@@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 out_cleanup:
 	if (vram_obj) {
-		radeon_object_unpin(vram_obj);
-		radeon_object_unref(&vram_obj);
+		if (radeon_bo_is_reserved(vram_obj)) {
+			radeon_bo_unpin(vram_obj);
+			radeon_bo_unreserve(vram_obj);
+		}
+		radeon_bo_unref(&vram_obj);
 	}
 	if (gtt_obj) {
 		for (i = 0; i < n; i++) {
 			if (gtt_obj[i]) {
-				radeon_object_unpin(gtt_obj[i]);
-				radeon_object_unref(&gtt_obj[i]);
+				if (radeon_bo_is_reserved(gtt_obj[i])) {
+					radeon_bo_unpin(gtt_obj[i]);
+					radeon_bo_unreserve(gtt_obj[i]);
+				}
+				radeon_bo_unref(&gtt_obj[i]);
 			}
 		}
 		kfree(gtt_obj);
@@ -206,4 +216,3 @@ out_cleanup:
 		printk(KERN_WARNING "Error while testing BO move.\n");
 	}
 }
-
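
A detail worth noting in radeon_test_moves(): once radeon_bo_reserve() succeeds, the test keeps the buffer reserved, so control can reach out_cleanup both with and without the reservation held. The cleanup therefore probes the reservation before undoing the pin; condensed from the hunk above (bo standing in for vram_obj or gtt_obj[i]):

	if (bo) {
		if (radeon_bo_is_reserved(bo)) {
			/* reserve succeeded earlier, undo the pin under it */
			radeon_bo_unpin(bo);
			radeon_bo_unreserve(bo);
		}
		radeon_bo_unref(&bo);
	}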
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1381e06d6af3..bdb46c8cadd1 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_TT:
-		man->gpu_offset = 0;
+		man->gpu_offset = rdev->mc.gtt_location;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
-		man->gpu_offset = 0;
+		man->gpu_offset = rdev->mc.vram_location;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
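
With man->gpu_offset now carrying the MC base of each heap, translating a TTM placement into a GPU address reduces to an addition against that base rather than a per-object fix-up. A sketch of the arithmetic this enables (helper name and parameters are illustrative, not from the patch):

static inline u64 radeon_heap_gpu_addr(u64 gpu_offset, u64 offset_in_heap)
{
	/* gpu_offset is rdev->mc.gtt_location for TTM_PL_TT or
	 * rdev->mc.vram_location for TTM_PL_VRAM, as programmed above */
	return gpu_offset + offset_in_heap;
}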
@@ -482,27 +482,31 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;
 	}
-	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
-			   ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+			   0, rdev->mc.real_vram_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
-	r = radeon_object_create(rdev, NULL, 256 * 1024, true,
-				 RADEON_GEM_DOMAIN_VRAM, false,
+	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+			     RADEON_GEM_DOMAIN_VRAM,
 				&rdev->stollen_vga_memory);
 	if (r) {
 		return r;
 	}
-	r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+	if (r)
+		return r;
+	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	radeon_bo_unreserve(rdev->stollen_vga_memory);
 	if (r) {
-		radeon_object_unref(&rdev->stollen_vga_memory);
+		radeon_bo_unref(&rdev->stollen_vga_memory);
 		return r;
 	}
 	DRM_INFO("radeon: %uM of VRAM memory ready\n",
 		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
-	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
-			   ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+			   0, rdev->mc.gtt_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
 		return r;
@@ -523,9 +527,15 @@ int radeon_ttm_init(struct radeon_device *rdev)
 
 void radeon_ttm_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	if (rdev->stollen_vga_memory) {
-		radeon_object_unpin(rdev->stollen_vga_memory);
-		radeon_object_unref(&rdev->stollen_vga_memory);
+		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+		if (r == 0) {
+			radeon_bo_unpin(rdev->stollen_vga_memory);
+			radeon_bo_unreserve(rdev->stollen_vga_memory);
+		}
+		radeon_bo_unref(&rdev->stollen_vga_memory);
 	}
 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
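
The stolen-VGA buffer shows two smaller points of the new API: radeon_bo_pin() accepts a NULL gpu_addr when the caller only needs the memory resident, and the buffer may be unreserved before the pin result is checked, since inspecting r no longer touches protected state. Condensed from the init hunk above:

	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	/* NULL: only residency matters, the address is not used */
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}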
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 50907f84461b..8d12b8a1ff13 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -452,7 +452,7 @@ void rs400_fini(struct radeon_device *rdev)
 	rs400_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -509,7 +509,7 @@ int rs400_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9b6303dd7d3a..c97eb63a21d2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -146,15 +146,20 @@ int rs600_gart_enable(struct radeon_device *rdev)
 
 void rs600_gart_disable(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u32 tmp;
+	int r;
 
 	/* FIXME: disable out of gart access */
 	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
 	tmp = RREG32_MC(R_000009_MC_CNTL1);
 	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (r == 0) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -444,7 +449,7 @@ void rs600_fini(struct radeon_device *rdev)
 	rs600_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -503,7 +508,7 @@ int rs600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs600_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 4607025125c0..e7a5f87c23fe 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -661,7 +661,7 @@ void rs690_fini(struct radeon_device *rdev)
 	rs400_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -721,7 +721,7 @@ int rs690_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 0ecf5d939aa0..7793239e24b2 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -539,11 +539,11 @@ void rv515_fini(struct radeon_device *rdev)
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
 	radeon_gem_fini(rdev);
 	rv370_pcie_gart_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -600,7 +600,7 @@ int rv515_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rv370_pcie_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a96be8b3a530..dd4f02096a80 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
 void rv770_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i;
+	int i, r;
 
 	/* Disable all tables */
 	for (i = 0; i < 7; i++)
@@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -880,8 +884,12 @@ static int rv770_startup(struct radeon_device *rdev)
 	}
 	rv770_gpu_init(rdev);
 
-	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-			      &rdev->r600_blit.shader_gpu_addr);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+			  &rdev->r600_blit.shader_gpu_addr);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("failed to pin blit object %d\n", r);
 		return r;
@@ -943,13 +951,19 @@ int rv770_resume(struct radeon_device *rdev)
 
 int rv770_suspend(struct radeon_device *rdev)
 {
+	int r;
+
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
 	rdev->cp.ready = false;
 	r600_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
-	radeon_object_unpin(rdev->r600_blit.shader_obj);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (likely(r == 0)) {
+		radeon_bo_unpin(rdev->r600_blit.shader_obj);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+	}
 	return 0;
 }
 
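rv770_suspend() only drops the pin; the buffer object itself survives so that rv770_startup() can re-pin it on resume. A condensed view of that lifecycle, taken from the two hunks above (function bodies trimmed to the relevant calls):

	/* suspend: unpin but keep the object */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}

	/* resume (rv770_startup): pin again and recover the GPU address */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->r600_blit.shader_gpu_addr);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
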
@@ -1011,7 +1025,7 @@ int rv770_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 
@@ -1082,7 +1096,7 @@ void rv770_fini(struct radeon_device *rdev)
 	radeon_clocks_fini(rdev);
 	if (rdev->flags & RADEON_IS_AGP)
 		radeon_agp_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;