aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/radeon/r600.c
diff options
context:
space:
mode:
authorJerome Glisse <jglisse@redhat.com>2009-10-01 12:02:12 -0400
committerDave Airlie <airlied@redhat.com>2009-10-01 19:33:52 -0400
commit81cc35bfc19ebe4b823396fe4fef67a923360916 (patch)
treeda9cbddd170b1c93067f9c8e6411cbe02860f3e2 /drivers/gpu/drm/radeon/r600.c
parent62a8ea3f7bb61e5f92db0a648b7cc566852c36ec (diff)
drm/radeon/kms: Fix R600 write back buffer
This splits write back buffer handling into 3 functions: wb_fini for cleanup, and wb_enable/wb_disable for enabling/disabling write back, used for suspend/resume. This should fix a potential issue of leaving write back active before suspending. We need to allocate memory in wb_enable because we can only allocate once GART is running. Signed-off-by: Jerome Glisse <jglisse@redhat.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r--drivers/gpu/drm/radeon/r600.c67
1 files changed, 33 insertions, 34 deletions
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 17fff7b6e591..11fa801a2c52 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1350,32 +1350,47 @@ int r600_ring_test(struct radeon_device *rdev)
1350 return r; 1350 return r;
1351} 1351}
1352 1352
1353/* 1353void r600_wb_disable(struct radeon_device *rdev)
1354 * Writeback 1354{
1355 */ 1355 WREG32(SCRATCH_UMSK, 0);
1356int r600_wb_init(struct radeon_device *rdev) 1356 if (rdev->wb.wb_obj) {
1357 radeon_object_kunmap(rdev->wb.wb_obj);
1358 radeon_object_unpin(rdev->wb.wb_obj);
1359 }
1360}
1361
1362void r600_wb_fini(struct radeon_device *rdev)
1363{
1364 r600_wb_disable(rdev);
1365 if (rdev->wb.wb_obj) {
1366 radeon_object_unref(&rdev->wb.wb_obj);
1367 rdev->wb.wb = NULL;
1368 rdev->wb.wb_obj = NULL;
1369 }
1370}
1371
1372int r600_wb_enable(struct radeon_device *rdev)
1357{ 1373{
1358 int r; 1374 int r;
1359 1375
1360 if (rdev->wb.wb_obj == NULL) { 1376 if (rdev->wb.wb_obj == NULL) {
1361 r = radeon_object_create(rdev, NULL, 4096, 1377 r = radeon_object_create(rdev, NULL, 4096, true,
1362 true, 1378 RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
1363 RADEON_GEM_DOMAIN_GTT,
1364 false, &rdev->wb.wb_obj);
1365 if (r) { 1379 if (r) {
1366 DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); 1380 dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
1367 return r; 1381 return r;
1368 } 1382 }
1369 r = radeon_object_pin(rdev->wb.wb_obj, 1383 r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
1370 RADEON_GEM_DOMAIN_GTT, 1384 &rdev->wb.gpu_addr);
1371 &rdev->wb.gpu_addr);
1372 if (r) { 1385 if (r) {
1373 DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); 1386 dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
1387 r600_wb_fini(rdev);
1374 return r; 1388 return r;
1375 } 1389 }
1376 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 1390 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
1377 if (r) { 1391 if (r) {
1378 DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); 1392 dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
1393 r600_wb_fini(rdev);
1379 return r; 1394 return r;
1380 } 1395 }
1381 } 1396 }
@@ -1386,21 +1401,6 @@ int r600_wb_init(struct radeon_device *rdev)
1386 return 0; 1401 return 0;
1387} 1402}
1388 1403
1389void r600_wb_fini(struct radeon_device *rdev)
1390{
1391 if (rdev->wb.wb_obj) {
1392 radeon_object_kunmap(rdev->wb.wb_obj);
1393 radeon_object_unpin(rdev->wb.wb_obj);
1394 radeon_object_unref(&rdev->wb.wb_obj);
1395 rdev->wb.wb = NULL;
1396 rdev->wb.wb_obj = NULL;
1397 }
1398}
1399
1400
1401/*
1402 * CS
1403 */
1404void r600_fence_ring_emit(struct radeon_device *rdev, 1404void r600_fence_ring_emit(struct radeon_device *rdev,
1405 struct radeon_fence *fence) 1405 struct radeon_fence *fence)
1406{ 1406{
@@ -1500,9 +1500,8 @@ int r600_startup(struct radeon_device *rdev)
1500 r = r600_cp_resume(rdev); 1500 r = r600_cp_resume(rdev);
1501 if (r) 1501 if (r)
1502 return r; 1502 return r;
1503 r = r600_wb_init(rdev); 1503 /* write back buffer are not vital so don't worry about failure */
1504 if (r) 1504 r600_wb_enable(rdev);
1505 return r;
1506 return 0; 1505 return 0;
1507} 1506}
1508 1507
@@ -1539,13 +1538,12 @@ int r600_resume(struct radeon_device *rdev)
1539 return r; 1538 return r;
1540} 1539}
1541 1540
1542
1543int r600_suspend(struct radeon_device *rdev) 1541int r600_suspend(struct radeon_device *rdev)
1544{ 1542{
1545 /* FIXME: we should wait for ring to be empty */ 1543 /* FIXME: we should wait for ring to be empty */
1546 r600_cp_stop(rdev); 1544 r600_cp_stop(rdev);
1547 rdev->cp.ready = false; 1545 rdev->cp.ready = false;
1548 1546 r600_wb_disable(rdev);
1549 r600_pcie_gart_disable(rdev); 1547 r600_pcie_gart_disable(rdev);
1550 /* unpin shaders bo */ 1548 /* unpin shaders bo */
1551 radeon_object_unpin(rdev->r600_blit.shader_obj); 1549 radeon_object_unpin(rdev->r600_blit.shader_obj);
@@ -1668,6 +1666,7 @@ void r600_fini(struct radeon_device *rdev)
1668 1666
1669 r600_blit_fini(rdev); 1667 r600_blit_fini(rdev);
1670 radeon_ring_fini(rdev); 1668 radeon_ring_fini(rdev);
1669 r600_wb_fini(rdev);
1671 r600_pcie_gart_fini(rdev); 1670 r600_pcie_gart_fini(rdev);
1672 radeon_gem_fini(rdev); 1671 radeon_gem_fini(rdev);
1673 radeon_fence_driver_fini(rdev); 1672 radeon_fence_driver_fini(rdev);