path: root/drivers/gpu/drm/radeon/r300.c
author     David Vrabel <david.vrabel@csr.com>  2010-02-25 08:35:22 -0500
committer  David Vrabel <david.vrabel@csr.com>  2010-02-25 08:35:22 -0500
commit     03806fa20f6a081493a731a4b18ea66317f9f947 (patch)
tree       630796c65c501e3612253ee4d4af58082a5f984c  /drivers/gpu/drm/radeon/r300.c
parent     35fb2a816a06ded2a3ff83d896c34b83c8e1d556 (diff)
parent     baac35c4155a8aa826c70acee6553368ca5243a2 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-upstream
Diffstat (limited to 'drivers/gpu/drm/radeon/r300.c')
-rw-r--r--  drivers/gpu/drm/radeon/r300.c  33
1 file changed, 25 insertions, 8 deletions
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 3f2cc9e2e8d9..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -36,7 +36,15 @@
 #include "rv350d.h"
 #include "r300_reg_safe.h"
 
-/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */
+/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: r300 family seems to dislike write to HOST_PATH_CNTL
+ *   using MMIO to flush host path read cache, this lead to HARDLOCKUP.
+ *   However, scheduling such write to the ring seems harmless, i suspect
+ *   the CP read collide with the flush somehow, or maybe the MC, hard to
+ *   tell. (Jerome Glisse)
+ */
 
 /*
  * rv370,rv380 PCIE GART
@@ -178,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
 	radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
 	/* Emit fence sequence & fire IRQ */
 	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
 	radeon_ring_write(rdev, fence->seq);
@@ -493,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
 
 	/* DDR for all card after R300 & IGP */
 	rdev->mc.vram_is_ddr = true;
+
 	tmp = RREG32(RADEON_MEM_CNTL);
-	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
-		rdev->mc.vram_width = 128;
-	} else {
-		rdev->mc.vram_width = 64;
+	tmp &= R300_MEM_NUM_CHANNELS_MASK;
+	switch (tmp) {
+	case 0: rdev->mc.vram_width = 64; break;
+	case 1: rdev->mc.vram_width = 128; break;
+	case 2: rdev->mc.vram_width = 256; break;
+	default: rdev->mc.vram_width = 128; break;
 	}
 
 	r100_vram_init_sizes(rdev);
@@ -1258,6 +1274,7 @@ static int r300_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
@@ -1313,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
 
 void r300_fini(struct radeon_device *rdev)
 {
-	r300_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -1322,6 +1338,7 @@ void r300_fini(struct radeon_device *rdev)
 		rv370_pcie_gart_fini(rdev);
 	if (rdev->flags & RADEON_IS_PCI)
 		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
@@ -1403,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		r300_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCIE)
 			rv370_pcie_gart_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCI)
 			r100_pci_gart_fini(rdev);
-		radeon_irq_kms_fini(rdev);
+		radeon_agp_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
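
Reading note on the r300_vram_info() hunk above: the VRAM width is now decoded from the memory-channel field of RADEON_MEM_CNTL (masked with R300_MEM_NUM_CHANNELS_MASK) as 64, 128 or 256 bits, falling back to 128 bits for any other value. The following is only a minimal standalone sketch of that decode, not driver code: the helper name and the test harness are invented for illustration, and in the real driver the field comes from the hardware register rather than a function parameter.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical standalone decode mirroring the switch added in
     * r300_vram_info(): the masked channel-count field selects the
     * VRAM bus width in bits. */
    static unsigned int r300_decode_vram_width(uint32_t num_channels_field)
    {
    	switch (num_channels_field) {
    	case 0:
    		return 64;
    	case 1:
    		return 128;
    	case 2:
    		return 256;
    	default:
    		return 128;	/* unrecognized encoding: assume 128-bit, as the driver does */
    	}
    }

    int main(void)
    {
    	for (unsigned int field = 0; field <= 3; field++)
    		printf("MEM_NUM_CHANNELS field %u -> %u-bit VRAM interface\n",
    		       field, r300_decode_vram_width(field));
    	return 0;
    }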