Diffstat (limited to 'drivers/gpu/drm/radeon/r420.c')
-rw-r--r--  drivers/gpu/drm/radeon/r420.c  48
1 file changed, 42 insertions, 6 deletions
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c05a7270cf0c..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -30,7 +30,15 @@
30#include "radeon_reg.h" 30#include "radeon_reg.h"
31#include "radeon.h" 31#include "radeon.h"
32#include "atom.h" 32#include "atom.h"
33#include "r100d.h"
33#include "r420d.h" 34#include "r420d.h"
35#include "r420_reg_safe.h"
36
37static void r420_set_reg_safe(struct radeon_device *rdev)
38{
39 rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
40 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
41}
34 42
35int r420_mc_init(struct radeon_device *rdev) 43int r420_mc_init(struct radeon_device *rdev)
36{ 44{
@@ -42,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
         if (rdev->flags & RADEON_IS_AGP) {
                 r = radeon_agp_init(rdev);
                 if (r) {
-                        printk(KERN_WARNING "[drm] Disabling AGP\n");
-                        rdev->flags &= ~RADEON_IS_AGP;
-                        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+                        radeon_agp_disable(rdev);
                 } else {
                         rdev->mc.gtt_location = rdev->mc.agp_base;
                 }
@@ -165,6 +171,34 @@ static void r420_clock_resume(struct radeon_device *rdev)
         WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
 }
 
+static void r420_cp_errata_init(struct radeon_device *rdev)
+{
+        /* RV410 and R420 can lock up if CP DMA to host memory happens
+         * while the 2D engine is busy.
+         *
+         * The proper workaround is to queue a RESYNC at the beginning
+         * of the CP init, apparently.
+         */
+        radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
+        radeon_ring_lock(rdev, 8);
+        radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
+        radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
+        radeon_ring_write(rdev, 0xDEADBEEF);
+        radeon_ring_unlock_commit(rdev);
+}
+
+static void r420_cp_errata_fini(struct radeon_device *rdev)
+{
+        /* Catch the RESYNC we dispatched all the way back,
+         * at the very beginning of the CP init.
+         */
+        radeon_ring_lock(rdev, 8);
+        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+        radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
+        radeon_ring_unlock_commit(rdev);
+        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
+}
+
 static int r420_startup(struct radeon_device *rdev)
 {
         int r;
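Note on the ring writes added in r420_cp_errata_init() above (an illustration, not part of the patch): PACKET0(reg, n) emits a CP type-0 packet whose following n + 1 dwords are written to consecutive registers starting at reg. Assuming R300_CP_RESYNC_DATA sits directly after R300_CP_RESYNC_ADDR, the three dwords queued on the ring are roughly equivalent to the direct register writes sketched below, except that the CP performs them from the command stream rather than through the MMIO aperture:

        /* hedged sketch only -- not part of the commit */
        WREG32(R300_CP_RESYNC_ADDR, rdev->config.r300.resync_scratch);
        WREG32(R300_CP_RESYNC_DATA, 0xDEADBEEF);

r420_cp_errata_fini() pairs with this at teardown: it flushes the 3D destination cache (R300_RB3D_DC_FINISH) and then releases the scratch register taken in the init path.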
@@ -190,12 +224,14 @@ static int r420_startup(struct radeon_device *rdev)
         r420_pipes_init(rdev);
         /* Enable IRQ */
         r100_irq_set(rdev);
+        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
         /* 1M ring buffer */
         r = r100_cp_init(rdev, 1024 * 1024);
         if (r) {
                 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
                 return r;
         }
+        r420_cp_errata_init(rdev);
         r = r100_wb_init(rdev);
         if (r) {
                 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
@@ -238,6 +274,7 @@ int r420_resume(struct radeon_device *rdev)
 
 int r420_suspend(struct radeon_device *rdev)
 {
+        r420_cp_errata_fini(rdev);
         r100_cp_disable(rdev);
         r100_wb_disable(rdev);
         r100_irq_disable(rdev);
@@ -346,22 +383,21 @@ int r420_init(struct radeon_device *rdev)
                 if (r)
                         return r;
         }
-        r300_set_reg_safe(rdev);
+        r420_set_reg_safe(rdev);
         rdev->accel_working = true;
         r = r420_startup(rdev);
         if (r) {
                 /* Somethings want wront with the accel init stop accel */
                 dev_err(rdev->dev, "Disabling GPU acceleration\n");
-                r420_suspend(rdev);
                 r100_cp_fini(rdev);
                 r100_wb_fini(rdev);
                 r100_ib_fini(rdev);
+                radeon_irq_kms_fini(rdev);
                 if (rdev->flags & RADEON_IS_PCIE)
                         rv370_pcie_gart_fini(rdev);
                 if (rdev->flags & RADEON_IS_PCI)
                         r100_pci_gart_fini(rdev);
                 radeon_agp_fini(rdev);
-                radeon_irq_kms_fini(rdev);
                 rdev->accel_working = false;
         }
         return 0;