author     Ingo Molnar <mingo@elte.hu>   2009-10-29 04:02:15 -0400
committer  Ingo Molnar <mingo@elte.hu>   2009-10-29 04:02:20 -0400
commit     9de09ace8d518141a4375e1d216ab64db4377799 (patch)
tree       da8e7a77f4ea91eb3bb73fc6da72ecf8c99e1c16 /drivers/gpu/drm/radeon/r600.c
parent     1beee96bae0daf7f491356777c3080cc436950f5 (diff)
parent     6d3f1e12f46a2f9a1bb7e7aa433df8dd31ce5647 (diff)

Merge branch 'tracing/urgent' into tracing/core

Merge reason: Pick up fixes and move base from -rc1 to -rc5.

Signed-off-by: Ingo Molnar <mingo@elte.hu>

Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
 drivers/gpu/drm/radeon/r600.c   314
1 files changed, 163 insertions, 151 deletions
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2e4e60edbff4..609719490ec2 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -65,16 +65,11 @@ MODULE_FIRMWARE("radeon/RV710_me.bin");
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
-/* This files gather functions specifics to:
- * r600,rv610,rv630,rv620,rv635,rv670
- *
- * Some of these functions might be used by newer ASICs.
- */
+/* r600,rv610,rv630,rv620,rv635,rv670 */
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
 
-
 /*
  * R600 PCIE GART
  */
@@ -168,7 +163,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
 	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
 	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -225,6 +220,40 @@ void r600_pcie_gart_fini(struct radeon_device *rdev)
 	radeon_gart_fini(rdev);
 }
 
+void r600_agp_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+		ENABLE_WAIT_L2_QUERY;
+	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
 int r600_mc_wait_for_idle(struct radeon_device *rdev)
 {
 	unsigned i;
@@ -240,14 +269,9 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
 	return -1;
 }
 
-static void r600_mc_resume(struct radeon_device *rdev)
+static void r600_mc_program(struct radeon_device *rdev)
 {
-	u32 d1vga_control, d2vga_control;
-	u32 vga_render_control, vga_hdp_control;
-	u32 d1crtc_control, d2crtc_control;
-	u32 new_d1grph_primary, new_d1grph_secondary;
-	u32 new_d2grph_primary, new_d2grph_secondary;
-	u64 old_vram_start;
+	struct rv515_mc_save save;
 	u32 tmp;
 	int i, j;
 
@@ -261,85 +285,51 @@ static void r600_mc_resume(struct radeon_device *rdev)
 	}
 	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-	d1vga_control = RREG32(D1VGA_CONTROL);
-	d2vga_control = RREG32(D2VGA_CONTROL);
-	vga_render_control = RREG32(VGA_RENDER_CONTROL);
-	vga_hdp_control = RREG32(VGA_HDP_CONTROL);
-	d1crtc_control = RREG32(D1CRTC_CONTROL);
-	d2crtc_control = RREG32(D2CRTC_CONTROL);
-	old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
-	new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
-	new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
-	new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
-	new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
-	new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
-	new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
-	new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
-	new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
-
-	/* Stop all video */
-	WREG32(D1VGA_CONTROL, 0);
-	WREG32(D2VGA_CONTROL, 0);
-	WREG32(VGA_RENDER_CONTROL, 0);
-	WREG32(D1CRTC_UPDATE_LOCK, 1);
-	WREG32(D2CRTC_UPDATE_LOCK, 1);
-	WREG32(D1CRTC_CONTROL, 0);
-	WREG32(D2CRTC_CONTROL, 0);
-	WREG32(D1CRTC_UPDATE_LOCK, 0);
-	WREG32(D2CRTC_UPDATE_LOCK, 0);
-
-	mdelay(1);
+	rv515_mc_stop(rdev, &save);
 	if (r600_mc_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "[drm] MC not idle !\n");
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
 	}
-
-	/* Lockout access through VGA aperture*/
+	/* Lockout access through VGA aperture (doesn't exist before R600) */
 	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
-
 	/* Update configuration */
-	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
-	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
+	if (rdev->flags & RADEON_IS_AGP) {
+		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+			/* VRAM before AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.vram_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.gtt_end >> 12);
+		} else {
+			/* VRAM after AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.gtt_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.vram_end >> 12);
+		}
+	} else {
+		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
+		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
+	}
 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
-	tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
 	WREG32(MC_VM_FB_LOCATION, tmp);
 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
 	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
-	WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
+	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
 	if (rdev->flags & RADEON_IS_AGP) {
-		WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
-		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
+		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
 		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
 	} else {
 		WREG32(MC_VM_AGP_BASE, 0);
 		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
 		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
 	}
-	WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
-	WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
-	WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
-	WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
-	WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-
-	/* Unlock host access */
-	WREG32(VGA_HDP_CONTROL, vga_hdp_control);
-
-	mdelay(1);
 	if (r600_mc_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "[drm] MC not idle !\n");
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
 	}
-
-	/* Restore video state */
-	WREG32(D1CRTC_UPDATE_LOCK, 1);
-	WREG32(D2CRTC_UPDATE_LOCK, 1);
-	WREG32(D1CRTC_CONTROL, d1crtc_control);
-	WREG32(D2CRTC_CONTROL, d2crtc_control);
-	WREG32(D1CRTC_UPDATE_LOCK, 0);
-	WREG32(D2CRTC_UPDATE_LOCK, 0);
-	WREG32(D1VGA_CONTROL, d1vga_control);
-	WREG32(D2VGA_CONTROL, d2vga_control);
-	WREG32(VGA_RENDER_CONTROL, vga_render_control);
-
+	rv515_mc_resume(rdev, &save);
 	/* we need to own VRAM, so turn off the VGA renderer here
 	 * to stop it overwriting our objects */
 	rv515_vga_render_disable(rdev);
@@ -445,9 +435,9 @@ int r600_mc_init(struct radeon_device *rdev)
 		}
 	}
 	rdev->mc.vram_start = rdev->mc.vram_location;
-	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
 	rdev->mc.gtt_start = rdev->mc.gtt_location;
-	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
+	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
 	/* FIXME: we should enforce default clock in case GPU is not in
 	 * default setup
 	 */
@@ -463,6 +453,7 @@ int r600_mc_init(struct radeon_device *rdev)
  */
 int r600_gpu_soft_reset(struct radeon_device *rdev)
 {
+	struct rv515_mc_save save;
 	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
 			S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
 			S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -480,13 +471,25 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
 			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
 			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
 	u32 srbm_reset = 0;
+	u32 tmp;
 
+	dev_info(rdev->dev, "GPU softreset \n");
+	dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+		RREG32(R_008010_GRBM_STATUS));
+	dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+		RREG32(R_008014_GRBM_STATUS2));
+	dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+		RREG32(R_000E50_SRBM_STATUS));
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
 	/* Disable CP parsing/prefetching */
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
 	/* Check if any of the rendering block is busy and reset it */
 	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
 	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
-		WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) |
+		tmp = S_008020_SOFT_RESET_CR(1) |
 			S_008020_SOFT_RESET_DB(1) |
 			S_008020_SOFT_RESET_CB(1) |
 			S_008020_SOFT_RESET_PA(1) |
@@ -498,14 +501,18 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
 			S_008020_SOFT_RESET_TC(1) |
 			S_008020_SOFT_RESET_TA(1) |
 			S_008020_SOFT_RESET_VC(1) |
-			S_008020_SOFT_RESET_VGT(1));
+			S_008020_SOFT_RESET_VGT(1);
+		dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
 		(void)RREG32(R_008020_GRBM_SOFT_RESET);
 		udelay(50);
 		WREG32(R_008020_GRBM_SOFT_RESET, 0);
 		(void)RREG32(R_008020_GRBM_SOFT_RESET);
 	}
 	/* Reset CP (we always reset CP) */
-	WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1));
+	tmp = S_008020_SOFT_RESET_CP(1);
+	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
 	(void)RREG32(R_008020_GRBM_SOFT_RESET);
 	udelay(50);
 	WREG32(R_008020_GRBM_SOFT_RESET, 0);
@@ -533,6 +540,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
 		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
 	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
 		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
+	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
+		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
+	dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
+	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
+	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
 	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
 	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
 	udelay(50);
@@ -540,6 +555,17 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
 	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
 	/* Wait a little for things to settle down */
 	udelay(50);
+	dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+		RREG32(R_008010_GRBM_STATUS));
+	dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+		RREG32(R_008014_GRBM_STATUS2));
+	dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+		RREG32(R_000E50_SRBM_STATUS));
+	/* After reset we need to reinit the asic as GPU often endup in an
+	 * incoherent state.
+	 */
+	atom_asic_init(rdev->mode_info.atom_context);
+	rv515_mc_resume(rdev, &save);
 	return 0;
 }
 
@@ -1350,32 +1376,47 @@ int r600_ring_test(struct radeon_device *rdev)
 	return r;
 }
 
-/*
- * Writeback
- */
-int r600_wb_init(struct radeon_device *rdev)
+void r600_wb_disable(struct radeon_device *rdev)
+{
+	WREG32(SCRATCH_UMSK, 0);
+	if (rdev->wb.wb_obj) {
+		radeon_object_kunmap(rdev->wb.wb_obj);
+		radeon_object_unpin(rdev->wb.wb_obj);
+	}
+}
+
+void r600_wb_fini(struct radeon_device *rdev)
+{
+	r600_wb_disable(rdev);
+	if (rdev->wb.wb_obj) {
+		radeon_object_unref(&rdev->wb.wb_obj);
+		rdev->wb.wb = NULL;
+		rdev->wb.wb_obj = NULL;
+	}
+}
+
+int r600_wb_enable(struct radeon_device *rdev)
 {
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, 4096,
-					 true,
-					 RADEON_GEM_DOMAIN_GTT,
-					 false, &rdev->wb.wb_obj);
+		r = radeon_object_create(rdev, NULL, 4096, true,
+				RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+			dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->wb.wb_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-				      &rdev->wb.gpu_addr);
+		r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+				&rdev->wb.gpu_addr);
 		if (r) {
-			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+			dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+			r600_wb_fini(rdev);
 			return r;
 		}
 		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
 		if (r) {
-			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+			dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+			r600_wb_fini(rdev);
 			return r;
 		}
 	}
@@ -1386,21 +1427,6 @@ int r600_wb_init(struct radeon_device *rdev)
 	return 0;
 }
 
-void r600_wb_fini(struct radeon_device *rdev)
-{
-	if (rdev->wb.wb_obj) {
-		radeon_object_kunmap(rdev->wb.wb_obj);
-		radeon_object_unpin(rdev->wb.wb_obj);
-		radeon_object_unref(&rdev->wb.wb_obj);
-		rdev->wb.wb = NULL;
-		rdev->wb.wb_obj = NULL;
-	}
-}
-
-
-/*
- * CS
- */
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
@@ -1477,11 +1503,14 @@ int r600_startup(struct radeon_device *rdev)
 {
 	int r;
 
-	r600_gpu_reset(rdev);
-	r600_mc_resume(rdev);
-	r = r600_pcie_gart_enable(rdev);
-	if (r)
-		return r;
+	r600_mc_program(rdev);
+	if (rdev->flags & RADEON_IS_AGP) {
+		r600_agp_enable(rdev);
+	} else {
+		r = r600_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
 	r600_gpu_init(rdev);
 
 	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
@@ -1500,9 +1529,8 @@ int r600_startup(struct radeon_device *rdev)
 	r = r600_cp_resume(rdev);
 	if (r)
 		return r;
-	r = r600_wb_init(rdev);
-	if (r)
-		return r;
+	/* write back buffer are not vital so don't worry about failure */
+	r600_wb_enable(rdev);
 	return 0;
 }
 
@@ -1524,15 +1552,12 @@ int r600_resume(struct radeon_device *rdev)
 {
 	int r;
 
-	if (radeon_gpu_reset(rdev)) {
-		/* FIXME: what do we want to do here ? */
-	}
+	/* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
+	 * posting will perform necessary task to bring back GPU into good
+	 * shape.
+	 */
 	/* post card */
-	if (rdev->is_atom_bios) {
-		atom_asic_init(rdev->mode_info.atom_context);
-	} else {
-		radeon_combios_asic_init(rdev->ddev);
-	}
+	atom_asic_init(rdev->mode_info.atom_context);
 	/* Initialize clocks */
 	r = radeon_clocks_init(rdev);
 	if (r) {
@@ -1545,7 +1570,7 @@ int r600_resume(struct radeon_device *rdev)
 		return r;
 	}
 
-	r = radeon_ib_test(rdev);
+	r = r600_ib_test(rdev);
 	if (r) {
 		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 		return r;
@@ -1553,13 +1578,12 @@ int r600_resume(struct radeon_device *rdev)
 	return r;
 }
 
-
 int r600_suspend(struct radeon_device *rdev)
 {
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
 	rdev->cp.ready = false;
-
+	r600_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
 	radeon_object_unpin(rdev->r600_blit.shader_obj);
@@ -1576,7 +1600,6 @@ int r600_init(struct radeon_device *rdev)
 {
 	int r;
 
-	rdev->new_init_path = true;
 	r = radeon_dummy_page_init(rdev);
 	if (r)
 		return r;
@@ -1593,8 +1616,10 @@ int r600_init(struct radeon_device *rdev)
 		return -EINVAL;
 	}
 	/* Must be an ATOMBIOS */
-	if (!rdev->is_atom_bios)
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
 		return -EINVAL;
+	}
 	r = radeon_atombios_init(rdev);
 	if (r)
 		return r;
@@ -1616,15 +1641,8 @@ int r600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	r = r600_mc_init(rdev);
-	if (r) {
-		if (rdev->flags & RADEON_IS_AGP) {
-			/* Retry with disabling AGP */
-			r600_fini(rdev);
-			rdev->flags &= ~RADEON_IS_AGP;
-			return r600_init(rdev);
-		}
+	if (r)
 		return r;
-	}
 	/* Memory manager */
 	r = radeon_object_init(rdev);
 	if (r)
@@ -1653,12 +1671,10 @@ int r600_init(struct radeon_device *rdev)
 
 	r = r600_startup(rdev);
 	if (r) {
-		if (rdev->flags & RADEON_IS_AGP) {
-			/* Retry with disabling AGP */
-			r600_fini(rdev);
-			rdev->flags &= ~RADEON_IS_AGP;
-			return r600_init(rdev);
-		}
+		r600_suspend(rdev);
+		r600_wb_fini(rdev);
+		radeon_ring_fini(rdev);
+		r600_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
 	if (rdev->accel_working) {
@@ -1667,7 +1683,7 @@ int r600_init(struct radeon_device *rdev)
 			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
 			rdev->accel_working = false;
 		}
-		r = radeon_ib_test(rdev);
+		r = r600_ib_test(rdev);
 		if (r) {
 			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 			rdev->accel_working = false;
@@ -1683,19 +1699,15 @@ void r600_fini(struct radeon_device *rdev)
 
 	r600_blit_fini(rdev);
 	radeon_ring_fini(rdev);
+	r600_wb_fini(rdev);
 	r600_pcie_gart_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
-#if __OS_HAS_AGP
 	if (rdev->flags & RADEON_IS_AGP)
 		radeon_agp_fini(rdev);
-#endif
 	radeon_object_fini(rdev);
-	if (rdev->is_atom_bios)
-		radeon_atombios_fini(rdev);
-	else
-		radeon_combios_fini(rdev);
+	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
 	radeon_dummy_page_fini(rdev);