Diffstat (limited to 'drivers/gpu/drm/radeon/r300.c')
 -rw-r--r--	drivers/gpu/drm/radeon/r300.c	| 157
 1 file changed, 82 insertions(+), 75 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 43b55a030b4d..4cef90cd74e5 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -117,18 +117,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
+	radeon_gart_restore(rdev);
 	/* discard memory request outside of configured range */
 	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
-	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
-	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
 	table_addr = rdev->gart.table_addr;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
 	/* FIXME: setup default page */
-	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
+	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
 	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
 	/* Clear error */
 	WREG32_PCIE(0x18, 0);
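The rewritten end-address computation assumes the new MC bookkeeping in which gtt_end is the inclusive last byte of the aperture (gtt_start + gtt_size - 1); masking with ~RADEON_GPU_PAGE_MASK then yields the same register value the old start + size - page arithmetic produced. A minimal standalone sketch of that equivalence (field names mirror the diff, values are invented):

#include <stdint.h>
#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)

int main(void)
{
	uint64_t gtt_start = 32ULL << 20;              /* invented: GTT at 32MB */
	uint64_t gtt_size  = 512ULL << 20;             /* invented: 512MB aperture */
	uint64_t gtt_end   = gtt_start + gtt_size - 1; /* inclusive last byte */

	/* new code: end rounded down to the start of the last GPU page */
	uint64_t new_val = gtt_end & ~(uint64_t)RADEON_GPU_PAGE_MASK;
	/* old code: same value derived from start + size */
	uint64_t old_val = gtt_start + gtt_size - RADEON_GPU_PAGE_SIZE;

	printf("new=0x%llx old=0x%llx\n",
	       (unsigned long long)new_val, (unsigned long long)old_val);
	return 0;
}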
@@ -174,18 +175,20 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 	/* Who ever call radeon_fence_emit should call ring_lock and ask
 	 * for enough space (today caller are ib schedule and buffer move) */
 	/* Write SC register so SC & US assert idle */
-	radeon_ring_write(rdev, PACKET0(0x43E0, 0));
+	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
 	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, PACKET0(0x43E4, 0));
+	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
 	radeon_ring_write(rdev, 0);
 	/* Flush 3D cache */
-	radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
-	radeon_ring_write(rdev, (2 << 0));
-	radeon_ring_write(rdev, PACKET0(0x4F18, 0));
-	radeon_ring_write(rdev, (1 << 0));
+	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
+	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, R300_ZC_FLUSH);
 	/* Wait until IDLE & CLEAN */
-	radeon_ring_write(rdev, PACKET0(0x1720, 0));
-	radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+				 RADEON_WAIT_2D_IDLECLEAN |
+				 RADEON_WAIT_DMA_GUI_IDLE));
 	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
 	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
 				RADEON_HDP_READ_BUFFER_INVALIDATE);
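Every write above starts with PACKET0(reg, n), the CP type-0 packet header meaning "write n+1 consecutive dwords beginning at register reg", so replacing raw offsets with named registers changes no behavior, only readability. A rough sketch of the classic encoding (illustrative; the kernel's actual macro lives in the r100/radeon headers):

#include <stdint.h>

/* CP type-0 header, classic radeon layout: bits 31:30 = type (0),
 * bits 29:16 = dword count - 1, low bits = register offset in dwords.
 * Illustrative sketch, not the kernel macro verbatim. */
static inline uint32_t packet0(uint32_t reg, uint32_t n)
{
	return (n << 16) | (reg >> 2);
}

/* e.g. a one-dword write to R300_RE_SCISSORS_TL (0x43E0):
 * the ring gets packet0(0x43E0, 0), then the single data dword. */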
@@ -198,50 +201,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
 }
 
-int r300_copy_dma(struct radeon_device *rdev,
-		  uint64_t src_offset,
-		  uint64_t dst_offset,
-		  unsigned num_pages,
-		  struct radeon_fence *fence)
-{
-	uint32_t size;
-	uint32_t cur_size;
-	int i, num_loops;
-	int r = 0;
-
-	/* radeon pitch is /64 */
-	size = num_pages << PAGE_SHIFT;
-	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
-	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
-	if (r) {
-		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
-	}
-	/* Must wait for 2D idle & clean before DMA or hangs might happen */
-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
-	radeon_ring_write(rdev, (1 << 16));
-	for (i = 0; i < num_loops; i++) {
-		cur_size = size;
-		if (cur_size > 0x1FFFFF) {
-			cur_size = 0x1FFFFF;
-		}
-		size -= cur_size;
-		radeon_ring_write(rdev, PACKET0(0x720, 2));
-		radeon_ring_write(rdev, src_offset);
-		radeon_ring_write(rdev, dst_offset);
-		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
-		src_offset += cur_size;
-		dst_offset += cur_size;
-	}
-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
-	if (fence) {
-		r = radeon_fence_emit(rdev, fence);
-	}
-	radeon_ring_unlock_commit(rdev);
-	return r;
-}
-
 void r300_ring_start(struct radeon_device *rdev)
 {
 	unsigned gb_tile_config;
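For reference on the deleted helper: the DMA packet's size field appears limited to 21 bits (0x1FFFFF), so r300_copy_dma capped each blit at that many bytes and looped. A standalone sketch of that chunking arithmetic (sizes invented):

#include <stdint.h>
#include <stdio.h>

#define DMA_MAX_BYTES 0x1FFFFF /* 21-bit size field, as in the removed code */

int main(void)
{
	uint32_t size = 24u << 20; /* invented: a 24MB copy */
	/* DIV_ROUND_UP(size, DMA_MAX_BYTES), as the removed code computed */
	uint32_t num_loops = (size + DMA_MAX_BYTES - 1) / DMA_MAX_BYTES;

	for (uint32_t i = 0; i < num_loops; i++) {
		uint32_t cur = size > DMA_MAX_BYTES ? DMA_MAX_BYTES : size;
		size -= cur;
		printf("chunk %u: 0x%x bytes\n", i, cur);
	}
	return 0;
}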
@@ -281,8 +240,8 @@ void r300_ring_start(struct radeon_device *rdev)
 	radeon_ring_write(rdev,
 			  RADEON_WAIT_2D_IDLECLEAN |
 			  RADEON_WAIT_3D_IDLECLEAN);
-	radeon_ring_write(rdev, PACKET0(0x170C, 0));
-	radeon_ring_write(rdev, 1 << 31);
+	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
 	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
@@ -349,8 +308,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		/* read MC_STATUS */
-		tmp = RREG32(0x0150);
-		if (tmp & (1 << 4)) {
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & R300_MC_IDLE) {
 			return 0;
 		}
 		DRM_UDELAY(1);
@@ -395,8 +354,8 @@ void r300_gpu_init(struct radeon_device *rdev)
 		       "programming pipes. Bad things might happen.\n");
 	}
 
-	tmp = RREG32(0x170C);
-	WREG32(0x170C, tmp | (1 << 31));
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
 
 	WREG32(R300_RB2D_DSTCACHE_MODE,
 	       R300_DC_AUTOFLUSH_ENABLE |
@@ -437,8 +396,8 @@ int r300_ga_reset(struct radeon_device *rdev)
 			/* GA still busy soft reset it */
 			WREG32(0x429C, 0x200);
 			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
-			WREG32(0x43E0, 0);
-			WREG32(0x43E4, 0);
+			WREG32(R300_RE_SCISSORS_TL, 0);
+			WREG32(R300_RE_SCISSORS_BR, 0);
 			WREG32(0x24AC, 0);
 		}
 		/* Wait to prevent race in RBBM_STATUS */
@@ -488,7 +447,7 @@ int r300_gpu_reset(struct radeon_device *rdev)
 	}
 	/* Check if GPU is idle */
 	status = RREG32(RADEON_RBBM_STATUS);
-	if (status & (1 << 31)) {
+	if (status & RADEON_RBBM_ACTIVE) {
 		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
 		return -1;
 	}
@@ -500,13 +459,13 @@ int r300_gpu_reset(struct radeon_device *rdev)
 /*
  * r300,r350,rv350,rv380 VRAM info
  */
-void r300_vram_info(struct radeon_device *rdev)
+void r300_mc_init(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u64 base;
+	u32 tmp;
 
 	/* DDR for all card after R300 & IGP */
 	rdev->mc.vram_is_ddr = true;
-
 	tmp = RREG32(RADEON_MEM_CNTL);
 	tmp &= R300_MEM_NUM_CHANNELS_MASK;
 	switch (tmp) {
@@ -515,8 +474,13 @@ void r300_vram_info(struct radeon_device *rdev)
 	case 2: rdev->mc.vram_width = 256; break;
 	default: rdev->mc.vram_width = 128; break;
 	}
-
 	r100_vram_init_sizes(rdev);
+	base = rdev->mc.aper_base;
+	if (rdev->flags & RADEON_IS_IGP)
+		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
 }
 
 void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
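On IGP parts the "VRAM" is stolen system memory described by RADEON_NB_TOM, which the new base computation decodes as: low 16 bits = first 64KB unit of the stolen range. A standalone sketch of that decode (register value invented; the size formula mirrors what the IGP sizing path appears to do and is stated here as an assumption):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nb_tom = 0x0fff0000; /* invented: stolen memory at 0, 256MB */

	/* base, exactly as the new r300_mc_init computes it */
	uint64_t base = (uint64_t)(nb_tom & 0xffff) << 16;
	/* size: (last unit - first unit + 1) in 64KB units (assumed layout) */
	uint64_t size = ((uint64_t)(nb_tom >> 16) - (nb_tom & 0xffff) + 1) << 16;

	printf("stolen base=0x%llx size=%lluMB\n",
	       (unsigned long long)base, (unsigned long long)(size >> 20));
	return 0;
}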
@@ -578,6 +542,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
 
 }
 
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* FIXME wait for idle */
+
+	if (rdev->family < CHIP_R600)
+		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+	else
+		link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+		return 0;
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
 #if defined(CONFIG_DEBUG_FS)
 static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
 {
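The new rv370_get_pcie_lanes() is a pure decode of the read-back width field in LC_LINK_WIDTH_CNTL. A standalone sketch of the same decode (mask, shift, and the code-to-lanes mapping are placeholders standing in for the RADEON_PCIE_LC_LINK_WIDTH_* constants):

#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout; the real constants are in radeon_reg.h. */
#define LC_LINK_WIDTH_RD_SHIFT 4
#define LC_LINK_WIDTH_RD_MASK  (0x7 << LC_LINK_WIDTH_RD_SHIFT)

static int decode_lanes(uint32_t link_width_cntl)
{
	switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
	case 0: return 0;   /* ..._X0 */
	case 1: return 1;   /* ..._X1 */
	case 2: return 2;   /* ..._X2 */
	case 3: return 4;   /* ..._X4 */
	case 4: return 8;   /* ..._X8 */
	default: return 16; /* ..._X16 */
	}
}

int main(void)
{
	printf("%d lanes\n", decode_lanes(3u << LC_LINK_WIDTH_RD_SHIFT));
	return 0;
}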
@@ -707,6 +705,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			tile_flags |= R300_TXO_MACRO_TILE;
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
 			tile_flags |= R300_TXO_MICRO_TILE;
+		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+			tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
 
 		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
 		tmp |= tile_flags;
@@ -757,6 +757,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			tile_flags |= R300_COLOR_TILE_ENABLE;
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
 			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+			tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
 
 		tmp = idx_value & ~(0x7 << 16);
 		tmp |= tile_flags;
@@ -828,7 +830,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
 			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-			tile_flags |= R300_DEPTHMICROTILE_TILED;;
+			tile_flags |= R300_DEPTHMICROTILE_TILED;
+		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+			tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
 
 		tmp = idx_value & ~(0x7 << 16);
 		tmp |= tile_flags;
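The three hunks above are the same fix in three register families (texture, color buffer, depth buffer): a buffer tiled RADEON_TILING_MICRO_SQUARE previously fell through with no micro-tile bit set at all. A condensed sketch of the now-shared pattern (the helper and the flag values are invented for illustration; the real flags come from radeon_drm.h):

#include <stdint.h>

/* Placeholder flag values; see radeon_drm.h for the real ones. */
#define RADEON_TILING_MICRO        0x02
#define RADEON_TILING_MICRO_SQUARE 0x20

/* micro_bit/square_bit stand in for e.g. R300_TXO_MICRO_TILE and
 * R300_TXO_MICRO_TILE_SQUARE: MICRO takes priority, SQUARE is the
 * new else branch, neither set leaves the register bits clear. */
static uint32_t micro_tile_bits(uint32_t tiling_flags,
				uint32_t micro_bit, uint32_t square_bit)
{
	if (tiling_flags & RADEON_TILING_MICRO)
		return micro_bit;
	else if (tiling_flags & RADEON_TILING_MICRO_SQUARE)
		return square_bit;
	return 0;
}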
@@ -1387,12 +1391,15 @@ int r300_init(struct radeon_device *rdev)
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
 	radeon_pm_init(rdev);
-	/* Get vram informations */
-	r300_vram_info(rdev);
-	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
-	if (r)
-		return r;
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
 	if (r)
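Net effect of this last hunk: AGP bring-up becomes best-effort, with radeon_agp_disable() downgrading the device so the MC/GART setup takes the PCI(E) path instead of failing the whole init. A standalone sketch of that fallback shape (stubs invented; a failure is simulated):

#include <stdbool.h>
#include <stdio.h>

static bool is_agp = true; /* stands in for rdev->flags & RADEON_IS_AGP */

static int  agp_init(void)    { return -1; }      /* simulate failed bring-up */
static void agp_disable(void) { is_agp = false; } /* clear the AGP flag */

int main(void)
{
	if (is_agp) {
		if (agp_init())
			agp_disable(); /* downgrade instead of aborting init */
	}
	/* mc_init() would now also place the GTT, as in r300_mc_init() */
	printf("AGP in use: %s\n", is_agp ? "yes" : "no");
	return 0;
}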