Diffstat (limited to 'drivers/gpu/drm/radeon/r300.c')
-rw-r--r--   drivers/gpu/drm/radeon/r300.c   120
1 files changed, 89 insertions, 31 deletions
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index e2ed5bc08170..053f4ec397f7 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -30,6 +30,8 @@
 #include "drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_drm.h"
+#include "radeon_share.h"
 
 /* r300,r350,rv350,rv370,rv380 depends on : */
 void r100_hdp_reset(struct radeon_device *rdev);
@@ -44,6 +46,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev);
 int r100_cs_packet_parse(struct radeon_cs_parser *p,
                          struct radeon_cs_packet *pkt,
                          unsigned idx);
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
 int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                               struct radeon_cs_reloc **cs_reloc);
 int r100_cs_parse_packet0(struct radeon_cs_parser *p,
@@ -80,8 +83,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
                 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
                 (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
-                mb();
         }
+        mb();
 }
 
 int rv370_pcie_gart_enable(struct radeon_device *rdev)
@@ -150,8 +153,13 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
         if (i < 0 || i > rdev->gart.num_gpu_pages) {
                 return -EINVAL;
         }
-        addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
-        writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
+        addr = (lower_32_bits(addr) >> 8) |
+               ((upper_32_bits(addr) & 0xff) << 24) |
+               0xc;
+        /* on x86 we want this to be CPU endian, on powerpc
+         * on powerpc without HW swappers, it'll get swapped on way
+         * into VRAM - so no need for cpu_to_le32 on VRAM tables */
+        writel(addr, ((void __iomem *)ptr) + (i * 4));
         return 0;
 }
 
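The rewritten entry packing above is easier to follow with a worked value. Here is a stand-alone sketch of the same bit packing (the helper name and the sample bus address are mine, purely for illustration): the page address is shifted right by 8, its bits 39:32 land in entry bits 31:24, and 0xc sets the low flag bits; because GART pages are 4 KiB aligned, address bits 11:0 are zero and never collide with those flags. It also shows why the old "<< 4" shift was wrong: it would have dropped the high address bits into the middle of the low address field.

#include <stdint.h>
#include <stdio.h>

/* Illustrative re-statement of the packing done in
 * rv370_pcie_gart_set_page() above, outside the kernel. */
static uint32_t rv370_gart_entry(uint64_t addr)
{
        return ((uint32_t)addr >> 8) |                   /* addr bits 31:8  -> entry 23:0  */
               (((uint32_t)(addr >> 32) & 0xff) << 24) | /* addr bits 39:32 -> entry 31:24 */
               0xc;                                      /* flag bits in the low nibble    */
}

int main(void)
{
        /* a page at bus address 0x1_2345_6000 packs to 0x0123456c */
        printf("0x%08x\n", rv370_gart_entry(0x123456000ULL));
        return 0;
}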
@@ -440,6 +448,7 @@ void r300_gpu_init(struct radeon_device *rdev)
                 /* rv350,rv370,rv380 */
                 rdev->num_gb_pipes = 1;
         }
+        rdev->num_z_pipes = 1;
         gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
         switch (rdev->num_gb_pipes) {
         case 2:
@@ -478,7 +487,8 @@ void r300_gpu_init(struct radeon_device *rdev)
                 printk(KERN_WARNING "Failed to wait MC idle while "
                        "programming pipes. Bad things might happen.\n");
         }
-        DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
+        DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
+                 rdev->num_gb_pipes, rdev->num_z_pipes);
 }
 
 int r300_ga_reset(struct radeon_device *rdev)
@@ -579,35 +589,12 @@ void r300_vram_info(struct radeon_device *rdev)
         } else {
                 rdev->mc.vram_width = 64;
         }
-        rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 
-        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+        r100_vram_init_sizes(rdev);
 }
 
 
 /*
- * Indirect registers accessor
- */
-uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
-{
-        uint32_t r;
-
-        WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
-        (void)RREG32(RADEON_PCIE_INDEX);
-        r = RREG32(RADEON_PCIE_DATA);
-        return r;
-}
-
-void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
-        WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
-        (void)RREG32(RADEON_PCIE_INDEX);
-        WREG32(RADEON_PCIE_DATA, (v));
-        (void)RREG32(RADEON_PCIE_DATA);
-}
-
-/*
  * PCIE Lanes
  */
 
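The rv370_pcie_rreg()/rv370_pcie_wreg() helpers deleted from this file are a textbook index/data indirect-register pair: write the target index, read the index register back to flush the posted write, then move the value through the data port. Below is a minimal user-space sketch of that access pattern against a fake register file; the mmio_* helpers and the 0x30/0x34 offsets are stand-ins of my own, not the driver's API.

#include <stdint.h>
#include <stdio.h>

static uint32_t mmio[64];  /* fake 256-byte register aperture */

static uint32_t mmio_read32(uint32_t reg)              { return mmio[reg / 4]; }
static void     mmio_write32(uint32_t reg, uint32_t v) { mmio[reg / 4] = v;    }

#define PCIE_INDEX 0x30  /* stand-in offsets for the PCIE index/data pair */
#define PCIE_DATA  0x34

/* Index/data indirect access, mirroring the removed helpers: the dummy
 * read of the index register flushes the posted write before the data
 * port is touched.  (The fake aperture has a single data slot, so this
 * demonstrates the access sequence, not real hardware behaviour.) */
static uint32_t pcie_rreg(uint32_t reg)
{
        mmio_write32(PCIE_INDEX, reg & 0xff);
        (void)mmio_read32(PCIE_INDEX);
        return mmio_read32(PCIE_DATA);
}

static void pcie_wreg(uint32_t reg, uint32_t v)
{
        mmio_write32(PCIE_INDEX, reg & 0xff);
        (void)mmio_read32(PCIE_INDEX);
        mmio_write32(PCIE_DATA, v);
        (void)mmio_read32(PCIE_DATA);
}

int main(void)
{
        pcie_wreg(0x11, 0xdeadbeef);
        printf("0x%08x\n", pcie_rreg(0x11));
        return 0;
}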
@@ -970,7 +957,7 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track)
 
 static const unsigned r300_reg_safe_bm[159] = {
         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-        0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
+        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
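r300_reg_safe_bm packs one bit per register dword, so flipping one bit in words 4 and 6 changes how two specific registers are treated during command-stream checking. Assuming the indexing used by the driver's packet0 parsing (word = reg >> 7, bit = (reg >> 2) & 31; this is an inference, treat it as such), bit 6 of those words maps to register offsets 0x218 and 0x318, which would be the CRTC/CRTC2 vline trigger registers handled by the new case added to r300_packet0_check() further down. A small sketch of that mapping:

#include <stdint.h>
#include <stdio.h>

/* Map a register offset to its slot in a reg_safe_bm-style bitmap:
 * each 32-bit word covers 32 register dwords (128 bytes of register
 * space).  Indexing assumed, not taken from this hunk. */
static void reg_to_bm_slot(uint32_t reg, uint32_t *word, uint32_t *bit)
{
        *word = reg >> 7;         /* 128 bytes of registers per word */
        *bit  = (reg >> 2) & 31;  /* one bit per 4-byte register     */
}

int main(void)
{
        uint32_t w, b;

        reg_to_bm_slot(0x218, &w, &b);
        printf("0x218 -> word %u, bit %u\n", w, b);  /* word 4, bit 6 */
        reg_to_bm_slot(0x318, &w, &b);
        printf("0x318 -> word %u, bit %u\n", w, b);  /* word 6, bit 6 */
        return 0;
}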
@@ -1019,7 +1006,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
         struct radeon_cs_reloc *reloc;
         struct r300_cs_track *track;
         volatile uint32_t *ib;
-        uint32_t tmp;
+        uint32_t tmp, tile_flags = 0;
         unsigned i;
         int r;
 
@@ -1027,6 +1014,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
         ib_chunk = &p->chunks[p->chunk_ib_idx];
         track = (struct r300_cs_track*)p->track;
         switch(reg) {
+        case AVIVO_D1MODE_VLINE_START_END:
+        case RADEON_CRTC_GUI_TRIG_VLINE:
+                r = r100_cs_packet_parse_vline(p);
+                if (r) {
+                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                  idx, reg);
+                        r100_cs_dump_packet(p, pkt);
+                        return r;
+                }
+                break;
         case RADEON_DST_PITCH_OFFSET:
         case RADEON_SRC_PITCH_OFFSET:
                 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1038,7 +1035,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                 }
                 tmp = ib_chunk->kdata[idx] & 0x003fffff;
                 tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
-                ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
+
+                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                        tile_flags |= RADEON_DST_TILE_MACRO;
+                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+                        if (reg == RADEON_SRC_PITCH_OFFSET) {
+                                DRM_ERROR("Cannot src blit from microtiled surface\n");
+                                r100_cs_dump_packet(p, pkt);
+                                return -EINVAL;
+                        }
+                        tile_flags |= RADEON_DST_TILE_MICRO;
+                }
+                tmp |= tile_flags;
+                ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
                 break;
         case R300_RB3D_COLOROFFSET0:
         case R300_RB3D_COLOROFFSET1:
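The interesting detail in this hunk is the final mask: the old code kept bits 31:22 of the submitted word (0xffc00000), the new code keeps only bits 29:22 (0x3fc00000). Combined with the 0x003fffff offset mask above, that implies a layout of offset in bits 21:0, pitch in 29:22, and tile control in 31:30, with the tile bits now derived from the relocation's tiling flags instead of trusted from userspace. A sketch of composing such a word under that inferred layout (the flag values here are illustrative, not the radeon_reg.h definitions):

#include <stdint.h>
#include <stdio.h>

/* Inferred layout of the DST/SRC_PITCH_OFFSET word, based only on the
 * masks in the hunk above: offset in bits 21:0, pitch in 29:22,
 * tile-control flags in 31:30. */
#define DST_TILE_MACRO (1u << 30)
#define DST_TILE_MICRO (1u << 31)

static uint32_t pitch_offset_word(uint32_t submitted, uint64_t gpu_offset,
                                  uint32_t tile_flags)
{
        uint32_t off = submitted & 0x003fffff;   /* offset field from userspace   */

        off += (uint32_t)(gpu_offset >> 10);     /* relocate to the real BO       */
        off |= tile_flags;                       /* tiling taken from the BO      */
        return (submitted & 0x3fc00000) | off;   /* keep pitch, drop user tiles   */
}

int main(void)
{
        /* pitch field preserved, offset relocated, macro tiling forced on */
        printf("0x%08x\n",
               pitch_offset_word(0x00400010, 0x10000000, DST_TILE_MACRO));
        return 0;
}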
@@ -1127,6 +1136,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                 /* RB3D_COLORPITCH1 */
                 /* RB3D_COLORPITCH2 */
                 /* RB3D_COLORPITCH3 */
+                r = r100_cs_packet_next_reloc(p, &reloc);
+                if (r) {
+                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                  idx, reg);
+                        r100_cs_dump_packet(p, pkt);
+                        return r;
+                }
+
+                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                        tile_flags |= R300_COLOR_TILE_ENABLE;
+                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                        tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+
+                tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+                tmp |= tile_flags;
+                ib[idx] = tmp;
+
                 i = (reg - 0x4E38) >> 2;
                 track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
                 switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
@@ -1182,6 +1208,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                 break;
         case 0x4F24:
                 /* ZB_DEPTHPITCH */
+                r = r100_cs_packet_next_reloc(p, &reloc);
+                if (r) {
+                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                  idx, reg);
+                        r100_cs_dump_packet(p, pkt);
+                        return r;
+                }
+
+                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                        tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                        tile_flags |= R300_DEPTHMICROTILE_TILED;;
+
+                tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+                tmp |= tile_flags;
+                ib[idx] = tmp;
+
                 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
                 break;
         case 0x4104:
1187 | case 0x4104: | 1230 | case 0x4104: |
@@ -1341,6 +1384,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1341 | tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; | 1384 | tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; |
1342 | track->textures[i].txdepth = tmp; | 1385 | track->textures[i].txdepth = tmp; |
1343 | break; | 1386 | break; |
1387 | case R300_ZB_ZPASS_ADDR: | ||
1388 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1389 | if (r) { | ||
1390 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1391 | idx, reg); | ||
1392 | r100_cs_dump_packet(p, pkt); | ||
1393 | return r; | ||
1394 | } | ||
1395 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | ||
1396 | break; | ||
1397 | case 0x4be8: | ||
1398 | /* valid register only on RV530 */ | ||
1399 | if (p->rdev->family == CHIP_RV530) | ||
1400 | break; | ||
1401 | /* fallthrough do not move */ | ||
1344 | default: | 1402 | default: |
1345 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 1403 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
1346 | reg, idx); | 1404 | reg, idx); |