Diffstat (limited to 'drivers/gpu/drm/radeon/r100.c')
-rw-r--r-- | drivers/gpu/drm/radeon/r100.c | 188
1 file changed, 73 insertions(+), 115 deletions(-)
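Every hunk below follows the same mechanical pattern: the per-function ib_chunk locals go away and each direct ib_chunk->kdata[idx] read of the command stream is routed through the radeon_get_ib_value(p, idx) accessor instead; in r100_packet3_check() the open-coded PACKET3_3D_LOAD_VBPNTR handling additionally moves into the r100_packet3_load_vbpntr() helper. As a rough mental model only (not the kernel's actual definition, which may fetch the IB data differently, for example a page at a time), the accessor amounts to returning one dword of the IB chunk:

/* Conceptual sketch only -- equivalent to the expression it replaces. */
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	return p->chunks[p->chunk_ib_idx].kdata[idx];
}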
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 737970b43aef..9ab976d97e91 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -863,13 +863,11 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	volatile uint32_t *ib;
 	unsigned i;
 	unsigned idx;
 
 	ib = p->ib->ptr;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	idx = pkt->idx;
 	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
 		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -896,7 +894,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
			  idx, ib_chunk->length_dw);
 		return -EINVAL;
 	}
-	header = ib_chunk->kdata[idx];
+	header = radeon_get_ib_value(p, idx);
 	pkt->idx = idx;
 	pkt->type = CP_PACKET_GET_TYPE(header);
 	pkt->count = CP_PACKET_GET_COUNT(header);
@@ -939,7 +937,6 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
  */
 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct drm_mode_object *obj;
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
@@ -947,8 +944,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	int crtc_id;
 	int r;
 	uint32_t header, h_idx, reg;
+	volatile uint32_t *ib;
 
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
+	ib = p->ib->ptr;
 
 	/* parse the wait until */
 	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -963,7 +961,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 		return r;
 	}
 
-	if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
+	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
 		DRM_ERROR("vline wait had illegal wait until\n");
 		r = -EINVAL;
 		return r;
@@ -978,9 +976,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	p->idx += waitreloc.count;
 	p->idx += p3reloc.count;
 
-	header = ib_chunk->kdata[h_idx];
-	crtc_id = ib_chunk->kdata[h_idx + 5];
-	reg = ib_chunk->kdata[h_idx] >> 2;
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 5);
+	reg = header >> 2;
 	mutex_lock(&p->rdev->ddev->mode_config.mutex);
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
@@ -994,8 +992,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 
 	if (!crtc->enabled) {
 		/* if the CRTC isn't enabled - we need to nop out the wait until */
-		ib_chunk->kdata[h_idx + 2] = PACKET2(0);
-		ib_chunk->kdata[h_idx + 3] = PACKET2(0);
+
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
 	} else if (crtc_id == 1) {
 		switch (reg) {
 		case AVIVO_D1MODE_VLINE_START_END:
@@ -1011,8 +1010,8 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
			r = -EINVAL;
			goto out;
		}
-		ib_chunk->kdata[h_idx] = header;
-		ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
+		ib[h_idx] = header;
+		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
 	}
 out:
 	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
@@ -1033,7 +1032,6 @@ out:
 int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_chunk *relocs_chunk;
 	struct radeon_cs_packet p3reloc;
 	unsigned idx;
@@ -1044,7 +1042,6 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 	*cs_reloc = NULL;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
 	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
 	if (r) {
@@ -1057,7 +1054,7 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
 		r100_cs_dump_packet(p, &p3reloc);
 		return -EINVAL;
 	}
-	idx = ib_chunk->kdata[p3reloc.idx + 1];
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
 	if (idx >= relocs_chunk->length_dw) {
 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
@@ -1126,7 +1123,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_reloc *reloc;
 	struct r100_cs_track *track;
 	volatile uint32_t *ib;
@@ -1134,11 +1130,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	int r;
 	int i, face;
 	u32 tile_flags = 0;
+	u32 idx_value;
 
 	ib = p->ib->ptr;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	track = (struct r100_cs_track *)p->track;
 
+	idx_value = radeon_get_ib_value(p, idx);
+
 	switch (reg) {
 	case RADEON_CRTC_GUI_TRIG_VLINE:
 		r = r100_cs_packet_parse_vline(p);
@@ -1166,8 +1164,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
			return r;
		}
		track->zb.robj = reloc->robj;
-		track->zb.offset = ib_chunk->kdata[idx];
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->zb.offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1178,8 +1176,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
			return r;
		}
		track->cb[0].robj = reloc->robj;
-		track->cb[0].offset = ib_chunk->kdata[idx];
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->cb[0].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
@@ -1192,7 +1190,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
			r100_cs_dump_packet(p, pkt);
			return r;
		}
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
@@ -1208,8 +1206,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
			r100_cs_dump_packet(p, pkt);
			return r;
		}
-		track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->textures[0].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
@@ -1225,8 +1223,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
			r100_cs_dump_packet(p, pkt);
			return r;
		}
-		track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->textures[1].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
@@ -1242,12 +1240,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
			r100_cs_dump_packet(p, pkt);
			return r;
		}
-		track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->textures[2].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
-		track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
+		track->maxy = ((idx_value >> 16) & 0x7FF);
		break;
	case RADEON_RB3D_COLORPITCH:
		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1263,17 +1261,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
 
-		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;
 
-		track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
+		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		break;
	case RADEON_RB3D_DEPTHPITCH:
-		track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
+		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		break;
	case RADEON_RB3D_CNTL:
-		switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
@@ -1291,13 +1289,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
-				  ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
-		track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
+		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
-		switch (ib_chunk->kdata[idx] & 0xf) {
+		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
@@ -1321,44 +1319,44 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
			r100_cs_dump_packet(p, pkt);
			return r;
		}
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
-			uint32_t temp = ib_chunk->kdata[idx] >> 4;
+			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
		}
		break;
	case RADEON_SE_VF_CNTL:
-		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		track->vap_vf_cntl = idx_value;
		break;
	case RADEON_SE_VTX_FMT:
-		track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
+		track->vtx_size = r100_get_vtx_size(idx_value);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
-		track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
-		track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
-		track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
+		track->textures[i].pitch = idx_value + 32;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
-		track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
+		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
-		tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
+		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
-		tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
+		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		break;
@@ -1366,16 +1364,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
-		if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
+		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
-			track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
-			track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		}
-		if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
-		switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
+		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
@@ -1402,13 +1400,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
			track->textures[i].cpp = 4;
			break;
		}
-		track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
-		track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
+		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		break;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
-		tmp = ib_chunk->kdata[idx];
+		tmp = idx_value;
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@@ -1427,15 +1425,14 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_object *robj)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	unsigned idx;
-
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
+	u32 value;
 	idx = pkt->idx + 1;
-	if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
+	value = radeon_get_ib_value(p, idx + 2);
+	if ((value + 1) > radeon_object_size(robj)) {
 		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
-			  ib_chunk->kdata[idx+2] + 1,
+			  value + 1,
			  radeon_object_size(robj));
 		return -EINVAL;
 	}
@@ -1445,59 +1442,20 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_reloc *reloc;
 	struct r100_cs_track *track;
 	unsigned idx;
-	unsigned i, c;
 	volatile uint32_t *ib;
 	int r;
 
 	ib = p->ib->ptr;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	idx = pkt->idx + 1;
 	track = (struct r100_cs_track *)p->track;
 	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
-		c = ib_chunk->kdata[idx++];
-		track->num_arrays = c;
-		for (i = 0; i < (c - 1); i += 2, idx += 3) {
-			r = r100_cs_packet_next_reloc(p, &reloc);
-			if (r) {
-				DRM_ERROR("No reloc for packet3 %d\n",
-					  pkt->opcode);
-				r100_cs_dump_packet(p, pkt);
-				return r;
-			}
-			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-			track->arrays[i + 0].robj = reloc->robj;
-			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-			track->arrays[i + 0].esize &= 0x7F;
-			r = r100_cs_packet_next_reloc(p, &reloc);
-			if (r) {
-				DRM_ERROR("No reloc for packet3 %d\n",
-					  pkt->opcode);
-				r100_cs_dump_packet(p, pkt);
-				return r;
-			}
-			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
-			track->arrays[i + 1].robj = reloc->robj;
-			track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
-			track->arrays[i + 1].esize &= 0x7F;
-		}
-		if (c & 1) {
-			r = r100_cs_packet_next_reloc(p, &reloc);
-			if (r) {
-				DRM_ERROR("No reloc for packet3 %d\n",
-					  pkt->opcode);
-				r100_cs_dump_packet(p, pkt);
-				return r;
-			}
-			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-			track->arrays[i + 0].robj = reloc->robj;
-			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-			track->arrays[i + 0].esize &= 0x7F;
-		}
+		r = r100_packet3_load_vbpntr(p, pkt, idx);
+		if (r)
+			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1506,7 +1464,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
			r100_cs_dump_packet(p, pkt);
			return r;
		}
-		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
@@ -1520,27 +1478,27 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
			r100_cs_dump_packet(p, pkt);
			return r;
		}
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
		track->num_arrays = 1;
-		track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);
+		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
 
		track->arrays[0].robj = reloc->robj;
		track->arrays[0].esize = track->vtx_size;
 
-		track->max_indx = ib_chunk->kdata[idx+1];
+		track->max_indx = radeon_get_ib_value(p, idx+1);
 
-		track->vap_vf_cntl = ib_chunk->kdata[idx+3];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
	case PACKET3_3D_DRAW_IMMD:
-		if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
+		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
-		track->vap_vf_cntl = ib_chunk->kdata[idx+1];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
@@ -1548,11 +1506,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
		break;
	/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
-		if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
+		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
-		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
@@ -1560,28 +1518,28 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
		break;
	/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
-		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
	/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
-		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
	/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
-		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
	/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
-		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;