Diffstat (limited to 'drivers/gpu/drm/radeon/r100.c')
-rw-r--r--	drivers/gpu/drm/radeon/r100.c	197
1 file changed, 77 insertions(+), 120 deletions(-)
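
The diff below routes every read of the command-stream indirect buffer (IB) through the radeon_get_ib_value() helper instead of indexing ib_chunk->kdata[] directly, caching the result as idx_value where one register write is decoded several times. For orientation only, a minimal sketch of what such an accessor has to do is shown here; it assumes the whole IB chunk is still mapped at kdata[], whereas the in-tree helper declared in radeon.h may fetch the dword through a paged mapping instead.

/*
 * Hypothetical sketch, not the in-tree implementation: return one dword
 * of the parser's indirect buffer chunk, addressed by dword index.
 * Assumes the IB chunk is fully mapped at kdata[]; the real helper may
 * page the chunk in on demand.
 */
static inline uint32_t radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];

	return ib_chunk->kdata[idx];
}

With the reads funneled through one accessor, the open-coded PACKET3_3D_LOAD_VBPNTR loop in r100_packet3_check() also collapses into a single call to r100_packet3_load_vbpntr(), as the corresponding hunk shows.
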
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index be51c5f7d0f6..e6cce24de802 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -863,13 +863,11 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 void r100_cs_dump_packet(struct radeon_cs_parser *p,
 			 struct radeon_cs_packet *pkt)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	volatile uint32_t *ib;
 	unsigned i;
 	unsigned idx;
 
 	ib = p->ib->ptr;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	idx = pkt->idx;
 	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
 		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -896,7 +894,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
 			  idx, ib_chunk->length_dw);
 		return -EINVAL;
 	}
-	header = ib_chunk->kdata[idx];
+	header = radeon_get_ib_value(p, idx);
 	pkt->idx = idx;
 	pkt->type = CP_PACKET_GET_TYPE(header);
 	pkt->count = CP_PACKET_GET_COUNT(header);
@@ -939,7 +937,6 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
  */
 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct drm_mode_object *obj;
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
@@ -947,8 +944,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	int crtc_id;
 	int r;
 	uint32_t header, h_idx, reg;
+	volatile uint32_t *ib;
 
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
+	ib = p->ib->ptr;
 
 	/* parse the wait until */
 	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -963,24 +961,24 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 		return r;
 	}
 
-	if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
+	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
 		DRM_ERROR("vline wait had illegal wait until\n");
 		r = -EINVAL;
 		return r;
 	}
 
 	/* jump over the NOP */
-	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
+	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
 	if (r)
 		return r;
 
 	h_idx = p->idx - 2;
-	p->idx += waitreloc.count;
-	p->idx += p3reloc.count;
+	p->idx += waitreloc.count + 2;
+	p->idx += p3reloc.count + 2;
 
-	header = ib_chunk->kdata[h_idx];
-	crtc_id = ib_chunk->kdata[h_idx + 5];
-	reg = ib_chunk->kdata[h_idx] >> 2;
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 5);
+	reg = header >> 2;
 	mutex_lock(&p->rdev->ddev->mode_config.mutex);
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
@@ -994,16 +992,16 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 
 	if (!crtc->enabled) {
 		/* if the CRTC isn't enabled - we need to nop out the wait until */
-		ib_chunk->kdata[h_idx + 2] = PACKET2(0);
-		ib_chunk->kdata[h_idx + 3] = PACKET2(0);
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
 	} else if (crtc_id == 1) {
 		switch (reg) {
 		case AVIVO_D1MODE_VLINE_START_END:
-			header &= R300_CP_PACKET0_REG_MASK;
+			header &= ~R300_CP_PACKET0_REG_MASK;
 			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
 			break;
 		case RADEON_CRTC_GUI_TRIG_VLINE:
-			header &= R300_CP_PACKET0_REG_MASK;
+			header &= ~R300_CP_PACKET0_REG_MASK;
 			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
 			break;
 		default:
@@ -1011,8 +1009,8 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 			r = -EINVAL;
 			goto out;
 		}
-		ib_chunk->kdata[h_idx] = header;
-		ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
+		ib[h_idx] = header;
+		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
 	}
 out:
 	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
@@ -1033,7 +1031,6 @@ out:
 int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
 			      struct radeon_cs_reloc **cs_reloc)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_chunk *relocs_chunk;
 	struct radeon_cs_packet p3reloc;
 	unsigned idx;
@@ -1044,7 +1041,6 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 	*cs_reloc = NULL;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
 	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
 	if (r) {
@@ -1057,7 +1053,7 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
 		r100_cs_dump_packet(p, &p3reloc);
 		return -EINVAL;
 	}
-	idx = ib_chunk->kdata[p3reloc.idx + 1];
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
 	if (idx >= relocs_chunk->length_dw) {
 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
 			  idx, relocs_chunk->length_dw);
@@ -1126,7 +1122,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt,
 			      unsigned idx, unsigned reg)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_reloc *reloc;
 	struct r100_cs_track *track;
 	volatile uint32_t *ib;
@@ -1134,11 +1129,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	int r;
 	int i, face;
 	u32 tile_flags = 0;
+	u32 idx_value;
 
 	ib = p->ib->ptr;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	track = (struct r100_cs_track *)p->track;
 
+	idx_value = radeon_get_ib_value(p, idx);
+
 	switch (reg) {
 	case RADEON_CRTC_GUI_TRIG_VLINE:
 		r = r100_cs_packet_parse_vline(p);
@@ -1166,8 +1163,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}
 		track->zb.robj = reloc->robj;
-		track->zb.offset = ib_chunk->kdata[idx];
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->zb.offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1178,8 +1175,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return r;
 		}
 		track->cb[0].robj = reloc->robj;
-		track->cb[0].offset = ib_chunk->kdata[idx];
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->cb[0].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_PP_TXOFFSET_0:
 	case RADEON_PP_TXOFFSET_1:
@@ -1192,7 +1189,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].robj = reloc->robj;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T0_0:
@@ -1208,8 +1205,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
-		track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->textures[0].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[0].cube_info[i].robj = reloc->robj;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T1_0:
@@ -1225,8 +1222,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
-		track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->textures[1].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[1].cube_info[i].robj = reloc->robj;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T2_0:
@@ -1242,12 +1239,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
-		track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->textures[2].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[2].cube_info[i].robj = reloc->robj;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
-		track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
+		track->maxy = ((idx_value >> 16) & 0x7FF);
 		break;
 	case RADEON_RB3D_COLORPITCH:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1263,17 +1260,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
 			tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
 
-		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+		tmp = idx_value & ~(0x7 << 16);
 		tmp |= tile_flags;
 		ib[idx] = tmp;
 
-		track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
+		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
 		break;
 	case RADEON_RB3D_DEPTHPITCH:
-		track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
+		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
 		break;
 	case RADEON_RB3D_CNTL:
-		switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
 		case 7:
 		case 8:
 		case 9:
@@ -1291,13 +1288,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			break;
 		default:
 			DRM_ERROR("Invalid color buffer format (%d) !\n",
-				  ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
 			return -EINVAL;
 		}
-		track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
+		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
 		break;
 	case RADEON_RB3D_ZSTENCILCNTL:
-		switch (ib_chunk->kdata[idx] & 0xf) {
+		switch (idx_value & 0xf) {
 		case 0:
 			track->zb.cpp = 2;
 			break;
@@ -1321,44 +1318,44 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_PP_CNTL:
 		{
-			uint32_t temp = ib_chunk->kdata[idx] >> 4;
+			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
 		}
 		break;
 	case RADEON_SE_VF_CNTL:
-		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		track->vap_vf_cntl = idx_value;
 		break;
 	case RADEON_SE_VTX_FMT:
-		track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
+		track->vtx_size = r100_get_vtx_size(idx_value);
 		break;
 	case RADEON_PP_TEX_SIZE_0:
 	case RADEON_PP_TEX_SIZE_1:
 	case RADEON_PP_TEX_SIZE_2:
 		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
-		track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
-		track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
 		break;
 	case RADEON_PP_TEX_PITCH_0:
 	case RADEON_PP_TEX_PITCH_1:
 	case RADEON_PP_TEX_PITCH_2:
 		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
-		track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
+		track->textures[i].pitch = idx_value + 32;
 		break;
 	case RADEON_PP_TXFILTER_0:
 	case RADEON_PP_TXFILTER_1:
 	case RADEON_PP_TXFILTER_2:
 		i = (reg - RADEON_PP_TXFILTER_0) / 24;
-		track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
+		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
 						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
-		tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
+		tmp = (idx_value >> 23) & 0x7;
 		if (tmp == 2 || tmp == 6)
 			track->textures[i].roundup_w = false;
-		tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
+		tmp = (idx_value >> 27) & 0x7;
 		if (tmp == 2 || tmp == 6)
 			track->textures[i].roundup_h = false;
 		break;
@@ -1366,16 +1363,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	case RADEON_PP_TXFORMAT_1:
 	case RADEON_PP_TXFORMAT_2:
 		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
-		if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
+		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
 			track->textures[i].use_pitch = 1;
 		} else {
 			track->textures[i].use_pitch = 0;
-			track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
-			track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
 		}
-		if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
 			track->textures[i].tex_coord_type = 2;
-		switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
+		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
 		case RADEON_TXFORMAT_I8:
 		case RADEON_TXFORMAT_RGB332:
 		case RADEON_TXFORMAT_Y8:
@@ -1402,13 +1399,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].cpp = 4;
 			break;
 		}
-		track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
-		track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
+		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
 		break;
 	case RADEON_PP_CUBIC_FACES_0:
 	case RADEON_PP_CUBIC_FACES_1:
 	case RADEON_PP_CUBIC_FACES_2:
-		tmp = ib_chunk->kdata[idx];
+		tmp = idx_value;
 		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
 		for (face = 0; face < 4; face++) {
 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@@ -1427,15 +1424,14 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 					 struct radeon_cs_packet *pkt,
 					 struct radeon_object *robj)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	unsigned idx;
-
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
+	u32 value;
 	idx = pkt->idx + 1;
-	if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
+	value = radeon_get_ib_value(p, idx + 2);
+	if ((value + 1) > radeon_object_size(robj)) {
 		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
 			  "(need %u have %lu) !\n",
-			  ib_chunk->kdata[idx+2] + 1,
+			  value + 1,
 			  radeon_object_size(robj));
 		return -EINVAL;
 	}
@@ -1445,59 +1441,20 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 static int r100_packet3_check(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_reloc *reloc;
 	struct r100_cs_track *track;
 	unsigned idx;
-	unsigned i, c;
 	volatile uint32_t *ib;
 	int r;
 
 	ib = p->ib->ptr;
-	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	idx = pkt->idx + 1;
 	track = (struct r100_cs_track *)p->track;
 	switch (pkt->opcode) {
 	case PACKET3_3D_LOAD_VBPNTR:
-		c = ib_chunk->kdata[idx++];
-		track->num_arrays = c;
-		for (i = 0; i < (c - 1); i += 2, idx += 3) {
-			r = r100_cs_packet_next_reloc(p, &reloc);
-			if (r) {
-				DRM_ERROR("No reloc for packet3 %d\n",
-					  pkt->opcode);
-				r100_cs_dump_packet(p, pkt);
-				return r;
-			}
-			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-			track->arrays[i + 0].robj = reloc->robj;
-			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-			track->arrays[i + 0].esize &= 0x7F;
-			r = r100_cs_packet_next_reloc(p, &reloc);
-			if (r) {
-				DRM_ERROR("No reloc for packet3 %d\n",
-					  pkt->opcode);
-				r100_cs_dump_packet(p, pkt);
-				return r;
-			}
-			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
-			track->arrays[i + 1].robj = reloc->robj;
-			track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
-			track->arrays[i + 1].esize &= 0x7F;
-		}
-		if (c & 1) {
-			r = r100_cs_packet_next_reloc(p, &reloc);
-			if (r) {
-				DRM_ERROR("No reloc for packet3 %d\n",
-					  pkt->opcode);
-				r100_cs_dump_packet(p, pkt);
-				return r;
-			}
-			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-			track->arrays[i + 0].robj = reloc->robj;
-			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-			track->arrays[i + 0].esize &= 0x7F;
-		}
+		r = r100_packet3_load_vbpntr(p, pkt, idx);
+		if (r)
+			return r;
 		break;
 	case PACKET3_INDX_BUFFER:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1506,7 +1463,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
 		if (r) {
 			return r;
@@ -1520,27 +1477,27 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
 		track->num_arrays = 1;
-		track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);
+		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
 
 		track->arrays[0].robj = reloc->robj;
 		track->arrays[0].esize = track->vtx_size;
 
-		track->max_indx = ib_chunk->kdata[idx+1];
+		track->max_indx = radeon_get_ib_value(p, idx+1);
 
-		track->vap_vf_cntl = ib_chunk->kdata[idx+3];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
 		track->immd_dwords = pkt->count - 1;
 		r = r100_cs_track_check(p->rdev, track);
 		if (r)
 			return r;
 		break;
 	case PACKET3_3D_DRAW_IMMD:
-		if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
+		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
 			return -EINVAL;
 		}
-		track->vap_vf_cntl = ib_chunk->kdata[idx+1];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
 		track->immd_dwords = pkt->count - 1;
 		r = r100_cs_track_check(p->rdev, track);
 		if (r)
@@ -1548,11 +1505,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 		break;
 	/* triggers drawing using in-packet vertex data */
 	case PACKET3_3D_DRAW_IMMD_2:
-		if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
+		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
 			return -EINVAL;
 		}
-		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
 		track->immd_dwords = pkt->count;
 		r = r100_cs_track_check(p->rdev, track);
 		if (r)
@@ -1560,28 +1517,28 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 		break;
 	/* triggers drawing using in-packet vertex data */
 	case PACKET3_3D_DRAW_VBUF_2:
-		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
 		r = r100_cs_track_check(p->rdev, track);
 		if (r)
 			return r;
 		break;
 	/* triggers drawing of vertex buffers setup elsewhere */
 	case PACKET3_3D_DRAW_INDX_2:
-		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
 		r = r100_cs_track_check(p->rdev, track);
 		if (r)
 			return r;
 		break;
 	/* triggers drawing using indices to vertex buffer */
 	case PACKET3_3D_DRAW_VBUF:
-		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
 		r = r100_cs_track_check(p->rdev, track);
 		if (r)
 			return r;
 		break;
 	/* triggers drawing of vertex buffers setup elsewhere */
 	case PACKET3_3D_DRAW_INDX:
-		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
 		r = r100_cs_track_check(p->rdev, track);
 		if (r)
 			return r;