author     Dave Airlie <airlied@redhat.com>	2009-09-01 01:25:57 -0400
committer  Dave Airlie <airlied@redhat.com>	2009-09-07 18:54:31 -0400
commit     551ebd837c75fc75df81811a18b7136c39cab487 (patch)
tree       9703fd46cf9ad170012754f984375db37d2bf818 /drivers/gpu/drm/radeon/r100.c
parent     11670d3c93210793562748d83502ecbef4034765 (diff)
drm/radeon/kms: add rn50/r100/r200 CS tracker.
This adds the command stream checker for the RN50, R100 and R200 cards. It stops any access to 3D registers on RN50 and checks buffer sizes on the r100/r200 cards. It also fixes some texture sizing checks on r300.

Signed-off-by: Dave Airlie <airlied@redhat.com>
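At its core the checker drives PACKET0 writes through a per-chip "register safe" bitmap (generated into r100_reg_safe.h and rn50_reg_safe.h and selected in r100_init below): writes to registers marked safe are passed straight into the indirect buffer, while everything else goes to a chip-specific callback (r100_packet0_check or r200_packet0_check) that applies relocations, records state for the later buffer-size checks, or rejects the command stream. The snippet below is only a minimal sketch of such a bitmap lookup; the function name, parameters and the assumed bit polarity are illustrative, not the driver's actual helper.

/*
 * Illustrative sketch of a "register safe" bitmap lookup, one bit per
 * register dword (reg >> 2).  A set bit is assumed here to mean "route
 * this register to the check callback"; the real polarity depends on how
 * the generated tables are built.
 */
#include <stdbool.h>
#include <stdint.h>

static bool reg_needs_check(const uint32_t *reg_safe_bm, unsigned bm_words,
			    unsigned reg)
{
	unsigned word = (reg >> 2) / 32;	/* 32 register dwords per bitmap word */
	uint32_t mask = 1u << ((reg >> 2) & 31);

	if (word >= bm_words)
		return true;			/* outside the table: always check */
	return (reg_safe_bm[word] & mask) != 0;
}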
Diffstat (limited to 'drivers/gpu/drm/radeon/r100.c')
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 811
1 file changed, 691 insertions(+), 120 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 44f34f8e2b32..ee3ab62417e2 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -34,6 +34,9 @@
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36 36
37#include "r100_reg_safe.h"
38#include "rn50_reg_safe.h"
39
37/* Firmware Names */ 40/* Firmware Names */
38#define FIRMWARE_R100 "radeon/R100_cp.bin" 41#define FIRMWARE_R100 "radeon/R100_cp.bin"
39#define FIRMWARE_R200 "radeon/R200_cp.bin" 42#define FIRMWARE_R200 "radeon/R200_cp.bin"
@@ -51,11 +54,14 @@ MODULE_FIRMWARE(FIRMWARE_RS690);
51MODULE_FIRMWARE(FIRMWARE_RS600); 54MODULE_FIRMWARE(FIRMWARE_RS600);
52MODULE_FIRMWARE(FIRMWARE_R520); 55MODULE_FIRMWARE(FIRMWARE_R520);
53 56
57#include "r100_track.h"
58
54/* This files gather functions specifics to: 59/* This files gather functions specifics to:
55 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 60 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
56 * 61 *
57 * Some of these functions might be used by newer ASICs. 62 * Some of these functions might be used by newer ASICs.
58 */ 63 */
64int r200_init(struct radeon_device *rdev);
59void r100_hdp_reset(struct radeon_device *rdev); 65void r100_hdp_reset(struct radeon_device *rdev);
60void r100_gpu_init(struct radeon_device *rdev); 66void r100_gpu_init(struct radeon_device *rdev);
61int r100_gui_wait_for_idle(struct radeon_device *rdev); 67int r100_gui_wait_for_idle(struct radeon_device *rdev);
@@ -1017,147 +1023,356 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1017 return 0; 1023 return 0;
1018} 1024}
1019 1025
1026static int r100_get_vtx_size(uint32_t vtx_fmt)
1027{
1028 int vtx_size;
1029 vtx_size = 2;
1030 /* ordered according to bits in spec */
1031 if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
1032 vtx_size++;
1033 if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
1034 vtx_size += 3;
1035 if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
1036 vtx_size++;
1037 if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
1038 vtx_size++;
1039 if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
1040 vtx_size += 3;
1041 if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
1042 vtx_size++;
1043 if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
1044 vtx_size++;
1045 if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
1046 vtx_size += 2;
1047 if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
1048 vtx_size += 2;
1049 if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
1050 vtx_size++;
1051 if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
1052 vtx_size += 2;
1053 if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
1054 vtx_size++;
1055 if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
1056 vtx_size += 2;
1057 if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
1058 vtx_size++;
1059 if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
1060 vtx_size++;
1061 /* blend weight */
1062 if (vtx_fmt & (0x7 << 15))
1063 vtx_size += (vtx_fmt >> 15) & 0x7;
1064 if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
1065 vtx_size += 3;
1066 if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
1067 vtx_size += 2;
1068 if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
1069 vtx_size++;
1070 if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
1071 vtx_size++;
1072 if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
1073 vtx_size++;
1074 if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
1075 vtx_size++;
1076 return vtx_size;
1077}
1078
1020static int r100_packet0_check(struct radeon_cs_parser *p, 1079static int r100_packet0_check(struct radeon_cs_parser *p,
1021 struct radeon_cs_packet *pkt) 1080 struct radeon_cs_packet *pkt,
1081 unsigned idx, unsigned reg)
1022{ 1082{
1023 struct radeon_cs_chunk *ib_chunk; 1083 struct radeon_cs_chunk *ib_chunk;
1024 struct radeon_cs_reloc *reloc; 1084 struct radeon_cs_reloc *reloc;
1085 struct r100_cs_track *track;
1025 volatile uint32_t *ib; 1086 volatile uint32_t *ib;
1026 uint32_t tmp; 1087 uint32_t tmp;
1027 unsigned reg;
1028 unsigned i;
1029 unsigned idx;
1030 bool onereg;
1031 int r; 1088 int r;
1089 int i, face;
1032 u32 tile_flags = 0; 1090 u32 tile_flags = 0;
1033 1091
1034 ib = p->ib->ptr; 1092 ib = p->ib->ptr;
1035 ib_chunk = &p->chunks[p->chunk_ib_idx]; 1093 ib_chunk = &p->chunks[p->chunk_ib_idx];
1036 idx = pkt->idx + 1; 1094 track = (struct r100_cs_track *)p->track;
1037 reg = pkt->reg; 1095
1038 onereg = false; 1096 switch (reg) {
1039 if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) { 1097 case RADEON_CRTC_GUI_TRIG_VLINE:
1040 onereg = true; 1098 r = r100_cs_packet_parse_vline(p);
1041 } 1099 if (r) {
1042 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { 1100 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1043 switch (reg) { 1101 idx, reg);
1044 case RADEON_CRTC_GUI_TRIG_VLINE: 1102 r100_cs_dump_packet(p, pkt);
1045 r = r100_cs_packet_parse_vline(p); 1103 return r;
1046 if (r) { 1104 }
1047 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1105 break;
1048 idx, reg);
1049 r100_cs_dump_packet(p, pkt);
1050 return r;
1051 }
1052 break;
1053 /* FIXME: only allow PACKET3 blit? easier to check for out of 1106 /* FIXME: only allow PACKET3 blit? easier to check for out of
1054 * range access */ 1107 * range access */
1055 case RADEON_DST_PITCH_OFFSET: 1108 case RADEON_DST_PITCH_OFFSET:
1056 case RADEON_SRC_PITCH_OFFSET: 1109 case RADEON_SRC_PITCH_OFFSET:
1057 r = r100_cs_packet_next_reloc(p, &reloc); 1110 r = r100_reloc_pitch_offset(p, pkt, idx, reg);
1058 if (r) { 1111 if (r)
1059 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1112 return r;
1060 idx, reg); 1113 break;
1061 r100_cs_dump_packet(p, pkt); 1114 case RADEON_RB3D_DEPTHOFFSET:
1062 return r; 1115 r = r100_cs_packet_next_reloc(p, &reloc);
1063 } 1116 if (r) {
1064 tmp = ib_chunk->kdata[idx] & 0x003fffff; 1117 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1065 tmp += (((u32)reloc->lobj.gpu_offset) >> 10); 1118 idx, reg);
1066 1119 r100_cs_dump_packet(p, pkt);
1067 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1120 return r;
1068 tile_flags |= RADEON_DST_TILE_MACRO; 1121 }
1069 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 1122 track->zb.robj = reloc->robj;
1070 if (reg == RADEON_SRC_PITCH_OFFSET) { 1123 track->zb.offset = ib_chunk->kdata[idx];
1071 DRM_ERROR("Cannot src blit from microtiled surface\n"); 1124 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1072 r100_cs_dump_packet(p, pkt); 1125 break;
1073 return -EINVAL; 1126 case RADEON_RB3D_COLOROFFSET:
1074 } 1127 r = r100_cs_packet_next_reloc(p, &reloc);
1075 tile_flags |= RADEON_DST_TILE_MICRO; 1128 if (r) {
1076 } 1129 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1130 idx, reg);
1131 r100_cs_dump_packet(p, pkt);
1132 return r;
1133 }
1134 track->cb[0].robj = reloc->robj;
1135 track->cb[0].offset = ib_chunk->kdata[idx];
1136 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1137 break;
1138 case RADEON_PP_TXOFFSET_0:
1139 case RADEON_PP_TXOFFSET_1:
1140 case RADEON_PP_TXOFFSET_2:
1141 i = (reg - RADEON_PP_TXOFFSET_0) / 24;
1142 r = r100_cs_packet_next_reloc(p, &reloc);
1143 if (r) {
1144 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1145 idx, reg);
1146 r100_cs_dump_packet(p, pkt);
1147 return r;
1148 }
1149 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1150 track->textures[i].robj = reloc->robj;
1151 break;
1152 case RADEON_PP_CUBIC_OFFSET_T0_0:
1153 case RADEON_PP_CUBIC_OFFSET_T0_1:
1154 case RADEON_PP_CUBIC_OFFSET_T0_2:
1155 case RADEON_PP_CUBIC_OFFSET_T0_3:
1156 case RADEON_PP_CUBIC_OFFSET_T0_4:
1157 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
1158 r = r100_cs_packet_next_reloc(p, &reloc);
1159 if (r) {
1160 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1161 idx, reg);
1162 r100_cs_dump_packet(p, pkt);
1163 return r;
1164 }
1165 track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
1166 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1167 track->textures[0].cube_info[i].robj = reloc->robj;
1168 break;
1169 case RADEON_PP_CUBIC_OFFSET_T1_0:
1170 case RADEON_PP_CUBIC_OFFSET_T1_1:
1171 case RADEON_PP_CUBIC_OFFSET_T1_2:
1172 case RADEON_PP_CUBIC_OFFSET_T1_3:
1173 case RADEON_PP_CUBIC_OFFSET_T1_4:
1174 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
1175 r = r100_cs_packet_next_reloc(p, &reloc);
1176 if (r) {
1177 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1178 idx, reg);
1179 r100_cs_dump_packet(p, pkt);
1180 return r;
1181 }
1182 track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
1183 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1184 track->textures[1].cube_info[i].robj = reloc->robj;
1185 break;
1186 case RADEON_PP_CUBIC_OFFSET_T2_0:
1187 case RADEON_PP_CUBIC_OFFSET_T2_1:
1188 case RADEON_PP_CUBIC_OFFSET_T2_2:
1189 case RADEON_PP_CUBIC_OFFSET_T2_3:
1190 case RADEON_PP_CUBIC_OFFSET_T2_4:
1191 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
1192 r = r100_cs_packet_next_reloc(p, &reloc);
1193 if (r) {
1194 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1195 idx, reg);
1196 r100_cs_dump_packet(p, pkt);
1197 return r;
1198 }
1199 track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
1200 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1201 track->textures[2].cube_info[i].robj = reloc->robj;
1202 break;
1203 case RADEON_RE_WIDTH_HEIGHT:
1204 track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
1205 break;
1206 case RADEON_RB3D_COLORPITCH:
1207 r = r100_cs_packet_next_reloc(p, &reloc);
1208 if (r) {
1209 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1210 idx, reg);
1211 r100_cs_dump_packet(p, pkt);
1212 return r;
1213 }
1077 1214
1078 tmp |= tile_flags; 1215 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1079 ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; 1216 tile_flags |= RADEON_COLOR_TILE_ENABLE;
1080 break; 1217 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1081 case RADEON_RB3D_DEPTHOFFSET: 1218 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1082 case RADEON_RB3D_COLOROFFSET:
1083 case R300_RB3D_COLOROFFSET0:
1084 case R300_ZB_DEPTHOFFSET:
1085 case R200_PP_TXOFFSET_0:
1086 case R200_PP_TXOFFSET_1:
1087 case R200_PP_TXOFFSET_2:
1088 case R200_PP_TXOFFSET_3:
1089 case R200_PP_TXOFFSET_4:
1090 case R200_PP_TXOFFSET_5:
1091 case RADEON_PP_TXOFFSET_0:
1092 case RADEON_PP_TXOFFSET_1:
1093 case RADEON_PP_TXOFFSET_2:
1094 case R300_TX_OFFSET_0:
1095 case R300_TX_OFFSET_0+4:
1096 case R300_TX_OFFSET_0+8:
1097 case R300_TX_OFFSET_0+12:
1098 case R300_TX_OFFSET_0+16:
1099 case R300_TX_OFFSET_0+20:
1100 case R300_TX_OFFSET_0+24:
1101 case R300_TX_OFFSET_0+28:
1102 case R300_TX_OFFSET_0+32:
1103 case R300_TX_OFFSET_0+36:
1104 case R300_TX_OFFSET_0+40:
1105 case R300_TX_OFFSET_0+44:
1106 case R300_TX_OFFSET_0+48:
1107 case R300_TX_OFFSET_0+52:
1108 case R300_TX_OFFSET_0+56:
1109 case R300_TX_OFFSET_0+60:
1110 /* rn50 has no 3D engine so fail on any 3d setup */
1111 if (ASIC_IS_RN50(p->rdev)) {
1112 DRM_ERROR("attempt to use RN50 3D engine failed\n");
1113 return -EINVAL;
1114 }
1115 r = r100_cs_packet_next_reloc(p, &reloc);
1116 if (r) {
1117 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1118 idx, reg);
1119 r100_cs_dump_packet(p, pkt);
1120 return r;
1121 }
1122 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1123 break;
1124 case R300_RB3D_COLORPITCH0:
1125 case RADEON_RB3D_COLORPITCH:
1126 r = r100_cs_packet_next_reloc(p, &reloc);
1127 if (r) {
1128 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1129 idx, reg);
1130 r100_cs_dump_packet(p, pkt);
1131 return r;
1132 }
1133 1219
1134 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1220 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
1135 tile_flags |= RADEON_COLOR_TILE_ENABLE; 1221 tmp |= tile_flags;
1136 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1222 ib[idx] = tmp;
1137 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1138 1223
1139 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 1224 track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
1140 tmp |= tile_flags; 1225 break;
1141 ib[idx] = tmp; 1226 case RADEON_RB3D_DEPTHPITCH:
1227 track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
1228 break;
1229 case RADEON_RB3D_CNTL:
1230 switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
1231 case 7:
1232 case 8:
1233 case 9:
1234 case 11:
1235 case 12:
1236 track->cb[0].cpp = 1;
1142 break; 1237 break;
1143 case RADEON_RB3D_ZPASS_ADDR: 1238 case 3:
1144 r = r100_cs_packet_next_reloc(p, &reloc); 1239 case 4:
1145 if (r) { 1240 case 15:
1146 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1241 track->cb[0].cpp = 2;
1147 idx, reg); 1242 break;
1148 r100_cs_dump_packet(p, pkt); 1243 case 6:
1149 return r; 1244 track->cb[0].cpp = 4;
1150 } 1245 break;
1151 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1246 default:
1247 DRM_ERROR("Invalid color buffer format (%d) !\n",
1248 ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
1249 return -EINVAL;
1250 }
1251 track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
1252 break;
1253 case RADEON_RB3D_ZSTENCILCNTL:
1254 switch (ib_chunk->kdata[idx] & 0xf) {
1255 case 0:
1256 track->zb.cpp = 2;
1257 break;
1258 case 2:
1259 case 3:
1260 case 4:
1261 case 5:
1262 case 9:
1263 case 11:
1264 track->zb.cpp = 4;
1152 break; 1265 break;
1153 default: 1266 default:
1154 /* FIXME: we don't want to allow anyothers packet */
1155 break; 1267 break;
1156 } 1268 }
1157 if (onereg) { 1269 break;
1158 /* FIXME: forbid onereg write to register on relocate */ 1270 case RADEON_RB3D_ZPASS_ADDR:
1271 r = r100_cs_packet_next_reloc(p, &reloc);
1272 if (r) {
1273 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1274 idx, reg);
1275 r100_cs_dump_packet(p, pkt);
1276 return r;
1277 }
1278 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1279 break;
1280 case RADEON_PP_CNTL:
1281 {
1282 uint32_t temp = ib_chunk->kdata[idx] >> 4;
1283 for (i = 0; i < track->num_texture; i++)
1284 track->textures[i].enabled = !!(temp & (1 << i));
1285 }
1286 break;
1287 case RADEON_SE_VF_CNTL:
1288 track->vap_vf_cntl = ib_chunk->kdata[idx];
1289 break;
1290 case RADEON_SE_VTX_FMT:
1291 track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
1292 break;
1293 case RADEON_PP_TEX_SIZE_0:
1294 case RADEON_PP_TEX_SIZE_1:
1295 case RADEON_PP_TEX_SIZE_2:
1296 i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1297 track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
1298 track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1299 break;
1300 case RADEON_PP_TEX_PITCH_0:
1301 case RADEON_PP_TEX_PITCH_1:
1302 case RADEON_PP_TEX_PITCH_2:
1303 i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1304 track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
1305 break;
1306 case RADEON_PP_TXFILTER_0:
1307 case RADEON_PP_TXFILTER_1:
1308 case RADEON_PP_TXFILTER_2:
1309 i = (reg - RADEON_PP_TXFILTER_0) / 24;
1310 track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
1311 >> RADEON_MAX_MIP_LEVEL_SHIFT);
1312 tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
1313 if (tmp == 2 || tmp == 6)
1314 track->textures[i].roundup_w = false;
1315 tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
1316 if (tmp == 2 || tmp == 6)
1317 track->textures[i].roundup_h = false;
1318 break;
1319 case RADEON_PP_TXFORMAT_0:
1320 case RADEON_PP_TXFORMAT_1:
1321 case RADEON_PP_TXFORMAT_2:
1322 i = (reg - RADEON_PP_TXFORMAT_0) / 24;
1323 if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
1324 track->textures[i].use_pitch = 1;
1325 } else {
1326 track->textures[i].use_pitch = 0;
1327 track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
1328 track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
1329 }
1330 if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
1331 track->textures[i].tex_coord_type = 2;
1332 switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
1333 case RADEON_TXFORMAT_I8:
1334 case RADEON_TXFORMAT_RGB332:
1335 case RADEON_TXFORMAT_Y8:
1336 track->textures[i].cpp = 1;
1337 break;
1338 case RADEON_TXFORMAT_AI88:
1339 case RADEON_TXFORMAT_ARGB1555:
1340 case RADEON_TXFORMAT_RGB565:
1341 case RADEON_TXFORMAT_ARGB4444:
1342 case RADEON_TXFORMAT_VYUY422:
1343 case RADEON_TXFORMAT_YVYU422:
1344 case RADEON_TXFORMAT_DXT1:
1345 case RADEON_TXFORMAT_SHADOW16:
1346 case RADEON_TXFORMAT_LDUDV655:
1347 case RADEON_TXFORMAT_DUDV88:
1348 track->textures[i].cpp = 2;
1159 break; 1349 break;
1350 case RADEON_TXFORMAT_ARGB8888:
1351 case RADEON_TXFORMAT_RGBA8888:
1352 case RADEON_TXFORMAT_DXT23:
1353 case RADEON_TXFORMAT_DXT45:
1354 case RADEON_TXFORMAT_SHADOW32:
1355 case RADEON_TXFORMAT_LDUDUV8888:
1356 track->textures[i].cpp = 4;
1357 break;
1358 }
1359 track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
1360 track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
1361 break;
1362 case RADEON_PP_CUBIC_FACES_0:
1363 case RADEON_PP_CUBIC_FACES_1:
1364 case RADEON_PP_CUBIC_FACES_2:
1365 tmp = ib_chunk->kdata[idx];
1366 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
1367 for (face = 0; face < 4; face++) {
1368 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
1369 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
1160 } 1370 }
1371 break;
1372 default:
1373 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1374 reg, idx);
1375 return -EINVAL;
1161 } 1376 }
1162 return 0; 1377 return 0;
1163} 1378}
@@ -1186,6 +1401,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1186{ 1401{
1187 struct radeon_cs_chunk *ib_chunk; 1402 struct radeon_cs_chunk *ib_chunk;
1188 struct radeon_cs_reloc *reloc; 1403 struct radeon_cs_reloc *reloc;
1404 struct r100_cs_track *track;
1189 unsigned idx; 1405 unsigned idx;
1190 unsigned i, c; 1406 unsigned i, c;
1191 volatile uint32_t *ib; 1407 volatile uint32_t *ib;
@@ -1194,9 +1410,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1194 ib = p->ib->ptr; 1410 ib = p->ib->ptr;
1195 ib_chunk = &p->chunks[p->chunk_ib_idx]; 1411 ib_chunk = &p->chunks[p->chunk_ib_idx];
1196 idx = pkt->idx + 1; 1412 idx = pkt->idx + 1;
1413 track = (struct r100_cs_track *)p->track;
1197 switch (pkt->opcode) { 1414 switch (pkt->opcode) {
1198 case PACKET3_3D_LOAD_VBPNTR: 1415 case PACKET3_3D_LOAD_VBPNTR:
1199 c = ib_chunk->kdata[idx++]; 1416 c = ib_chunk->kdata[idx++];
1417 track->num_arrays = c;
1200 for (i = 0; i < (c - 1); i += 2, idx += 3) { 1418 for (i = 0; i < (c - 1); i += 2, idx += 3) {
1201 r = r100_cs_packet_next_reloc(p, &reloc); 1419 r = r100_cs_packet_next_reloc(p, &reloc);
1202 if (r) { 1420 if (r) {
@@ -1206,6 +1424,9 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1206 return r; 1424 return r;
1207 } 1425 }
1208 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1426 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1427 track->arrays[i + 0].robj = reloc->robj;
1428 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1429 track->arrays[i + 0].esize &= 0x7F;
1209 r = r100_cs_packet_next_reloc(p, &reloc); 1430 r = r100_cs_packet_next_reloc(p, &reloc);
1210 if (r) { 1431 if (r) {
1211 DRM_ERROR("No reloc for packet3 %d\n", 1432 DRM_ERROR("No reloc for packet3 %d\n",
@@ -1214,6 +1435,9 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1214 return r; 1435 return r;
1215 } 1436 }
1216 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset); 1437 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1438 track->arrays[i + 1].robj = reloc->robj;
1439 track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
1440 track->arrays[i + 1].esize &= 0x7F;
1217 } 1441 }
1218 if (c & 1) { 1442 if (c & 1) {
1219 r = r100_cs_packet_next_reloc(p, &reloc); 1443 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1224,6 +1448,9 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1224 return r; 1448 return r;
1225 } 1449 }
1226 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1450 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1451 track->arrays[i + 0].robj = reloc->robj;
1452 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1453 track->arrays[i + 0].esize &= 0x7F;
1227 } 1454 }
1228 break; 1455 break;
1229 case PACKET3_INDX_BUFFER: 1456 case PACKET3_INDX_BUFFER:
@@ -1240,7 +1467,6 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1240 } 1467 }
1241 break; 1468 break;
1242 case 0x23: 1469 case 0x23:
1243 /* FIXME: cleanup */
1244 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 1470 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
1245 r = r100_cs_packet_next_reloc(p, &reloc); 1471 r = r100_cs_packet_next_reloc(p, &reloc);
1246 if (r) { 1472 if (r) {
@@ -1249,18 +1475,71 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1249 return r; 1475 return r;
1250 } 1476 }
1251 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1477 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1478 track->num_arrays = 1;
1479 track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);
1480
1481 track->arrays[0].robj = reloc->robj;
1482 track->arrays[0].esize = track->vtx_size;
1483
1484 track->max_indx = ib_chunk->kdata[idx+1];
1485
1486 track->vap_vf_cntl = ib_chunk->kdata[idx+3];
1487 track->immd_dwords = pkt->count - 1;
1488 r = r100_cs_track_check(p->rdev, track);
1489 if (r)
1490 return r;
1252 break; 1491 break;
1253 case PACKET3_3D_DRAW_IMMD: 1492 case PACKET3_3D_DRAW_IMMD:
1493 if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
1494 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1495 return -EINVAL;
1496 }
1497 track->vap_vf_cntl = ib_chunk->kdata[idx+1];
1498 track->immd_dwords = pkt->count - 1;
1499 r = r100_cs_track_check(p->rdev, track);
1500 if (r)
1501 return r;
1502 break;
1254 /* triggers drawing using in-packet vertex data */ 1503 /* triggers drawing using in-packet vertex data */
1255 case PACKET3_3D_DRAW_IMMD_2: 1504 case PACKET3_3D_DRAW_IMMD_2:
1505 if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
1506 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1507 return -EINVAL;
1508 }
1509 track->vap_vf_cntl = ib_chunk->kdata[idx];
1510 track->immd_dwords = pkt->count;
1511 r = r100_cs_track_check(p->rdev, track);
1512 if (r)
1513 return r;
1514 break;
1256 /* triggers drawing using in-packet vertex data */ 1515 /* triggers drawing using in-packet vertex data */
1257 case PACKET3_3D_DRAW_VBUF_2: 1516 case PACKET3_3D_DRAW_VBUF_2:
1517 track->vap_vf_cntl = ib_chunk->kdata[idx];
1518 r = r100_cs_track_check(p->rdev, track);
1519 if (r)
1520 return r;
1521 break;
1258 /* triggers drawing of vertex buffers setup elsewhere */ 1522 /* triggers drawing of vertex buffers setup elsewhere */
1259 case PACKET3_3D_DRAW_INDX_2: 1523 case PACKET3_3D_DRAW_INDX_2:
1524 track->vap_vf_cntl = ib_chunk->kdata[idx];
1525 r = r100_cs_track_check(p->rdev, track);
1526 if (r)
1527 return r;
1528 break;
1260 /* triggers drawing using indices to vertex buffer */ 1529 /* triggers drawing using indices to vertex buffer */
1261 case PACKET3_3D_DRAW_VBUF: 1530 case PACKET3_3D_DRAW_VBUF:
1531 track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
1532 r = r100_cs_track_check(p->rdev, track);
1533 if (r)
1534 return r;
1535 break;
1262 /* triggers drawing of vertex buffers setup elsewhere */ 1536 /* triggers drawing of vertex buffers setup elsewhere */
1263 case PACKET3_3D_DRAW_INDX: 1537 case PACKET3_3D_DRAW_INDX:
1538 track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
1539 r = r100_cs_track_check(p->rdev, track);
1540 if (r)
1541 return r;
1542 break;
1264 /* triggers drawing using indices to vertex buffer */ 1543 /* triggers drawing using indices to vertex buffer */
1265 case PACKET3_NOP: 1544 case PACKET3_NOP:
1266 break; 1545 break;
@@ -1274,8 +1553,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1274int r100_cs_parse(struct radeon_cs_parser *p) 1553int r100_cs_parse(struct radeon_cs_parser *p)
1275{ 1554{
1276 struct radeon_cs_packet pkt; 1555 struct radeon_cs_packet pkt;
1556 struct r100_cs_track track;
1277 int r; 1557 int r;
1278 1558
1559 r100_cs_track_clear(p->rdev, &track);
1560 p->track = &track;
1279 do { 1561 do {
1280 r = r100_cs_packet_parse(p, &pkt, p->idx); 1562 r = r100_cs_packet_parse(p, &pkt, p->idx);
1281 if (r) { 1563 if (r) {
@@ -1284,7 +1566,16 @@ int r100_cs_parse(struct radeon_cs_parser *p)
1284 p->idx += pkt.count + 2; 1566 p->idx += pkt.count + 2;
1285 switch (pkt.type) { 1567 switch (pkt.type) {
1286 case PACKET_TYPE0: 1568 case PACKET_TYPE0:
1287 r = r100_packet0_check(p, &pkt); 1569 if (p->rdev->family >= CHIP_R200)
1570 r = r100_cs_parse_packet0(p, &pkt,
1571 p->rdev->config.r100.reg_safe_bm,
1572 p->rdev->config.r100.reg_safe_bm_size,
1573 &r200_packet0_check);
1574 else
1575 r = r100_cs_parse_packet0(p, &pkt,
1576 p->rdev->config.r100.reg_safe_bm,
1577 p->rdev->config.r100.reg_safe_bm_size,
1578 &r100_packet0_check);
1288 break; 1579 break;
1289 case PACKET_TYPE2: 1580 case PACKET_TYPE2:
1290 break; 1581 break;
@@ -1683,6 +1974,15 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1683 1974
1684int r100_init(struct radeon_device *rdev) 1975int r100_init(struct radeon_device *rdev)
1685{ 1976{
1977 if (ASIC_IS_RN50(rdev)) {
1978 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
1979 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
1980 } else if (rdev->family < CHIP_R200) {
1981 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
1982 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
1983 } else {
1984 return r200_init(rdev);
1985 }
1686 return 0; 1986 return 0;
1687} 1987}
1688 1988
@@ -2383,3 +2683,274 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2383 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); 2683 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
2384 } 2684 }
2385} 2685}
2686
2687static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2688{
2689 DRM_ERROR("pitch %d\n", t->pitch);
2690 DRM_ERROR("width %d\n", t->width);
2691 DRM_ERROR("height %d\n", t->height);
2692 DRM_ERROR("num levels %d\n", t->num_levels);
2693 DRM_ERROR("depth %d\n", t->txdepth);
2694 DRM_ERROR("bpp %d\n", t->cpp);
2695 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
2696 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
2697 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
2698}
2699
2700static int r100_cs_track_cube(struct radeon_device *rdev,
2701 struct r100_cs_track *track, unsigned idx)
2702{
2703 unsigned face, w, h;
2704 struct radeon_object *cube_robj;
2705 unsigned long size;
2706
2707 for (face = 0; face < 5; face++) {
2708 cube_robj = track->textures[idx].cube_info[face].robj;
2709 w = track->textures[idx].cube_info[face].width;
2710 h = track->textures[idx].cube_info[face].height;
2711
2712 size = w * h;
2713 size *= track->textures[idx].cpp;
2714
2715 size += track->textures[idx].cube_info[face].offset;
2716
2717 if (size > radeon_object_size(cube_robj)) {
2718 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2719 size, radeon_object_size(cube_robj));
2720 r100_cs_track_texture_print(&track->textures[idx]);
2721 return -1;
2722 }
2723 }
2724 return 0;
2725}
2726
2727static int r100_cs_track_texture_check(struct radeon_device *rdev,
2728 struct r100_cs_track *track)
2729{
2730 struct radeon_object *robj;
2731 unsigned long size;
2732 unsigned u, i, w, h;
2733 int ret;
2734
2735 for (u = 0; u < track->num_texture; u++) {
2736 if (!track->textures[u].enabled)
2737 continue;
2738 robj = track->textures[u].robj;
2739 if (robj == NULL) {
2740 DRM_ERROR("No texture bound to unit %u\n", u);
2741 return -EINVAL;
2742 }
2743 size = 0;
2744 for (i = 0; i <= track->textures[u].num_levels; i++) {
2745 if (track->textures[u].use_pitch) {
2746 if (rdev->family < CHIP_R300)
2747 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2748 else
2749 w = track->textures[u].pitch / (1 << i);
2750 } else {
2751 w = track->textures[u].width / (1 << i);
2752 if (rdev->family >= CHIP_RV515)
2753 w |= track->textures[u].width_11;
2754 if (track->textures[u].roundup_w)
2755 w = roundup_pow_of_two(w);
2756 }
2757 h = track->textures[u].height / (1 << i);
2758 if (rdev->family >= CHIP_RV515)
2759 h |= track->textures[u].height_11;
2760 if (track->textures[u].roundup_h)
2761 h = roundup_pow_of_two(h);
2762 size += w * h;
2763 }
2764 size *= track->textures[u].cpp;
2765 switch (track->textures[u].tex_coord_type) {
2766 case 0:
2767 break;
2768 case 1:
2769 size *= (1 << track->textures[u].txdepth);
2770 break;
2771 case 2:
2772 if (track->separate_cube) {
2773 ret = r100_cs_track_cube(rdev, track, u);
2774 if (ret)
2775 return ret;
2776 } else
2777 size *= 6;
2778 break;
2779 default:
2780 DRM_ERROR("Invalid texture coordinate type %u for unit "
2781 "%u\n", track->textures[u].tex_coord_type, u);
2782 return -EINVAL;
2783 }
2784 if (size > radeon_object_size(robj)) {
2785 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2786 "%lu\n", u, size, radeon_object_size(robj));
2787 r100_cs_track_texture_print(&track->textures[u]);
2788 return -EINVAL;
2789 }
2790 }
2791 return 0;
2792}
2793
2794int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2795{
2796 unsigned i;
2797 unsigned long size;
2798 unsigned prim_walk;
2799 unsigned nverts;
2800
2801 for (i = 0; i < track->num_cb; i++) {
2802 if (track->cb[i].robj == NULL) {
2803 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2804 return -EINVAL;
2805 }
2806 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2807 size += track->cb[i].offset;
2808 if (size > radeon_object_size(track->cb[i].robj)) {
2809 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2810 "(need %lu have %lu) !\n", i, size,
2811 radeon_object_size(track->cb[i].robj));
2812 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2813 i, track->cb[i].pitch, track->cb[i].cpp,
2814 track->cb[i].offset, track->maxy);
2815 return -EINVAL;
2816 }
2817 }
2818 if (track->z_enabled) {
2819 if (track->zb.robj == NULL) {
2820 DRM_ERROR("[drm] No buffer for z buffer !\n");
2821 return -EINVAL;
2822 }
2823 size = track->zb.pitch * track->zb.cpp * track->maxy;
2824 size += track->zb.offset;
2825 if (size > radeon_object_size(track->zb.robj)) {
2826 DRM_ERROR("[drm] Buffer too small for z buffer "
2827 "(need %lu have %lu) !\n", size,
2828 radeon_object_size(track->zb.robj));
2829 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2830 track->zb.pitch, track->zb.cpp,
2831 track->zb.offset, track->maxy);
2832 return -EINVAL;
2833 }
2834 }
2835 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2836 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2837 switch (prim_walk) {
2838 case 1:
2839 for (i = 0; i < track->num_arrays; i++) {
2840 size = track->arrays[i].esize * track->max_indx * 4;
2841 if (track->arrays[i].robj == NULL) {
2842 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2843 "bound\n", prim_walk, i);
2844 return -EINVAL;
2845 }
2846 if (size > radeon_object_size(track->arrays[i].robj)) {
2847 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
2848 "have %lu dwords\n", prim_walk, i,
2849 size >> 2,
2850 radeon_object_size(track->arrays[i].robj) >> 2);
2851 DRM_ERROR("Max indices %u\n", track->max_indx);
2852 return -EINVAL;
2853 }
2854 }
2855 break;
2856 case 2:
2857 for (i = 0; i < track->num_arrays; i++) {
2858 size = track->arrays[i].esize * (nverts - 1) * 4;
2859 if (track->arrays[i].robj == NULL) {
2860 DRM_ERROR("(PW %u) Vertex array %u no buffer "
2861 "bound\n", prim_walk, i);
2862 return -EINVAL;
2863 }
2864 if (size > radeon_object_size(track->arrays[i].robj)) {
2865 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
2866 "have %lu dwords\n", prim_walk, i, size >> 2,
2867 radeon_object_size(track->arrays[i].robj) >> 2);
2868 return -EINVAL;
2869 }
2870 }
2871 break;
2872 case 3:
2873 size = track->vtx_size * nverts;
2874 if (size != track->immd_dwords) {
2875 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
2876 track->immd_dwords, size);
2877 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2878 nverts, track->vtx_size);
2879 return -EINVAL;
2880 }
2881 break;
2882 default:
2883 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2884 prim_walk);
2885 return -EINVAL;
2886 }
2887 return r100_cs_track_texture_check(rdev, track);
2888}
2889
2890void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2891{
2892 unsigned i, face;
2893
2894 if (rdev->family < CHIP_R300) {
2895 track->num_cb = 1;
2896 if (rdev->family <= CHIP_RS200)
2897 track->num_texture = 3;
2898 else
2899 track->num_texture = 6;
2900 track->maxy = 2048;
2901 track->separate_cube = 1;
2902 } else {
2903 track->num_cb = 4;
2904 track->num_texture = 16;
2905 track->maxy = 4096;
2906 track->separate_cube = 0;
2907 }
2908
2909 for (i = 0; i < track->num_cb; i++) {
2910 track->cb[i].robj = NULL;
2911 track->cb[i].pitch = 8192;
2912 track->cb[i].cpp = 16;
2913 track->cb[i].offset = 0;
2914 }
2915 track->z_enabled = true;
2916 track->zb.robj = NULL;
2917 track->zb.pitch = 8192;
2918 track->zb.cpp = 4;
2919 track->zb.offset = 0;
2920 track->vtx_size = 0x7F;
2921 track->immd_dwords = 0xFFFFFFFFUL;
2922 track->num_arrays = 11;
2923 track->max_indx = 0x00FFFFFFUL;
2924 for (i = 0; i < track->num_arrays; i++) {
2925 track->arrays[i].robj = NULL;
2926 track->arrays[i].esize = 0x7F;
2927 }
2928 for (i = 0; i < track->num_texture; i++) {
2929 track->textures[i].pitch = 16536;
2930 track->textures[i].width = 16536;
2931 track->textures[i].height = 16536;
2932 track->textures[i].width_11 = 1 << 11;
2933 track->textures[i].height_11 = 1 << 11;
2934 track->textures[i].num_levels = 12;
2935 if (rdev->family <= CHIP_RS200) {
2936 track->textures[i].tex_coord_type = 0;
2937 track->textures[i].txdepth = 0;
2938 } else {
2939 track->textures[i].txdepth = 16;
2940 track->textures[i].tex_coord_type = 1;
2941 }
2942 track->textures[i].cpp = 64;
2943 track->textures[i].robj = NULL;
2944 /* CS IB emission code makes sure texture unit are disabled */
2945 track->textures[i].enabled = false;
2946 track->textures[i].roundup_w = true;
2947 track->textures[i].roundup_h = true;
2948 if (track->separate_cube)
2949 for (face = 0; face < 5; face++) {
2950 track->textures[i].cube_info[face].robj = NULL;
2951 track->textures[i].cube_info[face].width = 16536;
2952 track->textures[i].cube_info[face].height = 16536;
2953 track->textures[i].cube_info[face].offset = 0;
2954 }
2955 }
2956}
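
For reference, the per-buffer validation done in r100_cs_track_check above reduces to a simple bound: pitch (in pixels) times bytes per pixel times the maximum Y coordinate, plus the programmed offset, must not exceed the size of the relocated buffer object. A stand-alone restatement of that arithmetic follows; the struct and function names are made up for illustration and the pitch/cpp units are assumed to match the tracker's fields.

#include <stdbool.h>

/* Illustrative restatement of the colour/depth buffer bound check; not driver code. */
struct cs_buf {
	unsigned long obj_size;	/* size of the backing buffer object, in bytes */
	unsigned pitch;		/* line pitch in pixels */
	unsigned cpp;		/* bytes per pixel */
	unsigned offset;	/* byte offset programmed into the register */
};

static bool cs_buf_fits(const struct cs_buf *buf, unsigned maxy)
{
	unsigned long need;

	need = (unsigned long)buf->pitch * buf->cpp * maxy;
	need += buf->offset;
	return need <= buf->obj_size;	/* the CS is rejected when this fails */
}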