diff options
author | Dave Airlie <airlied@redhat.com> | 2009-09-23 02:56:27 -0400 |
---|---|---|
committer | Dave Airlie <airlied@linux.ie> | 2009-09-24 23:08:18 -0400 |
commit | 513bcb4655e68706594e45dfa1d4b181500110ba (patch) | |
tree | ed457db4cfb202015866a131ad4e742503728fad /drivers/gpu/drm | |
parent | 35e4b7af21d77933abda3d41d1672589eb6c960c (diff) |
drm/radeon/kms: don't require up to 64k allocations. (v2)
This avoids needing to do a kmalloc > PAGE_SIZE for the main
indirect buffer chunk; it adds an accessor for all reads from
the chunk and caches a single page at a time for subsequent
reads.
changes since v1:
Use a two-page pool, which should cover the most common case.
A single packet spanning more than PAGE_SIZE would still be hit,
but I'm having trouble seeing anywhere we currently generate anything like that.
Hopefully short pages at the end of the buffer are now copied properly.
Added a parser_error flag to record deep errors instead of having to test
every ib value fetch.
Fixed a bug in the version of the patch that went to the mailing list.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- | drivers/gpu/drm/radeon/r100.c | 188 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/r100_track.h | 69 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/r200.c | 79 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/r300.c | 137 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/r600_cs.c | 26 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon.h | 37 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_cs.c | 105 |
7 files changed, 370 insertions, 271 deletions
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 737970b43aef..9ab976d97e91 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -863,13 +863,11 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p, | |||
863 | void r100_cs_dump_packet(struct radeon_cs_parser *p, | 863 | void r100_cs_dump_packet(struct radeon_cs_parser *p, |
864 | struct radeon_cs_packet *pkt) | 864 | struct radeon_cs_packet *pkt) |
865 | { | 865 | { |
866 | struct radeon_cs_chunk *ib_chunk; | ||
867 | volatile uint32_t *ib; | 866 | volatile uint32_t *ib; |
868 | unsigned i; | 867 | unsigned i; |
869 | unsigned idx; | 868 | unsigned idx; |
870 | 869 | ||
871 | ib = p->ib->ptr; | 870 | ib = p->ib->ptr; |
872 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
873 | idx = pkt->idx; | 871 | idx = pkt->idx; |
874 | for (i = 0; i <= (pkt->count + 1); i++, idx++) { | 872 | for (i = 0; i <= (pkt->count + 1); i++, idx++) { |
875 | DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); | 873 | DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); |
@@ -896,7 +894,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, | |||
896 | idx, ib_chunk->length_dw); | 894 | idx, ib_chunk->length_dw); |
897 | return -EINVAL; | 895 | return -EINVAL; |
898 | } | 896 | } |
899 | header = ib_chunk->kdata[idx]; | 897 | header = radeon_get_ib_value(p, idx); |
900 | pkt->idx = idx; | 898 | pkt->idx = idx; |
901 | pkt->type = CP_PACKET_GET_TYPE(header); | 899 | pkt->type = CP_PACKET_GET_TYPE(header); |
902 | pkt->count = CP_PACKET_GET_COUNT(header); | 900 | pkt->count = CP_PACKET_GET_COUNT(header); |
@@ -939,7 +937,6 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, | |||
939 | */ | 937 | */ |
940 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | 938 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) |
941 | { | 939 | { |
942 | struct radeon_cs_chunk *ib_chunk; | ||
943 | struct drm_mode_object *obj; | 940 | struct drm_mode_object *obj; |
944 | struct drm_crtc *crtc; | 941 | struct drm_crtc *crtc; |
945 | struct radeon_crtc *radeon_crtc; | 942 | struct radeon_crtc *radeon_crtc; |
@@ -947,8 +944,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
947 | int crtc_id; | 944 | int crtc_id; |
948 | int r; | 945 | int r; |
949 | uint32_t header, h_idx, reg; | 946 | uint32_t header, h_idx, reg; |
947 | volatile uint32_t *ib; | ||
950 | 948 | ||
951 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | 949 | ib = p->ib->ptr; |
952 | 950 | ||
953 | /* parse the wait until */ | 951 | /* parse the wait until */ |
954 | r = r100_cs_packet_parse(p, &waitreloc, p->idx); | 952 | r = r100_cs_packet_parse(p, &waitreloc, p->idx); |
@@ -963,7 +961,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
963 | return r; | 961 | return r; |
964 | } | 962 | } |
965 | 963 | ||
966 | if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { | 964 | if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { |
967 | DRM_ERROR("vline wait had illegal wait until\n"); | 965 | DRM_ERROR("vline wait had illegal wait until\n"); |
968 | r = -EINVAL; | 966 | r = -EINVAL; |
969 | return r; | 967 | return r; |
@@ -978,9 +976,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
978 | p->idx += waitreloc.count; | 976 | p->idx += waitreloc.count; |
979 | p->idx += p3reloc.count; | 977 | p->idx += p3reloc.count; |
980 | 978 | ||
981 | header = ib_chunk->kdata[h_idx]; | 979 | header = radeon_get_ib_value(p, h_idx); |
982 | crtc_id = ib_chunk->kdata[h_idx + 5]; | 980 | crtc_id = radeon_get_ib_value(p, h_idx + 5); |
983 | reg = ib_chunk->kdata[h_idx] >> 2; | 981 | reg = header >> 2; |
984 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | 982 | mutex_lock(&p->rdev->ddev->mode_config.mutex); |
985 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 983 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
986 | if (!obj) { | 984 | if (!obj) { |
@@ -994,8 +992,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
994 | 992 | ||
995 | if (!crtc->enabled) { | 993 | if (!crtc->enabled) { |
996 | /* if the CRTC isn't enabled - we need to nop out the wait until */ | 994 | /* if the CRTC isn't enabled - we need to nop out the wait until */ |
997 | ib_chunk->kdata[h_idx + 2] = PACKET2(0); | 995 | |
998 | ib_chunk->kdata[h_idx + 3] = PACKET2(0); | 996 | ib[h_idx + 2] = PACKET2(0); |
997 | ib[h_idx + 3] = PACKET2(0); | ||
999 | } else if (crtc_id == 1) { | 998 | } else if (crtc_id == 1) { |
1000 | switch (reg) { | 999 | switch (reg) { |
1001 | case AVIVO_D1MODE_VLINE_START_END: | 1000 | case AVIVO_D1MODE_VLINE_START_END: |
@@ -1011,8 +1010,8 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
1011 | r = -EINVAL; | 1010 | r = -EINVAL; |
1012 | goto out; | 1011 | goto out; |
1013 | } | 1012 | } |
1014 | ib_chunk->kdata[h_idx] = header; | 1013 | ib[h_idx] = header; |
1015 | ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; | 1014 | ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; |
1016 | } | 1015 | } |
1017 | out: | 1016 | out: |
1018 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | 1017 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); |
@@ -1033,7 +1032,6 @@ out: | |||
1033 | int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, | 1032 | int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, |
1034 | struct radeon_cs_reloc **cs_reloc) | 1033 | struct radeon_cs_reloc **cs_reloc) |
1035 | { | 1034 | { |
1036 | struct radeon_cs_chunk *ib_chunk; | ||
1037 | struct radeon_cs_chunk *relocs_chunk; | 1035 | struct radeon_cs_chunk *relocs_chunk; |
1038 | struct radeon_cs_packet p3reloc; | 1036 | struct radeon_cs_packet p3reloc; |
1039 | unsigned idx; | 1037 | unsigned idx; |
@@ -1044,7 +1042,6 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, | |||
1044 | return -EINVAL; | 1042 | return -EINVAL; |
1045 | } | 1043 | } |
1046 | *cs_reloc = NULL; | 1044 | *cs_reloc = NULL; |
1047 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
1048 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | 1045 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
1049 | r = r100_cs_packet_parse(p, &p3reloc, p->idx); | 1046 | r = r100_cs_packet_parse(p, &p3reloc, p->idx); |
1050 | if (r) { | 1047 | if (r) { |
@@ -1057,7 +1054,7 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, | |||
1057 | r100_cs_dump_packet(p, &p3reloc); | 1054 | r100_cs_dump_packet(p, &p3reloc); |
1058 | return -EINVAL; | 1055 | return -EINVAL; |
1059 | } | 1056 | } |
1060 | idx = ib_chunk->kdata[p3reloc.idx + 1]; | 1057 | idx = radeon_get_ib_value(p, p3reloc.idx + 1); |
1061 | if (idx >= relocs_chunk->length_dw) { | 1058 | if (idx >= relocs_chunk->length_dw) { |
1062 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", | 1059 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", |
1063 | idx, relocs_chunk->length_dw); | 1060 | idx, relocs_chunk->length_dw); |
@@ -1126,7 +1123,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1126 | struct radeon_cs_packet *pkt, | 1123 | struct radeon_cs_packet *pkt, |
1127 | unsigned idx, unsigned reg) | 1124 | unsigned idx, unsigned reg) |
1128 | { | 1125 | { |
1129 | struct radeon_cs_chunk *ib_chunk; | ||
1130 | struct radeon_cs_reloc *reloc; | 1126 | struct radeon_cs_reloc *reloc; |
1131 | struct r100_cs_track *track; | 1127 | struct r100_cs_track *track; |
1132 | volatile uint32_t *ib; | 1128 | volatile uint32_t *ib; |
@@ -1134,11 +1130,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1134 | int r; | 1130 | int r; |
1135 | int i, face; | 1131 | int i, face; |
1136 | u32 tile_flags = 0; | 1132 | u32 tile_flags = 0; |
1133 | u32 idx_value; | ||
1137 | 1134 | ||
1138 | ib = p->ib->ptr; | 1135 | ib = p->ib->ptr; |
1139 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
1140 | track = (struct r100_cs_track *)p->track; | 1136 | track = (struct r100_cs_track *)p->track; |
1141 | 1137 | ||
1138 | idx_value = radeon_get_ib_value(p, idx); | ||
1139 | |||
1142 | switch (reg) { | 1140 | switch (reg) { |
1143 | case RADEON_CRTC_GUI_TRIG_VLINE: | 1141 | case RADEON_CRTC_GUI_TRIG_VLINE: |
1144 | r = r100_cs_packet_parse_vline(p); | 1142 | r = r100_cs_packet_parse_vline(p); |
@@ -1166,8 +1164,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1166 | return r; | 1164 | return r; |
1167 | } | 1165 | } |
1168 | track->zb.robj = reloc->robj; | 1166 | track->zb.robj = reloc->robj; |
1169 | track->zb.offset = ib_chunk->kdata[idx]; | 1167 | track->zb.offset = idx_value; |
1170 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1168 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1171 | break; | 1169 | break; |
1172 | case RADEON_RB3D_COLOROFFSET: | 1170 | case RADEON_RB3D_COLOROFFSET: |
1173 | r = r100_cs_packet_next_reloc(p, &reloc); | 1171 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1178,8 +1176,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1178 | return r; | 1176 | return r; |
1179 | } | 1177 | } |
1180 | track->cb[0].robj = reloc->robj; | 1178 | track->cb[0].robj = reloc->robj; |
1181 | track->cb[0].offset = ib_chunk->kdata[idx]; | 1179 | track->cb[0].offset = idx_value; |
1182 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1180 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1183 | break; | 1181 | break; |
1184 | case RADEON_PP_TXOFFSET_0: | 1182 | case RADEON_PP_TXOFFSET_0: |
1185 | case RADEON_PP_TXOFFSET_1: | 1183 | case RADEON_PP_TXOFFSET_1: |
@@ -1192,7 +1190,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1192 | r100_cs_dump_packet(p, pkt); | 1190 | r100_cs_dump_packet(p, pkt); |
1193 | return r; | 1191 | return r; |
1194 | } | 1192 | } |
1195 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1193 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1196 | track->textures[i].robj = reloc->robj; | 1194 | track->textures[i].robj = reloc->robj; |
1197 | break; | 1195 | break; |
1198 | case RADEON_PP_CUBIC_OFFSET_T0_0: | 1196 | case RADEON_PP_CUBIC_OFFSET_T0_0: |
@@ -1208,8 +1206,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1208 | r100_cs_dump_packet(p, pkt); | 1206 | r100_cs_dump_packet(p, pkt); |
1209 | return r; | 1207 | return r; |
1210 | } | 1208 | } |
1211 | track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx]; | 1209 | track->textures[0].cube_info[i].offset = idx_value; |
1212 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1210 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1213 | track->textures[0].cube_info[i].robj = reloc->robj; | 1211 | track->textures[0].cube_info[i].robj = reloc->robj; |
1214 | break; | 1212 | break; |
1215 | case RADEON_PP_CUBIC_OFFSET_T1_0: | 1213 | case RADEON_PP_CUBIC_OFFSET_T1_0: |
@@ -1225,8 +1223,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1225 | r100_cs_dump_packet(p, pkt); | 1223 | r100_cs_dump_packet(p, pkt); |
1226 | return r; | 1224 | return r; |
1227 | } | 1225 | } |
1228 | track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx]; | 1226 | track->textures[1].cube_info[i].offset = idx_value; |
1229 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1227 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1230 | track->textures[1].cube_info[i].robj = reloc->robj; | 1228 | track->textures[1].cube_info[i].robj = reloc->robj; |
1231 | break; | 1229 | break; |
1232 | case RADEON_PP_CUBIC_OFFSET_T2_0: | 1230 | case RADEON_PP_CUBIC_OFFSET_T2_0: |
@@ -1242,12 +1240,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1242 | r100_cs_dump_packet(p, pkt); | 1240 | r100_cs_dump_packet(p, pkt); |
1243 | return r; | 1241 | return r; |
1244 | } | 1242 | } |
1245 | track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx]; | 1243 | track->textures[2].cube_info[i].offset = idx_value; |
1246 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1244 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1247 | track->textures[2].cube_info[i].robj = reloc->robj; | 1245 | track->textures[2].cube_info[i].robj = reloc->robj; |
1248 | break; | 1246 | break; |
1249 | case RADEON_RE_WIDTH_HEIGHT: | 1247 | case RADEON_RE_WIDTH_HEIGHT: |
1250 | track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); | 1248 | track->maxy = ((idx_value >> 16) & 0x7FF); |
1251 | break; | 1249 | break; |
1252 | case RADEON_RB3D_COLORPITCH: | 1250 | case RADEON_RB3D_COLORPITCH: |
1253 | r = r100_cs_packet_next_reloc(p, &reloc); | 1251 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1263,17 +1261,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1263 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 1261 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
1264 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | 1262 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; |
1265 | 1263 | ||
1266 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | 1264 | tmp = idx_value & ~(0x7 << 16); |
1267 | tmp |= tile_flags; | 1265 | tmp |= tile_flags; |
1268 | ib[idx] = tmp; | 1266 | ib[idx] = tmp; |
1269 | 1267 | ||
1270 | track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; | 1268 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
1271 | break; | 1269 | break; |
1272 | case RADEON_RB3D_DEPTHPITCH: | 1270 | case RADEON_RB3D_DEPTHPITCH: |
1273 | track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; | 1271 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
1274 | break; | 1272 | break; |
1275 | case RADEON_RB3D_CNTL: | 1273 | case RADEON_RB3D_CNTL: |
1276 | switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { | 1274 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
1277 | case 7: | 1275 | case 7: |
1278 | case 8: | 1276 | case 8: |
1279 | case 9: | 1277 | case 9: |
@@ -1291,13 +1289,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1291 | break; | 1289 | break; |
1292 | default: | 1290 | default: |
1293 | DRM_ERROR("Invalid color buffer format (%d) !\n", | 1291 | DRM_ERROR("Invalid color buffer format (%d) !\n", |
1294 | ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); | 1292 | ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); |
1295 | return -EINVAL; | 1293 | return -EINVAL; |
1296 | } | 1294 | } |
1297 | track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); | 1295 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
1298 | break; | 1296 | break; |
1299 | case RADEON_RB3D_ZSTENCILCNTL: | 1297 | case RADEON_RB3D_ZSTENCILCNTL: |
1300 | switch (ib_chunk->kdata[idx] & 0xf) { | 1298 | switch (idx_value & 0xf) { |
1301 | case 0: | 1299 | case 0: |
1302 | track->zb.cpp = 2; | 1300 | track->zb.cpp = 2; |
1303 | break; | 1301 | break; |
@@ -1321,44 +1319,44 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1321 | r100_cs_dump_packet(p, pkt); | 1319 | r100_cs_dump_packet(p, pkt); |
1322 | return r; | 1320 | return r; |
1323 | } | 1321 | } |
1324 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1322 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1325 | break; | 1323 | break; |
1326 | case RADEON_PP_CNTL: | 1324 | case RADEON_PP_CNTL: |
1327 | { | 1325 | { |
1328 | uint32_t temp = ib_chunk->kdata[idx] >> 4; | 1326 | uint32_t temp = idx_value >> 4; |
1329 | for (i = 0; i < track->num_texture; i++) | 1327 | for (i = 0; i < track->num_texture; i++) |
1330 | track->textures[i].enabled = !!(temp & (1 << i)); | 1328 | track->textures[i].enabled = !!(temp & (1 << i)); |
1331 | } | 1329 | } |
1332 | break; | 1330 | break; |
1333 | case RADEON_SE_VF_CNTL: | 1331 | case RADEON_SE_VF_CNTL: |
1334 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1332 | track->vap_vf_cntl = idx_value; |
1335 | break; | 1333 | break; |
1336 | case RADEON_SE_VTX_FMT: | 1334 | case RADEON_SE_VTX_FMT: |
1337 | track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]); | 1335 | track->vtx_size = r100_get_vtx_size(idx_value); |
1338 | break; | 1336 | break; |
1339 | case RADEON_PP_TEX_SIZE_0: | 1337 | case RADEON_PP_TEX_SIZE_0: |
1340 | case RADEON_PP_TEX_SIZE_1: | 1338 | case RADEON_PP_TEX_SIZE_1: |
1341 | case RADEON_PP_TEX_SIZE_2: | 1339 | case RADEON_PP_TEX_SIZE_2: |
1342 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; | 1340 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; |
1343 | track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; | 1341 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
1344 | track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; | 1342 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
1345 | break; | 1343 | break; |
1346 | case RADEON_PP_TEX_PITCH_0: | 1344 | case RADEON_PP_TEX_PITCH_0: |
1347 | case RADEON_PP_TEX_PITCH_1: | 1345 | case RADEON_PP_TEX_PITCH_1: |
1348 | case RADEON_PP_TEX_PITCH_2: | 1346 | case RADEON_PP_TEX_PITCH_2: |
1349 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; | 1347 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; |
1350 | track->textures[i].pitch = ib_chunk->kdata[idx] + 32; | 1348 | track->textures[i].pitch = idx_value + 32; |
1351 | break; | 1349 | break; |
1352 | case RADEON_PP_TXFILTER_0: | 1350 | case RADEON_PP_TXFILTER_0: |
1353 | case RADEON_PP_TXFILTER_1: | 1351 | case RADEON_PP_TXFILTER_1: |
1354 | case RADEON_PP_TXFILTER_2: | 1352 | case RADEON_PP_TXFILTER_2: |
1355 | i = (reg - RADEON_PP_TXFILTER_0) / 24; | 1353 | i = (reg - RADEON_PP_TXFILTER_0) / 24; |
1356 | track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK) | 1354 | track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK) |
1357 | >> RADEON_MAX_MIP_LEVEL_SHIFT); | 1355 | >> RADEON_MAX_MIP_LEVEL_SHIFT); |
1358 | tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; | 1356 | tmp = (idx_value >> 23) & 0x7; |
1359 | if (tmp == 2 || tmp == 6) | 1357 | if (tmp == 2 || tmp == 6) |
1360 | track->textures[i].roundup_w = false; | 1358 | track->textures[i].roundup_w = false; |
1361 | tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; | 1359 | tmp = (idx_value >> 27) & 0x7; |
1362 | if (tmp == 2 || tmp == 6) | 1360 | if (tmp == 2 || tmp == 6) |
1363 | track->textures[i].roundup_h = false; | 1361 | track->textures[i].roundup_h = false; |
1364 | break; | 1362 | break; |
@@ -1366,16 +1364,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1366 | case RADEON_PP_TXFORMAT_1: | 1364 | case RADEON_PP_TXFORMAT_1: |
1367 | case RADEON_PP_TXFORMAT_2: | 1365 | case RADEON_PP_TXFORMAT_2: |
1368 | i = (reg - RADEON_PP_TXFORMAT_0) / 24; | 1366 | i = (reg - RADEON_PP_TXFORMAT_0) / 24; |
1369 | if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) { | 1367 | if (idx_value & RADEON_TXFORMAT_NON_POWER2) { |
1370 | track->textures[i].use_pitch = 1; | 1368 | track->textures[i].use_pitch = 1; |
1371 | } else { | 1369 | } else { |
1372 | track->textures[i].use_pitch = 0; | 1370 | track->textures[i].use_pitch = 0; |
1373 | track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); | 1371 | track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); |
1374 | track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); | 1372 | track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); |
1375 | } | 1373 | } |
1376 | if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) | 1374 | if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) |
1377 | track->textures[i].tex_coord_type = 2; | 1375 | track->textures[i].tex_coord_type = 2; |
1378 | switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { | 1376 | switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { |
1379 | case RADEON_TXFORMAT_I8: | 1377 | case RADEON_TXFORMAT_I8: |
1380 | case RADEON_TXFORMAT_RGB332: | 1378 | case RADEON_TXFORMAT_RGB332: |
1381 | case RADEON_TXFORMAT_Y8: | 1379 | case RADEON_TXFORMAT_Y8: |
@@ -1402,13 +1400,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1402 | track->textures[i].cpp = 4; | 1400 | track->textures[i].cpp = 4; |
1403 | break; | 1401 | break; |
1404 | } | 1402 | } |
1405 | track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); | 1403 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
1406 | track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); | 1404 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
1407 | break; | 1405 | break; |
1408 | case RADEON_PP_CUBIC_FACES_0: | 1406 | case RADEON_PP_CUBIC_FACES_0: |
1409 | case RADEON_PP_CUBIC_FACES_1: | 1407 | case RADEON_PP_CUBIC_FACES_1: |
1410 | case RADEON_PP_CUBIC_FACES_2: | 1408 | case RADEON_PP_CUBIC_FACES_2: |
1411 | tmp = ib_chunk->kdata[idx]; | 1409 | tmp = idx_value; |
1412 | i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; | 1410 | i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; |
1413 | for (face = 0; face < 4; face++) { | 1411 | for (face = 0; face < 4; face++) { |
1414 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); | 1412 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
@@ -1427,15 +1425,14 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | |||
1427 | struct radeon_cs_packet *pkt, | 1425 | struct radeon_cs_packet *pkt, |
1428 | struct radeon_object *robj) | 1426 | struct radeon_object *robj) |
1429 | { | 1427 | { |
1430 | struct radeon_cs_chunk *ib_chunk; | ||
1431 | unsigned idx; | 1428 | unsigned idx; |
1432 | 1429 | u32 value; | |
1433 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
1434 | idx = pkt->idx + 1; | 1430 | idx = pkt->idx + 1; |
1435 | if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) { | 1431 | value = radeon_get_ib_value(p, idx + 2); |
1432 | if ((value + 1) > radeon_object_size(robj)) { | ||
1436 | DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " | 1433 | DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " |
1437 | "(need %u have %lu) !\n", | 1434 | "(need %u have %lu) !\n", |
1438 | ib_chunk->kdata[idx+2] + 1, | 1435 | value + 1, |
1439 | radeon_object_size(robj)); | 1436 | radeon_object_size(robj)); |
1440 | return -EINVAL; | 1437 | return -EINVAL; |
1441 | } | 1438 | } |
@@ -1445,59 +1442,20 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | |||
1445 | static int r100_packet3_check(struct radeon_cs_parser *p, | 1442 | static int r100_packet3_check(struct radeon_cs_parser *p, |
1446 | struct radeon_cs_packet *pkt) | 1443 | struct radeon_cs_packet *pkt) |
1447 | { | 1444 | { |
1448 | struct radeon_cs_chunk *ib_chunk; | ||
1449 | struct radeon_cs_reloc *reloc; | 1445 | struct radeon_cs_reloc *reloc; |
1450 | struct r100_cs_track *track; | 1446 | struct r100_cs_track *track; |
1451 | unsigned idx; | 1447 | unsigned idx; |
1452 | unsigned i, c; | ||
1453 | volatile uint32_t *ib; | 1448 | volatile uint32_t *ib; |
1454 | int r; | 1449 | int r; |
1455 | 1450 | ||
1456 | ib = p->ib->ptr; | 1451 | ib = p->ib->ptr; |
1457 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
1458 | idx = pkt->idx + 1; | 1452 | idx = pkt->idx + 1; |
1459 | track = (struct r100_cs_track *)p->track; | 1453 | track = (struct r100_cs_track *)p->track; |
1460 | switch (pkt->opcode) { | 1454 | switch (pkt->opcode) { |
1461 | case PACKET3_3D_LOAD_VBPNTR: | 1455 | case PACKET3_3D_LOAD_VBPNTR: |
1462 | c = ib_chunk->kdata[idx++]; | 1456 | r = r100_packet3_load_vbpntr(p, pkt, idx); |
1463 | track->num_arrays = c; | 1457 | if (r) |
1464 | for (i = 0; i < (c - 1); i += 2, idx += 3) { | 1458 | return r; |
1465 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1466 | if (r) { | ||
1467 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1468 | pkt->opcode); | ||
1469 | r100_cs_dump_packet(p, pkt); | ||
1470 | return r; | ||
1471 | } | ||
1472 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | ||
1473 | track->arrays[i + 0].robj = reloc->robj; | ||
1474 | track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; | ||
1475 | track->arrays[i + 0].esize &= 0x7F; | ||
1476 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1477 | if (r) { | ||
1478 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1479 | pkt->opcode); | ||
1480 | r100_cs_dump_packet(p, pkt); | ||
1481 | return r; | ||
1482 | } | ||
1483 | ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset); | ||
1484 | track->arrays[i + 1].robj = reloc->robj; | ||
1485 | track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24; | ||
1486 | track->arrays[i + 1].esize &= 0x7F; | ||
1487 | } | ||
1488 | if (c & 1) { | ||
1489 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1490 | if (r) { | ||
1491 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1492 | pkt->opcode); | ||
1493 | r100_cs_dump_packet(p, pkt); | ||
1494 | return r; | ||
1495 | } | ||
1496 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | ||
1497 | track->arrays[i + 0].robj = reloc->robj; | ||
1498 | track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; | ||
1499 | track->arrays[i + 0].esize &= 0x7F; | ||
1500 | } | ||
1501 | break; | 1459 | break; |
1502 | case PACKET3_INDX_BUFFER: | 1460 | case PACKET3_INDX_BUFFER: |
1503 | r = r100_cs_packet_next_reloc(p, &reloc); | 1461 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1506,7 +1464,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1506 | r100_cs_dump_packet(p, pkt); | 1464 | r100_cs_dump_packet(p, pkt); |
1507 | return r; | 1465 | return r; |
1508 | } | 1466 | } |
1509 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | 1467 | ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset); |
1510 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); | 1468 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); |
1511 | if (r) { | 1469 | if (r) { |
1512 | return r; | 1470 | return r; |
@@ -1520,27 +1478,27 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1520 | r100_cs_dump_packet(p, pkt); | 1478 | r100_cs_dump_packet(p, pkt); |
1521 | return r; | 1479 | return r; |
1522 | } | 1480 | } |
1523 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1481 | ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset); |
1524 | track->num_arrays = 1; | 1482 | track->num_arrays = 1; |
1525 | track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]); | 1483 | track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); |
1526 | 1484 | ||
1527 | track->arrays[0].robj = reloc->robj; | 1485 | track->arrays[0].robj = reloc->robj; |
1528 | track->arrays[0].esize = track->vtx_size; | 1486 | track->arrays[0].esize = track->vtx_size; |
1529 | 1487 | ||
1530 | track->max_indx = ib_chunk->kdata[idx+1]; | 1488 | track->max_indx = radeon_get_ib_value(p, idx+1); |
1531 | 1489 | ||
1532 | track->vap_vf_cntl = ib_chunk->kdata[idx+3]; | 1490 | track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); |
1533 | track->immd_dwords = pkt->count - 1; | 1491 | track->immd_dwords = pkt->count - 1; |
1534 | r = r100_cs_track_check(p->rdev, track); | 1492 | r = r100_cs_track_check(p->rdev, track); |
1535 | if (r) | 1493 | if (r) |
1536 | return r; | 1494 | return r; |
1537 | break; | 1495 | break; |
1538 | case PACKET3_3D_DRAW_IMMD: | 1496 | case PACKET3_3D_DRAW_IMMD: |
1539 | if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { | 1497 | if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { |
1540 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); | 1498 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
1541 | return -EINVAL; | 1499 | return -EINVAL; |
1542 | } | 1500 | } |
1543 | track->vap_vf_cntl = ib_chunk->kdata[idx+1]; | 1501 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1544 | track->immd_dwords = pkt->count - 1; | 1502 | track->immd_dwords = pkt->count - 1; |
1545 | r = r100_cs_track_check(p->rdev, track); | 1503 | r = r100_cs_track_check(p->rdev, track); |
1546 | if (r) | 1504 | if (r) |
@@ -1548,11 +1506,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1548 | break; | 1506 | break; |
1549 | /* triggers drawing using in-packet vertex data */ | 1507 | /* triggers drawing using in-packet vertex data */ |
1550 | case PACKET3_3D_DRAW_IMMD_2: | 1508 | case PACKET3_3D_DRAW_IMMD_2: |
1551 | if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { | 1509 | if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { |
1552 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); | 1510 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
1553 | return -EINVAL; | 1511 | return -EINVAL; |
1554 | } | 1512 | } |
1555 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1513 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1556 | track->immd_dwords = pkt->count; | 1514 | track->immd_dwords = pkt->count; |
1557 | r = r100_cs_track_check(p->rdev, track); | 1515 | r = r100_cs_track_check(p->rdev, track); |
1558 | if (r) | 1516 | if (r) |
@@ -1560,28 +1518,28 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1560 | break; | 1518 | break; |
1561 | /* triggers drawing using in-packet vertex data */ | 1519 | /* triggers drawing using in-packet vertex data */ |
1562 | case PACKET3_3D_DRAW_VBUF_2: | 1520 | case PACKET3_3D_DRAW_VBUF_2: |
1563 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1521 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1564 | r = r100_cs_track_check(p->rdev, track); | 1522 | r = r100_cs_track_check(p->rdev, track); |
1565 | if (r) | 1523 | if (r) |
1566 | return r; | 1524 | return r; |
1567 | break; | 1525 | break; |
1568 | /* triggers drawing of vertex buffers setup elsewhere */ | 1526 | /* triggers drawing of vertex buffers setup elsewhere */ |
1569 | case PACKET3_3D_DRAW_INDX_2: | 1527 | case PACKET3_3D_DRAW_INDX_2: |
1570 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1528 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1571 | r = r100_cs_track_check(p->rdev, track); | 1529 | r = r100_cs_track_check(p->rdev, track); |
1572 | if (r) | 1530 | if (r) |
1573 | return r; | 1531 | return r; |
1574 | break; | 1532 | break; |
1575 | /* triggers drawing using indices to vertex buffer */ | 1533 | /* triggers drawing using indices to vertex buffer */ |
1576 | case PACKET3_3D_DRAW_VBUF: | 1534 | case PACKET3_3D_DRAW_VBUF: |
1577 | track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; | 1535 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1578 | r = r100_cs_track_check(p->rdev, track); | 1536 | r = r100_cs_track_check(p->rdev, track); |
1579 | if (r) | 1537 | if (r) |
1580 | return r; | 1538 | return r; |
1581 | break; | 1539 | break; |
1582 | /* triggers drawing of vertex buffers setup elsewhere */ | 1540 | /* triggers drawing of vertex buffers setup elsewhere */ |
1583 | case PACKET3_3D_DRAW_INDX: | 1541 | case PACKET3_3D_DRAW_INDX: |
1584 | track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; | 1542 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1585 | r = r100_cs_track_check(p->rdev, track); | 1543 | r = r100_cs_track_check(p->rdev, track); |
1586 | if (r) | 1544 | if (r) |
1587 | return r; | 1545 | return r; |
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index 70a82eda394a..0daf0d76a891 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
@@ -84,6 +84,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
84 | struct radeon_cs_packet *pkt, | 84 | struct radeon_cs_packet *pkt, |
85 | unsigned idx, unsigned reg); | 85 | unsigned idx, unsigned reg); |
86 | 86 | ||
87 | |||
88 | |||
87 | static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | 89 | static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, |
88 | struct radeon_cs_packet *pkt, | 90 | struct radeon_cs_packet *pkt, |
89 | unsigned idx, | 91 | unsigned idx, |
@@ -93,9 +95,7 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | |||
93 | u32 tile_flags = 0; | 95 | u32 tile_flags = 0; |
94 | u32 tmp; | 96 | u32 tmp; |
95 | struct radeon_cs_reloc *reloc; | 97 | struct radeon_cs_reloc *reloc; |
96 | struct radeon_cs_chunk *ib_chunk; | 98 | u32 value; |
97 | |||
98 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
99 | 99 | ||
100 | r = r100_cs_packet_next_reloc(p, &reloc); | 100 | r = r100_cs_packet_next_reloc(p, &reloc); |
101 | if (r) { | 101 | if (r) { |
@@ -104,7 +104,8 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | |||
104 | r100_cs_dump_packet(p, pkt); | 104 | r100_cs_dump_packet(p, pkt); |
105 | return r; | 105 | return r; |
106 | } | 106 | } |
107 | tmp = ib_chunk->kdata[idx] & 0x003fffff; | 107 | value = radeon_get_ib_value(p, idx); |
108 | tmp = value & 0x003fffff; | ||
108 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | 109 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
109 | 110 | ||
110 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 111 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
@@ -119,6 +120,64 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | |||
119 | } | 120 | } |
120 | 121 | ||
121 | tmp |= tile_flags; | 122 | tmp |= tile_flags; |
122 | p->ib->ptr[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; | 123 | p->ib->ptr[idx] = (value & 0x3fc00000) | tmp; |
123 | return 0; | 124 | return 0; |
124 | } | 125 | } |
126 | |||
127 | static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, | ||
128 | struct radeon_cs_packet *pkt, | ||
129 | int idx) | ||
130 | { | ||
131 | unsigned c, i; | ||
132 | struct radeon_cs_reloc *reloc; | ||
133 | struct r100_cs_track *track; | ||
134 | int r = 0; | ||
135 | volatile uint32_t *ib; | ||
136 | u32 idx_value; | ||
137 | |||
138 | ib = p->ib->ptr; | ||
139 | track = (struct r100_cs_track *)p->track; | ||
140 | c = radeon_get_ib_value(p, idx++) & 0x1F; | ||
141 | track->num_arrays = c; | ||
142 | for (i = 0; i < (c - 1); i+=2, idx+=3) { | ||
143 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
144 | if (r) { | ||
145 | DRM_ERROR("No reloc for packet3 %d\n", | ||
146 | pkt->opcode); | ||
147 | r100_cs_dump_packet(p, pkt); | ||
148 | return r; | ||
149 | } | ||
150 | idx_value = radeon_get_ib_value(p, idx); | ||
151 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | ||
152 | |||
153 | track->arrays[i + 0].esize = idx_value >> 8; | ||
154 | track->arrays[i + 0].robj = reloc->robj; | ||
155 | track->arrays[i + 0].esize &= 0x7F; | ||
156 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
157 | if (r) { | ||
158 | DRM_ERROR("No reloc for packet3 %d\n", | ||
159 | pkt->opcode); | ||
160 | r100_cs_dump_packet(p, pkt); | ||
161 | return r; | ||
162 | } | ||
163 | ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); | ||
164 | track->arrays[i + 1].robj = reloc->robj; | ||
165 | track->arrays[i + 1].esize = idx_value >> 24; | ||
166 | track->arrays[i + 1].esize &= 0x7F; | ||
167 | } | ||
168 | if (c & 1) { | ||
169 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
170 | if (r) { | ||
171 | DRM_ERROR("No reloc for packet3 %d\n", | ||
172 | pkt->opcode); | ||
173 | r100_cs_dump_packet(p, pkt); | ||
174 | return r; | ||
175 | } | ||
176 | idx_value = radeon_get_ib_value(p, idx); | ||
177 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | ||
178 | track->arrays[i + 0].robj = reloc->robj; | ||
179 | track->arrays[i + 0].esize = idx_value >> 8; | ||
180 | track->arrays[i + 0].esize &= 0x7F; | ||
181 | } | ||
182 | return r; | ||
183 | } | ||
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 568c74bfba3d..cf7fea5ff2e5 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -96,7 +96,6 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
96 | struct radeon_cs_packet *pkt, | 96 | struct radeon_cs_packet *pkt, |
97 | unsigned idx, unsigned reg) | 97 | unsigned idx, unsigned reg) |
98 | { | 98 | { |
99 | struct radeon_cs_chunk *ib_chunk; | ||
100 | struct radeon_cs_reloc *reloc; | 99 | struct radeon_cs_reloc *reloc; |
101 | struct r100_cs_track *track; | 100 | struct r100_cs_track *track; |
102 | volatile uint32_t *ib; | 101 | volatile uint32_t *ib; |
@@ -105,11 +104,11 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
105 | int i; | 104 | int i; |
106 | int face; | 105 | int face; |
107 | u32 tile_flags = 0; | 106 | u32 tile_flags = 0; |
107 | u32 idx_value; | ||
108 | 108 | ||
109 | ib = p->ib->ptr; | 109 | ib = p->ib->ptr; |
110 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
111 | track = (struct r100_cs_track *)p->track; | 110 | track = (struct r100_cs_track *)p->track; |
112 | 111 | idx_value = radeon_get_ib_value(p, idx); | |
113 | switch (reg) { | 112 | switch (reg) { |
114 | case RADEON_CRTC_GUI_TRIG_VLINE: | 113 | case RADEON_CRTC_GUI_TRIG_VLINE: |
115 | r = r100_cs_packet_parse_vline(p); | 114 | r = r100_cs_packet_parse_vline(p); |
@@ -137,8 +136,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
137 | return r; | 136 | return r; |
138 | } | 137 | } |
139 | track->zb.robj = reloc->robj; | 138 | track->zb.robj = reloc->robj; |
140 | track->zb.offset = ib_chunk->kdata[idx]; | 139 | track->zb.offset = idx_value; |
141 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 140 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
142 | break; | 141 | break; |
143 | case RADEON_RB3D_COLOROFFSET: | 142 | case RADEON_RB3D_COLOROFFSET: |
144 | r = r100_cs_packet_next_reloc(p, &reloc); | 143 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -149,8 +148,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
149 | return r; | 148 | return r; |
150 | } | 149 | } |
151 | track->cb[0].robj = reloc->robj; | 150 | track->cb[0].robj = reloc->robj; |
152 | track->cb[0].offset = ib_chunk->kdata[idx]; | 151 | track->cb[0].offset = idx_value; |
153 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 152 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
154 | break; | 153 | break; |
155 | case R200_PP_TXOFFSET_0: | 154 | case R200_PP_TXOFFSET_0: |
156 | case R200_PP_TXOFFSET_1: | 155 | case R200_PP_TXOFFSET_1: |
@@ -166,7 +165,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
166 | r100_cs_dump_packet(p, pkt); | 165 | r100_cs_dump_packet(p, pkt); |
167 | return r; | 166 | return r; |
168 | } | 167 | } |
169 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 168 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
170 | track->textures[i].robj = reloc->robj; | 169 | track->textures[i].robj = reloc->robj; |
171 | break; | 170 | break; |
172 | case R200_PP_CUBIC_OFFSET_F1_0: | 171 | case R200_PP_CUBIC_OFFSET_F1_0: |
@@ -208,12 +207,12 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
208 | r100_cs_dump_packet(p, pkt); | 207 | r100_cs_dump_packet(p, pkt); |
209 | return r; | 208 | return r; |
210 | } | 209 | } |
211 | track->textures[i].cube_info[face - 1].offset = ib_chunk->kdata[idx]; | 210 | track->textures[i].cube_info[face - 1].offset = idx_value; |
212 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 211 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
213 | track->textures[i].cube_info[face - 1].robj = reloc->robj; | 212 | track->textures[i].cube_info[face - 1].robj = reloc->robj; |
214 | break; | 213 | break; |
215 | case RADEON_RE_WIDTH_HEIGHT: | 214 | case RADEON_RE_WIDTH_HEIGHT: |
216 | track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); | 215 | track->maxy = ((idx_value >> 16) & 0x7FF); |
217 | break; | 216 | break; |
218 | case RADEON_RB3D_COLORPITCH: | 217 | case RADEON_RB3D_COLORPITCH: |
219 | r = r100_cs_packet_next_reloc(p, &reloc); | 218 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -229,17 +228,17 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
229 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 228 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
230 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | 229 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; |
231 | 230 | ||
232 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | 231 | tmp = idx_value & ~(0x7 << 16); |
233 | tmp |= tile_flags; | 232 | tmp |= tile_flags; |
234 | ib[idx] = tmp; | 233 | ib[idx] = tmp; |
235 | 234 | ||
236 | track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; | 235 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
237 | break; | 236 | break; |
238 | case RADEON_RB3D_DEPTHPITCH: | 237 | case RADEON_RB3D_DEPTHPITCH: |
239 | track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; | 238 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
240 | break; | 239 | break; |
241 | case RADEON_RB3D_CNTL: | 240 | case RADEON_RB3D_CNTL: |
242 | switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { | 241 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
243 | case 7: | 242 | case 7: |
244 | case 8: | 243 | case 8: |
245 | case 9: | 244 | case 9: |
@@ -257,18 +256,18 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
257 | break; | 256 | break; |
258 | default: | 257 | default: |
259 | DRM_ERROR("Invalid color buffer format (%d) !\n", | 258 | DRM_ERROR("Invalid color buffer format (%d) !\n", |
260 | ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); | 259 | ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); |
261 | return -EINVAL; | 260 | return -EINVAL; |
262 | } | 261 | } |
263 | if (ib_chunk->kdata[idx] & RADEON_DEPTHXY_OFFSET_ENABLE) { | 262 | if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) { |
264 | DRM_ERROR("No support for depth xy offset in kms\n"); | 263 | DRM_ERROR("No support for depth xy offset in kms\n"); |
265 | return -EINVAL; | 264 | return -EINVAL; |
266 | } | 265 | } |
267 | 266 | ||
268 | track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); | 267 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
269 | break; | 268 | break; |
270 | case RADEON_RB3D_ZSTENCILCNTL: | 269 | case RADEON_RB3D_ZSTENCILCNTL: |
271 | switch (ib_chunk->kdata[idx] & 0xf) { | 270 | switch (idx_value & 0xf) { |
272 | case 0: | 271 | case 0: |
273 | track->zb.cpp = 2; | 272 | track->zb.cpp = 2; |
274 | break; | 273 | break; |
@@ -292,27 +291,27 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
292 | r100_cs_dump_packet(p, pkt); | 291 | r100_cs_dump_packet(p, pkt); |
293 | return r; | 292 | return r; |
294 | } | 293 | } |
295 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 294 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
296 | break; | 295 | break; |
297 | case RADEON_PP_CNTL: | 296 | case RADEON_PP_CNTL: |
298 | { | 297 | { |
299 | uint32_t temp = ib_chunk->kdata[idx] >> 4; | 298 | uint32_t temp = idx_value >> 4; |
300 | for (i = 0; i < track->num_texture; i++) | 299 | for (i = 0; i < track->num_texture; i++) |
301 | track->textures[i].enabled = !!(temp & (1 << i)); | 300 | track->textures[i].enabled = !!(temp & (1 << i)); |
302 | } | 301 | } |
303 | break; | 302 | break; |
304 | case RADEON_SE_VF_CNTL: | 303 | case RADEON_SE_VF_CNTL: |
305 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 304 | track->vap_vf_cntl = idx_value; |
306 | break; | 305 | break; |
307 | case 0x210c: | 306 | case 0x210c: |
308 | /* VAP_VF_MAX_VTX_INDX */ | 307 | /* VAP_VF_MAX_VTX_INDX */ |
309 | track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; | 308 | track->max_indx = idx_value & 0x00FFFFFFUL; |
310 | break; | 309 | break; |
311 | case R200_SE_VTX_FMT_0: | 310 | case R200_SE_VTX_FMT_0: |
312 | track->vtx_size = r200_get_vtx_size_0(ib_chunk->kdata[idx]); | 311 | track->vtx_size = r200_get_vtx_size_0(idx_value); |
313 | break; | 312 | break; |
314 | case R200_SE_VTX_FMT_1: | 313 | case R200_SE_VTX_FMT_1: |
315 | track->vtx_size += r200_get_vtx_size_1(ib_chunk->kdata[idx]); | 314 | track->vtx_size += r200_get_vtx_size_1(idx_value); |
316 | break; | 315 | break; |
317 | case R200_PP_TXSIZE_0: | 316 | case R200_PP_TXSIZE_0: |
318 | case R200_PP_TXSIZE_1: | 317 | case R200_PP_TXSIZE_1: |
@@ -321,8 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
321 | case R200_PP_TXSIZE_4: | 320 | case R200_PP_TXSIZE_4: |
322 | case R200_PP_TXSIZE_5: | 321 | case R200_PP_TXSIZE_5: |
323 | i = (reg - R200_PP_TXSIZE_0) / 32; | 322 | i = (reg - R200_PP_TXSIZE_0) / 32; |
324 | track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; | 323 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
325 | track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; | 324 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
326 | break; | 325 | break; |
327 | case R200_PP_TXPITCH_0: | 326 | case R200_PP_TXPITCH_0: |
328 | case R200_PP_TXPITCH_1: | 327 | case R200_PP_TXPITCH_1: |
@@ -331,7 +330,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
331 | case R200_PP_TXPITCH_4: | 330 | case R200_PP_TXPITCH_4: |
332 | case R200_PP_TXPITCH_5: | 331 | case R200_PP_TXPITCH_5: |
333 | i = (reg - R200_PP_TXPITCH_0) / 32; | 332 | i = (reg - R200_PP_TXPITCH_0) / 32; |
334 | track->textures[i].pitch = ib_chunk->kdata[idx] + 32; | 333 | track->textures[i].pitch = idx_value + 32; |
335 | break; | 334 | break; |
336 | case R200_PP_TXFILTER_0: | 335 | case R200_PP_TXFILTER_0: |
337 | case R200_PP_TXFILTER_1: | 336 | case R200_PP_TXFILTER_1: |
@@ -340,12 +339,12 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
340 | case R200_PP_TXFILTER_4: | 339 | case R200_PP_TXFILTER_4: |
341 | case R200_PP_TXFILTER_5: | 340 | case R200_PP_TXFILTER_5: |
342 | i = (reg - R200_PP_TXFILTER_0) / 32; | 341 | i = (reg - R200_PP_TXFILTER_0) / 32; |
343 | track->textures[i].num_levels = ((ib_chunk->kdata[idx] & R200_MAX_MIP_LEVEL_MASK) | 342 | track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK) |
344 | >> R200_MAX_MIP_LEVEL_SHIFT); | 343 | >> R200_MAX_MIP_LEVEL_SHIFT); |
345 | tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; | 344 | tmp = (idx_value >> 23) & 0x7; |
346 | if (tmp == 2 || tmp == 6) | 345 | if (tmp == 2 || tmp == 6) |
347 | track->textures[i].roundup_w = false; | 346 | track->textures[i].roundup_w = false; |
348 | tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; | 347 | tmp = (idx_value >> 27) & 0x7; |
349 | if (tmp == 2 || tmp == 6) | 348 | if (tmp == 2 || tmp == 6) |
350 | track->textures[i].roundup_h = false; | 349 | track->textures[i].roundup_h = false; |
351 | break; | 350 | break; |
@@ -364,8 +363,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
364 | case R200_PP_TXFORMAT_X_4: | 363 | case R200_PP_TXFORMAT_X_4: |
365 | case R200_PP_TXFORMAT_X_5: | 364 | case R200_PP_TXFORMAT_X_5: |
366 | i = (reg - R200_PP_TXFORMAT_X_0) / 32; | 365 | i = (reg - R200_PP_TXFORMAT_X_0) / 32; |
367 | track->textures[i].txdepth = ib_chunk->kdata[idx] & 0x7; | 366 | track->textures[i].txdepth = idx_value & 0x7; |
368 | tmp = (ib_chunk->kdata[idx] >> 16) & 0x3; | 367 | tmp = (idx_value >> 16) & 0x3; |
369 | /* 2D, 3D, CUBE */ | 368 | /* 2D, 3D, CUBE */ |
370 | switch (tmp) { | 369 | switch (tmp) { |
371 | case 0: | 370 | case 0: |
@@ -389,14 +388,14 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
389 | case R200_PP_TXFORMAT_4: | 388 | case R200_PP_TXFORMAT_4: |
390 | case R200_PP_TXFORMAT_5: | 389 | case R200_PP_TXFORMAT_5: |
391 | i = (reg - R200_PP_TXFORMAT_0) / 32; | 390 | i = (reg - R200_PP_TXFORMAT_0) / 32; |
392 | if (ib_chunk->kdata[idx] & R200_TXFORMAT_NON_POWER2) { | 391 | if (idx_value & R200_TXFORMAT_NON_POWER2) { |
393 | track->textures[i].use_pitch = 1; | 392 | track->textures[i].use_pitch = 1; |
394 | } else { | 393 | } else { |
395 | track->textures[i].use_pitch = 0; | 394 | track->textures[i].use_pitch = 0; |
396 | track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); | 395 | track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); |
397 | track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); | 396 | track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); |
398 | } | 397 | } |
399 | switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { | 398 | switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { |
400 | case R200_TXFORMAT_I8: | 399 | case R200_TXFORMAT_I8: |
401 | case R200_TXFORMAT_RGB332: | 400 | case R200_TXFORMAT_RGB332: |
402 | case R200_TXFORMAT_Y8: | 401 | case R200_TXFORMAT_Y8: |
@@ -424,8 +423,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
424 | track->textures[i].cpp = 4; | 423 | track->textures[i].cpp = 4; |
425 | break; | 424 | break; |
426 | } | 425 | } |
427 | track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); | 426 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
428 | track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); | 427 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
429 | break; | 428 | break; |
430 | case R200_PP_CUBIC_FACES_0: | 429 | case R200_PP_CUBIC_FACES_0: |
431 | case R200_PP_CUBIC_FACES_1: | 430 | case R200_PP_CUBIC_FACES_1: |
@@ -433,7 +432,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
433 | case R200_PP_CUBIC_FACES_3: | 432 | case R200_PP_CUBIC_FACES_3: |
434 | case R200_PP_CUBIC_FACES_4: | 433 | case R200_PP_CUBIC_FACES_4: |
435 | case R200_PP_CUBIC_FACES_5: | 434 | case R200_PP_CUBIC_FACES_5: |
436 | tmp = ib_chunk->kdata[idx]; | 435 | tmp = idx_value; |
437 | i = (reg - R200_PP_CUBIC_FACES_0) / 32; | 436 | i = (reg - R200_PP_CUBIC_FACES_0) / 32; |
438 | for (face = 0; face < 4; face++) { | 437 | for (face = 0; face < 4; face++) { |
439 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); | 438 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index bb151ecdf8fc..1ebea8cc8c93 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -697,17 +697,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
697 | struct radeon_cs_packet *pkt, | 697 | struct radeon_cs_packet *pkt, |
698 | unsigned idx, unsigned reg) | 698 | unsigned idx, unsigned reg) |
699 | { | 699 | { |
700 | struct radeon_cs_chunk *ib_chunk; | ||
701 | struct radeon_cs_reloc *reloc; | 700 | struct radeon_cs_reloc *reloc; |
702 | struct r100_cs_track *track; | 701 | struct r100_cs_track *track; |
703 | volatile uint32_t *ib; | 702 | volatile uint32_t *ib; |
704 | uint32_t tmp, tile_flags = 0; | 703 | uint32_t tmp, tile_flags = 0; |
705 | unsigned i; | 704 | unsigned i; |
706 | int r; | 705 | int r; |
706 | u32 idx_value; | ||
707 | 707 | ||
708 | ib = p->ib->ptr; | 708 | ib = p->ib->ptr; |
709 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
710 | track = (struct r100_cs_track *)p->track; | 709 | track = (struct r100_cs_track *)p->track; |
710 | idx_value = radeon_get_ib_value(p, idx); | ||
711 | |||
711 | switch(reg) { | 712 | switch(reg) { |
712 | case AVIVO_D1MODE_VLINE_START_END: | 713 | case AVIVO_D1MODE_VLINE_START_END: |
713 | case RADEON_CRTC_GUI_TRIG_VLINE: | 714 | case RADEON_CRTC_GUI_TRIG_VLINE: |
@@ -738,8 +739,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
738 | return r; | 739 | return r; |
739 | } | 740 | } |
740 | track->cb[i].robj = reloc->robj; | 741 | track->cb[i].robj = reloc->robj; |
741 | track->cb[i].offset = ib_chunk->kdata[idx]; | 742 | track->cb[i].offset = idx_value; |
742 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 743 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
743 | break; | 744 | break; |
744 | case R300_ZB_DEPTHOFFSET: | 745 | case R300_ZB_DEPTHOFFSET: |
745 | r = r100_cs_packet_next_reloc(p, &reloc); | 746 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -750,8 +751,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
750 | return r; | 751 | return r; |
751 | } | 752 | } |
752 | track->zb.robj = reloc->robj; | 753 | track->zb.robj = reloc->robj; |
753 | track->zb.offset = ib_chunk->kdata[idx]; | 754 | track->zb.offset = idx_value; |
754 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 755 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
755 | break; | 756 | break; |
756 | case R300_TX_OFFSET_0: | 757 | case R300_TX_OFFSET_0: |
757 | case R300_TX_OFFSET_0+4: | 758 | case R300_TX_OFFSET_0+4: |
@@ -777,32 +778,32 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
777 | r100_cs_dump_packet(p, pkt); | 778 | r100_cs_dump_packet(p, pkt); |
778 | return r; | 779 | return r; |
779 | } | 780 | } |
780 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 781 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
781 | track->textures[i].robj = reloc->robj; | 782 | track->textures[i].robj = reloc->robj; |
782 | break; | 783 | break; |
783 | /* Tracked registers */ | 784 | /* Tracked registers */ |
784 | case 0x2084: | 785 | case 0x2084: |
785 | /* VAP_VF_CNTL */ | 786 | /* VAP_VF_CNTL */ |
786 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 787 | track->vap_vf_cntl = idx_value; |
787 | break; | 788 | break; |
788 | case 0x20B4: | 789 | case 0x20B4: |
789 | /* VAP_VTX_SIZE */ | 790 | /* VAP_VTX_SIZE */ |
790 | track->vtx_size = ib_chunk->kdata[idx] & 0x7F; | 791 | track->vtx_size = idx_value & 0x7F; |
791 | break; | 792 | break; |
792 | case 0x2134: | 793 | case 0x2134: |
793 | /* VAP_VF_MAX_VTX_INDX */ | 794 | /* VAP_VF_MAX_VTX_INDX */ |
794 | track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; | 795 | track->max_indx = idx_value & 0x00FFFFFFUL; |
795 | break; | 796 | break; |
796 | case 0x43E4: | 797 | case 0x43E4: |
797 | /* SC_SCISSOR1 */ | 798 | /* SC_SCISSOR1 */ |
798 | track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1; | 799 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; |
799 | if (p->rdev->family < CHIP_RV515) { | 800 | if (p->rdev->family < CHIP_RV515) { |
800 | track->maxy -= 1440; | 801 | track->maxy -= 1440; |
801 | } | 802 | } |
802 | break; | 803 | break; |
803 | case 0x4E00: | 804 | case 0x4E00: |
804 | /* RB3D_CCTL */ | 805 | /* RB3D_CCTL */ |
805 | track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1; | 806 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; |
806 | break; | 807 | break; |
807 | case 0x4E38: | 808 | case 0x4E38: |
808 | case 0x4E3C: | 809 | case 0x4E3C: |
@@ -825,13 +826,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
825 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 826 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
826 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; | 827 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; |
827 | 828 | ||
828 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | 829 | tmp = idx_value & ~(0x7 << 16); |
829 | tmp |= tile_flags; | 830 | tmp |= tile_flags; |
830 | ib[idx] = tmp; | 831 | ib[idx] = tmp; |
831 | 832 | ||
832 | i = (reg - 0x4E38) >> 2; | 833 | i = (reg - 0x4E38) >> 2; |
833 | track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; | 834 | track->cb[i].pitch = idx_value & 0x3FFE; |
834 | switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { | 835 | switch (((idx_value >> 21) & 0xF)) { |
835 | case 9: | 836 | case 9: |
836 | case 11: | 837 | case 11: |
837 | case 12: | 838 | case 12: |
@@ -854,13 +855,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
854 | break; | 855 | break; |
855 | default: | 856 | default: |
856 | DRM_ERROR("Invalid color buffer format (%d) !\n", | 857 | DRM_ERROR("Invalid color buffer format (%d) !\n", |
857 | ((ib_chunk->kdata[idx] >> 21) & 0xF)); | 858 | ((idx_value >> 21) & 0xF)); |
858 | return -EINVAL; | 859 | return -EINVAL; |
859 | } | 860 | } |
860 | break; | 861 | break; |
861 | case 0x4F00: | 862 | case 0x4F00: |
862 | /* ZB_CNTL */ | 863 | /* ZB_CNTL */ |
863 | if (ib_chunk->kdata[idx] & 2) { | 864 | if (idx_value & 2) { |
864 | track->z_enabled = true; | 865 | track->z_enabled = true; |
865 | } else { | 866 | } else { |
866 | track->z_enabled = false; | 867 | track->z_enabled = false; |
@@ -868,7 +869,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
868 | break; | 869 | break; |
869 | case 0x4F10: | 870 | case 0x4F10: |
870 | /* ZB_FORMAT */ | 871 | /* ZB_FORMAT */ |
871 | switch ((ib_chunk->kdata[idx] & 0xF)) { | 872 | switch ((idx_value & 0xF)) { |
872 | case 0: | 873 | case 0: |
873 | case 1: | 874 | case 1: |
874 | track->zb.cpp = 2; | 875 | track->zb.cpp = 2; |
@@ -878,7 +879,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
878 | break; | 879 | break; |
879 | default: | 880 | default: |
880 | DRM_ERROR("Invalid z buffer format (%d) !\n", | 881 | DRM_ERROR("Invalid z buffer format (%d) !\n", |
881 | (ib_chunk->kdata[idx] & 0xF)); | 882 | (idx_value & 0xF)); |
882 | return -EINVAL; | 883 | return -EINVAL; |
883 | } | 884 | } |
884 | break; | 885 | break; |
@@ -897,17 +898,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
897 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 898 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
898 | tile_flags |= R300_DEPTHMICROTILE_TILED;; | 899 | tile_flags |= R300_DEPTHMICROTILE_TILED;; |
899 | 900 | ||
900 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | 901 | tmp = idx_value & ~(0x7 << 16); |
901 | tmp |= tile_flags; | 902 | tmp |= tile_flags; |
902 | ib[idx] = tmp; | 903 | ib[idx] = tmp; |
903 | 904 | ||
904 | track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; | 905 | track->zb.pitch = idx_value & 0x3FFC; |
905 | break; | 906 | break; |
906 | case 0x4104: | 907 | case 0x4104: |
907 | for (i = 0; i < 16; i++) { | 908 | for (i = 0; i < 16; i++) { |
908 | bool enabled; | 909 | bool enabled; |
909 | 910 | ||
910 | enabled = !!(ib_chunk->kdata[idx] & (1 << i)); | 911 | enabled = !!(idx_value & (1 << i)); |
911 | track->textures[i].enabled = enabled; | 912 | track->textures[i].enabled = enabled; |
912 | } | 913 | } |
913 | break; | 914 | break; |
@@ -929,9 +930,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
929 | case 0x44FC: | 930 | case 0x44FC: |
930 | /* TX_FORMAT1_[0-15] */ | 931 | /* TX_FORMAT1_[0-15] */ |
931 | i = (reg - 0x44C0) >> 2; | 932 | i = (reg - 0x44C0) >> 2; |
932 | tmp = (ib_chunk->kdata[idx] >> 25) & 0x3; | 933 | tmp = (idx_value >> 25) & 0x3; |
933 | track->textures[i].tex_coord_type = tmp; | 934 | track->textures[i].tex_coord_type = tmp; |
934 | switch ((ib_chunk->kdata[idx] & 0x1F)) { | 935 | switch ((idx_value & 0x1F)) { |
935 | case R300_TX_FORMAT_X8: | 936 | case R300_TX_FORMAT_X8: |
936 | case R300_TX_FORMAT_Y4X4: | 937 | case R300_TX_FORMAT_Y4X4: |
937 | case R300_TX_FORMAT_Z3Y3X2: | 938 | case R300_TX_FORMAT_Z3Y3X2: |
@@ -971,7 +972,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
971 | break; | 972 | break; |
972 | default: | 973 | default: |
973 | DRM_ERROR("Invalid texture format %u\n", | 974 | DRM_ERROR("Invalid texture format %u\n", |
974 | (ib_chunk->kdata[idx] & 0x1F)); | 975 | (idx_value & 0x1F)); |
975 | return -EINVAL; | 976 | return -EINVAL; |
976 | break; | 977 | break; |
977 | } | 978 | } |
@@ -994,11 +995,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
994 | case 0x443C: | 995 | case 0x443C: |
995 | /* TX_FILTER0_[0-15] */ | 996 | /* TX_FILTER0_[0-15] */ |
996 | i = (reg - 0x4400) >> 2; | 997 | i = (reg - 0x4400) >> 2; |
997 | tmp = ib_chunk->kdata[idx] & 0x7; | 998 | tmp = idx_value & 0x7; |
998 | if (tmp == 2 || tmp == 4 || tmp == 6) { | 999 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
999 | track->textures[i].roundup_w = false; | 1000 | track->textures[i].roundup_w = false; |
1000 | } | 1001 | } |
1001 | tmp = (ib_chunk->kdata[idx] >> 3) & 0x7; | 1002 | tmp = (idx_value >> 3) & 0x7; |
1002 | if (tmp == 2 || tmp == 4 || tmp == 6) { | 1003 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
1003 | track->textures[i].roundup_h = false; | 1004 | track->textures[i].roundup_h = false; |
1004 | } | 1005 | } |
@@ -1021,12 +1022,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1021 | case 0x453C: | 1022 | case 0x453C: |
1022 | /* TX_FORMAT2_[0-15] */ | 1023 | /* TX_FORMAT2_[0-15] */ |
1023 | i = (reg - 0x4500) >> 2; | 1024 | i = (reg - 0x4500) >> 2; |
1024 | tmp = ib_chunk->kdata[idx] & 0x3FFF; | 1025 | tmp = idx_value & 0x3FFF; |
1025 | track->textures[i].pitch = tmp + 1; | 1026 | track->textures[i].pitch = tmp + 1; |
1026 | if (p->rdev->family >= CHIP_RV515) { | 1027 | if (p->rdev->family >= CHIP_RV515) { |
1027 | tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11; | 1028 | tmp = ((idx_value >> 15) & 1) << 11; |
1028 | track->textures[i].width_11 = tmp; | 1029 | track->textures[i].width_11 = tmp; |
1029 | tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11; | 1030 | tmp = ((idx_value >> 16) & 1) << 11; |
1030 | track->textures[i].height_11 = tmp; | 1031 | track->textures[i].height_11 = tmp; |
1031 | } | 1032 | } |
1032 | break; | 1033 | break; |
@@ -1048,15 +1049,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1048 | case 0x44BC: | 1049 | case 0x44BC: |
1049 | /* TX_FORMAT0_[0-15] */ | 1050 | /* TX_FORMAT0_[0-15] */ |
1050 | i = (reg - 0x4480) >> 2; | 1051 | i = (reg - 0x4480) >> 2; |
1051 | tmp = ib_chunk->kdata[idx] & 0x7FF; | 1052 | tmp = idx_value & 0x7FF; |
1052 | track->textures[i].width = tmp + 1; | 1053 | track->textures[i].width = tmp + 1; |
1053 | tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF; | 1054 | tmp = (idx_value >> 11) & 0x7FF; |
1054 | track->textures[i].height = tmp + 1; | 1055 | track->textures[i].height = tmp + 1; |
1055 | tmp = (ib_chunk->kdata[idx] >> 26) & 0xF; | 1056 | tmp = (idx_value >> 26) & 0xF; |
1056 | track->textures[i].num_levels = tmp; | 1057 | track->textures[i].num_levels = tmp; |
1057 | tmp = ib_chunk->kdata[idx] & (1 << 31); | 1058 | tmp = idx_value & (1 << 31); |
1058 | track->textures[i].use_pitch = !!tmp; | 1059 | track->textures[i].use_pitch = !!tmp; |
1059 | tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; | 1060 | tmp = (idx_value >> 22) & 0xF; |
1060 | track->textures[i].txdepth = tmp; | 1061 | track->textures[i].txdepth = tmp; |
1061 | break; | 1062 | break; |
1062 | case R300_ZB_ZPASS_ADDR: | 1063 | case R300_ZB_ZPASS_ADDR: |
@@ -1067,7 +1068,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1067 | r100_cs_dump_packet(p, pkt); | 1068 | r100_cs_dump_packet(p, pkt); |
1068 | return r; | 1069 | return r; |
1069 | } | 1070 | } |
1070 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1071 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1071 | break; | 1072 | break; |
1072 | case 0x4be8: | 1073 | case 0x4be8: |
1073 | /* valid register only on RV530 */ | 1074 | /* valid register only on RV530 */ |
@@ -1085,60 +1086,20 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1085 | static int r300_packet3_check(struct radeon_cs_parser *p, | 1086 | static int r300_packet3_check(struct radeon_cs_parser *p, |
1086 | struct radeon_cs_packet *pkt) | 1087 | struct radeon_cs_packet *pkt) |
1087 | { | 1088 | { |
1088 | struct radeon_cs_chunk *ib_chunk; | ||
1089 | |||
1090 | struct radeon_cs_reloc *reloc; | 1089 | struct radeon_cs_reloc *reloc; |
1091 | struct r100_cs_track *track; | 1090 | struct r100_cs_track *track; |
1092 | volatile uint32_t *ib; | 1091 | volatile uint32_t *ib; |
1093 | unsigned idx; | 1092 | unsigned idx; |
1094 | unsigned i, c; | ||
1095 | int r; | 1093 | int r; |
1096 | 1094 | ||
1097 | ib = p->ib->ptr; | 1095 | ib = p->ib->ptr; |
1098 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
1099 | idx = pkt->idx + 1; | 1096 | idx = pkt->idx + 1; |
1100 | track = (struct r100_cs_track *)p->track; | 1097 | track = (struct r100_cs_track *)p->track; |
1101 | switch(pkt->opcode) { | 1098 | switch(pkt->opcode) { |
1102 | case PACKET3_3D_LOAD_VBPNTR: | 1099 | case PACKET3_3D_LOAD_VBPNTR: |
1103 | c = ib_chunk->kdata[idx++] & 0x1F; | 1100 | r = r100_packet3_load_vbpntr(p, pkt, idx); |
1104 | track->num_arrays = c; | 1101 | if (r) |
1105 | for (i = 0; i < (c - 1); i+=2, idx+=3) { | 1102 | return r; |
1106 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1107 | if (r) { | ||
1108 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1109 | pkt->opcode); | ||
1110 | r100_cs_dump_packet(p, pkt); | ||
1111 | return r; | ||
1112 | } | ||
1113 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | ||
1114 | track->arrays[i + 0].robj = reloc->robj; | ||
1115 | track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; | ||
1116 | track->arrays[i + 0].esize &= 0x7F; | ||
1117 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1118 | if (r) { | ||
1119 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1120 | pkt->opcode); | ||
1121 | r100_cs_dump_packet(p, pkt); | ||
1122 | return r; | ||
1123 | } | ||
1124 | ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset); | ||
1125 | track->arrays[i + 1].robj = reloc->robj; | ||
1126 | track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24; | ||
1127 | track->arrays[i + 1].esize &= 0x7F; | ||
1128 | } | ||
1129 | if (c & 1) { | ||
1130 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1131 | if (r) { | ||
1132 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1133 | pkt->opcode); | ||
1134 | r100_cs_dump_packet(p, pkt); | ||
1135 | return r; | ||
1136 | } | ||
1137 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | ||
1138 | track->arrays[i + 0].robj = reloc->robj; | ||
1139 | track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; | ||
1140 | track->arrays[i + 0].esize &= 0x7F; | ||
1141 | } | ||
1142 | break; | 1103 | break; |
1143 | case PACKET3_INDX_BUFFER: | 1104 | case PACKET3_INDX_BUFFER: |
1144 | r = r100_cs_packet_next_reloc(p, &reloc); | 1105 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1147,7 +1108,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1147 | r100_cs_dump_packet(p, pkt); | 1108 | r100_cs_dump_packet(p, pkt); |
1148 | return r; | 1109 | return r; |
1149 | } | 1110 | } |
1150 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | 1111 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); |
1151 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); | 1112 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); |
1152 | if (r) { | 1113 | if (r) { |
1153 | return r; | 1114 | return r; |
@@ -1158,11 +1119,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1158 | /* Number of dwords is vtx_size * (num_vertices - 1) | 1119 | /* Number of dwords is vtx_size * (num_vertices - 1) |
1159 | * PRIM_WALK must be equal to 3 vertex data in embedded | 1120 | * PRIM_WALK must be equal to 3 vertex data in embedded |
1160 | * in cmd stream */ | 1121 | * in cmd stream */ |
1161 | if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { | 1122 | if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { |
1162 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); | 1123 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
1163 | return -EINVAL; | 1124 | return -EINVAL; |
1164 | } | 1125 | } |
1165 | track->vap_vf_cntl = ib_chunk->kdata[idx+1]; | 1126 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1166 | track->immd_dwords = pkt->count - 1; | 1127 | track->immd_dwords = pkt->count - 1; |
1167 | r = r100_cs_track_check(p->rdev, track); | 1128 | r = r100_cs_track_check(p->rdev, track); |
1168 | if (r) { | 1129 | if (r) { |
@@ -1173,11 +1134,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1173 | /* Number of dwords is vtx_size * (num_vertices - 1) | 1134 | /* Number of dwords is vtx_size * (num_vertices - 1) |
1174 | * PRIM_WALK must be equal to 3 vertex data in embedded | 1135 | * PRIM_WALK must be equal to 3 vertex data in embedded |
1175 | * in cmd stream */ | 1136 | * in cmd stream */ |
1176 | if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { | 1137 | if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { |
1177 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); | 1138 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
1178 | return -EINVAL; | 1139 | return -EINVAL; |
1179 | } | 1140 | } |
1180 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1141 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1181 | track->immd_dwords = pkt->count; | 1142 | track->immd_dwords = pkt->count; |
1182 | r = r100_cs_track_check(p->rdev, track); | 1143 | r = r100_cs_track_check(p->rdev, track); |
1183 | if (r) { | 1144 | if (r) { |
@@ -1185,28 +1146,28 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1185 | } | 1146 | } |
1186 | break; | 1147 | break; |
1187 | case PACKET3_3D_DRAW_VBUF: | 1148 | case PACKET3_3D_DRAW_VBUF: |
1188 | track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; | 1149 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1189 | r = r100_cs_track_check(p->rdev, track); | 1150 | r = r100_cs_track_check(p->rdev, track); |
1190 | if (r) { | 1151 | if (r) { |
1191 | return r; | 1152 | return r; |
1192 | } | 1153 | } |
1193 | break; | 1154 | break; |
1194 | case PACKET3_3D_DRAW_VBUF_2: | 1155 | case PACKET3_3D_DRAW_VBUF_2: |
1195 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1156 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1196 | r = r100_cs_track_check(p->rdev, track); | 1157 | r = r100_cs_track_check(p->rdev, track); |
1197 | if (r) { | 1158 | if (r) { |
1198 | return r; | 1159 | return r; |
1199 | } | 1160 | } |
1200 | break; | 1161 | break; |
1201 | case PACKET3_3D_DRAW_INDX: | 1162 | case PACKET3_3D_DRAW_INDX: |
1202 | track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; | 1163 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1203 | r = r100_cs_track_check(p->rdev, track); | 1164 | r = r100_cs_track_check(p->rdev, track); |
1204 | if (r) { | 1165 | if (r) { |
1205 | return r; | 1166 | return r; |
1206 | } | 1167 | } |
1207 | break; | 1168 | break; |
1208 | case PACKET3_3D_DRAW_INDX_2: | 1169 | case PACKET3_3D_DRAW_INDX_2: |
1209 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1170 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1210 | r = r100_cs_track_check(p->rdev, track); | 1171 | r = r100_cs_track_check(p->rdev, track); |
1211 | if (r) { | 1172 | if (r) { |
1212 | return r; | 1173 | return r; |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 33b89cd8743e..c629b5aa4a3f 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -57,7 +57,7 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p, | |||
57 | idx, ib_chunk->length_dw); | 57 | idx, ib_chunk->length_dw); |
58 | return -EINVAL; | 58 | return -EINVAL; |
59 | } | 59 | } |
60 | header = ib_chunk->kdata[idx]; | 60 | header = radeon_get_ib_value(p, idx); |
61 | pkt->idx = idx; | 61 | pkt->idx = idx; |
62 | pkt->type = CP_PACKET_GET_TYPE(header); | 62 | pkt->type = CP_PACKET_GET_TYPE(header); |
63 | pkt->count = CP_PACKET_GET_COUNT(header); | 63 | pkt->count = CP_PACKET_GET_COUNT(header); |
@@ -98,7 +98,6 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p, | |||
98 | static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, | 98 | static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, |
99 | struct radeon_cs_reloc **cs_reloc) | 99 | struct radeon_cs_reloc **cs_reloc) |
100 | { | 100 | { |
101 | struct radeon_cs_chunk *ib_chunk; | ||
102 | struct radeon_cs_chunk *relocs_chunk; | 101 | struct radeon_cs_chunk *relocs_chunk; |
103 | struct radeon_cs_packet p3reloc; | 102 | struct radeon_cs_packet p3reloc; |
104 | unsigned idx; | 103 | unsigned idx; |
@@ -109,7 +108,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, | |||
109 | return -EINVAL; | 108 | return -EINVAL; |
110 | } | 109 | } |
111 | *cs_reloc = NULL; | 110 | *cs_reloc = NULL; |
112 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
113 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | 111 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
114 | r = r600_cs_packet_parse(p, &p3reloc, p->idx); | 112 | r = r600_cs_packet_parse(p, &p3reloc, p->idx); |
115 | if (r) { | 113 | if (r) { |
@@ -121,7 +119,7 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, | |||
121 | p3reloc.idx); | 119 | p3reloc.idx); |
122 | return -EINVAL; | 120 | return -EINVAL; |
123 | } | 121 | } |
124 | idx = ib_chunk->kdata[p3reloc.idx + 1]; | 122 | idx = radeon_get_ib_value(p, p3reloc.idx + 1); |
125 | if (idx >= relocs_chunk->length_dw) { | 123 | if (idx >= relocs_chunk->length_dw) { |
126 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", | 124 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", |
127 | idx, relocs_chunk->length_dw); | 125 | idx, relocs_chunk->length_dw); |
@@ -146,7 +144,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, | |||
146 | static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | 144 | static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, |
147 | struct radeon_cs_reloc **cs_reloc) | 145 | struct radeon_cs_reloc **cs_reloc) |
148 | { | 146 | { |
149 | struct radeon_cs_chunk *ib_chunk; | ||
150 | struct radeon_cs_chunk *relocs_chunk; | 147 | struct radeon_cs_chunk *relocs_chunk; |
151 | struct radeon_cs_packet p3reloc; | 148 | struct radeon_cs_packet p3reloc; |
152 | unsigned idx; | 149 | unsigned idx; |
@@ -157,7 +154,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
157 | return -EINVAL; | 154 | return -EINVAL; |
158 | } | 155 | } |
159 | *cs_reloc = NULL; | 156 | *cs_reloc = NULL; |
160 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
161 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | 157 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
162 | r = r600_cs_packet_parse(p, &p3reloc, p->idx); | 158 | r = r600_cs_packet_parse(p, &p3reloc, p->idx); |
163 | if (r) { | 159 | if (r) { |
@@ -169,7 +165,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
169 | p3reloc.idx); | 165 | p3reloc.idx); |
170 | return -EINVAL; | 166 | return -EINVAL; |
171 | } | 167 | } |
172 | idx = ib_chunk->kdata[p3reloc.idx + 1]; | 168 | idx = radeon_get_ib_value(p, p3reloc.idx + 1); |
173 | if (idx >= relocs_chunk->length_dw) { | 169 | if (idx >= relocs_chunk->length_dw) { |
174 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", | 170 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", |
175 | idx, relocs_chunk->length_dw); | 171 | idx, relocs_chunk->length_dw); |
@@ -218,7 +214,6 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p, | |||
218 | static int r600_packet3_check(struct radeon_cs_parser *p, | 214 | static int r600_packet3_check(struct radeon_cs_parser *p, |
219 | struct radeon_cs_packet *pkt) | 215 | struct radeon_cs_packet *pkt) |
220 | { | 216 | { |
221 | struct radeon_cs_chunk *ib_chunk; | ||
222 | struct radeon_cs_reloc *reloc; | 217 | struct radeon_cs_reloc *reloc; |
223 | volatile u32 *ib; | 218 | volatile u32 *ib; |
224 | unsigned idx; | 219 | unsigned idx; |
@@ -227,8 +222,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
227 | int r; | 222 | int r; |
228 | 223 | ||
229 | ib = p->ib->ptr; | 224 | ib = p->ib->ptr; |
230 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
231 | idx = pkt->idx + 1; | 225 | idx = pkt->idx + 1; |
226 | |||
232 | switch (pkt->opcode) { | 227 | switch (pkt->opcode) { |
233 | case PACKET3_START_3D_CMDBUF: | 228 | case PACKET3_START_3D_CMDBUF: |
234 | if (p->family >= CHIP_RV770 || pkt->count) { | 229 | if (p->family >= CHIP_RV770 || pkt->count) { |
@@ -281,7 +276,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
281 | return -EINVAL; | 276 | return -EINVAL; |
282 | } | 277 | } |
283 | /* bit 4 is reg (0) or mem (1) */ | 278 | /* bit 4 is reg (0) or mem (1) */ |
284 | if (ib_chunk->kdata[idx+0] & 0x10) { | 279 | if (radeon_get_ib_value(p, idx) & 0x10) { |
285 | r = r600_cs_packet_next_reloc(p, &reloc); | 280 | r = r600_cs_packet_next_reloc(p, &reloc); |
286 | if (r) { | 281 | if (r) { |
287 | DRM_ERROR("bad WAIT_REG_MEM\n"); | 282 | DRM_ERROR("bad WAIT_REG_MEM\n"); |
@@ -297,8 +292,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
297 | return -EINVAL; | 292 | return -EINVAL; |
298 | } | 293 | } |
299 | /* 0xffffffff/0x0 is flush all cache flag */ | 294 | /* 0xffffffff/0x0 is flush all cache flag */ |
300 | if (ib_chunk->kdata[idx+1] != 0xffffffff || | 295 | if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || |
301 | ib_chunk->kdata[idx+2] != 0) { | 296 | radeon_get_ib_value(p, idx + 2) != 0) { |
302 | r = r600_cs_packet_next_reloc(p, &reloc); | 297 | r = r600_cs_packet_next_reloc(p, &reloc); |
303 | if (r) { | 298 | if (r) { |
304 | DRM_ERROR("bad SURFACE_SYNC\n"); | 299 | DRM_ERROR("bad SURFACE_SYNC\n"); |
@@ -639,7 +634,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, | |||
639 | * uncached). */ | 634 | * uncached). */ |
640 | ib_chunk = &parser.chunks[parser.chunk_ib_idx]; | 635 | ib_chunk = &parser.chunks[parser.chunk_ib_idx]; |
641 | parser.ib->length_dw = ib_chunk->length_dw; | 636 | parser.ib->length_dw = ib_chunk->length_dw; |
642 | memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4); | ||
643 | *l = parser.ib->length_dw; | 637 | *l = parser.ib->length_dw; |
644 | r = r600_cs_parse(&parser); | 638 | r = r600_cs_parse(&parser); |
645 | if (r) { | 639 | if (r) { |
@@ -647,6 +641,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, | |||
647 | r600_cs_parser_fini(&parser, r); | 641 | r600_cs_parser_fini(&parser, r); |
648 | return r; | 642 | return r; |
649 | } | 643 | } |
644 | r = radeon_cs_finish_pages(&parser); | ||
645 | if (r) { | ||
646 | DRM_ERROR("Invalid command stream !\n"); | ||
647 | r600_cs_parser_fini(&parser, r); | ||
648 | return r; | ||
649 | } | ||
650 | r600_cs_parser_fini(&parser, r); | 650 | r600_cs_parser_fini(&parser, r); |
651 | return r; | 651 | return r; |
652 | } | 652 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d5de53e06cec..7e34e4376f95 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -342,7 +342,7 @@ struct radeon_ib { | |||
342 | unsigned long idx; | 342 | unsigned long idx; |
343 | uint64_t gpu_addr; | 343 | uint64_t gpu_addr; |
344 | struct radeon_fence *fence; | 344 | struct radeon_fence *fence; |
345 | volatile uint32_t *ptr; | 345 | uint32_t *ptr; |
346 | uint32_t length_dw; | 346 | uint32_t length_dw; |
347 | }; | 347 | }; |
348 | 348 | ||
@@ -415,7 +415,12 @@ struct radeon_cs_reloc { | |||
415 | struct radeon_cs_chunk { | 415 | struct radeon_cs_chunk { |
416 | uint32_t chunk_id; | 416 | uint32_t chunk_id; |
417 | uint32_t length_dw; | 417 | uint32_t length_dw; |
418 | int kpage_idx[2]; | ||
419 | uint32_t *kpage[2]; | ||
418 | uint32_t *kdata; | 420 | uint32_t *kdata; |
421 | void __user *user_ptr; | ||
422 | int last_copied_page; | ||
423 | int last_page_index; | ||
419 | }; | 424 | }; |
420 | 425 | ||
421 | struct radeon_cs_parser { | 426 | struct radeon_cs_parser { |
@@ -438,8 +443,38 @@ struct radeon_cs_parser { | |||
438 | struct radeon_ib *ib; | 443 | struct radeon_ib *ib; |
439 | void *track; | 444 | void *track; |
440 | unsigned family; | 445 | unsigned family; |
446 | int parser_error; | ||
441 | }; | 447 | }; |
442 | 448 | ||
449 | extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); | ||
450 | extern int radeon_cs_finish_pages(struct radeon_cs_parser *p); | ||
451 | |||
452 | |||
453 | static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) | ||
454 | { | ||
455 | struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; | ||
456 | u32 pg_idx, pg_offset; | ||
457 | u32 idx_value = 0; | ||
458 | int new_page; | ||
459 | |||
460 | pg_idx = (idx * 4) / PAGE_SIZE; | ||
461 | pg_offset = (idx * 4) % PAGE_SIZE; | ||
462 | |||
463 | if (ibc->kpage_idx[0] == pg_idx) | ||
464 | return ibc->kpage[0][pg_offset/4]; | ||
465 | if (ibc->kpage_idx[1] == pg_idx) | ||
466 | return ibc->kpage[1][pg_offset/4]; | ||
467 | |||
468 | new_page = radeon_cs_update_pages(p, pg_idx); | ||
469 | if (new_page < 0) { | ||
470 | p->parser_error = new_page; | ||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | idx_value = ibc->kpage[new_page][pg_offset/4]; | ||
475 | return idx_value; | ||
476 | } | ||
477 | |||
443 | struct radeon_cs_packet { | 478 | struct radeon_cs_packet { |
444 | unsigned idx; | 479 | unsigned idx; |
445 | unsigned type; | 480 | unsigned type; |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 12f5990c2d2a..dea8acf88865 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -142,15 +142,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
142 | } | 142 | } |
143 | 143 | ||
144 | p->chunks[i].length_dw = user_chunk.length_dw; | 144 | p->chunks[i].length_dw = user_chunk.length_dw; |
145 | cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; | 145 | p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; |
146 | 146 | ||
147 | size = p->chunks[i].length_dw * sizeof(uint32_t); | 147 | cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; |
148 | p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); | 148 | if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) { |
149 | if (p->chunks[i].kdata == NULL) { | 149 | size = p->chunks[i].length_dw * sizeof(uint32_t); |
150 | return -ENOMEM; | 150 | p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); |
151 | } | 151 | if (p->chunks[i].kdata == NULL) { |
152 | if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) { | 152 | return -ENOMEM; |
153 | return -EFAULT; | 153 | } |
154 | if (DRM_COPY_FROM_USER(p->chunks[i].kdata, | ||
155 | p->chunks[i].user_ptr, size)) { | ||
156 | return -EFAULT; | ||
157 | } | ||
158 | } else { | ||
159 | p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
160 | p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
161 | if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) { | ||
162 | kfree(p->chunks[i].kpage[0]); | ||
163 | kfree(p->chunks[i].kpage[1]); | ||
164 | return -ENOMEM; | ||
165 | } | ||
166 | p->chunks[i].kpage_idx[0] = -1; | ||
167 | p->chunks[i].kpage_idx[1] = -1; | ||
168 | p->chunks[i].last_copied_page = -1; | ||
169 | p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE; | ||
154 | } | 170 | } |
155 | } | 171 | } |
156 | if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { | 172 | if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { |
@@ -190,6 +206,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
190 | kfree(parser->relocs_ptr); | 206 | kfree(parser->relocs_ptr); |
191 | for (i = 0; i < parser->nchunks; i++) { | 207 | for (i = 0; i < parser->nchunks; i++) { |
192 | kfree(parser->chunks[i].kdata); | 208 | kfree(parser->chunks[i].kdata); |
209 | kfree(parser->chunks[i].kpage[0]); | ||
210 | kfree(parser->chunks[i].kpage[1]); | ||
193 | } | 211 | } |
194 | kfree(parser->chunks); | 212 | kfree(parser->chunks); |
195 | kfree(parser->chunks_array); | 213 | kfree(parser->chunks_array); |
@@ -238,8 +256,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
238 | * uncached). */ | 256 | * uncached). */ |
239 | ib_chunk = &parser.chunks[parser.chunk_ib_idx]; | 257 | ib_chunk = &parser.chunks[parser.chunk_ib_idx]; |
240 | parser.ib->length_dw = ib_chunk->length_dw; | 258 | parser.ib->length_dw = ib_chunk->length_dw; |
241 | memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4); | ||
242 | r = radeon_cs_parse(&parser); | 259 | r = radeon_cs_parse(&parser); |
260 | if (r || parser.parser_error) { | ||
261 | DRM_ERROR("Invalid command stream !\n"); | ||
262 | radeon_cs_parser_fini(&parser, r); | ||
263 | mutex_unlock(&rdev->cs_mutex); | ||
264 | return r; | ||
265 | } | ||
266 | r = radeon_cs_finish_pages(&parser); | ||
243 | if (r) { | 267 | if (r) { |
244 | DRM_ERROR("Invalid command stream !\n"); | 268 | DRM_ERROR("Invalid command stream !\n"); |
245 | radeon_cs_parser_fini(&parser, r); | 269 | radeon_cs_parser_fini(&parser, r); |
@@ -254,3 +278,66 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
254 | mutex_unlock(&rdev->cs_mutex); | 278 | mutex_unlock(&rdev->cs_mutex); |
255 | return r; | 279 | return r; |
256 | } | 280 | } |
281 | |||
282 | int radeon_cs_finish_pages(struct radeon_cs_parser *p) | ||
283 | { | ||
284 | struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; | ||
285 | int i; | ||
286 | int size = PAGE_SIZE; | ||
287 | |||
288 | for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) { | ||
289 | if (i == ibc->last_page_index) { | ||
290 | size = (ibc->length_dw * 4) % PAGE_SIZE; | ||
291 | if (size == 0) | ||
292 | size = PAGE_SIZE; | ||
293 | } | ||
294 | |||
295 | if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)), | ||
296 | ibc->user_ptr + (i * PAGE_SIZE), | ||
297 | size)) | ||
298 | return -EFAULT; | ||
299 | } | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx) | ||
304 | { | ||
305 | int new_page; | ||
306 | int num_extra_pages; | ||
307 | struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; | ||
308 | int i; | ||
309 | int size = PAGE_SIZE; | ||
310 | |||
311 | num_extra_pages = (pg_idx - ibc->last_copied_page - 1); | ||
312 | for (i = ibc->last_copied_page + 1; i < ibc->last_copied_page + num_extra_pages; i++) { | ||
313 | if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)), | ||
314 | ibc->user_ptr + (i * PAGE_SIZE), | ||
315 | PAGE_SIZE)) { | ||
316 | p->parser_error = -EFAULT; | ||
317 | return 0; | ||
318 | } | ||
319 | } | ||
320 | |||
321 | new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1; | ||
322 | |||
323 | if (pg_idx == ibc->last_page_index) { | ||
324 | size = (ibc->length_dw * 4) % PAGE_SIZE; | ||
325 | if (size == 0) | ||
326 | size = PAGE_SIZE; | ||
327 | } | ||
328 | |||
329 | if (DRM_COPY_FROM_USER(ibc->kpage[new_page], | ||
330 | ibc->user_ptr + (pg_idx * PAGE_SIZE), | ||
331 | size)) { | ||
332 | p->parser_error = -EFAULT; | ||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | /* copy to IB here */ | ||
337 | memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size); | ||
338 | |||
339 | ibc->last_copied_page = pg_idx; | ||
340 | ibc->kpage_idx[new_page] = pg_idx; | ||
341 | |||
342 | return new_page; | ||
343 | } | ||