Diffstat (limited to 'drivers/gpu/drm/radeon/atom.c')
-rw-r--r--	drivers/gpu/drm/radeon/atom.c	91
1 files changed, 67 insertions, 24 deletions

diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 8538b88eda35..07b7ebf1f466 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -53,15 +53,17 @@
 
 typedef struct {
 	struct atom_context *ctx;
-
 	uint32_t *ps, *ws;
 	int ps_shift;
 	uint16_t start;
+	unsigned last_jump;
+	unsigned long last_jump_jiffies;
+	bool abort;
 } atom_exec_context;
 
 int atom_debug = 0;
-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
 
 static uint32_t atom_arg_mask[8] =
     { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
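Note: the three new fields in atom_exec_context carry the state for the loop
detection and abort handling introduced in the hunks below. As an editorial
gloss (the comments are not part of the kernel source), the context now reads:

	typedef struct {
		struct atom_context *ctx;
		uint32_t *ps, *ws;		/* parameter space and workspace */
		int ps_shift;
		uint16_t start;			/* start offset of the running table */
		unsigned last_jump;		/* destination of the last jump taken */
		unsigned long last_jump_jiffies;	/* jiffies when that jump was first taken */
		bool abort;			/* set to unwind execution with an error */
	} atom_exec_context;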
@@ -605,12 +607,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
 static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
 {
 	int idx = U8((*ptr)++);
+	int r = 0;
+
 	if (idx < ATOM_TABLE_NAMES_CNT)
 		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
 	else
 		SDEBUG(" table: %d\n", idx);
 	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
-		atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+		r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+	if (r) {
+		ctx->abort = true;
+	}
 }
 
 static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -674,6 +681,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
 static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
 {
 	int execute = 0, target = U16(*ptr);
+	unsigned long cjiffies;
+
 	(*ptr) += 2;
 	switch (arg) {
 	case ATOM_COND_ABOVE:
@@ -701,8 +710,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
 	if (arg != ATOM_COND_ALWAYS)
 		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
 	SDEBUG(" target: 0x%04X\n", target);
-	if (execute)
+	if (execute) {
+		if (ctx->last_jump == (ctx->start + target)) {
+			cjiffies = jiffies;
+			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+				cjiffies -= ctx->last_jump_jiffies;
+				if ((jiffies_to_msecs(cjiffies) > 1000)) {
+					DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n");
+					ctx->abort = true;
+				}
+			} else {
+				/* jiffies wrap around we will just wait a little longer */
+				ctx->last_jump_jiffies = jiffies;
+			}
+		} else {
+			ctx->last_jump = ctx->start + target;
+			ctx->last_jump_jiffies = jiffies;
+		}
 		*ptr = ctx->start + target;
+	}
 }
 
 static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
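The jump handler above aborts AtomBIOS scripts that keep jumping to the same
target for more than a second, using jiffies, time_after() and
jiffies_to_msecs() from <linux/jiffies.h>. A minimal sketch of that timeout
pattern outside atom.c, with invented names (loop_watch, loop_watch_hit) used
purely for illustration:

#include <linux/jiffies.h>
#include <linux/types.h>

struct loop_watch {
	unsigned target;	/* last jump destination seen */
	unsigned long since;	/* jiffies when that destination was first seen */
};

/* Returns true once the same destination has been looping for > 1 second. */
static bool loop_watch_hit(struct loop_watch *w, unsigned target)
{
	unsigned long now = jiffies;

	if (w->target != target) {
		/* new destination: remember it and restart the clock */
		w->target = target;
		w->since = now;
		return false;
	}
	if (!time_after(now, w->since)) {
		/* jiffies wrapped around: rebase and keep waiting */
		w->since = now;
		return false;
	}
	return jiffies_to_msecs(now - w->since) > 1000;
}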
@@ -1105,7 +1131,7 @@ static struct {
 	atom_op_shr, ATOM_ARG_MC}, {
 	atom_op_debug, 0},};
 
-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 {
 	int base = CU16(ctx->cmd_table + 4 + 2 * index);
 	int len, ws, ps, ptr;
@@ -1113,7 +1139,7 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
 	atom_exec_context ectx;
 
 	if (!base)
-		return;
+		return -EINVAL;
 
 	len = CU16(base + ATOM_CT_SIZE_PTR);
 	ws = CU8(base + ATOM_CT_WS_PTR);
@@ -1126,6 +1152,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
 	ectx.ps_shift = ps / 4;
 	ectx.start = base;
 	ectx.ps = params;
+	ectx.abort = false;
+	ectx.last_jump = 0;
 	if (ws)
 		ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
 	else
@@ -1138,6 +1166,11 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
 			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
 		else
 			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+		if (ectx.abort) {
+			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+				  base, len, ws, ps, ptr - 1);
+			return -EINVAL;
+		}
 
 		if (op < ATOM_OP_CNT && op > 0)
 			opcode_table[op].func(&ectx, &ptr,
@@ -1153,10 +1186,13 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
 
 	if (ws)
 		kfree(ectx.ws);
+	return 0;
 }
 
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
 {
+	int r;
+
 	mutex_lock(&ctx->mutex);
 	/* reset reg block */
 	ctx->reg_block = 0;
@@ -1164,8 +1200,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
 	ctx->fb_base = 0;
 	/* reset io mode */
 	ctx->io_mode = ATOM_IO_MM;
-	atom_execute_table_locked(ctx, index, params);
+	r = atom_execute_table_locked(ctx, index, params);
 	mutex_unlock(&ctx->mutex);
+	return r;
 }
 
 static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
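With this hunk, atom_execute_table() propagates the result of the locked
helper (-EINVAL for a missing table or an aborted script) instead of
discarding it. A hypothetical caller-side check (radeon_run_init_table() is an
invented name, not a function in the radeon driver) could look like:

static int radeon_run_init_table(struct atom_context *ctx, int index,
				 uint32_t *params)
{
	int r;

	r = atom_execute_table(ctx, index, params);
	if (r)
		DRM_ERROR("AtomBIOS table %d failed: %d\n", index, r);
	return r;
}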
@@ -1249,9 +1286,7 @@ int atom_asic_init(struct atom_context *ctx)
 
 	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
 		return 1;
-	atom_execute_table(ctx, ATOM_CMD_INIT, ps);
-
-	return 0;
+	return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
 }
 
 void atom_destroy(struct atom_context *ctx)
@@ -1261,12 +1296,16 @@ void atom_destroy(struct atom_context *ctx)
 	kfree(ctx);
 }
 
-void atom_parse_data_header(struct atom_context *ctx, int index,
+bool atom_parse_data_header(struct atom_context *ctx, int index,
 			    uint16_t * size, uint8_t * frev, uint8_t * crev,
 			    uint16_t * data_start)
 {
 	int offset = index * 2 + 4;
 	int idx = CU16(ctx->data_table + offset);
+	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
+
+	if (!mdt[index])
+		return false;
 
 	if (size)
 		*size = CU16(idx);
@@ -1275,38 +1314,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index,
 	if (crev)
 		*crev = CU8(idx + 3);
 	*data_start = idx;
-	return;
+	return true;
 }
 
-void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
 			   uint8_t * crev)
 {
 	int offset = index * 2 + 4;
 	int idx = CU16(ctx->cmd_table + offset);
+	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
+
+	if (!mct[index])
+		return false;
 
 	if (frev)
 		*frev = CU8(idx + 2);
 	if (crev)
 		*crev = CU8(idx + 3);
-	return;
+	return true;
 }
 
 int atom_allocate_fb_scratch(struct atom_context *ctx)
 {
 	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
 	uint16_t data_offset;
-	int usage_bytes;
+	int usage_bytes = 0;
 	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
 
-	atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
 
-	firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+		DRM_DEBUG("atom firmware requested %08x %dkb\n",
+			  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+			  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
 
-	DRM_DEBUG("atom firmware requested %08x %dkb\n",
-		  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
-		  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
-
-	usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+		usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+	}
 	if (usage_bytes == 0)
 		usage_bytes = 20 * 1024;
 	/* allocate some scratch memory */
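The atom_allocate_fb_scratch() change above illustrates the intended calling
pattern for the now bool-returning parse helpers: the table data is only
dereferenced when the header lookup succeeds. A comparable sketch for
atom_parse_cmd_header(), with an invented wrapper name (query_cmd_table_rev)
used only for illustration:

static bool query_cmd_table_rev(struct atom_context *ctx, int index)
{
	uint8_t frev, crev;

	/* false means the command table is not present in this BIOS image */
	if (!atom_parse_cmd_header(ctx, index, &frev, &crev))
		return false;

	DRM_DEBUG("cmd table %d: frev %d crev %d\n", index, frev, crev);
	return true;
}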