Diffstat (limited to 'drivers/gpu/drm/radeon/atom.c')
 -rw-r--r--  drivers/gpu/drm/radeon/atom.c | 91
 1 files changed, 67 insertions, 24 deletions
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d75788feac6c..247f8ee7e940 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -52,15 +52,17 @@
 
 typedef struct {
 	struct atom_context *ctx;
-
 	uint32_t *ps, *ws;
 	int ps_shift;
 	uint16_t start;
+	unsigned last_jump;
+	unsigned long last_jump_jiffies;
+	bool abort;
 } atom_exec_context;
 
 int atom_debug = 0;
-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
 
 static uint32_t atom_arg_mask[8] =
     { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@ -604,12 +606,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
 static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
 {
 	int idx = U8((*ptr)++);
+	int r = 0;
+
 	if (idx < ATOM_TABLE_NAMES_CNT)
 		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
 	else
 		SDEBUG(" table: %d\n", idx);
 	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
-		atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+		r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+	if (r) {
+		ctx->abort = true;
+	}
 }
 
 static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -673,6 +680,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
 static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
 {
 	int execute = 0, target = U16(*ptr);
+	unsigned long cjiffies;
+
 	(*ptr) += 2;
 	switch (arg) {
 	case ATOM_COND_ABOVE:
@@ -700,8 +709,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
 	if (arg != ATOM_COND_ALWAYS)
 		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
 	SDEBUG(" target: 0x%04X\n", target);
-	if (execute)
+	if (execute) {
+		if (ctx->last_jump == (ctx->start + target)) {
+			cjiffies = jiffies;
+			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+				cjiffies -= ctx->last_jump_jiffies;
+				if ((jiffies_to_msecs(cjiffies) > 1000)) {
+					DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n");
+					ctx->abort = true;
+				}
+			} else {
+				/* jiffies wrap around we will just wait a little longer */
+				ctx->last_jump_jiffies = jiffies;
+			}
+		} else {
+			ctx->last_jump = ctx->start + target;
+			ctx->last_jump_jiffies = jiffies;
+		}
 		*ptr = ctx->start + target;
+	}
 }
 
 static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
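The watchdog added above is the stock jiffies pattern for bounding how long a busy path may spin: atom_op_jump() records the jiffies value the first time a given jump target repeats and only raises ctx->abort once the same target has been re-taken for more than a second. As a point of reference only, the same check can be written with a small helper; the name atom_loop_timed_out() and its parameters are illustrative, not part of this file:

#include <linux/jiffies.h>

/* Illustrative sketch, not from the patch: returns true once more than
 * timeout_ms of wall-clock time has elapsed since "start".
 * time_after() copes with jiffies wrap-around. */
static bool atom_loop_timed_out(unsigned long start, unsigned int timeout_ms)
{
	return time_after(jiffies, start + msecs_to_jiffies(timeout_ms));
}
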
@@ -1104,7 +1130,7 @@ static struct {
 	atom_op_shr, ATOM_ARG_MC}, {
 	atom_op_debug, 0},};
 
-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 {
 	int base = CU16(ctx->cmd_table + 4 + 2 * index);
 	int len, ws, ps, ptr;
@@ -1112,7 +1138,7 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 	atom_exec_context ectx;
 
 	if (!base)
-		return;
+		return -EINVAL;
 
 	len = CU16(base + ATOM_CT_SIZE_PTR);
 	ws = CU8(base + ATOM_CT_WS_PTR);
@@ -1125,6 +1151,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 	ectx.ps_shift = ps / 4;
 	ectx.start = base;
 	ectx.ps = params;
+	ectx.abort = false;
+	ectx.last_jump = 0;
 	if (ws)
 		ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
 	else
@@ -1137,6 +1165,11 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
 		else
 			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+		if (ectx.abort) {
+			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+				  base, len, ws, ps, ptr - 1);
+			return -EINVAL;
+		}
 
 		if (op < ATOM_OP_CNT && op > 0)
 			opcode_table[op].func(&ectx, &ptr,
@@ -1152,10 +1185,13 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 
 	if (ws)
 		kfree(ectx.ws);
+	return 0;
 }
 
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
 {
+	int r;
+
 	mutex_lock(&ctx->mutex);
 	/* reset reg block */
 	ctx->reg_block = 0;
@@ -1163,8 +1199,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
 	ctx->fb_base = 0;
 	/* reset io mode */
 	ctx->io_mode = ATOM_IO_MM;
-	atom_execute_table_locked(ctx, index, params);
+	r = atom_execute_table_locked(ctx, index, params);
 	mutex_unlock(&ctx->mutex);
+	return r;
 }
 
 static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
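With atom_execute_table() now returning an int, callers can propagate a stuck-interpreter error instead of continuing with a half-initialised ASIC. A minimal caller-side sketch under that assumption; the wrapper function and its name are hypothetical, not part of this diff:

/* Hypothetical wrapper, for illustration only: run one command table
 * and log the error code rather than dropping it. */
static int example_exec_table(struct atom_context *ctx, int index, uint32_t *params)
{
	int r;

	r = atom_execute_table(ctx, index, params);
	if (r)
		DRM_ERROR("atom command table %d failed: %d\n", index, r);
	return r;
}
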
@@ -1248,9 +1285,7 @@ int atom_asic_init(struct atom_context *ctx)
 
 	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
 		return 1;
-	atom_execute_table(ctx, ATOM_CMD_INIT, ps);
-
-	return 0;
+	return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
 }
 
 void atom_destroy(struct atom_context *ctx)
@@ -1260,12 +1295,16 @@ void atom_destroy(struct atom_context *ctx)
 	kfree(ctx);
 }
 
-void atom_parse_data_header(struct atom_context *ctx, int index,
+bool atom_parse_data_header(struct atom_context *ctx, int index,
 			    uint16_t * size, uint8_t * frev, uint8_t * crev,
 			    uint16_t * data_start)
 {
 	int offset = index * 2 + 4;
 	int idx = CU16(ctx->data_table + offset);
+	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
+
+	if (!mdt[index])
+		return false;
 
 	if (size)
 		*size = CU16(idx);
@@ -1274,38 +1313,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index,
 	if (crev)
 		*crev = CU8(idx + 3);
 	*data_start = idx;
-	return;
+	return true;
 }
 
-void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
 			   uint8_t * crev)
 {
 	int offset = index * 2 + 4;
 	int idx = CU16(ctx->cmd_table + offset);
+	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
+
+	if (!mct[index])
+		return false;
 
 	if (frev)
 		*frev = CU8(idx + 2);
 	if (crev)
 		*crev = CU8(idx + 3);
-	return;
+	return true;
 }
 
 int atom_allocate_fb_scratch(struct atom_context *ctx)
 {
 	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
 	uint16_t data_offset;
-	int usage_bytes;
+	int usage_bytes = 0;
 	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
 
-	atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
 
-	firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+		DRM_DEBUG("atom firmware requested %08x %dkb\n",
+			  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+			  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
 
-	DRM_DEBUG("atom firmware requested %08x %dkb\n",
-		  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
-		  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
-
-	usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+		usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+	}
 	if (usage_bytes == 0)
 		usage_bytes = 20 * 1024;
 	/* allocate some scratch memory */
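atom_parse_data_header() and atom_parse_cmd_header() now report whether the requested table actually exists, which lets callers skip optional tables cleanly, as atom_allocate_fb_scratch() does above. A sketch of the same pattern for an arbitrary data table follows; the function and the debug message are illustrative only, not part of this patch:

/* Illustrative only: bail out early when the BIOS does not provide
 * the requested data table. */
static void example_dump_table_rev(struct atom_context *ctx, int index)
{
	uint16_t size, data_offset;
	uint8_t frev, crev;

	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
		return;	/* table not present in this BIOS image */

	DRM_DEBUG("table %d: frev %u crev %u size %u at 0x%04x\n",
		  index, frev, crev, size, data_offset);
}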