about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/radeon/atom.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/radeon/atom.c')
-rw-r--r-- drivers/gpu/drm/radeon/atom.c | 95
1 file changed, 71 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d75788feac6c..bcec2d79636e 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <linux/slab.h>
27#include <asm/unaligned.h> 28#include <asm/unaligned.h>
28 29
29#define ATOM_DEBUG 30#define ATOM_DEBUG
@@ -52,15 +53,17 @@
52 53
53typedef struct { 54typedef struct {
54 struct atom_context *ctx; 55 struct atom_context *ctx;
55
56 uint32_t *ps, *ws; 56 uint32_t *ps, *ws;
57 int ps_shift; 57 int ps_shift;
58 uint16_t start; 58 uint16_t start;
59 unsigned last_jump;
60 unsigned long last_jump_jiffies;
61 bool abort;
59} atom_exec_context; 62} atom_exec_context;
60 63
61int atom_debug = 0; 64int atom_debug = 0;
62static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); 65static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
63void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); 66int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
64 67
65static uint32_t atom_arg_mask[8] = 68static uint32_t atom_arg_mask[8] =
66 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, 69 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@ -604,12 +607,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
604static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) 607static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
605{ 608{
606 int idx = U8((*ptr)++); 609 int idx = U8((*ptr)++);
610 int r = 0;
611
607 if (idx < ATOM_TABLE_NAMES_CNT) 612 if (idx < ATOM_TABLE_NAMES_CNT)
608 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); 613 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
609 else 614 else
610 SDEBUG(" table: %d\n", idx); 615 SDEBUG(" table: %d\n", idx);
611 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) 616 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
612 atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); 617 r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
618 if (r) {
619 ctx->abort = true;
620 }
613} 621}
614 622
615static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) 623static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -673,6 +681,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
673static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) 681static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
674{ 682{
675 int execute = 0, target = U16(*ptr); 683 int execute = 0, target = U16(*ptr);
684 unsigned long cjiffies;
685
676 (*ptr) += 2; 686 (*ptr) += 2;
677 switch (arg) { 687 switch (arg) {
678 case ATOM_COND_ABOVE: 688 case ATOM_COND_ABOVE:
@@ -700,8 +710,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
700 if (arg != ATOM_COND_ALWAYS) 710 if (arg != ATOM_COND_ALWAYS)
701 SDEBUG(" taken: %s\n", execute ? "yes" : "no"); 711 SDEBUG(" taken: %s\n", execute ? "yes" : "no");
702 SDEBUG(" target: 0x%04X\n", target); 712 SDEBUG(" target: 0x%04X\n", target);
703 if (execute) 713 if (execute) {
714 if (ctx->last_jump == (ctx->start + target)) {
715 cjiffies = jiffies;
716 if (time_after(cjiffies, ctx->last_jump_jiffies)) {
717 cjiffies -= ctx->last_jump_jiffies;
718 if ((jiffies_to_msecs(cjiffies) > 1000)) {
719 DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n");
720 ctx->abort = true;
721 }
722 } else {
723 /* jiffies wrap around we will just wait a little longer */
724 ctx->last_jump_jiffies = jiffies;
725 }
726 } else {
727 ctx->last_jump = ctx->start + target;
728 ctx->last_jump_jiffies = jiffies;
729 }
704 *ptr = ctx->start + target; 730 *ptr = ctx->start + target;
731 }
705} 732}
706 733
707static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) 734static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
@@ -1104,15 +1131,16 @@ static struct {
1104 atom_op_shr, ATOM_ARG_MC}, { 1131 atom_op_shr, ATOM_ARG_MC}, {
1105atom_op_debug, 0},}; 1132atom_op_debug, 0},};
1106 1133
1107static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) 1134static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
1108{ 1135{
1109 int base = CU16(ctx->cmd_table + 4 + 2 * index); 1136 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1110 int len, ws, ps, ptr; 1137 int len, ws, ps, ptr;
1111 unsigned char op; 1138 unsigned char op;
1112 atom_exec_context ectx; 1139 atom_exec_context ectx;
1140 int ret = 0;
1113 1141
1114 if (!base) 1142 if (!base)
1115 return; 1143 return -EINVAL;
1116 1144
1117 len = CU16(base + ATOM_CT_SIZE_PTR); 1145 len = CU16(base + ATOM_CT_SIZE_PTR);
1118 ws = CU8(base + ATOM_CT_WS_PTR); 1146 ws = CU8(base + ATOM_CT_WS_PTR);
@@ -1125,6 +1153,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1125 ectx.ps_shift = ps / 4; 1153 ectx.ps_shift = ps / 4;
1126 ectx.start = base; 1154 ectx.start = base;
1127 ectx.ps = params; 1155 ectx.ps = params;
1156 ectx.abort = false;
1157 ectx.last_jump = 0;
1128 if (ws) 1158 if (ws)
1129 ectx.ws = kzalloc(4 * ws, GFP_KERNEL); 1159 ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
1130 else 1160 else
@@ -1137,6 +1167,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1137 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); 1167 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1138 else 1168 else
1139 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); 1169 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1170 if (ectx.abort) {
1171 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1172 base, len, ws, ps, ptr - 1);
1173 ret = -EINVAL;
1174 goto free;
1175 }
1140 1176
1141 if (op < ATOM_OP_CNT && op > 0) 1177 if (op < ATOM_OP_CNT && op > 0)
1142 opcode_table[op].func(&ectx, &ptr, 1178 opcode_table[op].func(&ectx, &ptr,
@@ -1150,12 +1186,16 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1150 debug_depth--; 1186 debug_depth--;
1151 SDEBUG("<<\n"); 1187 SDEBUG("<<\n");
1152 1188
1189free:
1153 if (ws) 1190 if (ws)
1154 kfree(ectx.ws); 1191 kfree(ectx.ws);
1192 return ret;
1155} 1193}
1156 1194
1157void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1195int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1158{ 1196{
1197 int r;
1198
1159 mutex_lock(&ctx->mutex); 1199 mutex_lock(&ctx->mutex);
1160 /* reset reg block */ 1200 /* reset reg block */
1161 ctx->reg_block = 0; 1201 ctx->reg_block = 0;
@@ -1163,8 +1203,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1163 ctx->fb_base = 0; 1203 ctx->fb_base = 0;
1164 /* reset io mode */ 1204 /* reset io mode */
1165 ctx->io_mode = ATOM_IO_MM; 1205 ctx->io_mode = ATOM_IO_MM;
1166 atom_execute_table_locked(ctx, index, params); 1206 r = atom_execute_table_locked(ctx, index, params);
1167 mutex_unlock(&ctx->mutex); 1207 mutex_unlock(&ctx->mutex);
1208 return r;
1168} 1209}
1169 1210
1170static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; 1211static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
@@ -1248,9 +1289,7 @@ int atom_asic_init(struct atom_context *ctx)
1248 1289
1249 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) 1290 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1250 return 1; 1291 return 1;
1251 atom_execute_table(ctx, ATOM_CMD_INIT, ps); 1292 return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1252
1253 return 0;
1254} 1293}
1255 1294
1256void atom_destroy(struct atom_context *ctx) 1295void atom_destroy(struct atom_context *ctx)
@@ -1260,12 +1299,16 @@ void atom_destroy(struct atom_context *ctx)
1260 kfree(ctx); 1299 kfree(ctx);
1261} 1300}
1262 1301
1263void atom_parse_data_header(struct atom_context *ctx, int index, 1302bool atom_parse_data_header(struct atom_context *ctx, int index,
1264 uint16_t * size, uint8_t * frev, uint8_t * crev, 1303 uint16_t * size, uint8_t * frev, uint8_t * crev,
1265 uint16_t * data_start) 1304 uint16_t * data_start)
1266{ 1305{
1267 int offset = index * 2 + 4; 1306 int offset = index * 2 + 4;
1268 int idx = CU16(ctx->data_table + offset); 1307 int idx = CU16(ctx->data_table + offset);
1308 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1309
1310 if (!mdt[index])
1311 return false;
1269 1312
1270 if (size) 1313 if (size)
1271 *size = CU16(idx); 1314 *size = CU16(idx);
@@ -1274,38 +1317,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index,
1274 if (crev) 1317 if (crev)
1275 *crev = CU8(idx + 3); 1318 *crev = CU8(idx + 3);
1276 *data_start = idx; 1319 *data_start = idx;
1277 return; 1320 return true;
1278} 1321}
1279 1322
1280void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, 1323bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1281 uint8_t * crev) 1324 uint8_t * crev)
1282{ 1325{
1283 int offset = index * 2 + 4; 1326 int offset = index * 2 + 4;
1284 int idx = CU16(ctx->cmd_table + offset); 1327 int idx = CU16(ctx->cmd_table + offset);
1328 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1329
1330 if (!mct[index])
1331 return false;
1285 1332
1286 if (frev) 1333 if (frev)
1287 *frev = CU8(idx + 2); 1334 *frev = CU8(idx + 2);
1288 if (crev) 1335 if (crev)
1289 *crev = CU8(idx + 3); 1336 *crev = CU8(idx + 3);
1290 return; 1337 return true;
1291} 1338}
1292 1339
1293int atom_allocate_fb_scratch(struct atom_context *ctx) 1340int atom_allocate_fb_scratch(struct atom_context *ctx)
1294{ 1341{
1295 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); 1342 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
1296 uint16_t data_offset; 1343 uint16_t data_offset;
1297 int usage_bytes; 1344 int usage_bytes = 0;
1298 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; 1345 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
1299 1346
1300 atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); 1347 if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
1348 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1301 1349
1302 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); 1350 DRM_DEBUG("atom firmware requested %08x %dkb\n",
1351 firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
1352 firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
1303 1353
1304 DRM_DEBUG("atom firmware requested %08x %dkb\n", 1354 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
1305 firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, 1355 }
1306 firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
1307
1308 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
1309 if (usage_bytes == 0) 1356 if (usage_bytes == 0)
1310 usage_bytes = 20 * 1024; 1357 usage_bytes = 20 * 1024;
1311 /* allocate some scratch memory */ 1358 /* allocate some scratch memory */