diff options
Diffstat (limited to 'drivers/gpu/drm/radeon/atom.c')
| -rw-r--r-- | drivers/gpu/drm/radeon/atom.c | 92 |
1 file changed, 68 insertions, 24 deletions
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index d75788feac6c..07b7ebf1f466 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
| 26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
| 27 | #include <linux/slab.h> | ||
| 27 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
| 28 | 29 | ||
| 29 | #define ATOM_DEBUG | 30 | #define ATOM_DEBUG |
| @@ -52,15 +53,17 @@ | |||
| 52 | 53 | ||
| 53 | typedef struct { | 54 | typedef struct { |
| 54 | struct atom_context *ctx; | 55 | struct atom_context *ctx; |
| 55 | |||
| 56 | uint32_t *ps, *ws; | 56 | uint32_t *ps, *ws; |
| 57 | int ps_shift; | 57 | int ps_shift; |
| 58 | uint16_t start; | 58 | uint16_t start; |
| 59 | unsigned last_jump; | ||
| 60 | unsigned long last_jump_jiffies; | ||
| 61 | bool abort; | ||
| 59 | } atom_exec_context; | 62 | } atom_exec_context; |
| 60 | 63 | ||
| 61 | int atom_debug = 0; | 64 | int atom_debug = 0; |
| 62 | static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); | 65 | static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); |
| 63 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); | 66 | int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); |
| 64 | 67 | ||
| 65 | static uint32_t atom_arg_mask[8] = | 68 | static uint32_t atom_arg_mask[8] = |
| 66 | { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, | 69 | { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, |
| @@ -604,12 +607,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) | |||
| 604 | static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) | 607 | static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) |
| 605 | { | 608 | { |
| 606 | int idx = U8((*ptr)++); | 609 | int idx = U8((*ptr)++); |
| 610 | int r = 0; | ||
| 611 | |||
| 607 | if (idx < ATOM_TABLE_NAMES_CNT) | 612 | if (idx < ATOM_TABLE_NAMES_CNT) |
| 608 | SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); | 613 | SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); |
| 609 | else | 614 | else |
| 610 | SDEBUG(" table: %d\n", idx); | 615 | SDEBUG(" table: %d\n", idx); |
| 611 | if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) | 616 | if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) |
| 612 | atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); | 617 | r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); |
| 618 | if (r) { | ||
| 619 | ctx->abort = true; | ||
| 620 | } | ||
| 613 | } | 621 | } |
| 614 | 622 | ||
| 615 | static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) | 623 | static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) |
| @@ -673,6 +681,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) | |||
| 673 | static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) | 681 | static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) |
| 674 | { | 682 | { |
| 675 | int execute = 0, target = U16(*ptr); | 683 | int execute = 0, target = U16(*ptr); |
| 684 | unsigned long cjiffies; | ||
| 685 | |||
| 676 | (*ptr) += 2; | 686 | (*ptr) += 2; |
| 677 | switch (arg) { | 687 | switch (arg) { |
| 678 | case ATOM_COND_ABOVE: | 688 | case ATOM_COND_ABOVE: |
| @@ -700,8 +710,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) | |||
| 700 | if (arg != ATOM_COND_ALWAYS) | 710 | if (arg != ATOM_COND_ALWAYS) |
| 701 | SDEBUG(" taken: %s\n", execute ? "yes" : "no"); | 711 | SDEBUG(" taken: %s\n", execute ? "yes" : "no"); |
| 702 | SDEBUG(" target: 0x%04X\n", target); | 712 | SDEBUG(" target: 0x%04X\n", target); |
| 703 | if (execute) | 713 | if (execute) { |
| 714 | if (ctx->last_jump == (ctx->start + target)) { | ||
| 715 | cjiffies = jiffies; | ||
| 716 | if (time_after(cjiffies, ctx->last_jump_jiffies)) { | ||
| 717 | cjiffies -= ctx->last_jump_jiffies; | ||
| 718 | if ((jiffies_to_msecs(cjiffies) > 1000)) { | ||
| 719 | DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n"); | ||
| 720 | ctx->abort = true; | ||
| 721 | } | ||
| 722 | } else { | ||
| 723 | /* jiffies wrap around we will just wait a little longer */ | ||
| 724 | ctx->last_jump_jiffies = jiffies; | ||
| 725 | } | ||
| 726 | } else { | ||
| 727 | ctx->last_jump = ctx->start + target; | ||
| 728 | ctx->last_jump_jiffies = jiffies; | ||
| 729 | } | ||
| 704 | *ptr = ctx->start + target; | 730 | *ptr = ctx->start + target; |
| 731 | } | ||
| 705 | } | 732 | } |
| 706 | 733 | ||
| 707 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) | 734 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) |
| @@ -1104,7 +1131,7 @@ static struct { | |||
| 1104 | atom_op_shr, ATOM_ARG_MC}, { | 1131 | atom_op_shr, ATOM_ARG_MC}, { |
| 1105 | atom_op_debug, 0},}; | 1132 | atom_op_debug, 0},}; |
| 1106 | 1133 | ||
| 1107 | static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) | 1134 | static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) |
| 1108 | { | 1135 | { |
| 1109 | int base = CU16(ctx->cmd_table + 4 + 2 * index); | 1136 | int base = CU16(ctx->cmd_table + 4 + 2 * index); |
| 1110 | int len, ws, ps, ptr; | 1137 | int len, ws, ps, ptr; |
| @@ -1112,7 +1139,7 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
| 1112 | atom_exec_context ectx; | 1139 | atom_exec_context ectx; |
| 1113 | 1140 | ||
| 1114 | if (!base) | 1141 | if (!base) |
| 1115 | return; | 1142 | return -EINVAL; |
| 1116 | 1143 | ||
| 1117 | len = CU16(base + ATOM_CT_SIZE_PTR); | 1144 | len = CU16(base + ATOM_CT_SIZE_PTR); |
| 1118 | ws = CU8(base + ATOM_CT_WS_PTR); | 1145 | ws = CU8(base + ATOM_CT_WS_PTR); |
| @@ -1125,6 +1152,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
| 1125 | ectx.ps_shift = ps / 4; | 1152 | ectx.ps_shift = ps / 4; |
| 1126 | ectx.start = base; | 1153 | ectx.start = base; |
| 1127 | ectx.ps = params; | 1154 | ectx.ps = params; |
| 1155 | ectx.abort = false; | ||
| 1156 | ectx.last_jump = 0; | ||
| 1128 | if (ws) | 1157 | if (ws) |
| 1129 | ectx.ws = kzalloc(4 * ws, GFP_KERNEL); | 1158 | ectx.ws = kzalloc(4 * ws, GFP_KERNEL); |
| 1130 | else | 1159 | else |
| @@ -1137,6 +1166,11 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
| 1137 | SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); | 1166 | SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); |
| 1138 | else | 1167 | else |
| 1139 | SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); | 1168 | SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); |
| 1169 | if (ectx.abort) { | ||
| 1170 | DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", | ||
| 1171 | base, len, ws, ps, ptr - 1); | ||
| 1172 | return -EINVAL; | ||
| 1173 | } | ||
| 1140 | 1174 | ||
| 1141 | if (op < ATOM_OP_CNT && op > 0) | 1175 | if (op < ATOM_OP_CNT && op > 0) |
| 1142 | opcode_table[op].func(&ectx, &ptr, | 1176 | opcode_table[op].func(&ectx, &ptr, |
| @@ -1152,10 +1186,13 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
| 1152 | 1186 | ||
| 1153 | if (ws) | 1187 | if (ws) |
| 1154 | kfree(ectx.ws); | 1188 | kfree(ectx.ws); |
| 1189 | return 0; | ||
| 1155 | } | 1190 | } |
| 1156 | 1191 | ||
| 1157 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | 1192 | int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
| 1158 | { | 1193 | { |
| 1194 | int r; | ||
| 1195 | |||
| 1159 | mutex_lock(&ctx->mutex); | 1196 | mutex_lock(&ctx->mutex); |
| 1160 | /* reset reg block */ | 1197 | /* reset reg block */ |
| 1161 | ctx->reg_block = 0; | 1198 | ctx->reg_block = 0; |
| @@ -1163,8 +1200,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | |||
| 1163 | ctx->fb_base = 0; | 1200 | ctx->fb_base = 0; |
| 1164 | /* reset io mode */ | 1201 | /* reset io mode */ |
| 1165 | ctx->io_mode = ATOM_IO_MM; | 1202 | ctx->io_mode = ATOM_IO_MM; |
| 1166 | atom_execute_table_locked(ctx, index, params); | 1203 | r = atom_execute_table_locked(ctx, index, params); |
| 1167 | mutex_unlock(&ctx->mutex); | 1204 | mutex_unlock(&ctx->mutex); |
| 1205 | return r; | ||
| 1168 | } | 1206 | } |
| 1169 | 1207 | ||
| 1170 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; | 1208 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; |
| @@ -1248,9 +1286,7 @@ int atom_asic_init(struct atom_context *ctx) | |||
| 1248 | 1286 | ||
| 1249 | if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) | 1287 | if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) |
| 1250 | return 1; | 1288 | return 1; |
| 1251 | atom_execute_table(ctx, ATOM_CMD_INIT, ps); | 1289 | return atom_execute_table(ctx, ATOM_CMD_INIT, ps); |
| 1252 | |||
| 1253 | return 0; | ||
| 1254 | } | 1290 | } |
| 1255 | 1291 | ||
| 1256 | void atom_destroy(struct atom_context *ctx) | 1292 | void atom_destroy(struct atom_context *ctx) |
| @@ -1260,12 +1296,16 @@ void atom_destroy(struct atom_context *ctx) | |||
| 1260 | kfree(ctx); | 1296 | kfree(ctx); |
| 1261 | } | 1297 | } |
| 1262 | 1298 | ||
| 1263 | void atom_parse_data_header(struct atom_context *ctx, int index, | 1299 | bool atom_parse_data_header(struct atom_context *ctx, int index, |
| 1264 | uint16_t * size, uint8_t * frev, uint8_t * crev, | 1300 | uint16_t * size, uint8_t * frev, uint8_t * crev, |
| 1265 | uint16_t * data_start) | 1301 | uint16_t * data_start) |
| 1266 | { | 1302 | { |
| 1267 | int offset = index * 2 + 4; | 1303 | int offset = index * 2 + 4; |
| 1268 | int idx = CU16(ctx->data_table + offset); | 1304 | int idx = CU16(ctx->data_table + offset); |
| 1305 | u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4); | ||
| 1306 | |||
| 1307 | if (!mdt[index]) | ||
| 1308 | return false; | ||
| 1269 | 1309 | ||
| 1270 | if (size) | 1310 | if (size) |
| 1271 | *size = CU16(idx); | 1311 | *size = CU16(idx); |
| @@ -1274,38 +1314,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index, | |||
| 1274 | if (crev) | 1314 | if (crev) |
| 1275 | *crev = CU8(idx + 3); | 1315 | *crev = CU8(idx + 3); |
| 1276 | *data_start = idx; | 1316 | *data_start = idx; |
| 1277 | return; | 1317 | return true; |
| 1278 | } | 1318 | } |
| 1279 | 1319 | ||
| 1280 | void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, | 1320 | bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, |
| 1281 | uint8_t * crev) | 1321 | uint8_t * crev) |
| 1282 | { | 1322 | { |
| 1283 | int offset = index * 2 + 4; | 1323 | int offset = index * 2 + 4; |
| 1284 | int idx = CU16(ctx->cmd_table + offset); | 1324 | int idx = CU16(ctx->cmd_table + offset); |
| 1325 | u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4); | ||
| 1326 | |||
| 1327 | if (!mct[index]) | ||
| 1328 | return false; | ||
| 1285 | 1329 | ||
| 1286 | if (frev) | 1330 | if (frev) |
| 1287 | *frev = CU8(idx + 2); | 1331 | *frev = CU8(idx + 2); |
| 1288 | if (crev) | 1332 | if (crev) |
| 1289 | *crev = CU8(idx + 3); | 1333 | *crev = CU8(idx + 3); |
| 1290 | return; | 1334 | return true; |
| 1291 | } | 1335 | } |
| 1292 | 1336 | ||
| 1293 | int atom_allocate_fb_scratch(struct atom_context *ctx) | 1337 | int atom_allocate_fb_scratch(struct atom_context *ctx) |
| 1294 | { | 1338 | { |
| 1295 | int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); | 1339 | int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); |
| 1296 | uint16_t data_offset; | 1340 | uint16_t data_offset; |
| 1297 | int usage_bytes; | 1341 | int usage_bytes = 0; |
| 1298 | struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; | 1342 | struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; |
| 1299 | 1343 | ||
| 1300 | atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); | 1344 | if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { |
| 1345 | firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); | ||
| 1301 | 1346 | ||
| 1302 | firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); | 1347 | DRM_DEBUG("atom firmware requested %08x %dkb\n", |
| 1348 | firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, | ||
| 1349 | firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); | ||
| 1303 | 1350 | ||
| 1304 | DRM_DEBUG("atom firmware requested %08x %dkb\n", | 1351 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; |
| 1305 | firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, | 1352 | } |
| 1306 | firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); | ||
| 1307 | |||
| 1308 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; | ||
| 1309 | if (usage_bytes == 0) | 1353 | if (usage_bytes == 0) |
| 1310 | usage_bytes = 20 * 1024; | 1354 | usage_bytes = 20 * 1024; |
| 1311 | /* allocate some scratch memory */ | 1355 | /* allocate some scratch memory */ |
