path: root/drivers/gpu/drm/i915/gvt/cmd_parser.c
author	Takashi Iwai <tiwai@suse.de>	2017-05-02 02:25:25 -0400
committer	Takashi Iwai <tiwai@suse.de>	2017-05-02 02:25:25 -0400
commit	a5c3b32a1146e44f6b38fdfdfffc27842953420c (patch)
tree	eca93f51c8deabe77ed079a3e9190717b6380009 /drivers/gpu/drm/i915/gvt/cmd_parser.c
parent	d7dc450d5a7162de96edbed6b1792240c2f3a55f (diff)
parent	20d5c84bef067b7e804a163e2abca16c47125bad (diff)
Merge tag 'asoc-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Updates for v4.12

A quiet release for the core, but lots of new drivers this time around:

 - A new, generalized, API for hooking up jacks which makes it easier
   to write generic machine drivers for simple cases.
 - Continuing fixes for issues with the x86 CPU drivers.
 - New drivers for Cirrus CS35L35, DIO DIO2125, Everest ES7132,
   HiSilicon hi6210, Maxim MAX98927, MT2701 systems with WM8960,
   Nuvoton NAU8824, Odroid systems, ST STM32 SAI controllers and
   x86 systems with DA7213
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/cmd_parser.c')
-rw-r--r--	drivers/gpu/drm/i915/gvt/cmd_parser.c	109
1 file changed, 72 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7ae6e2b241c8..2b92cc8a7d1a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
 	return ret;
 }
 
+static inline bool is_force_nonpriv_mmio(unsigned int offset)
+{
+	return (offset >= 0x24d0 && offset < 0x2500);
+}
+
+static int force_nonpriv_reg_handler(struct parser_exec_state *s,
+		unsigned int offset, unsigned int index)
+{
+	struct intel_gvt *gvt = s->vgpu->gvt;
+	unsigned int data = cmd_val(s, index + 1);
+
+	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
+			offset, data);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
 		unsigned int offset, unsigned int index, char *cmd)
 {
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	struct intel_gvt *gvt = vgpu->gvt;
 
 	if (offset + 4 > gvt->device_info.mmio_size) {
-		gvt_err("%s access to (%x) outside of MMIO range\n",
+		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
 				cmd, offset);
 		return -EINVAL;
 	}
 
 	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
-		gvt_err("vgpu%d: %s access to non-render register (%x)\n",
-				s->vgpu->id, cmd, offset);
+		gvt_vgpu_err("%s access to non-render register (%x)\n",
+				cmd, offset);
 		return 0;
 	}
 
 	if (is_shadowed_mmio(offset)) {
-		gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
-				s->vgpu->id, offset);
+		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
 		return 0;
 	}
 
+	if (is_force_nonpriv_mmio(offset) &&
+		force_nonpriv_reg_handler(s, offset, index))
+		return -EINVAL;
+
 	if (offset == i915_mmio_reg_offset(DERRMR) ||
 		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
 		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
 		ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
 	else if (post_sync == 1) {
 		/* check ggtt*/
-		if ((cmd_val(s, 2) & (1 << 2))) {
+		if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
 			gma = cmd_val(s, 2) & GENMASK(31, 3);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
 	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct intel_vgpu *vgpu = s->vgpu;
 	u32 dword0 = cmd_val(s, 0);
 	u32 dword1 = cmd_val(s, 1);
 	u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		break;
 
 	default:
-		gvt_err("unknown plane code %d\n", plane);
+		gvt_vgpu_err("unknown plane code %d\n", plane);
 		return -EINVAL;
 	}
 
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
 {
 	struct mi_display_flip_command_info info;
+	struct intel_vgpu *vgpu = s->vgpu;
 	int ret;
 	int i;
 	int len = cmd_length(s);
 
 	ret = decode_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("fail to decode MI display flip command\n");
+		gvt_vgpu_err("fail to decode MI display flip command\n");
 		return ret;
 	}
 
 	ret = check_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("invalid MI display flip command\n");
+		gvt_vgpu_err("invalid MI display flip command\n");
 		return ret;
 	}
 
 	ret = update_plane_mmio_from_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("fail to update plane mmio\n");
+		gvt_vgpu_err("fail to update plane mmio\n");
 		return ret;
 	}
 
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	int ret;
 
 	if (op_size > max_surface_size) {
-		gvt_err("command address audit fail name %s\n", s->info->name);
+		gvt_vgpu_err("command address audit fail name %s\n",
+				s->info->name);
 		return -EINVAL;
 	}
 
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	}
 	return 0;
 err:
-	gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
 			s->info->name, guest_gma, op_size);
 
 	pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
 
 static inline int unexpected_cmd(struct parser_exec_state *s)
 {
-	gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
-			s->vgpu->id, s->info->name);
+	struct intel_vgpu *vgpu = s->vgpu;
+
+	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
+
 	return -EINVAL;
 }
 
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 	while (gma != end_gma) {
 		gpa = intel_vgpu_gma_to_gpa(mm, gma);
 		if (gpa == INTEL_GVT_INVALID_ADDR) {
-			gvt_err("invalid gma address: %lx\n", gma);
+			gvt_vgpu_err("invalid gma address: %lx\n", gma);
 			return -EFAULT;
 		}
 
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 	uint32_t bb_size = 0;
 	uint32_t cmd_len = 0;
 	bool met_bb_end = false;
+	struct intel_vgpu *vgpu = s->vgpu;
 	u32 cmd;
 
 	/* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
-		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
 		return -EINVAL;
 	}
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 				gma, gma + 4, &cmd);
 		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 		if (info == NULL) {
-			gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
 			return -EINVAL;
 		}
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
 	struct intel_shadow_bb_entry *entry_obj;
+	struct intel_vgpu *vgpu = s->vgpu;
 	unsigned long gma = 0;
 	uint32_t bb_size;
 	void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 
 	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
 	if (ret) {
-		gvt_err("failed to set shadow batch to CPU\n");
+		gvt_vgpu_err("failed to set shadow batch to CPU\n");
 		goto unmap_src;
 	}
 
@@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 				gma, gma + bb_size,
 				dst);
 	if (ret) {
-		gvt_err("fail to copy guest ring buffer\n");
+		gvt_vgpu_err("fail to copy guest ring buffer\n");
 		goto unmap_src;
 	}
 
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 {
 	bool second_level;
 	int ret = 0;
+	struct intel_vgpu *vgpu = s->vgpu;
 
 	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
-		gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
 		return -EINVAL;
 	}
 
 	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
 	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
-		gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
 		return -EINVAL;
 	}
 
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 	if (batch_buffer_needs_scan(s)) {
 		ret = perform_bb_shadow(s);
 		if (ret < 0)
-			gvt_err("invalid shadow batch buffer\n");
+			gvt_vgpu_err("invalid shadow batch buffer\n");
 	} else {
 		/* emulate a batch buffer end to do return right */
 		ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	int ret = 0;
 	cycles_t t0, t1, t2;
 	struct parser_exec_state s_before_advance_custom;
+	struct intel_vgpu *vgpu = s->vgpu;
 
 	t0 = get_cycles();
 
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
-		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
 		return -EINVAL;
 	}
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (info->handler) {
 		ret = info->handler(s);
 		if (ret < 0) {
-			gvt_err("%s handler error\n", info->name);
+			gvt_vgpu_err("%s handler error\n", info->name);
 			return ret;
 		}
 	}
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
 		ret = cmd_advance_default(s);
 		if (ret) {
-			gvt_err("%s IP advance error\n", info->name);
+			gvt_vgpu_err("%s IP advance error\n", info->name);
 			return ret;
 		}
 	}
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
 
 	unsigned long gma_head, gma_tail, gma_bottom;
 	int ret = 0;
+	struct intel_vgpu *vgpu = s->vgpu;
 
 	gma_head = rb_start + rb_head;
 	gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
 	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
 		if (!(s->ip_gma >= rb_start) ||
 			!(s->ip_gma < gma_bottom)) {
-			gvt_err("ip_gma %lx out of ring scope."
+			gvt_vgpu_err("ip_gma %lx out of ring scope."
 				"(base:0x%lx, bottom: 0x%lx)\n",
 				s->ip_gma, rb_start,
 				gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
 			return -EINVAL;
 		}
 		if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
-			gvt_err("ip_gma %lx out of range."
+			gvt_vgpu_err("ip_gma %lx out of range."
 				"base 0x%lx head 0x%lx tail 0x%lx\n",
 				s->ip_gma, rb_start,
 				rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
 		}
 		ret = cmd_parser_exec(s);
 		if (ret) {
-			gvt_err("cmd parser error\n");
+			gvt_vgpu_err("cmd parser error\n");
 			parser_exec_state_dump(s);
 			break;
 		}
@@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 				gma_head, gma_top,
 				workload->shadow_ring_buffer_va);
 	if (ret) {
-		gvt_err("fail to copy guest ring buffer\n");
+		gvt_vgpu_err("fail to copy guest ring buffer\n");
 		return ret;
 	}
 	copy_len = gma_top - gma_head;
@@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 			gma_head, gma_tail,
 			workload->shadow_ring_buffer_va + copy_len);
 	if (ret) {
-		gvt_err("fail to copy guest ring buffer\n");
+		gvt_vgpu_err("fail to copy guest ring buffer\n");
 		return ret;
 	}
 	ring->tail += workload->rb_len;
@@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
 	int ret;
+	struct intel_vgpu *vgpu = workload->vgpu;
 
 	ret = shadow_workload_ring_buffer(workload);
 	if (ret) {
-		gvt_err("fail to shadow workload ring_buffer\n");
+		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
 		return ret;
 	}
 
 	ret = scan_workload(workload);
 	if (ret) {
-		gvt_err("scan workload error\n");
+		gvt_vgpu_err("scan workload error\n");
 		return ret;
 	}
 	return 0;
@@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	int ctx_size = wa_ctx->indirect_ctx.size;
 	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 	void *map;
@@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	/* get the va of the shadow batch buffer */
 	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
 	if (IS_ERR(map)) {
-		gvt_err("failed to vmap shadow indirect ctx\n");
+		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
 		ret = PTR_ERR(map);
 		goto put_obj;
 	}
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 	if (ret) {
-		gvt_err("failed to set shadow indirect ctx to CPU\n");
+		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
 		goto unmap_src;
 	}
 
@@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 				guest_gma, guest_gma + ctx_size,
 				map);
 	if (ret) {
-		gvt_err("fail to copy guest indirect ctx\n");
+		gvt_vgpu_err("fail to copy guest indirect ctx\n");
 		goto unmap_src;
 	}
 
@@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	int ret;
+	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
 
 	if (wa_ctx->indirect_ctx.size == 0)
 		return 0;
 
 	ret = shadow_indirect_ctx(wa_ctx);
 	if (ret) {
-		gvt_err("fail to shadow indirect ctx\n");
+		gvt_vgpu_err("fail to shadow indirect ctx\n");
 		return ret;
 	}
 
@@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 	ret = scan_wa_ctx(wa_ctx);
 	if (ret) {
-		gvt_err("scan wa ctx error\n");
+		gvt_vgpu_err("scan wa ctx error\n");
 		return ret;
 	}
 