Diffstat (limited to 'drivers/gpu/drm/i915/gvt/cmd_parser.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 225
1 file changed, 124 insertions(+), 101 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 85d4c57870fb..18c45734c7a2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -709,18 +709,13 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
 
 	print_opcode(cmd_val(s, 0), s->ring_id);
 
-	/* print the whole page to trace */
-	pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
-			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
-			cmd_val(s, 2), cmd_val(s, 3));
-
 	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
 
 	while (cnt < 1024) {
-		pr_err("ip_va=%p: ", s->ip_va);
+		gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
 		for (i = 0; i < 8; i++)
-			pr_err("%08x ", cmd_val(s, i));
-		pr_err("\n");
+			gvt_dbg_cmd("%08x ", cmd_val(s, i));
+		gvt_dbg_cmd("\n");
 
 		s->ip_va += 8 * sizeof(u32);
 		cnt += 8;
@@ -825,7 +820,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
 	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
 		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
 			offset, data);
-		return -EINVAL;
+		return -EPERM;
 	}
 	return 0;
 }
@@ -839,7 +834,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	if (offset + 4 > gvt->device_info.mmio_size) {
 		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
 				cmd, offset);
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
@@ -854,8 +849,8 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	}
 
 	if (is_force_nonpriv_mmio(offset) &&
-		force_nonpriv_reg_handler(s, offset, index))
-		return -EINVAL;
+	    force_nonpriv_reg_handler(s, offset, index))
+		return -EPERM;
 
 	if (offset == i915_mmio_reg_offset(DERRMR) ||
 		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
@@ -894,11 +889,14 @@ static int cmd_handler_lri(struct parser_exec_state *s)
 				i915_mmio_reg_offset(DERRMR))
 				ret |= 0;
 			else
-				ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+				ret |= (cmd_reg_inhibit(s, i)) ?
+					-EBADRQC : 0;
 		}
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+		if (ret)
+			break;
 	}
 	return ret;
 }
@@ -912,11 +910,15 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
 		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
 			ret |= ((cmd_reg_inhibit(s, i) ||
 					(cmd_reg_inhibit(s, i + 1)))) ?
-				-EINVAL : 0;
+				-EBADRQC : 0;
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+		if (ret)
+			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+		if (ret)
+			break;
 	}
 	return ret;
 }
@@ -934,15 +936,19 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
 
 	for (i = 1; i < cmd_len;) {
 		if (IS_BROADWELL(gvt->dev_priv))
-			ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+		if (ret)
+			break;
 		if (cmd_val(s, 0) & (1 << 22)) {
 			gma = cmd_gma(s, i + 1);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, i + 2)) << 32;
 			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+			if (ret)
+				break;
 		}
 		i += gmadr_dw_number(s) + 1;
 	}
@@ -958,11 +964,15 @@ static int cmd_handler_srm(struct parser_exec_state *s)
 
 	for (i = 1; i < cmd_len;) {
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+		if (ret)
+			break;
 		if (cmd_val(s, 0) & (1 << 22)) {
 			gma = cmd_gma(s, i + 1);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, i + 2)) << 32;
 			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+			if (ret)
+				break;
 		}
 		i += gmadr_dw_number(s) + 1;
 	}
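The register-handler loops above (lri, lrr, lrm, srm) now stop at the first failing handler instead of OR-ing every result into ret. Two different negative errnos OR-ed together no longer name a real error, so the early break is what keeps the specific code intact for the caller. A minimal standalone sketch of the difference, with a made-up check_entry() standing in for the driver's handlers:

/* Sketch only: OR-ing distinct errnos (-EBADRQC | -EFAULT == -6 here,
 * i.e. -ENXIO) yields a misleading code, while breaking at the first
 * failure preserves the meaningful one.
 */
#include <errno.h>
#include <stdio.h>

static int check_entry(int i)
{
	if (i == 2)
		return -EBADRQC;	/* pretend entry 2 is an unknown command */
	if (i == 4)
		return -EFAULT;		/* pretend entry 4 has a bad address */
	return 0;
}

int main(void)
{
	int i, ret = 0;

	for (i = 0; i < 6; i++)
		ret |= check_entry(i);		/* old style: codes get merged */
	printf("or-ed result: %d\n", ret);

	ret = 0;
	for (i = 0; i < 6; i++) {
		ret = check_entry(i);		/* new style: stop at first error */
		if (ret)
			break;
	}
	printf("first-error result: %d\n", ret);
	return 0;
}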
@@ -1116,7 +1126,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 
 	v = (dword0 & GENMASK(21, 19)) >> 19;
 	if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
-		return -EINVAL;
+		return -EBADRQC;
 
 	info->pipe = gen8_plane_code[v].pipe;
 	info->plane = gen8_plane_code[v].plane;
@@ -1136,7 +1146,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 		info->surf_reg = SPRSURF(info->pipe);
 	} else {
 		WARN_ON(1);
-		return -EINVAL;
+		return -EBADRQC;
 	}
 	return 0;
 }
@@ -1185,7 +1195,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 
 	default:
 		gvt_vgpu_err("unknown plane code %d\n", plane);
-		return -EINVAL;
+		return -EBADRQC;
 	}
 
 	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
@@ -1348,10 +1358,13 @@ static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
 {
 	unsigned long addr;
 	unsigned long gma_high, gma_low;
-	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	struct intel_vgpu *vgpu = s->vgpu;
+	int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
 
-	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
+		gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
 		return INTEL_GVT_INVALID_ADDR;
+	}
 
 	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
 	if (gmadr_bytes == 4) {
@@ -1374,16 +1387,16 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	if (op_size > max_surface_size) {
 		gvt_vgpu_err("command address audit fail name %s\n",
 			s->info->name);
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	if (index_mode)	{
-		if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
-			ret = -EINVAL;
+		if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+			ret = -EFAULT;
 			goto err;
 		}
 	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
-		ret = -EINVAL;
+		ret = -EFAULT;
 		goto err;
 	}
 
@@ -1439,7 +1452,7 @@ static inline int unexpected_cmd(struct parser_exec_state *s)
 
 	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
 
-	return -EINVAL;
+	return -EBADRQC;
 }
 
 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
@@ -1545,10 +1558,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 		return -EFAULT;
 	}
 
-	offset = gma & (GTT_PAGE_SIZE - 1);
+	offset = gma & (I915_GTT_PAGE_SIZE - 1);
 
-	copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
-		GTT_PAGE_SIZE - offset : end_gma - gma;
+	copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
+		I915_GTT_PAGE_SIZE - offset : end_gma - gma;
 
 	intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
 
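copy_gma_to_hva() copies one guest page per iteration: offset is the position inside the current 4 KiB GTT page, and copy_len is clamped so a single read never crosses a page boundary. A small self-contained sketch of that arithmetic (PAGE_SZ and the example addresses are stand-ins, not driver values):

/* Standalone sketch of the per-page copy-length arithmetic used above. */
#include <stdio.h>

#define PAGE_SZ 4096UL

int main(void)
{
	unsigned long gma = 0x10ff0, end_gma = 0x12000, total = 0;

	while (gma < end_gma) {
		unsigned long offset = gma & (PAGE_SZ - 1);	/* offset inside current page */
		unsigned long copy_len =
			(end_gma - gma) >= (PAGE_SZ - offset) ?
			PAGE_SZ - offset : end_gma - gma;	/* clamp to page boundary */

		printf("copy %lu bytes starting at 0x%lx\n", copy_len, gma);
		gma += copy_len;
		total += copy_len;
	}
	printf("total %lu bytes\n", total);
	return 0;
}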
@@ -1576,110 +1589,113 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 		return 1;
 }
 
-static int find_bb_size(struct parser_exec_state *s)
+static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 {
 	unsigned long gma = 0;
 	struct cmd_info *info;
-	int bb_size = 0;
 	uint32_t cmd_len = 0;
-	bool met_bb_end = false;
+	bool bb_end = false;
 	struct intel_vgpu *vgpu = s->vgpu;
 	u32 cmd;
 
+	*bb_size = 0;
+
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
-	cmd = cmd_val(s, 0);
+	if (gma == INTEL_GVT_INVALID_ADDR)
+		return -EFAULT;
 
+	cmd = cmd_val(s, 0);
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
 		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-		return -EINVAL;
+		return -EBADRQC;
 	}
 	do {
-		copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
-				gma, gma + 4, &cmd);
+		if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+				gma, gma + 4, &cmd) < 0)
+			return -EFAULT;
 		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 		if (info == NULL) {
 			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-			return -EINVAL;
+			return -EBADRQC;
 		}
 
 		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
-			met_bb_end = true;
+			bb_end = true;
 		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
-			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
 				/* chained batch buffer */
-				met_bb_end = true;
-			}
+				bb_end = true;
 		}
 		cmd_len = get_cmd_length(info, cmd) << 2;
-		bb_size += cmd_len;
+		*bb_size += cmd_len;
 		gma += cmd_len;
+	} while (!bb_end);
 
-	} while (!met_bb_end);
-
-	return bb_size;
+	return 0;
 }
 
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
-	struct intel_shadow_bb_entry *entry_obj;
 	struct intel_vgpu *vgpu = s->vgpu;
+	struct intel_vgpu_shadow_bb *bb;
 	unsigned long gma = 0;
-	int bb_size;
-	void *dst = NULL;
+	unsigned long bb_size;
 	int ret = 0;
 
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
+	if (gma == INTEL_GVT_INVALID_ADDR)
+		return -EFAULT;
 
-	/* get the size of the batch buffer */
-	bb_size = find_bb_size(s);
-	if (bb_size < 0)
-		return -EINVAL;
+	ret = find_bb_size(s, &bb_size);
+	if (ret)
+		return ret;
 
-	/* allocate shadow batch buffer */
-	entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
-	if (entry_obj == NULL)
+	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
+	if (!bb)
 		return -ENOMEM;
 
-	entry_obj->obj =
-		i915_gem_object_create(s->vgpu->gvt->dev_priv,
-				roundup(bb_size, PAGE_SIZE));
-	if (IS_ERR(entry_obj->obj)) {
-		ret = PTR_ERR(entry_obj->obj);
-		goto free_entry;
+	bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
+					 roundup(bb_size, PAGE_SIZE));
+	if (IS_ERR(bb->obj)) {
+		ret = PTR_ERR(bb->obj);
+		goto err_free_bb;
 	}
-	entry_obj->len = bb_size;
-	INIT_LIST_HEAD(&entry_obj->list);
 
-	dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
-	if (IS_ERR(dst)) {
-		ret = PTR_ERR(dst);
-		goto put_obj;
-	}
+	ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
+	if (ret)
+		goto err_free_obj;
 
-	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
-	if (ret) {
-		gvt_vgpu_err("failed to set shadow batch to CPU\n");
-		goto unmap_src;
+	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
+	if (IS_ERR(bb->va)) {
+		ret = PTR_ERR(bb->va);
+		goto err_finish_shmem_access;
 	}
 
-	entry_obj->va = dst;
-	entry_obj->bb_start_cmd_va = s->ip_va;
+	if (bb->clflush & CLFLUSH_BEFORE) {
+		drm_clflush_virt_range(bb->va, bb->obj->base.size);
+		bb->clflush &= ~CLFLUSH_BEFORE;
+	}
 
-	/* copy batch buffer to shadow batch buffer*/
 	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
 			gma, gma + bb_size,
-			dst);
+			bb->va);
 	if (ret < 0) {
 		gvt_vgpu_err("fail to copy guest ring buffer\n");
-		goto unmap_src;
+		ret = -EFAULT;
+		goto err_unmap;
 	}
 
-	list_add(&entry_obj->list, &s->workload->shadow_bb);
+	INIT_LIST_HEAD(&bb->list);
+	list_add(&bb->list, &s->workload->shadow_bb);
+
+	bb->accessing = true;
+	bb->bb_start_cmd_va = s->ip_va;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.
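find_bb_size() now reports the batch-buffer length through the bb_size out-parameter and reserves its return value for a status code (0 or a negative errno), so a caller can no longer confuse a large size with an error. A small sketch of the same out-parameter convention with made-up names (measure_blob() is not a driver function):

/* Sketch of the "status in the return value, size in an out-parameter"
 * convention that find_bb_size() now follows.
 */
#include <errno.h>
#include <stdio.h>

static int measure_blob(const unsigned char *blob, unsigned long max,
			unsigned long *size)
{
	unsigned long i;

	*size = 0;
	for (i = 0; i < max; i++) {
		*size += 1;
		if (blob[i] == 0)	/* terminator found: success */
			return 0;
	}
	return -EBADRQC;		/* no terminator: reject the buffer */
}

int main(void)
{
	unsigned char blob[] = { 0x10, 0x20, 0x30, 0x00 };
	unsigned long size;
	int ret = measure_blob(blob, sizeof(blob), &size);

	if (ret)
		printf("scan failed: %d\n", ret);
	else
		printf("blob size: %lu bytes\n", size);
	return 0;
}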
@@ -1688,17 +1704,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	 * buffer's gma in pair. After all, we don't want to pin the shadow
 	 * buffer here (too early).
 	 */
-	s->ip_va = dst;
+	s->ip_va = bb->va;
 	s->ip_gma = gma;
-
 	return 0;
-
-unmap_src:
-	i915_gem_object_unpin_map(entry_obj->obj);
-put_obj:
-	i915_gem_object_put(entry_obj->obj);
-free_entry:
-	kfree(entry_obj);
+err_unmap:
+	i915_gem_object_unpin_map(bb->obj);
+err_finish_shmem_access:
+	i915_gem_obj_finish_shmem_access(bb->obj);
+err_free_obj:
+	i915_gem_object_put(bb->obj);
+err_free_bb:
+	kfree(bb);
 	return ret;
 }
 
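The error path above unwinds with one label per acquired resource, in reverse order of acquisition, so a failure at any step releases exactly what was set up before it. A generic sketch of that goto-unwind idiom, with plain allocations standing in for the GEM object operations:

/* Generic sketch of the goto-unwind pattern; not driver code. */
#include <stdio.h>
#include <stdlib.h>

static int build_object(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		return -1;

	b = malloc(32);
	if (!b)
		goto err_free_a;	/* undo only what was acquired so far */

	c = malloc(64);
	if (!c)
		goto err_free_b;

	printf("all resources acquired\n");
	free(c);
	free(b);
	free(a);
	return 0;

err_free_b:
	free(b);
err_free_a:
	free(a);
	return -1;
}

int main(void)
{
	return build_object() ? 1 : 0;
}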
@@ -1710,13 +1726,13 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 
 	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
 		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
 	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
 		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	s->saved_buf_addr_type = s->buf_addr_type;
@@ -1740,7 +1756,6 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 		if (ret < 0)
 			return ret;
 	}
-
 	return ret;
 }
 
@@ -2430,7 +2445,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (info == NULL) {
 		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-		return -EINVAL;
+		return -EBADRQC;
 	}
 
 	s->info = info;
@@ -2465,6 +2480,10 @@ static inline bool gma_out_of_range(unsigned long gma,
 	return (gma > gma_tail) && (gma < gma_head);
 }
 
+/* Keep the consistent return type, e.g EBADRQC for unknown
+ * cmd, EFAULT for invalid address, EPERM for nonpriv. later
+ * works as the input of VM healthy status.
+ */
 static int command_scan(struct parser_exec_state *s,
 		unsigned long rb_head, unsigned long rb_tail,
 		unsigned long rb_start, unsigned long rb_len)
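The comment added above spells out the convention the scan path now follows: -EBADRQC for an unrecognized or inhibited command, -EFAULT for an address outside the guest-visible range, -EPERM for a forbidden force-nonpriv write, with the code later feeding a judgement of the VM's health. A hypothetical consumer of that convention might classify the result as in the sketch below; the enum and function names are illustrative only and are not part of the i915/GVT code:

/* Hypothetical consumer of command_scan()'s return value (sketch only). */
#include <errno.h>
#include <stdio.h>

enum vgpu_health { VGPU_HEALTHY, VGPU_GUEST_BUG, VGPU_GUEST_MALICIOUS };

static enum vgpu_health classify_scan_result(int ret)
{
	switch (ret) {
	case 0:
		return VGPU_HEALTHY;
	case -EBADRQC:		/* unknown or inhibited command */
	case -EFAULT:		/* address outside the guest-visible range */
		return VGPU_GUEST_BUG;
	case -EPERM:		/* non-whitelisted force-nonpriv write */
		return VGPU_GUEST_MALICIOUS;
	default:
		return VGPU_GUEST_BUG;
	}
}

int main(void)
{
	printf("health class for -EPERM: %d\n", classify_scan_result(-EPERM));
	return 0;
}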
@@ -2487,7 +2506,7 @@ static int command_scan(struct parser_exec_state *s,
 				s->ip_gma, rb_start,
 				gma_bottom);
 		parser_exec_state_dump(s);
-		return -EINVAL;
+		return -EFAULT;
 	}
 	if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
 		gvt_vgpu_err("ip_gma %lx out of range."
@@ -2516,7 +2535,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	int ret = 0;
 
 	/* ring base is page aligned */
-	if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+	if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
 		return -EINVAL;
 
 	gma_head = workload->rb_start + workload->rb_head;
@@ -2565,7 +2584,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 					wa_ctx);
 
 	/* ring base is page aligned */
-	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
+					I915_GTT_PAGE_SIZE)))
 		return -EINVAL;
 
 	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
@@ -2604,6 +2624,7 @@ out:
 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
 	void *shadow_ring_buffer_va;
 	int ring_id = workload->ring_id;
@@ -2619,19 +2640,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_tail = workload->rb_start + workload->rb_tail;
 	gma_top = workload->rb_start + guest_rb_size;
 
-	if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
-		void *va = vgpu->reserve_ring_buffer_va[ring_id];
+	if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+		void *p;
+
 		/* realloc the new ring buffer if needed */
-		vgpu->reserve_ring_buffer_va[ring_id] =
-			krealloc(va, workload->rb_len, GFP_KERNEL);
-		if (!vgpu->reserve_ring_buffer_va[ring_id]) {
-			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
+			GFP_KERNEL);
+		if (!p) {
+			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
 			return -ENOMEM;
 		}
-		vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
+		s->ring_scan_buffer[ring_id] = p;
+		s->ring_scan_buffer_size[ring_id] = workload->rb_len;
 	}
 
-	shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
+	shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
 
 	/* get shadow ring buffer va */
 	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
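The scan buffer above is grown on demand: when a workload's ring contents exceed the capacity recorded so far, the buffer is krealloc()'d and the new size remembered. Assigning the result to a temporary first means the previously allocated buffer is not lost if the allocation fails. A minimal user-space sketch of the same grow-only pattern (scan_buf and ensure_scan_buf() are stand-ins, not driver names):

/* Sketch of a grow-only scratch buffer; realloc() plays the role of krealloc(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *scan_buf;
static size_t scan_buf_size;

static int ensure_scan_buf(size_t needed)
{
	void *p;

	if (needed <= scan_buf_size)
		return 0;		/* current buffer is already big enough */

	p = realloc(scan_buf, needed);
	if (!p)
		return -1;		/* old buffer stays valid on failure */

	scan_buf = p;
	scan_buf_size = needed;
	return 0;
}

int main(void)
{
	size_t lens[] = { 4096, 1024, 16384 };
	size_t i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		if (ensure_scan_buf(lens[i]))
			return 1;
		memset(scan_buf, 0, lens[i]);	/* pretend to copy a ring */
		printf("capacity now %zu bytes\n", scan_buf_size);
	}
	free(scan_buf);
	return 0;
}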