Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c   |   8
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c     |   3
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c    | 109
-rw-r--r--  drivers/gpu/drm/i915/gvt/debug.h         |   8
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c          |  16
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c      |  32
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c      |   9
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c           |  82
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c           |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h           |   7
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c      |  55
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c         |  44
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c          |  38
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h          |   3
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c      |  10
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c        |   4
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c  |   4
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c     | 113
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c          |  45
19 files changed, 363 insertions(+), 229 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 3b6caaca9751..325618d969fe 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	const char *item;
 
 	if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
-		gvt_err("Invalid vGPU creation params\n");
+		gvt_vgpu_err("Invalid vGPU creation params\n");
 		return -EINVAL;
 	}
 
@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	return 0;
 
 no_enough_resource:
-	gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
-	gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
-		vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+	gvt_vgpu_err("fail to allocate resource %s\n", item);
+	gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+		BYTES_TO_MB(request), BYTES_TO_MB(avail),
 		BYTES_TO_MB(max), BYTES_TO_MB(taken));
 	return -ENOSPC;
 }
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index b7d7721e72fa..40af17ec6312 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
 	int ret;
 
-	if (vgpu->failsafe)
-		return 0;
-
 	if (WARN_ON(bytes > 4))
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7ae6e2b241c8..2b92cc8a7d1a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
 	return ret;
 }
 
+static inline bool is_force_nonpriv_mmio(unsigned int offset)
+{
+	return (offset >= 0x24d0 && offset < 0x2500);
+}
+
+static int force_nonpriv_reg_handler(struct parser_exec_state *s,
+		unsigned int offset, unsigned int index)
+{
+	struct intel_gvt *gvt = s->vgpu->gvt;
+	unsigned int data = cmd_val(s, index + 1);
+
+	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
+			offset, data);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
 		unsigned int offset, unsigned int index, char *cmd)
 {
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	struct intel_gvt *gvt = vgpu->gvt;
 
 	if (offset + 4 > gvt->device_info.mmio_size) {
-		gvt_err("%s access to (%x) outside of MMIO range\n",
+		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
 				cmd, offset);
 		return -EINVAL;
 	}
 
 	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
-		gvt_err("vgpu%d: %s access to non-render register (%x)\n",
-				s->vgpu->id, cmd, offset);
+		gvt_vgpu_err("%s access to non-render register (%x)\n",
+				cmd, offset);
 		return 0;
 	}
 
 	if (is_shadowed_mmio(offset)) {
-		gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
-			s->vgpu->id, offset);
+		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
 		return 0;
 	}
 
+	if (is_force_nonpriv_mmio(offset) &&
+		force_nonpriv_reg_handler(s, offset, index))
+		return -EINVAL;
+
 	if (offset == i915_mmio_reg_offset(DERRMR) ||
 		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
 		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
 		ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
 	else if (post_sync == 1) {
 		/* check ggtt*/
-		if ((cmd_val(s, 2) & (1 << 2))) {
+		if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
 			gma = cmd_val(s, 2) & GENMASK(31, 3);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
 	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct intel_vgpu *vgpu = s->vgpu;
 	u32 dword0 = cmd_val(s, 0);
 	u32 dword1 = cmd_val(s, 1);
 	u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		break;
 
 	default:
-		gvt_err("unknown plane code %d\n", plane);
+		gvt_vgpu_err("unknown plane code %d\n", plane);
 		return -EINVAL;
 	}
 
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
 {
 	struct mi_display_flip_command_info info;
+	struct intel_vgpu *vgpu = s->vgpu;
 	int ret;
 	int i;
 	int len = cmd_length(s);
 
 	ret = decode_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("fail to decode MI display flip command\n");
+		gvt_vgpu_err("fail to decode MI display flip command\n");
 		return ret;
 	}
 
 	ret = check_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("invalid MI display flip command\n");
+		gvt_vgpu_err("invalid MI display flip command\n");
 		return ret;
 	}
 
 	ret = update_plane_mmio_from_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("fail to update plane mmio\n");
+		gvt_vgpu_err("fail to update plane mmio\n");
 		return ret;
 	}
 
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	int ret;
 
 	if (op_size > max_surface_size) {
-		gvt_err("command address audit fail name %s\n", s->info->name);
+		gvt_vgpu_err("command address audit fail name %s\n",
+				s->info->name);
 		return -EINVAL;
 	}
 
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	}
 	return 0;
 err:
-	gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
 			s->info->name, guest_gma, op_size);
 
 	pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
 
 static inline int unexpected_cmd(struct parser_exec_state *s)
 {
-	gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
-		s->vgpu->id, s->info->name);
+	struct intel_vgpu *vgpu = s->vgpu;
+
+	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
+
 	return -EINVAL;
 }
 
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 	while (gma != end_gma) {
 		gpa = intel_vgpu_gma_to_gpa(mm, gma);
 		if (gpa == INTEL_GVT_INVALID_ADDR) {
-			gvt_err("invalid gma address: %lx\n", gma);
+			gvt_vgpu_err("invalid gma address: %lx\n", gma);
 			return -EFAULT;
 		}
 
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 	uint32_t bb_size = 0;
 	uint32_t cmd_len = 0;
 	bool met_bb_end = false;
+	struct intel_vgpu *vgpu = s->vgpu;
 	u32 cmd;
 
 	/* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
-		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 			cmd, get_opcode(cmd, s->ring_id));
 		return -EINVAL;
 	}
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 				gma, gma + 4, &cmd);
 		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 		if (info == NULL) {
-			gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
 			return -EINVAL;
 		}
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
 	struct intel_shadow_bb_entry *entry_obj;
+	struct intel_vgpu *vgpu = s->vgpu;
 	unsigned long gma = 0;
 	uint32_t bb_size;
 	void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 
 	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
 	if (ret) {
-		gvt_err("failed to set shadow batch to CPU\n");
+		gvt_vgpu_err("failed to set shadow batch to CPU\n");
 		goto unmap_src;
 	}
 
@@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 			      gma, gma + bb_size,
 			      dst);
 	if (ret) {
-		gvt_err("fail to copy guest ring buffer\n");
+		gvt_vgpu_err("fail to copy guest ring buffer\n");
 		goto unmap_src;
 	}
 
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 {
 	bool second_level;
 	int ret = 0;
+	struct intel_vgpu *vgpu = s->vgpu;
 
 	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
-		gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
 		return -EINVAL;
 	}
 
 	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
 	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
-		gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
 		return -EINVAL;
 	}
 
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 	if (batch_buffer_needs_scan(s)) {
 		ret = perform_bb_shadow(s);
 		if (ret < 0)
-			gvt_err("invalid shadow batch buffer\n");
+			gvt_vgpu_err("invalid shadow batch buffer\n");
 	} else {
 		/* emulate a batch buffer end to do return right */
 		ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	int ret = 0;
 	cycles_t t0, t1, t2;
 	struct parser_exec_state s_before_advance_custom;
+	struct intel_vgpu *vgpu = s->vgpu;
 
 	t0 = get_cycles();
 
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
-		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 			cmd, get_opcode(cmd, s->ring_id));
 		return -EINVAL;
 	}
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (info->handler) {
 		ret = info->handler(s);
 		if (ret < 0) {
-			gvt_err("%s handler error\n", info->name);
+			gvt_vgpu_err("%s handler error\n", info->name);
 			return ret;
 		}
 	}
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
 		ret = cmd_advance_default(s);
 		if (ret) {
-			gvt_err("%s IP advance error\n", info->name);
+			gvt_vgpu_err("%s IP advance error\n", info->name);
 			return ret;
 		}
 	}
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
 
 	unsigned long gma_head, gma_tail, gma_bottom;
 	int ret = 0;
+	struct intel_vgpu *vgpu = s->vgpu;
 
 	gma_head = rb_start + rb_head;
 	gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
 	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
 		if (!(s->ip_gma >= rb_start) ||
 			!(s->ip_gma < gma_bottom)) {
-			gvt_err("ip_gma %lx out of ring scope."
+			gvt_vgpu_err("ip_gma %lx out of ring scope."
 				"(base:0x%lx, bottom: 0x%lx)\n",
 				s->ip_gma, rb_start,
 				gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
 			return -EINVAL;
 		}
 		if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
-			gvt_err("ip_gma %lx out of range."
+			gvt_vgpu_err("ip_gma %lx out of range."
 				"base 0x%lx head 0x%lx tail 0x%lx\n",
 				s->ip_gma, rb_start,
 				rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
 		}
 		ret = cmd_parser_exec(s);
 		if (ret) {
-			gvt_err("cmd parser error\n");
+			gvt_vgpu_err("cmd parser error\n");
 			parser_exec_state_dump(s);
 			break;
 		}
@@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 				gma_head, gma_top,
 				workload->shadow_ring_buffer_va);
 		if (ret) {
-			gvt_err("fail to copy guest ring buffer\n");
+			gvt_vgpu_err("fail to copy guest ring buffer\n");
 			return ret;
 		}
 		copy_len = gma_top - gma_head;
@@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 			gma_head, gma_tail,
 			workload->shadow_ring_buffer_va + copy_len);
 	if (ret) {
-		gvt_err("fail to copy guest ring buffer\n");
+		gvt_vgpu_err("fail to copy guest ring buffer\n");
 		return ret;
 	}
 	ring->tail += workload->rb_len;
@@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
 	int ret;
+	struct intel_vgpu *vgpu = workload->vgpu;
 
 	ret = shadow_workload_ring_buffer(workload);
 	if (ret) {
-		gvt_err("fail to shadow workload ring_buffer\n");
+		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
 		return ret;
 	}
 
 	ret = scan_workload(workload);
 	if (ret) {
-		gvt_err("scan workload error\n");
+		gvt_vgpu_err("scan workload error\n");
 		return ret;
 	}
 	return 0;
@@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	int ctx_size = wa_ctx->indirect_ctx.size;
 	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 	void *map;
@@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	/* get the va of the shadow batch buffer */
 	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
 	if (IS_ERR(map)) {
-		gvt_err("failed to vmap shadow indirect ctx\n");
+		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
 		ret = PTR_ERR(map);
 		goto put_obj;
 	}
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 	if (ret) {
-		gvt_err("failed to set shadow indirect ctx to CPU\n");
+		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
 		goto unmap_src;
 	}
 
@@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 				guest_gma, guest_gma + ctx_size,
 				map);
 	if (ret) {
-		gvt_err("fail to copy guest indirect ctx\n");
+		gvt_vgpu_err("fail to copy guest indirect ctx\n");
 		goto unmap_src;
 	}
 
@@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	int ret;
+	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
 
 	if (wa_ctx->indirect_ctx.size == 0)
 		return 0;
 
 	ret = shadow_indirect_ctx(wa_ctx);
 	if (ret) {
-		gvt_err("fail to shadow indirect ctx\n");
+		gvt_vgpu_err("fail to shadow indirect ctx\n");
 		return ret;
 	}
 
@@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 	ret = scan_wa_ctx(wa_ctx);
 	if (ret) {
-		gvt_err("scan wa ctx error\n");
+		gvt_vgpu_err("scan wa ctx error\n");
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 68cba7bd980a..b0cff4dc2684 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -27,6 +27,14 @@
 #define gvt_err(fmt, args...) \
 	DRM_ERROR("gvt: "fmt, ##args)
 
+#define gvt_vgpu_err(fmt, args...) \
+do { \
+	if (IS_ERR_OR_NULL(vgpu)) \
+		DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
+	else \
+		DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+} while (0)
+
 #define gvt_dbg_core(fmt, args...) \
 	DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
 
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index bda85dff7b2a..42cd09ec63fa 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
 	unsigned char chr = 0;
 
 	if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
-		gvt_err("Driver tries to read EDID without proper sequence!\n");
+		gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
 		return 0;
 	}
 	if (edid->current_edid_read >= EDID_SIZE) {
-		gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+		gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
 		return 0;
 	}
 
 	if (!edid->edid_available) {
-		gvt_err("Reading EDID but EDID is not available!\n");
+		gvt_vgpu_err("Reading EDID but EDID is not available!\n");
 		return 0;
 	}
 
@@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
 		chr = edid_data->edid_block[edid->current_edid_read];
 		edid->current_edid_read++;
 	} else {
-		gvt_err("No EDID available during the reading?\n");
+		gvt_vgpu_err("No EDID available during the reading?\n");
 	}
 	return chr;
 }
@@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
 		break;
 	default:
-		gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+		gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
 		break;
 	}
 	/*
@@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 	 */
 	} else {
 		memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
-		gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
-				vgpu->id);
+		gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
 	}
 	return 0;
 }
@@ -496,7 +495,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
 			unsigned char val = edid_get_byte(vgpu);
 
 			aux_data_for_write = (val << 16);
-		}
+		} else
+			aux_data_for_write = (0xff << 16);
 	}
 	/* write the return value in AUX_CH_DATA reg which includes:
 	 * ACK of I2C_WRITE
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 46eb9fd3c03f..d186c157f65f 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
 		struct intel_vgpu_execlist *execlist,
 		struct execlist_ctx_descriptor_format *ctx)
 {
+	struct intel_vgpu *vgpu = execlist->vgpu;
 	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
 	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
 	struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
 	gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
 
 	if (WARN_ON(!same_context(ctx, execlist->running_context))) {
-		gvt_err("schedule out context is not running context,"
+		gvt_vgpu_err("schedule out context is not running context,"
 				"ctx id %x running ctx id %x\n",
 				ctx->context_id,
 				execlist->running_context->context_id);
@@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
 	status.udw = vgpu_vreg(vgpu, status_reg + 4);
 
 	if (status.execlist_queue_full) {
-		gvt_err("virtual execlist slots are full\n");
+		gvt_vgpu_err("virtual execlist slots are full\n");
 		return NULL;
 	}
 
@@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
 
 	struct execlist_ctx_descriptor_format *ctx0, *ctx1;
 	struct execlist_context_status_format status;
+	struct intel_vgpu *vgpu = execlist->vgpu;
 
 	gvt_dbg_el("emulate schedule-in\n");
 
 	if (!slot) {
-		gvt_err("no available execlist slot\n");
+		gvt_vgpu_err("no available execlist slot\n");
 		return -EINVAL;
 	}
 
@@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 	vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
 	if (IS_ERR(vma)) {
-		gvt_err("Cannot pin\n");
 		return;
 	}
 
@@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
 				       0, CACHELINE_BYTES, 0);
 	if (IS_ERR(vma)) {
-		gvt_err("Cannot pin indirect ctx obj\n");
 		return;
 	}
 
@@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 {
 	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
 	struct intel_vgpu_mm *mm;
+	struct intel_vgpu *vgpu = workload->vgpu;
 	int page_table_level;
 	u32 pdp[8];
 
@@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 	} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
 		page_table_level = 4;
 	} else {
-		gvt_err("Advanced Context mode(SVM) is not supported!\n");
+		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
 		return -EINVAL;
 	}
 
@@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
 				pdp, page_table_level, 0);
 		if (IS_ERR(mm)) {
-			gvt_err("fail to create mm object.\n");
+			gvt_vgpu_err("fail to create mm object.\n");
 			return PTR_ERR(mm);
 		}
 	}
@@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
 			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
 	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
 		return -EINVAL;
 	}
 
@@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 			continue;
 
 		if (!desc[i]->privilege_access) {
-			gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
-				vgpu->id);
+			gvt_vgpu_err("unexpected GGTT elsp submission\n");
 			return -EINVAL;
 		}
 
@@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 	}
 
 	if (!valid_desc_bitmap) {
-		gvt_err("vgpu%d: no valid desc in a elsp submission\n",
-			vgpu->id);
+		gvt_vgpu_err("no valid desc in a elsp submission\n");
 		return -EINVAL;
 	}
 
 	if (!test_bit(0, (void *)&valid_desc_bitmap) &&
 			test_bit(1, (void *)&valid_desc_bitmap)) {
-		gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
-			vgpu->id);
+		gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
 		return -EINVAL;
 	}
 
@@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 		ret = submit_context(vgpu, ring_id, &valid_desc[i],
 				emulate_schedule_in);
 		if (ret) {
-			gvt_err("vgpu%d: fail to schedule workload\n",
-				vgpu->id);
+			gvt_vgpu_err("fail to schedule workload\n");
 			return ret;
 		}
 		emulate_schedule_in = false;
@@ -778,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 			_EL_OFFSET_STATUS_PTR);
 
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
-	ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+	ctx_status_ptr.read_ptr = 0;
+	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 933a7c211a1c..dce8d15f706f 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
-	unsigned long size;
+	unsigned long size, crc32_start;
 	int i;
 	int ret;
 
-	size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
 	firmware = vzalloc(size);
 	if (!firmware)
 		return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
+	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+	h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
+
 	firmware_attr.size = size;
 	firmware_attr.private = firmware;
 
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
 	firmware->mmio = mem;
 
-	sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+	sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
 		GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
 		pdev->revision);
 
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6a5ff23ded90..b832bea64e03 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 {
 	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
 			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
-		gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
-				vgpu->id, addr, size);
+		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
+				addr, size);
 		return false;
 	}
 	return true;
@@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
 
 	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
 	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+		gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
 		return -ENXIO;
 	}
 
@@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
 
 	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(kdev, daddr)) {
-		gvt_err("fail to map dma addr\n");
+		gvt_vgpu_err("fail to map dma addr\n");
 		return -EINVAL;
 	}
 
@@ -735,7 +735,7 @@ retry:
 		if (reclaim_one_mm(vgpu->gvt))
 			goto retry;
 
-		gvt_err("fail to allocate ppgtt shadow page\n");
+		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -750,14 +750,14 @@ retry:
 	 */
 	ret = init_shadow_page(vgpu, &spt->shadow_page, type);
 	if (ret) {
-		gvt_err("fail to initialize shadow page for spt\n");
+		gvt_vgpu_err("fail to initialize shadow page for spt\n");
 		goto err;
 	}
 
 	ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
 			gfn, ppgtt_write_protection_handler, NULL);
 	if (ret) {
-		gvt_err("fail to initialize guest page for spt\n");
+		gvt_vgpu_err("fail to initialize guest page for spt\n");
 		goto err;
 	}
 
@@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
 	if (p)
 		return shadow_page_to_ppgtt_spt(p);
 
-	gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
-			vgpu->id, mfn);
+	gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
 	return NULL;
 }
 
@@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
 	}
 	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
 	if (!s) {
-		gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
-				vgpu->id, ops->get_pfn(e));
+		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
+				ops->get_pfn(e));
 		return -ENXIO;
 	}
 	return ppgtt_invalidate_shadow_page(s);
@@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
 
 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
+	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt_gtt_entry e;
 	unsigned long index;
 	int ret;
@@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
 	for_each_present_shadow_entry(spt, &e, index) {
 		if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
-			gvt_err("GVT doesn't support pse bit for now\n");
+			gvt_vgpu_err("GVT doesn't support pse bit for now\n");
 			return -EINVAL;
 		}
 		ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@@ -868,8 +868,8 @@ release:
 	ppgtt_free_shadow_page(spt);
 	return 0;
 fail:
-	gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
-			spt->vgpu->id, spt, e.val64, e.type);
+	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
+			spt, e.val64, e.type);
 	return ret;
 }
 
@@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
 	}
 	return s;
 fail:
-	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-			vgpu->id, s, we->val64, we->type);
+	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+			s, we->val64, we->type);
 	return ERR_PTR(ret);
 }
 
@@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
 	for_each_present_guest_entry(spt, &ge, i) {
 		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
-			gvt_err("GVT doesn't support pse bit now\n");
+			gvt_vgpu_err("GVT doesn't support pse bit now\n");
 			ret = -EINVAL;
 			goto fail;
 		}
@@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	}
 	return 0;
 fail:
-	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-			vgpu->id, spt, ge.val64, ge.type);
+	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+			spt, ge.val64, ge.type);
 	return ret;
 }
 
@@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
 		struct intel_vgpu_ppgtt_spt *s =
 			ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
 		if (!s) {
-			gvt_err("fail to find guest page\n");
+			gvt_vgpu_err("fail to find guest page\n");
 			ret = -ENXIO;
 			goto fail;
 		}
@@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
 	ppgtt_set_shadow_entry(spt, &e, index);
 	return 0;
 fail:
-	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-			vgpu->id, spt, e.val64, e.type);
+	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+			spt, e.val64, e.type);
 	return ret;
 }
 
@@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
 	}
 	return 0;
 fail:
-	gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
-		spt, we->val64, we->type);
+	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
+		spt, we->val64, we->type);
 	return ret;
 }
 
@@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
 	}
 	return 0;
 fail:
-	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
-			vgpu->id, spt, we->val64, we->type);
+	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
+			spt, we->val64, we->type);
 	return ret;
 }
 
@@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
 
 	spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
 	if (IS_ERR(spt)) {
-		gvt_err("fail to populate guest root pointer\n");
+		gvt_vgpu_err("fail to populate guest root pointer\n");
 		ret = PTR_ERR(spt);
 		goto fail;
 	}
@@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
 
 	ret = gtt->mm_alloc_page_table(mm);
 	if (ret) {
-		gvt_err("fail to allocate page table for mm\n");
+		gvt_vgpu_err("fail to allocate page table for mm\n");
 		goto fail;
 	}
 
@@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
 	}
 	return mm;
 fail:
-	gvt_err("fail to create mm\n");
+	gvt_vgpu_err("fail to create mm\n");
 	if (mm)
 		intel_gvt_mm_unreference(mm);
 	return ERR_PTR(ret);
@@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 			mm->page_table_level, gma, gpa);
 	return gpa;
 err:
-	gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
 	return INTEL_GVT_INVALID_ADDR;
 }
 
@@ -1836,13 +1836,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	if (ops->test_present(&e)) {
 		ret = gtt_entry_p2m(vgpu, &e, &m);
 		if (ret) {
-			gvt_err("vgpu%d: fail to translate guest gtt entry\n",
-				vgpu->id);
-			return ret;
+			gvt_vgpu_err("fail to translate guest gtt entry\n");
+			/* guest driver may read/write the entry when partial
+			 * update the entry in this situation p2m will fail
+			 * setting the shadow entry to point to a scratch page
+			 */
+			ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
 		}
 	} else {
 		m = e;
-		m.val64 = 0;
+		ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
 	}
 
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
@@ -1893,14 +1896,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 
 	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!scratch_pt) {
-		gvt_err("fail to allocate scratch page\n");
+		gvt_vgpu_err("fail to allocate scratch page\n");
 		return -ENOMEM;
 	}
 
 	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
 			4096, PCI_DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, daddr)) {
-		gvt_err("fail to dmamap scratch_pt\n");
+		gvt_vgpu_err("fail to dmamap scratch_pt\n");
 		__free_page(virt_to_page(scratch_pt));
 		return -ENOMEM;
 	}
@@ -2003,7 +2006,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
 			NULL, 1, 0);
 	if (IS_ERR(ggtt_mm)) {
-		gvt_err("fail to create mm for ggtt.\n");
+		gvt_vgpu_err("fail to create mm for ggtt.\n");
 		return PTR_ERR(ggtt_mm);
 	}
 
@@ -2076,7 +2079,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
 	for (i = 0; i < preallocated_oos_pages; i++) {
 		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
 		if (!oos_page) {
-			gvt_err("fail to pre-allocate oos page\n");
 			ret = -ENOMEM;
 			goto fail;
 		}
@@ -2166,7 +2168,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
 		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
 				pdp, page_table_level, 0);
 		if (IS_ERR(mm)) {
-			gvt_err("fail to create mm\n");
+			gvt_vgpu_err("fail to create mm\n");
 			return PTR_ERR(mm);
 		}
 	}
@@ -2196,7 +2198,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 
 	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
 	if (!mm) {
-		gvt_err("fail to find ppgtt instance.\n");
+		gvt_vgpu_err("fail to find ppgtt instance.\n");
 		return -EINVAL;
 	}
 	intel_gvt_mm_unreference(mm);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 3b9d59e457ba..ef3baa0c4754 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.vgpu_create = intel_gvt_create_vgpu,
 	.vgpu_destroy = intel_gvt_destroy_vgpu,
 	.vgpu_reset = intel_gvt_reset_vgpu,
+	.vgpu_activate = intel_gvt_activate_vgpu,
+	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
 };
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 23791920ced1..becae2fa3b29 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
 	atomic_t running_workload_num;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
-	struct notifier_block shadow_ctx_notifier_block;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
 	struct intel_gvt_gtt gtt;
 	struct intel_gvt_opregion opregion;
 	struct intel_gvt_workload_scheduler scheduler;
+	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
 	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
 	struct intel_vgpu_type *types;
 	unsigned int num_types;
@@ -382,7 +382,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 				 unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
-
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
 
 /* validating GM functions */
 #define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -449,6 +450,8 @@ struct intel_gvt_ops {
 			struct intel_vgpu_type *);
 	void (*vgpu_destroy)(struct intel_vgpu *);
 	void (*vgpu_reset)(struct intel_vgpu *);
+	void (*vgpu_activate)(struct intel_vgpu *);
+	void (*vgpu_deactivate)(struct intel_vgpu *);
 };
 
 
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8e43395c748a..6da9ae1618e3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
 				GVT_FAILSAFE_UNSUPPORTED_GUEST);
 
 		if (!vgpu->mmio.disable_warn_untrack) {
-			gvt_err("vgpu%d: found oob fence register access\n",
-				vgpu->id);
-			gvt_err("vgpu%d: total fence %d, access fence %d\n",
-				vgpu->id, vgpu_fence_sz(vgpu),
-				fence_num);
+			gvt_vgpu_err("found oob fence register access\n");
+			gvt_vgpu_err("total fence %d, access fence %d\n",
+					vgpu_fence_sz(vgpu), fence_num);
 		}
 		memset(p_data, 0, bytes);
 		return -EINVAL;
@@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
 			break;
 		default:
 			/*should not hit here*/
-			gvt_err("invalid forcewake offset 0x%x\n", offset);
+			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
 			return -EINVAL;
 		}
 	} else {
@@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
 		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
 		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
 	} else {
-		gvt_err("Invalid train pattern %d\n", train_pattern);
+		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
 		return -EINVAL;
 	}
 
@@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
 	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
 		index = FDI_RX_IMR_TO_PIPE(offset);
 	else {
-		gvt_err("Unsupport registers %x\n", offset);
+		gvt_vgpu_err("Unsupport registers %x\n", offset);
 		return -EINVAL;
 	}
 
@@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
 	u32 data;
 
 	if (!dpy_is_valid_port(port_index)) {
-		gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+		gvt_vgpu_err("Unsupported DP port access!\n");
 		return 0;
 	}
 
@@ -972,6 +970,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
+	write_vreg(vgpu, offset, p_data, bytes);
+	return 0;
+}
+
 static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
@@ -1016,8 +1022,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
 
 	if (i == num) {
 		if (num == SBI_REG_MAX) {
-			gvt_err("vgpu%d: SBI caching meets maximum limits\n",
-				vgpu->id);
+			gvt_vgpu_err("SBI caching meets maximum limits\n");
 			return;
 		}
 		display->sbi.number++;
@@ -1097,7 +1102,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 		break;
 	}
 	if (invalid_read)
-		gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+		gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
 			offset, bytes, *(u32 *)p_data);
 	vgpu->pv_notified = true;
 	return 0;
@@ -1125,7 +1130,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
1125 case 1: /* Remove this in guest driver. */ 1130 case 1: /* Remove this in guest driver. */
1126 break; 1131 break;
1127 default: 1132 default:
1128 gvt_err("Invalid PV notification %d\n", notification); 1133 gvt_vgpu_err("Invalid PV notification %d\n", notification);
1129 } 1134 }
1130 return ret; 1135 return ret;
1131} 1136}
@@ -1181,7 +1186,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1181 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); 1186 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
1182 break; 1187 break;
1183 default: 1188 default:
1184 gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", 1189 gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
1185 offset, bytes, data); 1190 offset, bytes, data);
1186 break; 1191 break;
1187 } 1192 }
@@ -1415,7 +1420,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1415 if (execlist->elsp_dwords.index == 3) { 1420 if (execlist->elsp_dwords.index == 3) {
1416 ret = intel_vgpu_submit_execlist(vgpu, ring_id); 1421 ret = intel_vgpu_submit_execlist(vgpu, ring_id);
1417 if(ret) 1422 if(ret)
1418 gvt_err("fail submit workload on ring %d\n", ring_id); 1423 gvt_vgpu_err("fail submit workload on ring %d\n",
1424 ring_id);
1419 } 1425 }
1420 1426
1421 ++execlist->elsp_dwords.index; 1427 ++execlist->elsp_dwords.index;
@@ -2240,7 +2246,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2240 MMIO_D(0x7180, D_ALL); 2246 MMIO_D(0x7180, D_ALL);
2241 MMIO_D(0x7408, D_ALL); 2247 MMIO_D(0x7408, D_ALL);
2242 MMIO_D(0x7c00, D_ALL); 2248 MMIO_D(0x7c00, D_ALL);
2243 MMIO_D(GEN6_MBCTL, D_ALL); 2249 MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
2244 MMIO_D(0x911c, D_ALL); 2250 MMIO_D(0x911c, D_ALL);
2245 MMIO_D(0x9120, D_ALL); 2251 MMIO_D(0x9120, D_ALL);
2246 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); 2252 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2988,3 +2994,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2988 write_vreg(vgpu, offset, p_data, bytes); 2994 write_vreg(vgpu, offset, p_data, bytes);
2989 return 0; 2995 return 0;
2990} 2996}
2997
2998/**
 2999 * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO offset is
 3000 * in the force-nonpriv whitelist
3001 *
3002 * @gvt: a GVT device
3003 * @offset: register offset
3004 *
3005 * Returns:
 3006 * True if the register is in the force-nonpriv whitelist;
 3007 * false otherwise.
3008 */
3009bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3010 unsigned int offset)
3011{
3012 return in_whitelist(offset);
3013}
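Most of the churn in this file is the mechanical gvt_err() to gvt_vgpu_err() conversion. Per the diffstat, the new macro lives in debug.h; the idea (a sketch, not the exact definition) is that it picks up a `vgpu` pointer already in scope at the call site and prefixes the vGPU id itself, which is why call sites stop passing vgpu->id by hand, and why intel_vgpu_create() below initializes its local vgpu to NULL before first use:

	#define gvt_vgpu_err(fmt, args...)				\
	do {								\
		if (IS_ERR_OR_NULL(vgpu))				\
			pr_err("gvt: " fmt, ##args);			\
		else							\
			pr_err("gvt: vgpu %d: " fmt, vgpu->id, ##args);	\
	} while (0)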
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 84d801638ede..e466259034e2 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
426 426
427static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) 427static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
428{ 428{
429 struct intel_vgpu *vgpu; 429 struct intel_vgpu *vgpu = NULL;
430 struct intel_vgpu_type *type; 430 struct intel_vgpu_type *type;
431 struct device *pdev; 431 struct device *pdev;
432 void *gvt; 432 void *gvt;
@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
437 437
438 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); 438 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
439 if (!type) { 439 if (!type) {
440 gvt_err("failed to find type %s to create\n", 440 gvt_vgpu_err("failed to find type %s to create\n",
441 kobject_name(kobj)); 441 kobject_name(kobj));
442 ret = -EINVAL; 442 ret = -EINVAL;
443 goto out; 443 goto out;
@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
446 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 446 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
447 if (IS_ERR_OR_NULL(vgpu)) { 447 if (IS_ERR_OR_NULL(vgpu)) {
448 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); 448 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
449 gvt_err("failed to create intel vgpu: %d\n", ret); 449 gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
450 goto out; 450 goto out;
451 } 451 }
452 452
@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
526 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, 526 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
527 &vgpu->vdev.iommu_notifier); 527 &vgpu->vdev.iommu_notifier);
528 if (ret != 0) { 528 if (ret != 0) {
529 gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); 529 gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
530 ret);
530 goto out; 531 goto out;
531 } 532 }
532 533
@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
534 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, 535 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
535 &vgpu->vdev.group_notifier); 536 &vgpu->vdev.group_notifier);
536 if (ret != 0) { 537 if (ret != 0) {
537 gvt_err("vfio_register_notifier for group failed: %d\n", ret); 538 gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
539 ret);
538 goto undo_iommu; 540 goto undo_iommu;
539 } 541 }
540 542
@@ -542,6 +544,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
542 if (ret) 544 if (ret)
543 goto undo_group; 545 goto undo_group;
544 546
547 intel_gvt_ops->vgpu_activate(vgpu);
548
545 atomic_set(&vgpu->vdev.released, 0); 549 atomic_set(&vgpu->vdev.released, 0);
546 return ret; 550 return ret;
547 551
@@ -567,6 +571,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
567 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) 571 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
568 return; 572 return;
569 573
574 intel_gvt_ops->vgpu_deactivate(vgpu);
575
570 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, 576 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
571 &vgpu->vdev.iommu_notifier); 577 &vgpu->vdev.iommu_notifier);
572 WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); 578 WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
@@ -635,7 +641,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
635 641
636 642
637 if (index >= VFIO_PCI_NUM_REGIONS) { 643 if (index >= VFIO_PCI_NUM_REGIONS) {
638 gvt_err("invalid index: %u\n", index); 644 gvt_vgpu_err("invalid index: %u\n", index);
639 return -EINVAL; 645 return -EINVAL;
640 } 646 }
641 647
@@ -669,7 +675,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
669 case VFIO_PCI_VGA_REGION_INDEX: 675 case VFIO_PCI_VGA_REGION_INDEX:
670 case VFIO_PCI_ROM_REGION_INDEX: 676 case VFIO_PCI_ROM_REGION_INDEX:
671 default: 677 default:
672 gvt_err("unsupported region: %u\n", index); 678 gvt_vgpu_err("unsupported region: %u\n", index);
673 } 679 }
674 680
675 return ret == 0 ? count : ret; 681 return ret == 0 ? count : ret;
@@ -861,7 +867,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
861 867
862 trigger = eventfd_ctx_fdget(fd); 868 trigger = eventfd_ctx_fdget(fd);
863 if (IS_ERR(trigger)) { 869 if (IS_ERR(trigger)) {
864 gvt_err("eventfd_ctx_fdget failed\n"); 870 gvt_vgpu_err("eventfd_ctx_fdget failed\n");
865 return PTR_ERR(trigger); 871 return PTR_ERR(trigger);
866 } 872 }
867 vgpu->vdev.msi_trigger = trigger; 873 vgpu->vdev.msi_trigger = trigger;
@@ -1120,7 +1126,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1120 ret = vfio_set_irqs_validate_and_prepare(&hdr, max, 1126 ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1121 VFIO_PCI_NUM_IRQS, &data_size); 1127 VFIO_PCI_NUM_IRQS, &data_size);
1122 if (ret) { 1128 if (ret) {
1123 gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); 1129 gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1124 return -EINVAL; 1130 return -EINVAL;
1125 } 1131 }
1126 if (data_size) { 1132 if (data_size) {
@@ -1310,7 +1316,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1310 1316
1311 kvm = vgpu->vdev.kvm; 1317 kvm = vgpu->vdev.kvm;
1312 if (!kvm || kvm->mm != current->mm) { 1318 if (!kvm || kvm->mm != current->mm) {
1313 gvt_err("KVM is required to use Intel vGPU\n"); 1319 gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1314 return -ESRCH; 1320 return -ESRCH;
1315 } 1321 }
1316 1322
@@ -1324,6 +1330,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1324 vgpu->handle = (unsigned long)info; 1330 vgpu->handle = (unsigned long)info;
1325 info->vgpu = vgpu; 1331 info->vgpu = vgpu;
1326 info->kvm = kvm; 1332 info->kvm = kvm;
1333 kvm_get_kvm(info->kvm);
1327 1334
1328 kvmgt_protect_table_init(info); 1335 kvmgt_protect_table_init(info);
1329 gvt_cache_init(vgpu); 1336 gvt_cache_init(vgpu);
@@ -1337,12 +1344,8 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1337 1344
1338static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) 1345static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1339{ 1346{
1340 if (!info) {
1341 gvt_err("kvmgt_guest_info invalid\n");
1342 return false;
1343 }
1344
1345 kvm_page_track_unregister_notifier(info->kvm, &info->track_node); 1347 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1348 kvm_put_kvm(info->kvm);
1346 kvmgt_protect_table_destroy(info); 1349 kvmgt_protect_table_destroy(info);
1347 gvt_cache_destroy(info->vgpu); 1350 gvt_cache_destroy(info->vgpu);
1348 vfree(info); 1351 vfree(info);
@@ -1383,12 +1386,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1383 unsigned long iova, pfn; 1386 unsigned long iova, pfn;
1384 struct kvmgt_guest_info *info; 1387 struct kvmgt_guest_info *info;
1385 struct device *dev; 1388 struct device *dev;
1389 struct intel_vgpu *vgpu;
1386 int rc; 1390 int rc;
1387 1391
1388 if (!handle_valid(handle)) 1392 if (!handle_valid(handle))
1389 return INTEL_GVT_INVALID_ADDR; 1393 return INTEL_GVT_INVALID_ADDR;
1390 1394
1391 info = (struct kvmgt_guest_info *)handle; 1395 info = (struct kvmgt_guest_info *)handle;
1396 vgpu = info->vgpu;
1392 iova = gvt_cache_find(info->vgpu, gfn); 1397 iova = gvt_cache_find(info->vgpu, gfn);
1393 if (iova != INTEL_GVT_INVALID_ADDR) 1398 if (iova != INTEL_GVT_INVALID_ADDR)
1394 return iova; 1399 return iova;
@@ -1397,13 +1402,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1397 dev = mdev_dev(info->vgpu->vdev.mdev); 1402 dev = mdev_dev(info->vgpu->vdev.mdev);
1398 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); 1403 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
1399 if (rc != 1) { 1404 if (rc != 1) {
1400 gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); 1405 gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
1406 gfn, rc);
1401 return INTEL_GVT_INVALID_ADDR; 1407 return INTEL_GVT_INVALID_ADDR;
1402 } 1408 }
1403 /* transfer to host iova for GFX to use DMA */ 1409 /* transfer to host iova for GFX to use DMA */
1404 rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); 1410 rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
1405 if (rc) { 1411 if (rc) {
1406 gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); 1412 gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
1407 vfio_unpin_pages(dev, &gfn, 1); 1413 vfio_unpin_pages(dev, &gfn, 1);
1408 return INTEL_GVT_INVALID_ADDR; 1414 return INTEL_GVT_INVALID_ADDR;
1409 } 1415 }
@@ -1417,7 +1423,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1417{ 1423{
1418 struct kvmgt_guest_info *info; 1424 struct kvmgt_guest_info *info;
1419 struct kvm *kvm; 1425 struct kvm *kvm;
1420 int ret; 1426 int idx, ret;
1421 bool kthread = current->mm == NULL; 1427 bool kthread = current->mm == NULL;
1422 1428
1423 if (!handle_valid(handle)) 1429 if (!handle_valid(handle))
@@ -1429,8 +1435,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1429 if (kthread) 1435 if (kthread)
1430 use_mm(kvm->mm); 1436 use_mm(kvm->mm);
1431 1437
1438 idx = srcu_read_lock(&kvm->srcu);
1432 ret = write ? kvm_write_guest(kvm, gpa, buf, len) : 1439 ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1433 kvm_read_guest(kvm, gpa, buf, len); 1440 kvm_read_guest(kvm, gpa, buf, len);
1441 srcu_read_unlock(&kvm->srcu, idx);
1434 1442
1435 if (kthread) 1443 if (kthread)
1436 unuse_mm(kvm->mm); 1444 unuse_mm(kvm->mm);
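Beyond the message conversion, two correctness fixes land in kvmgt.c. First, kvmgt_guest_init() now takes a reference on the kvm instance with kvm_get_kvm(), and kvmgt_guest_exit() drops it with kvm_put_kvm(), so the VM cannot be torn down while a vGPU still holds pointers into it. Second, kvmgt_rw_gpa() brackets guest memory access with KVM's SRCU read lock: the memslot array that kvm_read_guest()/kvm_write_guest() walk is SRCU-protected, so readers must hold the lock. The pattern in isolation (variables as in kvmgt_rw_gpa()):

	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* pin the memslot array */
	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);	/* memslots may change again */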
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 60b698cb8365..1ba3bdb09341 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
142 ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, 142 ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
143 p_data, bytes); 143 p_data, bytes);
144 if (ret) { 144 if (ret) {
145 gvt_err("vgpu%d: guest page read error %d, " 145 gvt_vgpu_err("guest page read error %d, "
146 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", 146 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
147 vgpu->id, ret, 147 ret, gp->gfn, pa, *(u32 *)p_data,
148 gp->gfn, pa, *(u32 *)p_data, bytes); 148 bytes);
149 } 149 }
150 mutex_unlock(&gvt->lock); 150 mutex_unlock(&gvt->lock);
151 return ret; 151 return ret;
@@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
200 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 200 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
201 201
202 if (!vgpu->mmio.disable_warn_untrack) { 202 if (!vgpu->mmio.disable_warn_untrack) {
203 gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", 203 gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
204 vgpu->id, offset, bytes, *(u32 *)p_data); 204 offset, bytes, *(u32 *)p_data);
205 205
206 if (offset == 0x206c) { 206 if (offset == 0x206c) {
207 gvt_err("------------------------------------------\n"); 207 gvt_vgpu_err("------------------------------------------\n");
208 gvt_err("vgpu%d: likely triggers a gfx reset\n", 208 gvt_vgpu_err("likely triggers a gfx reset\n");
209 vgpu->id); 209 gvt_vgpu_err("------------------------------------------\n");
210 gvt_err("------------------------------------------\n");
211 vgpu->mmio.disable_warn_untrack = true; 210 vgpu->mmio.disable_warn_untrack = true;
212 } 211 }
213 } 212 }
@@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
220 mutex_unlock(&gvt->lock); 219 mutex_unlock(&gvt->lock);
221 return 0; 220 return 0;
222err: 221err:
223 gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n", 222 gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
224 vgpu->id, offset, bytes); 223 offset, bytes);
225 mutex_unlock(&gvt->lock); 224 mutex_unlock(&gvt->lock);
226 return ret; 225 return ret;
227} 226}
@@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
259 if (gp) { 258 if (gp) {
260 ret = gp->handler(gp, pa, p_data, bytes); 259 ret = gp->handler(gp, pa, p_data, bytes);
261 if (ret) { 260 if (ret) {
262 gvt_err("vgpu%d: guest page write error %d, " 261 gvt_err("guest page write error %d, "
263 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", 262 "gfn 0x%lx, pa 0x%llx, "
264 vgpu->id, ret, 263 "var 0x%x, len %d\n",
265 gp->gfn, pa, *(u32 *)p_data, bytes); 264 ret, gp->gfn, pa,
265 *(u32 *)p_data, bytes);
266 } 266 }
267 mutex_unlock(&gvt->lock); 267 mutex_unlock(&gvt->lock);
268 return ret; 268 return ret;
@@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
329 329
330 /* all register bits are RO. */ 330 /* all register bits are RO. */
331 if (ro_mask == ~(u64)0) { 331 if (ro_mask == ~(u64)0) {
332 gvt_err("vgpu%d: try to write RO reg %x\n", 332 gvt_vgpu_err("try to write RO reg %x\n",
333 vgpu->id, offset); 333 offset);
334 ret = 0; 334 ret = 0;
335 goto out; 335 goto out;
336 } 336 }
@@ -360,8 +360,8 @@ out:
360 mutex_unlock(&gvt->lock); 360 mutex_unlock(&gvt->lock);
361 return 0; 361 return 0;
362err: 362err:
363 gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n", 363 gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
364 vgpu->id, offset, bytes); 364 bytes);
365 mutex_unlock(&gvt->lock); 365 mutex_unlock(&gvt->lock);
366 return ret; 366 return ret;
367} 367}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 3bc620f56f35..a3a027025cd0 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
107 void *p_data, unsigned int bytes); 107 void *p_data, unsigned int bytes);
108int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 108int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
109 void *p_data, unsigned int bytes); 109 void *p_data, unsigned int bytes);
110
111bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
112 unsigned int offset);
110#endif 113#endif
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 5d1caf9daba9..311799136d7f 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
67 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va 67 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
68 + i * PAGE_SIZE); 68 + i * PAGE_SIZE);
69 if (mfn == INTEL_GVT_INVALID_ADDR) { 69 if (mfn == INTEL_GVT_INVALID_ADDR) {
70 gvt_err("fail to get MFN from VA\n"); 70 gvt_vgpu_err("fail to get MFN from VA\n");
71 return -EINVAL; 71 return -EINVAL;
72 } 72 }
73 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, 73 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
74 vgpu_opregion(vgpu)->gfn[i], 74 vgpu_opregion(vgpu)->gfn[i],
75 mfn, 1, map); 75 mfn, 1, map);
76 if (ret) { 76 if (ret) {
77 gvt_err("fail to map GFN to MFN, errno: %d\n", ret); 77 gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
78 ret);
78 return ret; 79 return ret;
79 } 80 }
80 } 81 }
@@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
287 parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; 288 parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
288 289
289 if (!(swsci & SWSCI_SCI_SELECT)) { 290 if (!(swsci & SWSCI_SCI_SELECT)) {
290 gvt_err("vgpu%d: requesting SMI service\n", vgpu->id); 291 gvt_vgpu_err("requesting SMI service\n");
291 return 0; 292 return 0;
292 } 293 }
 293 /* ignore non 0->1 transitions */ 294 /* ignore non 0->1 transitions */
@@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
300 func = GVT_OPREGION_FUNC(*scic); 301 func = GVT_OPREGION_FUNC(*scic);
301 subfunc = GVT_OPREGION_SUBFUNC(*scic); 302 subfunc = GVT_OPREGION_SUBFUNC(*scic);
302 if (!querying_capabilities(*scic)) { 303 if (!querying_capabilities(*scic)) {
303 gvt_err("vgpu%d: requesting runtime service: func \"%s\"," 304 gvt_vgpu_err("requesting runtime service: func \"%s\","
304 " subfunc \"%s\"\n", 305 " subfunc \"%s\"\n",
305 vgpu->id,
306 opregion_func_name(func), 306 opregion_func_name(func),
307 opregion_subfunc_name(subfunc)); 307 opregion_subfunc_name(subfunc));
308 /* 308 /*
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 73f052a4f424..0beb83563b08 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
167 I915_WRITE_FW(reg, 0x1); 167 I915_WRITE_FW(reg, 0x1);
168 168
169 if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) 169 if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
170 gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id); 170 gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
171 else 171 else
172 vgpu_vreg(vgpu, regs[ring_id]) = 0; 172 vgpu_vreg(vgpu, regs[ring_id]) = 0;
173 173
@@ -207,7 +207,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
207 l3_offset.reg = 0xb020; 207 l3_offset.reg = 0xb020;
208 for (i = 0; i < 32; i++) { 208 for (i = 0; i < 32; i++) {
209 gen9_render_mocs_L3[i] = I915_READ(l3_offset); 209 gen9_render_mocs_L3[i] = I915_READ(l3_offset);
210 I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset)); 210 I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
211 POSTING_READ(l3_offset); 211 POSTING_READ(l3_offset);
212 l3_offset.reg += 4; 212 l3_offset.reg += 4;
213 } 213 }
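The one-word change in load_mocs() is a real bug fix, not cleanup: the old code loaded all 32 L3CC entries from the single vreg slot at `offset` (apparently the cursor left over from the preceding per-engine loop), so every L3 entry was programmed with the same value. The fixed line indexes the vreg by the L3 register actually being written:

	I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset));    /* old: one slot, 32 times */
	I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset)); /* new: per-entry slot     */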
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 06c9584ac5f0..34b9acdf3479 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -101,7 +101,7 @@ struct tbs_sched_data {
101 struct list_head runq_head; 101 struct list_head runq_head;
102}; 102};
103 103
104#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000) 104#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
105 105
106static void tbs_sched_func(struct work_struct *work) 106static void tbs_sched_func(struct work_struct *work)
107{ 107{
@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
223 return; 223 return;
224 224
225 list_add_tail(&vgpu_data->list, &sched_data->runq_head); 225 list_add_tail(&vgpu_data->list, &sched_data->runq_head);
226 schedule_delayed_work(&sched_data->work, sched_data->period); 226 schedule_delayed_work(&sched_data->work, 0);
227} 227}
228 228
 229static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) 229static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
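The GVT_DEFAULT_TIME_SLICE change fixes an integer-division trap: `1 * HZ / 1000` truncates to 0 on any kernel built with HZ < 1000, so the time-based scheduler effectively ran with a zero-length slice, while msecs_to_jiffies() rounds up and never returns 0 for a nonzero input. The arithmetic, spelled out:

	#include <linux/jiffies.h>

	/* HZ=100:   1 * HZ / 1000       == 100/1000 == 0 jiffies (truncated) */
	/* HZ=100:   msecs_to_jiffies(1) == 1 jiffy  (rounded up)             */
	/* HZ=1000:  both expressions    == 1 jiffy                           */
	#define GVT_DEFAULT_TIME_SLICE	(msecs_to_jiffies(1))

The schedule_delayed_work(&sched_data->work, 0) change in tbs_sched_start_schedule() is related: a newly added vGPU now gets the scheduler kicked immediately instead of waiting out a full period.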
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d3a56c949025..a44782412f2c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
84 (u32)((workload->ctx_desc.lrca + i) << 84 (u32)((workload->ctx_desc.lrca + i) <<
85 GTT_PAGE_SHIFT)); 85 GTT_PAGE_SHIFT));
86 if (context_gpa == INTEL_GVT_INVALID_ADDR) { 86 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
87 gvt_err("Invalid guest context descriptor\n"); 87 gvt_vgpu_err("Invalid guest context descriptor\n");
88 return -EINVAL; 88 return -EINVAL;
89 } 89 }
90 90
@@ -127,19 +127,22 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
127 return 0; 127 return 0;
128} 128}
129 129
130static inline bool is_gvt_request(struct drm_i915_gem_request *req)
131{
132 return i915_gem_context_force_single_submission(req->ctx);
133}
134
130static int shadow_context_status_change(struct notifier_block *nb, 135static int shadow_context_status_change(struct notifier_block *nb,
131 unsigned long action, void *data) 136 unsigned long action, void *data)
132{ 137{
133 struct intel_vgpu *vgpu = container_of(nb, 138 struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
134 struct intel_vgpu, shadow_ctx_notifier_block); 139 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
135 struct drm_i915_gem_request *req = 140 shadow_ctx_notifier_block[req->engine->id]);
136 (struct drm_i915_gem_request *)data; 141 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
137 struct intel_gvt_workload_scheduler *scheduler =
138 &vgpu->gvt->scheduler;
139 struct intel_vgpu_workload *workload = 142 struct intel_vgpu_workload *workload =
140 scheduler->current_workload[req->engine->id]; 143 scheduler->current_workload[req->engine->id];
141 144
142 if (unlikely(!workload)) 145 if (!is_gvt_request(req) || unlikely(!workload))
143 return NOTIFY_OK; 146 return NOTIFY_OK;
144 147
145 switch (action) { 148 switch (action) {
@@ -175,7 +178,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
175 int ring_id = workload->ring_id; 178 int ring_id = workload->ring_id;
176 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; 179 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
177 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 180 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
181 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
178 struct drm_i915_gem_request *rq; 182 struct drm_i915_gem_request *rq;
183 struct intel_vgpu *vgpu = workload->vgpu;
179 int ret; 184 int ret;
180 185
181 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", 186 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -187,9 +192,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
187 192
188 mutex_lock(&dev_priv->drm.struct_mutex); 193 mutex_lock(&dev_priv->drm.struct_mutex);
189 194
 195 /* pin the shadow context by gvt even though it will also be pinned
 196 * when i915 allocates a request. That is because gvt updates the
 197 * guest context from the shadow context once the workload completes,
 198 * and by that time i915 may already have unpinned the shadow context,
 199 * leaving the shadow_ctx pages invalid. So gvt needs its own pin.
 200 * After updating the guest context, gvt can unpin shadow_ctx safely.
 201 */
202 ret = engine->context_pin(engine, shadow_ctx);
203 if (ret) {
204 gvt_vgpu_err("fail to pin shadow context\n");
205 workload->status = ret;
206 mutex_unlock(&dev_priv->drm.struct_mutex);
207 return ret;
208 }
209
190 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); 210 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
191 if (IS_ERR(rq)) { 211 if (IS_ERR(rq)) {
192 gvt_err("fail to allocate gem request\n"); 212 gvt_vgpu_err("fail to allocate gem request\n");
193 ret = PTR_ERR(rq); 213 ret = PTR_ERR(rq);
194 goto out; 214 goto out;
195 } 215 }
@@ -202,9 +222,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
202 if (ret) 222 if (ret)
203 goto out; 223 goto out;
204 224
205 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); 225 if ((workload->ring_id == RCS) &&
206 if (ret) 226 (workload->wa_ctx.indirect_ctx.size != 0)) {
207 goto out; 227 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
228 if (ret)
229 goto out;
230 }
208 231
209 ret = populate_shadow_context(workload); 232 ret = populate_shadow_context(workload);
210 if (ret) 233 if (ret)
@@ -227,6 +250,9 @@ out:
227 250
228 if (!IS_ERR_OR_NULL(rq)) 251 if (!IS_ERR_OR_NULL(rq))
229 i915_add_request_no_flush(rq); 252 i915_add_request_no_flush(rq);
253 else
254 engine->context_unpin(engine, shadow_ctx);
255
230 mutex_unlock(&dev_priv->drm.struct_mutex); 256 mutex_unlock(&dev_priv->drm.struct_mutex);
231 return ret; 257 return ret;
232} 258}
@@ -322,7 +348,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
322 (u32)((workload->ctx_desc.lrca + i) << 348 (u32)((workload->ctx_desc.lrca + i) <<
323 GTT_PAGE_SHIFT)); 349 GTT_PAGE_SHIFT));
324 if (context_gpa == INTEL_GVT_INVALID_ADDR) { 350 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
325 gvt_err("invalid guest context descriptor\n"); 351 gvt_vgpu_err("invalid guest context descriptor\n");
326 return; 352 return;
327 } 353 }
328 354
@@ -376,6 +402,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
376 * For the workload w/o request, directly complete the workload. 402 * For the workload w/o request, directly complete the workload.
377 */ 403 */
378 if (workload->req) { 404 if (workload->req) {
405 struct drm_i915_private *dev_priv =
406 workload->vgpu->gvt->dev_priv;
407 struct intel_engine_cs *engine =
408 dev_priv->engine[workload->ring_id];
379 wait_event(workload->shadow_ctx_status_wq, 409 wait_event(workload->shadow_ctx_status_wq,
380 !atomic_read(&workload->shadow_ctx_active)); 410 !atomic_read(&workload->shadow_ctx_active));
381 411
@@ -388,6 +418,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
388 INTEL_GVT_EVENT_MAX) 418 INTEL_GVT_EVENT_MAX)
389 intel_vgpu_trigger_virtual_event(vgpu, event); 419 intel_vgpu_trigger_virtual_event(vgpu, event);
390 } 420 }
421 mutex_lock(&dev_priv->drm.struct_mutex);
422 /* unpin shadow ctx as the shadow_ctx update is done */
423 engine->context_unpin(engine, workload->vgpu->shadow_ctx);
424 mutex_unlock(&dev_priv->drm.struct_mutex);
391 } 425 }
392 426
393 gvt_dbg_sched("ring id %d complete workload %p status %d\n", 427 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -417,6 +451,7 @@ static int workload_thread(void *priv)
417 int ring_id = p->ring_id; 451 int ring_id = p->ring_id;
418 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 452 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
419 struct intel_vgpu_workload *workload = NULL; 453 struct intel_vgpu_workload *workload = NULL;
454 struct intel_vgpu *vgpu = NULL;
420 int ret; 455 int ret;
421 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); 456 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
422 DEFINE_WAIT_FUNC(wait, woken_wake_function); 457 DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -459,25 +494,14 @@ static int workload_thread(void *priv)
459 mutex_unlock(&gvt->lock); 494 mutex_unlock(&gvt->lock);
460 495
461 if (ret) { 496 if (ret) {
462 gvt_err("fail to dispatch workload, skip\n"); 497 vgpu = workload->vgpu;
498 gvt_vgpu_err("fail to dispatch workload, skip\n");
463 goto complete; 499 goto complete;
464 } 500 }
465 501
466 gvt_dbg_sched("ring id %d wait workload %p\n", 502 gvt_dbg_sched("ring id %d wait workload %p\n",
467 workload->ring_id, workload); 503 workload->ring_id, workload);
468retry: 504 i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
469 i915_wait_request(workload->req,
470 0, MAX_SCHEDULE_TIMEOUT);
471 /* I915 has replay mechanism and a request will be replayed
472 * if there is i915 reset. So the seqno will be updated anyway.
473 * If the seqno is not updated yet after waiting, which means
474 * the replay may still be in progress and we can wait again.
475 */
476 if (!i915_gem_request_completed(workload->req)) {
477 gvt_dbg_sched("workload %p not completed, wait again\n",
478 workload);
479 goto retry;
480 }
481 505
482complete: 506complete:
483 gvt_dbg_sched("will complete workload %p, status: %d\n", 507 gvt_dbg_sched("will complete workload %p, status: %d\n",
@@ -513,15 +537,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
513void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) 537void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
514{ 538{
515 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 539 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
516 int i; 540 struct intel_engine_cs *engine;
541 enum intel_engine_id i;
517 542
518 gvt_dbg_core("clean workload scheduler\n"); 543 gvt_dbg_core("clean workload scheduler\n");
519 544
520 for (i = 0; i < I915_NUM_ENGINES; i++) { 545 for_each_engine(engine, gvt->dev_priv, i) {
521 if (scheduler->thread[i]) { 546 atomic_notifier_chain_unregister(
522 kthread_stop(scheduler->thread[i]); 547 &engine->context_status_notifier,
523 scheduler->thread[i] = NULL; 548 &gvt->shadow_ctx_notifier_block[i]);
524 } 549 kthread_stop(scheduler->thread[i]);
525 } 550 }
526} 551}
527 552
@@ -529,18 +554,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
529{ 554{
530 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 555 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
531 struct workload_thread_param *param = NULL; 556 struct workload_thread_param *param = NULL;
557 struct intel_engine_cs *engine;
558 enum intel_engine_id i;
532 int ret; 559 int ret;
533 int i;
534 560
535 gvt_dbg_core("init workload scheduler\n"); 561 gvt_dbg_core("init workload scheduler\n");
536 562
537 init_waitqueue_head(&scheduler->workload_complete_wq); 563 init_waitqueue_head(&scheduler->workload_complete_wq);
538 564
539 for (i = 0; i < I915_NUM_ENGINES; i++) { 565 for_each_engine(engine, gvt->dev_priv, i) {
540 /* check ring mask at init time */
541 if (!HAS_ENGINE(gvt->dev_priv, i))
542 continue;
543
544 init_waitqueue_head(&scheduler->waitq[i]); 566 init_waitqueue_head(&scheduler->waitq[i]);
545 567
546 param = kzalloc(sizeof(*param), GFP_KERNEL); 568 param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -559,6 +581,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
559 ret = PTR_ERR(scheduler->thread[i]); 581 ret = PTR_ERR(scheduler->thread[i]);
560 goto err; 582 goto err;
561 } 583 }
584
585 gvt->shadow_ctx_notifier_block[i].notifier_call =
586 shadow_context_status_change;
587 atomic_notifier_chain_register(&engine->context_status_notifier,
588 &gvt->shadow_ctx_notifier_block[i]);
562 } 589 }
563 return 0; 590 return 0;
564err: 591err:
@@ -570,9 +597,6 @@ err:
570 597
571void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu) 598void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
572{ 599{
573 atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
574 &vgpu->shadow_ctx_notifier_block);
575
576 i915_gem_context_put_unlocked(vgpu->shadow_ctx); 600 i915_gem_context_put_unlocked(vgpu->shadow_ctx);
577} 601}
578 602
@@ -587,10 +611,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
587 611
588 vgpu->shadow_ctx->engine[RCS].initialised = true; 612 vgpu->shadow_ctx->engine[RCS].initialised = true;
589 613
590 vgpu->shadow_ctx_notifier_block.notifier_call =
591 shadow_context_status_change;
592
593 atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
594 &vgpu->shadow_ctx_notifier_block);
595 return 0; 614 return 0;
596} 615}
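Three structural changes meet in scheduler.c. The context status notifier moves from one block per vGPU, hooked on that vGPU's shadow context, to one block per engine stored in struct intel_gvt; since the per-engine chain now fires for every request on the engine, is_gvt_request() filters out ordinary i915 requests by checking the force-single-submission flag that GVT contexts carry. The shadow context is also explicitly pinned for the lifetime of a workload, and the retry loop around i915_wait_request() collapses to a single unbounded wait. A condensed view of the pin lifetime (excerpted from the hunks above, error handling trimmed):

	/* dispatch_workload(): gvt takes its own pin so the shadow pages
	 * stay valid until the guest context has been updated */
	ret = engine->context_pin(engine, shadow_ctx);

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);

	/* error path, no request queued: drop the pin right away */
	engine->context_unpin(engine, shadow_ctx);

	/* complete_current_workload(): the shadow context has been copied
	 * back into the guest context, the extra pin can go */
	engine->context_unpin(engine, workload->vgpu->shadow_ctx);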
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 41cfa5ccae84..649ef280cc9a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -72,7 +72,7 @@ static struct {
72 char *name; 72 char *name;
73} vgpu_types[] = { 73} vgpu_types[] = {
74/* Fixed vGPU type table */ 74/* Fixed vGPU type table */
75 { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" }, 75 { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" },
76 { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" }, 76 { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
77 { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" }, 77 { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
78 { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" }, 78 { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
@@ -179,20 +179,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
179} 179}
180 180
181/** 181/**
 182 * intel_gvt_destroy_vgpu - destroy a virtual GPU 182 * intel_gvt_activate_vgpu - activate a virtual GPU
183 * @vgpu: virtual GPU 183 * @vgpu: virtual GPU
184 * 184 *
 185 * This function is called when a user wants to destroy a virtual GPU. 185 * This function is called when a user wants to activate a virtual GPU.
186 * 186 *
187 */ 187 */
188void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) 188void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
189{
190 mutex_lock(&vgpu->gvt->lock);
191 vgpu->active = true;
192 mutex_unlock(&vgpu->gvt->lock);
193}
194
195/**
 196 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
197 * @vgpu: virtual GPU
198 *
 199 * This function is called when a user wants to deactivate a virtual GPU.
200 * All virtual GPU runtime information will be destroyed.
201 *
202 */
203void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
189{ 204{
190 struct intel_gvt *gvt = vgpu->gvt; 205 struct intel_gvt *gvt = vgpu->gvt;
191 206
192 mutex_lock(&gvt->lock); 207 mutex_lock(&gvt->lock);
193 208
194 vgpu->active = false; 209 vgpu->active = false;
195 idr_remove(&gvt->vgpu_idr, vgpu->id);
196 210
197 if (atomic_read(&vgpu->running_workload_num)) { 211 if (atomic_read(&vgpu->running_workload_num)) {
198 mutex_unlock(&gvt->lock); 212 mutex_unlock(&gvt->lock);
@@ -201,6 +215,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
201 } 215 }
202 216
203 intel_vgpu_stop_schedule(vgpu); 217 intel_vgpu_stop_schedule(vgpu);
218
219 mutex_unlock(&gvt->lock);
220}
221
222/**
223 * intel_gvt_destroy_vgpu - destroy a virtual GPU
224 * @vgpu: virtual GPU
225 *
 227 * This function is called when a user wants to destroy a virtual GPU.
227 *
228 */
229void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
230{
231 struct intel_gvt *gvt = vgpu->gvt;
232
233 mutex_lock(&gvt->lock);
234
235 WARN(vgpu->active, "vGPU is still active!\n");
236
237 idr_remove(&gvt->vgpu_idr, vgpu->id);
204 intel_vgpu_clean_sched_policy(vgpu); 238 intel_vgpu_clean_sched_policy(vgpu);
205 intel_vgpu_clean_gvt_context(vgpu); 239 intel_vgpu_clean_gvt_context(vgpu);
206 intel_vgpu_clean_execlist(vgpu); 240 intel_vgpu_clean_execlist(vgpu);
@@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
277 if (ret) 311 if (ret)
278 goto out_clean_shadow_ctx; 312 goto out_clean_shadow_ctx;
279 313
280 vgpu->active = true;
281 mutex_unlock(&gvt->lock); 314 mutex_unlock(&gvt->lock);
282 315
283 return vgpu; 316 return vgpu;
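Taken together with the kvmgt.c hunks, vgpu.c splits the old single destroy path into distinct lifecycle stages tied to the mdev callbacks. A schematic of the new ordering (the create and remove callback names outside this diff are assumptions):

	/* mdev create:  intel_vgpu_create()    -> vgpu_create(),  inactive  */
	/* mdev open:    intel_vgpu_open()      -> vgpu_activate()           */
	/* mdev close:   __intel_vgpu_release() -> vgpu_deactivate(), which
	 *               stops scheduling and waits for running workloads    */
	/* mdev remove:  (remove callback)      -> intel_gvt_destroy_vgpu(),
	 *               which WARNs if the vGPU is still active, drops the
	 *               idr entry and frees all resources                   */

Moving idr_remove() into destroy rather than deactivate keeps the vGPU id reserved for the whole create-to-destroy window, and the WARN documents the new invariant that a vGPU must be deactivated before it is destroyed.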