author     Jani Nikula <jani.nikula@intel.com>   2017-03-08 05:21:40 -0500
committer  Jani Nikula <jani.nikula@intel.com>   2017-03-08 05:21:54 -0500
commit     70647f9163aa4fc7090b0d6795d026ebe3897928 (patch)
tree       a0559e76aa9049f56b478402bdf157c11dfdf04f
parent     77e14ae6d785b436be4961b5f5dff80490e35227 (diff)
parent     627c845c0907894a1e5cd2d90ff4fc86c9e4458e (diff)

Merge tag 'gvt-fixes-2017-03-08' of https://github.com/01org/gvt-linux into drm-intel-fixes

gvt-fixes-2017-03-08

- MMIO cmd access flag cleanup
- Virtual display fixes from Weinan and Bing
- config space reset fix from Changbin
- better workload submission error path fix from Chuanxiao
- other misc fixes

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c  |  54
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c |  10
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c    |  14
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c   | 290
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c      |   4
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  |  49
6 files changed, 272 insertions(+), 149 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index a77e050b85a3..b7d7721e72fa 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -41,6 +41,54 @@ enum {
 	INTEL_GVT_PCI_BAR_MAX,
 };
 
+/* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one
+ * byte) byte by byte in standard pci configuration space. (not the full
+ * 256 bytes.)
+ */
+static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
+	[PCI_COMMAND]		= 0xff, 0x07,
+	[PCI_STATUS]		= 0x00, 0xf9, /* the only one RW1C byte */
+	[PCI_CACHE_LINE_SIZE]	= 0xff,
+	[PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
+	[PCI_ROM_ADDRESS]	= 0x01, 0xf8, 0xff, 0xff,
+	[PCI_INTERRUPT_LINE]	= 0xff,
+};
+
+/**
+ * vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ *
+ * Use this function to write virtual cfg space memory.
+ * For standard cfg space, only RW bits can be changed,
+ * and we emulates the RW1C behavior of PCI_STATUS register.
+ */
+static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
+				   u8 *src, unsigned int bytes)
+{
+	u8 *cfg_base = vgpu_cfg_space(vgpu);
+	u8 mask, new, old;
+	int i = 0;
+
+	for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
+		mask = pci_cfg_space_rw_bmp[off + i];
+		old = cfg_base[off + i];
+		new = src[i] & mask;
+
+		/**
+		 * The PCI_STATUS high byte has RW1C bits, here
+		 * emulates clear by writing 1 for these bits.
+		 * Writing a 0b to RW1C bits has no effect.
+		 */
+		if (off + i == PCI_STATUS + 1)
+			new = (~new & old) & mask;
+
+		cfg_base[off + i] = (old & ~mask) | new;
+	}
+
+	/* For other configuration space directly copy as it is. */
+	if (i < bytes)
+		memcpy(cfg_base + off + i, src + i, bytes - i);
+}
+
 /**
  * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
  *
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 	u8 changed = old ^ new;
 	int ret;
 
-	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+	vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
 	if (!(changed & PCI_COMMAND_MEMORY))
 		return 0;
 
@@ -277,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 		if (ret)
 			return ret;
 
-		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
 		break;
 	default:
-		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
 		break;
 	}
 	return 0;
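
The helper added above applies the PCI rule that the writable high byte of PCI_STATUS is RW1C: a guest write of 1 clears a bit, a write of 0 leaves it alone, and bits outside the writable mask are preserved. A minimal stand-alone sketch of that byte-level rule (plain userspace C; rw1c_write and the sample values are illustrative, not the driver helper itself):

#include <stdint.h>
#include <stdio.h>

/* Emulate one write to a byte whose writable bits (mask) are all RW1C:
 * writing 1 clears a bit, writing 0 has no effect, unmasked bits are kept.
 */
static uint8_t rw1c_write(uint8_t old, uint8_t val, uint8_t mask)
{
	uint8_t cleared = old & (uint8_t)~(val & mask);

	return (uint8_t)((old & ~mask) | (cleared & mask));
}

int main(void)
{
	uint8_t status = 0xf9;			/* all RW1C bits currently set */

	status = rw1c_write(status, 0x08, 0xf9);	/* guest writes 1 to one bit */
	printf("0x%02x\n", status);		/* prints 0xf1 */
	return 0;
}
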
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b9c8e2407682..7ae6e2b241c8 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
 	if (d_info == NULL)
 		return;
 
-	gvt_err("opcode=0x%x %s sub_ops:",
+	gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
 			cmd >> (32 - d_info->op_len), d_info->name);
 
 	for (i = 0; i < d_info->nr_sub_op; i++)
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
 	int cnt = 0;
 	int i;
 
-	gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+	gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
 			" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
 			s->ring_id, s->ring_start, s->ring_start + s->ring_size,
 			s->ring_head, s->ring_tail);
 
-	gvt_err(" %s %s ip_gma(%08lx) ",
+	gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
 			s->buf_type == RING_BUFFER_INSTRUCTION ?
 			"RING_BUFFER" : "BATCH_BUFFER",
 			s->buf_addr_type == GTT_BUFFER ?
 			"GTT" : "PPGTT", s->ip_gma);
 
 	if (s->ip_va == NULL) {
-		gvt_err(" ip_va(NULL)");
+		gvt_dbg_cmd(" ip_va(NULL)");
 		return;
 	}
 
-	gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
+	gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
 			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
 			cmd_val(s, 2), cmd_val(s, 3));
 
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 43e02e038375..5419ae6ec633 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -176,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 	vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
 			SDE_PORTE_HOTPLUG_SPT);
 
-	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+	}
 
-	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+	}
 
-	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+	}
 
 	if (IS_SKYLAKE(dev_priv) &&
 			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
@@ -196,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 				GEN8_PORT_DP_A_HOTPLUG;
 		else
 			vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+
+		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index f89b183488e9..8e43395c748a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
 		info->size = size;
 		info->length = (i + 4) < end ? 4 : (end - i);
 		info->addr_mask = addr_mask;
+		info->ro_mask = ro_mask;
 		info->device = device;
 		info->read = read ? read : intel_vgpu_default_mmio_read;
 		info->write = write ? write : intel_vgpu_default_mmio_write;
@@ -1304,21 +1305,24 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 	u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
 
 	switch (cmd) {
-	case 0x6:
-		/**
-		 * "Read memory latency" command on gen9.
-		 * Below memory latency values are read
-		 * from skylake platform.
-		 */
-		if (!*data0)
-			*data0 = 0x1e1a1100;
-		else
-			*data0 = 0x61514b3d;
+	case GEN9_PCODE_READ_MEM_LATENCY:
+		if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+			/**
+			 * "Read memory latency" command on gen9.
+			 * Below memory latency values are read
+			 * from skylake platform.
+			 */
+			if (!*data0)
+				*data0 = 0x1e1a1100;
+			else
+				*data0 = 0x61514b3d;
+		}
 		break;
 	case SKL_PCODE_CDCLK_CONTROL:
-		*data0 = SKL_CDCLK_READY_FOR_CHANGE;
+		if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
 		break;
-	case 0x5:
+	case GEN6_PCODE_READ_RC6VIDS:
 		*data0 |= 0x1;
 		break;
 	}
@@ -1520,6 +1524,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 #define MMIO_GM(reg, d, r, w) \
 	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
 
+#define MMIO_GM_RDR(reg, d, r, w) \
+	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
+
 #define MMIO_RO(reg, d, f, rm, r, w) \
 	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
 
@@ -1539,6 +1546,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 #define MMIO_RING_GM(prefix, d, r, w) \
 	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
 
+#define MMIO_RING_GM_RDR(prefix, d, r, w) \
+	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
+
 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
 	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
 
@@ -1547,73 +1557,79 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	int ret;
 
-	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+		intel_vgpu_reg_imr_handler);
 
 	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
 	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
 	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
 	MMIO_D(SDEISR, D_ALL);
 
-	MMIO_RING_D(RING_HWSTAM, D_ALL);
+	MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-	MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-	MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-	MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
 
 #define RING_REG(base) (base + 0x28)
-	MMIO_RING_D(RING_REG, D_ALL);
+	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x134)
-	MMIO_RING_D(RING_REG, D_ALL);
+	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 
-	MMIO_GM(0x2148, D_ALL, NULL, NULL);
-	MMIO_GM(CCID, D_ALL, NULL, NULL);
-	MMIO_GM(0x12198, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
 	MMIO_D(GEN7_CXT_SIZE, D_ALL);
 
-	MMIO_RING_D(RING_TAIL, D_ALL);
-	MMIO_RING_D(RING_HEAD, D_ALL);
-	MMIO_RING_D(RING_CTL, D_ALL);
-	MMIO_RING_D(RING_ACTHD, D_ALL);
-	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+	MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
 
 	/* RING MODE */
 #define RING_REG(base) (base + 0x29c)
-	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		ring_mode_mmio_write);
 #undef RING_REG
 
-	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
 		ring_timestamp_mmio_read, NULL);
 	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
 		ring_timestamp_mmio_read, NULL);
 
-	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x2124, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_D(GAM_ECOCHK, D_ALL);
-	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(0x9030, D_ALL);
-	MMIO_D(0x20a0, D_ALL);
-	MMIO_D(0x2420, D_ALL);
-	MMIO_D(0x2430, D_ALL);
-	MMIO_D(0x2434, D_ALL);
-	MMIO_D(0x2438, D_ALL);
-	MMIO_D(0x243c, D_ALL);
-	MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
@@ -2144,8 +2160,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(FORCEWAKE_ACK, D_ALL);
 	MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
 	MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
-	MMIO_D(GTFIFODBG, D_ALL);
-	MMIO_D(GTFIFOCTL, D_ALL);
+	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
 	MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
 	MMIO_D(ECOBUS, D_ALL);
@@ -2202,7 +2218,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
 
-	MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+	MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
 	MMIO_D(GEN6_PCODE_DATA, D_ALL);
 	MMIO_D(0x13812c, D_ALL);
 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2281,36 +2297,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x1a054, D_ALL);
 
 	MMIO_D(0x44070, D_ALL);
-
-	MMIO_D(0x215c, D_HSW_PLUS);
+	MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
-	MMIO_D(GEN7_OACONTROL, D_HSW);
+	MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
+	MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(0x2b00, D_BDW_PLUS);
 	MMIO_D(0x2360, D_BDW_PLUS);
-	MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
 
 	MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(BCS_SWCTRL, D_ALL);
+	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
 	MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
 	MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
 	MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
@@ -2318,6 +2333,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
 	MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
+	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+	MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	return 0;
 }
 
@@ -2326,7 +2352,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	int ret;
 
-	MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+	MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
 		intel_vgpu_reg_imr_handler);
 
 	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2391,24 +2417,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
 		intel_vgpu_reg_master_irq_handler);
 
-	MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(0x1c134, D_BDW_PLUS);
-
-	MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
-	MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
-	MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
-		NULL, NULL);
-	MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
-		NULL, NULL);
+	MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+	MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+	MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		ring_mode_mmio_write);
+	MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
 		ring_timestamp_mmio_read, NULL);
 
-	MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
 #define RING_REG(base) (base + 0xd0)
 	MMIO_RING_F(RING_REG, 4, F_RO, 0,
@@ -2425,13 +2458,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x234)
-	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
-	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+	MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+		NULL, NULL);
+	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
+		~0LL, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x244)
-	MMIO_RING_D(RING_REG, D_BDW_PLUS);
-	MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+		NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x370)
@@ -2453,6 +2489,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
 	MMIO_D(0x1c054, D_BDW_PLUS);
 
+	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
+
 	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
 	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
 
@@ -2463,8 +2501,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
-	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
-	MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+	MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
 
 	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
@@ -2485,15 +2523,17 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
 
 	MMIO_D(0xfdc, D_BDW_PLUS);
-	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
-	MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_D(0xb1f0, D_BDW);
-	MMIO_D(0xb1c0, D_BDW);
+	MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(0xb100, D_BDW);
-	MMIO_D(0xb10c, D_BDW);
+	MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(0xb110, D_BDW);
 
 	MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
@@ -2503,10 +2543,10 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x44484, D_BDW_PLUS);
 	MMIO_D(0x4448c, D_BDW_PLUS);
 
-	MMIO_D(0x83a4, D_BDW);
+	MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
 
-	MMIO_D(0x8430, D_BDW);
+	MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
 
 	MMIO_D(0x110000, D_BDW_PLUS);
 
@@ -2518,10 +2558,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_D(0x2248, D_BDW);
+	MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
 
+	MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	return 0;
 }
 
@@ -2544,7 +2593,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
 	MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
 
-	MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
 	MMIO_D(0xa210, D_SKL_PLUS);
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2702,16 +2750,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
 
 	MMIO_D(0xd08, D_SKL);
-	MMIO_D(0x20e0, D_SKL);
-	MMIO_D(0x20ec, D_SKL);
+	MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
 	/* TRTT */
-	MMIO_D(0x4de0, D_SKL);
-	MMIO_D(0x4de4, D_SKL);
-	MMIO_D(0x4de8, D_SKL);
-	MMIO_D(0x4dec, D_SKL);
-	MMIO_D(0x4df0, D_SKL);
-	MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+	MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
 	MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
 
 	MMIO_D(0x45008, D_SKL);
@@ -2735,7 +2783,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x65f08, D_SKL);
 	MMIO_D(0x320f0, D_SKL);
 
-	MMIO_D(_REG_VCS2_EXCC, D_SKL);
+	MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(0x70034, D_SKL);
 	MMIO_D(0x71034, D_SKL);
 	MMIO_D(0x72034, D_SKL);
@@ -2748,7 +2796,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
 
 	MMIO_D(0x44500, D_SKL);
-	MMIO_D(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS);
+	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	return 0;
 }
 
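Most of the handlers.c churn above only adds F_CMD_ACCESS to existing MMIO entries, i.e. it widens the allow-list the command parser consults before letting a guest command buffer touch a register. The sketch below is not the GVT implementation; it is a self-contained illustration of that flag-gated table lookup, with invented names (FLAG_CMD_ACCESS, struct mmio_entry, cmd_access_allowed):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative attribute flags, in the spirit of F_RO/F_MODE_MASK/F_CMD_ACCESS. */
#define FLAG_RO		(1u << 0)
#define FLAG_MODE_MASK	(1u << 1)
#define FLAG_CMD_ACCESS	(1u << 2)

struct mmio_entry {
	uint32_t offset;
	uint32_t flags;
};

/* Tiny stand-in for the per-register attribute table built in handlers.c. */
static const struct mmio_entry table[] = {
	{ 0x2124, FLAG_MODE_MASK | FLAG_CMD_ACCESS },
	{ 0x9030, FLAG_CMD_ACCESS },
	{ 0x4f000, 0 },		/* registered, but not for command buffers */
};

/* A command-parser style check: may a command buffer touch this register? */
static bool cmd_access_allowed(uint32_t offset)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].offset == offset)
			return (table[i].flags & FLAG_CMD_ACCESS) != 0;
	return false;		/* unknown registers are rejected */
}

int main(void)
{
	printf("0x2124:  %d\n", cmd_access_allowed(0x2124));	/* 1 */
	printf("0x4f000: %d\n", cmd_access_allowed(0x4f000));	/* 0 */
	return 0;
}
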
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index f07cb8ba751f..84d801638ede 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
 	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 	dma_addr_t daddr;
 
-	page = pfn_to_page(pfn);
-	if (is_error_page(page))
+	if (unlikely(!pfn_valid(pfn)))
 		return -EFAULT;
 
+	page = pfn_to_page(pfn);
 	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
 			PCI_DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, daddr))
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e355a82ccabd..d3a56c949025 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -151,6 +151,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	case INTEL_CONTEXT_SCHEDULE_OUT:
 		intel_gvt_restore_render_mmio(workload->vgpu,
 					      workload->ring_id);
+		/* If the status is -EINPROGRESS means this workload
+		 * doesn't meet any issue during dispatching so when
+		 * get the SCHEDULE_OUT set the status to be zero for
+		 * good. If the status is NOT -EINPROGRESS means there
+		 * is something wrong happened during dispatching and
+		 * the status should not be set to zero
+		 */
+		if (workload->status == -EINPROGRESS)
+			workload->status = 0;
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default:
@@ -362,15 +371,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	workload = scheduler->current_workload[ring_id];
 	vgpu = workload->vgpu;
 
-	if (!workload->status && !vgpu->resetting) {
+	/* For the workload w/ request, needs to wait for the context
+	 * switch to make sure request is completed.
+	 * For the workload w/o request, directly complete the workload.
+	 */
+	if (workload->req) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
-		update_guest_context(workload);
+		i915_gem_request_put(fetch_and_zero(&workload->req));
 
-		for_each_set_bit(event, workload->pending_events,
-				 INTEL_GVT_EVENT_MAX)
-			intel_vgpu_trigger_virtual_event(vgpu, event);
+		if (!workload->status && !vgpu->resetting) {
+			update_guest_context(workload);
+
+			for_each_set_bit(event, workload->pending_events,
+					 INTEL_GVT_EVENT_MAX)
+				intel_vgpu_trigger_virtual_event(vgpu, event);
+		}
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -400,7 +417,6 @@ static int workload_thread(void *priv)
 	int ring_id = p->ring_id;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload = NULL;
-	long lret;
 	int ret;
 	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -449,23 +465,24 @@ static int workload_thread(void *priv)
 
 		gvt_dbg_sched("ring id %d wait workload %p\n",
 				workload->ring_id, workload);
-
-		lret = i915_wait_request(workload->req,
-					 0, MAX_SCHEDULE_TIMEOUT);
-		if (lret < 0) {
-			workload->status = lret;
-			gvt_err("fail to wait workload, skip\n");
-		} else {
-			workload->status = 0;
+retry:
+		i915_wait_request(workload->req,
+				  0, MAX_SCHEDULE_TIMEOUT);
+		/* I915 has replay mechanism and a request will be replayed
+		 * if there is i915 reset. So the seqno will be updated anyway.
+		 * If the seqno is not updated yet after waiting, which means
+		 * the replay may still be in progress and we can wait again.
+		 */
+		if (!i915_gem_request_completed(workload->req)) {
+			gvt_dbg_sched("workload %p not completed, wait again\n",
+				workload);
+			goto retry;
 		}
 
 complete:
 		gvt_dbg_sched("will complete workload %p, status: %d\n",
 				workload, workload->status);
 
-		if (workload->req)
-			i915_gem_request_put(fetch_and_zero(&workload->req));
-
 		complete_current_workload(gvt, ring_id);
 
 		if (need_force_wake)
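
The scheduler.c changes above lean on a sentinel status: a workload stays at -EINPROGRESS as long as dispatch has not failed, and only the SCHEDULE_OUT notification promotes it to 0, while a real error recorded earlier is preserved. A minimal stand-alone sketch of that convention (plain C with hypothetical struct workload/on_schedule_out names, not the GVT types):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for a queued piece of work. */
struct workload {
	int status;	/* -EINPROGRESS until completion is decided */
};

static void workload_init(struct workload *w)
{
	w->status = -EINPROGRESS;
}

/* Called when the hardware context is scheduled out. Only a workload
 * that is still "in progress" is promoted to success; an earlier error
 * (e.g. a failed dispatch) is preserved.
 */
static void on_schedule_out(struct workload *w)
{
	if (w->status == -EINPROGRESS)
		w->status = 0;
}

int main(void)
{
	struct workload ok, failed;

	workload_init(&ok);
	workload_init(&failed);
	failed.status = -EIO;		/* dispatch error recorded earlier */

	on_schedule_out(&ok);
	on_schedule_out(&failed);

	printf("ok=%d failed=%d\n", ok.status, failed.status);	/* 0 and -EIO */
	return 0;
}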