 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |   5
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c    |   3
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    |  21
 drivers/gpu/drm/drm_edid.c                 |   3
 drivers/gpu/drm/i915/gvt/cfg_space.c       |  57
 drivers/gpu/drm/i915/gvt/cmd_parser.c      |  10
 drivers/gpu/drm/i915/gvt/display.c         | 139
 drivers/gpu/drm/i915/gvt/display.h         |  20
 drivers/gpu/drm/i915/gvt/firmware.c        |   2
 drivers/gpu/drm/i915/gvt/gtt.c             |  40
 drivers/gpu/drm/i915/gvt/gvt.h             |  12
 drivers/gpu/drm/i915/gvt/handlers.c        | 439
 drivers/gpu/drm/i915/gvt/kvmgt.c           |  12
 drivers/gpu/drm/i915/gvt/mmio.c            |  66
 drivers/gpu/drm/i915/gvt/opregion.c        |   5
 drivers/gpu/drm/i915/gvt/render.c          |  16
 drivers/gpu/drm/i915/gvt/scheduler.c       |  52
 drivers/gpu/drm/i915/gvt/vgpu.c            |  72
 drivers/gpu/drm/mxsfb/mxsfb_crtc.c         |  49
 drivers/gpu/drm/mxsfb/mxsfb_drv.c          |   4
 drivers/gpu/drm/mxsfb/mxsfb_out.c          |   4
 drivers/gpu/drm/mxsfb/mxsfb_regs.h         |   1
 22 files changed, 768 insertions(+), 264 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6abb238b25c9..4120b351a8e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2094,8 +2094,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	}
 
 	r = amdgpu_late_init(adev);
-	if (r)
+	if (r) {
+		if (fbcon)
+			console_unlock();
 		return r;
+	}
 
 	/* pin cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 75fc376ba735..f7adbace428a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -59,9 +59,10 @@
  * - 3.7.0 - Add support for VCE clock list packet
  * - 3.8.0 - Add support raster config init in the kernel
  * - 3.9.0 - Add support for memory query info about VRAM and GTT.
+ * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	9
+#define KMS_DRIVER_MINOR	10
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 51d759463384..106cf83c2e6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -202,6 +202,27 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	bool kernel = false;
 	int r;
 
+	/* reject invalid gem flags */
+	if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+				      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+				      AMDGPU_GEM_CREATE_VRAM_CLEARED |
+				      AMDGPU_GEM_CREATE_SHADOW |
+				      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+		r = -EINVAL;
+		goto error_unlock;
+	}
+	/* reject invalid gem domains */
+	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
+				 AMDGPU_GEM_DOMAIN_GTT |
+				 AMDGPU_GEM_DOMAIN_VRAM |
+				 AMDGPU_GEM_DOMAIN_GDS |
+				 AMDGPU_GEM_DOMAIN_GWS |
+				 AMDGPU_GEM_DOMAIN_OA)) {
+		r = -EINVAL;
+		goto error_unlock;
+	}
+
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c8baab9bee0d..ba58f1b11d1e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -148,6 +148,9 @@ static const struct edid_quirk {
 
 	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
 	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+
+	/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1 */
+	{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
 };
 
 /*
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4a6a2ed65732..b7d7721e72fa 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -41,6 +41,54 @@ enum {
 	INTEL_GVT_PCI_BAR_MAX,
 };
 
+/* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one
+ * byte) byte by byte in standard pci configuration space. (not the full
+ * 256 bytes.)
+ */
+static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
+	[PCI_COMMAND]		= 0xff, 0x07,
+	[PCI_STATUS]		= 0x00, 0xf9, /* the only one RW1C byte */
+	[PCI_CACHE_LINE_SIZE]	= 0xff,
+	[PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
+	[PCI_ROM_ADDRESS]	= 0x01, 0xf8, 0xff, 0xff,
+	[PCI_INTERRUPT_LINE]	= 0xff,
+};
+
+/**
+ * vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ *
+ * Use this function to write virtual cfg space memory.
+ * For standard cfg space, only RW bits can be changed,
+ * and we emulate the RW1C behavior of the PCI_STATUS register.
+ */
+static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
+				   u8 *src, unsigned int bytes)
+{
+	u8 *cfg_base = vgpu_cfg_space(vgpu);
+	u8 mask, new, old;
+	int i = 0;
+
+	for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
+		mask = pci_cfg_space_rw_bmp[off + i];
+		old = cfg_base[off + i];
+		new = src[i] & mask;
+
+		/**
+		 * The PCI_STATUS high byte has RW1C bits, here
+		 * emulates clear by writing 1 for these bits.
+		 * Writing a 0b to RW1C bits has no effect.
+		 */
+		if (off + i == PCI_STATUS + 1)
+			new = (~new & old) & mask;
+
+		cfg_base[off + i] = (old & ~mask) | new;
+	}
+
+	/* For other configuration space directly copy as it is. */
+	if (i < bytes)
+		memcpy(cfg_base + off + i, src + i, bytes - i);
+}
+
 /**
  * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
  *
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 	u8 changed = old ^ new;
 	int ret;
 
-	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+	vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
 	if (!(changed & PCI_COMMAND_MEMORY))
 		return 0;
 
@@ -237,6 +285,9 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
 	int ret;
 
+	if (vgpu->failsafe)
+		return 0;
+
 	if (WARN_ON(bytes > 4))
 		return -EINVAL;
 
@@ -274,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 		if (ret)
 			return ret;
 
-		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
 		break;
 	default:
-		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
 		break;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b9c8e2407682..7ae6e2b241c8 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
 	if (d_info == NULL)
 		return;
 
-	gvt_err("opcode=0x%x %s sub_ops:",
+	gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
 			cmd >> (32 - d_info->op_len), d_info->name);
 
 	for (i = 0; i < d_info->nr_sub_op; i++)
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
 	int cnt = 0;
 	int i;
 
-	gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+	gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
 			" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
 			s->ring_id, s->ring_start, s->ring_start + s->ring_size,
 			s->ring_head, s->ring_tail);
 
-	gvt_err(" %s %s ip_gma(%08lx) ",
+	gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
 			s->buf_type == RING_BUFFER_INSTRUCTION ?
 			"RING_BUFFER" : "BATCH_BUFFER",
 			s->buf_addr_type == GTT_BUFFER ?
 			"GTT" : "PPGTT", s->ip_gma);
 
 	if (s->ip_va == NULL) {
-		gvt_err(" ip_va(NULL)");
+		gvt_dbg_cmd(" ip_va(NULL)");
 		return;
 	}
 
-	gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
+	gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
 			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
 			cmd_val(s, 2), cmd_val(s, 3));
 
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6d8fde880c39..5419ae6ec633 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -83,44 +83,80 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 	return 0;
 }
 
-/* EDID with 1920x1200 as its resolution */
-static unsigned char virtual_dp_monitor_edid[] = {
-	/*Header*/
-	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
-	/* Vendor & Product Identification */
-	0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
-	/* Version & Revision */
-	0x01, 0x04,
-	/* Basic Display Parameters & Features */
-	0xa5, 0x34, 0x20, 0x78, 0x23,
-	/* Color Characteristics */
-	0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
-	/* Established Timings: maximum resolution is 1024x768 */
-	0x21, 0x08, 0x00,
-	/*
-	 * Standard Timings.
-	 * below new resolutions can be supported:
-	 * 1920x1080, 1280x720, 1280x960, 1280x1024,
-	 * 1440x900, 1600x1200, 1680x1050
-	 */
-	0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
-	0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
-	/* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
-	0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
-	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
-	/* 18 Byte Data Blocks 2: invalid */
-	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
-	0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
-	/* 18 Byte Data Blocks 3: invalid */
-	0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
-	0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
-	/* 18 Byte Data Blocks 4: invalid */
-	0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
-	0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
-	/* Extension Block Count */
-	0x00,
-	/* Checksum */
-	0x45,
-};
+static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
+	{
+/* EDID with 1024x768 as its resolution */
+	/*Header*/
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	/* Vendor & Product Identification */
+	0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+	/* Version & Revision */
+	0x01, 0x04,
+	/* Basic Display Parameters & Features */
+	0xa5, 0x34, 0x20, 0x78, 0x23,
+	/* Color Characteristics */
+	0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+	/* Established Timings: maximum resolution is 1024x768 */
+	0x21, 0x08, 0x00,
+	/* Standard Timings. All invalid */
+	0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+	0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+	/* 18 Byte Data Blocks 1: invalid */
+	0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+	/* 18 Byte Data Blocks 2: invalid */
+	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+	0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+	/* 18 Byte Data Blocks 3: invalid */
+	0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+	0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+	/* 18 Byte Data Blocks 4: invalid */
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+	0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+	/* Extension Block Count */
+	0x00,
+	/* Checksum */
+	0xef,
+	},
+	{
+/* EDID with 1920x1200 as its resolution */
+	/*Header*/
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	/* Vendor & Product Identification */
+	0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+	/* Version & Revision */
+	0x01, 0x04,
+	/* Basic Display Parameters & Features */
+	0xa5, 0x34, 0x20, 0x78, 0x23,
+	/* Color Characteristics */
+	0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+	/* Established Timings: maximum resolution is 1024x768 */
+	0x21, 0x08, 0x00,
+	/*
+	 * Standard Timings.
+	 * below new resolutions can be supported:
+	 * 1920x1080, 1280x720, 1280x960, 1280x1024,
+	 * 1440x900, 1600x1200, 1680x1050
+	 */
+	0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
+	0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
+	/* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
+	0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+	/* 18 Byte Data Blocks 2: invalid */
+	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+	0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+	/* 18 Byte Data Blocks 3: invalid */
+	0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+	0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+	/* 18 Byte Data Blocks 4: invalid */
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+	0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+	/* Extension Block Count */
+	0x00,
+	/* Checksum */
+	0x45,
+	},
+};
 
 #define DPCD_HEADER_SIZE        0xb
@@ -140,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 	vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
 			SDE_PORTE_HOTPLUG_SPT);
 
-	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+	}
 
-	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+	}
 
-	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+	}
 
 	if (IS_SKYLAKE(dev_priv) &&
 	    intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
@@ -160,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 				GEN8_PORT_DP_A_HOTPLUG;
 		else
 			vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+
+		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
 	}
 }
 
@@ -175,10 +219,13 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 }
 
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
-		int type)
+		int type, unsigned int resolution)
 {
 	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
+	if (WARN_ON(resolution >= GVT_EDID_NUM))
+		return -EINVAL;
+
 	port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
 	if (!port->edid)
 		return -ENOMEM;
@@ -189,7 +236,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 		return -ENOMEM;
 	}
 
-	memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+	memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution],
 			EDID_SIZE);
 	port->edid->data_valid = true;
 
@@ -322,16 +369,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
  * Zero on success, negative error code if failed.
  *
  */
-int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 	intel_vgpu_init_i2c_edid(vgpu);
 
 	if (IS_SKYLAKE(dev_priv))
-		return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+		return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
+						resolution);
 	else
-		return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+		return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B,
+						resolution);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 8b234ea961f6..d73de22102e2 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -154,10 +154,28 @@ struct intel_vgpu_port {
 	int type;
 };
 
+enum intel_vgpu_edid {
+	GVT_EDID_1024_768,
+	GVT_EDID_1920_1200,
+	GVT_EDID_NUM,
+};
+
+static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
+{
+	switch (id) {
+	case GVT_EDID_1024_768:
+		return "1024x768";
+	case GVT_EDID_1920_1200:
+		return "1920x1200";
+	default:
+		return "";
+	}
+}
+
 void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
 
-int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
 void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
 
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 1cb29b2d7dc6..933a7c211a1c 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -80,7 +80,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	int ret;
 
 	size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
-	firmware = vmalloc(size);
+	firmware = vzalloc(size);
 	if (!firmware)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 28c92346db0e..6a5ff23ded90 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1825,11 +1825,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	gma = g_gtt_index << GTT_PAGE_SHIFT;
 
 	/* the VM may configure the whole GM space when ballooning is used */
-	if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
-			"vgpu%d: found oob ggtt write, offset %x\n",
-			vgpu->id, off)) {
+	if (!vgpu_gmadr_is_valid(vgpu, gma))
 		return 0;
-	}
 
 	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
 
@@ -2015,6 +2012,22 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	return create_scratch_page_tree(vgpu);
 }
 
+static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
+{
+	struct list_head *pos, *n;
+	struct intel_vgpu_mm *mm;
+
+	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, list);
+		if (mm->type == type) {
+			vgpu->gvt->gtt.mm_free_page_table(mm);
+			list_del(&mm->list);
+			list_del(&mm->lru_list);
+			kfree(mm);
+		}
+	}
+}
+
 /**
  * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
  * @vgpu: a vGPU
@@ -2027,19 +2040,11 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
 {
-	struct list_head *pos, *n;
-	struct intel_vgpu_mm *mm;
-
 	ppgtt_free_all_shadow_page(vgpu);
 	release_scratch_page_tree(vgpu);
 
-	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
-		mm = container_of(pos, struct intel_vgpu_mm, list);
-		vgpu->gvt->gtt.mm_free_page_table(mm);
-		list_del(&mm->list);
-		list_del(&mm->lru_list);
-		kfree(mm);
-	}
+	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
 }
 
 static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2322,6 +2327,13 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
 	int i;
 
 	ppgtt_free_all_shadow_page(vgpu);
+
+	/* Shadow pages are only created when there is no page
+	 * table tracking data, so remove page tracking data after
+	 * removing the shadow pages.
+	 */
+	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+
 	if (!dmlr)
 		return;
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index e227caf5859e..23791920ced1 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -143,6 +143,8 @@ struct intel_vgpu {
 	int id;
 	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
 	bool active;
+	bool pv_notified;
+	bool failsafe;
 	bool resetting;
 	void *sched_data;
 
@@ -203,18 +205,18 @@ struct intel_gvt_firmware {
 };
 
 struct intel_gvt_opregion {
-	void __iomem *opregion_va;
+	void *opregion_va;
 	u32 opregion_pa;
 };
 
 #define NR_MAX_INTEL_VGPU_TYPES 20
 struct intel_vgpu_type {
 	char name[16];
-	unsigned int max_instance;
 	unsigned int avail_instance;
 	unsigned int low_gm_size;
 	unsigned int high_gm_size;
 	unsigned int fence;
+	enum intel_vgpu_edid resolution;
 };
 
 struct intel_gvt {
@@ -317,6 +319,7 @@ struct intel_vgpu_creation_params {
 	__u64 low_gm_sz;  /* in MB */
 	__u64 high_gm_sz; /* in MB */
 	__u64 fence_sz;
+	__u64 resolution;
 	__s32 primary;
 	__u64 vgpu_id;
 };
@@ -449,6 +452,11 @@ struct intel_gvt_ops {
 };
 
 
+enum {
+	GVT_FAILSAFE_UNSUPPORTED_GUEST,
+	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
+};
+
 #include "mpt.h"
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1d450627ff65..8e43395c748a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
 	info->size = size;
 	info->length = (i + 4) < end ? 4 : (end - i);
 	info->addr_mask = addr_mask;
+	info->ro_mask = ro_mask;
 	info->device = device;
 	info->read = read ? read : intel_vgpu_default_mmio_read;
 	info->write = write ? write : intel_vgpu_default_mmio_write;
@@ -150,15 +151,44 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
 #define fence_num_to_offset(num) \
 	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
 
+
+static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+{
+	switch (reason) {
+	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
+		pr_err("Detected your guest driver doesn't support GVT-g.\n");
+		break;
+	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
+		pr_err("Graphics resource is not enough for the guest\n");
+	default:
+		break;
+	}
+	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
+	vgpu->failsafe = true;
+}
+
 static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
 		unsigned int fence_num, void *p_data, unsigned int bytes)
 {
 	if (fence_num >= vgpu_fence_sz(vgpu)) {
-		gvt_err("vgpu%d: found oob fence register access\n",
-				vgpu->id);
-		gvt_err("vgpu%d: total fence num %d access fence num %d\n",
-				vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+
+		/* When a guest accesses oob fence regs without accessing
+		 * pv_info first, we treat the guest as not supporting GVT,
+		 * and we will let the vgpu enter failsafe mode.
+		 */
+		if (!vgpu->pv_notified)
+			enter_failsafe_mode(vgpu,
+					GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+		if (!vgpu->mmio.disable_warn_untrack) {
+			gvt_err("vgpu%d: found oob fence register access\n",
+					vgpu->id);
+			gvt_err("vgpu%d: total fence %d, access fence %d\n",
+					vgpu->id, vgpu_fence_sz(vgpu),
+					fence_num);
+		}
 		memset(p_data, 0, bytes);
+		return -EINVAL;
 	}
 	return 0;
 }
@@ -369,6 +399,74 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	return 0;
 }
 
+/* ascendingly sorted */
+static i915_reg_t force_nonpriv_white_list[] = {
+	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
+	GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
+	GEN8_CS_CHICKEN1,//_MMIO(0x2580)
+	_MMIO(0x2690),
+	_MMIO(0x2694),
+	_MMIO(0x2698),
+	_MMIO(0x4de0),
+	_MMIO(0x4de4),
+	_MMIO(0x4dfc),
+	GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
+	_MMIO(0x7014),
+	HDC_CHICKEN0,//_MMIO(0x7300)
+	GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
+	_MMIO(0x7700),
+	_MMIO(0x7704),
+	_MMIO(0x7708),
+	_MMIO(0x770c),
+	_MMIO(0xb110),
+	GEN8_L3SQCREG4,//_MMIO(0xb118)
+	_MMIO(0xe100),
+	_MMIO(0xe18c),
+	_MMIO(0xe48c),
+	_MMIO(0xe5f4),
+};
+
+/* a simple bsearch */
+static inline bool in_whitelist(unsigned int reg)
+{
+	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
+	i915_reg_t *array = force_nonpriv_white_list;
+
+	while (left < right) {
+		int mid = (left + right)/2;
+
+		if (reg > array[mid].reg)
+			left = mid + 1;
+		else if (reg < array[mid].reg)
+			right = mid;
+		else
+			return true;
+	}
+	return false;
+}
+
+static int force_nonpriv_write(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 reg_nonpriv = *(u32 *)p_data;
+	int ret = -EINVAL;
+
+	if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
+		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+			vgpu->id, offset, bytes);
+		return ret;
+	}
+
+	if (in_whitelist(reg_nonpriv)) {
+		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+			bytes);
+	} else {
+		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
+			vgpu->id, reg_nonpriv);
+	}
+	return ret;
+}
+
 static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
@@ -1001,6 +1099,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 	if (invalid_read)
 		gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
 				offset, bytes, *(u32 *)p_data);
+	vgpu->pv_notified = true;
 	return 0;
 }
 
@@ -1039,7 +1138,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
 	char vmid_str[20];
 	char display_ready_str[20];
 
-	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
 	env[0] = display_ready_str;
 
 	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
@@ -1078,6 +1177,9 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	case _vgtif_reg(execlist_context_descriptor_lo):
 	case _vgtif_reg(execlist_context_descriptor_hi):
 		break;
+	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
+		break;
 	default:
 		gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
 			offset, bytes, data);
@@ -1203,26 +1305,37 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 	u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
 
 	switch (cmd) {
-	case 0x6:
-		/**
-		 * "Read memory latency" command on gen9.
-		 * Below memory latency values are read
-		 * from skylake platform.
-		 */
-		if (!*data0)
-			*data0 = 0x1e1a1100;
-		else
-			*data0 = 0x61514b3d;
+	case GEN9_PCODE_READ_MEM_LATENCY:
+		if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+			/**
+			 * "Read memory latency" command on gen9.
+			 * Below memory latency values are read
+			 * from skylake platform.
+			 */
+			if (!*data0)
+				*data0 = 0x1e1a1100;
+			else
+				*data0 = 0x61514b3d;
+		}
+		break;
+	case SKL_PCODE_CDCLK_CONTROL:
+		if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
 		break;
-	case 0x5:
+	case GEN6_PCODE_READ_RC6VIDS:
 		*data0 |= 0x1;
 		break;
 	}
 
 	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
 			vgpu->id, value, *data0);
-
-	value &= ~(1 << 31);
+	/**
+	 * PCODE_READY clear means ready for pcode read/write,
+	 * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
+	 * always emulate as pcode read/write success and ready for access
+	 * anytime, since we don't touch real physical registers here.
+	 */
+	value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
 	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
 }
@@ -1318,6 +1431,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	bool enable_execlist;
 
 	write_vreg(vgpu, offset, p_data, bytes);
+
+	/* When PPGTT mode is enabled, check whether the guest has called
+	 * pvinfo; if not, treat the guest as non-GVT-g-aware and stop
+	 * emulating its cfg space, mmio, gtt, etc.
+	 */
+	if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
+			(data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
+			&& !vgpu->pv_notified) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+		return 0;
+	}
 	if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
 			|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
 		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
@@ -1400,6 +1524,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 #define MMIO_GM(reg, d, r, w) \
 	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
 
+#define MMIO_GM_RDR(reg, d, r, w) \
+	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
+
 #define MMIO_RO(reg, d, f, rm, r, w) \
 	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
 
@@ -1419,6 +1546,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 #define MMIO_RING_GM(prefix, d, r, w) \
 	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
 
+#define MMIO_RING_GM_RDR(prefix, d, r, w) \
+	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
+
 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
 	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
 
@@ -1427,73 +1557,81 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	int ret;
 
-	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+		intel_vgpu_reg_imr_handler);
 
 	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
 	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
 	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
 	MMIO_D(SDEISR, D_ALL);
 
-	MMIO_RING_D(RING_HWSTAM, D_ALL);
+	MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-	MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-	MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-	MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
 
 #define RING_REG(base) (base + 0x28)
-	MMIO_RING_D(RING_REG, D_ALL);
+	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x134)
-	MMIO_RING_D(RING_REG, D_ALL);
+	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 
-	MMIO_GM(0x2148, D_ALL, NULL, NULL);
-	MMIO_GM(CCID, D_ALL, NULL, NULL);
-	MMIO_GM(0x12198, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
 	MMIO_D(GEN7_CXT_SIZE, D_ALL);
 
-	MMIO_RING_D(RING_TAIL, D_ALL);
-	MMIO_RING_D(RING_HEAD, D_ALL);
-	MMIO_RING_D(RING_CTL, D_ALL);
-	MMIO_RING_D(RING_ACTHD, D_ALL);
-	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+	MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
 
 	/* RING MODE */
 #define RING_REG(base) (base + 0x29c)
-	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		ring_mode_mmio_write);
 #undef RING_REG
 
-	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
 		ring_timestamp_mmio_read, NULL);
 	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
 		ring_timestamp_mmio_read, NULL);
 
-	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-
-	MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_D(GAM_ECOCHK, D_ALL);
-	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+	MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(0x9030, D_ALL);
-	MMIO_D(0x20a0, D_ALL);
-	MMIO_D(0x2420, D_ALL);
-	MMIO_D(0x2430, D_ALL);
-	MMIO_D(0x2434, D_ALL);
-	MMIO_D(0x2438, D_ALL);
-	MMIO_D(0x243c, D_ALL);
-	MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
 	/* display */
 	MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
@@ -2022,8 +2160,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(FORCEWAKE_ACK, D_ALL);
 	MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
 	MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
-	MMIO_D(GTFIFODBG, D_ALL);
-	MMIO_D(GTFIFOCTL, D_ALL);
+	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
 	MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
 	MMIO_D(ECOBUS, D_ALL);
@@ -2080,7 +2218,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
 
-	MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+	MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
 	MMIO_D(GEN6_PCODE_DATA, D_ALL);
 	MMIO_D(0x13812c, D_ALL);
 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2159,36 +2297,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x1a054, D_ALL);
 
 	MMIO_D(0x44070, D_ALL);
-
-	MMIO_D(0x215c, D_HSW_PLUS);
+	MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
-	MMIO_D(GEN7_OACONTROL, D_HSW);
+	MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
+	MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(0x2b00, D_BDW_PLUS);
 	MMIO_D(0x2360, D_BDW_PLUS);
-	MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
 
 	MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(BCS_SWCTRL, D_ALL);
+	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
 	MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
 	MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
 	MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
@@ -2196,6 +2333,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
 	MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
+	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+	MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	return 0;
 }
 
@@ -2204,7 +2352,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	int ret;
 
-	MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+	MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
 		intel_vgpu_reg_imr_handler);
 
 	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2269,24 +2417,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
 		intel_vgpu_reg_master_irq_handler);
 
-	MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(0x1c134, D_BDW_PLUS);
-
-	MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
-	MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
-	MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
-		NULL, NULL);
-	MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
-		NULL, NULL);
+	MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+	MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+	MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		ring_mode_mmio_write);
+	MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
 		ring_timestamp_mmio_read, NULL);
 
-	MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
 #define RING_REG(base) (base + 0xd0)
 	MMIO_RING_F(RING_REG, 4, F_RO, 0,
@@ -2303,13 +2458,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x234)
-	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
-	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+	MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+		NULL, NULL);
+	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
+		~0LL, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x244)
-	MMIO_RING_D(RING_REG, D_BDW_PLUS);
-	MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+		NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x370)
@@ -2331,6 +2489,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
 	MMIO_D(0x1c054, D_BDW_PLUS);
 
+	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
+
 	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
 	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
 
@@ -2341,14 +2501,14 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
-	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
-	MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+	MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
 
 	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
-	MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
-	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+	MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
+	MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
+	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
 
 	MMIO_D(WM_MISC, D_BDW);
 	MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
@@ -2362,27 +2522,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
 	MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
 
-	MMIO_D(0xfdc, D_BDW);
-	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
-	MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+	MMIO_D(0xfdc, D_BDW_PLUS);
+	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_D(0xb1f0, D_BDW);
-	MMIO_D(0xb1c0, D_BDW);
+	MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(0xb100, D_BDW);
-	MMIO_D(0xb10c, D_BDW);
+	MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(0xb110, D_BDW);
 
-	MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
+		NULL, force_nonpriv_write);
+
+	MMIO_D(0x22040, D_BDW_PLUS);
+	MMIO_D(0x44484, D_BDW_PLUS);
+	MMIO_D(0x4448c, D_BDW_PLUS);
 
-	MMIO_D(0x83a4, D_BDW);
+	MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
 
-	MMIO_D(0x8430, D_BDW);
+	MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
 
 	MMIO_D(0x110000, D_BDW_PLUS);
 
@@ -2394,10 +2558,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2394 MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2558 MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2395 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2559 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2396 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2560 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2397 MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); 2561 MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2398 2562
2399 MMIO_D(0x2248, D_BDW); 2563 MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
2400 2564
2565 MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2566 MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2567 MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2568 MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2569 MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2570 MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2571 MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2572 MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2573 MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2401 return 0; 2574 return 0;
2402} 2575}
2403 2576
@@ -2420,7 +2593,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2420 MMIO_D(HSW_PWR_WELL_BIOS, D_SKL); 2593 MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
2421 MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write); 2594 MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
2422 2595
2423 MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
2424 MMIO_D(0xa210, D_SKL_PLUS); 2596 MMIO_D(0xa210, D_SKL_PLUS);
2425 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2597 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
2426 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2598 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2578,16 +2750,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2578 MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); 2750 MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
2579 2751
2580 MMIO_D(0xd08, D_SKL); 2752 MMIO_D(0xd08, D_SKL);
2581 MMIO_D(0x20e0, D_SKL); 2753 MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
2582 MMIO_D(0x20ec, D_SKL); 2754 MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2583 2755
2584 /* TRTT */ 2756 /* TRTT */
2585 MMIO_D(0x4de0, D_SKL); 2757 MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
2586 MMIO_D(0x4de4, D_SKL); 2758 MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
2587 MMIO_D(0x4de8, D_SKL); 2759 MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
2588 MMIO_D(0x4dec, D_SKL); 2760 MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
2589 MMIO_D(0x4df0, D_SKL); 2761 MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
2590 MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write); 2762 MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
2591 MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write); 2763 MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
2592 2764
2593 MMIO_D(0x45008, D_SKL); 2765 MMIO_D(0x45008, D_SKL);
@@ -2611,7 +2783,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2611 MMIO_D(0x65f08, D_SKL); 2783 MMIO_D(0x65f08, D_SKL);
2612 MMIO_D(0x320f0, D_SKL); 2784 MMIO_D(0x320f0, D_SKL);
2613 2785
2614 MMIO_D(_REG_VCS2_EXCC, D_SKL); 2786 MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
2615 MMIO_D(0x70034, D_SKL); 2787 MMIO_D(0x70034, D_SKL);
2616 MMIO_D(0x71034, D_SKL); 2788 MMIO_D(0x71034, D_SKL);
2617 MMIO_D(0x72034, D_SKL); 2789 MMIO_D(0x72034, D_SKL);
@@ -2624,6 +2796,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2624 MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL); 2796 MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
2625 2797
2626 MMIO_D(0x44500, D_SKL); 2798 MMIO_D(0x44500, D_SKL);
2799 MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2800 MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
2801 NULL, NULL);
2627 return 0; 2802 return 0;
2628} 2803}
2629 2804
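
Most of the handlers.c churn above upgrades plain MMIO_D entries to MMIO_DFH with F_CMD_ACCESS, which whitelists the register for access from a guest command buffer (e.g. an LRI scanned by the command parser) instead of only via trapped MMIO. As a rough sketch of how such a per-register attribute can be consulted — the attribute-table layout and both helper names are assumptions for illustration, not the exact GVT-g internals:

/* Sketch: one attribute word per 4-byte register; F_CMD_ACCESS is
 * the same flag the MMIO_DFH()/MMIO_F() entries above set
 * (table layout assumed). */
static bool reg_is_cmd_accessible(struct intel_gvt *gvt,
				  unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/* A command scanner would reject LRIs to anything else. */
static int audit_lri(struct intel_gvt *gvt, unsigned int offset)
{
	if (!reg_is_cmd_accessible(gvt, offset))
		return -EINVAL; /* guest batch touches a privileged reg */
	return 0;
}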
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 0f7f5d97f582..84d801638ede 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
96 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; 96 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
97 dma_addr_t daddr; 97 dma_addr_t daddr;
98 98
99 page = pfn_to_page(pfn); 99 if (unlikely(!pfn_valid(pfn)))
100 if (is_error_page(page))
101 return -EFAULT; 100 return -EFAULT;
102 101
102 page = pfn_to_page(pfn);
103 daddr = dma_map_page(dev, page, 0, PAGE_SIZE, 103 daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
104 PCI_DMA_BIDIRECTIONAL); 104 PCI_DMA_BIDIRECTIONAL);
105 if (dma_mapping_error(dev, daddr)) 105 if (dma_mapping_error(dev, daddr))
@@ -295,10 +295,10 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
295 return 0; 295 return 0;
296 296
297 return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" 297 return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
298 "fence: %d\n", 298 "fence: %d\nresolution: %s\n",
299 BYTES_TO_MB(type->low_gm_size), 299 BYTES_TO_MB(type->low_gm_size),
300 BYTES_TO_MB(type->high_gm_size), 300 BYTES_TO_MB(type->high_gm_size),
301 type->fence); 301 type->fence, vgpu_edid_str(type->resolution));
302} 302}
303 303
304static MDEV_TYPE_ATTR_RO(available_instances); 304static MDEV_TYPE_ATTR_RO(available_instances);
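
The kvmgt.c reordering matters because pfn_to_page() is only well defined for PFNs that pfn_valid() accepts; the old code conjured a struct page first and only then asked is_error_page(). A condensed sketch of the corrected ordering (the wrapper name is illustrative, the calls are the ones used above):

/* Validate the PFN before ever touching its struct page. */
static int map_guest_pfn(struct device *dev, unsigned long pfn,
			 dma_addr_t *daddr)
{
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))	/* no struct page behind this PFN */
		return -EFAULT;

	page = pfn_to_page(pfn);	/* safe only after pfn_valid() */
	*daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
			      PCI_DMA_BIDIRECTIONAL);
	return dma_mapping_error(dev, *daddr) ? -ENOMEM : 0;
}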
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 4df078bc5d04..60b698cb8365 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -57,6 +57,58 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
57 (reg >= gvt->device_info.gtt_start_offset \ 57 (reg >= gvt->device_info.gtt_start_offset \
58 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) 58 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
59 59
60static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
61 void *p_data, unsigned int bytes, bool read)
62{
63 struct intel_gvt *gvt = NULL;
64 void *pt = NULL;
65 unsigned int offset = 0;
66
67 if (!vgpu || !p_data)
68 return;
69
70 gvt = vgpu->gvt;
71 mutex_lock(&gvt->lock);
72 offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
73 if (reg_is_mmio(gvt, offset)) {
74 if (read)
75 intel_vgpu_default_mmio_read(vgpu, offset, p_data,
76 bytes);
77 else
78 intel_vgpu_default_mmio_write(vgpu, offset, p_data,
79 bytes);
80 } else if (reg_is_gtt(gvt, offset) &&
81 vgpu->gtt.ggtt_mm->virtual_page_table) {
82 offset -= gvt->device_info.gtt_start_offset;
83 pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
84 if (read)
85 memcpy(p_data, pt, bytes);
86 else
87 memcpy(pt, p_data, bytes);
88
89 } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
90 struct intel_vgpu_guest_page *gp;
91
 92 /* Since we enter failsafe mode early during guest boot, the
 93 * guest may not have had a chance to set up its PPGTT, so
 94 * there should not be any write-protected pages for it. Keep
 95 * the wp-related code here in case we need it in the future.
 96 */
97 gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
98 if (gp) {
 99 /* remove write protection to prevent future traps */
100 intel_vgpu_clean_guest_page(vgpu, gp);
101 if (read)
102 intel_gvt_hypervisor_read_gpa(vgpu, pa,
103 p_data, bytes);
104 else
105 intel_gvt_hypervisor_write_gpa(vgpu, pa,
106 p_data, bytes);
107 }
108 }
109 mutex_unlock(&gvt->lock);
110}
111
60/** 112/**
61 * intel_vgpu_emulate_mmio_read - emulate MMIO read 113 * intel_vgpu_emulate_mmio_read - emulate MMIO read
62 * @vgpu: a vGPU 114 * @vgpu: a vGPU
@@ -75,6 +127,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
75 unsigned int offset = 0; 127 unsigned int offset = 0;
76 int ret = -EINVAL; 128 int ret = -EINVAL;
77 129
130
131 if (vgpu->failsafe) {
132 failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
133 return 0;
134 }
78 mutex_lock(&gvt->lock); 135 mutex_lock(&gvt->lock);
79 136
80 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { 137 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -188,6 +245,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
188 u32 old_vreg = 0, old_sreg = 0; 245 u32 old_vreg = 0, old_sreg = 0;
189 int ret = -EINVAL; 246 int ret = -EINVAL;
190 247
248 if (vgpu->failsafe) {
249 failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
250 return 0;
251 }
252
191 mutex_lock(&gvt->lock); 253 mutex_lock(&gvt->lock);
192 254
193 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { 255 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -236,7 +298,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
236 298
237 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); 299 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
238 if (!mmio && !vgpu->mmio.disable_warn_untrack) 300 if (!mmio && !vgpu->mmio.disable_warn_untrack)
239 gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n", 301 gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
240 vgpu->id, offset, bytes, *(u32 *)p_data); 302 vgpu->id, offset, bytes, *(u32 *)p_data);
241 303
242 if (!intel_gvt_mmio_is_unalign(gvt, offset)) { 304 if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
@@ -322,6 +384,8 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
322 384
323 /* set the bit 0:2(Core C-State ) to C0 */ 385 /* set the bit 0:2(Core C-State ) to C0 */
324 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; 386 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
387
388 vgpu->mmio.disable_warn_untrack = false;
325} 389}
326 390
327/** 391/**
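
failsafe_emulate_mmio_rw() gives a misbehaving or unsupported guest a degraded but safe emulation path: reads and writes still hit the virtual register file or the virtual GGTT, but no handler side effects run. Both emulation entry points short-circuit into it when vgpu->failsafe is set. A sketch of how a vGPU might be flipped into this mode; the helper below is hypothetical, and only the two fields it sets are taken from the patch:

/* Hypothetical trigger; the real trigger sites live in the MMIO
 * handlers. */
static void enter_failsafe_mode(struct intel_vgpu *vgpu, const char *why)
{
	vgpu->failsafe = true;			/* route MMIO to the failsafe path */
	vgpu->mmio.disable_warn_untrack = true;	/* stop untracked-MMIO warnings */
	gvt_err("vgpu%d: entering failsafe mode (%s)\n", vgpu->id, why);
}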
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d9fb41ab7119..5d1caf9daba9 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -27,7 +27,6 @@
27 27
28static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) 28static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
29{ 29{
30 void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
31 u8 *buf; 30 u8 *buf;
32 int i; 31 int i;
33 32
@@ -43,8 +42,8 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
43 if (!vgpu_opregion(vgpu)->va) 42 if (!vgpu_opregion(vgpu)->va)
44 return -ENOMEM; 43 return -ENOMEM;
45 44
46 memcpy_fromio(vgpu_opregion(vgpu)->va, host_va, 45 memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
47 INTEL_GVT_OPREGION_SIZE); 46 INTEL_GVT_OPREGION_SIZE);
48 47
49 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) 48 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
50 vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; 49 vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
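
The opregion change swaps memcpy_fromio() for plain memcpy() because opregion_va now points at ordinary kernel memory rather than an ioremap()ed window; __iomem pointers must only be touched through the io accessors, and regular allocations must not be. For illustration only (the mapping and buffer here are stand-ins):

static void copy_both_kinds(void *dst, resource_size_t phys_base, size_t len)
{
	void __iomem *win = ioremap(phys_base, len);	/* device memory */
	void *buf = kzalloc(len, GFP_KERNEL);		/* ordinary memory */

	if (win) {
		memcpy_fromio(dst, win, len);	/* io-aware copy required */
		iounmap(win);
	}
	if (buf) {
		memcpy(dst, buf, len);		/* plain memcpy is correct here */
		kfree(buf);
	}
}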
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 2b3a642284b6..73f052a4f424 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -53,6 +53,14 @@ static struct render_mmio gen8_render_mmio_list[] = {
53 {RCS, _MMIO(0x24d4), 0, false}, 53 {RCS, _MMIO(0x24d4), 0, false},
54 {RCS, _MMIO(0x24d8), 0, false}, 54 {RCS, _MMIO(0x24d8), 0, false},
55 {RCS, _MMIO(0x24dc), 0, false}, 55 {RCS, _MMIO(0x24dc), 0, false},
56 {RCS, _MMIO(0x24e0), 0, false},
57 {RCS, _MMIO(0x24e4), 0, false},
58 {RCS, _MMIO(0x24e8), 0, false},
59 {RCS, _MMIO(0x24ec), 0, false},
60 {RCS, _MMIO(0x24f0), 0, false},
61 {RCS, _MMIO(0x24f4), 0, false},
62 {RCS, _MMIO(0x24f8), 0, false},
63 {RCS, _MMIO(0x24fc), 0, false},
56 {RCS, _MMIO(0x7004), 0xffff, true}, 64 {RCS, _MMIO(0x7004), 0xffff, true},
57 {RCS, _MMIO(0x7008), 0xffff, true}, 65 {RCS, _MMIO(0x7008), 0xffff, true},
58 {RCS, _MMIO(0x7000), 0xffff, true}, 66 {RCS, _MMIO(0x7000), 0xffff, true},
@@ -76,6 +84,14 @@ static struct render_mmio gen9_render_mmio_list[] = {
76 {RCS, _MMIO(0x24d4), 0, false}, 84 {RCS, _MMIO(0x24d4), 0, false},
77 {RCS, _MMIO(0x24d8), 0, false}, 85 {RCS, _MMIO(0x24d8), 0, false},
78 {RCS, _MMIO(0x24dc), 0, false}, 86 {RCS, _MMIO(0x24dc), 0, false},
87 {RCS, _MMIO(0x24e0), 0, false},
88 {RCS, _MMIO(0x24e4), 0, false},
89 {RCS, _MMIO(0x24e8), 0, false},
90 {RCS, _MMIO(0x24ec), 0, false},
91 {RCS, _MMIO(0x24f0), 0, false},
92 {RCS, _MMIO(0x24f4), 0, false},
93 {RCS, _MMIO(0x24f8), 0, false},
94 {RCS, _MMIO(0x24fc), 0, false},
79 {RCS, _MMIO(0x7004), 0xffff, true}, 95 {RCS, _MMIO(0x7004), 0xffff, true},
80 {RCS, _MMIO(0x7008), 0xffff, true}, 96 {RCS, _MMIO(0x7008), 0xffff, true},
81 {RCS, _MMIO(0x7000), 0xffff, true}, 97 {RCS, _MMIO(0x7000), 0xffff, true},
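
The eight registers appended to both lists (0x24e0 through 0x24fc) extend the force-nonpriv set that is saved and restored around a vGPU context switch. Per entry the handling reduces to roughly the sketch below; the struct fields mirror the initializers above, while the saved 'value' field and the masked-write handling are approximations, not the verbatim render.c code:

struct render_mmio {
	int ring_id;
	i915_reg_t reg;
	u32 mask;	/* 0xffff for registers with write-enable mask bits */
	bool in_context;
	u32 value;	/* host value stashed while the guest's is loaded */
};

static void load_one_render_mmio(struct drm_i915_private *dev_priv,
				 struct intel_vgpu *vgpu,
				 struct render_mmio *m)
{
	u32 v;

	m->value = I915_READ(m->reg);	/* remember the host value */
	v = vgpu_vreg(vgpu, m->reg);	/* the guest's shadow value */
	if (m->mask)			/* masked reg: raise write-enable bits */
		v |= m->mask << 16;
	I915_WRITE(m->reg, v);
}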
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d6b6d0efdd1a..d3a56c949025 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -139,6 +139,9 @@ static int shadow_context_status_change(struct notifier_block *nb,
139 struct intel_vgpu_workload *workload = 139 struct intel_vgpu_workload *workload =
140 scheduler->current_workload[req->engine->id]; 140 scheduler->current_workload[req->engine->id];
141 141
142 if (unlikely(!workload))
143 return NOTIFY_OK;
144
142 switch (action) { 145 switch (action) {
143 case INTEL_CONTEXT_SCHEDULE_IN: 146 case INTEL_CONTEXT_SCHEDULE_IN:
144 intel_gvt_load_render_mmio(workload->vgpu, 147 intel_gvt_load_render_mmio(workload->vgpu,
@@ -148,6 +151,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
148 case INTEL_CONTEXT_SCHEDULE_OUT: 151 case INTEL_CONTEXT_SCHEDULE_OUT:
149 intel_gvt_restore_render_mmio(workload->vgpu, 152 intel_gvt_restore_render_mmio(workload->vgpu,
150 workload->ring_id); 153 workload->ring_id);
 154 /* If the status is -EINPROGRESS, this workload did not
 155 * hit any issue during dispatch, so when we get the
 156 * SCHEDULE_OUT we set the status to zero for good.
 157 * If the status is NOT -EINPROGRESS, something went
 158 * wrong during dispatch and the status should not be
 159 * overwritten with zero.
 160 */
161 if (workload->status == -EINPROGRESS)
162 workload->status = 0;
151 atomic_set(&workload->shadow_ctx_active, 0); 163 atomic_set(&workload->shadow_ctx_active, 0);
152 break; 164 break;
153 default: 165 default:
@@ -359,15 +371,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
359 workload = scheduler->current_workload[ring_id]; 371 workload = scheduler->current_workload[ring_id];
360 vgpu = workload->vgpu; 372 vgpu = workload->vgpu;
361 373
 362 if (!workload->status && !vgpu->resetting) { 374 /* For a workload with a request, we need to wait for the
 375 * context switch to make sure the request has completed.
 376 * For a workload without a request, complete it directly.
 377 */
378 if (workload->req) {
363 wait_event(workload->shadow_ctx_status_wq, 379 wait_event(workload->shadow_ctx_status_wq,
364 !atomic_read(&workload->shadow_ctx_active)); 380 !atomic_read(&workload->shadow_ctx_active));
365 381
366 update_guest_context(workload); 382 i915_gem_request_put(fetch_and_zero(&workload->req));
383
384 if (!workload->status && !vgpu->resetting) {
385 update_guest_context(workload);
367 386
368 for_each_set_bit(event, workload->pending_events, 387 for_each_set_bit(event, workload->pending_events,
369 INTEL_GVT_EVENT_MAX) 388 INTEL_GVT_EVENT_MAX)
370 intel_vgpu_trigger_virtual_event(vgpu, event); 389 intel_vgpu_trigger_virtual_event(vgpu, event);
390 }
371 } 391 }
372 392
373 gvt_dbg_sched("ring id %d complete workload %p status %d\n", 393 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -397,7 +417,6 @@ static int workload_thread(void *priv)
397 int ring_id = p->ring_id; 417 int ring_id = p->ring_id;
398 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 418 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
399 struct intel_vgpu_workload *workload = NULL; 419 struct intel_vgpu_workload *workload = NULL;
400 long lret;
401 int ret; 420 int ret;
402 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); 421 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
403 DEFINE_WAIT_FUNC(wait, woken_wake_function); 422 DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -446,23 +465,24 @@ static int workload_thread(void *priv)
446 465
447 gvt_dbg_sched("ring id %d wait workload %p\n", 466 gvt_dbg_sched("ring id %d wait workload %p\n",
448 workload->ring_id, workload); 467 workload->ring_id, workload);
449 468retry:
450 lret = i915_wait_request(workload->req, 469 i915_wait_request(workload->req,
451 0, MAX_SCHEDULE_TIMEOUT); 470 0, MAX_SCHEDULE_TIMEOUT);
 452 if (lret < 0) { 471 /* i915 has a replay mechanism and a request will be replayed
 453 workload->status = lret; 472 * after an i915 reset, so the seqno will be updated anyway.
 454 gvt_err("fail to wait workload, skip\n"); 473 * If the seqno has not been updated after the wait, the
 455 } else { 474 * replay may still be in progress, so wait again.
 456 workload->status = 0; 475 */
476 if (!i915_gem_request_completed(workload->req)) {
477 gvt_dbg_sched("workload %p not completed, wait again\n",
478 workload);
479 goto retry;
457 } 480 }
458 481
459complete: 482complete:
460 gvt_dbg_sched("will complete workload %p, status: %d\n", 483 gvt_dbg_sched("will complete workload %p, status: %d\n",
461 workload, workload->status); 484 workload, workload->status);
462 485
463 if (workload->req)
464 i915_gem_request_put(fetch_and_zero(&workload->req));
465
466 complete_current_workload(gvt, ring_id); 486 complete_current_workload(gvt, ring_id);
467 487
468 if (need_force_wake) 488 if (need_force_wake)
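
The retry loop replacing the old error path exists because i915 can replay a request after a GPU reset: i915_wait_request() may return while the replay is still rewinding the ring, and only i915_gem_request_completed() (a seqno check) proves the request actually finished. The control flow reduces to:

/* Condensed restatement of the wait logic above. */
do {
	i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
} while (!i915_gem_request_completed(workload->req));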
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 95a97aa0051e..41cfa5ccae84 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -64,6 +64,20 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
64 WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); 64 WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
65} 65}
66 66
67static struct {
68 unsigned int low_mm;
69 unsigned int high_mm;
70 unsigned int fence;
71 enum intel_vgpu_edid edid;
72 char *name;
73} vgpu_types[] = {
74/* Fixed vGPU type table */
75 { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
76 { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
77 { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
78 { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
79};
80
67/** 81/**
68 * intel_gvt_init_vgpu_types - initialize vGPU type list 82 * intel_gvt_init_vgpu_types - initialize vGPU type list
69 * @gvt : GVT device 83 * @gvt : GVT device
@@ -78,9 +92,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
78 unsigned int min_low; 92 unsigned int min_low;
79 93
 80 /* vGPU type name is defined as GVTg_Vx_y which contains 94 * physical GPU generation type (e.g. V4 as BDW server, V5 as
81 * physical GPU generation type and 'y' means maximum vGPU 95 * physical GPU generation type (e.g V4 as BDW server, V5 as
82 * instances user can create on one physical GPU for this 96 * SKL server).
83 * type.
84 * 97 *
 85 * Depending on physical SKU resources, one might see vGPU types like 98 * Depending on physical SKU resources, one might see vGPU types like
86 * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create 99 * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
@@ -92,7 +105,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
92 */ 105 */
93 low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; 106 low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
94 high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; 107 high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
95 num_types = 4; 108 num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
96 109
97 gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), 110 gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
98 GFP_KERNEL); 111 GFP_KERNEL);
@@ -101,28 +114,29 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
101 114
102 min_low = MB_TO_BYTES(32); 115 min_low = MB_TO_BYTES(32);
103 for (i = 0; i < num_types; ++i) { 116 for (i = 0; i < num_types; ++i) {
104 if (low_avail / min_low == 0) 117 if (low_avail / vgpu_types[i].low_mm == 0)
105 break; 118 break;
106 gvt->types[i].low_gm_size = min_low; 119
107 gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); 120 gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
108 gvt->types[i].fence = 4; 121 gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
109 gvt->types[i].max_instance = min(low_avail / min_low, 122 gvt->types[i].fence = vgpu_types[i].fence;
110 high_avail / gvt->types[i].high_gm_size); 123 gvt->types[i].resolution = vgpu_types[i].edid;
111 gvt->types[i].avail_instance = gvt->types[i].max_instance; 124 gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
125 high_avail / vgpu_types[i].high_mm);
112 126
113 if (IS_GEN8(gvt->dev_priv)) 127 if (IS_GEN8(gvt->dev_priv))
114 sprintf(gvt->types[i].name, "GVTg_V4_%u", 128 sprintf(gvt->types[i].name, "GVTg_V4_%s",
115 gvt->types[i].max_instance); 129 vgpu_types[i].name);
116 else if (IS_GEN9(gvt->dev_priv)) 130 else if (IS_GEN9(gvt->dev_priv))
117 sprintf(gvt->types[i].name, "GVTg_V5_%u", 131 sprintf(gvt->types[i].name, "GVTg_V5_%s",
118 gvt->types[i].max_instance); 132 vgpu_types[i].name);
119 133
120 min_low <<= 1; 134 gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
121 gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n", 135 i, gvt->types[i].name,
122 i, gvt->types[i].name, gvt->types[i].max_instance,
123 gvt->types[i].avail_instance, 136 gvt->types[i].avail_instance,
124 gvt->types[i].low_gm_size, 137 gvt->types[i].low_gm_size,
125 gvt->types[i].high_gm_size, gvt->types[i].fence); 138 gvt->types[i].high_gm_size, gvt->types[i].fence,
139 vgpu_edid_str(gvt->types[i].resolution));
126 } 140 }
127 141
128 gvt->num_types = i; 142 gvt->num_types = i;
@@ -138,7 +152,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
138{ 152{
139 int i; 153 int i;
140 unsigned int low_gm_avail, high_gm_avail, fence_avail; 154 unsigned int low_gm_avail, high_gm_avail, fence_avail;
141 unsigned int low_gm_min, high_gm_min, fence_min, total_min; 155 unsigned int low_gm_min, high_gm_min, fence_min;
142 156
 143 /* Need to depend on the maximum hw resource size but keep the 157 /* Need to depend on the maximum hw resource size but keep the
 144 * static config for now. */ 158 * static config for now. */
@@ -154,12 +168,11 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
154 low_gm_min = low_gm_avail / gvt->types[i].low_gm_size; 168 low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
155 high_gm_min = high_gm_avail / gvt->types[i].high_gm_size; 169 high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
156 fence_min = fence_avail / gvt->types[i].fence; 170 fence_min = fence_avail / gvt->types[i].fence;
157 total_min = min(min(low_gm_min, high_gm_min), fence_min); 171 gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
158 gvt->types[i].avail_instance = min(gvt->types[i].max_instance, 172 fence_min);
159 total_min);
160 173
161 gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n", 174 gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
162 i, gvt->types[i].name, gvt->types[i].max_instance, 175 i, gvt->types[i].name,
163 gvt->types[i].avail_instance, gvt->types[i].low_gm_size, 176 gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
164 gvt->types[i].high_gm_size, gvt->types[i].fence); 177 gvt->types[i].high_gm_size, gvt->types[i].fence);
165 } 178 }
@@ -248,7 +261,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
248 if (ret) 261 if (ret)
249 goto out_detach_hypervisor_vgpu; 262 goto out_detach_hypervisor_vgpu;
250 263
251 ret = intel_vgpu_init_display(vgpu); 264 ret = intel_vgpu_init_display(vgpu, param->resolution);
252 if (ret) 265 if (ret)
253 goto out_clean_gtt; 266 goto out_clean_gtt;
254 267
@@ -312,6 +325,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
312 param.low_gm_sz = type->low_gm_size; 325 param.low_gm_sz = type->low_gm_size;
313 param.high_gm_sz = type->high_gm_size; 326 param.high_gm_sz = type->high_gm_size;
314 param.fence_sz = type->fence; 327 param.fence_sz = type->fence;
328 param.resolution = type->resolution;
315 329
316 /* XXX current param based on MB */ 330 /* XXX current param based on MB */
317 param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz); 331 param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
@@ -387,8 +401,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
387 populate_pvinfo_page(vgpu); 401 populate_pvinfo_page(vgpu);
388 intel_vgpu_reset_display(vgpu); 402 intel_vgpu_reset_display(vgpu);
389 403
390 if (dmlr) 404 if (dmlr) {
391 intel_vgpu_reset_cfg_space(vgpu); 405 intel_vgpu_reset_cfg_space(vgpu);
406 /* only reset the failsafe mode when dmlr reset */
407 vgpu->failsafe = false;
408 vgpu->pv_notified = false;
409 }
392 } 410 }
393 411
394 vgpu->resetting = false; 412 vgpu->resetting = false;
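
With the fixed vgpu_types[] table, the instance budget becomes a straight min() over the three resources instead of the old doubling low_mm ladder, and max_instance disappears. A worked example under assumed leftovers of 512MB low GM, 2048MB high GM and 32 fences, for the "_4" row (128MB low / 512MB high / 4 fences):

/* Numbers are illustrative only; min3() is the kernel helper. */
u32 avail = min3((u32)(MB_TO_BYTES(512ULL) / MB_TO_BYTES(128ULL)),  /* = 4 */
		 (u32)(MB_TO_BYTES(2048ULL) / MB_TO_BYTES(512ULL)), /* = 4 */
		 32U / 4);                                          /* = 8 */
/* avail == 4: graphics memory, not fences, is the binding constraint */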
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index e10a4eda4078..1144e0c9e894 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -65,13 +65,11 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
65 switch (format) { 65 switch (format) {
66 case DRM_FORMAT_RGB565: 66 case DRM_FORMAT_RGB565:
67 dev_dbg(drm->dev, "Setting up RGB565 mode\n"); 67 dev_dbg(drm->dev, "Setting up RGB565 mode\n");
68 ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
69 ctrl |= CTRL_SET_WORD_LENGTH(0); 68 ctrl |= CTRL_SET_WORD_LENGTH(0);
70 ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf); 69 ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
71 break; 70 break;
72 case DRM_FORMAT_XRGB8888: 71 case DRM_FORMAT_XRGB8888:
73 dev_dbg(drm->dev, "Setting up XRGB8888 mode\n"); 72 dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
74 ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
75 ctrl |= CTRL_SET_WORD_LENGTH(3); 73 ctrl |= CTRL_SET_WORD_LENGTH(3);
76 /* Do not use packed pixels = one pixel per word instead. */ 74 /* Do not use packed pixels = one pixel per word instead. */
77 ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7); 75 ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
@@ -87,6 +85,36 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
87 return 0; 85 return 0;
88} 86}
89 87
88static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
89{
90 struct drm_crtc *crtc = &mxsfb->pipe.crtc;
91 struct drm_device *drm = crtc->dev;
92 u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
93 u32 reg;
94
95 reg = readl(mxsfb->base + LCDC_CTRL);
96
97 if (mxsfb->connector.display_info.num_bus_formats)
98 bus_format = mxsfb->connector.display_info.bus_formats[0];
99
100 reg &= ~CTRL_BUS_WIDTH_MASK;
101 switch (bus_format) {
102 case MEDIA_BUS_FMT_RGB565_1X16:
103 reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
104 break;
105 case MEDIA_BUS_FMT_RGB666_1X18:
106 reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_18BIT);
107 break;
108 case MEDIA_BUS_FMT_RGB888_1X24:
109 reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
110 break;
111 default:
112 dev_err(drm->dev, "Unknown media bus format %d\n", bus_format);
113 break;
114 }
115 writel(reg, mxsfb->base + LCDC_CTRL);
116}
117
90static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb) 118static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
91{ 119{
92 u32 reg; 120 u32 reg;
@@ -168,13 +196,22 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
168 vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH; 196 vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
169 if (m->flags & DRM_MODE_FLAG_PVSYNC) 197 if (m->flags & DRM_MODE_FLAG_PVSYNC)
170 vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH; 198 vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
 171 if (bus_flags & DRM_BUS_FLAG_DE_HIGH) 199 /* Make sure Data Enable is active-high by default */
200 if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
172 vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; 201 vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
173 if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) 202 /*
 203 * The DRM_BUS_FLAG_PIXDATA_* defines are controller centric,
 204 * while the controller's VDCTRL0_DOTCLK is display centric:
 205 * drive on the positive edge -> the display samples on the falling edge,
206 * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
207 */
208 if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
174 vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING; 209 vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
175 210
176 writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0); 211 writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
177 212
213 mxsfb_set_bus_fmt(mxsfb);
214
178 /* Frame length in lines. */ 215 /* Frame length in lines. */
179 writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1); 216 writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);
180 217
@@ -184,8 +221,8 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
184 VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal), 221 VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
185 mxsfb->base + LCDC_VDCTRL2); 222 mxsfb->base + LCDC_VDCTRL2);
186 223
187 writel(SET_HOR_WAIT_CNT(m->crtc_hblank_end - m->crtc_hsync_end) | 224 writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) |
188 SET_VERT_WAIT_CNT(m->crtc_vblank_end - m->crtc_vsync_end), 225 SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start),
189 mxsfb->base + LCDC_VDCTRL3); 226 mxsfb->base + LCDC_VDCTRL3);
190 227
191 writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay), 228 writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
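
The VDCTRL3 change computes the wait counts from sync start to active video, i.e. sync width plus back porch, which crtc_htotal - crtc_hsync_start yields for any mode; the old hblank/vblank based formula was only right when blank end coincided with total. For a standard 640x480@60 mode (hsync_start 656, hsync_end 752, htotal 800; vsync_start 490, vtotal 525):

/* 800 - 656 = 144 = 96 (sync) + 48 (back porch) pixel clocks */
u32 hwait = m->crtc_htotal - m->crtc_hsync_start;	/* 144 */
/* 525 - 490 = 35 = 2 (sync) + 33 (back porch) lines */
u32 vwait = m->crtc_vtotal - m->crtc_vsync_start;	/* 35 */

writel(SET_HOR_WAIT_CNT(hwait) | SET_VERT_WAIT_CNT(vwait),
       mxsfb->base + LCDC_VDCTRL3);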
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index cdfbe0284635..ff6d6a6f842e 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -102,14 +102,18 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
102{ 102{
103 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); 103 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
104 104
105 drm_panel_prepare(mxsfb->panel);
105 mxsfb_crtc_enable(mxsfb); 106 mxsfb_crtc_enable(mxsfb);
107 drm_panel_enable(mxsfb->panel);
106} 108}
107 109
108static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe) 110static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
109{ 111{
110 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); 112 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
111 113
114 drm_panel_disable(mxsfb->panel);
112 mxsfb_crtc_disable(mxsfb); 115 mxsfb_crtc_disable(mxsfb);
116 drm_panel_unprepare(mxsfb->panel);
113} 117}
114 118
115static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe, 119static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
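
Bracketing the controller with the drm_panel calls produces the usual panel power sequence: supply rails come up before the first pixel clock, and the backlight only turns on once valid video is scanning out, with the mirror-image order on the way down. Condensed, the two hooks now perform:

/* enable:  power the panel, start scanout, then light the backlight */
drm_panel_prepare(mxsfb->panel);
mxsfb_crtc_enable(mxsfb);
drm_panel_enable(mxsfb->panel);

/* disable: backlight off first, stop scanout, then cut panel power */
drm_panel_disable(mxsfb->panel);
mxsfb_crtc_disable(mxsfb);
drm_panel_unprepare(mxsfb->panel);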
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index fa8d17399407..b8e81422d4e2 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -112,6 +112,7 @@ static int mxsfb_attach_endpoint(struct drm_device *drm,
112 112
113int mxsfb_create_output(struct drm_device *drm) 113int mxsfb_create_output(struct drm_device *drm)
114{ 114{
115 struct mxsfb_drm_private *mxsfb = drm->dev_private;
115 struct device_node *ep_np = NULL; 116 struct device_node *ep_np = NULL;
116 struct of_endpoint ep; 117 struct of_endpoint ep;
117 int ret; 118 int ret;
@@ -127,5 +128,8 @@ int mxsfb_create_output(struct drm_device *drm)
127 } 128 }
128 } 129 }
129 130
131 if (!mxsfb->panel)
132 return -EPROBE_DEFER;
133
130 return 0; 134 return 0;
131} 135}
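
Returning -EPROBE_DEFER when no panel has bound yet asks the driver core to requeue the whole probe and retry once the panel driver registers. For that to work, the value has to reach the probe return unchanged, roughly (call site assumed for illustration):

/* In the bind/probe path; any non-zero return aborts the probe, and
 * -EPROBE_DEFER specifically schedules a later retry. */
ret = mxsfb_create_output(drm);
if (ret)
	return ret;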
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
index 31d62cd0d3d7..66a6ba9ec533 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_regs.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
@@ -44,6 +44,7 @@
44#define CTRL_DATA_SELECT (1 << 16) 44#define CTRL_DATA_SELECT (1 << 16)
45#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10) 45#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
46#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3) 46#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
47#define CTRL_BUS_WIDTH_MASK (0x3 << 10)
47#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8) 48#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
48#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3) 49#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
49#define CTRL_MASTER (1 << 5) 50#define CTRL_MASTER (1 << 5)