author:    Dave Airlie <airlied@redhat.com>  2017-02-17 15:35:25 -0500
committer: Dave Airlie <airlied@redhat.com>  2017-02-17 15:35:25 -0500
commit:    601109c5c74a10e6b89465cb6aa31a40d1efc8e3
tree:      bcb9b4beff8005bdf0ef06b6f2de485968247d67
parent:    8fd4a62d875945fc8aacdb92fdc90161ec1d9bae
parent:    998d75730b40afc218c059d811869abe9676b305
Merge tag 'drm-intel-next-fixes-2017-02-17' of git://anongit.freedesktop.org/git/drm-intel into drm-next
i915 and GVT fixes for v4.11 merge window
* tag 'drm-intel-next-fixes-2017-02-17' of git://anongit.freedesktop.org/git/drm-intel: (32 commits)
drm/i915: Fix not finding the VBT when it overlaps with OPREGION_ASLE_EXT
drm/i915: Pass timeout==0 on to i915_gem_object_wait_fence()
drm/i915/gvt: Disable access to stolen memory as a guest
drm/i915: Avoid spurious WARNs about the wrong pipe in the PPS code
drm/i915: Check for timeout completion when waiting for the rq to submitted
drm/i915: A hotfix for making aliasing PPGTT work for GVT-g
drm/i915: Restore context and pd for ringbuffer submission after reset
drm/i915: Let execlist_update_context() cover !FULL_PPGTT mode.
drm/i915/lspcon: Fix resume time initialization due to unasserted HPD
drm/i915/gen9+: Enable hotplug detection early
drm/i915: Reject set-tiling-ioctl with stride==0 and a tiling mode
drm/i915: Recreate internal objects with single page segments if dmar fails
drm/i915/gvt: return error code if dma map iova failed
drm/i915/gvt: optimize the inhibit context mmio load
drm/i915/gvt: add sprite plane flip done support.
drm/i915/gvt: add missing display part reset for vGPU reset
drm/i915/gvt: Fix shadow context descriptor
drm/i915/gvt: Fix alignment for GTT allocation
drm/i915/gvt: fix crash at function release_shadow_wa_ctx
drm/i915/gvt: enable IOMMU for gvt
...
32 files changed, 411 insertions, 278 deletions
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7311aeab16f7..3b6caaca9751 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -49,20 +49,21 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) | |||
49 | if (high_gm) { | 49 | if (high_gm) { |
50 | node = &vgpu->gm.high_gm_node; | 50 | node = &vgpu->gm.high_gm_node; |
51 | size = vgpu_hidden_sz(vgpu); | 51 | size = vgpu_hidden_sz(vgpu); |
52 | start = gvt_hidden_gmadr_base(gvt); | 52 | start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE); |
53 | end = gvt_hidden_gmadr_end(gvt); | 53 | end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE); |
54 | flags = PIN_HIGH; | 54 | flags = PIN_HIGH; |
55 | } else { | 55 | } else { |
56 | node = &vgpu->gm.low_gm_node; | 56 | node = &vgpu->gm.low_gm_node; |
57 | size = vgpu_aperture_sz(vgpu); | 57 | size = vgpu_aperture_sz(vgpu); |
58 | start = gvt_aperture_gmadr_base(gvt); | 58 | start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE); |
59 | end = gvt_aperture_gmadr_end(gvt); | 59 | end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE); |
60 | flags = PIN_MAPPABLE; | 60 | flags = PIN_MAPPABLE; |
61 | } | 61 | } |
62 | 62 | ||
63 | mutex_lock(&dev_priv->drm.struct_mutex); | 63 | mutex_lock(&dev_priv->drm.struct_mutex); |
64 | ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node, | 64 | ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node, |
65 | size, 4096, I915_COLOR_UNEVICTABLE, | 65 | size, I915_GTT_PAGE_SIZE, |
66 | I915_COLOR_UNEVICTABLE, | ||
66 | start, end, flags); | 67 | start, end, flags); |
67 | mutex_unlock(&dev_priv->drm.struct_mutex); | 68 | mutex_unlock(&dev_priv->drm.struct_mutex); |
68 | if (ret) | 69 | if (ret) |
@@ -254,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, | |||
254 | if (request > avail) | 255 | if (request > avail) |
255 | goto no_enough_resource; | 256 | goto no_enough_resource; |
256 | 257 | ||
257 | vgpu_aperture_sz(vgpu) = request; | 258 | vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE); |
258 | 259 | ||
259 | item = "high GM space"; | 260 | item = "high GM space"; |
260 | max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; | 261 | max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; |
@@ -265,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, | |||
265 | if (request > avail) | 266 | if (request > avail) |
266 | goto no_enough_resource; | 267 | goto no_enough_resource; |
267 | 268 | ||
268 | vgpu_hidden_sz(vgpu) = request; | 269 | vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE); |
269 | 270 | ||
270 | item = "fence"; | 271 | item = "fence"; |
271 | max = gvt_fence_sz(gvt) - HOST_FENCE; | 272 | max = gvt_fence_sz(gvt) - HOST_FENCE; |
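For reference, a minimal userspace sketch of the rounding behaviour the new ALIGN() calls above rely on; ALIGN_UP and the 4096 page size below stand in for the kernel's ALIGN() and I915_GTT_PAGE_SIZE and are illustrative only.

#include <stdio.h>

/* Round x up to the next multiple of the power-of-two boundary a. */
#define GTT_PAGE_SIZE 4096UL
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long base = 0x100a30;	/* unaligned graphics-memory base */
	unsigned long aligned = ALIGN_UP(base, GTT_PAGE_SIZE);

	/* prints: base 0x100a30 -> aligned 0x101000 */
	printf("base 0x%lx -> aligned 0x%lx\n", base, aligned);
	return 0;
}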
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 9a4b23c3ee97..7bb11a555b76 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1135,6 +1135,8 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, | |||
1135 | u32 dword2 = cmd_val(s, 2); | 1135 | u32 dword2 = cmd_val(s, 2); |
1136 | u32 plane = (dword0 & GENMASK(12, 8)) >> 8; | 1136 | u32 plane = (dword0 & GENMASK(12, 8)) >> 8; |
1137 | 1137 | ||
1138 | info->plane = PRIMARY_PLANE; | ||
1139 | |||
1138 | switch (plane) { | 1140 | switch (plane) { |
1139 | case MI_DISPLAY_FLIP_SKL_PLANE_1_A: | 1141 | case MI_DISPLAY_FLIP_SKL_PLANE_1_A: |
1140 | info->pipe = PIPE_A; | 1142 | info->pipe = PIPE_A; |
@@ -1148,12 +1150,28 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, | |||
1148 | info->pipe = PIPE_C; | 1150 | info->pipe = PIPE_C; |
1149 | info->event = PRIMARY_C_FLIP_DONE; | 1151 | info->event = PRIMARY_C_FLIP_DONE; |
1150 | break; | 1152 | break; |
1153 | |||
1154 | case MI_DISPLAY_FLIP_SKL_PLANE_2_A: | ||
1155 | info->pipe = PIPE_A; | ||
1156 | info->event = SPRITE_A_FLIP_DONE; | ||
1157 | info->plane = SPRITE_PLANE; | ||
1158 | break; | ||
1159 | case MI_DISPLAY_FLIP_SKL_PLANE_2_B: | ||
1160 | info->pipe = PIPE_B; | ||
1161 | info->event = SPRITE_B_FLIP_DONE; | ||
1162 | info->plane = SPRITE_PLANE; | ||
1163 | break; | ||
1164 | case MI_DISPLAY_FLIP_SKL_PLANE_2_C: | ||
1165 | info->pipe = PIPE_C; | ||
1166 | info->event = SPRITE_C_FLIP_DONE; | ||
1167 | info->plane = SPRITE_PLANE; | ||
1168 | break; | ||
1169 | |||
1151 | default: | 1170 | default: |
1152 | gvt_err("unknown plane code %d\n", plane); | 1171 | gvt_err("unknown plane code %d\n", plane); |
1153 | return -EINVAL; | 1172 | return -EINVAL; |
1154 | } | 1173 | } |
1155 | 1174 | ||
1156 | info->pipe = PRIMARY_PLANE; | ||
1157 | info->stride_val = (dword1 & GENMASK(15, 6)) >> 6; | 1175 | info->stride_val = (dword1 & GENMASK(15, 6)) >> 6; |
1158 | info->tile_val = (dword1 & GENMASK(2, 0)); | 1176 | info->tile_val = (dword1 & GENMASK(2, 0)); |
1159 | info->surf_val = (dword2 & GENMASK(31, 12)) >> 12; | 1177 | info->surf_val = (dword2 & GENMASK(31, 12)) >> 12; |
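The hunk above follows a default-then-override pattern: info->plane starts as the primary plane and only the PLANE_2 cases switch it to the sprite plane (the old code also clobbered info->pipe after the switch). Below is a hedged standalone sketch of that flow, with invented plane codes rather than the real hardware encoding.

#include <stdio.h>

enum plane_type { PRIMARY_PLANE, SPRITE_PLANE };

/* Decode an example plane code; codes 0-2 map to plane 1 (primary)
 * on pipes A-C, codes 3-5 to plane 2 (sprite).  Values are made up. */
static int decode_plane(unsigned int code, enum plane_type *plane)
{
	*plane = PRIMARY_PLANE;		/* default before the switch */

	switch (code) {
	case 0: case 1: case 2:
		break;			/* primary plane, keep the default */
	case 3: case 4: case 5:
		*plane = SPRITE_PLANE;	/* override for plane-2 flips */
		break;
	default:
		return -1;		/* unknown plane code */
	}
	return 0;
}

int main(void)
{
	enum plane_type plane;

	if (decode_plane(4, &plane) == 0)
		printf("decoded plane: %s\n",
		       plane == SPRITE_PLANE ? "sprite" : "primary");
	return 0;
}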
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index c0c884aeb30e..6d8fde880c39 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -83,7 +83,7 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) | |||
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | /* EDID with 1024x768 as its resolution */ | 86 | /* EDID with 1920x1200 as its resolution */ |
87 | static unsigned char virtual_dp_monitor_edid[] = { | 87 | static unsigned char virtual_dp_monitor_edid[] = { |
88 | /*Header*/ | 88 | /*Header*/ |
89 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, | 89 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, |
@@ -97,11 +97,16 @@ static unsigned char virtual_dp_monitor_edid[] = { | |||
97 | 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, | 97 | 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, |
98 | /* Established Timings: maximum resolution is 1024x768 */ | 98 | /* Established Timings: maximum resolution is 1024x768 */ |
99 | 0x21, 0x08, 0x00, | 99 | 0x21, 0x08, 0x00, |
100 | /* Standard Timings. All invalid */ | 100 | /* |
101 | 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00, | 101 | * Standard Timings. |
102 | 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, | 102 | * below new resolutions can be supported: |
103 | /* 18 Byte Data Blocks 1: invalid */ | 103 | * 1920x1080, 1280x720, 1280x960, 1280x1024, |
104 | 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0, | 104 | * 1440x900, 1600x1200, 1680x1050 |
105 | */ | ||
106 | 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, | ||
107 | 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, | ||
108 | /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ | ||
109 | 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, | ||
105 | 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, | 110 | 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, |
106 | /* 18 Byte Data Blocks 2: invalid */ | 111 | /* 18 Byte Data Blocks 2: invalid */ |
107 | 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, | 112 | 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, |
@@ -115,7 +120,7 @@ static unsigned char virtual_dp_monitor_edid[] = { | |||
115 | /* Extension Block Count */ | 120 | /* Extension Block Count */ |
116 | 0x00, | 121 | 0x00, |
117 | /* Checksum */ | 122 | /* Checksum */ |
118 | 0xef, | 123 | 0x45, |
119 | }; | 124 | }; |
120 | 125 | ||
121 | #define DPCD_HEADER_SIZE 0xb | 126 | #define DPCD_HEADER_SIZE 0xb |
@@ -328,3 +333,15 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu) | |||
328 | else | 333 | else |
329 | return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B); | 334 | return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B); |
330 | } | 335 | } |
336 | |||
337 | /** | ||
338 | * intel_vgpu_reset_display- reset vGPU virtual display emulation | ||
339 | * @vgpu: a vGPU | ||
340 | * | ||
341 | * This function is used to reset vGPU virtual display emulation stuffs | ||
342 | * | ||
343 | */ | ||
344 | void intel_vgpu_reset_display(struct intel_vgpu *vgpu) | ||
345 | { | ||
346 | emulate_monitor_status_change(vgpu); | ||
347 | } | ||
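The standard-timing bytes added above follow the EDID byte-pair encoding: horizontal pixels are (byte0 + 31) * 8, the top two bits of byte1 select an aspect ratio, and the low six bits encode refresh minus 60. A small sketch decoding the first pair (0xd1, 0xc0), which yields the 1920x1080 mode listed in the new comment:

#include <stdio.h>

int main(void)
{
	unsigned char b0 = 0xd1, b1 = 0xc0;	/* first standard timing above */
	/* aspect-ratio code in bits 7:6 of the second byte (EDID 1.3+):
	 * 0 = 16:10, 1 = 4:3, 2 = 5:4, 3 = 16:9 */
	static const unsigned int num[] = { 10, 3, 4, 9 };
	static const unsigned int den[] = { 16, 4, 5, 16 };

	unsigned int hactive = (b0 + 31) * 8;
	unsigned int vactive = hactive * num[b1 >> 6] / den[b1 >> 6];
	unsigned int refresh = (b1 & 0x3f) + 60;

	/* prints: 1920x1080 @ 60Hz */
	printf("%ux%u @ %uHz\n", hactive, vactive, refresh);
	return 0;
}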
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 7a60cb848268..8b234ea961f6 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -158,6 +158,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt); | |||
158 | void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); | 158 | void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); |
159 | 159 | ||
160 | int intel_vgpu_init_display(struct intel_vgpu *vgpu); | 160 | int intel_vgpu_init_display(struct intel_vgpu *vgpu); |
161 | void intel_vgpu_reset_display(struct intel_vgpu *vgpu); | ||
161 | void intel_vgpu_clean_display(struct intel_vgpu *vgpu); | 162 | void intel_vgpu_clean_display(struct intel_vgpu *vgpu); |
162 | 163 | ||
163 | #endif | 164 | #endif |
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f32bb6f6495c..136c6e77561a 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -515,7 +515,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) | |||
515 | 515 | ||
516 | static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | 516 | static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) |
517 | { | 517 | { |
518 | if (wa_ctx->indirect_ctx.size == 0) | 518 | if (!wa_ctx->indirect_ctx.obj) |
519 | return; | 519 | return; |
520 | 520 | ||
521 | i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); | 521 | i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); |
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 2fae2a2ca96f..1cb29b2d7dc6 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -48,31 +48,6 @@ struct gvt_firmware_header { | |||
48 | unsigned char data[1]; | 48 | unsigned char data[1]; |
49 | }; | 49 | }; |
50 | 50 | ||
51 | #define RD(offset) (readl(mmio + offset.reg)) | ||
52 | #define WR(v, offset) (writel(v, mmio + offset.reg)) | ||
53 | |||
54 | static void bdw_forcewake_get(void __iomem *mmio) | ||
55 | { | ||
56 | WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT); | ||
57 | |||
58 | RD(ECOBUS); | ||
59 | |||
60 | if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50)) | ||
61 | gvt_err("fail to wait forcewake idle\n"); | ||
62 | |||
63 | WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT); | ||
64 | |||
65 | if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50)) | ||
66 | gvt_err("fail to wait forcewake ack\n"); | ||
67 | |||
68 | if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) & | ||
69 | GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50)) | ||
70 | gvt_err("fail to wait c0 wake up\n"); | ||
71 | } | ||
72 | |||
73 | #undef RD | ||
74 | #undef WR | ||
75 | |||
76 | #define dev_to_drm_minor(d) dev_get_drvdata((d)) | 51 | #define dev_to_drm_minor(d) dev_get_drvdata((d)) |
77 | 52 | ||
78 | static ssize_t | 53 | static ssize_t |
@@ -91,9 +66,9 @@ static struct bin_attribute firmware_attr = { | |||
91 | .mmap = NULL, | 66 | .mmap = NULL, |
92 | }; | 67 | }; |
93 | 68 | ||
94 | static int expose_firmware_sysfs(struct intel_gvt *gvt, | 69 | static int expose_firmware_sysfs(struct intel_gvt *gvt) |
95 | void __iomem *mmio) | ||
96 | { | 70 | { |
71 | struct drm_i915_private *dev_priv = gvt->dev_priv; | ||
97 | struct intel_gvt_device_info *info = &gvt->device_info; | 72 | struct intel_gvt_device_info *info = &gvt->device_info; |
98 | struct pci_dev *pdev = gvt->dev_priv->drm.pdev; | 73 | struct pci_dev *pdev = gvt->dev_priv->drm.pdev; |
99 | struct intel_gvt_mmio_info *e; | 74 | struct intel_gvt_mmio_info *e; |
@@ -132,7 +107,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt, | |||
132 | 107 | ||
133 | for (j = 0; j < e->length; j += 4) | 108 | for (j = 0; j < e->length; j += 4) |
134 | *(u32 *)(p + e->offset + j) = | 109 | *(u32 *)(p + e->offset + j) = |
135 | readl(mmio + e->offset + j); | 110 | I915_READ_NOTRACE(_MMIO(e->offset + j)); |
136 | } | 111 | } |
137 | 112 | ||
138 | memcpy(gvt->firmware.mmio, p, info->mmio_size); | 113 | memcpy(gvt->firmware.mmio, p, info->mmio_size); |
@@ -235,7 +210,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) | |||
235 | struct gvt_firmware_header *h; | 210 | struct gvt_firmware_header *h; |
236 | const struct firmware *fw; | 211 | const struct firmware *fw; |
237 | char *path; | 212 | char *path; |
238 | void __iomem *mmio; | ||
239 | void *mem; | 213 | void *mem; |
240 | int ret; | 214 | int ret; |
241 | 215 | ||
@@ -260,17 +234,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) | |||
260 | 234 | ||
261 | firmware->mmio = mem; | 235 | firmware->mmio = mem; |
262 | 236 | ||
263 | mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size); | ||
264 | if (!mmio) { | ||
265 | kfree(path); | ||
266 | kfree(firmware->cfg_space); | ||
267 | kfree(firmware->mmio); | ||
268 | return -EINVAL; | ||
269 | } | ||
270 | |||
271 | if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) | ||
272 | bdw_forcewake_get(mmio); | ||
273 | |||
274 | sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", | 237 | sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", |
275 | GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, | 238 | GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, |
276 | pdev->revision); | 239 | pdev->revision); |
@@ -300,13 +263,11 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) | |||
300 | 263 | ||
301 | release_firmware(fw); | 264 | release_firmware(fw); |
302 | firmware->firmware_loaded = true; | 265 | firmware->firmware_loaded = true; |
303 | pci_iounmap(pdev, mmio); | ||
304 | return 0; | 266 | return 0; |
305 | 267 | ||
306 | out_free_fw: | 268 | out_free_fw: |
307 | release_firmware(fw); | 269 | release_firmware(fw); |
308 | expose_firmware: | 270 | expose_firmware: |
309 | expose_firmware_sysfs(gvt, mmio); | 271 | expose_firmware_sysfs(gvt); |
310 | pci_iounmap(pdev, mmio); | ||
311 | return 0; | 272 | return 0; |
312 | } | 273 | } |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 47dec4acf7ff..28c92346db0e 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -606,21 +606,33 @@ struct intel_vgpu_guest_page *intel_vgpu_find_guest_page( | |||
606 | static inline int init_shadow_page(struct intel_vgpu *vgpu, | 606 | static inline int init_shadow_page(struct intel_vgpu *vgpu, |
607 | struct intel_vgpu_shadow_page *p, int type) | 607 | struct intel_vgpu_shadow_page *p, int type) |
608 | { | 608 | { |
609 | struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
610 | dma_addr_t daddr; | ||
611 | |||
612 | daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); | ||
613 | if (dma_mapping_error(kdev, daddr)) { | ||
614 | gvt_err("fail to map dma addr\n"); | ||
615 | return -EINVAL; | ||
616 | } | ||
617 | |||
609 | p->vaddr = page_address(p->page); | 618 | p->vaddr = page_address(p->page); |
610 | p->type = type; | 619 | p->type = type; |
611 | 620 | ||
612 | INIT_HLIST_NODE(&p->node); | 621 | INIT_HLIST_NODE(&p->node); |
613 | 622 | ||
614 | p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr); | 623 | p->mfn = daddr >> GTT_PAGE_SHIFT; |
615 | if (p->mfn == INTEL_GVT_INVALID_ADDR) | ||
616 | return -EFAULT; | ||
617 | |||
618 | hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); | 624 | hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); |
619 | return 0; | 625 | return 0; |
620 | } | 626 | } |
621 | 627 | ||
622 | static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p) | 628 | static inline void clean_shadow_page(struct intel_vgpu *vgpu, |
629 | struct intel_vgpu_shadow_page *p) | ||
623 | { | 630 | { |
631 | struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
632 | |||
633 | dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096, | ||
634 | PCI_DMA_BIDIRECTIONAL); | ||
635 | |||
624 | if (!hlist_unhashed(&p->node)) | 636 | if (!hlist_unhashed(&p->node)) |
625 | hash_del(&p->node); | 637 | hash_del(&p->node); |
626 | } | 638 | } |
@@ -670,7 +682,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt) | |||
670 | { | 682 | { |
671 | trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type); | 683 | trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type); |
672 | 684 | ||
673 | clean_shadow_page(&spt->shadow_page); | 685 | clean_shadow_page(spt->vgpu, &spt->shadow_page); |
674 | intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page); | 686 | intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page); |
675 | list_del_init(&spt->post_shadow_list); | 687 | list_del_init(&spt->post_shadow_list); |
676 | 688 | ||
@@ -1875,8 +1887,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
1875 | int page_entry_num = GTT_PAGE_SIZE >> | 1887 | int page_entry_num = GTT_PAGE_SIZE >> |
1876 | vgpu->gvt->device_info.gtt_entry_size_shift; | 1888 | vgpu->gvt->device_info.gtt_entry_size_shift; |
1877 | void *scratch_pt; | 1889 | void *scratch_pt; |
1878 | unsigned long mfn; | ||
1879 | int i; | 1890 | int i; |
1891 | struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
1892 | dma_addr_t daddr; | ||
1880 | 1893 | ||
1881 | if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) | 1894 | if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) |
1882 | return -EINVAL; | 1895 | return -EINVAL; |
@@ -1887,16 +1900,18 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
1887 | return -ENOMEM; | 1900 | return -ENOMEM; |
1888 | } | 1901 | } |
1889 | 1902 | ||
1890 | mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); | 1903 | daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, |
1891 | if (mfn == INTEL_GVT_INVALID_ADDR) { | 1904 | 4096, PCI_DMA_BIDIRECTIONAL); |
1892 | gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); | 1905 | if (dma_mapping_error(dev, daddr)) { |
1893 | free_page((unsigned long)scratch_pt); | 1906 | gvt_err("fail to dmamap scratch_pt\n"); |
1894 | return -EFAULT; | 1907 | __free_page(virt_to_page(scratch_pt)); |
1908 | return -ENOMEM; | ||
1895 | } | 1909 | } |
1896 | gtt->scratch_pt[type].page_mfn = mfn; | 1910 | gtt->scratch_pt[type].page_mfn = |
1911 | (unsigned long)(daddr >> GTT_PAGE_SHIFT); | ||
1897 | gtt->scratch_pt[type].page = virt_to_page(scratch_pt); | 1912 | gtt->scratch_pt[type].page = virt_to_page(scratch_pt); |
1898 | gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", | 1913 | gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", |
1899 | vgpu->id, type, mfn); | 1914 | vgpu->id, type, gtt->scratch_pt[type].page_mfn); |
1900 | 1915 | ||
1901 | /* Build the tree by full filled the scratch pt with the entries which | 1916 | /* Build the tree by full filled the scratch pt with the entries which |
1902 | * point to the next level scratch pt or scratch page. The | 1917 | * point to the next level scratch pt or scratch page. The |
@@ -1930,9 +1945,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
1930 | static int release_scratch_page_tree(struct intel_vgpu *vgpu) | 1945 | static int release_scratch_page_tree(struct intel_vgpu *vgpu) |
1931 | { | 1946 | { |
1932 | int i; | 1947 | int i; |
1948 | struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
1949 | dma_addr_t daddr; | ||
1933 | 1950 | ||
1934 | for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { | 1951 | for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { |
1935 | if (vgpu->gtt.scratch_pt[i].page != NULL) { | 1952 | if (vgpu->gtt.scratch_pt[i].page != NULL) { |
1953 | daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << | ||
1954 | GTT_PAGE_SHIFT); | ||
1955 | dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); | ||
1936 | __free_page(vgpu->gtt.scratch_pt[i].page); | 1956 | __free_page(vgpu->gtt.scratch_pt[i].page); |
1937 | vgpu->gtt.scratch_pt[i].page = NULL; | 1957 | vgpu->gtt.scratch_pt[i].page = NULL; |
1938 | vgpu->gtt.scratch_pt[i].page_mfn = 0; | 1958 | vgpu->gtt.scratch_pt[i].page_mfn = 0; |
@@ -2192,6 +2212,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
2192 | { | 2212 | { |
2193 | int ret; | 2213 | int ret; |
2194 | void *page; | 2214 | void *page; |
2215 | struct device *dev = &gvt->dev_priv->drm.pdev->dev; | ||
2216 | dma_addr_t daddr; | ||
2195 | 2217 | ||
2196 | gvt_dbg_core("init gtt\n"); | 2218 | gvt_dbg_core("init gtt\n"); |
2197 | 2219 | ||
@@ -2209,14 +2231,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
2209 | gvt_err("fail to allocate scratch ggtt page\n"); | 2231 | gvt_err("fail to allocate scratch ggtt page\n"); |
2210 | return -ENOMEM; | 2232 | return -ENOMEM; |
2211 | } | 2233 | } |
2212 | gvt->gtt.scratch_ggtt_page = virt_to_page(page); | ||
2213 | 2234 | ||
2214 | gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); | 2235 | daddr = dma_map_page(dev, virt_to_page(page), 0, |
2215 | if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { | 2236 | 4096, PCI_DMA_BIDIRECTIONAL); |
2216 | gvt_err("fail to translate scratch ggtt page\n"); | 2237 | if (dma_mapping_error(dev, daddr)) { |
2217 | __free_page(gvt->gtt.scratch_ggtt_page); | 2238 | gvt_err("fail to dmamap scratch ggtt page\n"); |
2218 | return -EFAULT; | 2239 | __free_page(virt_to_page(page)); |
2240 | return -ENOMEM; | ||
2219 | } | 2241 | } |
2242 | gvt->gtt.scratch_ggtt_page = virt_to_page(page); | ||
2243 | gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT); | ||
2220 | 2244 | ||
2221 | if (enable_out_of_sync) { | 2245 | if (enable_out_of_sync) { |
2222 | ret = setup_spt_oos(gvt); | 2246 | ret = setup_spt_oos(gvt); |
@@ -2239,6 +2263,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
2239 | */ | 2263 | */ |
2240 | void intel_gvt_clean_gtt(struct intel_gvt *gvt) | 2264 | void intel_gvt_clean_gtt(struct intel_gvt *gvt) |
2241 | { | 2265 | { |
2266 | struct device *dev = &gvt->dev_priv->drm.pdev->dev; | ||
2267 | dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn << | ||
2268 | GTT_PAGE_SHIFT); | ||
2269 | |||
2270 | dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); | ||
2271 | |||
2242 | __free_page(gvt->gtt.scratch_ggtt_page); | 2272 | __free_page(gvt->gtt.scratch_ggtt_page); |
2243 | 2273 | ||
2244 | if (enable_out_of_sync) | 2274 | if (enable_out_of_sync) |
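A hedged sketch of the address arithmetic the gtt.c changes introduce: the bus address returned by dma_map_page() is stored as a page-frame-style "mfn" by shifting out the page offset, and the same shift is reversed to rebuild the address for dma_unmap_page(). The GTT_PAGE_SHIFT of 12 matches a 4 KiB page and is hard-coded here purely for illustration.

#include <stdio.h>
#include <stdint.h>

#define GTT_PAGE_SHIFT 12	/* 4 KiB pages, as in the GVT code */

int main(void)
{
	uint64_t daddr = 0x1f3c000;			/* example DMA address */
	unsigned long mfn = daddr >> GTT_PAGE_SHIFT;	/* stored in the shadow page */
	uint64_t unmap_addr = (uint64_t)mfn << GTT_PAGE_SHIFT;	/* rebuilt for unmap */

	printf("daddr 0x%llx -> mfn 0x%lx -> unmap 0x%llx\n",
	       (unsigned long long)daddr, mfn,
	       (unsigned long long)unmap_addr);
	return 0;
}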
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index e6bf5c533fbe..3b9d59e457ba 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -68,8 +68,6 @@ static const struct intel_gvt_ops intel_gvt_ops = { | |||
68 | */ | 68 | */ |
69 | int intel_gvt_init_host(void) | 69 | int intel_gvt_init_host(void) |
70 | { | 70 | { |
71 | int ret; | ||
72 | |||
73 | if (intel_gvt_host.initialized) | 71 | if (intel_gvt_host.initialized) |
74 | return 0; | 72 | return 0; |
75 | 73 | ||
@@ -96,11 +94,6 @@ int intel_gvt_init_host(void) | |||
96 | if (!intel_gvt_host.mpt) | 94 | if (!intel_gvt_host.mpt) |
97 | return -EINVAL; | 95 | return -EINVAL; |
98 | 96 | ||
99 | /* Try to detect if we're running in host instead of VM. */ | ||
100 | ret = intel_gvt_hypervisor_detect_host(); | ||
101 | if (ret) | ||
102 | return -ENODEV; | ||
103 | |||
104 | gvt_dbg_core("Running with hypervisor %s in host mode\n", | 97 | gvt_dbg_core("Running with hypervisor %s in host mode\n", |
105 | supported_hypervisors[intel_gvt_host.hypervisor_type]); | 98 | supported_hypervisors[intel_gvt_host.hypervisor_type]); |
106 | 99 | ||
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 30e543f5a703..df7f33abd393 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -38,7 +38,6 @@ | |||
38 | * both Xen and KVM by providing dedicated hypervisor-related MPT modules. | 38 | * both Xen and KVM by providing dedicated hypervisor-related MPT modules. |
39 | */ | 39 | */ |
40 | struct intel_gvt_mpt { | 40 | struct intel_gvt_mpt { |
41 | int (*detect_host)(void); | ||
42 | int (*host_init)(struct device *dev, void *gvt, const void *ops); | 41 | int (*host_init)(struct device *dev, void *gvt, const void *ops); |
43 | void (*host_exit)(struct device *dev, void *gvt); | 42 | void (*host_exit)(struct device *dev, void *gvt); |
44 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); | 43 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); |
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index f7be02ac4be1..92bb247e3478 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -176,26 +176,15 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu, | |||
176 | { | 176 | { |
177 | struct intel_gvt *gvt = vgpu->gvt; | 177 | struct intel_gvt *gvt = vgpu->gvt; |
178 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; | 178 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; |
179 | u32 changed, masked, unmasked; | ||
180 | u32 imr = *(u32 *)p_data; | 179 | u32 imr = *(u32 *)p_data; |
181 | 180 | ||
182 | gvt_dbg_irq("write IMR %x with val %x\n", | 181 | gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n", |
183 | reg, imr); | 182 | reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr); |
184 | |||
185 | gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg)); | ||
186 | |||
187 | /* figure out newly masked/unmasked bits */ | ||
188 | changed = vgpu_vreg(vgpu, reg) ^ imr; | ||
189 | masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed; | ||
190 | unmasked = masked ^ changed; | ||
191 | |||
192 | gvt_dbg_irq("changed %x, masked %x, unmasked %x\n", | ||
193 | changed, masked, unmasked); | ||
194 | 183 | ||
195 | vgpu_vreg(vgpu, reg) = imr; | 184 | vgpu_vreg(vgpu, reg) = imr; |
196 | 185 | ||
197 | ops->check_pending_irq(vgpu); | 186 | ops->check_pending_irq(vgpu); |
198 | gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg)); | 187 | |
199 | return 0; | 188 | return 0; |
200 | } | 189 | } |
201 | 190 | ||
@@ -217,14 +206,11 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu, | |||
217 | { | 206 | { |
218 | struct intel_gvt *gvt = vgpu->gvt; | 207 | struct intel_gvt *gvt = vgpu->gvt; |
219 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; | 208 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; |
220 | u32 changed, enabled, disabled; | ||
221 | u32 ier = *(u32 *)p_data; | 209 | u32 ier = *(u32 *)p_data; |
222 | u32 virtual_ier = vgpu_vreg(vgpu, reg); | 210 | u32 virtual_ier = vgpu_vreg(vgpu, reg); |
223 | 211 | ||
224 | gvt_dbg_irq("write master irq reg %x with val %x\n", | 212 | gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n", |
225 | reg, ier); | 213 | reg, ier, virtual_ier, virtual_ier ^ ier); |
226 | |||
227 | gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg)); | ||
228 | 214 | ||
229 | /* | 215 | /* |
230 | * GEN8_MASTER_IRQ is a special irq register, | 216 | * GEN8_MASTER_IRQ is a special irq register, |
@@ -236,16 +222,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu, | |||
236 | vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL; | 222 | vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL; |
237 | vgpu_vreg(vgpu, reg) |= ier; | 223 | vgpu_vreg(vgpu, reg) |= ier; |
238 | 224 | ||
239 | /* figure out newly enabled/disable bits */ | ||
240 | changed = virtual_ier ^ ier; | ||
241 | enabled = (virtual_ier & changed) ^ changed; | ||
242 | disabled = enabled ^ changed; | ||
243 | |||
244 | gvt_dbg_irq("changed %x, enabled %x, disabled %x\n", | ||
245 | changed, enabled, disabled); | ||
246 | |||
247 | ops->check_pending_irq(vgpu); | 225 | ops->check_pending_irq(vgpu); |
248 | gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg)); | 226 | |
249 | return 0; | 227 | return 0; |
250 | } | 228 | } |
251 | 229 | ||
@@ -268,21 +246,11 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, | |||
268 | struct intel_gvt *gvt = vgpu->gvt; | 246 | struct intel_gvt *gvt = vgpu->gvt; |
269 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; | 247 | struct intel_gvt_irq_ops *ops = gvt->irq.ops; |
270 | struct intel_gvt_irq_info *info; | 248 | struct intel_gvt_irq_info *info; |
271 | u32 changed, enabled, disabled; | ||
272 | u32 ier = *(u32 *)p_data; | 249 | u32 ier = *(u32 *)p_data; |
273 | 250 | ||
274 | gvt_dbg_irq("write IER %x with val %x\n", | 251 | gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n", |
275 | reg, ier); | 252 | reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier); |
276 | |||
277 | gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg)); | ||
278 | 253 | ||
279 | /* figure out newly enabled/disable bits */ | ||
280 | changed = vgpu_vreg(vgpu, reg) ^ ier; | ||
281 | enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed; | ||
282 | disabled = enabled ^ changed; | ||
283 | |||
284 | gvt_dbg_irq("changed %x, enabled %x, disabled %x\n", | ||
285 | changed, enabled, disabled); | ||
286 | vgpu_vreg(vgpu, reg) = ier; | 254 | vgpu_vreg(vgpu, reg) = ier; |
287 | 255 | ||
288 | info = regbase_to_irq_info(gvt, ier_to_regbase(reg)); | 256 | info = regbase_to_irq_info(gvt, ier_to_regbase(reg)); |
@@ -293,7 +261,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, | |||
293 | update_upstream_irq(vgpu, info); | 261 | update_upstream_irq(vgpu, info); |
294 | 262 | ||
295 | ops->check_pending_irq(vgpu); | 263 | ops->check_pending_irq(vgpu); |
296 | gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg)); | 264 | |
297 | return 0; | 265 | return 0; |
298 | } | 266 | } |
299 | 267 | ||
@@ -317,7 +285,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg, | |||
317 | iir_to_regbase(reg)); | 285 | iir_to_regbase(reg)); |
318 | u32 iir = *(u32 *)p_data; | 286 | u32 iir = *(u32 *)p_data; |
319 | 287 | ||
320 | gvt_dbg_irq("write IIR %x with val %x\n", reg, iir); | 288 | gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n", |
289 | reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir); | ||
321 | 290 | ||
322 | if (WARN_ON(!info)) | 291 | if (WARN_ON(!info)) |
323 | return -EINVAL; | 292 | return -EINVAL; |
@@ -619,6 +588,10 @@ static void gen8_init_irq( | |||
619 | SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); | 588 | SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); |
620 | SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); | 589 | SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); |
621 | SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); | 590 | SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); |
591 | |||
592 | SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); | ||
593 | SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); | ||
594 | SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); | ||
622 | } | 595 | } |
623 | 596 | ||
624 | /* GEN8 interrupt PCU events */ | 597 | /* GEN8 interrupt PCU events */ |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 0c9234a87a20..10c3a4b95a92 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -77,7 +77,7 @@ struct kvmgt_guest_info { | |||
77 | struct gvt_dma { | 77 | struct gvt_dma { |
78 | struct rb_node node; | 78 | struct rb_node node; |
79 | gfn_t gfn; | 79 | gfn_t gfn; |
80 | kvm_pfn_t pfn; | 80 | unsigned long iova; |
81 | }; | 81 | }; |
82 | 82 | ||
83 | static inline bool handle_valid(unsigned long handle) | 83 | static inline bool handle_valid(unsigned long handle) |
@@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev); | |||
89 | static void intel_vgpu_release_work(struct work_struct *work); | 89 | static void intel_vgpu_release_work(struct work_struct *work); |
90 | static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); | 90 | static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); |
91 | 91 | ||
92 | static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn, | ||
93 | unsigned long *iova) | ||
94 | { | ||
95 | struct page *page; | ||
96 | struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
97 | dma_addr_t daddr; | ||
98 | |||
99 | page = pfn_to_page(pfn); | ||
100 | if (is_error_page(page)) | ||
101 | return -EFAULT; | ||
102 | |||
103 | daddr = dma_map_page(dev, page, 0, PAGE_SIZE, | ||
104 | PCI_DMA_BIDIRECTIONAL); | ||
105 | if (dma_mapping_error(dev, daddr)) | ||
106 | return -ENOMEM; | ||
107 | |||
108 | *iova = (unsigned long)(daddr >> PAGE_SHIFT); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova) | ||
113 | { | ||
114 | struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
115 | dma_addr_t daddr; | ||
116 | |||
117 | daddr = (dma_addr_t)(iova << PAGE_SHIFT); | ||
118 | dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
119 | } | ||
120 | |||
92 | static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) | 121 | static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) |
93 | { | 122 | { |
94 | struct rb_node *node = vgpu->vdev.cache.rb_node; | 123 | struct rb_node *node = vgpu->vdev.cache.rb_node; |
@@ -111,21 +140,22 @@ out: | |||
111 | return ret; | 140 | return ret; |
112 | } | 141 | } |
113 | 142 | ||
114 | static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) | 143 | static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) |
115 | { | 144 | { |
116 | struct gvt_dma *entry; | 145 | struct gvt_dma *entry; |
117 | kvm_pfn_t pfn; | 146 | unsigned long iova; |
118 | 147 | ||
119 | mutex_lock(&vgpu->vdev.cache_lock); | 148 | mutex_lock(&vgpu->vdev.cache_lock); |
120 | 149 | ||
121 | entry = __gvt_cache_find(vgpu, gfn); | 150 | entry = __gvt_cache_find(vgpu, gfn); |
122 | pfn = (entry == NULL) ? 0 : entry->pfn; | 151 | iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova; |
123 | 152 | ||
124 | mutex_unlock(&vgpu->vdev.cache_lock); | 153 | mutex_unlock(&vgpu->vdev.cache_lock); |
125 | return pfn; | 154 | return iova; |
126 | } | 155 | } |
127 | 156 | ||
128 | static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) | 157 | static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, |
158 | unsigned long iova) | ||
129 | { | 159 | { |
130 | struct gvt_dma *new, *itr; | 160 | struct gvt_dma *new, *itr; |
131 | struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL; | 161 | struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL; |
@@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) | |||
135 | return; | 165 | return; |
136 | 166 | ||
137 | new->gfn = gfn; | 167 | new->gfn = gfn; |
138 | new->pfn = pfn; | 168 | new->iova = iova; |
139 | 169 | ||
140 | mutex_lock(&vgpu->vdev.cache_lock); | 170 | mutex_lock(&vgpu->vdev.cache_lock); |
141 | while (*link) { | 171 | while (*link) { |
@@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn) | |||
182 | } | 212 | } |
183 | 213 | ||
184 | g1 = gfn; | 214 | g1 = gfn; |
215 | gvt_dma_unmap_iova(vgpu, this->iova); | ||
185 | rc = vfio_unpin_pages(dev, &g1, 1); | 216 | rc = vfio_unpin_pages(dev, &g1, 1); |
186 | WARN_ON(rc != 1); | 217 | WARN_ON(rc != 1); |
187 | __gvt_cache_remove_entry(vgpu, this); | 218 | __gvt_cache_remove_entry(vgpu, this); |
@@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu) | |||
204 | mutex_lock(&vgpu->vdev.cache_lock); | 235 | mutex_lock(&vgpu->vdev.cache_lock); |
205 | while ((node = rb_first(&vgpu->vdev.cache))) { | 236 | while ((node = rb_first(&vgpu->vdev.cache))) { |
206 | dma = rb_entry(node, struct gvt_dma, node); | 237 | dma = rb_entry(node, struct gvt_dma, node); |
238 | gvt_dma_unmap_iova(vgpu, dma->iova); | ||
207 | gfn = dma->gfn; | 239 | gfn = dma->gfn; |
208 | 240 | ||
209 | vfio_unpin_pages(dev, &gfn, 1); | 241 | vfio_unpin_pages(dev, &gfn, 1); |
@@ -965,11 +997,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, | |||
965 | sparse->areas[0].offset = | 997 | sparse->areas[0].offset = |
966 | PAGE_ALIGN(vgpu_aperture_offset(vgpu)); | 998 | PAGE_ALIGN(vgpu_aperture_offset(vgpu)); |
967 | sparse->areas[0].size = vgpu_aperture_sz(vgpu); | 999 | sparse->areas[0].size = vgpu_aperture_sz(vgpu); |
968 | if (!caps.buf) { | ||
969 | kfree(caps.buf); | ||
970 | caps.buf = NULL; | ||
971 | caps.size = 0; | ||
972 | } | ||
973 | break; | 1000 | break; |
974 | 1001 | ||
975 | case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: | 1002 | case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: |
@@ -1248,43 +1275,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, | |||
1248 | spin_unlock(&kvm->mmu_lock); | 1275 | spin_unlock(&kvm->mmu_lock); |
1249 | } | 1276 | } |
1250 | 1277 | ||
1251 | static bool kvmgt_check_guest(void) | ||
1252 | { | ||
1253 | unsigned int eax, ebx, ecx, edx; | ||
1254 | char s[12]; | ||
1255 | unsigned int *i; | ||
1256 | |||
1257 | eax = KVM_CPUID_SIGNATURE; | ||
1258 | ebx = ecx = edx = 0; | ||
1259 | |||
1260 | asm volatile ("cpuid" | ||
1261 | : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) | ||
1262 | : | ||
1263 | : "cc", "memory"); | ||
1264 | i = (unsigned int *)s; | ||
1265 | i[0] = ebx; | ||
1266 | i[1] = ecx; | ||
1267 | i[2] = edx; | ||
1268 | |||
1269 | return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM")); | ||
1270 | } | ||
1271 | |||
1272 | /** | ||
1273 | * NOTE: | ||
1274 | * It's actually impossible to check if we are running in KVM host, | ||
1275 | * since the "KVM host" is simply native. So we only dectect guest here. | ||
1276 | */ | ||
1277 | static int kvmgt_detect_host(void) | ||
1278 | { | ||
1279 | #ifdef CONFIG_INTEL_IOMMU | ||
1280 | if (intel_iommu_gfx_mapped) { | ||
1281 | gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n"); | ||
1282 | return -ENODEV; | ||
1283 | } | ||
1284 | #endif | ||
1285 | return kvmgt_check_guest() ? -ENODEV : 0; | ||
1286 | } | ||
1287 | |||
1288 | static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) | 1278 | static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) |
1289 | { | 1279 | { |
1290 | struct intel_vgpu *itr; | 1280 | struct intel_vgpu *itr; |
@@ -1390,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) | |||
1390 | 1380 | ||
1391 | static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) | 1381 | static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) |
1392 | { | 1382 | { |
1393 | unsigned long pfn; | 1383 | unsigned long iova, pfn; |
1394 | struct kvmgt_guest_info *info; | 1384 | struct kvmgt_guest_info *info; |
1395 | struct device *dev; | 1385 | struct device *dev; |
1396 | int rc; | 1386 | int rc; |
@@ -1399,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) | |||
1399 | return INTEL_GVT_INVALID_ADDR; | 1389 | return INTEL_GVT_INVALID_ADDR; |
1400 | 1390 | ||
1401 | info = (struct kvmgt_guest_info *)handle; | 1391 | info = (struct kvmgt_guest_info *)handle; |
1402 | pfn = gvt_cache_find(info->vgpu, gfn); | 1392 | iova = gvt_cache_find(info->vgpu, gfn); |
1403 | if (pfn != 0) | 1393 | if (iova != INTEL_GVT_INVALID_ADDR) |
1404 | return pfn; | 1394 | return iova; |
1405 | 1395 | ||
1406 | pfn = INTEL_GVT_INVALID_ADDR; | 1396 | pfn = INTEL_GVT_INVALID_ADDR; |
1407 | dev = mdev_dev(info->vgpu->vdev.mdev); | 1397 | dev = mdev_dev(info->vgpu->vdev.mdev); |
@@ -1410,9 +1400,16 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) | |||
1410 | gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); | 1400 | gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); |
1411 | return INTEL_GVT_INVALID_ADDR; | 1401 | return INTEL_GVT_INVALID_ADDR; |
1412 | } | 1402 | } |
1403 | /* transfer to host iova for GFX to use DMA */ | ||
1404 | rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); | ||
1405 | if (rc) { | ||
1406 | gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); | ||
1407 | vfio_unpin_pages(dev, &gfn, 1); | ||
1408 | return INTEL_GVT_INVALID_ADDR; | ||
1409 | } | ||
1413 | 1410 | ||
1414 | gvt_cache_add(info->vgpu, gfn, pfn); | 1411 | gvt_cache_add(info->vgpu, gfn, iova); |
1415 | return pfn; | 1412 | return iova; |
1416 | } | 1413 | } |
1417 | 1414 | ||
1418 | static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, | 1415 | static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, |
@@ -1459,7 +1456,6 @@ static unsigned long kvmgt_virt_to_pfn(void *addr) | |||
1459 | } | 1456 | } |
1460 | 1457 | ||
1461 | struct intel_gvt_mpt kvmgt_mpt = { | 1458 | struct intel_gvt_mpt kvmgt_mpt = { |
1462 | .detect_host = kvmgt_detect_host, | ||
1463 | .host_init = kvmgt_host_init, | 1459 | .host_init = kvmgt_host_init, |
1464 | .host_exit = kvmgt_host_exit, | 1460 | .host_exit = kvmgt_host_exit, |
1465 | .attach_vgpu = kvmgt_attach_vgpu, | 1461 | .attach_vgpu = kvmgt_attach_vgpu, |
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 1af5830c0a56..419353624c5a 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -44,18 +44,6 @@ | |||
44 | */ | 44 | */ |
45 | 45 | ||
46 | /** | 46 | /** |
47 | * intel_gvt_hypervisor_detect_host - check if GVT-g is running within | ||
48 | * hypervisor host/privilged domain | ||
49 | * | ||
50 | * Returns: | ||
51 | * Zero on success, -ENODEV if current kernel is running inside a VM | ||
52 | */ | ||
53 | static inline int intel_gvt_hypervisor_detect_host(void) | ||
54 | { | ||
55 | return intel_gvt_host.mpt->detect_host(); | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * intel_gvt_hypervisor_host_init - init GVT-g host side | 47 | * intel_gvt_hypervisor_host_init - init GVT-g host side |
60 | * | 48 | * |
61 | * Returns: | 49 | * Returns: |
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 44136b1f3aab..2b3a642284b6 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -236,12 +236,18 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id) | |||
236 | } | 236 | } |
237 | } | 237 | } |
238 | 238 | ||
239 | #define CTX_CONTEXT_CONTROL_VAL 0x03 | ||
240 | |||
239 | void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) | 241 | void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) |
240 | { | 242 | { |
241 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 243 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
242 | struct render_mmio *mmio; | 244 | struct render_mmio *mmio; |
243 | u32 v; | 245 | u32 v; |
244 | int i, array_size; | 246 | int i, array_size; |
247 | u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state; | ||
248 | u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL]; | ||
249 | u32 inhibit_mask = | ||
250 | _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); | ||
245 | 251 | ||
246 | if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { | 252 | if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { |
247 | mmio = gen9_render_mmio_list; | 253 | mmio = gen9_render_mmio_list; |
@@ -257,6 +263,17 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) | |||
257 | continue; | 263 | continue; |
258 | 264 | ||
259 | mmio->value = I915_READ(mmio->reg); | 265 | mmio->value = I915_READ(mmio->reg); |
266 | |||
267 | /* | ||
268 | * if it is an inhibit context, load in_context mmio | ||
269 | * into HW by mmio write. If it is not, skip this mmio | ||
270 | * write. | ||
271 | */ | ||
272 | if (mmio->in_context && | ||
273 | ((ctx_ctrl & inhibit_mask) != inhibit_mask) && | ||
274 | i915.enable_execlists) | ||
275 | continue; | ||
276 | |||
260 | if (mmio->mask) | 277 | if (mmio->mask) |
261 | v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16); | 278 | v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16); |
262 | else | 279 | else |
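The new render.c check relies on i915's masked-register convention: _MASKED_BIT_ENABLE(bit) sets both the bit and its write-enable shadow in the upper 16 bits, so a context only counts as an inhibit context when both halves are present in its CTX_CONTEXT_CONTROL value. A hedged sketch of that test follows; bit position 0 is illustrative, not the real CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT value.

#include <stdio.h>

/* Masked registers keep the value in bits 15:0 and the write-enable
 * mask for those bits in bits 31:16. */
#define MASKED_BIT_ENABLE(b) (((b) << 16) | (b))

int main(void)
{
	unsigned int inhibit_mask = MASKED_BIT_ENABLE(1u << 0);
	unsigned int ctx_ctrl = MASKED_BIT_ENABLE(1u << 0);	/* an inhibit context */

	if ((ctx_ctrl & inhibit_mask) == inhibit_mask)
		printf("inhibit context: write in-context mmio directly\n");
	else
		printf("normal context: skip, the saved context image restores it\n");
	return 0;
}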
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 678b0be85376..06c9584ac5f0 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -125,7 +125,6 @@ static void tbs_sched_func(struct work_struct *work) | |||
125 | vgpu_data = scheduler->current_vgpu->sched_data; | 125 | vgpu_data = scheduler->current_vgpu->sched_data; |
126 | head = &vgpu_data->list; | 126 | head = &vgpu_data->list; |
127 | } else { | 127 | } else { |
128 | gvt_dbg_sched("no current vgpu search from q head\n"); | ||
129 | head = &sched_data->runq_head; | 128 | head = &sched_data->runq_head; |
130 | } | 129 | } |
131 | 130 | ||
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 7ea68a75dc46..d6b6d0efdd1a 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -169,7 +169,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
169 | gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", | 169 | gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", |
170 | ring_id, workload); | 170 | ring_id, workload); |
171 | 171 | ||
172 | shadow_ctx->desc_template = workload->ctx_desc.addressing_mode << | 172 | shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); |
173 | shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode << | ||
173 | GEN8_CTX_ADDRESSING_MODE_SHIFT; | 174 | GEN8_CTX_ADDRESSING_MODE_SHIFT; |
174 | 175 | ||
175 | mutex_lock(&dev_priv->drm.struct_mutex); | 176 | mutex_lock(&dev_priv->drm.struct_mutex); |
@@ -456,7 +457,7 @@ static int workload_thread(void *priv) | |||
456 | } | 457 | } |
457 | 458 | ||
458 | complete: | 459 | complete: |
459 | gvt_dbg_sched("will complete workload %p\n, status: %d\n", | 460 | gvt_dbg_sched("will complete workload %p, status: %d\n", |
460 | workload, workload->status); | 461 | workload, workload->status); |
461 | 462 | ||
462 | if (workload->req) | 463 | if (workload->req) |
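The scheduler.c hotfix turns a whole-field overwrite into a read-modify-write of just the two addressing-mode bits, so the rest of the descriptor template survives. A minimal sketch of that pattern, with the shift hard-coded to mirror (but not guarantee) the GEN8_CTX_ADDRESSING_MODE_SHIFT value:

#include <stdio.h>

#define ADDRESSING_MODE_SHIFT 3		/* illustrative field position */

int main(void)
{
	unsigned int desc_template = 0xc7;	/* other template bits must survive */
	unsigned int addressing_mode = 0x2;	/* e.g. a PPGTT addressing mode */

	desc_template &= ~(0x3u << ADDRESSING_MODE_SHIFT);	   /* clear the field */
	desc_template |= addressing_mode << ADDRESSING_MODE_SHIFT; /* set the new mode */

	/* prints 0xd7: only bits 4:3 changed, the rest of 0xc7 is kept */
	printf("desc_template = 0x%x\n", desc_template);
	return 0;
}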
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 7295bc8e12fb..95a97aa0051e 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -74,7 +74,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) | |||
74 | int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | 74 | int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) |
75 | { | 75 | { |
76 | unsigned int num_types; | 76 | unsigned int num_types; |
77 | unsigned int i, low_avail; | 77 | unsigned int i, low_avail, high_avail; |
78 | unsigned int min_low; | 78 | unsigned int min_low; |
79 | 79 | ||
80 | /* vGPU type name is defined as GVTg_Vx_y which contains | 80 | /* vGPU type name is defined as GVTg_Vx_y which contains |
@@ -89,9 +89,9 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | |||
89 | * to indicate how many vGPU instance can be created for this | 89 | * to indicate how many vGPU instance can be created for this |
90 | * type. | 90 | * type. |
91 | * | 91 | * |
92 | * Currently use static size here as we init type earlier.. | ||
93 | */ | 92 | */ |
94 | low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE; | 93 | low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; |
94 | high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; | ||
95 | num_types = 4; | 95 | num_types = 4; |
96 | 96 | ||
97 | gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), | 97 | gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), |
@@ -106,7 +106,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | |||
106 | gvt->types[i].low_gm_size = min_low; | 106 | gvt->types[i].low_gm_size = min_low; |
107 | gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); | 107 | gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); |
108 | gvt->types[i].fence = 4; | 108 | gvt->types[i].fence = 4; |
109 | gvt->types[i].max_instance = low_avail / min_low; | 109 | gvt->types[i].max_instance = min(low_avail / min_low, |
110 | high_avail / gvt->types[i].high_gm_size); | ||
110 | gvt->types[i].avail_instance = gvt->types[i].max_instance; | 111 | gvt->types[i].avail_instance = gvt->types[i].max_instance; |
111 | 112 | ||
112 | if (IS_GEN8(gvt->dev_priv)) | 113 | if (IS_GEN8(gvt->dev_priv)) |
@@ -142,9 +143,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) | |||
142 | /* Need to depend on maxium hw resource size but keep on | 143 | /* Need to depend on maxium hw resource size but keep on |
143 | * static config for now. | 144 | * static config for now. |
144 | */ | 145 | */ |
145 | low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - | 146 | low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE - |
146 | gvt->gm.vgpu_allocated_low_gm_size; | 147 | gvt->gm.vgpu_allocated_low_gm_size; |
147 | high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - | 148 | high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE - |
148 | gvt->gm.vgpu_allocated_high_gm_size; | 149 | gvt->gm.vgpu_allocated_high_gm_size; |
149 | fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - | 150 | fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - |
150 | gvt->fence.vgpu_allocated_fence_num; | 151 | gvt->fence.vgpu_allocated_fence_num; |
@@ -384,6 +385,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, | |||
384 | intel_vgpu_reset_resource(vgpu); | 385 | intel_vgpu_reset_resource(vgpu); |
385 | intel_vgpu_reset_mmio(vgpu); | 386 | intel_vgpu_reset_mmio(vgpu); |
386 | populate_pvinfo_page(vgpu); | 387 | populate_pvinfo_page(vgpu); |
388 | intel_vgpu_reset_display(vgpu); | ||
387 | 389 | ||
388 | if (dmlr) | 390 | if (dmlr) |
389 | intel_vgpu_reset_cfg_space(vgpu); | 391 | intel_vgpu_reset_cfg_space(vgpu); |
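The vgpu.c change sizes each vGPU type against both memory pools instead of only the low one: the instance count becomes the minimum of what the aperture (low GM) and hidden (high GM) budgets can each support. A sketch with made-up sizes, not real hardware values:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long low_avail   = 512UL  << 20;  /* aperture left after the host share */
	unsigned long high_avail  = 2048UL << 20;  /* hidden GM left after the host share */
	unsigned long low_per_vm  = 128UL  << 20;  /* low GM each vGPU of this type needs */
	unsigned long high_per_vm = 1024UL << 20;  /* high GM each vGPU of this type needs */

	unsigned long max_instance = MIN(low_avail / low_per_vm,
					 high_avail / high_per_vm);

	/* prints 2: the high-GM budget is the limiting factor here */
	printf("max_instance = %lu\n", max_instance);
	return 0;
}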
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4ae69ebe166e..1dfc0b204a72 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -824,10 +824,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |||
824 | if (ret < 0) | 824 | if (ret < 0) |
825 | return ret; | 825 | return ret; |
826 | 826 | ||
827 | ret = intel_gvt_init(dev_priv); | ||
828 | if (ret < 0) | ||
829 | goto err_workqueues; | ||
830 | |||
831 | /* This must be called before any calls to HAS_PCH_* */ | 827 | /* This must be called before any calls to HAS_PCH_* */ |
832 | intel_detect_pch(dev_priv); | 828 | intel_detect_pch(dev_priv); |
833 | 829 | ||
@@ -841,7 +837,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |||
841 | intel_init_audio_hooks(dev_priv); | 837 | intel_init_audio_hooks(dev_priv); |
842 | ret = i915_gem_load_init(dev_priv); | 838 | ret = i915_gem_load_init(dev_priv); |
843 | if (ret < 0) | 839 | if (ret < 0) |
844 | goto err_gvt; | 840 | goto err_workqueues; |
845 | 841 | ||
846 | intel_display_crc_init(dev_priv); | 842 | intel_display_crc_init(dev_priv); |
847 | 843 | ||
@@ -853,8 +849,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, | |||
853 | 849 | ||
854 | return 0; | 850 | return 0; |
855 | 851 | ||
856 | err_gvt: | ||
857 | intel_gvt_cleanup(dev_priv); | ||
858 | err_workqueues: | 852 | err_workqueues: |
859 | i915_workqueues_cleanup(dev_priv); | 853 | i915_workqueues_cleanup(dev_priv); |
860 | return ret; | 854 | return ret; |
@@ -1077,6 +1071,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
1077 | DRM_DEBUG_DRIVER("can't enable MSI"); | 1071 | DRM_DEBUG_DRIVER("can't enable MSI"); |
1078 | } | 1072 | } |
1079 | 1073 | ||
1074 | ret = intel_gvt_init(dev_priv); | ||
1075 | if (ret) | ||
1076 | goto out_ggtt; | ||
1077 | |||
1080 | return 0; | 1078 | return 0; |
1081 | 1079 | ||
1082 | out_ggtt: | 1080 | out_ggtt: |
@@ -1290,6 +1288,8 @@ void i915_driver_unload(struct drm_device *dev) | |||
1290 | 1288 | ||
1291 | intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); | 1289 | intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); |
1292 | 1290 | ||
1291 | intel_gvt_cleanup(dev_priv); | ||
1292 | |||
1293 | i915_driver_unregister(dev_priv); | 1293 | i915_driver_unregister(dev_priv); |
1294 | 1294 | ||
1295 | drm_vblank_cleanup(dev); | 1295 | drm_vblank_cleanup(dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c8689892a89f..a51b134038a0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -440,7 +440,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, | |||
440 | timeout = i915_gem_object_wait_fence(shared[i], | 440 | timeout = i915_gem_object_wait_fence(shared[i], |
441 | flags, timeout, | 441 | flags, timeout, |
442 | rps); | 442 | rps); |
443 | if (timeout <= 0) | 443 | if (timeout < 0) |
444 | break; | 444 | break; |
445 | 445 | ||
446 | dma_fence_put(shared[i]); | 446 | dma_fence_put(shared[i]); |
@@ -453,7 +453,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, | |||
453 | excl = reservation_object_get_excl_rcu(resv); | 453 | excl = reservation_object_get_excl_rcu(resv); |
454 | } | 454 | } |
455 | 455 | ||
456 | if (excl && timeout > 0) | 456 | if (excl && timeout >= 0) |
457 | timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps); | 457 | timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps); |
458 | 458 | ||
459 | dma_fence_put(excl); | 459 | dma_fence_put(excl); |
@@ -2735,21 +2735,17 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) | |||
2735 | engine->irq_seqno_barrier(engine); | 2735 | engine->irq_seqno_barrier(engine); |
2736 | 2736 | ||
2737 | request = i915_gem_find_active_request(engine); | 2737 | request = i915_gem_find_active_request(engine); |
2738 | if (!request) | 2738 | if (request && i915_gem_reset_request(request)) { |
2739 | return; | 2739 | DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", |
2740 | 2740 | engine->name, request->global_seqno); | |
2741 | if (!i915_gem_reset_request(request)) | ||
2742 | return; | ||
2743 | 2741 | ||
2744 | DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", | 2742 | /* If this context is now banned, skip all pending requests. */ |
2745 | engine->name, request->global_seqno); | 2743 | if (i915_gem_context_is_banned(request->ctx)) |
2744 | engine_skip_context(request); | ||
2745 | } | ||
2746 | 2746 | ||
2747 | /* Setup the CS to resume from the breadcrumb of the hung request */ | 2747 | /* Setup the CS to resume from the breadcrumb of the hung request */ |
2748 | engine->reset_hw(engine, request); | 2748 | engine->reset_hw(engine, request); |
2749 | |||
2750 | /* If this context is now banned, skip all of its pending requests. */ | ||
2751 | if (i915_gem_context_is_banned(request->ctx)) | ||
2752 | engine_skip_context(request); | ||
2753 | } | 2749 | } |
2754 | 2750 | ||
2755 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | 2751 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv) |
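The i915_gem_reset_engine() rewrite above folds the early returns into a single "if (request && ...)" block so that engine->reset_hw() is reached even when no active request is found or the request is not to blame. A compact sketch of the resulting shape, with placeholder helpers instead of the driver's own:

#include <stdio.h>
#include <stddef.h>

struct request { int guilty; int banned; };

static int  reset_request(struct request *rq) { return rq->guilty; }
static void skip_context(struct request *rq)  { (void)rq; puts("skipping banned context"); }
static void reset_hw(struct request *rq)      { printf("reset_hw(%p)\n", (void *)rq); }

static void reset_engine(struct request *rq)
{
	if (rq && reset_request(rq)) {
		puts("restarting from tail of hung request");
		if (rq->banned)
			skip_context(rq);
	}

	/* Always program the CS to resume, even with rq == NULL. */
	reset_hw(rq);
}

int main(void)
{
	struct request hung = { .guilty = 1, .banned = 0 };
	reset_engine(&hung);
	reset_engine(NULL);
	return 0;
}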
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 30d8dbd04f0b..2801a4d56324 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -755,9 +755,10 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, | |||
755 | GEM_BUG_ON(pte_end > GEN8_PTES); | 755 | GEM_BUG_ON(pte_end > GEN8_PTES); |
756 | 756 | ||
757 | bitmap_clear(pt->used_ptes, pte, num_entries); | 757 | bitmap_clear(pt->used_ptes, pte, num_entries); |
758 | 758 | if (USES_FULL_PPGTT(vm->i915)) { | |
759 | if (bitmap_empty(pt->used_ptes, GEN8_PTES)) | 759 | if (bitmap_empty(pt->used_ptes, GEN8_PTES)) |
760 | return true; | 760 | return true; |
761 | } | ||
761 | 762 | ||
762 | pt_vaddr = kmap_px(pt); | 763 | pt_vaddr = kmap_px(pt); |
763 | 764 | ||
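The gen8_ppgtt_clear_pt() hunk above only reports a fully cleared page table back to the caller (so it can be torn down) when full PPGTT is in use; under aliasing PPGTT the table is kept and its PTEs are merely scrubbed. A toy sketch of that decision using a 64-bit word in place of the kernel's bitmap helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct page_table { uint64_t used_ptes; /* toy: 64 PTEs per table */ };

static bool clear_pt(struct page_table *pt, unsigned first, unsigned count,
		     bool full_ppgtt)
{
	uint64_t mask = ((count < 64 ? (1ULL << count) : 0) - 1) << first;

	pt->used_ptes &= ~mask;

	/* Only full PPGTT may free the table when it becomes empty. */
	if (full_ppgtt && pt->used_ptes == 0)
		return true;

	/* ... otherwise scrub the cleared PTEs in place ... */
	return false;
}

int main(void)
{
	struct page_table pt = { .used_ptes = 0xffULL };
	printf("free table? %d\n", clear_pt(&pt, 0, 8, false));
	printf("free table? %d\n", clear_pt(&pt, 0, 8, true));
	return 0;
}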
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c index 17ce53d0d092..628699a91106 100644 --- a/drivers/gpu/drm/i915/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/i915_gem_internal.c | |||
@@ -46,24 +46,12 @@ static struct sg_table * | |||
46 | i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) | 46 | i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) |
47 | { | 47 | { |
48 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | 48 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
49 | unsigned int npages = obj->base.size / PAGE_SIZE; | ||
50 | struct sg_table *st; | 49 | struct sg_table *st; |
51 | struct scatterlist *sg; | 50 | struct scatterlist *sg; |
51 | unsigned int npages; | ||
52 | int max_order; | 52 | int max_order; |
53 | gfp_t gfp; | 53 | gfp_t gfp; |
54 | 54 | ||
55 | st = kmalloc(sizeof(*st), GFP_KERNEL); | ||
56 | if (!st) | ||
57 | return ERR_PTR(-ENOMEM); | ||
58 | |||
59 | if (sg_alloc_table(st, npages, GFP_KERNEL)) { | ||
60 | kfree(st); | ||
61 | return ERR_PTR(-ENOMEM); | ||
62 | } | ||
63 | |||
64 | sg = st->sgl; | ||
65 | st->nents = 0; | ||
66 | |||
67 | max_order = MAX_ORDER; | 55 | max_order = MAX_ORDER; |
68 | #ifdef CONFIG_SWIOTLB | 56 | #ifdef CONFIG_SWIOTLB |
69 | if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */ | 57 | if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */ |
@@ -77,6 +65,20 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) | |||
77 | gfp |= __GFP_DMA32; | 65 | gfp |= __GFP_DMA32; |
78 | } | 66 | } |
79 | 67 | ||
68 | create_st: | ||
69 | st = kmalloc(sizeof(*st), GFP_KERNEL); | ||
70 | if (!st) | ||
71 | return ERR_PTR(-ENOMEM); | ||
72 | |||
73 | npages = obj->base.size / PAGE_SIZE; | ||
74 | if (sg_alloc_table(st, npages, GFP_KERNEL)) { | ||
75 | kfree(st); | ||
76 | return ERR_PTR(-ENOMEM); | ||
77 | } | ||
78 | |||
79 | sg = st->sgl; | ||
80 | st->nents = 0; | ||
81 | |||
80 | do { | 82 | do { |
81 | int order = min(fls(npages) - 1, max_order); | 83 | int order = min(fls(npages) - 1, max_order); |
82 | struct page *page; | 84 | struct page *page; |
@@ -104,8 +106,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) | |||
104 | sg = __sg_next(sg); | 106 | sg = __sg_next(sg); |
105 | } while (1); | 107 | } while (1); |
106 | 108 | ||
107 | if (i915_gem_gtt_prepare_pages(obj, st)) | 109 | if (i915_gem_gtt_prepare_pages(obj, st)) { |
110 | /* Failed to dma-map, try again with single page sg segments */ | ||
111 | if (get_order(st->sgl->length)) { | ||
112 | internal_free_pages(st); | ||
113 | max_order = 0; | ||
114 | goto create_st; | ||
115 | } | ||
108 | goto err; | 116 | goto err; |
117 | } | ||
109 | 118 | ||
110 | /* Mark the pages as dontneed whilst they are still pinned. As soon | 119 | /* Mark the pages as dontneed whilst they are still pinned. As soon |
111 | * as they are unpinned they are allowed to be reaped by the shrinker, | 120 | * as they are unpinned they are allowed to be reaped by the shrinker, |
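The restructured allocator above moves the sg_table allocation behind a create_st: label so that, if DMA mapping of the high-order pages fails, everything is released and the whole allocation is retried with max_order forced to 0 (single-page segments). A hedged user-space analogue of that retry loop, with the allocation step elided and a fake mapping step standing in for i915_gem_gtt_prepare_pages():

#include <stdio.h>

/* Pretend that mapping large contiguous chunks fails (as a DMAR/IOMMU might),
 * while single-page chunks always map. */
static int map_chunks(int order)
{
	return order > 0 ? -1 : 0;
}

static int alloc_backing(size_t npages)
{
	int max_order = 9;            /* start with large chunks */

retry:
	/* ... allocate npages worth of chunks no larger than 1 << max_order ... */
	printf("allocated %zu pages, max order %d\n", npages, max_order);

	if (map_chunks(max_order)) {
		if (max_order) {
			/* Free what we built and start over with 4KiB segments. */
			max_order = 0;
			goto retry;
		}
		return -1;            /* even single pages failed: give up */
	}
	return 0;
}

int main(void)
{
	return alloc_backing(16) ? 1 : 0;
}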
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 72b7f7d9461d..f31deeb72703 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c | |||
@@ -1025,8 +1025,13 @@ __i915_request_wait_for_execute(struct drm_i915_gem_request *request, | |||
1025 | break; | 1025 | break; |
1026 | } | 1026 | } |
1027 | 1027 | ||
1028 | if (!timeout) { | ||
1029 | timeout = -ETIME; | ||
1030 | break; | ||
1031 | } | ||
1032 | |||
1028 | timeout = io_schedule_timeout(timeout); | 1033 | timeout = io_schedule_timeout(timeout); |
1029 | } while (timeout); | 1034 | } while (1); |
1030 | finish_wait(&request->execute.wait, &wait); | 1035 | finish_wait(&request->execute.wait, &wait); |
1031 | 1036 | ||
1032 | if (flags & I915_WAIT_LOCKED) | 1037 | if (flags & I915_WAIT_LOCKED) |
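The __i915_request_wait_for_execute() hunk above makes the wait loop check whether the remaining budget has hit zero before calling io_schedule_timeout() again, returning -ETIME instead of spinning; the loop condition becomes while (1) with explicit breaks. A small stand-alone sketch of that shape, with a dummy scheduler sleep:

#include <stdio.h>

#define ETIME 62

/* Dummy scheduler sleep: burns the whole remaining budget. */
static long schedule_timeout(long timeout) { (void)timeout; return 0; }

static long wait_for_event(int *event, long timeout)
{
	for (;;) {
		if (*event)
			break;                 /* condition met */

		if (!timeout)
			return -ETIME;         /* budget exhausted: report timeout */

		timeout = schedule_timeout(timeout);
	}
	return timeout;
}

int main(void)
{
	int signalled = 0;
	printf("%ld\n", wait_for_event(&signalled, 10));
	signalled = 1;
	printf("%ld\n", wait_for_event(&signalled, 10));
	return 0;
}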
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index ec7c5d80fe4f..9673bcc3b6ad 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -405,6 +405,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) | |||
405 | 405 | ||
406 | mutex_init(&dev_priv->mm.stolen_lock); | 406 | mutex_init(&dev_priv->mm.stolen_lock); |
407 | 407 | ||
408 | if (intel_vgpu_active(dev_priv)) { | ||
409 | DRM_INFO("iGVT-g active, disabling use of stolen memory\n"); | ||
410 | return 0; | ||
411 | } | ||
412 | |||
408 | #ifdef CONFIG_INTEL_IOMMU | 413 | #ifdef CONFIG_INTEL_IOMMU |
409 | if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) { | 414 | if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) { |
410 | DRM_INFO("DMAR active, disabling use of stolen memory\n"); | 415 | DRM_INFO("DMAR active, disabling use of stolen memory\n"); |
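The stolen-memory hunk above returns early when the driver detects it is running as a GVT-g guest, since the host owns the stolen region; note the return value is 0, so the driver simply skips the feature. A trivial sketch of that kind of capability gate, with plain flags in place of intel_vgpu_active() and the IOMMU check:

#include <stdbool.h>
#include <stdio.h>

static int init_stolen(bool running_as_vgpu_guest, bool iommu_active)
{
	if (running_as_vgpu_guest) {
		puts("vGPU active, disabling use of stolen memory");
		return 0;   /* not an error: the feature is just disabled */
	}
	if (iommu_active) {
		puts("DMAR active, disabling use of stolen memory");
		return 0;
	}
	puts("stolen memory enabled");
	return 0;
}

int main(void)
{
	init_stolen(true, false);
	init_stolen(false, false);
	return 0;
}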
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index b1361cfd4c5c..974ac08df473 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -173,7 +173,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj, | |||
173 | else | 173 | else |
174 | tile_width = 512; | 174 | tile_width = 512; |
175 | 175 | ||
176 | if (!IS_ALIGNED(stride, tile_width)) | 176 | if (!stride || !IS_ALIGNED(stride, tile_width)) |
177 | return false; | 177 | return false; |
178 | 178 | ||
179 | /* 965+ just needs multiples of tile width */ | 179 | /* 965+ just needs multiples of tile width */ |
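The one-line tiling change above rejects a set-tiling request whose stride is 0 while a tiling mode is selected; IS_ALIGNED(0, w) is true, so the explicit !stride test is what actually catches it. A minimal sketch of the check:

#include <stdbool.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static bool tiling_ok(unsigned int stride, unsigned int tile_width)
{
	/* stride == 0 passes the alignment test, so reject it explicitly. */
	if (!stride || !IS_ALIGNED(stride, tile_width))
		return false;
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       tiling_ok(0, 512), tiling_ok(100, 512), tiling_ok(1024, 512));
	return 0;
}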
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6fefc34ef602..7dba148ca792 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -3123,19 +3123,16 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
3123 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | 3123 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
3124 | } | 3124 | } |
3125 | 3125 | ||
3126 | static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) | 3126 | static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) |
3127 | { | 3127 | { |
3128 | u32 hotplug_irqs, hotplug, enabled_irqs; | 3128 | u32 hotplug; |
3129 | |||
3130 | hotplug_irqs = SDE_HOTPLUG_MASK_SPT; | ||
3131 | enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); | ||
3132 | |||
3133 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); | ||
3134 | 3129 | ||
3135 | /* Enable digital hotplug on the PCH */ | 3130 | /* Enable digital hotplug on the PCH */ |
3136 | hotplug = I915_READ(PCH_PORT_HOTPLUG); | 3131 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
3137 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | | 3132 | hotplug |= PORTA_HOTPLUG_ENABLE | |
3138 | PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; | 3133 | PORTB_HOTPLUG_ENABLE | |
3134 | PORTC_HOTPLUG_ENABLE | | ||
3135 | PORTD_HOTPLUG_ENABLE; | ||
3139 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | 3136 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
3140 | 3137 | ||
3141 | hotplug = I915_READ(PCH_PORT_HOTPLUG2); | 3138 | hotplug = I915_READ(PCH_PORT_HOTPLUG2); |
@@ -3143,6 +3140,18 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
3143 | I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); | 3140 | I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); |
3144 | } | 3141 | } |
3145 | 3142 | ||
3143 | static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) | ||
3144 | { | ||
3145 | u32 hotplug_irqs, enabled_irqs; | ||
3146 | |||
3147 | hotplug_irqs = SDE_HOTPLUG_MASK_SPT; | ||
3148 | enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); | ||
3149 | |||
3150 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); | ||
3151 | |||
3152 | spt_hpd_detection_setup(dev_priv); | ||
3153 | } | ||
3154 | |||
3146 | static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) | 3155 | static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) |
3147 | { | 3156 | { |
3148 | u32 hotplug_irqs, hotplug, enabled_irqs; | 3157 | u32 hotplug_irqs, hotplug, enabled_irqs; |
@@ -3177,18 +3186,15 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
3177 | ibx_hpd_irq_setup(dev_priv); | 3186 | ibx_hpd_irq_setup(dev_priv); |
3178 | } | 3187 | } |
3179 | 3188 | ||
3180 | static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) | 3189 | static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, |
3190 | u32 enabled_irqs) | ||
3181 | { | 3191 | { |
3182 | u32 hotplug_irqs, hotplug, enabled_irqs; | 3192 | u32 hotplug; |
3183 | |||
3184 | enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); | ||
3185 | hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; | ||
3186 | |||
3187 | bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); | ||
3188 | 3193 | ||
3189 | hotplug = I915_READ(PCH_PORT_HOTPLUG); | 3194 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
3190 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | | 3195 | hotplug |= PORTA_HOTPLUG_ENABLE | |
3191 | PORTA_HOTPLUG_ENABLE; | 3196 | PORTB_HOTPLUG_ENABLE | |
3197 | PORTC_HOTPLUG_ENABLE; | ||
3192 | 3198 | ||
3193 | DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", | 3199 | DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", |
3194 | hotplug, enabled_irqs); | 3200 | hotplug, enabled_irqs); |
@@ -3198,7 +3204,6 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
3198 | * For BXT invert bit has to be set based on AOB design | 3204 | * For BXT invert bit has to be set based on AOB design |
3199 | * for HPD detection logic, update it based on VBT fields. | 3205 | * for HPD detection logic, update it based on VBT fields. |
3200 | */ | 3206 | */ |
3201 | |||
3202 | if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && | 3207 | if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && |
3203 | intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) | 3208 | intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) |
3204 | hotplug |= BXT_DDIA_HPD_INVERT; | 3209 | hotplug |= BXT_DDIA_HPD_INVERT; |
@@ -3212,6 +3217,23 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) | |||
3212 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | 3217 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
3213 | } | 3218 | } |
3214 | 3219 | ||
3220 | static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) | ||
3221 | { | ||
3222 | __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); | ||
3223 | } | ||
3224 | |||
3225 | static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) | ||
3226 | { | ||
3227 | u32 hotplug_irqs, enabled_irqs; | ||
3228 | |||
3229 | enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); | ||
3230 | hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; | ||
3231 | |||
3232 | bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); | ||
3233 | |||
3234 | __bxt_hpd_detection_setup(dev_priv, enabled_irqs); | ||
3235 | } | ||
3236 | |||
3215 | static void ibx_irq_postinstall(struct drm_device *dev) | 3237 | static void ibx_irq_postinstall(struct drm_device *dev) |
3216 | { | 3238 | { |
3217 | struct drm_i915_private *dev_priv = to_i915(dev); | 3239 | struct drm_i915_private *dev_priv = to_i915(dev); |
@@ -3227,6 +3249,12 @@ static void ibx_irq_postinstall(struct drm_device *dev) | |||
3227 | 3249 | ||
3228 | gen5_assert_iir_is_zero(dev_priv, SDEIIR); | 3250 | gen5_assert_iir_is_zero(dev_priv, SDEIIR); |
3229 | I915_WRITE(SDEIMR, ~mask); | 3251 | I915_WRITE(SDEIMR, ~mask); |
3252 | |||
3253 | if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || | ||
3254 | HAS_PCH_LPT(dev_priv)) | ||
3255 | ; /* TODO: Enable HPD detection on older PCH platforms too */ | ||
3256 | else | ||
3257 | spt_hpd_detection_setup(dev_priv); | ||
3230 | } | 3258 | } |
3231 | 3259 | ||
3232 | static void gen5_gt_irq_postinstall(struct drm_device *dev) | 3260 | static void gen5_gt_irq_postinstall(struct drm_device *dev) |
@@ -3438,6 +3466,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) | |||
3438 | 3466 | ||
3439 | GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); | 3467 | GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); |
3440 | GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); | 3468 | GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); |
3469 | |||
3470 | if (IS_GEN9_LP(dev_priv)) | ||
3471 | bxt_hpd_detection_setup(dev_priv); | ||
3441 | } | 3472 | } |
3442 | 3473 | ||
3443 | static int gen8_irq_postinstall(struct drm_device *dev) | 3474 | static int gen8_irq_postinstall(struct drm_device *dev) |
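The i915_irq.c changes above split each *_hpd_irq_setup() into a pure "detection setup" helper plus the interrupt bookkeeping, so the hotplug detection bits can also be programmed early from the postinstall hooks, before any hotplug IRQ has been requested. A schematic sketch of that split, with hypothetical helpers in place of the real register writes:

#include <stdio.h>

static unsigned int hotplug_reg;   /* stands in for PCH_PORT_HOTPLUG */

static void hpd_detection_setup(void)
{
	/* Pure hardware programming: enable detection on the ports. */
	hotplug_reg |= 0xf;
	printf("detection bits: %#x\n", hotplug_reg);
}

static void hpd_irq_setup(void)
{
	/* IRQ bookkeeping first ... */
	puts("unmask hotplug interrupts");
	/* ... then reuse the same detection helper. */
	hpd_detection_setup();
}

static void irq_postinstall(void)
{
	/* Called at driver load/resume: enable detection even before
	 * hotplug interrupts are configured. */
	hpd_detection_setup();
}

int main(void)
{
	irq_postinstall();
	hpd_irq_setup();
	return 0;
}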
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 72f9f36ae5ce..675323189f2c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -3307,8 +3307,10 @@ enum skl_disp_power_wells { | |||
3307 | /* | 3307 | /* |
3308 | * Logical Context regs | 3308 | * Logical Context regs |
3309 | */ | 3309 | */ |
3310 | #define CCID _MMIO(0x2180) | 3310 | #define CCID _MMIO(0x2180) |
3311 | #define CCID_EN (1<<0) | 3311 | #define CCID_EN BIT(0) |
3312 | #define CCID_EXTENDED_STATE_RESTORE BIT(2) | ||
3313 | #define CCID_EXTENDED_STATE_SAVE BIT(3) | ||
3312 | /* | 3314 | /* |
3313 | * Notes on SNB/IVB/VLV context size: | 3315 | * Notes on SNB/IVB/VLV context size: |
3314 | * - Power context is saved elsewhere (LLC or stolen) | 3316 | * - Power context is saved elsewhere (LLC or stolen) |
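The i915_reg.h hunk adds the extended state save/restore bits next to CCID_EN and switches to the BIT() macro. A tiny sketch of how such a CCID value might be composed; the bit positions match the hunk, while the ggtt offset used here is made up:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                      (1u << (n))
#define CCID_EN                     BIT(0)
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE    BIT(3)

int main(void)
{
	uint32_t ggtt_offset = 0x100000;   /* hypothetical, page aligned */
	uint32_t ccid = ggtt_offset |
			CCID_EXTENDED_STATE_SAVE |
			CCID_EXTENDED_STATE_RESTORE |
			CCID_EN;

	printf("CCID = %#x\n", ccid);
	return 0;
}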
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3d8ac8aa7214..d1670b8afbf5 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -2887,6 +2887,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) | |||
2887 | 2887 | ||
2888 | WARN_ON(intel_dp->active_pipe != INVALID_PIPE); | 2888 | WARN_ON(intel_dp->active_pipe != INVALID_PIPE); |
2889 | 2889 | ||
2890 | if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) | ||
2891 | return; | ||
2892 | |||
2890 | edp_panel_vdd_off_sync(intel_dp); | 2893 | edp_panel_vdd_off_sync(intel_dp); |
2891 | 2894 | ||
2892 | /* | 2895 | /* |
@@ -2914,9 +2917,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev, | |||
2914 | 2917 | ||
2915 | lockdep_assert_held(&dev_priv->pps_mutex); | 2918 | lockdep_assert_held(&dev_priv->pps_mutex); |
2916 | 2919 | ||
2917 | if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) | ||
2918 | return; | ||
2919 | |||
2920 | for_each_intel_encoder(dev, encoder) { | 2920 | for_each_intel_encoder(dev, encoder) { |
2921 | struct intel_dp *intel_dp; | 2921 | struct intel_dp *intel_dp; |
2922 | enum port port; | 2922 | enum port port; |
@@ -4406,8 +4406,8 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, | |||
4406 | * | 4406 | * |
4407 | * Return %true if @port is connected, %false otherwise. | 4407 | * Return %true if @port is connected, %false otherwise. |
4408 | */ | 4408 | */ |
4409 | static bool intel_digital_port_connected(struct drm_i915_private *dev_priv, | 4409 | bool intel_digital_port_connected(struct drm_i915_private *dev_priv, |
4410 | struct intel_digital_port *port) | 4410 | struct intel_digital_port *port) |
4411 | { | 4411 | { |
4412 | if (HAS_PCH_IBX(dev_priv)) | 4412 | if (HAS_PCH_IBX(dev_priv)) |
4413 | return ibx_digital_port_connected(dev_priv, port); | 4413 | return ibx_digital_port_connected(dev_priv, port); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0cec0013ace0..a60f442f2686 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -1485,6 +1485,8 @@ bool __intel_dp_read_desc(struct intel_dp *intel_dp, | |||
1485 | bool intel_dp_read_desc(struct intel_dp *intel_dp); | 1485 | bool intel_dp_read_desc(struct intel_dp *intel_dp); |
1486 | int intel_dp_link_required(int pixel_clock, int bpp); | 1486 | int intel_dp_link_required(int pixel_clock, int bpp); |
1487 | int intel_dp_max_data_rate(int max_link_clock, int max_lanes); | 1487 | int intel_dp_max_data_rate(int max_link_clock, int max_lanes); |
1488 | bool intel_digital_port_connected(struct drm_i915_private *dev_priv, | ||
1489 | struct intel_digital_port *port); | ||
1488 | 1490 | ||
1489 | /* intel_dp_aux_backlight.c */ | 1491 | /* intel_dp_aux_backlight.c */ |
1490 | int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); | 1492 | int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); |
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 290384e86c63..d23c0fcff751 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c | |||
@@ -67,6 +67,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) | |||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | if (intel_vgpu_active(dev_priv)) { | ||
71 | DRM_DEBUG_DRIVER("GVT-g is disabled for guest\n"); | ||
72 | goto bail; | ||
73 | } | ||
74 | |||
70 | if (!is_supported_device(dev_priv)) { | 75 | if (!is_supported_device(dev_priv)) { |
71 | DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n"); | 76 | DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n"); |
72 | goto bail; | 77 | goto bail; |
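intel_gvt_init() above gains a guard so a guest running on top of GVT-g never tries to initialize the host-side GVT code; like the other bail paths it returns success, so the rest of the driver still loads. A minimal sketch of that pattern with plain flags as stand-ins:

#include <stdbool.h>
#include <stdio.h>

static int gvt_init(bool vgpu_active, bool supported_device)
{
	if (vgpu_active) {
		puts("GVT-g is disabled for guest");
		goto bail;
	}
	if (!supported_device) {
		puts("Unsupported device. GVT-g is disabled");
		goto bail;
	}
	puts("GVT-g host support enabled");
	return 0;

bail:
	return 0;   /* disabled is not a failure */
}

int main(void)
{
	gvt_init(true, true);
	gvt_init(false, false);
	return 0;
}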
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 432ee495dec2..ebf8023d21e6 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -360,7 +360,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) | |||
360 | static u64 execlists_update_context(struct drm_i915_gem_request *rq) | 360 | static u64 execlists_update_context(struct drm_i915_gem_request *rq) |
361 | { | 361 | { |
362 | struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; | 362 | struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; |
363 | struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; | 363 | struct i915_hw_ppgtt *ppgtt = |
364 | rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; | ||
364 | u32 *reg_state = ce->lrc_reg_state; | 365 | u32 *reg_state = ce->lrc_reg_state; |
365 | 366 | ||
366 | reg_state[CTX_RING_TAIL+1] = rq->tail; | 367 | reg_state[CTX_RING_TAIL+1] = rq->tail; |
@@ -1389,7 +1390,20 @@ static void reset_common_ring(struct intel_engine_cs *engine, | |||
1389 | { | 1390 | { |
1390 | struct drm_i915_private *dev_priv = engine->i915; | 1391 | struct drm_i915_private *dev_priv = engine->i915; |
1391 | struct execlist_port *port = engine->execlist_port; | 1392 | struct execlist_port *port = engine->execlist_port; |
1392 | struct intel_context *ce = &request->ctx->engine[engine->id]; | 1393 | struct intel_context *ce; |
1394 | |||
1395 | /* If the request was innocent, we leave the request in the ELSP | ||
1396 | * and will try to replay it on restarting. The context image may | ||
1397 | * have been corrupted by the reset, in which case we may have | ||
1398 | * to service a new GPU hang, but more likely we can continue on | ||
1399 | * without impact. | ||
1400 | * | ||
1401 | * If the request was guilty, we presume the context is corrupt | ||
1402 | * and have to at least restore the RING register in the context | ||
1403 | * image back to the expected values to skip over the guilty request. | ||
1404 | */ | ||
1405 | if (!request || request->fence.error != -EIO) | ||
1406 | return; | ||
1393 | 1407 | ||
1394 | /* We want a simple context + ring to execute the breadcrumb update. | 1408 | /* We want a simple context + ring to execute the breadcrumb update. |
1395 | * We cannot rely on the context being intact across the GPU hang, | 1409 | * We cannot rely on the context being intact across the GPU hang, |
@@ -1398,6 +1412,7 @@ static void reset_common_ring(struct intel_engine_cs *engine, | |||
1398 | * future request will be after userspace has had the opportunity | 1412 | * future request will be after userspace has had the opportunity |
1399 | * to recreate its own state. | 1413 | * to recreate its own state. |
1400 | */ | 1414 | */ |
1415 | ce = &request->ctx->engine[engine->id]; | ||
1401 | execlists_init_reg_state(ce->lrc_reg_state, | 1416 | execlists_init_reg_state(ce->lrc_reg_state, |
1402 | request->ctx, engine, ce->ring); | 1417 | request->ctx, engine, ce->ring); |
1403 | 1418 | ||
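Two things happen in the intel_lrc.c hunks: execlists_update_context() falls back to the aliasing PPGTT when the context has no private one (the GNU "?:" shorthand), and reset_common_ring() now only rewrites the context image for a request marked guilty with -EIO, leaving innocent requests in the ELSP for replay. A compact sketch of both decisions, with stand-in types rather than the driver structures:

#include <stdio.h>
#include <stddef.h>

#define EIO 5

struct ppgtt { const char *name; };

struct request {
	struct ppgtt *ctx_ppgtt;     /* per-context PPGTT, may be NULL */
	int fence_error;             /* -EIO marks the guilty request */
};

static struct ppgtt aliasing = { "aliasing" };

static struct ppgtt *effective_ppgtt(struct request *rq)
{
	/* Same idea as rq->ctx->ppgtt ?: aliasing_ppgtt */
	return rq->ctx_ppgtt ? rq->ctx_ppgtt : &aliasing;
}

static void reset_ring(struct request *rq)
{
	if (!rq || rq->fence_error != -EIO) {
		puts("innocent or no request: leave ELSP alone, replay later");
		return;
	}
	printf("guilty request: rewrite context image, ppgtt=%s\n",
	       effective_ppgtt(rq)->name);
}

int main(void)
{
	struct request innocent = { NULL, 0 };
	struct request guilty   = { NULL, -EIO };
	reset_ring(&innocent);
	reset_ring(&guilty);
	return 0;
}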
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index f6d4e6940257..c300647ef604 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
@@ -158,6 +158,8 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) | |||
158 | static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) | 158 | static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) |
159 | { | 159 | { |
160 | struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); | 160 | struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); |
161 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
162 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
161 | unsigned long start = jiffies; | 163 | unsigned long start = jiffies; |
162 | 164 | ||
163 | if (!lspcon->desc_valid) | 165 | if (!lspcon->desc_valid) |
@@ -173,7 +175,8 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) | |||
173 | if (!__intel_dp_read_desc(intel_dp, &desc)) | 175 | if (!__intel_dp_read_desc(intel_dp, &desc)) |
174 | return; | 176 | return; |
175 | 177 | ||
176 | if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) { | 178 | if (intel_digital_port_connected(dev_priv, dig_port) && |
179 | !memcmp(&intel_dp->desc, &desc, sizeof(desc))) { | ||
177 | DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n", | 180 | DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n", |
178 | jiffies_to_msecs(jiffies - start)); | 181 | jiffies_to_msecs(jiffies - start)); |
179 | return; | 182 | return; |
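The LSPCON resume workaround now also checks that the digital port still reports as connected before trusting a descriptor match, since an unasserted HPD after resume can make the cached descriptor comparison pass on stale data. A short sketch of the combined condition, with hypothetical helpers in place of the DP AUX reads:

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

struct desc { char id[8]; };

static bool port_connected(void) { return true; }

static void read_desc(struct desc *d)
{
	memset(d, 0, sizeof(*d));
	strcpy(d->id, "lspcon");
}

static bool recovered_in_pcon_mode(const struct desc *cached)
{
	struct desc fresh;

	read_desc(&fresh);

	/* Both the live HPD state and the descriptor have to agree. */
	return port_connected() &&
	       memcmp(cached, &fresh, sizeof(fresh)) == 0;
}

int main(void)
{
	struct desc cached = { "lspcon" };
	printf("recovered: %d\n", recovered_in_pcon_mode(&cached));
	return 0;
}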
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index f4429f67a4e3..4a862a358c70 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -982,7 +982,18 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
982 | opregion->vbt_size = vbt_size; | 982 | opregion->vbt_size = vbt_size; |
983 | } else { | 983 | } else { |
984 | vbt = base + OPREGION_VBT_OFFSET; | 984 | vbt = base + OPREGION_VBT_OFFSET; |
985 | vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET; | 985 | /* |
986 | * The VBT specification says that if the ASLE ext | ||
987 | * mailbox is not used its area is reserved, but | ||
988 | * on some CHT boards the VBT extends into the | ||
989 | * ASLE ext area. Allow this even though it is | ||
990 | * against the spec, so we do not end up rejecting | ||
991 | * the VBT on those boards (and end up not finding the | ||
992 | * LCD panel because of this). | ||
993 | */ | ||
994 | vbt_size = (mboxes & MBOX_ASLE_EXT) ? | ||
995 | OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE; | ||
996 | vbt_size -= OPREGION_VBT_OFFSET; | ||
986 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { | 997 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { |
987 | DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); | 998 | DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); |
988 | opregion->vbt = vbt; | 999 | opregion->vbt = vbt; |
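The OpRegion hunk widens the mailbox-4 VBT window to the end of the OpRegion when the ASLE ext mailbox is not advertised, instead of always stopping at the ASLE ext offset. A tiny arithmetic sketch of that size calculation; the concrete offsets below are illustrative placeholders, not values taken from the specification:

#include <stdio.h>

#define MBOX_ASLE_EXT            (1u << 4)      /* illustrative bit */
#define OPREGION_SIZE            0x2000u        /* illustrative */
#define OPREGION_VBT_OFFSET      0x400u         /* illustrative */
#define OPREGION_ASLE_EXT_OFFSET 0x1c00u        /* illustrative */

static unsigned int mailbox4_vbt_size(unsigned int mboxes)
{
	unsigned int vbt_size = (mboxes & MBOX_ASLE_EXT) ?
				OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
	return vbt_size - OPREGION_VBT_OFFSET;
}

int main(void)
{
	printf("with ASLE ext:    %#x bytes\n", mailbox4_vbt_size(MBOX_ASLE_EXT));
	printf("without ASLE ext: %#x bytes\n", mailbox4_vbt_size(0));
	return 0;
}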
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 69035e4f9b3b..91bc4abf5d3e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -599,10 +599,62 @@ out: | |||
599 | static void reset_ring_common(struct intel_engine_cs *engine, | 599 | static void reset_ring_common(struct intel_engine_cs *engine, |
600 | struct drm_i915_gem_request *request) | 600 | struct drm_i915_gem_request *request) |
601 | { | 601 | { |
602 | struct intel_ring *ring = request->ring; | 602 | /* Try to restore the logical GPU state to match the continuation |
603 | * of the request queue. If we skip the context/PD restore, then | ||
604 | * the next request may try to execute assuming that its context | ||
605 | * is valid and loaded on the GPU and so may try to access invalid | ||
606 | * memory, prompting repeated GPU hangs. | ||
607 | * | ||
608 | * If the request was guilty, we still restore the logical state | ||
609 | * in case the next request requires it (e.g. the aliasing ppgtt), | ||
610 | * but skip over the hung batch. | ||
611 | * | ||
612 | * If the request was innocent, we try to replay the request with | ||
613 | * the restored context. | ||
614 | */ | ||
615 | if (request) { | ||
616 | struct drm_i915_private *dev_priv = request->i915; | ||
617 | struct intel_context *ce = &request->ctx->engine[engine->id]; | ||
618 | struct i915_hw_ppgtt *ppgtt; | ||
619 | |||
620 | /* FIXME consider gen8 reset */ | ||
621 | |||
622 | if (ce->state) { | ||
623 | I915_WRITE(CCID, | ||
624 | i915_ggtt_offset(ce->state) | | ||
625 | BIT(8) /* must be set! */ | | ||
626 | CCID_EXTENDED_STATE_SAVE | | ||
627 | CCID_EXTENDED_STATE_RESTORE | | ||
628 | CCID_EN); | ||
629 | } | ||
603 | 630 | ||
604 | ring->head = request->postfix; | 631 | ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt; |
605 | ring->last_retired_head = -1; | 632 | if (ppgtt) { |
633 | u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10; | ||
634 | |||
635 | I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); | ||
636 | I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset); | ||
637 | |||
638 | /* Wait for the PD reload to complete */ | ||
639 | if (intel_wait_for_register(dev_priv, | ||
640 | RING_PP_DIR_BASE(engine), | ||
641 | BIT(0), 0, | ||
642 | 10)) | ||
643 | DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n"); | ||
644 | |||
645 | ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); | ||
646 | } | ||
647 | |||
648 | /* If the rq hung, jump to its breadcrumb and skip the batch */ | ||
649 | if (request->fence.error == -EIO) { | ||
650 | struct intel_ring *ring = request->ring; | ||
651 | |||
652 | ring->head = request->postfix; | ||
653 | ring->last_retired_head = -1; | ||
654 | } | ||
655 | } else { | ||
656 | engine->legacy_active_context = NULL; | ||
657 | } | ||
606 | } | 658 | } |
607 | 659 | ||
608 | static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) | 660 | static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) |
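The closing hunk teaches the legacy ringbuffer reset path to restore the logical context (via CCID) and reload the per-engine page directory after a reset, and to jump the ring head past the batch only when the hung request was actually guilty. A condensed sketch of that ordering, with stub register accessors standing in for the MMIO writes:

#include <stdio.h>

#define EIO 5

struct request {
	int has_context_image;
	int has_ppgtt;
	int fence_error;
	unsigned int postfix;
};

static void write_ccid(void) { puts("CCID <- context image | EN"); }
static void reload_pd(void)  { puts("PP_DIR_BASE <- page directory"); }

static void reset_ring(struct request *rq, unsigned int *ring_head)
{
	if (!rq) {
		puts("no request: forget the active context");
		return;
	}

	if (rq->has_context_image)
		write_ccid();            /* restore logical context */
	if (rq->has_ppgtt)
		reload_pd();             /* restore page tables */

	/* Only a guilty request gets its batch skipped. */
	if (rq->fence_error == -EIO)
		*ring_head = rq->postfix;
}

int main(void)
{
	unsigned int head = 0;
	struct request rq = { 1, 1, -EIO, 0x40 };

	reset_ring(&rq, &head);
	printf("ring head = %#x\n", head);
	return 0;
}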