-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c              2
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c               2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c                     15
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h                      4
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c                   26
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c                7
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h                3
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c                    23
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                     1
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c                    4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c            14
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c                46
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h                 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c   20
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c       2
15 files changed, 117 insertions, 53 deletions
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 380eeb2a0e83..fe754022e356 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -131,7 +131,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 
         assert_rpm_wakelock_held(dev_priv);
 
-        if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+        if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
                 return;
 
         reg = vgpu->fence.regs[fence];
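
The fence argument indexes vgpu->fence.regs[], which holds vgpu_fence_sz(vgpu) entries, so the old `>` guard accepted the one-past-the-end index. A minimal sketch of the off-by-one (illustrative array and names, not code from the patch):

    u32 regs[8];    /* valid indices are 0..7 */
    u32 val;

    if (WARN_ON(idx >= ARRAY_SIZE(regs)))   /* '>' would let idx == 8 through */
            return;
    val = regs[idx];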
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 45e89b1e0481..a614db310ea2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -874,7 +874,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
         if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
                 gvt_vgpu_err("%s access to non-render register (%x)\n",
                                 cmd, offset);
-                return 0;
+                return -EBADRQC;
         }
 
         if (is_shadowed_mmio(offset)) {
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 712f9d14e720..46c8b720e336 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -176,6 +176,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
         .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
         .vgpu_create = intel_gvt_create_vgpu,
         .vgpu_destroy = intel_gvt_destroy_vgpu,
+        .vgpu_release = intel_gvt_release_vgpu,
         .vgpu_reset = intel_gvt_reset_vgpu,
         .vgpu_activate = intel_gvt_activate_vgpu,
         .vgpu_deactivate = intel_gvt_deactivate_vgpu,
@@ -315,6 +316,11 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
         if (WARN_ON(!gvt))
                 return;
 
+        intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+        intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+        intel_gvt_cleanup_vgpu_type_groups(gvt);
+        intel_gvt_clean_vgpu_types(gvt);
+
         intel_gvt_debugfs_clean(gvt);
         clean_service_thread(gvt);
         intel_gvt_clean_cmd_parser(gvt);
@@ -322,17 +328,10 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
         intel_gvt_clean_workload_scheduler(gvt);
         intel_gvt_clean_gtt(gvt);
         intel_gvt_clean_irq(gvt);
-        intel_gvt_clean_mmio_info(gvt);
         intel_gvt_free_firmware(gvt);
-
-        intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
-        intel_gvt_cleanup_vgpu_type_groups(gvt);
-        intel_gvt_clean_vgpu_types(gvt);
-
+        intel_gvt_clean_mmio_info(gvt);
         idr_destroy(&gvt->vgpu_idr);
 
-        intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-
         kfree(dev_priv->gvt);
         dev_priv->gvt = NULL;
 }
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 9a9671522774..31f6cdbe5c42 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -486,6 +486,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
                                          struct intel_vgpu_type *type);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                                  unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
@@ -563,7 +564,8 @@ struct intel_gvt_ops {
                              unsigned int);
         struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
                                           struct intel_vgpu_type *);
-        void (*vgpu_destroy)(struct intel_vgpu *);
+        void (*vgpu_destroy)(struct intel_vgpu *vgpu);
+        void (*vgpu_release)(struct intel_vgpu *vgpu);
         void (*vgpu_reset)(struct intel_vgpu *);
         void (*vgpu_activate)(struct intel_vgpu *);
         void (*vgpu_deactivate)(struct intel_vgpu *);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 4d2f53ae9f0f..a45f46d8537f 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -43,6 +43,8 @@
 #include <linux/mdev.h>
 #include <linux/debugfs.h>
 
+#include <linux/nospec.h>
+
 #include "i915_drv.h"
 #include "gvt.h"
 
@@ -187,14 +189,14 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
 
         /* Setup DMA mapping. */
         *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
-        ret = dma_mapping_error(dev, *dma_addr);
-        if (ret) {
+        if (dma_mapping_error(dev, *dma_addr)) {
                 gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
                              page_to_pfn(page), ret);
                 gvt_unpin_guest_page(vgpu, gfn, size);
+                return -ENOMEM;
         }
 
-        return ret;
+        return 0;
 }
 
 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
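
dma_mapping_error() only reports whether a mapping failed; its return value is not an errno, so the old `return ret` handed a positive value to callers expecting zero or a negative error code. A hedged sketch of the idiom the fix adopts (simplified, not the patched function):

    dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);

    if (dma_mapping_error(dev, addr))
            return -ENOMEM;     /* translate the failure into a real errno */
    return 0;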
@@ -666,7 +668,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
         if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
                 return;
 
-        intel_gvt_ops->vgpu_deactivate(vgpu);
+        intel_gvt_ops->vgpu_release(vgpu);
 
         ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
@@ -1139,7 +1141,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
         } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                 struct vfio_region_info info;
                 struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
-                int i, ret;
+                unsigned int i;
+                int ret;
                 struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
                 size_t size;
                 int nr_areas = 1;
@@ -1224,6 +1227,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                         if (info.index >= VFIO_PCI_NUM_REGIONS +
                                         vgpu->vdev.num_regions)
                                 return -EINVAL;
+                        info.index =
+                                array_index_nospec(info.index,
+                                                VFIO_PCI_NUM_REGIONS +
+                                                vgpu->vdev.num_regions);
 
                         i = info.index - VFIO_PCI_NUM_REGIONS;
 
@@ -1250,11 +1257,13 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                                         &sparse->header, sizeof(*sparse) +
                                         (sparse->nr_areas *
                                                 sizeof(*sparse->areas)));
-                        kfree(sparse);
-                        if (ret)
+                        if (ret) {
+                                kfree(sparse);
                                 return ret;
+                        }
                         break;
                 default:
+                        kfree(sparse);
                         return -EINVAL;
                 }
         }
@@ -1270,6 +1279,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                                                 sizeof(info), caps.buf,
                                                 caps.size)) {
                                         kfree(caps.buf);
+                                        kfree(sparse);
                                         return -EFAULT;
                                 }
                                 info.cap_offset = sizeof(info);
@@ -1278,6 +1288,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                         kfree(caps.buf);
                 }
 
+                kfree(sparse);
                 return copy_to_user((void __user *)arg, &info, minsz) ?
                         -EFAULT : 0;
         } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
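
The three hunks above give `sparse` a single owner: instead of being freed immediately after use (and leaked on the -EINVAL and -EFAULT paths), it is now freed on every exit. For contrast, a sketch of the equivalent goto-cleanup structure kernel code often uses for this; `setup_region_caps` is a hypothetical helper, not part of the patch:

    int ret = 0;

    ret = setup_region_caps(sparse);    /* hypothetical helper */
    if (ret)
            goto out;
    /* ... further use of sparse ... */
    out:
    kfree(sparse);                      /* single point of release */
    return ret;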
@@ -1615,7 +1626,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
         kvmgt_protect_table_init(info);
         gvt_cache_init(vgpu);
 
-        mutex_init(&vgpu->dmabuf_lock);
         init_completion(&vgpu->vblank_done);
 
         info->track_node.track_write = kvmgt_page_track_write;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b0e566956b8d..43aa058e29fc 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -784,7 +784,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
         kunmap(page);
 }
 
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+                                unsigned long engine_mask)
 {
         struct intel_vgpu_submission *s = &vgpu->submission;
         struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -879,7 +880,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                  * cleaned up during the resetting process later, so doing
                  * the workload clean up here doesn't have any impact.
                  **/
-                clean_workloads(vgpu, ENGINE_MASK(ring_id));
+                intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
         }
 
         workload->complete(workload);
@@ -1081,7 +1082,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
         if (!s->active)
                 return;
 
-        clean_workloads(vgpu, engine_mask);
+        intel_vgpu_clean_workloads(vgpu, engine_mask);
         s->ops->reset(vgpu, engine_mask);
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 21eddab4a9cd..ca5529d0e48e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -158,4 +158,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+                                unsigned long engine_mask);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index f6fa916517c3..a4e8e3cf74fd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -222,7 +222,7 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
  * @vgpu: virtual GPU
  *
  * This function is called when user wants to deactivate a virtual GPU.
- * All virtual GPU runtime information will be destroyed.
+ * The virtual GPU will be stopped.
  *
  */
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
@@ -238,12 +238,30 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
         }
 
         intel_vgpu_stop_schedule(vgpu);
-        intel_vgpu_dmabuf_cleanup(vgpu);
 
         mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
+ * intel_gvt_release_vgpu - release a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to release a virtual GPU.
+ * The virtual GPU will be stopped and all runtime information will be
+ * destroyed.
+ *
+ */
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
+{
+        intel_gvt_deactivate_vgpu(vgpu);
+
+        mutex_lock(&vgpu->vgpu_lock);
+        intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
+        intel_vgpu_dmabuf_cleanup(vgpu);
+        mutex_unlock(&vgpu->vgpu_lock);
+}
+
+/**
  * intel_gvt_destroy_vgpu - destroy a virtual GPU
  * @vgpu: virtual GPU
  *
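
With release split out of deactivate, teardown becomes a ladder of increasingly destructive steps. The intended ordering, sketched as a hypothetical caller (inferred from the new kerneldoc, not quoted from the patch):

    vgpu = intel_gvt_create_vgpu(gvt, type);
    intel_gvt_activate_vgpu(vgpu);      /* start scheduling                 */
    intel_gvt_deactivate_vgpu(vgpu);    /* stop scheduling only; reversible */
    intel_gvt_release_vgpu(vgpu);       /* also drop workloads and dmabufs  */
    intel_gvt_destroy_vgpu(vgpu);       /* free the vGPU                    */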
@@ -361,6 +379,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
         vgpu->gvt = gvt;
         vgpu->sched_ctl.weight = param->weight;
         mutex_init(&vgpu->vgpu_lock);
+        mutex_init(&vgpu->dmabuf_lock);
         INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
         INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
         idr_init(&vgpu->object_idr);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 91e7483228e1..08ec7446282e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -9201,6 +9201,7 @@ enum skl_power_gate {
 #define  TRANS_MSA_10_BPC               (2 << 5)
 #define  TRANS_MSA_12_BPC               (3 << 5)
 #define  TRANS_MSA_16_BPC               (4 << 5)
+#define  TRANS_MSA_CEA_RANGE            (1 << 3)
 
 /* LCPLL Control */
 #define LCPLL_CTL                       _MMIO(0x130040)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 39d66f8493fa..8761513f3532 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1685,6 +1685,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
         WARN_ON(transcoder_is_dsi(cpu_transcoder));
 
         temp = TRANS_MSA_SYNC_CLK;
+
+        if (crtc_state->limited_color_range)
+                temp |= TRANS_MSA_CEA_RANGE;
+
         switch (crtc_state->pipe_bpp) {
         case 18:
                 temp |= TRANS_MSA_6_BPC;
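
TRANS_MSA_CEA_RANGE advertises limited ("CEA") quantization range in the DisplayPort Main Stream Attributes, so the sink decodes pixels the same way the source encoded them. For reference, 8-bit limited range compresses the full 0..255 swing to 16..235, roughly:

    limited = 16 + (219 * full) / 255;  /* black -> 16, white -> 235 */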
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 33faad3197fe..6a8f27d0a742 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -387,8 +387,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
                 mmio = RING_HWS_PGA(engine->mmio_base);
         }
 
-        if (INTEL_GEN(dev_priv) >= 6)
-                I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+        if (INTEL_GEN(dev_priv) >= 6) {
+                u32 mask = ~0u;
+
+                /*
+                 * Keep the render interrupt unmasked as this papers over
+                 * lost interrupts following a reset.
+                 */
+                if (engine->id == RCS)
+                        mask &= ~BIT(0);
+
+                I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
+        }
 
         I915_WRITE(mmio, engine->status_page.ggtt_offset);
         POSTING_READ(mmio);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index b892ca8396e8..50b39aa4ffb8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -359,8 +359,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 }
 
 /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
-static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
-                                         bool restore)
+static unsigned int
+intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
 {
         unsigned long irqflags;
         struct intel_uncore_forcewake_domain *domain;
@@ -412,20 +412,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
                 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
 
         fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
-
-        if (restore) { /* If reset with a user forcewake, try to restore */
-                if (fw)
-                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
-
-                if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
-                        dev_priv->uncore.fifo_count =
-                                fifo_free_entries(dev_priv);
-        }
-
-        if (!restore)
-                assert_forcewakes_inactive(dev_priv);
+        assert_forcewakes_inactive(dev_priv);
 
         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+        return fw; /* track the lost user forcewake domains */
 }
 
 static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
@@ -534,7 +525,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 }
 
 static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
-                                          bool restore_forcewake)
+                                          unsigned int restore_forcewake)
 {
         /* clear out unclaimed reg detection bit */
         if (check_for_unclaimed_mmio(dev_priv))
@@ -549,7 +540,17 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
         }
 
         iosf_mbi_punit_acquire();
-        intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
+        intel_uncore_forcewake_reset(dev_priv);
+        if (restore_forcewake) {
+                spin_lock_irq(&dev_priv->uncore.lock);
+                dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                      restore_forcewake);
+
+                if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+                        dev_priv->uncore.fifo_count =
+                                fifo_free_entries(dev_priv);
+                spin_unlock_irq(&dev_priv->uncore.lock);
+        }
         iosf_mbi_punit_release();
 }
 
@@ -558,13 +559,18 @@ void intel_uncore_suspend(struct drm_i915_private *dev_priv)
         iosf_mbi_punit_acquire();
         iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
                 &dev_priv->uncore.pmic_bus_access_nb);
-        intel_uncore_forcewake_reset(dev_priv, false);
+        dev_priv->uncore.fw_domains_saved =
+                intel_uncore_forcewake_reset(dev_priv);
         iosf_mbi_punit_release();
 }
 
 void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
 {
-        __intel_uncore_early_sanitize(dev_priv, true);
+        unsigned int restore_forcewake;
+
+        restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
+        __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+
         iosf_mbi_register_pmic_bus_access_notifier(
                 &dev_priv->uncore.pmic_bus_access_nb);
         i915_check_and_clear_faults(dev_priv);
@@ -1545,7 +1551,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 
         intel_uncore_edram_detect(dev_priv);
         intel_uncore_fw_domains_init(dev_priv);
-        __intel_uncore_early_sanitize(dev_priv, false);
+        __intel_uncore_early_sanitize(dev_priv, 0);
 
         dev_priv->uncore.unclaimed_mmio_check = 1;
         dev_priv->uncore.pmic_bus_access_nb.notifier_call =
@@ -1632,7 +1638,7 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv)
         iosf_mbi_punit_acquire();
         iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
                 &dev_priv->uncore.pmic_bus_access_nb);
-        intel_uncore_forcewake_reset(dev_priv, false);
+        intel_uncore_forcewake_reset(dev_priv);
         iosf_mbi_punit_release();
 }
 
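
intel_uncore_forcewake_reset() now returns the forcewake domains the user still held instead of restoring them itself; suspend stashes that mask in fw_domains_saved, and resume_early consumes it with fetch_and_zero(). A sketch of that helper's semantics (the i915 macro lives in i915_utils.h; reproduced here from memory as an illustration):

    #define fetch_and_zero(ptr) ({                  \
            typeof(*ptr) __T = *(ptr);              \
            *(ptr) = (typeof(*ptr))0;               \
            __T;                                    \
    })

Reading and clearing in one step means a second resume without an intervening suspend restores nothing.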
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 2fbe93178fb2..e5e157d288de 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -104,6 +104,7 @@ struct intel_uncore {
 
         enum forcewake_domains fw_domains;
         enum forcewake_domains fw_domains_active;
+        enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
 
         u32 fw_set;
         u32 fw_clear;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index c69cbd5aed52..ba4f322d56b8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -499,6 +499,19 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
         return err == expected;
 }
 
+static void disable_retire_worker(struct drm_i915_private *i915)
+{
+        mutex_lock(&i915->drm.struct_mutex);
+        if (!i915->gt.active_requests++) {
+                intel_runtime_pm_get(i915);
+                i915_gem_unpark(i915);
+                intel_runtime_pm_put(i915);
+        }
+        mutex_unlock(&i915->drm.struct_mutex);
+        cancel_delayed_work_sync(&i915->gt.retire_work);
+        cancel_delayed_work_sync(&i915->gt.idle_work);
+}
+
 static int igt_mmap_offset_exhaustion(void *arg)
 {
         struct drm_i915_private *i915 = arg;
@@ -509,12 +522,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
         int loop, err;
 
         /* Disable background reaper */
-        mutex_lock(&i915->drm.struct_mutex);
-        if (!i915->gt.active_requests++)
-                i915_gem_unpark(i915);
-        mutex_unlock(&i915->drm.struct_mutex);
-        cancel_delayed_work_sync(&i915->gt.retire_work);
-        cancel_delayed_work_sync(&i915->gt.idle_work);
+        disable_retire_worker(i915);
         GEM_BUG_ON(!i915->gt.awake);
 
         /* Trim the device mmap space to only a page */
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 47bc5b2ddb56..81d9d31042a9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -160,7 +160,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
         i915_reg_t reg = { offset };
 
         iosf_mbi_punit_acquire();
-        intel_uncore_forcewake_reset(dev_priv, false);
+        intel_uncore_forcewake_reset(dev_priv);
         iosf_mbi_punit_release();
 
         check_for_unclaimed_mmio(dev_priv);