Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c       2
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c          2
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c         6
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c              6
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c        45
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c       33
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h        3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c     6
-rw-r--r--  drivers/gpu/drm/i915/i915_gemfs.c           3
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c    1
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h            2
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c         10
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c           3
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c            4
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c        13
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h         1
17 files changed, 81 insertions(+), 62 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 701a3c6f1669..85d4c57870fb 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1628,7 +1628,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	struct intel_shadow_bb_entry *entry_obj;
 	struct intel_vgpu *vgpu = s->vgpu;
 	unsigned long gma = 0;
-	uint32_t bb_size;
+	int bb_size;
 	void *dst = NULL;
 	int ret = 0;
 
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 3c318439a659..355120865efd 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -282,6 +282,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 		int type, unsigned int resolution)
 {
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
 	if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -307,6 +308,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 	port->type = type;
 
 	emulate_monitor_status_change(vgpu);
+	vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 4427be18e4a9..940cdaaa3f24 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 		goto err_unpin_mm;
 	}
 
+	ret = intel_gvt_generate_request(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to generate request\n");
+		goto err_unpin_mm;
+	}
+
 	ret = prepare_shadow_batch_buffer(workload);
 	if (ret) {
 		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2801d70579d8..8e331142badb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
 
 #define GTT_HAW 46
 
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
 
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {
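
Why the "+ 1" was wrong, as a standalone check: with GTT_HAW = 46 the physical address bits run 0..45, so a 1G-aligned entry carries GTT_HAW - 30 = 16 address bits (bits 30..45). A minimal userspace sketch (not part of the patch) verifying the corrected mask:

/* Standalone sketch verifying the corrected mask width. */
#include <assert.h>

#define GTT_HAW 46
#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)

int main(void)
{
	/* Bits 30..45 set: 2^46 - 2^30 = 0x3fffc0000000. */
	assert(ADDR_1G_MASK == 0x3fffc0000000UL);
	/* The old "- 30 + 1" variant also set bit 46, one past the
	 * hardware address width, letting a stray PTE bit leak into
	 * the extracted PFN. */
	return 0;
}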
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a5bed2e71b92..44cd5ff5e97d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1381,40 +1381,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
 	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
 }
 
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
-		void *p_data, unsigned int bytes)
-{
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u32 v = *(u32 *)p_data;
-
-	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
-		return intel_vgpu_default_mmio_write(vgpu,
-			offset, p_data, bytes);
-
-	switch (offset) {
-	case 0x4ddc:
-		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
-		break;
-	case 0x42080:
-		/* bypass WaCompressedResourceDisplayNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
-		break;
-	case 0xe194:
-		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
-		break;
-	case 0x7014:
-		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-		vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
@@ -1671,8 +1637,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
 		NULL, NULL);
-	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
-		skl_misc_ctl_write);
+	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2564,8 +2530,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x6e570, D_BDW_PLUS);
 	MMIO_D(0x65f10, D_BDW_PLUS);
 
-	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
-		skl_misc_ctl_write);
+	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2580,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
-	MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+	MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
 	MMIO_D(0x45504, D_SKL_PLUS);
 	MMIO_D(0x45520, D_SKL_PLUS);
 	MMIO_D(0x46000, D_SKL_PLUS);
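
Dropping skl_misc_ctl_write() means writes to 0x4ddc, 0x42080, 0xe194 and COMMON_SLICE_CHICKEN2 now take the default vreg path instead of silently clearing the workaround bits. Roughly what that path does, as a simplified sketch (not verbatim from intel_vgpu_default_mmio_write):

/* Simplified sketch: the default write handler just latches the guest
 * value into the virtual register file, so the guest now sees the
 * WaCompressedResource* bits exactly as it wrote them. */
static int default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
			      void *p_data, unsigned int bytes)
{
	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
	return 0;
}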
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f6ded475bb2c..3ac1dc97a7a0 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -140,9 +140,10 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	enum intel_engine_id ring_id = req->engine->id;
 	struct intel_vgpu_workload *workload;
+	unsigned long flags;
 
 	if (!is_gvt_request(req)) {
-		spin_lock_bh(&scheduler->mmio_context_lock);
+		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
 		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
 		    scheduler->engine_owner[ring_id]) {
 			/* Switch ring from vGPU to host. */
@@ -150,7 +151,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 					      NULL, ring_id);
 			scheduler->engine_owner[ring_id] = NULL;
 		}
-		spin_unlock_bh(&scheduler->mmio_context_lock);
+		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 
 		return NOTIFY_OK;
 	}
@@ -161,7 +162,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 
 	switch (action) {
 	case INTEL_CONTEXT_SCHEDULE_IN:
-		spin_lock_bh(&scheduler->mmio_context_lock);
+		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
 		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
 			/* Switch ring from host to vGPU or vGPU to vGPU. */
 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -170,7 +171,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		} else
 			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
 				      ring_id, workload->vgpu->id);
-		spin_unlock_bh(&scheduler->mmio_context_lock);
+		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
@@ -253,7 +254,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_ring *ring;
 	int ret;
@@ -299,6 +299,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = populate_shadow_context(workload);
 	if (ret)
 		goto err_unpin;
+	workload->shadowed = true;
+	return 0;
+
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+	return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct drm_i915_gem_request *rq;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+	int ret;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
@@ -313,14 +333,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
 		goto err_unpin;
-	workload->shadowed = true;
 	return 0;
 
 err_unpin:
 	engine->context_unpin(engine, shadow_ctx);
-err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
 	return ret;
 }
 
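
The _bh to _irqsave switch matters because spin_lock_bh() only masks softirqs; if the context-status notifier can be reached with hardirqs in play, mmio_context_lock needs the stronger form. The pattern as a self-contained sketch (hypothetical helper, not from the patch):

/* Hypothetical helper showing the locking pattern used above:
 * irqsave/irqrestore is safe from process, softirq and hardirq
 * context alike, where spin_lock_bh() is not. */
static void set_engine_owner(struct intel_gvt_workload_scheduler *scheduler,
			     int ring_id, struct intel_vgpu *owner)
{
	unsigned long flags;

	spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
	scheduler->engine_owner[ring_id] = owner;
	spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
}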
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2d694f6c0907..b9f872204d7e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
 
 void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 960d3d8b95b8..2cf10d17acfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1714,6 +1714,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	intel_guc_resume(dev_priv);
 
 	intel_modeset_init_hw(dev);
+	intel_init_clock_gating(dev_priv);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display.hpd_irq_setup)
@@ -2618,6 +2619,8 @@ static int intel_runtime_resume(struct device *kdev)
 		ret = vlv_resume_prepare(dev_priv, true);
 	}
 
+	intel_uncore_runtime_resume(dev_priv);
+
 	/*
 	 * No point of rolling back things in case of an error, as the best
 	 * we can do is to hope that things will still work (and disable RPM).
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 135fc750a837..382a77a1097e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -172,7 +172,9 @@ i915_mmu_notifier_create(struct mm_struct *mm)
 	spin_lock_init(&mn->lock);
 	mn->mn.ops = &i915_gem_userptr_notifier;
 	mn->objects = RB_ROOT_CACHED;
-	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+	mn->wq = alloc_workqueue("i915-userptr-release",
+				 WQ_UNBOUND | WQ_MEM_RECLAIM,
+				 0);
 	if (mn->wq == NULL) {
 		kfree(mn);
 		return ERR_PTR(-ENOMEM);
@@ -827,7 +829,7 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 
 	dev_priv->mm.userptr_wq =
 		alloc_workqueue("i915-userptr-acquire",
-				WQ_HIGHPRI | WQ_MEM_RECLAIM,
+				WQ_HIGHPRI | WQ_UNBOUND,
 				0);
 	if (!dev_priv->mm.userptr_wq)
 		return -ENOMEM;
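
The two hunks swap which queue carries WQ_MEM_RECLAIM. Side by side, with the assumed rationale in the comments (a sketch, not additional patch content):

/* Release work can be flushed from reclaim/shrinker paths, so it
 * needs a rescuer thread: WQ_MEM_RECLAIM. */
mn->wq = alloc_workqueue("i915-userptr-release",
			 WQ_UNBOUND | WQ_MEM_RECLAIM, 0);

/* Acquire work calls get_user_pages() and may itself allocate memory,
 * so it must not promise forward progress under reclaim; WQ_MEM_RECLAIM
 * is dropped and WQ_UNBOUND added. */
dev_priv->mm.userptr_wq = alloc_workqueue("i915-userptr-acquire",
					  WQ_HIGHPRI | WQ_UNBOUND, 0);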
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/i915_gemfs.c
index e2993857df37..888b7d3f04c3 100644
--- a/drivers/gpu/drm/i915/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/i915_gemfs.c
@@ -52,7 +52,8 @@ int i915_gemfs_init(struct drm_i915_private *i915)
 
 	if (has_transparent_hugepage()) {
 		struct super_block *sb = gemfs->mnt_sb;
-		char options[] = "huge=within_size";
+		/* FIXME: Disabled until we get W/A for read BW issue. */
+		char options[] = "huge=never";
 		int flags = 0;
 		int err;
 
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 48e1ba01ccf8..5f8b9f1f40f1 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -517,6 +517,7 @@ static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
 
 	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
 	rb_erase(&wait->node, &b->waiters);
+	RB_CLEAR_NODE(&wait->node);
 
 out:
 	GEM_BUG_ON(b->irq_wait == wait);
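
rb_erase() unlinks a node but leaves its parent pointer stale, so a later RB_EMPTY_NODE() check would still report it as linked; RB_CLEAR_NODE() resets that. The resulting idiom, as a generic illustrative sketch (not the driver code):

/* Illustrative idiom: clearing after erase makes removal idempotent
 * and keeps RB_EMPTY_NODE() checks truthful. */
static void remove_waiter(struct rb_root *waiters, struct rb_node *node)
{
	if (RB_EMPTY_NODE(node))
		return;			/* already removed */
	rb_erase(node, waiters);
	RB_CLEAR_NODE(node);		/* mark unlinked */
}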
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7bc60c848940..6c7f8bca574e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1736,7 +1736,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
 int intel_backlight_device_register(struct intel_connector *connector);
 void intel_backlight_device_unregister(struct intel_connector *connector);
 #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
+static inline int intel_backlight_device_register(struct intel_connector *connector)
 {
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b8af35187d22..ea96682568e8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -697,10 +697,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
 
 	/* Due to peculiar init order wrt to hpd handling this is separate. */
 	if (drm_fb_helper_initial_config(&ifbdev->helper,
-					 ifbdev->preferred_bpp)) {
+					 ifbdev->preferred_bpp))
 		intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
-		intel_fbdev_fini(to_i915(ifbdev->helper.dev));
-	}
 }
 
 void intel_fbdev_initial_config_async(struct drm_device *dev)
@@ -800,7 +798,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
-	if (ifbdev)
+	if (!ifbdev)
+		return;
+
+	intel_fbdev_sync(ifbdev);
+	if (ifbdev->vma)
 		drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 5132dc814788..4dea833f9d1b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -487,7 +487,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
 					   crtc_state->limited_color_range ?
 					   HDMI_QUANTIZATION_RANGE_LIMITED :
 					   HDMI_QUANTIZATION_RANGE_FULL,
-					   intel_hdmi->rgb_quant_range_selectable);
+					   intel_hdmi->rgb_quant_range_selectable,
+					   is_hdmi2_sink);
 
 	/* TODO: handle pixel repetition for YCBCR420 outputs */
 	intel_write_infoframe(encoder, crtc_state, &frame);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index eb5827110d8f..49fdf09f9919 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -438,7 +438,9 @@ static bool
 gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
 {
 	return (i + 1 < num &&
-		!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+		msgs[i].addr == msgs[i + 1].addr &&
+		!(msgs[i].flags & I2C_M_RD) &&
+		(msgs[i].len == 1 || msgs[i].len == 2) &&
 		(msgs[i + 1].flags & I2C_M_RD));
 }
 
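
With the extra checks, only a write-then-read pair to the same device with a 1- or 2-byte index qualifies for the GMBUS indexed-read fast path. A qualifying transfer would look like this (hypothetical address and lengths, kernel context assumed):

/* Hypothetical message pair that gmbus_is_index_read() accepts:
 * same slave address, 1-byte index write, then a read. */
u8 index = 0x00;
u8 data[8];
struct i2c_msg msgs[] = {
	{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &index },
	{ .addr = 0x50, .flags = I2C_M_RD, .len = 8, .buf = data },
};
/* A zero-length index write, or a pair addressed to two different
 * devices, is no longer collapsed into a single indexed cycle. */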
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 20e3c65c0999..8c2ce81f01c2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -434,6 +434,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
 	i915_check_and_clear_faults(dev_priv);
 }
 
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+{
+	iosf_mbi_register_pmic_bus_access_notifier(
+		&dev_priv->uncore.pmic_bus_access_nb);
+}
+
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
 {
 	i915_modparams.enable_rc6 =
@@ -1240,8 +1246,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
 		 * bus, which will be busy after this notification, leading to:
 		 * "render: timed out waiting for forcewake ack request."
 		 * errors.
+		 *
+		 * The notifier is unregistered during intel_runtime_suspend(),
+		 * so it's ok to access the HW here without holding a RPM
+		 * wake reference -> disable wakeref asserts for the time of
+		 * the access.
 		 */
+		disable_rpm_wakeref_asserts(dev_priv);
 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+		enable_rpm_wakeref_asserts(dev_priv);
 		break;
 	case MBI_PMIC_BUS_ACCESS_END:
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
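
The new intel_uncore_runtime_resume() pairs with the unregistration mentioned in the comment above; that pairing is what makes the wakeref-assert bracket legitimate. Assumed shape of the two sides (a sketch, not the actual suspend/resume functions):

/* Sketch of the assumed pairing: the PMIC bus access notifier is torn
 * down before the device powers off and re-registered once it is
 * powered again, so no callback can race a suspended device. */
static void runtime_suspend_uncore(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
}

static void runtime_resume_uncore(struct drm_i915_private *dev_priv)
{
	intel_uncore_runtime_resume(dev_priv);	/* re-register notifier */
}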
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 582771251b57..9ce079b5dd0d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -134,6 +134,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
 void intel_uncore_fini(struct drm_i915_private *dev_priv);
 void intel_uncore_suspend(struct drm_i915_private *dev_priv);
 void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);