aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2016-03-07 19:50:37 -0500
committerDave Airlie <airlied@redhat.com>2016-03-07 19:50:37 -0500
commit507d44a9e1bb01661c75b88fd866d2461ab41c9c (patch)
tree75ae546cddd02379de3eca3497a042e9148828f1
parentd8c61663c7a8e5041f5c2a4b142f24d0d7861ad6 (diff)
parent5790ff742b1feee62f60a95f4caf78827f656f58 (diff)
Merge tag 'drm-intel-next-2016-02-29' of git://anongit.freedesktop.org/drm-intel into drm-next
- fbc by default on hsw&bdw, thanks to great work by Paulo! - psr by default hsw,bdw,vlv&chv, thanks to great work by Rodrigo! - fixes to hw state readout vs. rpm issues (Imre) - dc3 fixes&improvements (Mika), this and above already cherry-picked to -fixes - first part of locking fixes from Tvrtko - proper atomic code for load detect (Maarten) - more rpm fixes from Ville - more atomic work from Maarten * tag 'drm-intel-next-2016-02-29' of git://anongit.freedesktop.org/drm-intel: (63 commits) drm/i915: Update DRIVER_DATE to 20160229 drm/i915: Execlists cannot pin a context without the object drm/i915: Reduce the pointer dance of i915_is_ggtt() drm/i915: Rename vma->*_list to *_link for consistency drm/i915: Balance assert_rpm_wakelock_held() for !IS_ENABLED(CONFIG_PM) drm/i915/lrc: Only set RS ctx enable in ctx control reg if there is a RS drm/i915/gen9: Set value of Indirect Context Offset based on gen version drm/i915: Remove update_sprite_watermarks. drm/i915: Kill off intel_crtc->atomic.wait_vblank, v6. drm/i915: Unify power domain handling. drm/i915: Pass crtc state to modeset_get_crtc_power_domains. drm/i915: Add for_each_pipe_masked() drm/i915: Make sure pipe interrupts are processed before turning off power well on BDW+ drm/i915: synchronize_irq() before turning off disp2d power well on VLV/CHV drm/i915: Skip PIPESTAT reads from irq handler on VLV/CHV when power well is down drm/i915/gen9: Write dc state debugmask bits only once drm/i915/gen9: Extend dmc debug mask to include cores drm/i915/gen9: Verify and enforce dc6 state writes drm/i915/gen9: Check for DC state mismatch drm/i915/fbc: enable FBC by default on HSW and BDW ...
-rw-r--r--drivers/gpu/drm/i915/Kconfig11
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c56
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c8
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h16
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c99
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c22
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c8
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c37
-rw-r--r--drivers/gpu/drm/i915/i915_params.c17
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h16
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h27
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c1
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c23
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c10
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c112
-rw-r--r--drivers/gpu/drm/i915/intel_display.c622
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c34
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h14
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c29
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h2
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c5
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c2
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c4
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c17
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c6
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c14
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c33
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c22
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c9
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c190
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c11
42 files changed, 930 insertions, 610 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 4c59793c4ccb..20a5d0455e19 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -45,3 +45,14 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
45 option changes the default for that module option. 45 option changes the default for that module option.
46 46
47 If in doubt, say "N". 47 If in doubt, say "N".
48
49config DRM_I915_USERPTR
50 bool "Always enable userptr support"
51 depends on DRM_I915
52 select MMU_NOTIFIER
53 default y
54 help
55 This option selects CONFIG_MMU_NOTIFIER if it isn't already
 56 selected to enable full userptr support.
57
58 If in doubt, say "Y".
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ec0c2a05eed6..a0f1bd711b53 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,9 +117,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
117 u64 size = 0; 117 u64 size = 0;
118 struct i915_vma *vma; 118 struct i915_vma *vma;
119 119
120 list_for_each_entry(vma, &obj->vma_list, vma_link) { 120 list_for_each_entry(vma, &obj->vma_list, obj_link) {
121 if (i915_is_ggtt(vma->vm) && 121 if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
122 drm_mm_node_allocated(&vma->node))
123 size += vma->node.size; 122 size += vma->node.size;
124 } 123 }
125 124
@@ -155,7 +154,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
155 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 154 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
156 if (obj->base.name) 155 if (obj->base.name)
157 seq_printf(m, " (name: %d)", obj->base.name); 156 seq_printf(m, " (name: %d)", obj->base.name);
158 list_for_each_entry(vma, &obj->vma_list, vma_link) { 157 list_for_each_entry(vma, &obj->vma_list, obj_link) {
159 if (vma->pin_count > 0) 158 if (vma->pin_count > 0)
160 pin_count++; 159 pin_count++;
161 } 160 }
@@ -164,14 +163,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
164 seq_printf(m, " (display)"); 163 seq_printf(m, " (display)");
165 if (obj->fence_reg != I915_FENCE_REG_NONE) 164 if (obj->fence_reg != I915_FENCE_REG_NONE)
166 seq_printf(m, " (fence: %d)", obj->fence_reg); 165 seq_printf(m, " (fence: %d)", obj->fence_reg);
167 list_for_each_entry(vma, &obj->vma_list, vma_link) { 166 list_for_each_entry(vma, &obj->vma_list, obj_link) {
168 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", 167 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
169 i915_is_ggtt(vma->vm) ? "g" : "pp", 168 vma->is_ggtt ? "g" : "pp",
170 vma->node.start, vma->node.size); 169 vma->node.start, vma->node.size);
171 if (i915_is_ggtt(vma->vm)) 170 if (vma->is_ggtt)
172 seq_printf(m, ", type: %u)", vma->ggtt_view.type); 171 seq_printf(m, ", type: %u", vma->ggtt_view.type);
173 else 172 seq_puts(m, ")");
174 seq_puts(m, ")");
175 } 173 }
176 if (obj->stolen) 174 if (obj->stolen)
177 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 175 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -230,7 +228,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
230 } 228 }
231 229
232 total_obj_size = total_gtt_size = count = 0; 230 total_obj_size = total_gtt_size = count = 0;
233 list_for_each_entry(vma, head, mm_list) { 231 list_for_each_entry(vma, head, vm_link) {
234 seq_printf(m, " "); 232 seq_printf(m, " ");
235 describe_obj(m, vma->obj); 233 describe_obj(m, vma->obj);
236 seq_printf(m, "\n"); 234 seq_printf(m, "\n");
@@ -342,13 +340,13 @@ static int per_file_stats(int id, void *ptr, void *data)
342 stats->shared += obj->base.size; 340 stats->shared += obj->base.size;
343 341
344 if (USES_FULL_PPGTT(obj->base.dev)) { 342 if (USES_FULL_PPGTT(obj->base.dev)) {
345 list_for_each_entry(vma, &obj->vma_list, vma_link) { 343 list_for_each_entry(vma, &obj->vma_list, obj_link) {
346 struct i915_hw_ppgtt *ppgtt; 344 struct i915_hw_ppgtt *ppgtt;
347 345
348 if (!drm_mm_node_allocated(&vma->node)) 346 if (!drm_mm_node_allocated(&vma->node))
349 continue; 347 continue;
350 348
351 if (i915_is_ggtt(vma->vm)) { 349 if (vma->is_ggtt) {
352 stats->global += obj->base.size; 350 stats->global += obj->base.size;
353 continue; 351 continue;
354 } 352 }
@@ -454,12 +452,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
454 count, mappable_count, size, mappable_size); 452 count, mappable_count, size, mappable_size);
455 453
456 size = count = mappable_size = mappable_count = 0; 454 size = count = mappable_size = mappable_count = 0;
457 count_vmas(&vm->active_list, mm_list); 455 count_vmas(&vm->active_list, vm_link);
458 seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n", 456 seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
459 count, mappable_count, size, mappable_size); 457 count, mappable_count, size, mappable_size);
460 458
461 size = count = mappable_size = mappable_count = 0; 459 size = count = mappable_size = mappable_count = 0;
462 count_vmas(&vm->inactive_list, mm_list); 460 count_vmas(&vm->inactive_list, vm_link);
463 seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n", 461 seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
464 count, mappable_count, size, mappable_size); 462 count, mappable_count, size, mappable_size);
465 463
@@ -825,8 +823,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
825 } 823 }
826 824
827 for_each_pipe(dev_priv, pipe) { 825 for_each_pipe(dev_priv, pipe) {
828 if (!intel_display_power_is_enabled(dev_priv, 826 enum intel_display_power_domain power_domain;
829 POWER_DOMAIN_PIPE(pipe))) { 827
828 power_domain = POWER_DOMAIN_PIPE(pipe);
829 if (!intel_display_power_get_if_enabled(dev_priv,
830 power_domain)) {
830 seq_printf(m, "Pipe %c power disabled\n", 831 seq_printf(m, "Pipe %c power disabled\n",
831 pipe_name(pipe)); 832 pipe_name(pipe));
832 continue; 833 continue;
@@ -840,6 +841,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
840 seq_printf(m, "Pipe %c IER:\t%08x\n", 841 seq_printf(m, "Pipe %c IER:\t%08x\n",
841 pipe_name(pipe), 842 pipe_name(pipe),
842 I915_READ(GEN8_DE_PIPE_IER(pipe))); 843 I915_READ(GEN8_DE_PIPE_IER(pipe)));
844
845 intel_display_power_put(dev_priv, power_domain);
843 } 846 }
844 847
845 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 848 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -4004,6 +4007,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4004 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 4007 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
4005 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 4008 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
4006 pipe)); 4009 pipe));
4010 enum intel_display_power_domain power_domain;
4007 u32 val = 0; /* shut up gcc */ 4011 u32 val = 0; /* shut up gcc */
4008 int ret; 4012 int ret;
4009 4013
@@ -4014,7 +4018,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4014 if (pipe_crc->source && source) 4018 if (pipe_crc->source && source)
4015 return -EINVAL; 4019 return -EINVAL;
4016 4020
4017 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { 4021 power_domain = POWER_DOMAIN_PIPE(pipe);
4022 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
4018 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); 4023 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
4019 return -EIO; 4024 return -EIO;
4020 } 4025 }
@@ -4031,7 +4036,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4031 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4036 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4032 4037
4033 if (ret != 0) 4038 if (ret != 0)
4034 return ret; 4039 goto out;
4035 4040
4036 /* none -> real source transition */ 4041 /* none -> real source transition */
4037 if (source) { 4042 if (source) {
@@ -4043,8 +4048,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4043 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, 4048 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
4044 sizeof(pipe_crc->entries[0]), 4049 sizeof(pipe_crc->entries[0]),
4045 GFP_KERNEL); 4050 GFP_KERNEL);
4046 if (!entries) 4051 if (!entries) {
4047 return -ENOMEM; 4052 ret = -ENOMEM;
4053 goto out;
4054 }
4048 4055
4049 /* 4056 /*
4050 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 4057 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -4100,7 +4107,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4100 hsw_enable_ips(crtc); 4107 hsw_enable_ips(crtc);
4101 } 4108 }
4102 4109
4103 return 0; 4110 ret = 0;
4111
4112out:
4113 intel_display_power_put(dev_priv, power_domain);
4114
4115 return ret;
4104} 4116}
4105 4117
4106/* 4118/*
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2df2fac04708..1c6d227aae7c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -444,8 +444,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
444 444
445cleanup_gem: 445cleanup_gem:
446 mutex_lock(&dev->struct_mutex); 446 mutex_lock(&dev->struct_mutex);
447 i915_gem_cleanup_ringbuffer(dev);
447 i915_gem_context_fini(dev); 448 i915_gem_context_fini(dev);
448 i915_gem_cleanup_engines(dev);
449 mutex_unlock(&dev->struct_mutex); 449 mutex_unlock(&dev->struct_mutex);
450cleanup_irq: 450cleanup_irq:
451 intel_guc_ucode_fini(dev); 451 intel_guc_ucode_fini(dev);
@@ -1256,8 +1256,8 @@ int i915_driver_unload(struct drm_device *dev)
1256 1256
1257 intel_guc_ucode_fini(dev); 1257 intel_guc_ucode_fini(dev);
1258 mutex_lock(&dev->struct_mutex); 1258 mutex_lock(&dev->struct_mutex);
1259 i915_gem_cleanup_ringbuffer(dev);
1259 i915_gem_context_fini(dev); 1260 i915_gem_context_fini(dev);
1260 i915_gem_cleanup_engines(dev);
1261 mutex_unlock(&dev->struct_mutex); 1261 mutex_unlock(&dev->struct_mutex);
1262 intel_fbc_cleanup_cfb(dev_priv); 1262 intel_fbc_cleanup_cfb(dev_priv);
1263 1263
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 44912ecebc1a..20e82008b8b6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -603,13 +603,7 @@ static int i915_drm_suspend(struct drm_device *dev)
603 603
604 intel_suspend_gt_powersave(dev); 604 intel_suspend_gt_powersave(dev);
605 605
606 /*
607 * Disable CRTCs directly since we want to preserve sw state
608 * for _thaw. Also, power gate the CRTC power wells.
609 */
610 drm_modeset_lock_all(dev);
611 intel_display_suspend(dev); 606 intel_display_suspend(dev);
612 drm_modeset_unlock_all(dev);
613 607
614 intel_dp_mst_suspend(dev); 608 intel_dp_mst_suspend(dev);
615 609
@@ -764,9 +758,7 @@ static int i915_drm_resume(struct drm_device *dev)
764 dev_priv->display.hpd_irq_setup(dev); 758 dev_priv->display.hpd_irq_setup(dev);
765 spin_unlock_irq(&dev_priv->irq_lock); 759 spin_unlock_irq(&dev_priv->irq_lock);
766 760
767 drm_modeset_lock_all(dev);
768 intel_display_resume(dev); 761 intel_display_resume(dev);
769 drm_modeset_unlock_all(dev);
770 762
771 intel_dp_mst_resume(dev); 763 intel_dp_mst_resume(dev);
772 764
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 64cfd446453c..10480939159c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -59,7 +59,7 @@
59 59
60#define DRIVER_NAME "i915" 60#define DRIVER_NAME "i915"
61#define DRIVER_DESC "Intel Graphics" 61#define DRIVER_DESC "Intel Graphics"
62#define DRIVER_DATE "20160214" 62#define DRIVER_DATE "20160229"
63 63
64#undef WARN_ON 64#undef WARN_ON
65/* Many gcc seem to no see through this and fall over :( */ 65/* Many gcc seem to no see through this and fall over :( */
@@ -261,6 +261,9 @@ struct i915_hotplug {
261 261
262#define for_each_pipe(__dev_priv, __p) \ 262#define for_each_pipe(__dev_priv, __p) \
263 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) 263 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
264#define for_each_pipe_masked(__dev_priv, __p, __mask) \
265 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
266 for_each_if ((__mask) & (1 << (__p)))
264#define for_each_plane(__dev_priv, __pipe, __p) \ 267#define for_each_plane(__dev_priv, __pipe, __p) \
265 for ((__p) = 0; \ 268 for ((__p) = 0; \
266 (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ 269 (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
@@ -746,6 +749,7 @@ struct intel_csr {
746 uint32_t mmio_count; 749 uint32_t mmio_count;
747 i915_reg_t mmioaddr[8]; 750 i915_reg_t mmioaddr[8];
748 uint32_t mmiodata[8]; 751 uint32_t mmiodata[8];
752 uint32_t dc_state;
749}; 753};
750 754
751#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 755#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -1848,6 +1852,7 @@ struct drm_i915_private {
1848 1852
1849 enum modeset_restore modeset_restore; 1853 enum modeset_restore modeset_restore;
1850 struct mutex modeset_restore_lock; 1854 struct mutex modeset_restore_lock;
1855 struct drm_atomic_state *modeset_restore_state;
1851 1856
1852 struct list_head vm_list; /* Global list of all address spaces */ 1857 struct list_head vm_list; /* Global list of all address spaces */
1853 struct i915_gtt gtt; /* VM representing the global address space */ 1858 struct i915_gtt gtt; /* VM representing the global address space */
@@ -3058,7 +3063,7 @@ int i915_gem_init_rings(struct drm_device *dev);
3058int __must_check i915_gem_init_hw(struct drm_device *dev); 3063int __must_check i915_gem_init_hw(struct drm_device *dev);
3059int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); 3064int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
3060void i915_gem_init_swizzling(struct drm_device *dev); 3065void i915_gem_init_swizzling(struct drm_device *dev);
3061void i915_gem_cleanup_engines(struct drm_device *dev); 3066void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
3062int __must_check i915_gpu_idle(struct drm_device *dev); 3067int __must_check i915_gpu_idle(struct drm_device *dev);
3063int __must_check i915_gem_suspend(struct drm_device *dev); 3068int __must_check i915_gem_suspend(struct drm_device *dev);
3064void __i915_add_request(struct drm_i915_gem_request *req, 3069void __i915_add_request(struct drm_i915_gem_request *req,
@@ -3151,18 +3156,11 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
3151/* Some GGTT VM helpers */ 3156/* Some GGTT VM helpers */
3152#define i915_obj_to_ggtt(obj) \ 3157#define i915_obj_to_ggtt(obj) \
3153 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) 3158 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
3154static inline bool i915_is_ggtt(struct i915_address_space *vm)
3155{
3156 struct i915_address_space *ggtt =
3157 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
3158 return vm == ggtt;
3159}
3160 3159
3161static inline struct i915_hw_ppgtt * 3160static inline struct i915_hw_ppgtt *
3162i915_vm_to_ppgtt(struct i915_address_space *vm) 3161i915_vm_to_ppgtt(struct i915_address_space *vm)
3163{ 3162{
3164 WARN_ON(i915_is_ggtt(vm)); 3163 WARN_ON(i915_is_ggtt(vm));
3165
3166 return container_of(vm, struct i915_hw_ppgtt, base); 3164 return container_of(vm, struct i915_hw_ppgtt, base);
3167} 3165}
3168 3166
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index de57e7f0be0f..3d31d3ac589e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -138,10 +138,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
138 138
139 pinned = 0; 139 pinned = 0;
140 mutex_lock(&dev->struct_mutex); 140 mutex_lock(&dev->struct_mutex);
141 list_for_each_entry(vma, &ggtt->base.active_list, mm_list) 141 list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
142 if (vma->pin_count) 142 if (vma->pin_count)
143 pinned += vma->node.size; 143 pinned += vma->node.size;
144 list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list) 144 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
145 if (vma->pin_count) 145 if (vma->pin_count)
146 pinned += vma->node.size; 146 pinned += vma->node.size;
147 mutex_unlock(&dev->struct_mutex); 147 mutex_unlock(&dev->struct_mutex);
@@ -272,7 +272,7 @@ drop_pages(struct drm_i915_gem_object *obj)
272 int ret; 272 int ret;
273 273
274 drm_gem_object_reference(&obj->base); 274 drm_gem_object_reference(&obj->base);
275 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) 275 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
276 if (i915_vma_unbind(vma)) 276 if (i915_vma_unbind(vma))
277 break; 277 break;
278 278
@@ -489,7 +489,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
489 489
490 *needs_clflush = 0; 490 *needs_clflush = 0;
491 491
492 if (!obj->base.filp) 492 if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
493 return -EINVAL; 493 return -EINVAL;
494 494
495 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { 495 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -2416,7 +2416,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
2416 list_move_tail(&obj->ring_list[ring->id], &ring->active_list); 2416 list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
2417 i915_gem_request_assign(&obj->last_read_req[ring->id], req); 2417 i915_gem_request_assign(&obj->last_read_req[ring->id], req);
2418 2418
2419 list_move_tail(&vma->mm_list, &vma->vm->active_list); 2419 list_move_tail(&vma->vm_link, &vma->vm->active_list);
2420} 2420}
2421 2421
2422static void 2422static void
@@ -2454,9 +2454,9 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2454 list_move_tail(&obj->global_list, 2454 list_move_tail(&obj->global_list,
2455 &to_i915(obj->base.dev)->mm.bound_list); 2455 &to_i915(obj->base.dev)->mm.bound_list);
2456 2456
2457 list_for_each_entry(vma, &obj->vma_list, vma_link) { 2457 list_for_each_entry(vma, &obj->vma_list, obj_link) {
2458 if (!list_empty(&vma->mm_list)) 2458 if (!list_empty(&vma->vm_link))
2459 list_move_tail(&vma->mm_list, &vma->vm->inactive_list); 2459 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
2460 } 2460 }
2461 2461
2462 i915_gem_request_assign(&obj->last_fenced_req, NULL); 2462 i915_gem_request_assign(&obj->last_fenced_req, NULL);
@@ -2970,11 +2970,9 @@ i915_gem_retire_requests(struct drm_device *dev)
2970 i915_gem_retire_requests_ring(ring); 2970 i915_gem_retire_requests_ring(ring);
2971 idle &= list_empty(&ring->request_list); 2971 idle &= list_empty(&ring->request_list);
2972 if (i915.enable_execlists) { 2972 if (i915.enable_execlists) {
2973 unsigned long flags; 2973 spin_lock_irq(&ring->execlist_lock);
2974
2975 spin_lock_irqsave(&ring->execlist_lock, flags);
2976 idle &= list_empty(&ring->execlist_queue); 2974 idle &= list_empty(&ring->execlist_queue);
2977 spin_unlock_irqrestore(&ring->execlist_lock, flags); 2975 spin_unlock_irq(&ring->execlist_lock);
2978 2976
2979 intel_execlists_retire_requests(ring); 2977 intel_execlists_retire_requests(ring);
2980 } 2978 }
@@ -3319,7 +3317,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3319 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3317 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3320 int ret; 3318 int ret;
3321 3319
3322 if (list_empty(&vma->vma_link)) 3320 if (list_empty(&vma->obj_link))
3323 return 0; 3321 return 0;
3324 3322
3325 if (!drm_mm_node_allocated(&vma->node)) { 3323 if (!drm_mm_node_allocated(&vma->node)) {
@@ -3338,8 +3336,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3338 return ret; 3336 return ret;
3339 } 3337 }
3340 3338
3341 if (i915_is_ggtt(vma->vm) && 3339 if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3342 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3343 i915_gem_object_finish_gtt(obj); 3340 i915_gem_object_finish_gtt(obj);
3344 3341
3345 /* release the fence reg _after_ flushing */ 3342 /* release the fence reg _after_ flushing */
@@ -3353,8 +3350,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3353 vma->vm->unbind_vma(vma); 3350 vma->vm->unbind_vma(vma);
3354 vma->bound = 0; 3351 vma->bound = 0;
3355 3352
3356 list_del_init(&vma->mm_list); 3353 list_del_init(&vma->vm_link);
3357 if (i915_is_ggtt(vma->vm)) { 3354 if (vma->is_ggtt) {
3358 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { 3355 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3359 obj->map_and_fenceable = false; 3356 obj->map_and_fenceable = false;
3360 } else if (vma->ggtt_view.pages) { 3357 } else if (vma->ggtt_view.pages) {
@@ -3611,7 +3608,7 @@ search_free:
3611 goto err_remove_node; 3608 goto err_remove_node;
3612 3609
3613 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); 3610 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3614 list_add_tail(&vma->mm_list, &vm->inactive_list); 3611 list_add_tail(&vma->vm_link, &vm->inactive_list);
3615 3612
3616 return vma; 3613 return vma;
3617 3614
@@ -3776,7 +3773,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3776 /* And bump the LRU for this access */ 3773 /* And bump the LRU for this access */
3777 vma = i915_gem_obj_to_ggtt(obj); 3774 vma = i915_gem_obj_to_ggtt(obj);
3778 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active) 3775 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
3779 list_move_tail(&vma->mm_list, 3776 list_move_tail(&vma->vm_link,
3780 &to_i915(obj->base.dev)->gtt.base.inactive_list); 3777 &to_i915(obj->base.dev)->gtt.base.inactive_list);
3781 3778
3782 return 0; 3779 return 0;
@@ -3811,7 +3808,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3811 * catch the issue of the CS prefetch crossing page boundaries and 3808 * catch the issue of the CS prefetch crossing page boundaries and
3812 * reading an invalid PTE on older architectures. 3809 * reading an invalid PTE on older architectures.
3813 */ 3810 */
3814 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 3811 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
3815 if (!drm_mm_node_allocated(&vma->node)) 3812 if (!drm_mm_node_allocated(&vma->node))
3816 continue; 3813 continue;
3817 3814
@@ -3874,7 +3871,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3874 */ 3871 */
3875 } 3872 }
3876 3873
3877 list_for_each_entry(vma, &obj->vma_list, vma_link) { 3874 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3878 if (!drm_mm_node_allocated(&vma->node)) 3875 if (!drm_mm_node_allocated(&vma->node))
3879 continue; 3876 continue;
3880 3877
@@ -3884,7 +3881,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3884 } 3881 }
3885 } 3882 }
3886 3883
3887 list_for_each_entry(vma, &obj->vma_list, vma_link) 3884 list_for_each_entry(vma, &obj->vma_list, obj_link)
3888 vma->node.color = cache_level; 3885 vma->node.color = cache_level;
3889 obj->cache_level = cache_level; 3886 obj->cache_level = cache_level;
3890 3887
@@ -4558,7 +4555,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4558 4555
4559 trace_i915_gem_object_destroy(obj); 4556 trace_i915_gem_object_destroy(obj);
4560 4557
4561 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 4558 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4562 int ret; 4559 int ret;
4563 4560
4564 vma->pin_count = 0; 4561 vma->pin_count = 0;
@@ -4615,7 +4612,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4615 struct i915_address_space *vm) 4612 struct i915_address_space *vm)
4616{ 4613{
4617 struct i915_vma *vma; 4614 struct i915_vma *vma;
4618 list_for_each_entry(vma, &obj->vma_list, vma_link) { 4615 list_for_each_entry(vma, &obj->vma_list, obj_link) {
4619 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL && 4616 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4620 vma->vm == vm) 4617 vma->vm == vm)
4621 return vma; 4618 return vma;
@@ -4632,7 +4629,7 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4632 if (WARN_ONCE(!view, "no view specified")) 4629 if (WARN_ONCE(!view, "no view specified"))
4633 return ERR_PTR(-EINVAL); 4630 return ERR_PTR(-EINVAL);
4634 4631
4635 list_for_each_entry(vma, &obj->vma_list, vma_link) 4632 list_for_each_entry(vma, &obj->vma_list, obj_link)
4636 if (vma->vm == ggtt && 4633 if (vma->vm == ggtt &&
4637 i915_ggtt_view_equal(&vma->ggtt_view, view)) 4634 i915_ggtt_view_equal(&vma->ggtt_view, view))
4638 return vma; 4635 return vma;
@@ -4641,19 +4638,16 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4641 4638
4642void i915_gem_vma_destroy(struct i915_vma *vma) 4639void i915_gem_vma_destroy(struct i915_vma *vma)
4643{ 4640{
4644 struct i915_address_space *vm = NULL;
4645 WARN_ON(vma->node.allocated); 4641 WARN_ON(vma->node.allocated);
4646 4642
4647 /* Keep the vma as a placeholder in the execbuffer reservation lists */ 4643 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4648 if (!list_empty(&vma->exec_list)) 4644 if (!list_empty(&vma->exec_list))
4649 return; 4645 return;
4650 4646
4651 vm = vma->vm; 4647 if (!vma->is_ggtt)
4648 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
4652 4649
4653 if (!i915_is_ggtt(vm)) 4650 list_del(&vma->obj_link);
4654 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4655
4656 list_del(&vma->vma_link);
4657 4651
4658 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); 4652 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
4659} 4653}
@@ -4913,7 +4907,7 @@ i915_gem_init_hw(struct drm_device *dev)
4913 req = i915_gem_request_alloc(ring, NULL); 4907 req = i915_gem_request_alloc(ring, NULL);
4914 if (IS_ERR(req)) { 4908 if (IS_ERR(req)) {
4915 ret = PTR_ERR(req); 4909 ret = PTR_ERR(req);
4916 i915_gem_cleanup_engines(dev); 4910 i915_gem_cleanup_ringbuffer(dev);
4917 goto out; 4911 goto out;
4918 } 4912 }
4919 4913
@@ -4926,7 +4920,7 @@ i915_gem_init_hw(struct drm_device *dev)
4926 if (ret && ret != -EIO) { 4920 if (ret && ret != -EIO) {
4927 DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret); 4921 DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
4928 i915_gem_request_cancel(req); 4922 i915_gem_request_cancel(req);
4929 i915_gem_cleanup_engines(dev); 4923 i915_gem_cleanup_ringbuffer(dev);
4930 goto out; 4924 goto out;
4931 } 4925 }
4932 4926
@@ -4934,7 +4928,7 @@ i915_gem_init_hw(struct drm_device *dev)
4934 if (ret && ret != -EIO) { 4928 if (ret && ret != -EIO) {
4935 DRM_ERROR("Context enable ring #%d failed %d\n", i, ret); 4929 DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
4936 i915_gem_request_cancel(req); 4930 i915_gem_request_cancel(req);
4937 i915_gem_cleanup_engines(dev); 4931 i915_gem_cleanup_ringbuffer(dev);
4938 goto out; 4932 goto out;
4939 } 4933 }
4940 4934
@@ -5009,7 +5003,7 @@ out_unlock:
5009} 5003}
5010 5004
5011void 5005void
5012i915_gem_cleanup_engines(struct drm_device *dev) 5006i915_gem_cleanup_ringbuffer(struct drm_device *dev)
5013{ 5007{
5014 struct drm_i915_private *dev_priv = dev->dev_private; 5008 struct drm_i915_private *dev_priv = dev->dev_private;
5015 struct intel_engine_cs *ring; 5009 struct intel_engine_cs *ring;
@@ -5018,14 +5012,13 @@ i915_gem_cleanup_engines(struct drm_device *dev)
5018 for_each_ring(ring, dev_priv, i) 5012 for_each_ring(ring, dev_priv, i)
5019 dev_priv->gt.cleanup_ring(ring); 5013 dev_priv->gt.cleanup_ring(ring);
5020 5014
5021 if (i915.enable_execlists) { 5015 if (i915.enable_execlists)
5022 /* 5016 /*
5023 * Neither the BIOS, ourselves or any other kernel 5017 * Neither the BIOS, ourselves or any other kernel
5024 * expects the system to be in execlists mode on startup, 5018 * expects the system to be in execlists mode on startup,
5025 * so we need to reset the GPU back to legacy mode. 5019 * so we need to reset the GPU back to legacy mode.
5026 */ 5020 */
5027 intel_gpu_reset(dev); 5021 intel_gpu_reset(dev);
5028 }
5029} 5022}
5030 5023
5031static void 5024static void
@@ -5204,8 +5197,8 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5204 5197
5205 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); 5198 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5206 5199
5207 list_for_each_entry(vma, &o->vma_list, vma_link) { 5200 list_for_each_entry(vma, &o->vma_list, obj_link) {
5208 if (i915_is_ggtt(vma->vm) && 5201 if (vma->is_ggtt &&
5209 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5202 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5210 continue; 5203 continue;
5211 if (vma->vm == vm) 5204 if (vma->vm == vm)
@@ -5223,7 +5216,7 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
5223 struct i915_address_space *ggtt = i915_obj_to_ggtt(o); 5216 struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5224 struct i915_vma *vma; 5217 struct i915_vma *vma;
5225 5218
5226 list_for_each_entry(vma, &o->vma_list, vma_link) 5219 list_for_each_entry(vma, &o->vma_list, obj_link)
5227 if (vma->vm == ggtt && 5220 if (vma->vm == ggtt &&
5228 i915_ggtt_view_equal(&vma->ggtt_view, view)) 5221 i915_ggtt_view_equal(&vma->ggtt_view, view))
5229 return vma->node.start; 5222 return vma->node.start;
@@ -5237,8 +5230,8 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5237{ 5230{
5238 struct i915_vma *vma; 5231 struct i915_vma *vma;
5239 5232
5240 list_for_each_entry(vma, &o->vma_list, vma_link) { 5233 list_for_each_entry(vma, &o->vma_list, obj_link) {
5241 if (i915_is_ggtt(vma->vm) && 5234 if (vma->is_ggtt &&
5242 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5235 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5243 continue; 5236 continue;
5244 if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) 5237 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
@@ -5254,7 +5247,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
5254 struct i915_address_space *ggtt = i915_obj_to_ggtt(o); 5247 struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5255 struct i915_vma *vma; 5248 struct i915_vma *vma;
5256 5249
5257 list_for_each_entry(vma, &o->vma_list, vma_link) 5250 list_for_each_entry(vma, &o->vma_list, obj_link)
5258 if (vma->vm == ggtt && 5251 if (vma->vm == ggtt &&
5259 i915_ggtt_view_equal(&vma->ggtt_view, view) && 5252 i915_ggtt_view_equal(&vma->ggtt_view, view) &&
5260 drm_mm_node_allocated(&vma->node)) 5253 drm_mm_node_allocated(&vma->node))
@@ -5267,7 +5260,7 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5267{ 5260{
5268 struct i915_vma *vma; 5261 struct i915_vma *vma;
5269 5262
5270 list_for_each_entry(vma, &o->vma_list, vma_link) 5263 list_for_each_entry(vma, &o->vma_list, obj_link)
5271 if (drm_mm_node_allocated(&vma->node)) 5264 if (drm_mm_node_allocated(&vma->node))
5272 return true; 5265 return true;
5273 5266
@@ -5284,8 +5277,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5284 5277
5285 BUG_ON(list_empty(&o->vma_list)); 5278 BUG_ON(list_empty(&o->vma_list));
5286 5279
5287 list_for_each_entry(vma, &o->vma_list, vma_link) { 5280 list_for_each_entry(vma, &o->vma_list, obj_link) {
5288 if (i915_is_ggtt(vma->vm) && 5281 if (vma->is_ggtt &&
5289 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5282 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5290 continue; 5283 continue;
5291 if (vma->vm == vm) 5284 if (vma->vm == vm)
@@ -5297,7 +5290,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5297bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) 5290bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
5298{ 5291{
5299 struct i915_vma *vma; 5292 struct i915_vma *vma;
5300 list_for_each_entry(vma, &obj->vma_list, vma_link) 5293 list_for_each_entry(vma, &obj->vma_list, obj_link)
5301 if (vma->pin_count > 0) 5294 if (vma->pin_count > 0)
5302 return true; 5295 return true;
5303 5296
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 83a097c94911..5dd84e148bba 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -142,7 +142,7 @@ static void i915_gem_context_clean(struct intel_context *ctx)
142 return; 142 return;
143 143
144 list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list, 144 list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
145 mm_list) { 145 vm_link) {
146 if (WARN_ON(__i915_vma_unbind_no_wait(vma))) 146 if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
147 break; 147 break;
148 } 148 }
@@ -855,6 +855,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
855 if (!contexts_enabled(dev)) 855 if (!contexts_enabled(dev))
856 return -ENODEV; 856 return -ENODEV;
857 857
858 if (args->pad != 0)
859 return -EINVAL;
860
858 ret = i915_mutex_lock_interruptible(dev); 861 ret = i915_mutex_lock_interruptible(dev);
859 if (ret) 862 if (ret)
860 return ret; 863 return ret;
@@ -878,6 +881,9 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
878 struct intel_context *ctx; 881 struct intel_context *ctx;
879 int ret; 882 int ret;
880 883
884 if (args->pad != 0)
885 return -EINVAL;
886
881 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE) 887 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
882 return -ENOENT; 888 return -ENOENT;
883 889
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 07c6e4d320c9..ea1f8d1bd228 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -116,7 +116,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
116 116
117search_again: 117search_again:
118 /* First see if there is a large enough contiguous idle region... */ 118 /* First see if there is a large enough contiguous idle region... */
119 list_for_each_entry(vma, &vm->inactive_list, mm_list) { 119 list_for_each_entry(vma, &vm->inactive_list, vm_link) {
120 if (mark_free(vma, &unwind_list)) 120 if (mark_free(vma, &unwind_list))
121 goto found; 121 goto found;
122 } 122 }
@@ -125,7 +125,7 @@ search_again:
125 goto none; 125 goto none;
126 126
127 /* Now merge in the soon-to-be-expired objects... */ 127 /* Now merge in the soon-to-be-expired objects... */
128 list_for_each_entry(vma, &vm->active_list, mm_list) { 128 list_for_each_entry(vma, &vm->active_list, vm_link) {
129 if (mark_free(vma, &unwind_list)) 129 if (mark_free(vma, &unwind_list))
130 goto found; 130 goto found;
131 } 131 }
@@ -270,7 +270,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
270 WARN_ON(!list_empty(&vm->active_list)); 270 WARN_ON(!list_empty(&vm->active_list));
271 } 271 }
272 272
273 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) 273 list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
274 if (vma->pin_count == 0) 274 if (vma->pin_count == 0)
275 WARN_ON(i915_vma_unbind(vma)); 275 WARN_ON(i915_vma_unbind(vma));
276 276
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8fd00d279447..1328bc5021b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -668,7 +668,7 @@ need_reloc_mappable(struct i915_vma *vma)
668 if (entry->relocation_count == 0) 668 if (entry->relocation_count == 0)
669 return false; 669 return false;
670 670
671 if (!i915_is_ggtt(vma->vm)) 671 if (!vma->is_ggtt)
672 return false; 672 return false;
673 673
674 /* See also use_cpu_reloc() */ 674 /* See also use_cpu_reloc() */
@@ -687,8 +687,7 @@ eb_vma_misplaced(struct i915_vma *vma)
687 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 687 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
688 struct drm_i915_gem_object *obj = vma->obj; 688 struct drm_i915_gem_object *obj = vma->obj;
689 689
690 WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && 690 WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
691 !i915_is_ggtt(vma->vm));
692 691
693 if (entry->alignment && 692 if (entry->alignment &&
694 vma->node.start & (entry->alignment - 1)) 693 vma->node.start & (entry->alignment - 1))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9127f8f3561c..49e4f26b79d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2758,7 +2758,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
2758 } 2758 }
2759 vma->bound |= GLOBAL_BIND; 2759 vma->bound |= GLOBAL_BIND;
2760 __i915_vma_set_map_and_fenceable(vma); 2760 __i915_vma_set_map_and_fenceable(vma);
2761 list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); 2761 list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
2762 } 2762 }
2763 2763
2764 /* Clear any non-preallocated blocks */ 2764 /* Clear any non-preallocated blocks */
@@ -3198,6 +3198,7 @@ int i915_gem_gtt_init(struct drm_device *dev)
3198 } 3198 }
3199 3199
3200 gtt->base.dev = dev; 3200 gtt->base.dev = dev;
3201 gtt->base.is_ggtt = true;
3201 3202
3202 ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size, 3203 ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
3203 &gtt->mappable_base, &gtt->mappable_end); 3204 &gtt->mappable_base, &gtt->mappable_end);
@@ -3258,7 +3259,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3258 vm = &dev_priv->gtt.base; 3259 vm = &dev_priv->gtt.base;
3259 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 3260 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
3260 flush = false; 3261 flush = false;
3261 list_for_each_entry(vma, &obj->vma_list, vma_link) { 3262 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3262 if (vma->vm != vm) 3263 if (vma->vm != vm)
3263 continue; 3264 continue;
3264 3265
@@ -3314,19 +3315,20 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
3314 if (vma == NULL) 3315 if (vma == NULL)
3315 return ERR_PTR(-ENOMEM); 3316 return ERR_PTR(-ENOMEM);
3316 3317
3317 INIT_LIST_HEAD(&vma->vma_link); 3318 INIT_LIST_HEAD(&vma->vm_link);
3318 INIT_LIST_HEAD(&vma->mm_list); 3319 INIT_LIST_HEAD(&vma->obj_link);
3319 INIT_LIST_HEAD(&vma->exec_list); 3320 INIT_LIST_HEAD(&vma->exec_list);
3320 vma->vm = vm; 3321 vma->vm = vm;
3321 vma->obj = obj; 3322 vma->obj = obj;
3323 vma->is_ggtt = i915_is_ggtt(vm);
3322 3324
3323 if (i915_is_ggtt(vm)) 3325 if (i915_is_ggtt(vm))
3324 vma->ggtt_view = *ggtt_view; 3326 vma->ggtt_view = *ggtt_view;
3325 3327 else
3326 list_add_tail(&vma->vma_link, &obj->vma_list);
3327 if (!i915_is_ggtt(vm))
3328 i915_ppgtt_get(i915_vm_to_ppgtt(vm)); 3328 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
3329 3329
3330 list_add_tail(&vma->obj_link, &obj->vma_list);
3331
3330 return vma; 3332 return vma;
3331} 3333}
3332 3334
@@ -3598,13 +3600,9 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
3598 return 0; 3600 return 0;
3599 3601
3600 if (vma->bound == 0 && vma->vm->allocate_va_range) { 3602 if (vma->bound == 0 && vma->vm->allocate_va_range) {
3601 trace_i915_va_alloc(vma->vm,
3602 vma->node.start,
3603 vma->node.size,
3604 VM_TO_TRACE_NAME(vma->vm));
3605
3606 /* XXX: i915_vma_pin() will fix this +- hack */ 3603 /* XXX: i915_vma_pin() will fix this +- hack */
3607 vma->pin_count++; 3604 vma->pin_count++;
3605 trace_i915_va_alloc(vma);
3608 ret = vma->vm->allocate_va_range(vma->vm, 3606 ret = vma->vm->allocate_va_range(vma->vm,
3609 vma->node.start, 3607 vma->node.start,
3610 vma->node.size); 3608 vma->node.size);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 66a6da2396a2..8774f1ba46e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -183,6 +183,7 @@ struct i915_vma {
183#define GLOBAL_BIND (1<<0) 183#define GLOBAL_BIND (1<<0)
184#define LOCAL_BIND (1<<1) 184#define LOCAL_BIND (1<<1)
185 unsigned int bound : 4; 185 unsigned int bound : 4;
186 bool is_ggtt : 1;
186 187
187 /** 188 /**
188 * Support different GGTT views into the same object. 189 * Support different GGTT views into the same object.
@@ -194,9 +195,9 @@ struct i915_vma {
194 struct i915_ggtt_view ggtt_view; 195 struct i915_ggtt_view ggtt_view;
195 196
196 /** This object's place on the active/inactive lists */ 197 /** This object's place on the active/inactive lists */
197 struct list_head mm_list; 198 struct list_head vm_link;
198 199
199 struct list_head vma_link; /* Link in the object's VMA list */ 200 struct list_head obj_link; /* Link in the object's VMA list */
200 201
201 /** This vma's place in the batchbuffer or on the eviction list */ 202 /** This vma's place in the batchbuffer or on the eviction list */
202 struct list_head exec_list; 203 struct list_head exec_list;
@@ -275,6 +276,8 @@ struct i915_address_space {
275 u64 start; /* Start offset always 0 for dri2 */ 276 u64 start; /* Start offset always 0 for dri2 */
276 u64 total; /* size addr space maps (ex. 2GB for ggtt) */ 277 u64 total; /* size addr space maps (ex. 2GB for ggtt) */
277 278
279 bool is_ggtt;
280
278 struct i915_page_scratch *scratch_page; 281 struct i915_page_scratch *scratch_page;
279 struct i915_page_table *scratch_pt; 282 struct i915_page_table *scratch_pt;
280 struct i915_page_directory *scratch_pd; 283 struct i915_page_directory *scratch_pd;
@@ -330,6 +333,8 @@ struct i915_address_space {
330 u32 flags); 333 u32 flags);
331}; 334};
332 335
336#define i915_is_ggtt(V) ((V)->is_ggtt)
337
333/* The Graphics Translation Table is the way in which GEN hardware translates a 338/* The Graphics Translation Table is the way in which GEN hardware translates a
334 * Graphics Virtual Address into a Physical Address. In addition to the normal 339 * Graphics Virtual Address into a Physical Address. In addition to the normal
335 * collateral associated with any va->pa translations GEN hardware also has a 340 * collateral associated with any va->pa translations GEN hardware also has a
@@ -418,7 +423,7 @@ static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
418static inline uint32_t i915_pte_count(uint64_t addr, size_t length, 423static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
419 uint32_t pde_shift) 424 uint32_t pde_shift)
420{ 425{
421 const uint64_t mask = ~((1 << pde_shift) - 1); 426 const uint64_t mask = ~((1ULL << pde_shift) - 1);
422 uint64_t end; 427 uint64_t end;
423 428
424 WARN_ON(length == 0); 429 WARN_ON(length == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 58c1e592bbdb..d3c473ffb90a 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -52,7 +52,7 @@ static int num_vma_bound(struct drm_i915_gem_object *obj)
52 struct i915_vma *vma; 52 struct i915_vma *vma;
53 int count = 0; 53 int count = 0;
54 54
55 list_for_each_entry(vma, &obj->vma_list, vma_link) { 55 list_for_each_entry(vma, &obj->vma_list, obj_link) {
56 if (drm_mm_node_allocated(&vma->node)) 56 if (drm_mm_node_allocated(&vma->node))
57 count++; 57 count++;
58 if (vma->pin_count) 58 if (vma->pin_count)
@@ -176,7 +176,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
176 176
177 /* For the unbound phase, this should be a no-op! */ 177 /* For the unbound phase, this should be a no-op! */
178 list_for_each_entry_safe(vma, v, 178 list_for_each_entry_safe(vma, v,
179 &obj->vma_list, vma_link) 179 &obj->vma_list, obj_link)
180 if (i915_vma_unbind(vma)) 180 if (i915_vma_unbind(vma))
181 break; 181 break;
182 182
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ba1a00d815d3..2e6e9fb6f80d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -638,6 +638,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
638 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 638 if (!drm_mm_initialized(&dev_priv->mm.stolen))
639 return NULL; 639 return NULL;
640 640
641 lockdep_assert_held(&dev->struct_mutex);
642
641 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", 643 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
642 stolen_offset, gtt_offset, size); 644 stolen_offset, gtt_offset, size);
643 645
@@ -695,7 +697,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
695 697
696 vma->bound |= GLOBAL_BIND; 698 vma->bound |= GLOBAL_BIND;
697 __i915_vma_set_map_and_fenceable(vma); 699 __i915_vma_set_map_and_fenceable(vma);
698 list_add_tail(&vma->mm_list, &ggtt->inactive_list); 700 list_add_tail(&vma->vm_link, &ggtt->inactive_list);
699 } 701 }
700 702
701 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); 703 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 7107f2fd38f5..4b09c840d493 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -78,7 +78,7 @@ static void cancel_userptr(struct work_struct *work)
78 was_interruptible = dev_priv->mm.interruptible; 78 was_interruptible = dev_priv->mm.interruptible;
79 dev_priv->mm.interruptible = false; 79 dev_priv->mm.interruptible = false;
80 80
81 list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) { 81 list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
82 int ret = i915_vma_unbind(vma); 82 int ret = i915_vma_unbind(vma);
83 WARN_ON(ret && ret != -EIO); 83 WARN_ON(ret && ret != -EIO);
84 } 84 }
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 978c026963b8..831895b8cb75 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -736,7 +736,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
736 struct i915_vma *vma; 736 struct i915_vma *vma;
737 int i = 0; 737 int i = 0;
738 738
739 list_for_each_entry(vma, head, mm_list) { 739 list_for_each_entry(vma, head, vm_link) {
740 capture_bo(err++, vma); 740 capture_bo(err++, vma);
741 if (++i == count) 741 if (++i == count)
742 break; 742 break;
@@ -759,7 +759,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
759 if (err == last) 759 if (err == last)
760 break; 760 break;
761 761
762 list_for_each_entry(vma, &obj->vma_list, vma_link) 762 list_for_each_entry(vma, &obj->vma_list, obj_link)
763 if (vma->vm == vm && vma->pin_count > 0) 763 if (vma->vm == vm && vma->pin_count > 0)
764 capture_bo(err++, vma); 764 capture_bo(err++, vma);
765 } 765 }
@@ -1127,12 +1127,12 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
1127 int i; 1127 int i;
1128 1128
1129 i = 0; 1129 i = 0;
1130 list_for_each_entry(vma, &vm->active_list, mm_list) 1130 list_for_each_entry(vma, &vm->active_list, vm_link)
1131 i++; 1131 i++;
1132 error->active_bo_count[ndx] = i; 1132 error->active_bo_count[ndx] = i;
1133 1133
1134 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 1134 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1135 list_for_each_entry(vma, &obj->vma_list, vma_link) 1135 list_for_each_entry(vma, &obj->vma_list, obj_link)
1136 if (vma->vm == vm && vma->pin_count > 0) 1136 if (vma->vm == vm && vma->pin_count > 0)
1137 i++; 1137 i++;
1138 } 1138 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 25a89373df63..d1a46ef5ab3f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1651,6 +1651,12 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1651 int pipe; 1651 int pipe;
1652 1652
1653 spin_lock(&dev_priv->irq_lock); 1653 spin_lock(&dev_priv->irq_lock);
1654
1655 if (!dev_priv->display_irqs_enabled) {
1656 spin_unlock(&dev_priv->irq_lock);
1657 return;
1658 }
1659
1654 for_each_pipe(dev_priv, pipe) { 1660 for_each_pipe(dev_priv, pipe) {
1655 i915_reg_t reg; 1661 i915_reg_t reg;
1656 u32 mask, iir_bit = 0; 1662 u32 mask, iir_bit = 0;
@@ -3343,21 +3349,28 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3343 unsigned int pipe_mask) 3349 unsigned int pipe_mask)
3344{ 3350{
3345 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3351 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3352 enum pipe pipe;
3353
3354 spin_lock_irq(&dev_priv->irq_lock);
3355 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3356 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3357 dev_priv->de_irq_mask[pipe],
3358 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3359 spin_unlock_irq(&dev_priv->irq_lock);
3360}
3361
3362void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3363 unsigned int pipe_mask)
3364{
3365 enum pipe pipe;
3346 3366
3347 spin_lock_irq(&dev_priv->irq_lock); 3367 spin_lock_irq(&dev_priv->irq_lock);
3348 if (pipe_mask & 1 << PIPE_A) 3368 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3349 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, 3369 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3350 dev_priv->de_irq_mask[PIPE_A],
3351 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3352 if (pipe_mask & 1 << PIPE_B)
3353 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3354 dev_priv->de_irq_mask[PIPE_B],
3355 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3356 if (pipe_mask & 1 << PIPE_C)
3357 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3358 dev_priv->de_irq_mask[PIPE_C],
3359 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3360 spin_unlock_irq(&dev_priv->irq_lock); 3370 spin_unlock_irq(&dev_priv->irq_lock);
3371
3372 /* make sure we're done processing display irqs */
3373 synchronize_irq(dev_priv->dev->irq);
3361} 3374}
3362 3375
3363static void cherryview_irq_preinstall(struct drm_device *dev) 3376static void cherryview_irq_preinstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8b9f36814165..278c9c40c2e0 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -38,7 +38,7 @@ struct i915_params i915 __read_mostly = {
38 .enable_execlists = -1, 38 .enable_execlists = -1,
39 .enable_hangcheck = true, 39 .enable_hangcheck = true,
40 .enable_ppgtt = -1, 40 .enable_ppgtt = -1,
41 .enable_psr = 0, 41 .enable_psr = -1,
42 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), 42 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
43 .disable_power_well = -1, 43 .disable_power_well = -1,
44 .enable_ips = 1, 44 .enable_ips = 1,
@@ -49,7 +49,6 @@ struct i915_params i915 __read_mostly = {
49 .invert_brightness = 0, 49 .invert_brightness = 0,
50 .disable_display = 0, 50 .disable_display = 0,
51 .enable_cmd_parser = 1, 51 .enable_cmd_parser = 1,
52 .disable_vtd_wa = 0,
53 .use_mmio_flip = 0, 52 .use_mmio_flip = 0,
54 .mmio_debug = 0, 53 .mmio_debug = 0,
55 .verbose_state_checks = 1, 54 .verbose_state_checks = 1,
@@ -92,7 +91,7 @@ MODULE_PARM_DESC(enable_fbc,
92 "Enable frame buffer compression for power savings " 91 "Enable frame buffer compression for power savings "
93 "(default: -1 (use per-chip default))"); 92 "(default: -1 (use per-chip default))");
94 93
95module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600); 94module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400);
96MODULE_PARM_DESC(lvds_channel_mode, 95MODULE_PARM_DESC(lvds_channel_mode,
97 "Specify LVDS channel mode " 96 "Specify LVDS channel mode "
98 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); 97 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
@@ -102,7 +101,7 @@ MODULE_PARM_DESC(lvds_use_ssc,
102 "Use Spread Spectrum Clock with panels [LVDS/eDP] " 101 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
103 "(default: auto from VBT)"); 102 "(default: auto from VBT)");
104 103
105module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600); 104module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400);
106MODULE_PARM_DESC(vbt_sdvo_panel_type, 105MODULE_PARM_DESC(vbt_sdvo_panel_type,
107 "Override/Ignore selection of SDVO panel mode in the VBT " 106 "Override/Ignore selection of SDVO panel mode in the VBT "
108 "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); 107 "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
@@ -128,9 +127,10 @@ MODULE_PARM_DESC(enable_execlists,
128 127
129module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600); 128module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
130MODULE_PARM_DESC(enable_psr, "Enable PSR " 129MODULE_PARM_DESC(enable_psr, "Enable PSR "
131 "(0=disabled [default], 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode)"); 130 "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
131 "Default: -1 (use per-chip default)");
132 132
133module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600); 133module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
134MODULE_PARM_DESC(preliminary_hw_support, 134MODULE_PARM_DESC(preliminary_hw_support,
135 "Enable preliminary hardware support."); 135 "Enable preliminary hardware support.");
136 136
@@ -164,12 +164,9 @@ MODULE_PARM_DESC(invert_brightness,
164 "to dri-devel@lists.freedesktop.org, if your machine needs it. " 164 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
165 "It will then be included in an upcoming module version."); 165 "It will then be included in an upcoming module version.");
166 166
167module_param_named(disable_display, i915.disable_display, bool, 0600); 167module_param_named(disable_display, i915.disable_display, bool, 0400);
168MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); 168MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
169 169
170module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
171MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
172
173module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); 170module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
174MODULE_PARM_DESC(enable_cmd_parser, 171MODULE_PARM_DESC(enable_cmd_parser,
175 "Enable command parsing (1=enabled [default], 0=disabled)"); 172 "Enable command parsing (1=enabled [default], 0=disabled)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 529929073120..bd5026b15d3e 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -56,7 +56,6 @@ struct i915_params {
56 bool load_detect_test; 56 bool load_detect_test;
57 bool reset; 57 bool reset;
58 bool disable_display; 58 bool disable_display;
59 bool disable_vtd_wa;
60 bool enable_guc_submission; 59 bool enable_guc_submission;
61 bool verbose_state_checks; 60 bool verbose_state_checks;
62 bool nuclear_pageflip; 61 bool nuclear_pageflip;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 144586ee74d5..f76cbf3e5d1e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3296,19 +3296,20 @@ enum skl_disp_power_wells {
3296 3296
3297#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114) 3297#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
3298/* 3298/*
3299 * HDMI/DP bits are gen4+ 3299 * HDMI/DP bits are g4x+
3300 * 3300 *
3301 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. 3301 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
3302 * Please check the detailed lore in the commit message for experimental 3302 * Please check the detailed lore in the commit message for experimental
3303 * evidence. 3303 * evidence.
3304 */ 3304 */
3305#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29) 3305/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
3306#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
3307#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
3308#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
3309/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
3310#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
3306#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) 3311#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
3307#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27) 3312#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
3308/* VLV DP/HDMI bits again match Bspec */
3309#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
3310#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
3311#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
3312#define PORTD_HOTPLUG_INT_STATUS (3 << 21) 3313#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
3313#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21) 3314#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
3314#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21) 3315#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
@@ -7567,6 +7568,7 @@ enum skl_disp_power_wells {
7567#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3 7568#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
7568 7569
7569#define DC_STATE_DEBUG _MMIO(0x45520) 7570#define DC_STATE_DEBUG _MMIO(0x45520)
7571#define DC_STATE_DEBUG_MASK_CORES (1<<0)
7570#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1) 7572#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
7571 7573
7572/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, 7574/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 52b2d409945d..fa09e5581137 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -175,35 +175,24 @@ TRACE_EVENT(i915_vma_unbind,
175 __entry->obj, __entry->offset, __entry->size, __entry->vm) 175 __entry->obj, __entry->offset, __entry->size, __entry->vm)
176); 176);
177 177
178#define VM_TO_TRACE_NAME(vm) \ 178TRACE_EVENT(i915_va_alloc,
179 (i915_is_ggtt(vm) ? "G" : \ 179 TP_PROTO(struct i915_vma *vma),
180 "P") 180 TP_ARGS(vma),
181
182DECLARE_EVENT_CLASS(i915_va,
183 TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
184 TP_ARGS(vm, start, length, name),
185 181
186 TP_STRUCT__entry( 182 TP_STRUCT__entry(
187 __field(struct i915_address_space *, vm) 183 __field(struct i915_address_space *, vm)
188 __field(u64, start) 184 __field(u64, start)
189 __field(u64, end) 185 __field(u64, end)
190 __string(name, name)
191 ), 186 ),
192 187
193 TP_fast_assign( 188 TP_fast_assign(
194 __entry->vm = vm; 189 __entry->vm = vma->vm;
195 __entry->start = start; 190 __entry->start = vma->node.start;
196 __entry->end = start + length - 1; 191 __entry->end = vma->node.start + vma->node.size - 1;
197 __assign_str(name, name);
198 ), 192 ),
199 193
200 TP_printk("vm=%p (%s), 0x%llx-0x%llx", 194 TP_printk("vm=%p (%c), 0x%llx-0x%llx",
201 __entry->vm, __get_str(name), __entry->start, __entry->end) 195 __entry->vm, i915_is_ggtt(__entry->vm) ? 'G' : 'P', __entry->start, __entry->end)
202);
203
204DEFINE_EVENT(i915_va, i915_va_alloc,
205 TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
206 TP_ARGS(vm, start, length, name)
207); 196);
208 197
209DECLARE_EVENT_CLASS(i915_px_entry, 198DECLARE_EVENT_CLASS(i915_px_entry,
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 4625f8a9ba12..8e579a8505ac 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -97,6 +97,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
97 crtc_state->disable_lp_wm = false; 97 crtc_state->disable_lp_wm = false;
98 crtc_state->disable_cxsr = false; 98 crtc_state->disable_cxsr = false;
99 crtc_state->wm_changed = false; 99 crtc_state->wm_changed = false;
100 crtc_state->fb_changed = false;
100 101
101 return &crtc_state->base; 102 return &crtc_state->base;
102} 103}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ad5dfabc452e..505fc5cf26f8 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -71,22 +71,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
71 struct intel_crt *crt = intel_encoder_to_crt(encoder); 71 struct intel_crt *crt = intel_encoder_to_crt(encoder);
72 enum intel_display_power_domain power_domain; 72 enum intel_display_power_domain power_domain;
73 u32 tmp; 73 u32 tmp;
74 bool ret;
74 75
75 power_domain = intel_display_port_power_domain(encoder); 76 power_domain = intel_display_port_power_domain(encoder);
76 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 77 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
77 return false; 78 return false;
78 79
80 ret = false;
81
79 tmp = I915_READ(crt->adpa_reg); 82 tmp = I915_READ(crt->adpa_reg);
80 83
81 if (!(tmp & ADPA_DAC_ENABLE)) 84 if (!(tmp & ADPA_DAC_ENABLE))
82 return false; 85 goto out;
83 86
84 if (HAS_PCH_CPT(dev)) 87 if (HAS_PCH_CPT(dev))
85 *pipe = PORT_TO_PIPE_CPT(tmp); 88 *pipe = PORT_TO_PIPE_CPT(tmp);
86 else 89 else
87 *pipe = PORT_TO_PIPE(tmp); 90 *pipe = PORT_TO_PIPE(tmp);
88 91
89 return true; 92 ret = true;
93out:
94 intel_display_power_put(dev_priv, power_domain);
95
96 return ret;
90} 97}
91 98
92static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) 99static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
@@ -206,9 +213,7 @@ static void pch_post_disable_crt(struct intel_encoder *encoder)
206 213
207static void intel_enable_crt(struct intel_encoder *encoder) 214static void intel_enable_crt(struct intel_encoder *encoder)
208{ 215{
209 struct intel_crt *crt = intel_encoder_to_crt(encoder); 216 intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
210
211 intel_crt_set_dpms(encoder, crt->connector->base.dpms);
212} 217}
213 218
214static enum drm_mode_status 219static enum drm_mode_status
@@ -473,11 +478,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
473} 478}
474 479
475static enum drm_connector_status 480static enum drm_connector_status
476intel_crt_load_detect(struct intel_crt *crt) 481intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
477{ 482{
478 struct drm_device *dev = crt->base.base.dev; 483 struct drm_device *dev = crt->base.base.dev;
479 struct drm_i915_private *dev_priv = dev->dev_private; 484 struct drm_i915_private *dev_priv = dev->dev_private;
480 uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
481 uint32_t save_bclrpat; 485 uint32_t save_bclrpat;
482 uint32_t save_vtotal; 486 uint32_t save_vtotal;
483 uint32_t vtotal, vactive; 487 uint32_t vtotal, vactive;
@@ -646,7 +650,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
646 if (intel_crt_detect_ddc(connector)) 650 if (intel_crt_detect_ddc(connector))
647 status = connector_status_connected; 651 status = connector_status_connected;
648 else if (INTEL_INFO(dev)->gen < 4) 652 else if (INTEL_INFO(dev)->gen < 4)
649 status = intel_crt_load_detect(crt); 653 status = intel_crt_load_detect(crt,
654 to_intel_crtc(connector->state->crtc)->pipe);
650 else 655 else
651 status = connector_status_unknown; 656 status = connector_status_unknown;
652 intel_release_load_detect_pipe(connector, &tmp, &ctx); 657 intel_release_load_detect_pipe(connector, &tmp, &ctx);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 2a7ec3141c8d..902054efb902 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -220,19 +220,19 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de
220 * Everytime display comes back from low power state this function is called to 220 * Everytime display comes back from low power state this function is called to
221 * copy the firmware from internal memory to registers. 221 * copy the firmware from internal memory to registers.
222 */ 222 */
223void intel_csr_load_program(struct drm_i915_private *dev_priv) 223bool intel_csr_load_program(struct drm_i915_private *dev_priv)
224{ 224{
225 u32 *payload = dev_priv->csr.dmc_payload; 225 u32 *payload = dev_priv->csr.dmc_payload;
226 uint32_t i, fw_size; 226 uint32_t i, fw_size;
227 227
228 if (!IS_GEN9(dev_priv)) { 228 if (!IS_GEN9(dev_priv)) {
229 DRM_ERROR("No CSR support available for this platform\n"); 229 DRM_ERROR("No CSR support available for this platform\n");
230 return; 230 return false;
231 } 231 }
232 232
233 if (!dev_priv->csr.dmc_payload) { 233 if (!dev_priv->csr.dmc_payload) {
234 DRM_ERROR("Tried to program CSR with empty payload\n"); 234 DRM_ERROR("Tried to program CSR with empty payload\n");
235 return; 235 return false;
236 } 236 }
237 237
238 fw_size = dev_priv->csr.dmc_fw_size; 238 fw_size = dev_priv->csr.dmc_fw_size;
@@ -243,6 +243,10 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
243 I915_WRITE(dev_priv->csr.mmioaddr[i], 243 I915_WRITE(dev_priv->csr.mmioaddr[i],
244 dev_priv->csr.mmiodata[i]); 244 dev_priv->csr.mmiodata[i]);
245 } 245 }
246
247 dev_priv->csr.dc_state = 0;
248
249 return true;
246} 250}
247 251
248static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, 252static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cdf2e14aa45d..21a9b83f3bfc 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1913,13 +1913,16 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1913 enum transcoder cpu_transcoder; 1913 enum transcoder cpu_transcoder;
1914 enum intel_display_power_domain power_domain; 1914 enum intel_display_power_domain power_domain;
1915 uint32_t tmp; 1915 uint32_t tmp;
1916 bool ret;
1916 1917
1917 power_domain = intel_display_port_power_domain(intel_encoder); 1918 power_domain = intel_display_port_power_domain(intel_encoder);
1918 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 1919 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
1919 return false; 1920 return false;
1920 1921
1921 if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) 1922 if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
1922 return false; 1923 ret = false;
1924 goto out;
1925 }
1923 1926
1924 if (port == PORT_A) 1927 if (port == PORT_A)
1925 cpu_transcoder = TRANSCODER_EDP; 1928 cpu_transcoder = TRANSCODER_EDP;
@@ -1931,23 +1934,33 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1931 switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { 1934 switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
1932 case TRANS_DDI_MODE_SELECT_HDMI: 1935 case TRANS_DDI_MODE_SELECT_HDMI:
1933 case TRANS_DDI_MODE_SELECT_DVI: 1936 case TRANS_DDI_MODE_SELECT_DVI:
1934 return (type == DRM_MODE_CONNECTOR_HDMIA); 1937 ret = type == DRM_MODE_CONNECTOR_HDMIA;
1938 break;
1935 1939
1936 case TRANS_DDI_MODE_SELECT_DP_SST: 1940 case TRANS_DDI_MODE_SELECT_DP_SST:
1937 if (type == DRM_MODE_CONNECTOR_eDP) 1941 ret = type == DRM_MODE_CONNECTOR_eDP ||
1938 return true; 1942 type == DRM_MODE_CONNECTOR_DisplayPort;
1939 return (type == DRM_MODE_CONNECTOR_DisplayPort); 1943 break;
1944
1940 case TRANS_DDI_MODE_SELECT_DP_MST: 1945 case TRANS_DDI_MODE_SELECT_DP_MST:
1941 /* if the transcoder is in MST state then 1946 /* if the transcoder is in MST state then
1942 * connector isn't connected */ 1947 * connector isn't connected */
1943 return false; 1948 ret = false;
1949 break;
1944 1950
1945 case TRANS_DDI_MODE_SELECT_FDI: 1951 case TRANS_DDI_MODE_SELECT_FDI:
1946 return (type == DRM_MODE_CONNECTOR_VGA); 1952 ret = type == DRM_MODE_CONNECTOR_VGA;
1953 break;
1947 1954
1948 default: 1955 default:
1949 return false; 1956 ret = false;
1957 break;
1950 } 1958 }
1959
1960out:
1961 intel_display_power_put(dev_priv, power_domain);
1962
1963 return ret;
1951} 1964}
1952 1965
1953bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 1966bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -1959,15 +1972,18 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
1959 enum intel_display_power_domain power_domain; 1972 enum intel_display_power_domain power_domain;
1960 u32 tmp; 1973 u32 tmp;
1961 int i; 1974 int i;
1975 bool ret;
1962 1976
1963 power_domain = intel_display_port_power_domain(encoder); 1977 power_domain = intel_display_port_power_domain(encoder);
1964 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 1978 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
1965 return false; 1979 return false;
1966 1980
1981 ret = false;
1982
1967 tmp = I915_READ(DDI_BUF_CTL(port)); 1983 tmp = I915_READ(DDI_BUF_CTL(port));
1968 1984
1969 if (!(tmp & DDI_BUF_CTL_ENABLE)) 1985 if (!(tmp & DDI_BUF_CTL_ENABLE))
1970 return false; 1986 goto out;
1971 1987
1972 if (port == PORT_A) { 1988 if (port == PORT_A) {
1973 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 1989 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -1985,25 +2001,32 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
1985 break; 2001 break;
1986 } 2002 }
1987 2003
1988 return true; 2004 ret = true;
1989 } else {
1990 for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
1991 tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
1992 2005
1993 if ((tmp & TRANS_DDI_PORT_MASK) 2006 goto out;
1994 == TRANS_DDI_SELECT_PORT(port)) { 2007 }
1995 if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
1996 return false;
1997 2008
1998 *pipe = i; 2009 for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
1999 return true; 2010 tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
2000 } 2011
2012 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
2013 if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
2014 TRANS_DDI_MODE_SELECT_DP_MST)
2015 goto out;
2016
2017 *pipe = i;
2018 ret = true;
2019
2020 goto out;
2001 } 2021 }
2002 } 2022 }
2003 2023
2004 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); 2024 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
2005 2025
2006 return false; 2026out:
2027 intel_display_power_put(dev_priv, power_domain);
2028
2029 return ret;
2007} 2030}
2008 2031
2009void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) 2032void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
@@ -2449,12 +2472,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
2449{ 2472{
2450 uint32_t val; 2473 uint32_t val;
2451 2474
2452 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2475 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2453 return false; 2476 return false;
2454 2477
2455 val = I915_READ(WRPLL_CTL(pll->id)); 2478 val = I915_READ(WRPLL_CTL(pll->id));
2456 hw_state->wrpll = val; 2479 hw_state->wrpll = val;
2457 2480
2481 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2482
2458 return val & WRPLL_PLL_ENABLE; 2483 return val & WRPLL_PLL_ENABLE;
2459} 2484}
2460 2485
@@ -2464,12 +2489,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
2464{ 2489{
2465 uint32_t val; 2490 uint32_t val;
2466 2491
2467 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2492 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2468 return false; 2493 return false;
2469 2494
2470 val = I915_READ(SPLL_CTL); 2495 val = I915_READ(SPLL_CTL);
2471 hw_state->spll = val; 2496 hw_state->spll = val;
2472 2497
2498 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2499
2473 return val & SPLL_PLL_ENABLE; 2500 return val & SPLL_PLL_ENABLE;
2474} 2501}
2475 2502
@@ -2586,16 +2613,19 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2586 uint32_t val; 2613 uint32_t val;
2587 unsigned int dpll; 2614 unsigned int dpll;
2588 const struct skl_dpll_regs *regs = skl_dpll_regs; 2615 const struct skl_dpll_regs *regs = skl_dpll_regs;
2616 bool ret;
2589 2617
2590 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2618 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2591 return false; 2619 return false;
2592 2620
2621 ret = false;
2622
2593 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */ 2623 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
2594 dpll = pll->id + 1; 2624 dpll = pll->id + 1;
2595 2625
2596 val = I915_READ(regs[pll->id].ctl); 2626 val = I915_READ(regs[pll->id].ctl);
2597 if (!(val & LCPLL_PLL_ENABLE)) 2627 if (!(val & LCPLL_PLL_ENABLE))
2598 return false; 2628 goto out;
2599 2629
2600 val = I915_READ(DPLL_CTRL1); 2630 val = I915_READ(DPLL_CTRL1);
2601 hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f; 2631 hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
@@ -2605,8 +2635,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2605 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); 2635 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
2606 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2); 2636 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
2607 } 2637 }
2638 ret = true;
2608 2639
2609 return true; 2640out:
2641 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2642
2643 return ret;
2610} 2644}
2611 2645
2612static void skl_shared_dplls_init(struct drm_i915_private *dev_priv) 2646static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -2873,13 +2907,16 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2873{ 2907{
2874 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ 2908 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
2875 uint32_t val; 2909 uint32_t val;
2910 bool ret;
2876 2911
2877 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2912 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2878 return false; 2913 return false;
2879 2914
2915 ret = false;
2916
2880 val = I915_READ(BXT_PORT_PLL_ENABLE(port)); 2917 val = I915_READ(BXT_PORT_PLL_ENABLE(port));
2881 if (!(val & PORT_PLL_ENABLE)) 2918 if (!(val & PORT_PLL_ENABLE))
2882 return false; 2919 goto out;
2883 2920
2884 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port)); 2921 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
2885 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; 2922 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
@@ -2926,7 +2963,12 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2926 I915_READ(BXT_PORT_PCS_DW12_LN23(port))); 2963 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
2927 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; 2964 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2928 2965
2929 return true; 2966 ret = true;
2967
2968out:
2969 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2970
2971 return ret;
2930} 2972}
2931 2973
2932static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv) 2974static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -3061,11 +3103,15 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
3061{ 3103{
3062 u32 temp; 3104 u32 temp;
3063 3105
3064 if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { 3106 if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
3065 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 3107 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
3108
3109 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
3110
3066 if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) 3111 if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
3067 return true; 3112 return true;
3068 } 3113 }
3114
3069 return false; 3115 return false;
3070} 3116}
3071 3117
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 836bbdc239b6..8b7b8b64b008 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1343,18 +1343,21 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1343 bool cur_state; 1343 bool cur_state;
1344 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1344 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1345 pipe); 1345 pipe);
1346 enum intel_display_power_domain power_domain;
1346 1347
1347 /* if we need the pipe quirk it must be always on */ 1348 /* if we need the pipe quirk it must be always on */
1348 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1349 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1349 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1350 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1350 state = true; 1351 state = true;
1351 1352
1352 if (!intel_display_power_is_enabled(dev_priv, 1353 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1353 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1354 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
1354 cur_state = false;
1355 } else {
1356 u32 val = I915_READ(PIPECONF(cpu_transcoder)); 1355 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1357 cur_state = !!(val & PIPECONF_ENABLE); 1356 cur_state = !!(val & PIPECONF_ENABLE);
1357
1358 intel_display_power_put(dev_priv, power_domain);
1359 } else {
1360 cur_state = false;
1358 } 1361 }
1359 1362
1360 I915_STATE_WARN(cur_state != state, 1363 I915_STATE_WARN(cur_state != state,
@@ -2551,12 +2554,16 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2551 if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size) 2554 if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
2552 return false; 2555 return false;
2553 2556
2557 mutex_lock(&dev->struct_mutex);
2558
2554 obj = i915_gem_object_create_stolen_for_preallocated(dev, 2559 obj = i915_gem_object_create_stolen_for_preallocated(dev,
2555 base_aligned, 2560 base_aligned,
2556 base_aligned, 2561 base_aligned,
2557 size_aligned); 2562 size_aligned);
2558 if (!obj) 2563 if (!obj) {
2564 mutex_unlock(&dev->struct_mutex);
2559 return false; 2565 return false;
2566 }
2560 2567
2561 obj->tiling_mode = plane_config->tiling; 2568 obj->tiling_mode = plane_config->tiling;
2562 if (obj->tiling_mode == I915_TILING_X) 2569 if (obj->tiling_mode == I915_TILING_X)
@@ -2569,12 +2576,12 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2569 mode_cmd.modifier[0] = fb->modifier[0]; 2576 mode_cmd.modifier[0] = fb->modifier[0];
2570 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 2577 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2571 2578
2572 mutex_lock(&dev->struct_mutex);
2573 if (intel_framebuffer_init(dev, to_intel_framebuffer(fb), 2579 if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2574 &mode_cmd, obj)) { 2580 &mode_cmd, obj)) {
2575 DRM_DEBUG_KMS("intel fb init failed\n"); 2581 DRM_DEBUG_KMS("intel fb init failed\n");
2576 goto out_unref_obj; 2582 goto out_unref_obj;
2577 } 2583 }
2584
2578 mutex_unlock(&dev->struct_mutex); 2585 mutex_unlock(&dev->struct_mutex);
2579 2586
2580 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 2587 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
@@ -4785,9 +4792,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
4785 to_intel_crtc_state(crtc->base.state); 4792 to_intel_crtc_state(crtc->base.state);
4786 struct drm_device *dev = crtc->base.dev; 4793 struct drm_device *dev = crtc->base.dev;
4787 4794
4788 if (atomic->wait_vblank)
4789 intel_wait_for_vblank(dev, crtc->pipe);
4790
4791 intel_frontbuffer_flip(dev, atomic->fb_bits); 4795 intel_frontbuffer_flip(dev, atomic->fb_bits);
4792 4796
4793 crtc->wm.cxsr_allowed = true; 4797 crtc->wm.cxsr_allowed = true;
@@ -5301,31 +5305,37 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5301 } 5305 }
5302} 5306}
5303 5307
5304static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) 5308static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
5309 struct intel_crtc_state *crtc_state)
5305{ 5310{
5306 struct drm_device *dev = crtc->dev; 5311 struct drm_device *dev = crtc->dev;
5307 struct intel_encoder *intel_encoder; 5312 struct drm_encoder *encoder;
5308 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5313 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5309 enum pipe pipe = intel_crtc->pipe; 5314 enum pipe pipe = intel_crtc->pipe;
5310 unsigned long mask; 5315 unsigned long mask;
5311 enum transcoder transcoder = intel_crtc->config->cpu_transcoder; 5316 enum transcoder transcoder = crtc_state->cpu_transcoder;
5312 5317
5313 if (!crtc->state->active) 5318 if (!crtc_state->base.active)
5314 return 0; 5319 return 0;
5315 5320
5316 mask = BIT(POWER_DOMAIN_PIPE(pipe)); 5321 mask = BIT(POWER_DOMAIN_PIPE(pipe));
5317 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); 5322 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5318 if (intel_crtc->config->pch_pfit.enabled || 5323 if (crtc_state->pch_pfit.enabled ||
5319 intel_crtc->config->pch_pfit.force_thru) 5324 crtc_state->pch_pfit.force_thru)
5320 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 5325 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5321 5326
5322 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 5327 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5328 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5329
5323 mask |= BIT(intel_display_port_power_domain(intel_encoder)); 5330 mask |= BIT(intel_display_port_power_domain(intel_encoder));
5331 }
5324 5332
5325 return mask; 5333 return mask;
5326} 5334}
5327 5335
5328static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc) 5336static unsigned long
5337modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5338 struct intel_crtc_state *crtc_state)
5329{ 5339{
5330 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 5340 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5331 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5341 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5333,7 +5343,8 @@ static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
5333 unsigned long domains, new_domains, old_domains; 5343 unsigned long domains, new_domains, old_domains;
5334 5344
5335 old_domains = intel_crtc->enabled_power_domains; 5345 old_domains = intel_crtc->enabled_power_domains;
5336 intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc); 5346 intel_crtc->enabled_power_domains = new_domains =
5347 get_crtc_power_domains(crtc, crtc_state);
5337 5348
5338 domains = new_domains & ~old_domains; 5349 domains = new_domains & ~old_domains;
5339 5350
@@ -5352,31 +5363,6 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5352 intel_display_power_put(dev_priv, domain); 5363 intel_display_power_put(dev_priv, domain);
5353} 5364}
5354 5365
5355static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
5356{
5357 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5358 struct drm_device *dev = state->dev;
5359 struct drm_i915_private *dev_priv = dev->dev_private;
5360 unsigned long put_domains[I915_MAX_PIPES] = {};
5361 struct drm_crtc_state *crtc_state;
5362 struct drm_crtc *crtc;
5363 int i;
5364
5365 for_each_crtc_in_state(state, crtc, crtc_state, i) {
5366 if (needs_modeset(crtc->state))
5367 put_domains[to_intel_crtc(crtc)->pipe] =
5368 modeset_get_crtc_power_domains(crtc);
5369 }
5370
5371 if (dev_priv->display.modeset_commit_cdclk &&
5372 intel_state->dev_cdclk != dev_priv->cdclk_freq)
5373 dev_priv->display.modeset_commit_cdclk(state);
5374
5375 for (i = 0; i < I915_MAX_PIPES; i++)
5376 if (put_domains[i])
5377 modeset_put_power_domains(dev_priv, put_domains[i]);
5378}
5379
5380static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) 5366static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5381{ 5367{
5382 int max_cdclk_freq = dev_priv->max_cdclk_freq; 5368 int max_cdclk_freq = dev_priv->max_cdclk_freq;
@@ -6039,8 +6025,7 @@ static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
6039 return 144000; 6025 return 144000;
6040} 6026}
6041 6027
6042/* Compute the max pixel clock for new configuration. Uses atomic state if 6028/* Compute the max pixel clock for new configuration. */
6043 * that's non-NULL, look at current state otherwise. */
6044static int intel_mode_max_pixclk(struct drm_device *dev, 6029static int intel_mode_max_pixclk(struct drm_device *dev,
6045 struct drm_atomic_state *state) 6030 struct drm_atomic_state *state)
6046{ 6031{
@@ -6063,9 +6048,6 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
6063 intel_state->min_pixclk[i] = pixclk; 6048 intel_state->min_pixclk[i] = pixclk;
6064 } 6049 }
6065 6050
6066 if (!intel_state->active_crtcs)
6067 return 0;
6068
6069 for_each_pipe(dev_priv, pipe) 6051 for_each_pipe(dev_priv, pipe)
6070 max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk); 6052 max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
6071 6053
@@ -6393,55 +6375,16 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6393 */ 6375 */
6394int intel_display_suspend(struct drm_device *dev) 6376int intel_display_suspend(struct drm_device *dev)
6395{ 6377{
6396 struct drm_mode_config *config = &dev->mode_config; 6378 struct drm_i915_private *dev_priv = to_i915(dev);
6397 struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
6398 struct drm_atomic_state *state; 6379 struct drm_atomic_state *state;
6399 struct drm_crtc *crtc; 6380 int ret;
6400 unsigned crtc_mask = 0;
6401 int ret = 0;
6402
6403 if (WARN_ON(!ctx))
6404 return 0;
6405
6406 lockdep_assert_held(&ctx->ww_ctx);
6407 state = drm_atomic_state_alloc(dev);
6408 if (WARN_ON(!state))
6409 return -ENOMEM;
6410
6411 state->acquire_ctx = ctx;
6412 state->allow_modeset = true;
6413
6414 for_each_crtc(dev, crtc) {
6415 struct drm_crtc_state *crtc_state =
6416 drm_atomic_get_crtc_state(state, crtc);
6417
6418 ret = PTR_ERR_OR_ZERO(crtc_state);
6419 if (ret)
6420 goto free;
6421
6422 if (!crtc_state->active)
6423 continue;
6424
6425 crtc_state->active = false;
6426 crtc_mask |= 1 << drm_crtc_index(crtc);
6427 }
6428
6429 if (crtc_mask) {
6430 ret = drm_atomic_commit(state);
6431
6432 if (!ret) {
6433 for_each_crtc(dev, crtc)
6434 if (crtc_mask & (1 << drm_crtc_index(crtc)))
6435 crtc->state->active = true;
6436
6437 return ret;
6438 }
6439 }
6440 6381
6441free: 6382 state = drm_atomic_helper_suspend(dev);
6383 ret = PTR_ERR_OR_ZERO(state);
6442 if (ret) 6384 if (ret)
6443 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 6385 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6444 drm_atomic_state_free(state); 6386 else
6387 dev_priv->modeset_restore_state = state;
6445 return ret; 6388 return ret;
6446} 6389}
6447 6390
@@ -8181,18 +8124,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8181{ 8124{
8182 struct drm_device *dev = crtc->base.dev; 8125 struct drm_device *dev = crtc->base.dev;
8183 struct drm_i915_private *dev_priv = dev->dev_private; 8126 struct drm_i915_private *dev_priv = dev->dev_private;
8127 enum intel_display_power_domain power_domain;
8184 uint32_t tmp; 8128 uint32_t tmp;
8129 bool ret;
8185 8130
8186 if (!intel_display_power_is_enabled(dev_priv, 8131 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8187 POWER_DOMAIN_PIPE(crtc->pipe))) 8132 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8188 return false; 8133 return false;
8189 8134
8190 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8135 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8191 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8136 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8192 8137
8138 ret = false;
8139
8193 tmp = I915_READ(PIPECONF(crtc->pipe)); 8140 tmp = I915_READ(PIPECONF(crtc->pipe));
8194 if (!(tmp & PIPECONF_ENABLE)) 8141 if (!(tmp & PIPECONF_ENABLE))
8195 return false; 8142 goto out;
8196 8143
8197 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 8144 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8198 switch (tmp & PIPECONF_BPC_MASK) { 8145 switch (tmp & PIPECONF_BPC_MASK) {
@@ -8272,7 +8219,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8272 pipe_config->base.adjusted_mode.crtc_clock = 8219 pipe_config->base.adjusted_mode.crtc_clock =
8273 pipe_config->port_clock / pipe_config->pixel_multiplier; 8220 pipe_config->port_clock / pipe_config->pixel_multiplier;
8274 8221
8275 return true; 8222 ret = true;
8223
8224out:
8225 intel_display_power_put(dev_priv, power_domain);
8226
8227 return ret;
8276} 8228}
8277 8229
8278static void ironlake_init_pch_refclk(struct drm_device *dev) 8230static void ironlake_init_pch_refclk(struct drm_device *dev)
@@ -9376,18 +9328,21 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9376{ 9328{
9377 struct drm_device *dev = crtc->base.dev; 9329 struct drm_device *dev = crtc->base.dev;
9378 struct drm_i915_private *dev_priv = dev->dev_private; 9330 struct drm_i915_private *dev_priv = dev->dev_private;
9331 enum intel_display_power_domain power_domain;
9379 uint32_t tmp; 9332 uint32_t tmp;
9333 bool ret;
9380 9334
9381 if (!intel_display_power_is_enabled(dev_priv, 9335 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9382 POWER_DOMAIN_PIPE(crtc->pipe))) 9336 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9383 return false; 9337 return false;
9384 9338
9385 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9339 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9386 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9340 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9387 9341
9342 ret = false;
9388 tmp = I915_READ(PIPECONF(crtc->pipe)); 9343 tmp = I915_READ(PIPECONF(crtc->pipe));
9389 if (!(tmp & PIPECONF_ENABLE)) 9344 if (!(tmp & PIPECONF_ENABLE))
9390 return false; 9345 goto out;
9391 9346
9392 switch (tmp & PIPECONF_BPC_MASK) { 9347 switch (tmp & PIPECONF_BPC_MASK) {
9393 case PIPECONF_6BPC: 9348 case PIPECONF_6BPC:
@@ -9450,7 +9405,12 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9450 9405
9451 ironlake_get_pfit_config(crtc, pipe_config); 9406 ironlake_get_pfit_config(crtc, pipe_config);
9452 9407
9453 return true; 9408 ret = true;
9409
9410out:
9411 intel_display_power_put(dev_priv, power_domain);
9412
9413 return ret;
9454} 9414}
9455 9415
9456static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 9416static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -9716,9 +9676,6 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9716 intel_state->min_pixclk[i] = pixel_rate; 9676 intel_state->min_pixclk[i] = pixel_rate;
9717 } 9677 }
9718 9678
9719 if (!intel_state->active_crtcs)
9720 return 0;
9721
9722 for_each_pipe(dev_priv, pipe) 9679 for_each_pipe(dev_priv, pipe)
9723 max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate); 9680 max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
9724 9681
@@ -9982,12 +9939,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9982{ 9939{
9983 struct drm_device *dev = crtc->base.dev; 9940 struct drm_device *dev = crtc->base.dev;
9984 struct drm_i915_private *dev_priv = dev->dev_private; 9941 struct drm_i915_private *dev_priv = dev->dev_private;
9985 enum intel_display_power_domain pfit_domain; 9942 enum intel_display_power_domain power_domain;
9943 unsigned long power_domain_mask;
9986 uint32_t tmp; 9944 uint32_t tmp;
9945 bool ret;
9987 9946
9988 if (!intel_display_power_is_enabled(dev_priv, 9947 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9989 POWER_DOMAIN_PIPE(crtc->pipe))) 9948 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9990 return false; 9949 return false;
9950 power_domain_mask = BIT(power_domain);
9951
9952 ret = false;
9991 9953
9992 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9954 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9993 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9955 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -10014,13 +9976,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10014 pipe_config->cpu_transcoder = TRANSCODER_EDP; 9976 pipe_config->cpu_transcoder = TRANSCODER_EDP;
10015 } 9977 }
10016 9978
10017 if (!intel_display_power_is_enabled(dev_priv, 9979 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10018 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 9980 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10019 return false; 9981 goto out;
9982 power_domain_mask |= BIT(power_domain);
10020 9983
10021 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 9984 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10022 if (!(tmp & PIPECONF_ENABLE)) 9985 if (!(tmp & PIPECONF_ENABLE))
10023 return false; 9986 goto out;
10024 9987
10025 haswell_get_ddi_port_state(crtc, pipe_config); 9988 haswell_get_ddi_port_state(crtc, pipe_config);
10026 9989
@@ -10030,14 +9993,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10030 skl_init_scalers(dev, crtc, pipe_config); 9993 skl_init_scalers(dev, crtc, pipe_config);
10031 } 9994 }
10032 9995
10033 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10034
10035 if (INTEL_INFO(dev)->gen >= 9) { 9996 if (INTEL_INFO(dev)->gen >= 9) {
10036 pipe_config->scaler_state.scaler_id = -1; 9997 pipe_config->scaler_state.scaler_id = -1;
10037 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 9998 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10038 } 9999 }
10039 10000
10040 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { 10001 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10002 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10003 power_domain_mask |= BIT(power_domain);
10041 if (INTEL_INFO(dev)->gen >= 9) 10004 if (INTEL_INFO(dev)->gen >= 9)
10042 skylake_get_pfit_config(crtc, pipe_config); 10005 skylake_get_pfit_config(crtc, pipe_config);
10043 else 10006 else
@@ -10055,7 +10018,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10055 pipe_config->pixel_multiplier = 1; 10018 pipe_config->pixel_multiplier = 1;
10056 } 10019 }
10057 10020
10058 return true; 10021 ret = true;
10022
10023out:
10024 for_each_power_domain(power_domain, power_domain_mask)
10025 intel_display_power_put(dev_priv, power_domain);
10026
10027 return ret;
10059} 10028}
10060 10029
10061static void i845_update_cursor(struct drm_crtc *crtc, u32 base, 10030static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
@@ -10376,6 +10345,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
10376 if (obj->base.size < mode->vdisplay * fb->pitches[0]) 10345 if (obj->base.size < mode->vdisplay * fb->pitches[0])
10377 return NULL; 10346 return NULL;
10378 10347
10348 drm_framebuffer_reference(fb);
10379 return fb; 10349 return fb;
10380#else 10350#else
10381 return NULL; 10351 return NULL;
@@ -10431,7 +10401,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
10431 struct drm_device *dev = encoder->dev; 10401 struct drm_device *dev = encoder->dev;
10432 struct drm_framebuffer *fb; 10402 struct drm_framebuffer *fb;
10433 struct drm_mode_config *config = &dev->mode_config; 10403 struct drm_mode_config *config = &dev->mode_config;
10434 struct drm_atomic_state *state = NULL; 10404 struct drm_atomic_state *state = NULL, *restore_state = NULL;
10435 struct drm_connector_state *connector_state; 10405 struct drm_connector_state *connector_state;
10436 struct intel_crtc_state *crtc_state; 10406 struct intel_crtc_state *crtc_state;
10437 int ret, i = -1; 10407 int ret, i = -1;
@@ -10440,6 +10410,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
10440 connector->base.id, connector->name, 10410 connector->base.id, connector->name,
10441 encoder->base.id, encoder->name); 10411 encoder->base.id, encoder->name);
10442 10412
10413 old->restore_state = NULL;
10414
10443retry: 10415retry:
10444 ret = drm_modeset_lock(&config->connection_mutex, ctx); 10416 ret = drm_modeset_lock(&config->connection_mutex, ctx);
10445 if (ret) 10417 if (ret)
@@ -10456,24 +10428,15 @@ retry:
10456 */ 10428 */
10457 10429
10458 /* See if we already have a CRTC for this connector */ 10430 /* See if we already have a CRTC for this connector */
10459 if (encoder->crtc) { 10431 if (connector->state->crtc) {
10460 crtc = encoder->crtc; 10432 crtc = connector->state->crtc;
10461 10433
10462 ret = drm_modeset_lock(&crtc->mutex, ctx); 10434 ret = drm_modeset_lock(&crtc->mutex, ctx);
10463 if (ret) 10435 if (ret)
10464 goto fail; 10436 goto fail;
10465 ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10466 if (ret)
10467 goto fail;
10468
10469 old->dpms_mode = connector->dpms;
10470 old->load_detect_temp = false;
10471 10437
10472 /* Make sure the crtc and connector are running */ 10438 /* Make sure the crtc and connector are running */
10473 if (connector->dpms != DRM_MODE_DPMS_ON) 10439 goto found;
10474 connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
10475
10476 return true;
10477 } 10440 }
10478 10441
10479 /* Find an unused one (if possible) */ 10442 /* Find an unused one (if possible) */
@@ -10481,8 +10444,15 @@ retry:
10481 i++; 10444 i++;
10482 if (!(encoder->possible_crtcs & (1 << i))) 10445 if (!(encoder->possible_crtcs & (1 << i)))
10483 continue; 10446 continue;
10484 if (possible_crtc->state->enable) 10447
10448 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10449 if (ret)
10450 goto fail;
10451
10452 if (possible_crtc->state->enable) {
10453 drm_modeset_unlock(&possible_crtc->mutex);
10485 continue; 10454 continue;
10455 }
10486 10456
10487 crtc = possible_crtc; 10457 crtc = possible_crtc;
10488 break; 10458 break;
@@ -10496,23 +10466,22 @@ retry:
10496 goto fail; 10466 goto fail;
10497 } 10467 }
10498 10468
10499 ret = drm_modeset_lock(&crtc->mutex, ctx); 10469found:
10500 if (ret) 10470 intel_crtc = to_intel_crtc(crtc);
10501 goto fail; 10471
10502 ret = drm_modeset_lock(&crtc->primary->mutex, ctx); 10472 ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10503 if (ret) 10473 if (ret)
10504 goto fail; 10474 goto fail;
10505 10475
10506 intel_crtc = to_intel_crtc(crtc);
10507 old->dpms_mode = connector->dpms;
10508 old->load_detect_temp = true;
10509 old->release_fb = NULL;
10510
10511 state = drm_atomic_state_alloc(dev); 10476 state = drm_atomic_state_alloc(dev);
10512 if (!state) 10477 restore_state = drm_atomic_state_alloc(dev);
10513 return false; 10478 if (!state || !restore_state) {
10479 ret = -ENOMEM;
10480 goto fail;
10481 }
10514 10482
10515 state->acquire_ctx = ctx; 10483 state->acquire_ctx = ctx;
10484 restore_state->acquire_ctx = ctx;
10516 10485
10517 connector_state = drm_atomic_get_connector_state(state, connector); 10486 connector_state = drm_atomic_get_connector_state(state, connector);
10518 if (IS_ERR(connector_state)) { 10487 if (IS_ERR(connector_state)) {
@@ -10520,7 +10489,9 @@ retry:
10520 goto fail; 10489 goto fail;
10521 } 10490 }
10522 10491
10523 connector_state->crtc = crtc; 10492 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10493 if (ret)
10494 goto fail;
10524 10495
10525 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 10496 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10526 if (IS_ERR(crtc_state)) { 10497 if (IS_ERR(crtc_state)) {
@@ -10544,7 +10515,6 @@ retry:
10544 if (fb == NULL) { 10515 if (fb == NULL) {
10545 DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); 10516 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
10546 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); 10517 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10547 old->release_fb = fb;
10548 } else 10518 } else
10549 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 10519 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
10550 if (IS_ERR(fb)) { 10520 if (IS_ERR(fb)) {
@@ -10556,15 +10526,28 @@ retry:
10556 if (ret) 10526 if (ret)
10557 goto fail; 10527 goto fail;
10558 10528
10559 drm_mode_copy(&crtc_state->base.mode, mode); 10529 drm_framebuffer_unreference(fb);
10530
10531 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10532 if (ret)
10533 goto fail;
10534
10535 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10536 if (!ret)
10537 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10538 if (!ret)
10539 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
10540 if (ret) {
10541 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10542 goto fail;
10543 }
10560 10544
10561 if (drm_atomic_commit(state)) { 10545 if (drm_atomic_commit(state)) {
10562 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 10546 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10563 if (old->release_fb)
10564 old->release_fb->funcs->destroy(old->release_fb);
10565 goto fail; 10547 goto fail;
10566 } 10548 }
10567 crtc->primary->crtc = crtc; 10549
10550 old->restore_state = restore_state;
10568 10551
10569 /* let the connector get through one full cycle before testing */ 10552 /* let the connector get through one full cycle before testing */
10570 intel_wait_for_vblank(dev, intel_crtc->pipe); 10553 intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -10572,7 +10555,8 @@ retry:
10572 10555
10573fail: 10556fail:
10574 drm_atomic_state_free(state); 10557 drm_atomic_state_free(state);
10575 state = NULL; 10558 drm_atomic_state_free(restore_state);
10559 restore_state = state = NULL;
10576 10560
10577 if (ret == -EDEADLK) { 10561 if (ret == -EDEADLK) {
10578 drm_modeset_backoff(ctx); 10562 drm_modeset_backoff(ctx);
@@ -10586,65 +10570,24 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
10586 struct intel_load_detect_pipe *old, 10570 struct intel_load_detect_pipe *old,
10587 struct drm_modeset_acquire_ctx *ctx) 10571 struct drm_modeset_acquire_ctx *ctx)
10588{ 10572{
10589 struct drm_device *dev = connector->dev;
10590 struct intel_encoder *intel_encoder = 10573 struct intel_encoder *intel_encoder =
10591 intel_attached_encoder(connector); 10574 intel_attached_encoder(connector);
10592 struct drm_encoder *encoder = &intel_encoder->base; 10575 struct drm_encoder *encoder = &intel_encoder->base;
10593 struct drm_crtc *crtc = encoder->crtc; 10576 struct drm_atomic_state *state = old->restore_state;
10594 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10595 struct drm_atomic_state *state;
10596 struct drm_connector_state *connector_state;
10597 struct intel_crtc_state *crtc_state;
10598 int ret; 10577 int ret;
10599 10578
10600 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 10579 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10601 connector->base.id, connector->name, 10580 connector->base.id, connector->name,
10602 encoder->base.id, encoder->name); 10581 encoder->base.id, encoder->name);
10603 10582
10604 if (old->load_detect_temp) { 10583 if (!state)
10605 state = drm_atomic_state_alloc(dev);
10606 if (!state)
10607 goto fail;
10608
10609 state->acquire_ctx = ctx;
10610
10611 connector_state = drm_atomic_get_connector_state(state, connector);
10612 if (IS_ERR(connector_state))
10613 goto fail;
10614
10615 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10616 if (IS_ERR(crtc_state))
10617 goto fail;
10618
10619 connector_state->crtc = NULL;
10620
10621 crtc_state->base.enable = crtc_state->base.active = false;
10622
10623 ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
10624 0, 0);
10625 if (ret)
10626 goto fail;
10627
10628 ret = drm_atomic_commit(state);
10629 if (ret)
10630 goto fail;
10631
10632 if (old->release_fb) {
10633 drm_framebuffer_unregister_private(old->release_fb);
10634 drm_framebuffer_unreference(old->release_fb);
10635 }
10636
10637 return; 10584 return;
10638 }
10639
10640 /* Switch crtc and encoder back off if necessary */
10641 if (old->dpms_mode != DRM_MODE_DPMS_ON)
10642 connector->funcs->dpms(connector, old->dpms_mode);
10643 10585
10644 return; 10586 ret = drm_atomic_commit(state);
10645fail: 10587 if (ret) {
10646 DRM_DEBUG_KMS("Couldn't release load detect pipe.\n"); 10588 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10647 drm_atomic_state_free(state); 10589 drm_atomic_state_free(state);
10590 }
10648} 10591}
10649 10592
10650static int i9xx_pll_refclk(struct drm_device *dev, 10593static int i9xx_pll_refclk(struct drm_device *dev,
@@ -10996,6 +10939,12 @@ static bool page_flip_finished(struct intel_crtc *crtc)
10996 return true; 10939 return true;
10997 10940
10998 /* 10941 /*
10942 * BDW signals flip done immediately if the plane
10943 * is disabled, even if the plane enable is already
10944 * armed to occur at the next vblank :(
10945 */
10946
10947 /*
10999 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips 10948 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
11000 * used the same base address. In that case the mmio flip might 10949 * used the same base address. In that case the mmio flip might
11001 * have completed, but the CS hasn't even executed the flip yet. 10950 * have completed, but the CS hasn't even executed the flip yet.
@@ -11839,7 +11788,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11839 struct intel_plane_state *old_plane_state = 11788 struct intel_plane_state *old_plane_state =
11840 to_intel_plane_state(plane->state); 11789 to_intel_plane_state(plane->state);
11841 int idx = intel_crtc->base.base.id, ret; 11790 int idx = intel_crtc->base.base.id, ret;
11842 int i = drm_plane_index(plane);
11843 bool mode_changed = needs_modeset(crtc_state); 11791 bool mode_changed = needs_modeset(crtc_state);
11844 bool was_crtc_enabled = crtc->state->active; 11792 bool was_crtc_enabled = crtc->state->active;
11845 bool is_crtc_enabled = crtc_state->active; 11793 bool is_crtc_enabled = crtc_state->active;
@@ -11872,6 +11820,9 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11872 if (!was_visible && !visible) 11820 if (!was_visible && !visible)
11873 return 0; 11821 return 0;
11874 11822
11823 if (fb != old_plane_state->base.fb)
11824 pipe_config->fb_changed = true;
11825
11875 turn_off = was_visible && (!visible || mode_changed); 11826 turn_off = was_visible && (!visible || mode_changed);
11876 turn_on = visible && (!was_visible || mode_changed); 11827 turn_on = visible && (!was_visible || mode_changed);
11877 11828
@@ -11886,11 +11837,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11886 pipe_config->wm_changed = true; 11837 pipe_config->wm_changed = true;
11887 11838
11888 /* must disable cxsr around plane enable/disable */ 11839 /* must disable cxsr around plane enable/disable */
11889 if (plane->type != DRM_PLANE_TYPE_CURSOR) { 11840 if (plane->type != DRM_PLANE_TYPE_CURSOR)
11890 if (is_crtc_enabled)
11891 intel_crtc->atomic.wait_vblank = true;
11892 pipe_config->disable_cxsr = true; 11841 pipe_config->disable_cxsr = true;
11893 }
11894 } else if (intel_wm_need_update(plane, plane_state)) { 11842 } else if (intel_wm_need_update(plane, plane_state)) {
11895 pipe_config->wm_changed = true; 11843 pipe_config->wm_changed = true;
11896 } 11844 }
@@ -11904,14 +11852,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11904 intel_crtc->atomic.post_enable_primary = turn_on; 11852 intel_crtc->atomic.post_enable_primary = turn_on;
11905 intel_crtc->atomic.update_fbc = true; 11853 intel_crtc->atomic.update_fbc = true;
11906 11854
11907 /*
11908 * BDW signals flip done immediately if the plane
11909 * is disabled, even if the plane enable is already
11910 * armed to occur at the next vblank :(
11911 */
11912 if (turn_on && IS_BROADWELL(dev))
11913 intel_crtc->atomic.wait_vblank = true;
11914
11915 break; 11855 break;
11916 case DRM_PLANE_TYPE_CURSOR: 11856 case DRM_PLANE_TYPE_CURSOR:
11917 break; 11857 break;
@@ -11924,13 +11864,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11924 */ 11864 */
11925 if (IS_IVYBRIDGE(dev) && 11865 if (IS_IVYBRIDGE(dev) &&
11926 needs_scaling(to_intel_plane_state(plane_state)) && 11866 needs_scaling(to_intel_plane_state(plane_state)) &&
11927 !needs_scaling(old_plane_state)) { 11867 !needs_scaling(old_plane_state))
11928 to_intel_crtc_state(crtc_state)->disable_lp_wm = true; 11868 pipe_config->disable_lp_wm = true;
11929 } else if (turn_off && !mode_changed) {
11930 intel_crtc->atomic.wait_vblank = true;
11931 intel_crtc->atomic.update_sprite_watermarks |=
11932 1 << i;
11933 }
11934 11869
11935 break; 11870 break;
11936 } 11871 }
@@ -13098,8 +13033,6 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13098 struct drm_device *dev = state->dev; 13033 struct drm_device *dev = state->dev;
13099 struct drm_i915_private *dev_priv = to_i915(dev); 13034 struct drm_i915_private *dev_priv = to_i915(dev);
13100 struct intel_shared_dpll_config *shared_dpll = NULL; 13035 struct intel_shared_dpll_config *shared_dpll = NULL;
13101 struct intel_crtc *intel_crtc;
13102 struct intel_crtc_state *intel_crtc_state;
13103 struct drm_crtc *crtc; 13036 struct drm_crtc *crtc;
13104 struct drm_crtc_state *crtc_state; 13037 struct drm_crtc_state *crtc_state;
13105 int i; 13038 int i;
@@ -13108,21 +13041,21 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13108 return; 13041 return;
13109 13042
13110 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13043 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13111 int dpll; 13044 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13112 13045 int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;
13113 intel_crtc = to_intel_crtc(crtc);
13114 intel_crtc_state = to_intel_crtc_state(crtc_state);
13115 dpll = intel_crtc_state->shared_dpll;
13116 13046
13117 if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE) 13047 if (!needs_modeset(crtc_state))
13118 continue; 13048 continue;
13119 13049
13120 intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE; 13050 to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;
13051
13052 if (old_dpll == DPLL_ID_PRIVATE)
13053 continue;
13121 13054
13122 if (!shared_dpll) 13055 if (!shared_dpll)
13123 shared_dpll = intel_atomic_get_shared_dpll_state(state); 13056 shared_dpll = intel_atomic_get_shared_dpll_state(state);
13124 13057
13125 shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe); 13058 shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
13126 } 13059 }
13127} 13060}
13128 13061
@@ -13258,6 +13191,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13258 13191
13259 if (ret < 0) 13192 if (ret < 0)
13260 return ret; 13193 return ret;
13194
13195 DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
13196 intel_state->cdclk, intel_state->dev_cdclk);
13261 } else 13197 } else
13262 to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq; 13198 to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
13263 13199
@@ -13463,6 +13399,71 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
13463 return ret; 13399 return ret;
13464} 13400}
13465 13401
13402static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
13403 struct drm_i915_private *dev_priv,
13404 unsigned crtc_mask)
13405{
13406 unsigned last_vblank_count[I915_MAX_PIPES];
13407 enum pipe pipe;
13408 int ret;
13409
13410 if (!crtc_mask)
13411 return;
13412
13413 for_each_pipe(dev_priv, pipe) {
13414 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13415
13416 if (!((1 << pipe) & crtc_mask))
13417 continue;
13418
13419 ret = drm_crtc_vblank_get(crtc);
13420 if (WARN_ON(ret != 0)) {
13421 crtc_mask &= ~(1 << pipe);
13422 continue;
13423 }
13424
13425 last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
13426 }
13427
13428 for_each_pipe(dev_priv, pipe) {
13429 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13430 long lret;
13431
13432 if (!((1 << pipe) & crtc_mask))
13433 continue;
13434
13435 lret = wait_event_timeout(dev->vblank[pipe].queue,
13436 last_vblank_count[pipe] !=
13437 drm_crtc_vblank_count(crtc),
13438 msecs_to_jiffies(50));
13439
13440 WARN_ON(!lret);
13441
13442 drm_crtc_vblank_put(crtc);
13443 }
13444}
13445
13446static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13447{
13448 /* fb updated, need to unpin old fb */
13449 if (crtc_state->fb_changed)
13450 return true;
13451
13452 /* wm changes, need vblank before final wm's */
13453 if (crtc_state->wm_changed)
13454 return true;
13455
13456 /*
13457 * cxsr is re-enabled after vblank.
13458 * This is already handled by crtc_state->wm_changed,
13459 * but added for clarity.
13460 */
13461 if (crtc_state->disable_cxsr)
13462 return true;
13463
13464 return false;
13465}
13466
13466/** 13467/**
13467 * intel_atomic_commit - commit validated state object 13468 * intel_atomic_commit - commit validated state object
13468 * @dev: DRM device 13469 * @dev: DRM device
@@ -13489,6 +13490,8 @@ static int intel_atomic_commit(struct drm_device *dev,
13489 struct drm_crtc *crtc; 13490 struct drm_crtc *crtc;
13490 int ret = 0, i; 13491 int ret = 0, i;
13491 bool hw_check = intel_state->modeset; 13492 bool hw_check = intel_state->modeset;
13493 unsigned long put_domains[I915_MAX_PIPES] = {};
13494 unsigned crtc_vblank_mask = 0;
13492 13495
13493 ret = intel_atomic_prepare_commit(dev, state, async); 13496 ret = intel_atomic_prepare_commit(dev, state, async);
13494 if (ret) { 13497 if (ret) {
@@ -13504,11 +13507,22 @@ static int intel_atomic_commit(struct drm_device *dev,
13504 sizeof(intel_state->min_pixclk)); 13507 sizeof(intel_state->min_pixclk));
13505 dev_priv->active_crtcs = intel_state->active_crtcs; 13508 dev_priv->active_crtcs = intel_state->active_crtcs;
13506 dev_priv->atomic_cdclk_freq = intel_state->cdclk; 13509 dev_priv->atomic_cdclk_freq = intel_state->cdclk;
13510
13511 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13507 } 13512 }
13508 13513
13509 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13514 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13510 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13515 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13511 13516
13517 if (needs_modeset(crtc->state) ||
13518 to_intel_crtc_state(crtc->state)->update_pipe) {
13519 hw_check = true;
13520
13521 put_domains[to_intel_crtc(crtc)->pipe] =
13522 modeset_get_crtc_power_domains(crtc,
13523 to_intel_crtc_state(crtc->state));
13524 }
13525
13512 if (!needs_modeset(crtc->state)) 13526 if (!needs_modeset(crtc->state))
13513 continue; 13527 continue;
13514 13528
@@ -13541,32 +13555,25 @@ static int intel_atomic_commit(struct drm_device *dev,
13541 intel_shared_dpll_commit(state); 13555 intel_shared_dpll_commit(state);
13542 13556
13543 drm_atomic_helper_update_legacy_modeset_state(state->dev, state); 13557 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13544 modeset_update_crtc_power_domains(state); 13558
13559 if (dev_priv->display.modeset_commit_cdclk &&
13560 intel_state->dev_cdclk != dev_priv->cdclk_freq)
13561 dev_priv->display.modeset_commit_cdclk(state);
13545 } 13562 }
13546 13563
13547 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 13564 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13548 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13565 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13549 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13566 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13550 bool modeset = needs_modeset(crtc->state); 13567 bool modeset = needs_modeset(crtc->state);
13551 bool update_pipe = !modeset && 13568 struct intel_crtc_state *pipe_config =
13552 to_intel_crtc_state(crtc->state)->update_pipe; 13569 to_intel_crtc_state(crtc->state);
13553 unsigned long put_domains = 0; 13570 bool update_pipe = !modeset && pipe_config->update_pipe;
13554
13555 if (modeset)
13556 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13557 13571
13558 if (modeset && crtc->state->active) { 13572 if (modeset && crtc->state->active) {
13559 update_scanline_offset(to_intel_crtc(crtc)); 13573 update_scanline_offset(to_intel_crtc(crtc));
13560 dev_priv->display.crtc_enable(crtc); 13574 dev_priv->display.crtc_enable(crtc);
13561 } 13575 }
13562 13576
13563 if (update_pipe) {
13564 put_domains = modeset_get_crtc_power_domains(crtc);
13565
13566 /* make sure intel_modeset_check_state runs */
13567 hw_check = true;
13568 }
13569
13570 if (!modeset) 13577 if (!modeset)
13571 intel_pre_plane_update(to_intel_crtc_state(crtc_state)); 13578 intel_pre_plane_update(to_intel_crtc_state(crtc_state));
13572 13579
@@ -13577,18 +13584,24 @@ static int intel_atomic_commit(struct drm_device *dev,
13577 (crtc->state->planes_changed || update_pipe)) 13584 (crtc->state->planes_changed || update_pipe))
13578 drm_atomic_helper_commit_planes_on_crtc(crtc_state); 13585 drm_atomic_helper_commit_planes_on_crtc(crtc_state);
13579 13586
13580 if (put_domains) 13587 if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13581 modeset_put_power_domains(dev_priv, put_domains); 13588 crtc_vblank_mask |= 1 << i;
13582
13583 intel_post_plane_update(intel_crtc);
13584
13585 if (modeset)
13586 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13587 } 13589 }
13588 13590
13589 /* FIXME: add subpixel order */ 13591 /* FIXME: add subpixel order */
13590 13592
13591 drm_atomic_helper_wait_for_vblanks(dev, state); 13593 if (!state->legacy_cursor_update)
13594 intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
13595
13596 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13597 intel_post_plane_update(to_intel_crtc(crtc));
13598
13599 if (put_domains[i])
13600 modeset_put_power_domains(dev_priv, put_domains[i]);
13601 }
13602
13603 if (intel_state->modeset)
13604 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13592 13605
13593 mutex_lock(&dev->struct_mutex); 13606 mutex_lock(&dev->struct_mutex);
13594 drm_atomic_helper_cleanup_planes(dev, state); 13607 drm_atomic_helper_cleanup_planes(dev, state);
@@ -13670,7 +13683,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13670{ 13683{
13671 uint32_t val; 13684 uint32_t val;
13672 13685
13673 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 13686 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
13674 return false; 13687 return false;
13675 13688
13676 val = I915_READ(PCH_DPLL(pll->id)); 13689 val = I915_READ(PCH_DPLL(pll->id));
@@ -13678,6 +13691,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13678 hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); 13691 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13679 hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); 13692 hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13680 13693
13694 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
13695
13681 return val & DPLL_VCO_ENABLE; 13696 return val & DPLL_VCO_ENABLE;
13682} 13697}
13683 13698
@@ -15493,6 +15508,17 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15493 return false; 15508 return false;
15494} 15509}
15495 15510
15511static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15512{
15513 struct drm_device *dev = encoder->base.dev;
15514 struct intel_connector *connector;
15515
15516 for_each_connector_on_encoder(dev, &encoder->base, connector)
15517 return true;
15518
15519 return false;
15520}
15521
15496static void intel_sanitize_crtc(struct intel_crtc *crtc) 15522static void intel_sanitize_crtc(struct intel_crtc *crtc)
15497{ 15523{
15498 struct drm_device *dev = crtc->base.dev; 15524 struct drm_device *dev = crtc->base.dev;
@@ -15603,7 +15629,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15603{ 15629{
15604 struct intel_connector *connector; 15630 struct intel_connector *connector;
15605 struct drm_device *dev = encoder->base.dev; 15631 struct drm_device *dev = encoder->base.dev;
15606 bool active = false;
15607 15632
15608 /* We need to check both for a crtc link (meaning that the 15633 /* We need to check both for a crtc link (meaning that the
15609 * encoder is active and trying to read from a pipe) and the 15634 * encoder is active and trying to read from a pipe) and the
@@ -15611,15 +15636,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15611 bool has_active_crtc = encoder->base.crtc && 15636 bool has_active_crtc = encoder->base.crtc &&
15612 to_intel_crtc(encoder->base.crtc)->active; 15637 to_intel_crtc(encoder->base.crtc)->active;
15613 15638
15614 for_each_intel_connector(dev, connector) { 15639 if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
15615 if (connector->base.encoder != &encoder->base)
15616 continue;
15617
15618 active = true;
15619 break;
15620 }
15621
15622 if (active && !has_active_crtc) {
15623 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", 15640 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15624 encoder->base.base.id, 15641 encoder->base.base.id,
15625 encoder->base.name); 15642 encoder->base.name);
@@ -15674,10 +15691,12 @@ void i915_redisable_vga(struct drm_device *dev)
15674 * level, just check if the power well is enabled instead of trying to 15691 * level, just check if the power well is enabled instead of trying to
15675 * follow the "don't touch the power well if we don't need it" policy 15692 * follow the "don't touch the power well if we don't need it" policy
15676 * the rest of the driver uses. */ 15693 * the rest of the driver uses. */
15677 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA)) 15694 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15678 return; 15695 return;
15679 15696
15680 i915_redisable_vga_power_on(dev); 15697 i915_redisable_vga_power_on(dev);
15698
15699 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15681} 15700}
15682 15701
15683static bool primary_get_hw_state(struct intel_plane *plane) 15702static bool primary_get_hw_state(struct intel_plane *plane)
@@ -15905,7 +15924,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
15905 for_each_intel_crtc(dev, crtc) { 15924 for_each_intel_crtc(dev, crtc) {
15906 unsigned long put_domains; 15925 unsigned long put_domains;
15907 15926
15908 put_domains = modeset_get_crtc_power_domains(&crtc->base); 15927 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
15909 if (WARN_ON(put_domains)) 15928 if (WARN_ON(put_domains))
15910 modeset_put_power_domains(dev_priv, put_domains); 15929 modeset_put_power_domains(dev_priv, put_domains);
15911 } 15930 }
@@ -15916,54 +15935,65 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
15916 15935
15917void intel_display_resume(struct drm_device *dev) 15936void intel_display_resume(struct drm_device *dev)
15918{ 15937{
15919 struct drm_atomic_state *state = drm_atomic_state_alloc(dev); 15938 struct drm_i915_private *dev_priv = to_i915(dev);
15920 struct intel_connector *conn; 15939 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
15921 struct intel_plane *plane; 15940 struct drm_modeset_acquire_ctx ctx;
15922 struct drm_crtc *crtc;
15923 int ret; 15941 int ret;
15942 bool setup = false;
15924 15943
15925 if (!state) 15944 dev_priv->modeset_restore_state = NULL;
15926 return;
15927
15928 state->acquire_ctx = dev->mode_config.acquire_ctx;
15929 15945
15930 /* preserve complete old state, including dpll */ 15946 /*
15931 intel_atomic_get_shared_dpll_state(state); 15947 * This is a cludge because with real atomic modeset mode_config.mutex
15948 * won't be taken. Unfortunately some probed state like
15949 * audio_codec_enable is still protected by mode_config.mutex, so lock
15950 * it here for now.
15951 */
15952 mutex_lock(&dev->mode_config.mutex);
15953 drm_modeset_acquire_init(&ctx, 0);
15932 15954
15933 for_each_crtc(dev, crtc) { 15955retry:
15934 struct drm_crtc_state *crtc_state = 15956 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15935 drm_atomic_get_crtc_state(state, crtc);
15936 15957
15937 ret = PTR_ERR_OR_ZERO(crtc_state); 15958 if (ret == 0 && !setup) {
15938 if (ret) 15959 setup = true;
15939 goto err;
15940 15960
15941 /* force a restore */ 15961 intel_modeset_setup_hw_state(dev);
15942 crtc_state->mode_changed = true; 15962 i915_redisable_vga(dev);
15943 } 15963 }
15944 15964
15945 for_each_intel_plane(dev, plane) { 15965 if (ret == 0 && state) {
15946 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base)); 15966 struct drm_crtc_state *crtc_state;
15947 if (ret) 15967 struct drm_crtc *crtc;
15948 goto err; 15968 int i;
15949 }
15950 15969
15951 for_each_intel_connector(dev, conn) { 15970 state->acquire_ctx = &ctx;
15952 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base)); 15971
15953 if (ret) 15972 for_each_crtc_in_state(state, crtc, crtc_state, i) {
15954 goto err; 15973 /*
15974 * Force recalculation even if we restore
15975 * current state. With fast modeset this may not result
15976 * in a modeset when the state is compatible.
15977 */
15978 crtc_state->mode_changed = true;
15979 }
15980
15981 ret = drm_atomic_commit(state);
15955 } 15982 }
15956 15983
15957 intel_modeset_setup_hw_state(dev); 15984 if (ret == -EDEADLK) {
15985 drm_modeset_backoff(&ctx);
15986 goto retry;
15987 }
15958 15988
15959 i915_redisable_vga(dev); 15989 drm_modeset_drop_locks(&ctx);
15960 ret = drm_atomic_commit(state); 15990 drm_modeset_acquire_fini(&ctx);
15961 if (!ret) 15991 mutex_unlock(&dev->mode_config.mutex);
15962 return;
15963 15992
15964err: 15993 if (ret) {
15965 DRM_ERROR("Restoring old state failed with %i\n", ret); 15994 DRM_ERROR("Restoring old state failed with %i\n", ret);
15966 drm_atomic_state_free(state); 15995 drm_atomic_state_free(state);
15996 }
15967} 15997}
15968 15998
15969void intel_modeset_gem_init(struct drm_device *dev) 15999void intel_modeset_gem_init(struct drm_device *dev)
@@ -15972,9 +16002,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
15972 struct drm_i915_gem_object *obj; 16002 struct drm_i915_gem_object *obj;
15973 int ret; 16003 int ret;
15974 16004
15975 mutex_lock(&dev->struct_mutex);
15976 intel_init_gt_powersave(dev); 16005 intel_init_gt_powersave(dev);
15977 mutex_unlock(&dev->struct_mutex);
15978 16006
15979 intel_modeset_init_hw(dev); 16007 intel_modeset_init_hw(dev);
15980 16008
@@ -16054,9 +16082,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
16054 16082
16055 intel_cleanup_overlay(dev); 16083 intel_cleanup_overlay(dev);
16056 16084
16057 mutex_lock(&dev->struct_mutex);
16058 intel_cleanup_gt_powersave(dev); 16085 intel_cleanup_gt_powersave(dev);
16059 mutex_unlock(&dev->struct_mutex);
16060 16086
16061 intel_teardown_gmbus(dev); 16087 intel_teardown_gmbus(dev);
16062} 16088}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 23599c36503f..cbc06596659a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2356,15 +2356,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2356 struct drm_i915_private *dev_priv = dev->dev_private; 2356 struct drm_i915_private *dev_priv = dev->dev_private;
2357 enum intel_display_power_domain power_domain; 2357 enum intel_display_power_domain power_domain;
2358 u32 tmp; 2358 u32 tmp;
2359 bool ret;
2359 2360
2360 power_domain = intel_display_port_power_domain(encoder); 2361 power_domain = intel_display_port_power_domain(encoder);
2361 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 2362 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2362 return false; 2363 return false;
2363 2364
2365 ret = false;
2366
2364 tmp = I915_READ(intel_dp->output_reg); 2367 tmp = I915_READ(intel_dp->output_reg);
2365 2368
2366 if (!(tmp & DP_PORT_EN)) 2369 if (!(tmp & DP_PORT_EN))
2367 return false; 2370 goto out;
2368 2371
2369 if (IS_GEN7(dev) && port == PORT_A) { 2372 if (IS_GEN7(dev) && port == PORT_A) {
2370 *pipe = PORT_TO_PIPE_CPT(tmp); 2373 *pipe = PORT_TO_PIPE_CPT(tmp);
@@ -2375,7 +2378,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2375 u32 trans_dp = I915_READ(TRANS_DP_CTL(p)); 2378 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2376 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) { 2379 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2377 *pipe = p; 2380 *pipe = p;
2378 return true; 2381 ret = true;
2382
2383 goto out;
2379 } 2384 }
2380 } 2385 }
2381 2386
@@ -2387,7 +2392,12 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2387 *pipe = PORT_TO_PIPE(tmp); 2392 *pipe = PORT_TO_PIPE(tmp);
2388 } 2393 }
2389 2394
2390 return true; 2395 ret = true;
2396
2397out:
2398 intel_display_power_put(dev_priv, power_domain);
2399
2400 return ret;
2391} 2401}
2392 2402
2393static void intel_dp_get_config(struct intel_encoder *encoder, 2403static void intel_dp_get_config(struct intel_encoder *encoder,
@@ -4487,20 +4497,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4487 return I915_READ(PORT_HOTPLUG_STAT) & bit; 4497 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4488} 4498}
4489 4499
4490static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv, 4500static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4491 struct intel_digital_port *port) 4501 struct intel_digital_port *port)
4492{ 4502{
4493 u32 bit; 4503 u32 bit;
4494 4504
4495 switch (port->port) { 4505 switch (port->port) {
4496 case PORT_B: 4506 case PORT_B:
4497 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; 4507 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4498 break; 4508 break;
4499 case PORT_C: 4509 case PORT_C:
4500 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; 4510 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4501 break; 4511 break;
4502 case PORT_D: 4512 case PORT_D:
4503 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; 4513 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4504 break; 4514 break;
4505 default: 4515 default:
4506 MISSING_CASE(port->port); 4516 MISSING_CASE(port->port);
@@ -4548,12 +4558,12 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4548{ 4558{
4549 if (HAS_PCH_IBX(dev_priv)) 4559 if (HAS_PCH_IBX(dev_priv))
4550 return ibx_digital_port_connected(dev_priv, port); 4560 return ibx_digital_port_connected(dev_priv, port);
4551 if (HAS_PCH_SPLIT(dev_priv)) 4561 else if (HAS_PCH_SPLIT(dev_priv))
4552 return cpt_digital_port_connected(dev_priv, port); 4562 return cpt_digital_port_connected(dev_priv, port);
4553 else if (IS_BROXTON(dev_priv)) 4563 else if (IS_BROXTON(dev_priv))
4554 return bxt_digital_port_connected(dev_priv, port); 4564 return bxt_digital_port_connected(dev_priv, port);
4555 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4565 else if (IS_GM45(dev_priv))
4556 return vlv_digital_port_connected(dev_priv, port); 4566 return gm45_digital_port_connected(dev_priv, port);
4557 else 4567 else
4558 return g4x_digital_port_connected(dev_priv, port); 4568 return g4x_digital_port_connected(dev_priv, port);
4559} 4569}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3cae3768ea37..4c027d69fac9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -379,6 +379,7 @@ struct intel_crtc_state {
379 bool update_pipe; /* can a fast modeset be performed? */ 379 bool update_pipe; /* can a fast modeset be performed? */
380 bool disable_cxsr; 380 bool disable_cxsr;
381 bool wm_changed; /* watermarks are updated */ 381 bool wm_changed; /* watermarks are updated */
382 bool fb_changed; /* fb on any of the planes is changed */
382 383
383 /* Pipe source size (ie. panel fitter input size) 384 /* Pipe source size (ie. panel fitter input size)
384 * All planes will be positioned inside this space, 385 * All planes will be positioned inside this space,
@@ -547,9 +548,7 @@ struct intel_crtc_atomic_commit {
547 548
548 /* Sleepable operations to perform after commit */ 549 /* Sleepable operations to perform after commit */
549 unsigned fb_bits; 550 unsigned fb_bits;
550 bool wait_vblank;
551 bool post_enable_primary; 551 bool post_enable_primary;
552 unsigned update_sprite_watermarks;
553 552
554 /* Sleepable operations to perform before and after commit */ 553 /* Sleepable operations to perform before and after commit */
555 bool update_fbc; 554 bool update_fbc;
@@ -910,9 +909,7 @@ struct intel_unpin_work {
910}; 909};
911 910
912struct intel_load_detect_pipe { 911struct intel_load_detect_pipe {
913 struct drm_framebuffer *release_fb; 912 struct drm_atomic_state *restore_state;
914 bool load_detect_temp;
915 int dpms_mode;
916}; 913};
917 914
918static inline struct intel_encoder * 915static inline struct intel_encoder *
@@ -995,6 +992,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
995int intel_get_crtc_scanline(struct intel_crtc *crtc); 992int intel_get_crtc_scanline(struct intel_crtc *crtc);
996void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 993void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
997 unsigned int pipe_mask); 994 unsigned int pipe_mask);
995void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
996 unsigned int pipe_mask);
998 997
999/* intel_crt.c */ 998/* intel_crt.c */
1000void intel_crt_init(struct drm_device *dev); 999void intel_crt_init(struct drm_device *dev);
@@ -1227,7 +1226,7 @@ u32 skl_plane_ctl_rotation(unsigned int rotation);
1227 1226
1228/* intel_csr.c */ 1227/* intel_csr.c */
1229void intel_csr_ucode_init(struct drm_i915_private *); 1228void intel_csr_ucode_init(struct drm_i915_private *);
1230void intel_csr_load_program(struct drm_i915_private *); 1229bool intel_csr_load_program(struct drm_i915_private *);
1231void intel_csr_ucode_fini(struct drm_i915_private *); 1230void intel_csr_ucode_fini(struct drm_i915_private *);
1232 1231
1233/* intel_dp.c */ 1232/* intel_dp.c */
@@ -1436,6 +1435,8 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
1436 enum intel_display_power_domain domain); 1435 enum intel_display_power_domain domain);
1437void intel_display_power_get(struct drm_i915_private *dev_priv, 1436void intel_display_power_get(struct drm_i915_private *dev_priv,
1438 enum intel_display_power_domain domain); 1437 enum intel_display_power_domain domain);
1438bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1439 enum intel_display_power_domain domain);
1439void intel_display_power_put(struct drm_i915_private *dev_priv, 1440void intel_display_power_put(struct drm_i915_private *dev_priv,
1440 enum intel_display_power_domain domain); 1441 enum intel_display_power_domain domain);
1441 1442
@@ -1522,6 +1523,7 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
1522 enable_rpm_wakeref_asserts(dev_priv) 1523 enable_rpm_wakeref_asserts(dev_priv)
1523 1524
1524void intel_runtime_pm_get(struct drm_i915_private *dev_priv); 1525void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1526bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
1525void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); 1527void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1526void intel_runtime_pm_put(struct drm_i915_private *dev_priv); 1528void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1527 1529
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 378f879f4015..01b8e9f4c272 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -634,7 +634,6 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
634{ 634{
635 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 635 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
636 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 636 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
637 u32 val;
638 637
639 DRM_DEBUG_KMS("\n"); 638 DRM_DEBUG_KMS("\n");
640 639
@@ -642,9 +641,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
642 641
643 intel_dsi_clear_device_ready(encoder); 642 intel_dsi_clear_device_ready(encoder);
644 643
645 val = I915_READ(DSPCLK_GATE_D); 644 if (!IS_BROXTON(dev_priv)) {
646 val &= ~DPOUNIT_CLOCK_GATE_DISABLE; 645 u32 val;
647 I915_WRITE(DSPCLK_GATE_D, val); 646
647 val = I915_READ(DSPCLK_GATE_D);
648 val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
649 I915_WRITE(DSPCLK_GATE_D, val);
650 }
648 651
649 drm_panel_unprepare(intel_dsi->panel); 652 drm_panel_unprepare(intel_dsi->panel);
650 653
@@ -664,13 +667,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
664 struct drm_device *dev = encoder->base.dev; 667 struct drm_device *dev = encoder->base.dev;
665 enum intel_display_power_domain power_domain; 668 enum intel_display_power_domain power_domain;
666 enum port port; 669 enum port port;
670 bool ret;
667 671
668 DRM_DEBUG_KMS("\n"); 672 DRM_DEBUG_KMS("\n");
669 673
670 power_domain = intel_display_port_power_domain(encoder); 674 power_domain = intel_display_port_power_domain(encoder);
671 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 675 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
672 return false; 676 return false;
673 677
678 ret = false;
679
674 /* XXX: this only works for one DSI output */ 680 /* XXX: this only works for one DSI output */
675 for_each_dsi_port(port, intel_dsi->ports) { 681 for_each_dsi_port(port, intel_dsi->ports) {
676 i915_reg_t ctrl_reg = IS_BROXTON(dev) ? 682 i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
@@ -691,12 +697,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
691 if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) { 697 if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
692 if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) { 698 if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
693 *pipe = port == PORT_A ? PIPE_A : PIPE_B; 699 *pipe = port == PORT_A ? PIPE_A : PIPE_B;
694 return true; 700 ret = true;
701
702 goto out;
695 } 703 }
696 } 704 }
697 } 705 }
706out:
707 intel_display_power_put(dev_priv, power_domain);
698 708
699 return false; 709 return ret;
700} 710}
701 711
702static void intel_dsi_get_config(struct intel_encoder *encoder, 712static void intel_dsi_get_config(struct intel_encoder *encoder,
@@ -775,10 +785,9 @@ static void set_dsi_timings(struct drm_encoder *encoder,
775{ 785{
776 struct drm_device *dev = encoder->dev; 786 struct drm_device *dev = encoder->dev;
777 struct drm_i915_private *dev_priv = dev->dev_private; 787 struct drm_i915_private *dev_priv = dev->dev_private;
778 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
779 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 788 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
780 enum port port; 789 enum port port;
781 unsigned int bpp = intel_crtc->config->pipe_bpp; 790 unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
782 unsigned int lane_count = intel_dsi->lane_count; 791 unsigned int lane_count = intel_dsi->lane_count;
783 792
784 u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; 793 u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@@ -849,7 +858,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
849 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 858 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
850 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 859 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
851 enum port port; 860 enum port port;
852 unsigned int bpp = intel_crtc->config->pipe_bpp; 861 unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
853 u32 val, tmp; 862 u32 val, tmp;
854 u16 mode_hdisplay; 863 u16 mode_hdisplay;
855 864
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index de7be7f3fb42..92f39227b361 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -34,6 +34,8 @@
34#define DSI_DUAL_LINK_FRONT_BACK 1 34#define DSI_DUAL_LINK_FRONT_BACK 1
35#define DSI_DUAL_LINK_PIXEL_ALT 2 35#define DSI_DUAL_LINK_PIXEL_ALT 2
36 36
37int dsi_pixel_format_bpp(int pixel_format);
38
37struct intel_dsi_host; 39struct intel_dsi_host;
38 40
39struct intel_dsi { 41struct intel_dsi {
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 787f01c63984..7f145b4fec6a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -440,10 +440,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
440 intel_dsi->dual_link = mipi_config->dual_link; 440 intel_dsi->dual_link = mipi_config->dual_link;
441 intel_dsi->pixel_overlap = mipi_config->pixel_overlap; 441 intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
442 442
443 if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666) 443 bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format);
444 bits_per_pixel = 18;
445 else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
446 bits_per_pixel = 16;
447 444
448 intel_dsi->operation_mode = mipi_config->is_cmd_mode; 445 intel_dsi->operation_mode = mipi_config->is_cmd_mode;
449 intel_dsi->video_mode_format = mipi_config->video_transfer_mode; 446 intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index bb5e95a1a453..70883c54cb0a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -30,7 +30,7 @@
30#include "i915_drv.h" 30#include "i915_drv.h"
31#include "intel_dsi.h" 31#include "intel_dsi.h"
32 32
33static int dsi_pixel_format_bpp(int pixel_format) 33int dsi_pixel_format_bpp(int pixel_format)
34{ 34{
35 int bpp; 35 int bpp;
36 36
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 3614a951736b..0f0492f4a357 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -823,13 +823,15 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
823{ 823{
824 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 824 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
825 struct intel_fbc *fbc = &dev_priv->fbc; 825 struct intel_fbc *fbc = &dev_priv->fbc;
826 bool enable_by_default = IS_HASWELL(dev_priv) ||
827 IS_BROADWELL(dev_priv);
826 828
827 if (intel_vgpu_active(dev_priv->dev)) { 829 if (intel_vgpu_active(dev_priv->dev)) {
828 fbc->no_fbc_reason = "VGPU is active"; 830 fbc->no_fbc_reason = "VGPU is active";
829 return false; 831 return false;
830 } 832 }
831 833
832 if (i915.enable_fbc < 0) { 834 if (i915.enable_fbc < 0 && !enable_by_default) {
833 fbc->no_fbc_reason = "disabled per chip default"; 835 fbc->no_fbc_reason = "disabled per chip default";
834 return false; 836 return false;
835 } 837 }
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 09840f4380f9..97a91e631915 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -406,8 +406,8 @@ retry:
406 continue; 406 continue;
407 } 407 }
408 408
409 encoder = connector->encoder; 409 encoder = connector->state->best_encoder;
410 if (!encoder || WARN_ON(!encoder->crtc)) { 410 if (!encoder || WARN_ON(!connector->state->crtc)) {
411 if (connector->force > DRM_FORCE_OFF) 411 if (connector->force > DRM_FORCE_OFF)
412 goto bail; 412 goto bail;
413 413
@@ -420,7 +420,7 @@ retry:
420 420
421 num_connectors_enabled++; 421 num_connectors_enabled++;
422 422
423 new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc); 423 new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
424 424
425 /* 425 /*
426 * Make sure we're not trying to drive multiple connectors 426 * Make sure we're not trying to drive multiple connectors
@@ -466,17 +466,22 @@ retry:
466 * usually contains. But since our current 466 * usually contains. But since our current
467 * code puts a mode derived from the post-pfit timings 467 * code puts a mode derived from the post-pfit timings
468 * into crtc->mode this works out correctly. 468 * into crtc->mode this works out correctly.
469 *
470 * This is crtc->mode and not crtc->state->mode for the
471 * fastboot check to work correctly. crtc_state->mode has
472 * I915_MODE_FLAG_INHERITED, which we clear to force check
473 * state.
469 */ 474 */
470 DRM_DEBUG_KMS("looking for current mode on connector %s\n", 475 DRM_DEBUG_KMS("looking for current mode on connector %s\n",
471 connector->name); 476 connector->name);
472 modes[i] = &encoder->crtc->mode; 477 modes[i] = &connector->state->crtc->mode;
473 } 478 }
474 crtcs[i] = new_crtc; 479 crtcs[i] = new_crtc;
475 480
476 DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", 481 DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
477 connector->name, 482 connector->name,
478 pipe_name(to_intel_crtc(encoder->crtc)->pipe), 483 pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
479 encoder->crtc->base.id, 484 connector->state->crtc->base.id,
480 modes[i]->hdisplay, modes[i]->vdisplay, 485 modes[i]->hdisplay, modes[i]->vdisplay,
481 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); 486 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
482 487
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 3accd914490f..82a3c03fbc0e 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -199,7 +199,7 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
199 * the value matches either of two values representing completion 199 * the value matches either of two values representing completion
200 * of the GuC boot process. 200 * of the GuC boot process.
201 * 201 *
202 * This is used for polling the GuC status in a wait_for_atomic() 202 * This is used for polling the GuC status in a wait_for()
203 * loop below. 203 * loop below.
204 */ 204 */
205static inline bool guc_ucode_response(struct drm_i915_private *dev_priv, 205static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
@@ -259,14 +259,14 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
259 I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); 259 I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
260 260
261 /* 261 /*
262 * Spin-wait for the DMA to complete & the GuC to start up. 262 * Wait for the DMA to complete & the GuC to start up.
263 * NB: Docs recommend not using the interrupt for completion. 263 * NB: Docs recommend not using the interrupt for completion.
264 * Measurements indicate this should take no more than 20ms, so a 264 * Measurements indicate this should take no more than 20ms, so a
265 * timeout here indicates that the GuC has failed and is unusable. 265 * timeout here indicates that the GuC has failed and is unusable.
266 * (Higher levels of the driver will attempt to fall back to 266 * (Higher levels of the driver will attempt to fall back to
267 * execlist mode if this happens.) 267 * execlist mode if this happens.)
268 */ 268 */
269 ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100); 269 ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
270 270
271 DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n", 271 DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
272 I915_READ(DMA_CTRL), status); 272 I915_READ(DMA_CTRL), status);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index edb7e901ba4a..80b44c054087 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -880,15 +880,18 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
880 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 880 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
881 enum intel_display_power_domain power_domain; 881 enum intel_display_power_domain power_domain;
882 u32 tmp; 882 u32 tmp;
883 bool ret;
883 884
884 power_domain = intel_display_port_power_domain(encoder); 885 power_domain = intel_display_port_power_domain(encoder);
885 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 886 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
886 return false; 887 return false;
887 888
889 ret = false;
890
888 tmp = I915_READ(intel_hdmi->hdmi_reg); 891 tmp = I915_READ(intel_hdmi->hdmi_reg);
889 892
890 if (!(tmp & SDVO_ENABLE)) 893 if (!(tmp & SDVO_ENABLE))
891 return false; 894 goto out;
892 895
893 if (HAS_PCH_CPT(dev)) 896 if (HAS_PCH_CPT(dev))
894 *pipe = PORT_TO_PIPE_CPT(tmp); 897 *pipe = PORT_TO_PIPE_CPT(tmp);
@@ -897,7 +900,12 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
897 else 900 else
898 *pipe = PORT_TO_PIPE(tmp); 901 *pipe = PORT_TO_PIPE(tmp);
899 902
900 return true; 903 ret = true;
904
905out:
906 intel_display_power_put(dev_priv, power_domain);
907
908 return ret;
901} 909}
902 910
903static void intel_hdmi_get_config(struct intel_encoder *encoder, 911static void intel_hdmi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3a03646e343d..6a978ce80244 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -223,7 +223,8 @@ enum {
223 FAULT_AND_CONTINUE /* Unsupported */ 223 FAULT_AND_CONTINUE /* Unsupported */
224}; 224};
225#define GEN8_CTX_ID_SHIFT 32 225#define GEN8_CTX_ID_SHIFT 32
226#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 226#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
227#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
227 228
228static int intel_lr_context_pin(struct intel_context *ctx, 229static int intel_lr_context_pin(struct intel_context *ctx,
229 struct intel_engine_cs *engine); 230 struct intel_engine_cs *engine);
@@ -1144,10 +1145,6 @@ void intel_lr_context_unpin(struct intel_context *ctx,
1144 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; 1145 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
1145 1146
1146 WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); 1147 WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
1147
1148 if (WARN_ON_ONCE(!ctx_obj))
1149 return;
1150
1151 if (--ctx->engine[engine->id].pin_count == 0) { 1148 if (--ctx->engine[engine->id].pin_count == 0) {
1152 kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state)); 1149 kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
1153 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); 1150 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
@@ -2317,6 +2314,27 @@ make_rpcs(struct drm_device *dev)
2317 return rpcs; 2314 return rpcs;
2318} 2315}
2319 2316
2317static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
2318{
2319 u32 indirect_ctx_offset;
2320
2321 switch (INTEL_INFO(ring->dev)->gen) {
2322 default:
2323 MISSING_CASE(INTEL_INFO(ring->dev)->gen);
2324 /* fall through */
2325 case 9:
2326 indirect_ctx_offset =
2327 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2328 break;
2329 case 8:
2330 indirect_ctx_offset =
2331 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2332 break;
2333 }
2334
2335 return indirect_ctx_offset;
2336}
2337
2320static int 2338static int
2321populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj, 2339populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
2322 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) 2340 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
@@ -2360,7 +2378,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2360 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring), 2378 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
2361 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | 2379 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2362 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2380 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2363 CTX_CTRL_RS_CTX_ENABLE)); 2381 (HAS_RESOURCE_STREAMER(dev) ?
2382 CTX_CTRL_RS_CTX_ENABLE : 0)));
2364 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0); 2383 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
2365 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0); 2384 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
2366 /* Ring buffer start address is not known until the buffer is pinned. 2385 /* Ring buffer start address is not known until the buffer is pinned.
@@ -2389,7 +2408,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
2389 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS); 2408 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2390 2409
2391 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 2410 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
2392 CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6; 2411 intel_lr_indirect_ctx_offset(ring) << 6;
2393 2412
2394 reg_state[CTX_BB_PER_CTX_PTR+1] = 2413 reg_state[CTX_BB_PER_CTX_PTR+1] =
2395 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) | 2414 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 811ddf7799f0..30a8403a8f4f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -76,22 +76,30 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
76 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 76 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
77 enum intel_display_power_domain power_domain; 77 enum intel_display_power_domain power_domain;
78 u32 tmp; 78 u32 tmp;
79 bool ret;
79 80
80 power_domain = intel_display_port_power_domain(encoder); 81 power_domain = intel_display_port_power_domain(encoder);
81 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 82 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
82 return false; 83 return false;
83 84
85 ret = false;
86
84 tmp = I915_READ(lvds_encoder->reg); 87 tmp = I915_READ(lvds_encoder->reg);
85 88
86 if (!(tmp & LVDS_PORT_EN)) 89 if (!(tmp & LVDS_PORT_EN))
87 return false; 90 goto out;
88 91
89 if (HAS_PCH_CPT(dev)) 92 if (HAS_PCH_CPT(dev))
90 *pipe = PORT_TO_PIPE_CPT(tmp); 93 *pipe = PORT_TO_PIPE_CPT(tmp);
91 else 94 else
92 *pipe = PORT_TO_PIPE(tmp); 95 *pipe = PORT_TO_PIPE(tmp);
93 96
94 return true; 97 ret = true;
98
99out:
100 intel_display_power_put(dev_priv, power_domain);
101
102 return ret;
95} 103}
96 104
97static void intel_lvds_get_config(struct intel_encoder *encoder, 105static void intel_lvds_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 379eabe093cb..347d4df49a9b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2851,7 +2851,10 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2851 memset(ddb, 0, sizeof(*ddb)); 2851 memset(ddb, 0, sizeof(*ddb));
2852 2852
2853 for_each_pipe(dev_priv, pipe) { 2853 for_each_pipe(dev_priv, pipe) {
2854 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) 2854 enum intel_display_power_domain power_domain;
2855
2856 power_domain = POWER_DOMAIN_PIPE(pipe);
2857 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2855 continue; 2858 continue;
2856 2859
2857 for_each_plane(dev_priv, pipe, plane) { 2860 for_each_plane(dev_priv, pipe, plane) {
@@ -2863,6 +2866,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2863 val = I915_READ(CUR_BUF_CFG(pipe)); 2866 val = I915_READ(CUR_BUF_CFG(pipe));
2864 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR], 2867 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
2865 val); 2868 val);
2869
2870 intel_display_power_put(dev_priv, power_domain);
2866 } 2871 }
2867} 2872}
2868 2873
@@ -4116,11 +4121,13 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
4116static void ironlake_enable_drps(struct drm_device *dev) 4121static void ironlake_enable_drps(struct drm_device *dev)
4117{ 4122{
4118 struct drm_i915_private *dev_priv = dev->dev_private; 4123 struct drm_i915_private *dev_priv = dev->dev_private;
4119 u32 rgvmodectl = I915_READ(MEMMODECTL); 4124 u32 rgvmodectl;
4120 u8 fmax, fmin, fstart, vstart; 4125 u8 fmax, fmin, fstart, vstart;
4121 4126
4122 spin_lock_irq(&mchdev_lock); 4127 spin_lock_irq(&mchdev_lock);
4123 4128
4129 rgvmodectl = I915_READ(MEMMODECTL);
4130
4124 /* Enable temp reporting */ 4131 /* Enable temp reporting */
4125 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 4132 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4126 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 4133 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -5229,8 +5236,6 @@ static void cherryview_setup_pctx(struct drm_device *dev)
5229 u32 pcbr; 5236 u32 pcbr;
5230 int pctx_size = 32*1024; 5237 int pctx_size = 32*1024;
5231 5238
5232 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5233
5234 pcbr = I915_READ(VLV_PCBR); 5239 pcbr = I915_READ(VLV_PCBR);
5235 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { 5240 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5236 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); 5241 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
@@ -5252,7 +5257,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5252 u32 pcbr; 5257 u32 pcbr;
5253 int pctx_size = 24*1024; 5258 int pctx_size = 24*1024;
5254 5259
5255 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 5260 mutex_lock(&dev->struct_mutex);
5256 5261
5257 pcbr = I915_READ(VLV_PCBR); 5262 pcbr = I915_READ(VLV_PCBR);
5258 if (pcbr) { 5263 if (pcbr) {
@@ -5280,7 +5285,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5280 pctx = i915_gem_object_create_stolen(dev, pctx_size); 5285 pctx = i915_gem_object_create_stolen(dev, pctx_size);
5281 if (!pctx) { 5286 if (!pctx) {
5282 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 5287 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5283 return; 5288 goto out;
5284 } 5289 }
5285 5290
5286 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start; 5291 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
@@ -5289,6 +5294,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5289out: 5294out:
5290 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5295 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5291 dev_priv->vlv_pctx = pctx; 5296 dev_priv->vlv_pctx = pctx;
5297 mutex_unlock(&dev->struct_mutex);
5292} 5298}
5293 5299
5294static void valleyview_cleanup_pctx(struct drm_device *dev) 5300static void valleyview_cleanup_pctx(struct drm_device *dev)
@@ -5298,7 +5304,7 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
5298 if (WARN_ON(!dev_priv->vlv_pctx)) 5304 if (WARN_ON(!dev_priv->vlv_pctx))
5299 return; 5305 return;
5300 5306
5301 drm_gem_object_unreference(&dev_priv->vlv_pctx->base); 5307 drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
5302 dev_priv->vlv_pctx = NULL; 5308 dev_priv->vlv_pctx = NULL;
5303} 5309}
5304 5310
@@ -6241,8 +6247,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
6241 return; 6247 return;
6242 6248
6243 if (IS_IRONLAKE_M(dev)) { 6249 if (IS_IRONLAKE_M(dev)) {
6244 mutex_lock(&dev->struct_mutex);
6245 ironlake_enable_drps(dev); 6250 ironlake_enable_drps(dev);
6251 mutex_lock(&dev->struct_mutex);
6246 intel_init_emon(dev); 6252 intel_init_emon(dev);
6247 mutex_unlock(&dev->struct_mutex); 6253 mutex_unlock(&dev->struct_mutex);
6248 } else if (INTEL_INFO(dev)->gen >= 6) { 6254 } else if (INTEL_INFO(dev)->gen >= 6) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 4ab757947f15..0b42ada338c8 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -778,6 +778,15 @@ void intel_psr_init(struct drm_device *dev)
778 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? 778 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
779 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; 779 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
780 780
781 /* Per platform default */
782 if (i915.enable_psr == -1) {
783 if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
784 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
785 i915.enable_psr = 1;
786 else
787 i915.enable_psr = 0;
788 }
789
781 /* Set link_standby x link_off defaults */ 790 /* Set link_standby x link_off defaults */
782 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 791 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
783 /* HSW and BDW require workarounds that we don't implement. */ 792 /* HSW and BDW require workarounds that we don't implement. */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 133321a5b3d0..45ce45a5e122 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -746,9 +746,9 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
746 746
747 ret = i915_gem_render_state_init(req); 747 ret = i915_gem_render_state_init(req);
748 if (ret) 748 if (ret)
749 DRM_ERROR("init render state: %d\n", ret); 749 return ret;
750 750
751 return ret; 751 return 0;
752} 752}
753 753
754static int wa_add(struct drm_i915_private *dev_priv, 754static int wa_add(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index bbca527184d0..4172e73212cd 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -284,6 +284,13 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
284 1 << PIPE_C | 1 << PIPE_B); 284 1 << PIPE_C | 1 << PIPE_B);
285} 285}
286 286
287static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
288{
289 if (IS_BROADWELL(dev_priv))
290 gen8_irq_power_well_pre_disable(dev_priv,
291 1 << PIPE_C | 1 << PIPE_B);
292}
293
287static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, 294static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
288 struct i915_power_well *power_well) 295 struct i915_power_well *power_well)
289{ 296{
@@ -309,6 +316,14 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
309 } 316 }
310} 317}
311 318
319static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
320 struct i915_power_well *power_well)
321{
322 if (power_well->data == SKL_DISP_PW_2)
323 gen8_irq_power_well_pre_disable(dev_priv,
324 1 << PIPE_C | 1 << PIPE_B);
325}
326
312static void hsw_set_power_well(struct drm_i915_private *dev_priv, 327static void hsw_set_power_well(struct drm_i915_private *dev_priv,
313 struct i915_power_well *power_well, bool enable) 328 struct i915_power_well *power_well, bool enable)
314{ 329{
@@ -334,6 +349,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
334 349
335 } else { 350 } else {
336 if (enable_requested) { 351 if (enable_requested) {
352 hsw_power_well_pre_disable(dev_priv);
337 I915_WRITE(HSW_PWR_WELL_DRIVER, 0); 353 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
338 POSTING_READ(HSW_PWR_WELL_DRIVER); 354 POSTING_READ(HSW_PWR_WELL_DRIVER);
339 DRM_DEBUG_KMS("Requesting to disable the power well\n"); 355 DRM_DEBUG_KMS("Requesting to disable the power well\n");
@@ -456,20 +472,61 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
456 */ 472 */
457} 473}
458 474
459static void gen9_set_dc_state_debugmask_memory_up( 475static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
460 struct drm_i915_private *dev_priv)
461{ 476{
462 uint32_t val; 477 uint32_t val, mask;
478
479 mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
480
481 if (IS_BROXTON(dev_priv))
482 mask |= DC_STATE_DEBUG_MASK_CORES;
463 483
464 /* The below bit doesn't need to be cleared ever afterwards */ 484 /* The below bit doesn't need to be cleared ever afterwards */
465 val = I915_READ(DC_STATE_DEBUG); 485 val = I915_READ(DC_STATE_DEBUG);
466 if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) { 486 if ((val & mask) != mask) {
467 val |= DC_STATE_DEBUG_MASK_MEMORY_UP; 487 val |= mask;
468 I915_WRITE(DC_STATE_DEBUG, val); 488 I915_WRITE(DC_STATE_DEBUG, val);
469 POSTING_READ(DC_STATE_DEBUG); 489 POSTING_READ(DC_STATE_DEBUG);
470 } 490 }
471} 491}
472 492
493static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
494 u32 state)
495{
496 int rewrites = 0;
497 int rereads = 0;
498 u32 v;
499
500 I915_WRITE(DC_STATE_EN, state);
501
502 /* It has been observed that disabling the dc6 state sometimes
503 * doesn't stick and dmc keeps returning old value. Make sure
504 * the write really sticks enough times and also force rewrite until
505 * we are confident that state is exactly what we want.
506 */
507 do {
508 v = I915_READ(DC_STATE_EN);
509
510 if (v != state) {
511 I915_WRITE(DC_STATE_EN, state);
512 rewrites++;
513 rereads = 0;
514 } else if (rereads++ > 5) {
515 break;
516 }
517
518 } while (rewrites < 100);
519
520 if (v != state)
521 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
522 state, v);
523
524 /* Most of the times we need one retry, avoid spam */
525 if (rewrites > 1)
526 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
527 state, rewrites);
528}
529
473static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) 530static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
474{ 531{
475 uint32_t val; 532 uint32_t val;
@@ -488,16 +545,21 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
488 else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5) 545 else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
489 state = DC_STATE_EN_UPTO_DC5; 546 state = DC_STATE_EN_UPTO_DC5;
490 547
491 if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
492 gen9_set_dc_state_debugmask_memory_up(dev_priv);
493
494 val = I915_READ(DC_STATE_EN); 548 val = I915_READ(DC_STATE_EN);
495 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", 549 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
496 val & mask, state); 550 val & mask, state);
551
552 /* Check if DMC is ignoring our DC state requests */
553 if ((val & mask) != dev_priv->csr.dc_state)
554 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
555 dev_priv->csr.dc_state, val & mask);
556
497 val &= ~mask; 557 val &= ~mask;
498 val |= state; 558 val |= state;
499 I915_WRITE(DC_STATE_EN, val); 559
500 POSTING_READ(DC_STATE_EN); 560 gen9_write_dc_state(dev_priv, val);
561
562 dev_priv->csr.dc_state = val & mask;
501} 563}
502 564
503void bxt_enable_dc9(struct drm_i915_private *dev_priv) 565void bxt_enable_dc9(struct drm_i915_private *dev_priv)
@@ -663,6 +725,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
663 state_mask = SKL_POWER_WELL_STATE(power_well->data); 725 state_mask = SKL_POWER_WELL_STATE(power_well->data);
664 is_enabled = tmp & state_mask; 726 is_enabled = tmp & state_mask;
665 727
728 if (!enable && enable_requested)
729 skl_power_well_pre_disable(dev_priv, power_well);
730
666 if (enable) { 731 if (enable) {
667 if (!enable_requested) { 732 if (!enable_requested) {
668 WARN((tmp & state_mask) && 733 WARN((tmp & state_mask) &&
@@ -941,6 +1006,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
941 valleyview_disable_display_irqs(dev_priv); 1006 valleyview_disable_display_irqs(dev_priv);
942 spin_unlock_irq(&dev_priv->irq_lock); 1007 spin_unlock_irq(&dev_priv->irq_lock);
943 1008
1009 /* make sure we're done processing display irqs */
1010 synchronize_irq(dev_priv->dev->irq);
1011
944 vlv_power_sequencer_reset(dev_priv); 1012 vlv_power_sequencer_reset(dev_priv);
945} 1013}
946 1014
@@ -1435,6 +1503,22 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1435 chv_set_pipe_power_well(dev_priv, power_well, false); 1503 chv_set_pipe_power_well(dev_priv, power_well, false);
1436} 1504}
1437 1505
1506static void
1507__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1508 enum intel_display_power_domain domain)
1509{
1510 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1511 struct i915_power_well *power_well;
1512 int i;
1513
1514 for_each_power_well(i, power_well, BIT(domain), power_domains) {
1515 if (!power_well->count++)
1516 intel_power_well_enable(dev_priv, power_well);
1517 }
1518
1519 power_domains->domain_use_count[domain]++;
1520}
1521
1438/** 1522/**
1439 * intel_display_power_get - grab a power domain reference 1523 * intel_display_power_get - grab a power domain reference
1440 * @dev_priv: i915 device instance 1524 * @dev_priv: i915 device instance
@@ -1450,24 +1534,53 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1450void intel_display_power_get(struct drm_i915_private *dev_priv, 1534void intel_display_power_get(struct drm_i915_private *dev_priv,
1451 enum intel_display_power_domain domain) 1535 enum intel_display_power_domain domain)
1452{ 1536{
1453 struct i915_power_domains *power_domains; 1537 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1454 struct i915_power_well *power_well;
1455 int i;
1456 1538
1457 intel_runtime_pm_get(dev_priv); 1539 intel_runtime_pm_get(dev_priv);
1458 1540
1459 power_domains = &dev_priv->power_domains; 1541 mutex_lock(&power_domains->lock);
1542
1543 __intel_display_power_get_domain(dev_priv, domain);
1544
1545 mutex_unlock(&power_domains->lock);
1546}
1547
1548/**
1549 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1550 * @dev_priv: i915 device instance
1551 * @domain: power domain to reference
1552 *
1553 * This function grabs a power domain reference for @domain and ensures that the
1554 * power domain and all its parents are powered up. Therefore users should only
1555 * grab a reference to the innermost power domain they need.
1556 *
1557 * Any power domain reference obtained by this function must have a symmetric
1558 * call to intel_display_power_put() to release the reference again.
1559 */
1560bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1561 enum intel_display_power_domain domain)
1562{
1563 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1564 bool is_enabled;
1565
1566 if (!intel_runtime_pm_get_if_in_use(dev_priv))
1567 return false;
1460 1568
1461 mutex_lock(&power_domains->lock); 1569 mutex_lock(&power_domains->lock);
1462 1570
1463 for_each_power_well(i, power_well, BIT(domain), power_domains) { 1571 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1464 if (!power_well->count++) 1572 __intel_display_power_get_domain(dev_priv, domain);
1465 intel_power_well_enable(dev_priv, power_well); 1573 is_enabled = true;
1574 } else {
1575 is_enabled = false;
1466 } 1576 }
1467 1577
1468 power_domains->domain_use_count[domain]++;
1469
1470 mutex_unlock(&power_domains->lock); 1578 mutex_unlock(&power_domains->lock);
1579
1580 if (!is_enabled)
1581 intel_runtime_pm_put(dev_priv);
1582
1583 return is_enabled;
1471} 1584}
1472 1585
1473/** 1586/**
@@ -2028,8 +2141,8 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
2028 2141
2029 skl_init_cdclk(dev_priv); 2142 skl_init_cdclk(dev_priv);
2030 2143
2031 if (dev_priv->csr.dmc_payload) 2144 if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
2032 intel_csr_load_program(dev_priv); 2145 gen9_set_dc_state_debugmask(dev_priv);
2033} 2146}
2034 2147
2035static void skl_display_core_uninit(struct drm_i915_private *dev_priv) 2148static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -2239,6 +2352,41 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2239} 2352}
2240 2353
2241/** 2354/**
2355 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
2356 * @dev_priv: i915 device instance
2357 *
2358 * This function grabs a device-level runtime pm reference if the device is
2359 * already in use and ensures that it is powered up.
2360 *
2361 * Any runtime pm reference obtained by this function must have a symmetric
2362 * call to intel_runtime_pm_put() to release the reference again.
2363 */
2364bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2365{
2366 struct drm_device *dev = dev_priv->dev;
2367 struct device *device = &dev->pdev->dev;
2368
2369 if (IS_ENABLED(CONFIG_PM)) {
2370 int ret = pm_runtime_get_if_in_use(device);
2371
2372 /*
2373 * In cases runtime PM is disabled by the RPM core and we get
2374 * an -EINVAL return value we are not supposed to call this
2375 * function, since the power state is undefined. This applies
2376 * atm to the late/early system suspend/resume handlers.
2377 */
2378 WARN_ON_ONCE(ret < 0);
2379 if (ret <= 0)
2380 return false;
2381 }
2382
2383 atomic_inc(&dev_priv->pm.wakeref_count);
2384 assert_rpm_wakelock_held(dev_priv);
2385
2386 return true;
2387}
2388
2389/**
2242 * intel_runtime_pm_get_noresume - grab a runtime pm reference 2390 * intel_runtime_pm_get_noresume - grab a runtime pm reference
2243 * @dev_priv: i915 device instance 2391 * @dev_priv: i915 device instance
2244 * 2392 *
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d21f75bda96e..6745bad5bff0 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1182,10 +1182,9 @@ static int
1182intel_tv_detect_type(struct intel_tv *intel_tv, 1182intel_tv_detect_type(struct intel_tv *intel_tv,
1183 struct drm_connector *connector) 1183 struct drm_connector *connector)
1184{ 1184{
1185 struct drm_encoder *encoder = &intel_tv->base.base; 1185 struct drm_crtc *crtc = connector->state->crtc;
1186 struct drm_crtc *crtc = encoder->crtc;
1187 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1186 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1188 struct drm_device *dev = encoder->dev; 1187 struct drm_device *dev = connector->dev;
1189 struct drm_i915_private *dev_priv = dev->dev_private; 1188 struct drm_i915_private *dev_priv = dev->dev_private;
1190 u32 tv_ctl, save_tv_ctl; 1189 u32 tv_ctl, save_tv_ctl;
1191 u32 tv_dac, save_tv_dac; 1190 u32 tv_dac, save_tv_dac;
@@ -1234,8 +1233,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
1234 I915_WRITE(TV_DAC, tv_dac); 1233 I915_WRITE(TV_DAC, tv_dac);
1235 POSTING_READ(TV_DAC); 1234 POSTING_READ(TV_DAC);
1236 1235
1237 intel_wait_for_vblank(intel_tv->base.base.dev, 1236 intel_wait_for_vblank(dev, intel_crtc->pipe);
1238 to_intel_crtc(intel_tv->base.base.crtc)->pipe);
1239 1237
1240 type = -1; 1238 type = -1;
1241 tv_dac = I915_READ(TV_DAC); 1239 tv_dac = I915_READ(TV_DAC);
@@ -1265,8 +1263,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
1265 POSTING_READ(TV_CTL); 1263 POSTING_READ(TV_CTL);
1266 1264
1267 /* For unknown reasons the hw barfs if we don't do this vblank wait. */ 1265 /* For unknown reasons the hw barfs if we don't do this vblank wait. */
1268 intel_wait_for_vblank(intel_tv->base.base.dev, 1266 intel_wait_for_vblank(dev, intel_crtc->pipe);
1269 to_intel_crtc(intel_tv->base.base.crtc)->pipe);
1270 1267
1271 /* Restore interrupt config */ 1268 /* Restore interrupt config */
1272 if (connector->polled & DRM_CONNECTOR_POLL_HPD) { 1269 if (connector->polled & DRM_CONNECTOR_POLL_HPD) {