author    Daniel Vetter <daniel.vetter@ffwll.ch>  2015-01-12 17:07:46 -0500
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2015-01-12 17:07:46 -0500
commit    0a87a2db485a1456b7427914969c0e8195a1bbda (patch)
tree      8d0186672af22c6ee76118c471881cd66a36502d  /drivers/gpu/drm/i915
parent    7226572d8ed48f7e1aa9de5383d919490d6e9a0c (diff)
parent    fcf3aac5fc307f0cae429f5844ddc25761662858 (diff)
Merge tag 'topic/i915-hda-componentized-2015-01-12' into drm-intel-next-queued
Conflicts:
	drivers/gpu/drm/i915/intel_runtime_pm.c

Separate branch so that Takashi can also pull just this refactoring into
sound-next.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
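[Editor's illustration, not part of the commit] The refactoring replaces the
i915_request_power_well()/i915_release_power_well()/i915_get_cdclk_freq()
exports (removed at the end of this diff) with a component-framework
interface. A minimal, hedged sketch of how a bound audio driver would reach
i915 through the new ops table; example_read_cdclk() is an invented name, the
ops and fields are exactly those added in intel_audio.c below:

	#include <linux/errno.h>
	#include <drm/i915_component.h>

	static int example_read_cdclk(struct i915_audio_component *acomp)
	{
		int cdclk_khz;

		if (!acomp->ops)			/* i915 child not bound yet */
			return -ENODEV;

		acomp->ops->get_power(acomp->dev);	/* holds POWER_DOMAIN_AUDIO */
		cdclk_khz = acomp->ops->get_cdclk_freq(acomp->dev);
		acomp->ops->put_power(acomp->dev);

		return cdclk_khz;			/* CDCLK in kHz */
	}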
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             18
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             49
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c     48
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c   3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c       6
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             24
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h             21
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c          8
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c         110
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        21
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c          4
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h             2
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c          24
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c           22
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c             34
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     27
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c     83
19 files changed, 333 insertions, 196 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8cbff3010e1c..2447de36de44 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -833,6 +833,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
833 833
834 intel_runtime_pm_enable(dev_priv); 834 intel_runtime_pm_enable(dev_priv);
835 835
836 i915_audio_component_init(dev_priv);
837
836 return 0; 838 return 0;
837 839
838out_power_well: 840out_power_well:
@@ -873,6 +875,8 @@ int i915_driver_unload(struct drm_device *dev)
873 struct drm_i915_private *dev_priv = dev->dev_private; 875 struct drm_i915_private *dev_priv = dev->dev_private;
874 int ret; 876 int ret;
875 877
878 i915_audio_component_cleanup(dev_priv);
879
876 ret = i915_gem_suspend(dev); 880 ret = i915_gem_suspend(dev);
877 if (ret) { 881 if (ret) {
878 DRM_ERROR("failed to idle hardware: %d\n", ret); 882 DRM_ERROR("failed to idle hardware: %d\n", ret);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 95bc829184eb..308774f42079 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -706,11 +706,12 @@ static int i915_drm_resume(struct drm_device *dev)
706 dev_priv->display.hpd_irq_setup(dev); 706 dev_priv->display.hpd_irq_setup(dev);
707 spin_unlock_irq(&dev_priv->irq_lock); 707 spin_unlock_irq(&dev_priv->irq_lock);
708 708
709 intel_dp_mst_resume(dev);
710 drm_modeset_lock_all(dev); 709 drm_modeset_lock_all(dev);
711 intel_modeset_setup_hw_state(dev, true); 710 intel_modeset_setup_hw_state(dev, true);
712 drm_modeset_unlock_all(dev); 711 drm_modeset_unlock_all(dev);
713 712
713 intel_dp_mst_resume(dev);
714
714 /* 715 /*
715 * ... but also need to make sure that hotplug processing 716 * ... but also need to make sure that hotplug processing
716 * doesn't cause havoc. Like in the driver load code we don't 717 * doesn't cause havoc. Like in the driver load code we don't
@@ -810,6 +811,8 @@ int i915_reset(struct drm_device *dev)
810 if (!i915.reset) 811 if (!i915.reset)
811 return 0; 812 return 0;
812 813
814 intel_reset_gt_powersave(dev);
815
813 mutex_lock(&dev->struct_mutex); 816 mutex_lock(&dev->struct_mutex);
814 817
815 i915_gem_reset(dev); 818 i915_gem_reset(dev);
@@ -881,7 +884,7 @@ int i915_reset(struct drm_device *dev)
881 * of re-init after reset. 884 * of re-init after reset.
882 */ 885 */
883 if (INTEL_INFO(dev)->gen > 5) 886 if (INTEL_INFO(dev)->gen > 5)
884 intel_reset_gt_powersave(dev); 887 intel_enable_gt_powersave(dev);
885 } else { 888 } else {
886 mutex_unlock(&dev->struct_mutex); 889 mutex_unlock(&dev->struct_mutex);
887 } 890 }
@@ -939,8 +942,7 @@ static int i915_pm_suspend(struct device *dev)
939 942
940static int i915_pm_suspend_late(struct device *dev) 943static int i915_pm_suspend_late(struct device *dev)
941{ 944{
942 struct pci_dev *pdev = to_pci_dev(dev); 945 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
943 struct drm_device *drm_dev = pci_get_drvdata(pdev);
944 946
945 /* 947 /*
946 * We have a suspedn ordering issue with the snd-hda driver also 948 * We have a suspedn ordering issue with the snd-hda driver also
@@ -959,8 +961,7 @@ static int i915_pm_suspend_late(struct device *dev)
959 961
960static int i915_pm_resume_early(struct device *dev) 962static int i915_pm_resume_early(struct device *dev)
961{ 963{
962 struct pci_dev *pdev = to_pci_dev(dev); 964 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
963 struct drm_device *drm_dev = pci_get_drvdata(pdev);
964 965
965 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 966 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
966 return 0; 967 return 0;
@@ -970,8 +971,7 @@ static int i915_pm_resume_early(struct device *dev)
970 971
971static int i915_pm_resume(struct device *dev) 972static int i915_pm_resume(struct device *dev)
972{ 973{
973 struct pci_dev *pdev = to_pci_dev(dev); 974 struct drm_device *drm_dev = dev_to_i915(dev)->dev;
974 struct drm_device *drm_dev = pci_get_drvdata(pdev);
975 975
976 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 976 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
977 return 0; 977 return 0;
@@ -1588,7 +1588,7 @@ static struct drm_driver driver = {
1588 .gem_prime_import = i915_gem_prime_import, 1588 .gem_prime_import = i915_gem_prime_import,
1589 1589
1590 .dumb_create = i915_gem_dumb_create, 1590 .dumb_create = i915_gem_dumb_create,
1591 .dumb_map_offset = i915_gem_dumb_map_offset, 1591 .dumb_map_offset = i915_gem_mmap_gtt,
1592 .dumb_destroy = drm_gem_dumb_destroy, 1592 .dumb_destroy = drm_gem_dumb_destroy,
1593 .ioctls = i915_ioctls, 1593 .ioctls = i915_ioctls,
1594 .fops = &i915_driver_fops, 1594 .fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 71016f5192c5..e008fa0c58da 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -970,6 +970,7 @@ struct i915_suspend_saved_registers {
970 u32 savePIPEB_LINK_N1; 970 u32 savePIPEB_LINK_N1;
971 u32 saveMCHBAR_RENDER_STANDBY; 971 u32 saveMCHBAR_RENDER_STANDBY;
972 u32 savePCH_PORT_HOTPLUG; 972 u32 savePCH_PORT_HOTPLUG;
973 u16 saveGCDGMBUS;
973}; 974};
974 975
975struct vlv_s0ix_state { 976struct vlv_s0ix_state {
@@ -1771,6 +1772,9 @@ struct drm_i915_private {
1771 struct drm_property *broadcast_rgb_property; 1772 struct drm_property *broadcast_rgb_property;
1772 struct drm_property *force_audio_property; 1773 struct drm_property *force_audio_property;
1773 1774
1775 /* hda/i915 audio component */
1776 bool audio_component_registered;
1777
1774 uint32_t hw_context_size; 1778 uint32_t hw_context_size;
1775 struct list_head context_list; 1779 struct list_head context_list;
1776 1780
@@ -1829,8 +1833,6 @@ struct drm_i915_private {
1829 */ 1833 */
1830 struct workqueue_struct *dp_wq; 1834 struct workqueue_struct *dp_wq;
1831 1835
1832 uint32_t bios_vgacntr;
1833
1834 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1836 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
1835 struct { 1837 struct {
1836 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, 1838 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -1858,6 +1860,11 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
1858 return dev->dev_private; 1860 return dev->dev_private;
1859} 1861}
1860 1862
1863static inline struct drm_i915_private *dev_to_i915(struct device *dev)
1864{
1865 return to_i915(dev_get_drvdata(dev));
1866}
1867
1861/* Iterate over initialised rings */ 1868/* Iterate over initialised rings */
1862#define for_each_ring(ring__, dev_priv__, i__) \ 1869#define for_each_ring(ring__, dev_priv__, i__) \
1863 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ 1870 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@@ -2633,9 +2640,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
2633int i915_gem_dumb_create(struct drm_file *file_priv, 2640int i915_gem_dumb_create(struct drm_file *file_priv,
2634 struct drm_device *dev, 2641 struct drm_device *dev,
2635 struct drm_mode_create_dumb *args); 2642 struct drm_mode_create_dumb *args);
2636int i915_gem_dumb_map_offset(struct drm_file *file_priv, 2643int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
2637 struct drm_device *dev, uint32_t handle, 2644 uint32_t handle, uint64_t *offset);
2638 uint64_t *offset);
2639/** 2645/**
2640 * Returns true if seq1 is later than seq2. 2646 * Returns true if seq1 is later than seq2.
2641 */ 2647 */
@@ -3218,6 +3224,11 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
3218 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3224 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3219} 3225}
3220 3226
3227static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
3228{
3229 return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
3230}
3231
3221static inline unsigned long 3232static inline unsigned long
3222timespec_to_jiffies_timeout(const struct timespec *value) 3233timespec_to_jiffies_timeout(const struct timespec *value)
3223{ 3234{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3f6ca46a1dfe..9f430f77a520 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -395,7 +395,6 @@ static int
395i915_gem_create(struct drm_file *file, 395i915_gem_create(struct drm_file *file,
396 struct drm_device *dev, 396 struct drm_device *dev,
397 uint64_t size, 397 uint64_t size,
398 bool dumb,
399 uint32_t *handle_p) 398 uint32_t *handle_p)
400{ 399{
401 struct drm_i915_gem_object *obj; 400 struct drm_i915_gem_object *obj;
@@ -411,7 +410,6 @@ i915_gem_create(struct drm_file *file,
411 if (obj == NULL) 410 if (obj == NULL)
412 return -ENOMEM; 411 return -ENOMEM;
413 412
414 obj->base.dumb = dumb;
415 ret = drm_gem_handle_create(file, &obj->base, &handle); 413 ret = drm_gem_handle_create(file, &obj->base, &handle);
416 /* drop reference from allocate - handle holds it now */ 414 /* drop reference from allocate - handle holds it now */
417 drm_gem_object_unreference_unlocked(&obj->base); 415 drm_gem_object_unreference_unlocked(&obj->base);
@@ -431,7 +429,7 @@ i915_gem_dumb_create(struct drm_file *file,
431 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); 429 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
432 args->size = args->pitch * args->height; 430 args->size = args->pitch * args->height;
433 return i915_gem_create(file, dev, 431 return i915_gem_create(file, dev,
434 args->size, true, &args->handle); 432 args->size, &args->handle);
435} 433}
436 434
437/** 435/**
@@ -444,7 +442,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
444 struct drm_i915_gem_create *args = data; 442 struct drm_i915_gem_create *args = data;
445 443
446 return i915_gem_create(file, dev, 444 return i915_gem_create(file, dev,
447 args->size, false, &args->handle); 445 args->size, &args->handle);
448} 446}
449 447
450static inline int 448static inline int
@@ -1044,6 +1042,7 @@ int
1044i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1042i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1045 struct drm_file *file) 1043 struct drm_file *file)
1046{ 1044{
1045 struct drm_i915_private *dev_priv = dev->dev_private;
1047 struct drm_i915_gem_pwrite *args = data; 1046 struct drm_i915_gem_pwrite *args = data;
1048 struct drm_i915_gem_object *obj; 1047 struct drm_i915_gem_object *obj;
1049 int ret; 1048 int ret;
@@ -1063,9 +1062,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1063 return -EFAULT; 1062 return -EFAULT;
1064 } 1063 }
1065 1064
1065 intel_runtime_pm_get(dev_priv);
1066
1066 ret = i915_mutex_lock_interruptible(dev); 1067 ret = i915_mutex_lock_interruptible(dev);
1067 if (ret) 1068 if (ret)
1068 return ret; 1069 goto put_rpm;
1069 1070
1070 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 1071 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1071 if (&obj->base == NULL) { 1072 if (&obj->base == NULL) {
@@ -1117,6 +1118,9 @@ out:
1117 drm_gem_object_unreference(&obj->base); 1118 drm_gem_object_unreference(&obj->base);
1118unlock: 1119unlock:
1119 mutex_unlock(&dev->struct_mutex); 1120 mutex_unlock(&dev->struct_mutex);
1121put_rpm:
1122 intel_runtime_pm_put(dev_priv);
1123
1120 return ret; 1124 return ret;
1121} 1125}
1122 1126
@@ -1220,7 +1224,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
1220 if (i915_gem_request_completed(req, true)) 1224 if (i915_gem_request_completed(req, true))
1221 return 0; 1225 return 0;
1222 1226
1223 timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0; 1227 timeout_expire = timeout ?
1228 jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
1224 1229
1225 if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) { 1230 if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
1226 gen6_rps_boost(dev_priv); 1231 gen6_rps_boost(dev_priv);
@@ -1296,6 +1301,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
1296 s64 tres = *timeout - (now - before); 1301 s64 tres = *timeout - (now - before);
1297 1302
1298 *timeout = tres < 0 ? 0 : tres; 1303 *timeout = tres < 0 ? 0 : tres;
1304
1305 /*
1306 * Apparently ktime isn't accurate enough and occasionally has a
1307 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
1308 * things up to make the test happy. We allow up to 1 jiffy.
1309 *
1310 * This is a regrssion from the timespec->ktime conversion.
1311 */
1312 if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
1313 *timeout = 0;
1299 } 1314 }
1300 1315
1301 return ret; 1316 return ret;
@@ -1840,10 +1855,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1840 drm_gem_free_mmap_offset(&obj->base); 1855 drm_gem_free_mmap_offset(&obj->base);
1841} 1856}
1842 1857
1843static int 1858int
1844i915_gem_mmap_gtt(struct drm_file *file, 1859i915_gem_mmap_gtt(struct drm_file *file,
1845 struct drm_device *dev, 1860 struct drm_device *dev,
1846 uint32_t handle, bool dumb, 1861 uint32_t handle,
1847 uint64_t *offset) 1862 uint64_t *offset)
1848{ 1863{
1849 struct drm_i915_private *dev_priv = dev->dev_private; 1864 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1860,13 +1875,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
1860 goto unlock; 1875 goto unlock;
1861 } 1876 }
1862 1877
1863 /*
1864 * We don't allow dumb mmaps on objects created using another
1865 * interface.
1866 */
1867 WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
1868 "Illegal dumb map of accelerated buffer.\n");
1869
1870 if (obj->base.size > dev_priv->gtt.mappable_end) { 1878 if (obj->base.size > dev_priv->gtt.mappable_end) {
1871 ret = -E2BIG; 1879 ret = -E2BIG;
1872 goto out; 1880 goto out;
@@ -1891,15 +1899,6 @@ unlock:
1891 return ret; 1899 return ret;
1892} 1900}
1893 1901
1894int
1895i915_gem_dumb_map_offset(struct drm_file *file,
1896 struct drm_device *dev,
1897 uint32_t handle,
1898 uint64_t *offset)
1899{
1900 return i915_gem_mmap_gtt(file, dev, handle, true, offset);
1901}
1902
1903/** 1902/**
1904 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 1903 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1905 * @dev: DRM device 1904 * @dev: DRM device
@@ -1921,7 +1920,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1921{ 1920{
1922 struct drm_i915_gem_mmap_gtt *args = data; 1921 struct drm_i915_gem_mmap_gtt *args = data;
1923 1922
1924 return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset); 1923 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1925} 1924}
1926 1925
1927static inline int 1926static inline int
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b775ed4a189c..8603bf48d3ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -486,7 +486,12 @@ mi_set_context(struct intel_engine_cs *ring,
486 u32 hw_flags) 486 u32 hw_flags)
487{ 487{
488 u32 flags = hw_flags | MI_MM_SPACE_GTT; 488 u32 flags = hw_flags | MI_MM_SPACE_GTT;
489 int ret; 489 const int num_rings =
490 /* Use an extended w/a on ivb+ if signalling from other rings */
491 i915_semaphore_is_enabled(ring->dev) ?
492 hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
493 0;
494 int len, i, ret;
490 495
491 /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB 496 /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
492 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value 497 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -503,15 +508,31 @@ mi_set_context(struct intel_engine_cs *ring,
503 if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8) 508 if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
504 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); 509 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
505 510
506 ret = intel_ring_begin(ring, 6); 511
512 len = 4;
513 if (INTEL_INFO(ring->dev)->gen >= 7)
514 len += 2 + (num_rings ? 4*num_rings + 2 : 0);
515
516 ret = intel_ring_begin(ring, len);
507 if (ret) 517 if (ret)
508 return ret; 518 return ret;
509 519
510 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ 520 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
511 if (INTEL_INFO(ring->dev)->gen >= 7) 521 if (INTEL_INFO(ring->dev)->gen >= 7) {
512 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); 522 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
513 else 523 if (num_rings) {
514 intel_ring_emit(ring, MI_NOOP); 524 struct intel_engine_cs *signaller;
525
526 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
527 for_each_ring(signaller, to_i915(ring->dev), i) {
528 if (signaller == ring)
529 continue;
530
531 intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
532 intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
533 }
534 }
535 }
515 536
516 intel_ring_emit(ring, MI_NOOP); 537 intel_ring_emit(ring, MI_NOOP);
517 intel_ring_emit(ring, MI_SET_CONTEXT); 538 intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -523,10 +544,21 @@ mi_set_context(struct intel_engine_cs *ring,
523 */ 544 */
524 intel_ring_emit(ring, MI_NOOP); 545 intel_ring_emit(ring, MI_NOOP);
525 546
526 if (INTEL_INFO(ring->dev)->gen >= 7) 547 if (INTEL_INFO(ring->dev)->gen >= 7) {
548 if (num_rings) {
549 struct intel_engine_cs *signaller;
550
551 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
552 for_each_ring(signaller, to_i915(ring->dev), i) {
553 if (signaller == ring)
554 continue;
555
556 intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
557 intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
558 }
559 }
527 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); 560 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
528 else 561 }
529 intel_ring_emit(ring, MI_NOOP);
530 562
531 intel_ring_advance(ring); 563 intel_ring_advance(ring);
532 564
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6c21c59ed71d..e3ef17783765 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -122,9 +122,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
122 goto err; 122 goto err;
123 } 123 }
124 124
125 WARN_ONCE(obj->base.dumb,
126 "GPU use of dumb buffer is illegal.\n");
127
128 drm_gem_object_reference(&obj->base); 125 drm_gem_object_reference(&obj->base);
129 list_add_tail(&obj->obj_exec_link, &objects); 126 list_add_tail(&obj->obj_exec_link, &objects);
130 } 127 }
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index c38891892547..a2045848bd1a 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
137 r = devm_request_mem_region(dev->dev, base + 1, 137 r = devm_request_mem_region(dev->dev, base + 1,
138 dev_priv->gtt.stolen_size - 1, 138 dev_priv->gtt.stolen_size - 1,
139 "Graphics Stolen Memory"); 139 "Graphics Stolen Memory");
140 if (r == NULL) { 140 /*
141 * GEN3 firmware likes to smash pci bridges into the stolen
142 * range. Apparently this works.
143 */
144 if (r == NULL && !IS_GEN3(dev)) {
141 DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", 145 DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
142 base, base + (uint32_t)dev_priv->gtt.stolen_size); 146 base, base + (uint32_t)dev_priv->gtt.stolen_size);
143 base = 0; 147 base = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e3dd2d62c992..aa3180cf2921 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -285,10 +285,14 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
285 struct drm_i915_private *dev_priv = dev->dev_private; 285 struct drm_i915_private *dev_priv = dev->dev_private;
286 286
287 spin_lock_irq(&dev_priv->irq_lock); 287 spin_lock_irq(&dev_priv->irq_lock);
288
288 WARN_ON(dev_priv->rps.pm_iir); 289 WARN_ON(dev_priv->rps.pm_iir);
289 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 290 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
290 dev_priv->rps.interrupts_enabled = true; 291 dev_priv->rps.interrupts_enabled = true;
292 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
293 dev_priv->pm_rps_events);
291 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 294 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
295
292 spin_unlock_irq(&dev_priv->irq_lock); 296 spin_unlock_irq(&dev_priv->irq_lock);
293} 297}
294 298
@@ -3313,8 +3317,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3313 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3317 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3314 3318
3315 if (INTEL_INFO(dev)->gen >= 6) { 3319 if (INTEL_INFO(dev)->gen >= 6) {
3316 pm_irqs |= dev_priv->pm_rps_events; 3320 /*
3317 3321 * RPS interrupts will get enabled/disabled on demand when RPS
3322 * itself is enabled/disabled.
3323 */
3318 if (HAS_VEBOX(dev)) 3324 if (HAS_VEBOX(dev))
3319 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3325 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3320 3326
@@ -3526,7 +3532,11 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3526 dev_priv->pm_irq_mask = 0xffffffff; 3532 dev_priv->pm_irq_mask = 0xffffffff;
3527 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3533 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3528 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3534 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3529 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events); 3535 /*
3536 * RPS interrupts will get enabled/disabled on demand when RPS itself
3537 * is enabled/disabled.
3538 */
3539 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3530 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3540 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3531} 3541}
3532 3542
@@ -3615,7 +3625,7 @@ static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3615 3625
3616 vlv_display_irq_reset(dev_priv); 3626 vlv_display_irq_reset(dev_priv);
3617 3627
3618 dev_priv->irq_mask = 0; 3628 dev_priv->irq_mask = ~0;
3619} 3629}
3620 3630
3621static void valleyview_irq_uninstall(struct drm_device *dev) 3631static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -3721,8 +3731,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
3721 if ((iir & flip_pending) == 0) 3731 if ((iir & flip_pending) == 0)
3722 goto check_page_flip; 3732 goto check_page_flip;
3723 3733
3724 intel_prepare_page_flip(dev, plane);
3725
3726 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3734 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3727 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3735 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3728 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3736 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3732,6 +3740,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
3732 if (I915_READ16(ISR) & flip_pending) 3740 if (I915_READ16(ISR) & flip_pending)
3733 goto check_page_flip; 3741 goto check_page_flip;
3734 3742
3743 intel_prepare_page_flip(dev, plane);
3735 intel_finish_page_flip(dev, pipe); 3744 intel_finish_page_flip(dev, pipe);
3736 return true; 3745 return true;
3737 3746
@@ -3903,8 +3912,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
3903 if ((iir & flip_pending) == 0) 3912 if ((iir & flip_pending) == 0)
3904 goto check_page_flip; 3913 goto check_page_flip;
3905 3914
3906 intel_prepare_page_flip(dev, plane);
3907
3908 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3915 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3909 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3916 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3910 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3917 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3914,6 +3921,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
3914 if (I915_READ(ISR) & flip_pending) 3921 if (I915_READ(ISR) & flip_pending)
3915 goto check_page_flip; 3922 goto check_page_flip;
3916 3923
3924 intel_prepare_page_flip(dev, plane);
3917 intel_finish_page_flip(dev, pipe); 3925 intel_finish_page_flip(dev, pipe);
3918 return true; 3926 return true;
3919 3927
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0cb0067af4bb..0f32fd1a9d10 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -34,8 +34,19 @@
34#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \ 34#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
35 (port) == PORT_B ? (b) : (c)) 35 (port) == PORT_B ? (b) : (c))
36 36
37#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) 37#define _MASKED_FIELD(mask, value) ({ \
38#define _MASKED_BIT_DISABLE(a) ((a) << 16) 38 if (__builtin_constant_p(mask)) \
39 BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
40 if (__builtin_constant_p(value)) \
41 BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
42 if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
43 BUILD_BUG_ON_MSG((value) & ~(mask), \
44 "Incorrect value for mask"); \
45 (mask) << 16 | (value); })
46#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
47#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
48
49
39 50
40/* PCI config space */ 51/* PCI config space */
41 52
@@ -76,6 +87,7 @@
76#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) 87#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
77#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) 88#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
78#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) 89#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
90#define GCDGMBUS 0xcc
79#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ 91#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
80 92
81 93
@@ -389,6 +401,7 @@
389#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21) 401#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
390#define PIPE_CONTROL_CS_STALL (1<<20) 402#define PIPE_CONTROL_CS_STALL (1<<20)
391#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) 403#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
404#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
392#define PIPE_CONTROL_QW_WRITE (1<<14) 405#define PIPE_CONTROL_QW_WRITE (1<<14)
393#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) 406#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
394#define PIPE_CONTROL_DEPTH_STALL (1<<13) 407#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -1123,6 +1136,7 @@ enum punit_power_well {
1123#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) 1136#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
1124#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) 1137#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
1125#define GEN6_NOSYNC 0 1138#define GEN6_NOSYNC 0
1139#define RING_PSMI_CTL(base) ((base)+0x50)
1126#define RING_MAX_IDLE(base) ((base)+0x54) 1140#define RING_MAX_IDLE(base) ((base)+0x54)
1127#define RING_HWS_PGA(base) ((base)+0x80) 1141#define RING_HWS_PGA(base) ((base)+0x80)
1128#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 1142#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -1289,7 +1303,7 @@ enum punit_power_well {
1289#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0) 1303#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
1290#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1) 1304#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
1291#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) 1305#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
1292#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16) 1306#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
1293#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) 1307#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
1294 1308
1295#define GFX_MODE 0x02520 1309#define GFX_MODE 0x02520
@@ -1453,6 +1467,7 @@ enum punit_power_well {
1453#define GEN6_BLITTER_FBC_NOTIFY (1<<3) 1467#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
1454 1468
1455#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 1469#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
1470#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
1456#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) 1471#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
1457#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10) 1472#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
1458 1473
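[Editor's note] A quick worked example of the new masked-register helpers
above; the EXAMPLE_* field is invented. Bits 31:16 of a write to a masked
register select which of bits 15:0 the hardware latches, which is also why
GEN6_WIZ_HASHING_MASK no longer carries its own << 16:

	#define EXAMPLE_FIELD_MASK	(3 << 2)	/* invented two-bit field */
	#define EXAMPLE_FIELD_16x4	(1 << 2)

	/*
	 * _MASKED_FIELD(mask, value) expands to ((mask) << 16) | (value) and
	 * BUILD_BUG_ONs a mask or value that spills past bit 15, or a value
	 * that strays outside the mask:
	 *
	 *	_MASKED_FIELD(EXAMPLE_FIELD_MASK, EXAMPLE_FIELD_16x4) == 0x000c0004
	 *
	 * _MASKED_BIT_ENABLE(bit)  == _MASKED_FIELD(bit, bit)
	 * _MASKED_BIT_DISABLE(bit) == _MASKED_FIELD(bit, 0)
	 */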
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 1e4999dd3ed5..9f19ed38cdc3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -303,6 +303,10 @@ int i915_save_state(struct drm_device *dev)
303 } 303 }
304 } 304 }
305 305
306 if (IS_GEN4(dev))
307 pci_read_config_word(dev->pdev, GCDGMBUS,
308 &dev_priv->regfile.saveGCDGMBUS);
309
306 /* Cache mode state */ 310 /* Cache mode state */
307 if (INTEL_INFO(dev)->gen < 7) 311 if (INTEL_INFO(dev)->gen < 7)
308 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 312 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -331,6 +335,10 @@ int i915_restore_state(struct drm_device *dev)
331 mutex_lock(&dev->struct_mutex); 335 mutex_lock(&dev->struct_mutex);
332 336
333 i915_gem_restore_fences(dev); 337 i915_gem_restore_fences(dev);
338
339 if (IS_GEN4(dev))
340 pci_write_config_word(dev->pdev, GCDGMBUS,
341 dev_priv->regfile.saveGCDGMBUS);
334 i915_restore_display(dev); 342 i915_restore_display(dev);
335 343
336 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 344 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 2c7ed5cb29c0..ee41b882e71a 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -22,6 +22,9 @@
22 */ 22 */
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/component.h>
26#include <drm/i915_component.h>
27#include "intel_drv.h"
25 28
26#include <drm/drmP.h> 29#include <drm/drmP.h>
27#include <drm/drm_edid.h> 30#include <drm/drm_edid.h>
@@ -461,3 +464,110 @@ void intel_init_audio(struct drm_device *dev)
461 dev_priv->display.audio_codec_disable = ilk_audio_codec_disable; 464 dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
462 } 465 }
463} 466}
467
468static void i915_audio_component_get_power(struct device *dev)
469{
470 intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
471}
472
473static void i915_audio_component_put_power(struct device *dev)
474{
475 intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
476}
477
478/* Get CDCLK in kHz */
479static int i915_audio_component_get_cdclk_freq(struct device *dev)
480{
481 struct drm_i915_private *dev_priv = dev_to_i915(dev);
482 int ret;
483
484 if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
485 return -ENODEV;
486
487 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
488 ret = intel_ddi_get_cdclk_freq(dev_priv);
489 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
490
491 return ret;
492}
493
494static const struct i915_audio_component_ops i915_audio_component_ops = {
495 .owner = THIS_MODULE,
496 .get_power = i915_audio_component_get_power,
497 .put_power = i915_audio_component_put_power,
498 .get_cdclk_freq = i915_audio_component_get_cdclk_freq,
499};
500
501static int i915_audio_component_bind(struct device *i915_dev,
502 struct device *hda_dev, void *data)
503{
504 struct i915_audio_component *acomp = data;
505
506 if (WARN_ON(acomp->ops || acomp->dev))
507 return -EEXIST;
508
509 acomp->ops = &i915_audio_component_ops;
510 acomp->dev = i915_dev;
511
512 return 0;
513}
514
515static void i915_audio_component_unbind(struct device *i915_dev,
516 struct device *hda_dev, void *data)
517{
518 struct i915_audio_component *acomp = data;
519
520 acomp->ops = NULL;
521 acomp->dev = NULL;
522}
523
524static const struct component_ops i915_audio_component_bind_ops = {
525 .bind = i915_audio_component_bind,
526 .unbind = i915_audio_component_unbind,
527};
528
529/**
530 * i915_audio_component_init - initialize and register the audio component
531 * @dev_priv: i915 device instance
532 *
533 * This will register with the component framework a child component which
534 * will bind dynamically to the snd_hda_intel driver's corresponding master
535 * component when the latter is registered. During binding the child
536 * initializes an instance of struct i915_audio_component which it receives
537 * from the master. The master can then start to use the interface defined by
538 * this struct. Each side can break the binding at any point by deregistering
539 * its own component after which each side's component unbind callback is
540 * called.
541 *
542 * We ignore any error during registration and continue with reduced
543 * functionality (i.e. without HDMI audio).
544 */
545void i915_audio_component_init(struct drm_i915_private *dev_priv)
546{
547 int ret;
548
549 ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
550 if (ret < 0) {
551 DRM_ERROR("failed to add audio component (%d)\n", ret);
552 /* continue with reduced functionality */
553 return;
554 }
555
556 dev_priv->audio_component_registered = true;
557}
558
559/**
560 * i915_audio_component_cleanup - deregister the audio component
561 * @dev_priv: i915 device instance
562 *
563 * Deregisters the audio component, breaking any existing binding to the
564 * corresponding snd_hda_intel driver's master component.
565 */
566void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
567{
568 if (!dev_priv->audio_component_registered)
569 return;
570
571 component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
572 dev_priv->audio_component_registered = false;
573}
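[Editor's illustration] The kernel-doc above refers to the master component
registered by the snd_hda_intel driver. That side lives in sound-next and is
not part of this diff; as a rough, assumed sketch (all example_* names are
invented), its registration against the child added by
i915_audio_component_init() would look roughly like:

	#include <linux/component.h>
	#include <linux/pci.h>
	#include <drm/i915_component.h>

	static struct i915_audio_component example_acomp;

	static int example_master_bind(struct device *dev)
	{
		/* Invokes i915_audio_component_bind() above, which fills in
		 * example_acomp.ops and example_acomp.dev. */
		return component_bind_all(dev, &example_acomp);
	}

	static void example_master_unbind(struct device *dev)
	{
		component_unbind_all(dev, &example_acomp);
	}

	static const struct component_master_ops example_master_ops = {
		.bind	= example_master_bind,
		.unbind	= example_master_unbind,
	};

	static int example_match_i915(struct device *dev, void *data)
	{
		/* The child that i915 registers via component_add() is its
		 * PCI display controller. */
		return dev->bus == &pci_bus_type &&
		       (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY;
	}

	static int example_register_master(struct device *hda_dev)
	{
		struct component_match *match = NULL;

		component_match_add(hda_dev, &match, example_match_i915, NULL);
		return component_master_add_with_match(hda_dev,
						       &example_master_ops, match);
	}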
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e224820ea5a4..a340f51c790a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4500,7 +4500,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4500 ironlake_fdi_disable(crtc); 4500 ironlake_fdi_disable(crtc);
4501 4501
4502 ironlake_disable_pch_transcoder(dev_priv, pipe); 4502 ironlake_disable_pch_transcoder(dev_priv, pipe);
4503 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4504 4503
4505 if (HAS_PCH_CPT(dev)) { 4504 if (HAS_PCH_CPT(dev)) {
4506 /* disable TRANS_DP_CTL */ 4505 /* disable TRANS_DP_CTL */
@@ -4571,8 +4570,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4571 4570
4572 if (intel_crtc->config.has_pch_encoder) { 4571 if (intel_crtc->config.has_pch_encoder) {
4573 lpt_disable_pch_transcoder(dev_priv); 4572 lpt_disable_pch_transcoder(dev_priv);
4574 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4575 true);
4576 intel_ddi_fdi_disable(crtc); 4573 intel_ddi_fdi_disable(crtc);
4577 } 4574 }
4578 4575
@@ -11467,10 +11464,12 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
11467 to_intel_crtc(set->crtc)->config.has_audio) 11464 to_intel_crtc(set->crtc)->config.has_audio)
11468 config->mode_changed = true; 11465 config->mode_changed = true;
11469 11466
11470 /* Force mode sets for any infoframe stuff */ 11467 /*
11471 if (pipe_config->has_infoframe || 11468 * Note we have an issue here with infoframes: current code
11472 to_intel_crtc(set->crtc)->config.has_infoframe) 11469 * only updates them on the full mode set path per hw
11473 config->mode_changed = true; 11470 * requirements. So here we should be checking for any
11471 * required changes and forcing a mode set.
11472 */
11474 } 11473 }
11475 11474
11476 /* set_mode will free it in the mode_changed case */ 11475 /* set_mode will free it in the mode_changed case */
@@ -12957,11 +12956,7 @@ static void i915_disable_vga(struct drm_device *dev)
12957 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 12956 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
12958 udelay(300); 12957 udelay(300);
12959 12958
12960 /* 12959 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
12961 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
12962 * from S3 without preserving (some of?) the other bits.
12963 */
12964 I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
12965 POSTING_READ(vga_reg); 12960 POSTING_READ(vga_reg);
12966} 12961}
12967 12962
@@ -13046,8 +13041,6 @@ void intel_modeset_init(struct drm_device *dev)
13046 13041
13047 intel_shared_dpll_init(dev); 13042 intel_shared_dpll_init(dev);
13048 13043
13049 /* save the BIOS value before clobbering it */
13050 dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
13051 /* Just disable it once at startup */ 13044 /* Just disable it once at startup */
13052 i915_disable_vga(dev); 13045 i915_disable_vga(dev);
13053 intel_setup_outputs(dev); 13046 intel_setup_outputs(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index bfe359506377..7f8c6a66680a 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -283,7 +283,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
283 struct intel_connector *intel_connector = to_intel_connector(connector); 283 struct intel_connector *intel_connector = to_intel_connector(connector);
284 struct intel_dp *intel_dp = intel_connector->mst_port; 284 struct intel_dp *intel_dp = intel_connector->mst_port;
285 285
286 return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port); 286 return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
287} 287}
288 288
289static int 289static int
@@ -414,6 +414,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
414 intel_dp_add_properties(intel_dp, connector); 414 intel_dp_add_properties(intel_dp, connector);
415 415
416 drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); 416 drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
417 drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
418
417 drm_mode_connector_set_path_property(connector, pathprop); 419 drm_mode_connector_set_path_property(connector, pathprop);
418 drm_reinit_primary_mode_group(dev); 420 drm_reinit_primary_mode_group(dev);
419 mutex_lock(&dev->mode_config.mutex); 421 mutex_lock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 588b618ab668..1043a1e3a569 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -873,6 +873,8 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
873void intel_init_audio(struct drm_device *dev); 873void intel_init_audio(struct drm_device *dev);
874void intel_audio_codec_enable(struct intel_encoder *encoder); 874void intel_audio_codec_enable(struct intel_encoder *encoder);
875void intel_audio_codec_disable(struct intel_encoder *encoder); 875void intel_audio_codec_disable(struct intel_encoder *encoder);
876void i915_audio_component_init(struct drm_i915_private *dev_priv);
877void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
876 878
877/* intel_display.c */ 879/* intel_display.c */
878bool intel_has_pending_fb_unpin(struct drm_device *dev); 880bool intel_has_pending_fb_unpin(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f2183b554cbc..850cf7d6578c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -324,6 +324,7 @@ intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
324static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, 324static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
325 struct drm_fb_helper_crtc **crtcs, 325 struct drm_fb_helper_crtc **crtcs,
326 struct drm_display_mode **modes, 326 struct drm_display_mode **modes,
327 struct drm_fb_offset *offsets,
327 bool *enabled, int width, int height) 328 bool *enabled, int width, int height)
328{ 329{
329 struct drm_device *dev = fb_helper->dev; 330 struct drm_device *dev = fb_helper->dev;
@@ -332,6 +333,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
332 bool fallback = true; 333 bool fallback = true;
333 int num_connectors_enabled = 0; 334 int num_connectors_enabled = 0;
334 int num_connectors_detected = 0; 335 int num_connectors_detected = 0;
336 uint64_t conn_configured = 0, mask;
337 int pass = 0;
335 338
336 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), 339 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
337 GFP_KERNEL); 340 GFP_KERNEL);
@@ -339,7 +342,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
339 return false; 342 return false;
340 343
341 memcpy(save_enabled, enabled, dev->mode_config.num_connector); 344 memcpy(save_enabled, enabled, dev->mode_config.num_connector);
342 345 mask = (1 << fb_helper->connector_count) - 1;
346retry:
343 for (i = 0; i < fb_helper->connector_count; i++) { 347 for (i = 0; i < fb_helper->connector_count; i++) {
344 struct drm_fb_helper_connector *fb_conn; 348 struct drm_fb_helper_connector *fb_conn;
345 struct drm_connector *connector; 349 struct drm_connector *connector;
@@ -349,12 +353,19 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
349 fb_conn = fb_helper->connector_info[i]; 353 fb_conn = fb_helper->connector_info[i];
350 connector = fb_conn->connector; 354 connector = fb_conn->connector;
351 355
356 if (conn_configured & (1 << i))
357 continue;
358
359 if (pass == 0 && !connector->has_tile)
360 continue;
361
352 if (connector->status == connector_status_connected) 362 if (connector->status == connector_status_connected)
353 num_connectors_detected++; 363 num_connectors_detected++;
354 364
355 if (!enabled[i]) { 365 if (!enabled[i]) {
356 DRM_DEBUG_KMS("connector %s not enabled, skipping\n", 366 DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
357 connector->name); 367 connector->name);
368 conn_configured |= (1 << i);
358 continue; 369 continue;
359 } 370 }
360 371
@@ -373,6 +384,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
373 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", 384 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
374 connector->name); 385 connector->name);
375 enabled[i] = false; 386 enabled[i] = false;
387 conn_configured |= (1 << i);
376 continue; 388 continue;
377 } 389 }
378 390
@@ -400,8 +412,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
400 412
401 /* try for preferred next */ 413 /* try for preferred next */
402 if (!modes[i]) { 414 if (!modes[i]) {
403 DRM_DEBUG_KMS("looking for preferred mode on connector %s\n", 415 DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
404 connector->name); 416 connector->name, connector->has_tile);
405 modes[i] = drm_has_preferred_mode(fb_conn, width, 417 modes[i] = drm_has_preferred_mode(fb_conn, width,
406 height); 418 height);
407 } 419 }
@@ -444,6 +456,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
444 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); 456 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
445 457
446 fallback = false; 458 fallback = false;
459 conn_configured |= (1 << i);
460 }
461
462 if ((conn_configured & mask) != mask) {
463 pass++;
464 goto retry;
447 } 465 }
448 466
449 /* 467 /*
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c03d457a5150..14654d628ca4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -899,6 +899,17 @@ void intel_lvds_init(struct drm_device *dev)
899 int pipe; 899 int pipe;
900 u8 pin; 900 u8 pin;
901 901
902 /*
903 * Unlock registers and just leave them unlocked. Do this before
904 * checking quirk lists to avoid bogus WARNINGs.
905 */
906 if (HAS_PCH_SPLIT(dev)) {
907 I915_WRITE(PCH_PP_CONTROL,
908 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
909 } else {
910 I915_WRITE(PP_CONTROL,
911 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
912 }
902 if (!intel_lvds_supported(dev)) 913 if (!intel_lvds_supported(dev))
903 return; 914 return;
904 915
@@ -1097,17 +1108,6 @@ out:
1097 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) & 1108 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
1098 LVDS_A3_POWER_MASK; 1109 LVDS_A3_POWER_MASK;
1099 1110
1100 /*
1101 * Unlock registers and just
1102 * leave them unlocked
1103 */
1104 if (HAS_PCH_SPLIT(dev)) {
1105 I915_WRITE(PCH_PP_CONTROL,
1106 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
1107 } else {
1108 I915_WRITE(PP_CONTROL,
1109 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
1110 }
1111 lvds_connector->lid_notifier.notifier_call = intel_lid_notify; 1111 lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
1112 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { 1112 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
1113 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1113 DRM_DEBUG_KMS("lid notifier registration failed\n");
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 091860432f01..3ba446a69ecd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5455,6 +5455,20 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
5455 valleyview_cleanup_gt_powersave(dev); 5455 valleyview_cleanup_gt_powersave(dev);
5456} 5456}
5457 5457
5458static void gen6_suspend_rps(struct drm_device *dev)
5459{
5460 struct drm_i915_private *dev_priv = dev->dev_private;
5461
5462 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
5463
5464 /*
5465 * TODO: disable RPS interrupts on GEN9+ too once RPS support
5466 * is added for it.
5467 */
5468 if (INTEL_INFO(dev)->gen < 9)
5469 gen6_disable_rps_interrupts(dev);
5470}
5471
5458/** 5472/**
5459 * intel_suspend_gt_powersave - suspend PM work and helper threads 5473 * intel_suspend_gt_powersave - suspend PM work and helper threads
5460 * @dev: drm device 5474 * @dev: drm device
@@ -5470,14 +5484,7 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
5470 if (INTEL_INFO(dev)->gen < 6) 5484 if (INTEL_INFO(dev)->gen < 6)
5471 return; 5485 return;
5472 5486
5473 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 5487 gen6_suspend_rps(dev);
5474
5475 /*
5476 * TODO: disable RPS interrupts on GEN9+ too once RPS support
5477 * is added for it.
5478 */
5479 if (INTEL_INFO(dev)->gen < 9)
5480 gen6_disable_rps_interrupts(dev);
5481 5488
5482 /* Force GPU to min freq during suspend */ 5489 /* Force GPU to min freq during suspend */
5483 gen6_rps_idle(dev_priv); 5490 gen6_rps_idle(dev_priv);
@@ -5580,8 +5587,11 @@ void intel_reset_gt_powersave(struct drm_device *dev)
5580{ 5587{
5581 struct drm_i915_private *dev_priv = dev->dev_private; 5588 struct drm_i915_private *dev_priv = dev->dev_private;
5582 5589
5590 if (INTEL_INFO(dev)->gen < 6)
5591 return;
5592
5593 gen6_suspend_rps(dev);
5583 dev_priv->rps.enabled = false; 5594 dev_priv->rps.enabled = false;
5584 intel_enable_gt_powersave(dev);
5585} 5595}
5586 5596
5587static void ibx_init_clock_gating(struct drm_device *dev) 5597static void ibx_init_clock_gating(struct drm_device *dev)
@@ -5772,7 +5782,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
5772 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 5782 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5773 */ 5783 */
5774 I915_WRITE(GEN6_GT_MODE, 5784 I915_WRITE(GEN6_GT_MODE,
5775 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 5785 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
5776 5786
5777 ilk_init_lp_watermarks(dev); 5787 ilk_init_lp_watermarks(dev);
5778 5788
@@ -5970,7 +5980,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
5970 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 5980 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5971 */ 5981 */
5972 I915_WRITE(GEN7_GT_MODE, 5982 I915_WRITE(GEN7_GT_MODE,
5973 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 5983 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
5974 5984
5975 /* WaSampleCChickenBitEnable:hsw */ 5985 /* WaSampleCChickenBitEnable:hsw */
5976 I915_WRITE(HALF_SLICE_CHICKEN3, 5986 I915_WRITE(HALF_SLICE_CHICKEN3,
@@ -6071,7 +6081,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
6071 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 6081 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6072 */ 6082 */
6073 I915_WRITE(GEN7_GT_MODE, 6083 I915_WRITE(GEN7_GT_MODE,
6074 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 6084 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6075 6085
6076 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 6086 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
6077 snpcr &= ~GEN6_MBC_SNPCR_MASK; 6087 snpcr &= ~GEN6_MBC_SNPCR_MASK;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3cad32a80108..12a36f0ca53d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -373,12 +373,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
373 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 373 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
374 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 374 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
375 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 375 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
376 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
376 /* 377 /*
377 * TLB invalidate requires a post-sync write. 378 * TLB invalidate requires a post-sync write.
378 */ 379 */
379 flags |= PIPE_CONTROL_QW_WRITE; 380 flags |= PIPE_CONTROL_QW_WRITE;
380 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 381 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
381 382
383 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
384
382 /* Workaround: we must issue a pipe_control with CS-stall bit 385 /* Workaround: we must issue a pipe_control with CS-stall bit
383 * set before a pipe_control command that has the state cache 386 * set before a pipe_control command that has the state cache
384 * invalidate bit set. */ 387 * invalidate bit set. */
@@ -727,7 +730,7 @@ static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
727} 730}
728 731
729static int wa_add(struct drm_i915_private *dev_priv, 732static int wa_add(struct drm_i915_private *dev_priv,
730 const u32 addr, const u32 val, const u32 mask) 733 const u32 addr, const u32 mask, const u32 val)
731{ 734{
732 const u32 idx = dev_priv->workarounds.count; 735 const u32 idx = dev_priv->workarounds.count;
733 736
@@ -743,22 +746,25 @@ static int wa_add(struct drm_i915_private *dev_priv,
743 return 0; 746 return 0;
744} 747}
745 748
746#define WA_REG(addr, val, mask) { \ 749#define WA_REG(addr, mask, val) { \
747 const int r = wa_add(dev_priv, (addr), (val), (mask)); \ 750 const int r = wa_add(dev_priv, (addr), (mask), (val)); \
748 if (r) \ 751 if (r) \
749 return r; \ 752 return r; \
750 } 753 }
751 754
752#define WA_SET_BIT_MASKED(addr, mask) \ 755#define WA_SET_BIT_MASKED(addr, mask) \
753 WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff) 756 WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
754 757
755#define WA_CLR_BIT_MASKED(addr, mask) \ 758#define WA_CLR_BIT_MASKED(addr, mask) \
756 WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff) 759 WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
760
761#define WA_SET_FIELD_MASKED(addr, mask, value) \
762 WA_REG(addr, mask, _MASKED_FIELD(mask, value))
757 763
758#define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask) 764#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
759#define WA_CLR_BIT(addr, mask) WA_REG(addr, I915_READ(addr) & ~(mask), mask) 765#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
760 766
761#define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff) 767#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
762 768
763static int bdw_init_workarounds(struct intel_engine_cs *ring) 769static int bdw_init_workarounds(struct intel_engine_cs *ring)
764{ 770{
@@ -802,8 +808,9 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
802 * disable bit, which we don't touch here, but it's good 808 * disable bit, which we don't touch here, but it's good
803 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 809 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
804 */ 810 */
805 WA_SET_BIT_MASKED(GEN7_GT_MODE, 811 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
806 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 812 GEN6_WIZ_HASHING_MASK,
813 GEN6_WIZ_HASHING_16x4);
807 814
808 return 0; 815 return 0;
809} 816}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6aa3a81df485..8bf7bb4a12bc 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -31,7 +31,6 @@
31 31
32#include "i915_drv.h" 32#include "i915_drv.h"
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <drm/i915_powerwell.h>
35 34
36/** 35/**
37 * DOC: runtime pm 36 * DOC: runtime pm
@@ -50,8 +49,6 @@
50 * present for a given platform. 49 * present for a given platform.
51 */ 50 */
52 51
53static struct i915_power_domains *hsw_pwr;
54
55#define for_each_power_well(i, power_well, domain_mask, power_domains) \ 52#define for_each_power_well(i, power_well, domain_mask, power_domains) \
56 for (i = 0; \ 53 for (i = 0; \
57 i < (power_domains)->power_well_count && \ 54 i < (power_domains)->power_well_count && \
@@ -615,29 +612,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
615 vlv_power_sequencer_reset(dev_priv); 612 vlv_power_sequencer_reset(dev_priv);
616} 613}
617 614
618static void check_power_well_state(struct drm_i915_private *dev_priv,
619 struct i915_power_well *power_well)
620{
621 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
622
623 if (power_well->always_on || !i915.disable_power_well) {
624 if (!enabled)
625 goto mismatch;
626
627 return;
628 }
629
630 if (enabled != (power_well->count > 0))
631 goto mismatch;
632
633 return;
634
635mismatch:
636 I915_STATE_WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
637 power_well->name, power_well->always_on, enabled,
638 power_well->count, i915.disable_power_well);
639}
640
641/** 615/**
642 * intel_display_power_get - grab a power domain reference 616 * intel_display_power_get - grab a power domain reference
643 * @dev_priv: i915 device instance 617 * @dev_priv: i915 device instance
@@ -669,8 +643,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
669 power_well->ops->enable(dev_priv, power_well); 643 power_well->ops->enable(dev_priv, power_well);
670 power_well->hw_enabled = true; 644 power_well->hw_enabled = true;
671 } 645 }
672
673 check_power_well_state(dev_priv, power_well);
674 } 646 }
675 647
676 power_domains->domain_use_count[domain]++; 648 power_domains->domain_use_count[domain]++;
@@ -709,8 +681,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
709 power_well->hw_enabled = false; 681 power_well->hw_enabled = false;
710 power_well->ops->disable(dev_priv, power_well); 682 power_well->ops->disable(dev_priv, power_well);
711 } 683 }
712
713 check_power_well_state(dev_priv, power_well);
714 } 684 }
715 685
716 mutex_unlock(&power_domains->lock); 686 mutex_unlock(&power_domains->lock);
@@ -1098,10 +1068,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
1098 */ 1068 */
1099 if (IS_HASWELL(dev_priv->dev)) { 1069 if (IS_HASWELL(dev_priv->dev)) {
1100 set_power_wells(power_domains, hsw_power_wells); 1070 set_power_wells(power_domains, hsw_power_wells);
1101 hsw_pwr = power_domains;
1102 } else if (IS_BROADWELL(dev_priv->dev)) { 1071 } else if (IS_BROADWELL(dev_priv->dev)) {
1103 set_power_wells(power_domains, bdw_power_wells); 1072 set_power_wells(power_domains, bdw_power_wells);
1104 hsw_pwr = power_domains;
1105 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1073 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1106 set_power_wells(power_domains, chv_power_wells); 1074 set_power_wells(power_domains, chv_power_wells);
1107 } else if (IS_VALLEYVIEW(dev_priv->dev)) { 1075 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -1145,8 +1113,6 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
1145 * the power well is not enabled, so just enable it in case 1113 * the power well is not enabled, so just enable it in case
1146 * we're going to unload/reload. */ 1114 * we're going to unload/reload. */
1147 intel_display_set_init_power(dev_priv, true); 1115 intel_display_set_init_power(dev_priv, true);
1148
1149 hsw_pwr = NULL;
1150} 1116}
1151 1117
1152static void intel_power_domains_resume(struct drm_i915_private *dev_priv) 1118static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
@@ -1355,52 +1321,3 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
1355 pm_runtime_put_autosuspend(device); 1321 pm_runtime_put_autosuspend(device);
1356} 1322}
1357 1323
1358/* Display audio driver power well request */
1359int i915_request_power_well(void)
1360{
1361 struct drm_i915_private *dev_priv;
1362
1363 if (!hsw_pwr)
1364 return -ENODEV;
1365
1366 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1367 power_domains);
1368 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
1369 return 0;
1370}
1371EXPORT_SYMBOL_GPL(i915_request_power_well);
1372
1373/* Display audio driver power well release */
1374int i915_release_power_well(void)
1375{
1376 struct drm_i915_private *dev_priv;
1377
1378 if (!hsw_pwr)
1379 return -ENODEV;
1380
1381 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1382 power_domains);
1383 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
1384 return 0;
1385}
1386EXPORT_SYMBOL_GPL(i915_release_power_well);
1387
1388/*
1389 * Private interface for the audio driver to get CDCLK in kHz.
1390 *
1391 * Caller must request power well using i915_request_power_well() prior to
1392 * making the call.
1393 */
1394int i915_get_cdclk_freq(void)
1395{
1396 struct drm_i915_private *dev_priv;
1397
1398 if (!hsw_pwr)
1399 return -ENODEV;
1400
1401 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1402 power_domains);
1403
1404 return intel_ddi_get_cdclk_freq(dev_priv);
1405}
1406EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);