author     Dave Airlie <airlied@redhat.com>  2015-03-09 05:41:15 -0400
committer  Dave Airlie <airlied@redhat.com>  2015-03-09 05:41:15 -0400
commit     8dd0eb3566711d81bfbe2b4421b33f0dd723cec4 (patch)
tree       e5567779a5b874d895761d37774a43100c2e77a9 /drivers/gpu/drm
parent     d136dfeec84bfe3e4238bacd23f21e161268deac (diff)
parent     f89fe1ffe698a6bb7671ebf99f5bb918fda4cf35 (diff)
Merge tag 'drm-intel-next-2015-02-27' of git://anongit.freedesktop.org/drm-intel into drm-next
- Y tiling support for scanout from Tvrtko & Damien
- Remove more UMS support
- some small prep patches for OLR removal from John Harrison
- first few patches for dynamic pagetable allocation from Ben Widawsky,
  rebased by tons of other people
- DRRS support patches (Sonika & Vandana)
- fbc patches from Paulo
- make sure our vblank callbacks aren't called when the pipes are off
- various patches all over

* tag 'drm-intel-next-2015-02-27' of git://anongit.freedesktop.org/drm-intel: (61 commits)
  drm/i915: Update DRIVER_DATE to 20150227
  drm/i915: Clarify obj->map_and_fenceable
  drm/i915/skl: Allow Y (and Yf) frame buffer creation
  drm/i915/skl: Update watermarks for Y tiling
  drm/i915/skl: Updated watermark programming
  drm/i915/skl: Adjust get_plane_config() to support Yb/Yf tiling
  drm/i915/skl: Teach pin_and_fence_fb_obj() about Y tiling constraints
  drm/i915/skl: Adjust intel_fb_align_height() for Yb/Yf tiling
  drm/i915/skl: Allow scanning out Y and Yf fbs
  drm/i915/skl: Add new displayable tiling formats
  drm/i915: Remove DRIVER_MODESET checks from modeset code
  drm/i915: Remove regfile code&data for UMS suspend/resume
  drm/i915: Remove DRIVER_MODESET checks from gem code
  drm/i915: Remove DRIVER_MODESET checks in the gpu reset code
  drm/i915: Remove DRIVER_MODESET checks from suspend/resume code
  drm/i915: Remove DRIVER_MODESET checks in load/unload/close code
  drm/i915: fix a printk format
  drm/i915: Add media rc6 residency file to sysfs
  drm/i915: Add missing description to parameter in alloc_pt_range
  drm/i915: Removed the read of RP_STATE_CAP from sysfs/debugfs functions
  ...
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_irq.c                   |  56
-rw-r--r--  drivers/gpu/drm/i915/Makefile               |   3
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c      |  74
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c         | 121
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c             | 179
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             | 162
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             | 152
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             |  30
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c     |  12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  97
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c         | 362
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h         |  33
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c      |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             |  90
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h             |  62
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c         | 215
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c           |  64
-rw-r--r--  drivers/gpu/drm/i915/i915_ums.c             | 552
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c           |   7
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h           |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c            |  46
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        | 345
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c             | 135
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h            |  25
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c            |  57
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c            | 115
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h            |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c           |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c       |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c        |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c             | 119
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     |  89
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     |  11
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c         |  34
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c         |  19
35 files changed, 1577 insertions(+), 1706 deletions(-)
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c9f5453f20e7..c8a34476570a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -276,7 +276,6 @@ static void vblank_disable_fn(unsigned long arg)
 void drm_vblank_cleanup(struct drm_device *dev)
 {
 	int crtc;
-	unsigned long irqflags;
 
 	/* Bail if the driver didn't call drm_vblank_init() */
 	if (dev->num_crtcs == 0)
@@ -285,11 +284,10 @@ void drm_vblank_cleanup(struct drm_device *dev)
 	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
 		struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
 
-		del_timer_sync(&vblank->disable_timer);
+		WARN_ON(vblank->enabled &&
+			drm_core_check_feature(dev, DRIVER_MODESET));
 
-		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-		vblank_disable_and_save(dev, crtc);
-		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+		del_timer_sync(&vblank->disable_timer);
 	}
 
 	kfree(dev->vblank);
@@ -475,17 +473,23 @@ int drm_irq_uninstall(struct drm_device *dev)
 	dev->irq_enabled = false;
 
 	/*
-	 * Wake up any waiters so they don't hang.
+	 * Wake up any waiters so they don't hang. This is just to paper over
+	 * isssues for UMS drivers which aren't in full control of their
+	 * vblank/irq handling. KMS drivers must ensure that vblanks are all
+	 * disabled when uninstalling the irq handler.
 	 */
 	if (dev->num_crtcs) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for (i = 0; i < dev->num_crtcs; i++) {
 			struct drm_vblank_crtc *vblank = &dev->vblank[i];
 
+			if (!vblank->enabled)
+				continue;
+
+			WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
+
+			vblank_disable_and_save(dev, i);
 			wake_up(&vblank->queue);
-			vblank->enabled = false;
-			vblank->last =
-				dev->driver->get_vblank_counter(dev, i);
 		}
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 	}
@@ -1233,6 +1237,38 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
 EXPORT_SYMBOL(drm_crtc_vblank_off);
 
 /**
+ * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
+ * @crtc: CRTC in question
+ *
+ * Drivers can use this function to reset the vblank state to off at load time.
+ * Drivers should use this together with the drm_crtc_vblank_off() and
+ * drm_crtc_vblank_on() functions. The difference compared to
+ * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
+ * and hence doesn't need to call any driver hooks.
+ */
+void drm_crtc_vblank_reset(struct drm_crtc *drm_crtc)
+{
+	struct drm_device *dev = drm_crtc->dev;
+	unsigned long irqflags;
+	int crtc = drm_crtc_index(drm_crtc);
+	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	/*
+	 * Prevent subsequent drm_vblank_get() from enabling the vblank
+	 * interrupt by bumping the refcount.
+	 */
+	if (!vblank->inmodeset) {
+		atomic_inc(&vblank->refcount);
+		vblank->inmodeset = 1;
+	}
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+	WARN_ON(!list_empty(&dev->vblank_event_list));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_reset);
+
+/**
  * drm_vblank_on - enable vblank events on a CRTC
  * @dev: DRM device
  * @crtc: CRTC in question
@@ -1653,7 +1689,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	struct timeval tvblank;
 	unsigned long irqflags;
 
-	if (!dev->num_crtcs)
+	if (WARN_ON_ONCE(!dev->num_crtcs))
 		return false;
 
 	if (WARN_ON(crtc >= dev->num_crtcs))
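
The new drm_crtc_vblank_reset() helper added above fits between driver load and the first modeset. A minimal sketch of the intended call pattern for a hypothetical KMS driver (the example_* names are made up; only the drm_vblank_init() and drm_crtc_vblank_*() calls come from the DRM core):

static int example_load(struct drm_device *dev, struct drm_crtc *crtc)
{
	int ret;

	ret = drm_vblank_init(dev, 1);	/* one CRTC in this example */
	if (ret)
		return ret;

	/*
	 * The pipe is off at load time, so mark vblanks off too. Unlike
	 * drm_crtc_vblank_off(), this saves no counter and calls no
	 * driver hooks.
	 */
	drm_crtc_vblank_reset(crtc);
	return 0;
}

static void example_crtc_enable(struct drm_crtc *crtc)
{
	/* ... program and enable the pipe ... */
	drm_crtc_vblank_on(crtc);	/* vblank processing may resume */
}

static void example_crtc_disable(struct drm_crtc *crtc)
{
	drm_crtc_vblank_off(crtc);	/* quiesce vblanks before pipe teardown */
	/* ... disable the pipe ... */
}
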
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f025e7fae253..d3ebaf204408 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -87,8 +87,7 @@ i915-y += dvo_ch7017.o \
 i915-y += i915_vgpu.o
 
 # legacy horrors
-i915-y += i915_dma.o \
-	  i915_ums.o
+i915-y += i915_dma.o
 
 obj-$(CONFIG_DRM_I915) += i915.o
 
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 806e812340d0..9a6da3536ae5 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -818,24 +818,26 @@ static bool valid_reg(const u32 *table, int count, u32 addr)
 	return false;
 }
 
-static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+static u32 *vmap_batch(struct drm_i915_gem_object *obj,
+		       unsigned start, unsigned len)
 {
 	int i;
 	void *addr = NULL;
 	struct sg_page_iter sg_iter;
+	int first_page = start >> PAGE_SHIFT;
+	int last_page = (len + start + 4095) >> PAGE_SHIFT;
+	int npages = last_page - first_page;
 	struct page **pages;
 
-	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+	pages = drm_malloc_ab(npages, sizeof(*pages));
 	if (pages == NULL) {
 		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
 		goto finish;
 	}
 
 	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		pages[i] = sg_page_iter_page(&sg_iter);
-		i++;
-	}
+	for_each_sg_page(obj->pages->sgl, &sg_iter, npages, first_page)
+		pages[i++] = sg_page_iter_page(&sg_iter);
 
 	addr = vmap(pages, i, 0, PAGE_KERNEL);
 	if (addr == NULL) {
@@ -855,61 +857,61 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
 	       u32 batch_start_offset,
 	       u32 batch_len)
 {
-	int ret = 0;
 	int needs_clflush = 0;
-	u32 *src_base, *dest_base = NULL;
-	u32 *src_addr, *dest_addr;
-	u32 offset = batch_start_offset / sizeof(*dest_addr);
-	u32 end = batch_start_offset + batch_len;
+	void *src_base, *src;
+	void *dst = NULL;
+	int ret;
 
-	if (end > dest_obj->base.size || end > src_obj->base.size)
+	if (batch_len > dest_obj->base.size ||
+	    batch_len + batch_start_offset > src_obj->base.size)
 		return ERR_PTR(-E2BIG);
 
 	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
 	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+		DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
 		return ERR_PTR(ret);
 	}
 
-	src_base = vmap_batch(src_obj);
+	src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
 	if (!src_base) {
 		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
 		ret = -ENOMEM;
 		goto unpin_src;
 	}
 
-	src_addr = src_base + offset;
-
-	if (needs_clflush)
-		drm_clflush_virt_range((char *)src_addr, batch_len);
+	ret = i915_gem_object_get_pages(dest_obj);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
+		goto unmap_src;
+	}
+	i915_gem_object_pin_pages(dest_obj);
 
 	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
 	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+		DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
 		goto unmap_src;
 	}
 
-	dest_base = vmap_batch(dest_obj);
-	if (!dest_base) {
+	dst = vmap_batch(dest_obj, 0, batch_len);
+	if (!dst) {
 		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+		i915_gem_object_unpin_pages(dest_obj);
 		ret = -ENOMEM;
 		goto unmap_src;
 	}
 
-	dest_addr = dest_base + offset;
-
-	if (batch_start_offset != 0)
-		memset((u8 *)dest_base, 0, batch_start_offset);
+	src = src_base + offset_in_page(batch_start_offset);
+	if (needs_clflush)
+		drm_clflush_virt_range(src, batch_len);
 
-	memcpy(dest_addr, src_addr, batch_len);
-	memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+	memcpy(dst, src, batch_len);
 
 unmap_src:
 	vunmap(src_base);
 unpin_src:
 	i915_gem_object_unpin_pages(src_obj);
 
-	return ret ? ERR_PTR(ret) : dest_base;
+	return ret ? ERR_PTR(ret) : dst;
 }
 
 /**
@@ -1046,34 +1048,26 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 		    u32 batch_len,
 		    bool is_master)
 {
-	int ret = 0;
 	u32 *cmd, *batch_base, *batch_end;
 	struct drm_i915_cmd_descriptor default_desc = { 0 };
 	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
-
-	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
-	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
-		return -1;
-	}
+	int ret = 0;
 
 	batch_base = copy_batch(shadow_batch_obj, batch_obj,
 				batch_start_offset, batch_len);
 	if (IS_ERR(batch_base)) {
 		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
-		i915_gem_object_ggtt_unpin(shadow_batch_obj);
 		return PTR_ERR(batch_base);
 	}
 
-	cmd = batch_base + (batch_start_offset / sizeof(*cmd));
-
 	/*
 	 * We use the batch length as size because the shadow object is as
 	 * large or larger and copy_batch() will write MI_NOPs to the extra
 	 * space. Parsing should be faster in some cases this way.
 	 */
-	batch_end = cmd + (batch_len / sizeof(*batch_end));
+	batch_end = batch_base + (batch_len / sizeof(*batch_end));
 
+	cmd = batch_base;
 	while (cmd < batch_end) {
 		const struct drm_i915_cmd_descriptor *desc;
 		u32 length;
@@ -1132,7 +1126,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 	}
 
 	vunmap(batch_base);
-	i915_gem_object_ggtt_unpin(shadow_batch_obj);
+	i915_gem_object_unpin_pages(shadow_batch_obj);
 
 	return ret;
 }
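
The reworked vmap_batch() now maps only the pages that actually cover [start, start + len) instead of the whole object. A standalone sketch of the rounding arithmetic, with made-up values for illustration (the 4095 constant in the patch corresponds to a PAGE_SHIFT of 12):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned start = 0x1804;	/* hypothetical batch_start_offset */
	unsigned len = 0x1000;		/* hypothetical batch_len */

	int first_page = start >> PAGE_SHIFT;			/* 1 */
	int last_page = (len + start + 4095) >> PAGE_SHIFT;	/* 3, exclusive */
	int npages = last_page - first_page;			/* 2 pages to vmap */

	printf("pages [%d, %d): %d pages cover bytes [0x%x, 0x%x)\n",
	       first_page, last_page, npages, start, start + len);
	return 0;
}

Together with the offset_in_page(batch_start_offset) adjustment in copy_batch(), this keeps the shadow-batch copy correct while mapping far fewer pages for large batch objects.
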
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 164fa8286fb9..94b3984dbea0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -139,10 +139,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
 		if (vma->pin_count > 0)
 			pin_count++;
-	seq_printf(m, " (pinned x %d)", pin_count);
+	}
+	seq_printf(m, " (pinned x %d)", pin_count);
 	if (obj->pin_display)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -580,7 +581,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 		seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
 			   work->flip_queued_vblank,
 			   work->flip_ready_vblank,
-			   drm_vblank_count(dev, crtc->pipe));
+			   drm_crtc_vblank_count(&crtc->base));
 		if (work->enable_stall_check)
 			seq_puts(m, "Stall check enabled, ");
 		else
@@ -2185,7 +2186,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		seq_puts(m, "aliasing PPGTT:\n");
-		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
 
 		ppgtt->debug_dump(ppgtt, m);
 	}
@@ -4191,7 +4192,7 @@ i915_max_freq_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rp_state_cap, hw_max, hw_min;
+	u32 hw_max, hw_min;
 	int ret;
 
 	if (INTEL_INFO(dev)->gen < 6)
@@ -4208,18 +4209,10 @@ i915_max_freq_set(void *data, u64 val)
 	/*
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
-	if (IS_VALLEYVIEW(dev)) {
-		val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(dev_priv, val);
 
-		hw_max = dev_priv->rps.max_freq;
-		hw_min = dev_priv->rps.min_freq;
-	} else {
-		val = intel_freq_opcode(dev_priv, val);
-
-		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.max_freq;
-		hw_min = (rp_state_cap >> 16) & 0xff;
-	}
+	hw_max = dev_priv->rps.max_freq;
+	hw_min = dev_priv->rps.min_freq;
 
 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4266,7 +4259,7 @@ i915_min_freq_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rp_state_cap, hw_max, hw_min;
+	u32 hw_max, hw_min;
 	int ret;
 
 	if (INTEL_INFO(dev)->gen < 6)
@@ -4283,18 +4276,10 @@ i915_min_freq_set(void *data, u64 val)
 	/*
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
-	if (IS_VALLEYVIEW(dev)) {
-		val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(dev_priv, val);
 
-		hw_max = dev_priv->rps.max_freq;
-		hw_min = dev_priv->rps.min_freq;
-	} else {
-		val = intel_freq_opcode(dev_priv, val);
-
-		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.max_freq;
-		hw_min = (rp_state_cap >> 16) & 0xff;
-	}
+	hw_max = dev_priv->rps.max_freq;
+	hw_min = dev_priv->rps.min_freq;
 
 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4370,6 +4355,85 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
 			i915_cache_sharing_get, i915_cache_sharing_set,
 			"%llu\n");
 
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
+
+	if (INTEL_INFO(dev)->gen < 9)
+		return -ENODEV;
+
+	seq_puts(m, "SSEU Device Info\n");
+	seq_printf(m, "  Available Slice Total: %u\n",
+		   INTEL_INFO(dev)->slice_total);
+	seq_printf(m, "  Available Subslice Total: %u\n",
+		   INTEL_INFO(dev)->subslice_total);
+	seq_printf(m, "  Available Subslice Per Slice: %u\n",
+		   INTEL_INFO(dev)->subslice_per_slice);
+	seq_printf(m, "  Available EU Total: %u\n",
+		   INTEL_INFO(dev)->eu_total);
+	seq_printf(m, "  Available EU Per Subslice: %u\n",
+		   INTEL_INFO(dev)->eu_per_subslice);
+	seq_printf(m, "  Has Slice Power Gating: %s\n",
+		   yesno(INTEL_INFO(dev)->has_slice_pg));
+	seq_printf(m, "  Has Subslice Power Gating: %s\n",
+		   yesno(INTEL_INFO(dev)->has_subslice_pg));
+	seq_printf(m, "  Has EU Power Gating: %s\n",
+		   yesno(INTEL_INFO(dev)->has_eu_pg));
+
+	seq_puts(m, "SSEU Device Status\n");
+	if (IS_SKYLAKE(dev)) {
+		const int s_max = 3, ss_max = 4;
+		int s, ss;
+		u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+
+		s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
+		s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
+		s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
+		eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
+		eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
+		eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
+		eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
+		eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
+		eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
+		eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+			     GEN9_PGCTL_SSA_EU19_ACK |
+			     GEN9_PGCTL_SSA_EU210_ACK |
+			     GEN9_PGCTL_SSA_EU311_ACK;
+		eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+			     GEN9_PGCTL_SSB_EU19_ACK |
+			     GEN9_PGCTL_SSB_EU210_ACK |
+			     GEN9_PGCTL_SSB_EU311_ACK;
+
+		for (s = 0; s < s_max; s++) {
+			if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+				/* skip disabled slice */
+				continue;
+
+			s_tot++;
+			ss_per = INTEL_INFO(dev)->subslice_per_slice;
+			ss_tot += ss_per;
+			for (ss = 0; ss < ss_max; ss++) {
+				unsigned int eu_cnt;
+
+				eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
+						       eu_mask[ss%2]);
+				eu_tot += eu_cnt;
+				eu_per = max(eu_per, eu_cnt);
+			}
+		}
+	}
+	seq_printf(m, "  Enabled Slice Total: %u\n", s_tot);
+	seq_printf(m, "  Enabled Subslice Total: %u\n", ss_tot);
+	seq_printf(m, "  Enabled Subslice Per Slice: %u\n", ss_per);
+	seq_printf(m, "  Enabled EU Total: %u\n", eu_tot);
+	seq_printf(m, "  Enabled EU Per Subslice: %u\n", eu_per);
+
+	return 0;
+}
+
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
@@ -4483,6 +4547,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
4483 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4547 {"i915_dp_mst_info", i915_dp_mst_info, 0},
4484 {"i915_wa_registers", i915_wa_registers, 0}, 4548 {"i915_wa_registers", i915_wa_registers, 0},
4485 {"i915_ddb_info", i915_ddb_info, 0}, 4549 {"i915_ddb_info", i915_ddb_info, 0},
4550 {"i915_sseu_status", i915_sseu_status, 0},
4486}; 4551};
4487#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4552#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4488 4553
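
The per-subslice EU count in the new i915_sseu_status() relies on each ack bit representing a pair of EUs, hence the factor of two around hweight32(). A userspace sketch with an invented register value (the real masks are the GEN9_PGCTL_* definitions this series adds to i915_reg.h):

#include <stdio.h>

/* Stand-in for the kernel's hweight32() helper. */
static unsigned int hweight32(unsigned int x)
{
	return (unsigned int)__builtin_popcount(x);
}

int main(void)
{
	unsigned int eu_reg = 0x15;	/* hypothetical acks: EU pairs 0, 2, 4 */
	unsigned int eu_mask = 0xff;	/* illustrative stand-in for the SSA/SSB masks */

	/* Each set ack bit accounts for two EUs. */
	unsigned int eu_cnt = 2 * hweight32(eu_reg & eu_mask);

	printf("enabled EUs in this subslice: %u\n", eu_cnt);	/* prints 6 */
	return 0;
}
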
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 5804aa5f9df0..053e1788f578 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -606,6 +606,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 		}
 	}
 
+	/* Initialize slice/subslice/EU info */
 	if (IS_CHERRYVIEW(dev)) {
 		u32 fuse, mask_eu;
 
@@ -615,7 +616,90 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 				 CHV_FGT_EU_DIS_SS1_R0_MASK |
 				 CHV_FGT_EU_DIS_SS1_R1_MASK);
 		info->eu_total = 16 - hweight32(mask_eu);
+	} else if (IS_SKYLAKE(dev)) {
+		const int s_max = 3, ss_max = 4, eu_max = 8;
+		int s, ss;
+		u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+		fuse2 = I915_READ(GEN8_FUSE2);
+		s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+			   GEN8_F2_S_ENA_SHIFT;
+		ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+			     GEN9_F2_SS_DIS_SHIFT;
+
+		eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
+		eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
+		eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
+
+		info->slice_total = hweight32(s_enable);
+		/*
+		 * The subslice disable field is global, i.e. it applies
+		 * to each of the enabled slices.
+		 */
+		info->subslice_per_slice = ss_max - hweight32(ss_disable);
+		info->subslice_total = info->slice_total *
+				       info->subslice_per_slice;
+
+		/*
+		 * Iterate through enabled slices and subslices to
+		 * count the total enabled EU.
+		 */
+		for (s = 0; s < s_max; s++) {
+			if (!(s_enable & (0x1 << s)))
+				/* skip disabled slice */
+				continue;
+
+			for (ss = 0; ss < ss_max; ss++) {
+				u32 n_disabled;
+
+				if (ss_disable & (0x1 << ss))
+					/* skip disabled subslice */
+					continue;
+
+				n_disabled = hweight8(eu_disable[s] >>
+						      (ss * eu_max));
+
+				/*
+				 * Record which subslice(s) has(have) 7 EUs. we
+				 * can tune the hash used to spread work among
+				 * subslices if they are unbalanced.
+				 */
+				if (eu_max - n_disabled == 7)
+					info->subslice_7eu[s] |= 1 << ss;
+
+				info->eu_total += eu_max - n_disabled;
+			}
+		}
+
+		/*
+		 * SKL is expected to always have a uniform distribution
+		 * of EU across subslices with the exception that any one
+		 * EU in any one subslice may be fused off for die
+		 * recovery.
+		 */
+		info->eu_per_subslice = info->subslice_total ?
+					DIV_ROUND_UP(info->eu_total,
+						     info->subslice_total) : 0;
+		/*
+		 * SKL supports slice power gating on devices with more than
+		 * one slice, and supports EU power gating on devices with
+		 * more than one EU pair per subslice.
+		 */
+		info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
+		info->has_subslice_pg = 0;
+		info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
 	}
+	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+			 info->has_slice_pg ? "y" : "n");
+	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+			 info->has_subslice_pg ? "y" : "n");
+	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+			 info->has_eu_pg ? "y" : "n");
 }
 
 /**
@@ -638,17 +722,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	info = (struct intel_device_info *) flags;
 
-	/* Refuse to load on gen6+ without kms enabled. */
-	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
-		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
-		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
-		return -ENODEV;
-	}
-
-	/* UMS needs agp support. */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
-		return -EINVAL;
-
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
@@ -718,20 +791,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto out_regs;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		/* WARNING: Apparently we must kick fbdev drivers before vgacon,
-		 * otherwise the vga fbdev driver falls over. */
-		ret = i915_kick_out_firmware_fb(dev_priv);
-		if (ret) {
-			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-			goto out_gtt;
-		}
+	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
+	 * otherwise the vga fbdev driver falls over. */
+	ret = i915_kick_out_firmware_fb(dev_priv);
+	if (ret) {
+		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+		goto out_gtt;
+	}
 
-		ret = i915_kick_out_vgacon(dev_priv);
-		if (ret) {
-			DRM_ERROR("failed to remove conflicting VGA console\n");
-			goto out_gtt;
-		}
+	ret = i915_kick_out_vgacon(dev_priv);
+	if (ret) {
+		DRM_ERROR("failed to remove conflicting VGA console\n");
+		goto out_gtt;
 	}
 
 	pci_set_master(dev->pdev);
@@ -835,12 +906,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	intel_power_domains_init(dev_priv);
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_load_modeset_init(dev);
-		if (ret < 0) {
-			DRM_ERROR("failed to init modeset\n");
-			goto out_power_well;
-		}
+	ret = i915_load_modeset_init(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to init modeset\n");
+		goto out_power_well;
 	}
 
 	/*
@@ -929,28 +998,25 @@ int i915_driver_unload(struct drm_device *dev)
 
 	acpi_video_unregister();
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		intel_fbdev_fini(dev);
+	intel_fbdev_fini(dev);
 
 	drm_vblank_cleanup(dev);
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_modeset_cleanup(dev);
+	intel_modeset_cleanup(dev);
 
-		/*
-		 * free the memory space allocated for the child device
-		 * config parsed from VBT
-		 */
-		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-			kfree(dev_priv->vbt.child_dev);
-			dev_priv->vbt.child_dev = NULL;
-			dev_priv->vbt.child_dev_num = 0;
-		}
-
-		vga_switcheroo_unregister_client(dev->pdev);
-		vga_client_register(dev->pdev, NULL, NULL, NULL);
+	/*
+	 * free the memory space allocated for the child device
+	 * config parsed from VBT
+	 */
+	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+		kfree(dev_priv->vbt.child_dev);
+		dev_priv->vbt.child_dev = NULL;
+		dev_priv->vbt.child_dev_num = 0;
 	}
 
+	vga_switcheroo_unregister_client(dev->pdev);
+	vga_client_register(dev->pdev, NULL, NULL, NULL);
+
 	/* Free error state after interrupts are fully disabled. */
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	i915_destroy_error_state(dev);
@@ -960,17 +1026,15 @@ int i915_driver_unload(struct drm_device *dev)
 
 	intel_opregion_fini(dev);
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		/* Flush any outstanding unpin_work. */
-		flush_workqueue(dev_priv->wq);
+	/* Flush any outstanding unpin_work. */
+	flush_workqueue(dev_priv->wq);
 
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_cleanup_ringbuffer(dev);
-		i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
-		i915_gem_context_fini(dev);
-		mutex_unlock(&dev->struct_mutex);
-		i915_gem_cleanup_stolen(dev);
-	}
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
+	i915_gem_context_fini(dev);
+	mutex_unlock(&dev->struct_mutex);
+	i915_gem_cleanup_stolen(dev);
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
@@ -1031,8 +1095,7 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
 	i915_gem_release(dev, file);
 	mutex_unlock(&dev->struct_mutex);
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		intel_modeset_preclose(dev, file);
+	intel_modeset_preclose(dev, file);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
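
The SKL branch of intel_device_info_runtime_init() above derives all SSEU totals from three kinds of fuse fields: a slice-enable mask, a global subslice-disable mask, and per-slice EU-disable bytes. A self-contained sketch with invented fuse values, showing how the totals fall out of the popcount arithmetic:

#include <stdio.h>

static int hw(unsigned int x) { return __builtin_popcount(x); }

int main(void)
{
	const int s_max = 3, ss_max = 4, eu_max = 8;
	/* Hypothetical fuses: slices 0-1 enabled, subslice 3 disabled
	 * everywhere, one EU fused off in slice 0 / subslice 0. */
	unsigned int s_enable = 0x3;
	unsigned int ss_disable = 0x8;
	unsigned int eu_disable[3] = { 0x01, 0x00, 0x00 };
	int s, ss, eu_total = 0;

	int slice_total = hw(s_enable);				/* 2 */
	int subslice_per_slice = ss_max - hw(ss_disable);	/* 3 */
	int subslice_total = slice_total * subslice_per_slice;	/* 6 */

	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (1 << s)))
			continue;	/* skip disabled slice */
		for (ss = 0; ss < ss_max; ss++) {
			if (ss_disable & (1 << ss))
				continue;	/* skip disabled subslice */
			eu_total += eu_max -
				hw((eu_disable[s] >> (ss * eu_max)) & 0xff);
		}
	}

	/* 2 slices x 3 subslices x 8 EUs, minus the one fused EU = 47 */
	printf("slices=%d subslices=%d eu_total=%d\n",
	       slice_total, subslice_total, eu_total);
	return 0;
}
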
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4badb23f2c58..875b1b7964c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -568,6 +568,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	pci_power_t opregion_target_state;
+	int error;
 
 	/* ignore lid events during suspend */
 	mutex_lock(&dev_priv->modeset_restore_lock);
@@ -582,37 +583,32 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	pci_save_state(dev->pdev);
 
-	/* If KMS is active, we do the leavevt stuff here */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		int error;
-
-		error = i915_gem_suspend(dev);
-		if (error) {
-			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume might fail\n");
-			return error;
-		}
+	error = i915_gem_suspend(dev);
+	if (error) {
+		dev_err(&dev->pdev->dev,
+			"GEM idle failed, resume might fail\n");
+		return error;
+	}
 
-		intel_suspend_gt_powersave(dev);
+	intel_suspend_gt_powersave(dev);
 
-		/*
-		 * Disable CRTCs directly since we want to preserve sw state
-		 * for _thaw. Also, power gate the CRTC power wells.
-		 */
-		drm_modeset_lock_all(dev);
-		for_each_crtc(dev, crtc)
-			intel_crtc_control(crtc, false);
-		drm_modeset_unlock_all(dev);
+	/*
+	 * Disable CRTCs directly since we want to preserve sw state
+	 * for _thaw. Also, power gate the CRTC power wells.
+	 */
+	drm_modeset_lock_all(dev);
+	for_each_crtc(dev, crtc)
+		intel_crtc_control(crtc, false);
+	drm_modeset_unlock_all(dev);
 
-		intel_dp_mst_suspend(dev);
+	intel_dp_mst_suspend(dev);
 
-		intel_runtime_pm_disable_interrupts(dev_priv);
-		intel_hpd_cancel_work(dev_priv);
+	intel_runtime_pm_disable_interrupts(dev_priv);
+	intel_hpd_cancel_work(dev_priv);
 
-		intel_suspend_encoders(dev_priv);
+	intel_suspend_encoders(dev_priv);
 
-		intel_suspend_hw(dev);
-	}
+	intel_suspend_hw(dev);
 
 	i915_gem_suspend_gtt_mappings(dev);
 
@@ -684,53 +680,48 @@ static int i915_drm_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_restore_gtt_mappings(dev);
+	mutex_unlock(&dev->struct_mutex);
 
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);
 
-	/* KMS EnterVT equivalent */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_init_pch_refclk(dev);
-		drm_mode_config_reset(dev);
+	intel_init_pch_refclk(dev);
+	drm_mode_config_reset(dev);
 
-		mutex_lock(&dev->struct_mutex);
-		if (i915_gem_init_hw(dev)) {
-			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
-		}
-		mutex_unlock(&dev->struct_mutex);
+	mutex_lock(&dev->struct_mutex);
+	if (i915_gem_init_hw(dev)) {
+		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+	}
+	mutex_unlock(&dev->struct_mutex);
 
-		/* We need working interrupts for modeset enabling ... */
-		intel_runtime_pm_enable_interrupts(dev_priv);
+	/* We need working interrupts for modeset enabling ... */
+	intel_runtime_pm_enable_interrupts(dev_priv);
 
-		intel_modeset_init_hw(dev);
+	intel_modeset_init_hw(dev);
 
-		spin_lock_irq(&dev_priv->irq_lock);
-		if (dev_priv->display.hpd_irq_setup)
-			dev_priv->display.hpd_irq_setup(dev);
-		spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
 
-		drm_modeset_lock_all(dev);
-		intel_modeset_setup_hw_state(dev, true);
-		drm_modeset_unlock_all(dev);
+	drm_modeset_lock_all(dev);
+	intel_modeset_setup_hw_state(dev, true);
+	drm_modeset_unlock_all(dev);
 
-		intel_dp_mst_resume(dev);
+	intel_dp_mst_resume(dev);
 
-		/*
-		 * ... but also need to make sure that hotplug processing
-		 * doesn't cause havoc. Like in the driver load code we don't
-		 * bother with the tiny race here where we might loose hotplug
-		 * notifications.
-		 * */
-		intel_hpd_init(dev_priv);
-		/* Config may have changed between suspend and resume */
-		drm_helper_hpd_irq_event(dev);
-	}
+	/*
+	 * ... but also need to make sure that hotplug processing
+	 * doesn't cause havoc. Like in the driver load code we don't
+	 * bother with the tiny race here where we might loose hotplug
+	 * notifications.
+	 * */
+	intel_hpd_init(dev_priv);
+	/* Config may have changed between suspend and resume */
+	drm_helper_hpd_irq_event(dev);
 
 	intel_opregion_init(dev);
 
@@ -866,38 +857,35 @@ int i915_reset(struct drm_device *dev)
 	 * was running at the time of the reset (i.e. we weren't VT
 	 * switched away).
 	 */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
-		dev_priv->gpu_error.reload_in_reset = true;
 
-		ret = i915_gem_init_hw(dev);
+	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+	dev_priv->gpu_error.reload_in_reset = true;
 
-		dev_priv->gpu_error.reload_in_reset = false;
+	ret = i915_gem_init_hw(dev);
 
-		mutex_unlock(&dev->struct_mutex);
-		if (ret) {
-			DRM_ERROR("Failed hw init on reset %d\n", ret);
-			return ret;
-		}
+	dev_priv->gpu_error.reload_in_reset = false;
 
-		/*
-		 * FIXME: This races pretty badly against concurrent holders of
-		 * ring interrupts. This is possible since we've started to drop
-		 * dev->struct_mutex in select places when waiting for the gpu.
-		 */
-
-		/*
-		 * rps/rc6 re-init is necessary to restore state lost after the
-		 * reset and the re-install of gt irqs. Skip for ironlake per
-		 * previous concerns that it doesn't respond well to some forms
-		 * of re-init after reset.
-		 */
-		if (INTEL_INFO(dev)->gen > 5)
-			intel_enable_gt_powersave(dev);
-	} else {
-		mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret) {
+		DRM_ERROR("Failed hw init on reset %d\n", ret);
+		return ret;
 	}
 
+	/*
+	 * FIXME: This races pretty badly against concurrent holders of
+	 * ring interrupts. This is possible since we've started to drop
+	 * dev->struct_mutex in select places when waiting for the gpu.
+	 */
+
+	/*
+	 * rps/rc6 re-init is necessary to restore state lost after the
+	 * reset and the re-install of gt irqs. Skip for ironlake per
+	 * previous concerns that it doesn't respond well to some forms
+	 * of re-init after reset.
+	 */
+	if (INTEL_INFO(dev)->gen > 5)
+		intel_enable_gt_powersave(dev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 39f7e5676c9b..2903090f25e5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150214"
+#define DRIVER_DATE		"20150227"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -693,7 +693,18 @@ struct intel_device_info {
 	int trans_offsets[I915_MAX_TRANSCODERS];
 	int palette_offsets[I915_MAX_PIPES];
 	int cursor_offsets[I915_MAX_PIPES];
-	unsigned int eu_total;
+
+	/* Slice/subslice/EU info */
+	u8 slice_total;
+	u8 subslice_total;
+	u8 subslice_per_slice;
+	u8 eu_total;
+	u8 eu_per_subslice;
+	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+	u8 subslice_7eu[3];
+	u8 has_slice_pg:1;
+	u8 has_subslice_pg:1;
+	u8 has_eu_pg:1;
 };
 
 #undef DEFINE_FLAG
@@ -889,150 +900,21 @@ struct intel_gmbus {
 };
 
 struct i915_suspend_saved_registers {
-	u8 saveLBB;
-	u32 saveDSPACNTR;
-	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
-	u32 savePIPEACONF;
-	u32 savePIPEBCONF;
-	u32 savePIPEASRC;
-	u32 savePIPEBSRC;
-	u32 saveFPA0;
-	u32 saveFPA1;
-	u32 saveDPLL_A;
-	u32 saveDPLL_A_MD;
-	u32 saveHTOTAL_A;
-	u32 saveHBLANK_A;
-	u32 saveHSYNC_A;
-	u32 saveVTOTAL_A;
-	u32 saveVBLANK_A;
-	u32 saveVSYNC_A;
-	u32 saveBCLRPAT_A;
-	u32 saveTRANSACONF;
-	u32 saveTRANS_HTOTAL_A;
-	u32 saveTRANS_HBLANK_A;
-	u32 saveTRANS_HSYNC_A;
-	u32 saveTRANS_VTOTAL_A;
-	u32 saveTRANS_VBLANK_A;
-	u32 saveTRANS_VSYNC_A;
-	u32 savePIPEASTAT;
-	u32 saveDSPASTRIDE;
-	u32 saveDSPASIZE;
-	u32 saveDSPAPOS;
-	u32 saveDSPAADDR;
-	u32 saveDSPASURF;
-	u32 saveDSPATILEOFF;
-	u32 savePFIT_PGM_RATIOS;
-	u32 saveBLC_HIST_CTL;
-	u32 saveBLC_PWM_CTL;
-	u32 saveBLC_PWM_CTL2;
-	u32 saveBLC_CPU_PWM_CTL;
-	u32 saveBLC_CPU_PWM_CTL2;
-	u32 saveFPB0;
-	u32 saveFPB1;
-	u32 saveDPLL_B;
-	u32 saveDPLL_B_MD;
-	u32 saveHTOTAL_B;
-	u32 saveHBLANK_B;
-	u32 saveHSYNC_B;
-	u32 saveVTOTAL_B;
-	u32 saveVBLANK_B;
-	u32 saveVSYNC_B;
-	u32 saveBCLRPAT_B;
-	u32 saveTRANSBCONF;
-	u32 saveTRANS_HTOTAL_B;
-	u32 saveTRANS_HBLANK_B;
-	u32 saveTRANS_HSYNC_B;
-	u32 saveTRANS_VTOTAL_B;
-	u32 saveTRANS_VBLANK_B;
-	u32 saveTRANS_VSYNC_B;
-	u32 savePIPEBSTAT;
-	u32 saveDSPBSTRIDE;
-	u32 saveDSPBSIZE;
-	u32 saveDSPBPOS;
-	u32 saveDSPBADDR;
-	u32 saveDSPBSURF;
-	u32 saveDSPBTILEOFF;
-	u32 saveVGA0;
-	u32 saveVGA1;
-	u32 saveVGA_PD;
-	u32 saveVGACNTRL;
-	u32 saveADPA;
 	u32 saveLVDS;
 	u32 savePP_ON_DELAYS;
 	u32 savePP_OFF_DELAYS;
-	u32 saveDVOA;
-	u32 saveDVOB;
-	u32 saveDVOC;
 	u32 savePP_ON;
 	u32 savePP_OFF;
 	u32 savePP_CONTROL;
 	u32 savePP_DIVISOR;
-	u32 savePFIT_CONTROL;
-	u32 save_palette_a[256];
-	u32 save_palette_b[256];
 	u32 saveFBC_CONTROL;
-	u32 saveIER;
-	u32 saveIIR;
-	u32 saveIMR;
-	u32 saveDEIER;
-	u32 saveDEIMR;
-	u32 saveGTIER;
-	u32 saveGTIMR;
-	u32 saveFDI_RXA_IMR;
-	u32 saveFDI_RXB_IMR;
 	u32 saveCACHE_MODE_0;
 	u32 saveMI_ARB_STATE;
 	u32 saveSWF0[16];
 	u32 saveSWF1[16];
 	u32 saveSWF2[3];
-	u8 saveMSR;
-	u8 saveSR[8];
-	u8 saveGR[25];
-	u8 saveAR_INDEX;
-	u8 saveAR[21];
-	u8 saveDACMASK;
-	u8 saveCR[37];
 	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
-	u32 saveCURACNTR;
-	u32 saveCURAPOS;
-	u32 saveCURABASE;
-	u32 saveCURBCNTR;
-	u32 saveCURBPOS;
-	u32 saveCURBBASE;
-	u32 saveCURSIZE;
-	u32 saveDP_B;
-	u32 saveDP_C;
-	u32 saveDP_D;
-	u32 savePIPEA_GMCH_DATA_M;
-	u32 savePIPEB_GMCH_DATA_M;
-	u32 savePIPEA_GMCH_DATA_N;
-	u32 savePIPEB_GMCH_DATA_N;
-	u32 savePIPEA_DP_LINK_M;
-	u32 savePIPEB_DP_LINK_M;
-	u32 savePIPEA_DP_LINK_N;
-	u32 savePIPEB_DP_LINK_N;
-	u32 saveFDI_RXA_CTL;
-	u32 saveFDI_TXA_CTL;
-	u32 saveFDI_RXB_CTL;
-	u32 saveFDI_TXB_CTL;
-	u32 savePFA_CTL_1;
-	u32 savePFB_CTL_1;
-	u32 savePFA_WIN_SZ;
-	u32 savePFB_WIN_SZ;
-	u32 savePFA_WIN_POS;
-	u32 savePFB_WIN_POS;
-	u32 savePCH_DREF_CONTROL;
-	u32 saveDISP_ARB_CTL;
-	u32 savePIPEA_DATA_M1;
-	u32 savePIPEA_DATA_N1;
-	u32 savePIPEA_LINK_M1;
-	u32 savePIPEA_LINK_N1;
-	u32 savePIPEB_DATA_M1;
-	u32 savePIPEB_DATA_N1;
-	u32 savePIPEB_LINK_M1;
-	u32 savePIPEB_LINK_N1;
-	u32 saveMCHBAR_RENDER_STANDBY;
 	u32 savePCH_PORT_HOTPLUG;
 	u16 saveGCDGMBUS;
 };
@@ -1455,6 +1337,7 @@ struct intel_vbt_data {
 	bool edp_initialized;
 	bool edp_support;
 	int edp_bpp;
+	bool edp_low_vswing;
 	struct edp_power_seq edp_pps;
 
 	struct {
@@ -2144,8 +2027,9 @@ struct drm_i915_gem_request {
 	/** Position in the ringbuffer of the end of the whole request */
 	u32 tail;
 
-	/** Context related to this request */
+	/** Context and ring buffer related to this request */
 	struct intel_context *ctx;
+	struct intel_ringbuffer *ringbuf;
 
 	/** Batch buffer related to this request if any */
 	struct drm_i915_gem_object *batch_obj;
@@ -3123,10 +3007,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
-/* i915_ums.c */
-void i915_save_display_reg(struct drm_device *dev);
-void i915_restore_display_reg(struct drm_device *dev);
-
 /* i915_sysfs.c */
 void i915_setup_sysfs(struct drm_device *dev_priv);
 void i915_teardown_sysfs(struct drm_device *dev_priv);
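
The new subslice_7eu[] field in intel_device_info packs one bit per subslice, per slice; a set bit flags a subslice that ended up with 7 rather than 8 EUs. An illustrative read of the bitfield (values invented):

#include <stdio.h>

int main(void)
{
	unsigned char subslice_7eu[3] = { 0x02, 0x00, 0x00 };	/* slice 0, subslice 1 */
	int slice = 0, ss = 1;

	if ((subslice_7eu[slice] >> ss) & 1)
		printf("slice %d subslice %d has 7 EUs\n", slice, ss);
	return 0;
}

Per the comment in intel_device_info_runtime_init(), scheduling code could use this to tune the hash that spreads work across unbalanced subslices.
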
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 61134ab0be81..1b2a1eb3aafc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2763,7 +2763,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
-		struct intel_ringbuffer *ringbuf;
 
 		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
@@ -2774,23 +2773,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 		trace_i915_gem_request_retire(request);
 
-		/* This is one of the few common intersection points
-		 * between legacy ringbuffer submission and execlists:
-		 * we need to tell them apart in order to find the correct
-		 * ringbuffer to which the request belongs to.
-		 */
-		if (i915.enable_execlists) {
-			struct intel_context *ctx = request->ctx;
-			ringbuf = ctx->engine[ring->id].ringbuf;
-		} else
-			ringbuf = ring->buffer;
-
 		/* We know the GPU must have read the request to have
 		 * sent us the seqno + interrupt, so use the position
 		 * of tail of the request to update the last known position
 		 * of the GPU head.
 		 */
-		ringbuf->last_retired_head = request->postfix;
+		request->ringbuf->last_retired_head = request->postfix;
 
 		i915_gem_free_request(request);
 	}
@@ -4238,7 +4226,7 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 	fenceable = (vma->node.size == fence_size &&
 		     (vma->node.start & (fence_alignment - 1)) == 0);
 
-	mappable = (vma->node.start + obj->base.size <=
+	mappable = (vma->node.start + fence_size <=
 		    dev_priv->gtt.mappable_end);
 
 	obj->map_and_fenceable = mappable && fenceable;
@@ -4613,10 +4601,6 @@ i915_gem_suspend(struct drm_device *dev)
 
 	i915_gem_retire_requests(dev);
 
-	/* Under UMS, be paranoid and evict. */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_gem_evict_everything(dev);
-
 	i915_gem_stop_ringbuffers(dev);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -4973,18 +4957,8 @@ i915_gem_load(struct drm_device *dev)
 			  i915_gem_idle_work_handler);
 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
-	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
-		I915_WRITE(MI_ARB_STATE,
-			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
-	}
-
 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-	/* Old X drivers will take 0-2 for front, back, depth buffers */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		dev_priv->fence_reg_start = 3;
-
 	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
 		dev_priv->num_fence_regs = 32;
 	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
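
Caching the ringbuffer pointer in the request (the new request->ringbuf field from i915_drv.h) is what lets the retire loop above drop its execlists-vs-legacy branch. A toy model, not kernel code, of that simplification:

struct ringbuf {
	unsigned int last_retired_head;
};

struct request {
	struct ringbuf *ringbuf;	/* recorded once when the request is created */
	unsigned int postfix;
};

static void retire(struct request *req)
{
	/*
	 * Previously the retire loop had to ask "execlists or legacy?"
	 * to find the right ringbuffer; now the request carries the
	 * answer with it.
	 */
	req->ringbuf->last_retired_head = req->postfix;
}
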
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8603bf48d3ee..70346b0028f9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -296,11 +296,15 @@ void i915_gem_context_reset(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	/* In execlists mode we will unreference the context when the execlist
-	 * queue is cleared and the requests destroyed.
-	 */
-	if (i915.enable_execlists)
+	if (i915.enable_execlists) {
+		struct intel_context *ctx;
+
+		list_for_each_entry(ctx, &dev_priv->context_list, link) {
+			intel_lr_context_reset(dev, ctx);
+		}
+
 		return;
+	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b773368fc62c..85a6adaba258 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1076,16 +1076,15 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 			  struct drm_i915_gem_object *batch_obj,
 			  u32 batch_start_offset,
 			  u32 batch_len,
-			  bool is_master,
-			  u32 *flags)
+			  bool is_master)
 {
 	struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
 	struct drm_i915_gem_object *shadow_batch_obj;
-	bool need_reloc = false;
+	struct i915_vma *vma;
 	int ret;
 
 	shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
-						   batch_obj->base.size);
+						   PAGE_ALIGN(batch_len));
 	if (IS_ERR(shadow_batch_obj))
 		return shadow_batch_obj;
@@ -1095,40 +1094,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 				      batch_start_offset,
 				      batch_len,
 				      is_master);
-	if (ret) {
-		if (ret == -EACCES)
-			return batch_obj;
-	} else {
-		struct i915_vma *vma;
+	if (ret)
+		goto err;
 
-		memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+	if (ret)
+		goto err;
 
-		vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
-		vma->exec_entry = shadow_exec_entry;
-		vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
-		drm_gem_object_reference(&shadow_batch_obj->base);
-		i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
-		list_add_tail(&vma->exec_list, &eb->vmas);
+	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
-		shadow_batch_obj->base.pending_read_domains =
-			batch_obj->base.pending_read_domains;
+	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+	vma->exec_entry = shadow_exec_entry;
+	vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+	drm_gem_object_reference(&shadow_batch_obj->base);
+	list_add_tail(&vma->exec_list, &eb->vmas);
 
-		/*
-		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-		 * bit from MI_BATCH_BUFFER_START commands issued in the
-		 * dispatch_execbuffer implementations. We specifically
-		 * don't want that set when the command parser is
-		 * enabled.
-		 *
-		 * FIXME: with aliasing ppgtt, buffers that should only
-		 * be in ggtt still end up in the aliasing ppgtt. remove
-		 * this check when that is fixed.
-		 */
-		if (USES_FULL_PPGTT(dev))
-			*flags |= I915_DISPATCH_SECURE;
-	}
+	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
 
-	return ret ? ERR_PTR(ret) : shadow_batch_obj;
+	return shadow_batch_obj;
+
+err:
+	if (ret == -EACCES) /* unhandled chained batch */
+		return batch_obj;
+	else
+		return ERR_PTR(ret);
 }
 
 int
@@ -1138,7 +1127,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1138 struct drm_i915_gem_execbuffer2 *args, 1127 struct drm_i915_gem_execbuffer2 *args,
1139 struct list_head *vmas, 1128 struct list_head *vmas,
1140 struct drm_i915_gem_object *batch_obj, 1129 struct drm_i915_gem_object *batch_obj,
1141 u64 exec_start, u32 flags) 1130 u64 exec_start, u32 dispatch_flags)
1142{ 1131{
1143 struct drm_clip_rect *cliprects = NULL; 1132 struct drm_clip_rect *cliprects = NULL;
1144 struct drm_i915_private *dev_priv = dev->dev_private; 1133 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1266,19 +1255,19 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1266 1255
1267 ret = ring->dispatch_execbuffer(ring, 1256 ret = ring->dispatch_execbuffer(ring,
1268 exec_start, exec_len, 1257 exec_start, exec_len,
1269 flags); 1258 dispatch_flags);
1270 if (ret) 1259 if (ret)
1271 goto error; 1260 goto error;
1272 } 1261 }
1273 } else { 1262 } else {
1274 ret = ring->dispatch_execbuffer(ring, 1263 ret = ring->dispatch_execbuffer(ring,
1275 exec_start, exec_len, 1264 exec_start, exec_len,
1276 flags); 1265 dispatch_flags);
1277 if (ret) 1266 if (ret)
1278 return ret; 1267 return ret;
1279 } 1268 }
1280 1269
1281 trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags); 1270 trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
1282 1271
1283 i915_gem_execbuffer_move_to_active(vmas, ring); 1272 i915_gem_execbuffer_move_to_active(vmas, ring);
1284 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 1273 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1353,7 +1342,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1353 struct i915_address_space *vm; 1342 struct i915_address_space *vm;
1354 const u32 ctx_id = i915_execbuffer2_get_context_id(*args); 1343 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1355 u64 exec_start = args->batch_start_offset; 1344 u64 exec_start = args->batch_start_offset;
1356 u32 flags; 1345 u32 dispatch_flags;
1357 int ret; 1346 int ret;
1358 bool need_relocs; 1347 bool need_relocs;
1359 1348
@@ -1364,15 +1353,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1364 if (ret) 1353 if (ret)
1365 return ret; 1354 return ret;
1366 1355
1367 flags = 0; 1356 dispatch_flags = 0;
1368 if (args->flags & I915_EXEC_SECURE) { 1357 if (args->flags & I915_EXEC_SECURE) {
1369 if (!file->is_master || !capable(CAP_SYS_ADMIN)) 1358 if (!file->is_master || !capable(CAP_SYS_ADMIN))
1370 return -EPERM; 1359 return -EPERM;
1371 1360
1372 flags |= I915_DISPATCH_SECURE; 1361 dispatch_flags |= I915_DISPATCH_SECURE;
1373 } 1362 }
1374 if (args->flags & I915_EXEC_IS_PINNED) 1363 if (args->flags & I915_EXEC_IS_PINNED)
1375 flags |= I915_DISPATCH_PINNED; 1364 dispatch_flags |= I915_DISPATCH_PINNED;
1376 1365
1377 if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) { 1366 if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
1378 DRM_DEBUG("execbuf with unknown ring: %d\n", 1367 DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1494,12 +1483,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1494 batch_obj, 1483 batch_obj,
1495 args->batch_start_offset, 1484 args->batch_start_offset,
1496 args->batch_len, 1485 args->batch_len,
1497 file->is_master, 1486 file->is_master);
1498 &flags);
1499 if (IS_ERR(batch_obj)) { 1487 if (IS_ERR(batch_obj)) {
1500 ret = PTR_ERR(batch_obj); 1488 ret = PTR_ERR(batch_obj);
1501 goto err; 1489 goto err;
1502 } 1490 }
1491
1492 /*
1493 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1494 * bit from MI_BATCH_BUFFER_START commands issued in the
1495 * dispatch_execbuffer implementations. We specifically
1496 * don't want that set when the command parser is
1497 * enabled.
1498 *
1499 * FIXME: with aliasing ppgtt, buffers that should only
1500 * be in ggtt still end up in the aliasing ppgtt. remove
1501 * this check when that is fixed.
1502 */
1503 if (USES_FULL_PPGTT(dev))
1504 dispatch_flags |= I915_DISPATCH_SECURE;
1505
1506 exec_start = 0;
1503 } 1507 }
1504 1508
1505 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; 1509 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
@@ -1507,7 +1511,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1507 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 1511 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1508 * batch" bit. Hence we need to pin secure batches into the global gtt. 1512 * batch" bit. Hence we need to pin secure batches into the global gtt.
1509 * hsw should have this fixed, but bdw mucks it up again. */ 1513 * hsw should have this fixed, but bdw mucks it up again. */
1510 if (flags & I915_DISPATCH_SECURE) { 1514 if (dispatch_flags & I915_DISPATCH_SECURE) {
1511 /* 1515 /*
1512 * So on first glance it looks freaky that we pin the batch here 1516 * So on first glance it looks freaky that we pin the batch here
1513 * outside of the reservation loop. But: 1517 * outside of the reservation loop. But:
@@ -1527,7 +1531,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1527 exec_start += i915_gem_obj_offset(batch_obj, vm); 1531 exec_start += i915_gem_obj_offset(batch_obj, vm);
1528 1532
1529 ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args, 1533 ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
1530 &eb->vmas, batch_obj, exec_start, flags); 1534 &eb->vmas, batch_obj, exec_start,
1535 dispatch_flags);
1531 1536
1532 /* 1537 /*
1533 * FIXME: We crucially rely upon the active tracking for the (ppgtt) 1538 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
@@ -1535,7 +1540,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1535 * needs to be adjusted to also track the ggtt batch vma properly as 1540 * needs to be adjusted to also track the ggtt batch vma properly as
1536 * active. 1541 * active.
1537 */ 1542 */
1538 if (flags & I915_DISPATCH_SECURE) 1543 if (dispatch_flags & I915_DISPATCH_SECURE)
1539 i915_gem_object_ggtt_unpin(batch_obj); 1544 i915_gem_object_ggtt_unpin(batch_obj);
1540err: 1545err:
1541 /* the request owns the ref now */ 1546 /* the request owns the ref now */
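[Editor's note] The reworked parse path above converges all failures on one err label, with -EACCES (an unhandled chained batch) treated as "fall back to dispatching the unparsed batch" rather than as an error. A minimal sketch of that convention follows; parse_cmds() and pin_shadow() are hypothetical helpers standing in for the driver's calls, not real API.

#include <linux/err.h>

struct drm_i915_gem_object;

int parse_cmds(struct drm_i915_gem_object *batch,
	       struct drm_i915_gem_object *shadow);	/* hypothetical */
int pin_shadow(struct drm_i915_gem_object *shadow);	/* hypothetical */

static struct drm_i915_gem_object *
parse_or_fall_back(struct drm_i915_gem_object *batch,
		   struct drm_i915_gem_object *shadow)
{
	int ret;

	ret = parse_cmds(batch, shadow);
	if (ret)
		goto err;

	ret = pin_shadow(shadow);
	if (ret)
		goto err;

	return shadow;		/* caller dispatches the shadow copy */

err:
	if (ret == -EACCES)	/* unhandled chained batch: run original */
		return batch;
	return ERR_PTR(ret);	/* anything else is a real failure */
}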
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e54b2a0ca921..bd95776c3144 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -142,7 +142,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 	return has_aliasing_ppgtt ? 1 : 0;
 }
 
-
 static void ppgtt_bind_vma(struct i915_vma *vma,
 			   enum i915_cache_level cache_level,
 			   u32 flags);
@@ -279,6 +278,100 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
+static void unmap_and_free_pt(struct i915_page_table_entry *pt, struct drm_device *dev)
+{
+	if (WARN_ON(!pt->page))
+		return;
+	__free_page(pt->page);
+	kfree(pt);
+}
+
+static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
+{
+	struct i915_page_table_entry *pt;
+
+	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+	if (!pt)
+		return ERR_PTR(-ENOMEM);
+
+	pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!pt->page) {
+		kfree(pt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return pt;
+}
+
+/**
+ * alloc_pt_range() - Allocate multiple page tables
+ * @pd:		The page directory which will have at least @count entries
+ *		available to point to the allocated page tables.
+ * @pde:	First page directory entry for which we are allocating.
+ * @count:	Number of pages to allocate.
+ * @dev:	DRM device.
+ *
+ * Allocates multiple page table pages and sets the appropriate entries in the
+ * page table structure within the page directory. Function cleans up after
+ * itself on any failures.
+ *
+ * Return: 0 if allocation succeeded.
+ */
+static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
+			  struct drm_device *dev)
+{
+	int i, ret;
+
+	/* 512 is the max page tables per page_directory on any platform. */
+	if (WARN_ON(pde + count > GEN6_PPGTT_PD_ENTRIES))
+		return -EINVAL;
+
+	for (i = pde; i < pde + count; i++) {
+		struct i915_page_table_entry *pt = alloc_pt_single(dev);
+
+		if (IS_ERR(pt)) {
+			ret = PTR_ERR(pt);
+			goto err_out;
+		}
+		WARN(pd->page_table[i],
+		     "Leaking page directory entry %d (%p)\n",
+		     i, pd->page_table[i]);
+		pd->page_table[i] = pt;
+	}
+
+	return 0;
+
+err_out:
+	while (i-- > pde)
+		unmap_and_free_pt(pd->page_table[i], dev);
+	return ret;
+}
+
+static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
+{
+	if (pd->page) {
+		__free_page(pd->page);
+		kfree(pd);
+	}
+}
+
+static struct i915_page_directory_entry *alloc_pd_single(void)
+{
+	struct i915_page_directory_entry *pd;
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!pd->page) {
+		kfree(pd);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return pd;
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
 			  uint64_t val)
@@ -311,7 +404,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
 
 	for (i = used_pd - 1; i >= 0; i--) {
-		dma_addr_t addr = ppgtt->pd_dma_addr[i];
+		dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
 		ret = gen8_write_pdp(ring, i, addr);
 		if (ret)
 			return ret;
@@ -338,7 +431,24 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 				      I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
-		struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
+		struct i915_page_directory_entry *pd;
+		struct i915_page_table_entry *pt;
+		struct page *page_table;
+
+		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+			continue;
+
+		pd = ppgtt->pdp.page_directory[pdpe];
+
+		if (WARN_ON(!pd->page_table[pde]))
+			continue;
+
+		pt = pd->page_table[pde];
+
+		if (WARN_ON(!pt->page))
+			continue;
+
+		page_table = pt->page;
 
 		last_pte = pte + num_entries;
 		if (last_pte > GEN8_PTES_PER_PAGE)
@@ -382,8 +492,13 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
 			break;
 
-		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+		if (pt_vaddr == NULL) {
+			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
+			struct i915_page_table_entry *pt = pd->page_table[pde];
+			struct page *page_table = pt->page;
+
+			pt_vaddr = kmap_atomic(page_table);
+		}
 
 		pt_vaddr[pte] =
 			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -407,29 +522,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 	}
 }
 
-static void gen8_free_page_tables(struct page **pt_pages)
+static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
 {
 	int i;
 
-	if (pt_pages == NULL)
+	if (!pd->page)
 		return;
 
-	for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
-		if (pt_pages[i])
-			__free_pages(pt_pages[i], 0);
+	for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
+		if (WARN_ON(!pd->page_table[i]))
+			continue;
+
+		unmap_and_free_pt(pd->page_table[i], dev);
+		pd->page_table[i] = NULL;
+	}
 }
 
-static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
 	int i;
 
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
-		kfree(ppgtt->gen8_pt_pages[i]);
-		kfree(ppgtt->gen8_pt_dma_addr[i]);
-	}
+		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+			continue;
 
-	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+	}
 }
 
 static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
@@ -440,14 +559,23 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 		/* TODO: In the future we'll support sparse mappings, so this
 		 * will have to change. */
-		if (!ppgtt->pd_dma_addr[i])
+		if (!ppgtt->pdp.page_directory[i]->daddr)
 			continue;
 
-		pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+		pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
 			       PCI_DMA_BIDIRECTIONAL);
 
 		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+			struct i915_page_table_entry *pt;
+			dma_addr_t addr;
+
+			if (WARN_ON(!pd->page_table[j]))
+				continue;
+
+			pt = pd->page_table[j];
+			addr = pt->daddr;
+
 			if (addr)
 				pci_unmap_page(hwdev, addr, PAGE_SIZE,
 					       PCI_DMA_BIDIRECTIONAL);
@@ -464,86 +592,47 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	gen8_ppgtt_free(ppgtt);
 }
 
-static struct page **__gen8_alloc_page_tables(void)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
 {
-	struct page **pt_pages;
-	int i;
-
-	pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
-	if (!pt_pages)
-		return ERR_PTR(-ENOMEM);
-
-	for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
-		pt_pages[i] = alloc_page(GFP_KERNEL);
-		if (!pt_pages[i])
-			goto bail;
-	}
-
-	return pt_pages;
-
-bail:
-	gen8_free_page_tables(pt_pages);
-	kfree(pt_pages);
-	return ERR_PTR(-ENOMEM);
-}
-
-static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
-					   const int max_pdp)
-{
-	struct page **pt_pages[GEN8_LEGACY_PDPES];
 	int i, ret;
 
-	for (i = 0; i < max_pdp; i++) {
-		pt_pages[i] = __gen8_alloc_page_tables();
-		if (IS_ERR(pt_pages[i])) {
-			ret = PTR_ERR(pt_pages[i]);
+	for (i = 0; i < ppgtt->num_pd_pages; i++) {
+		ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
+				     0, GEN8_PDES_PER_PAGE, ppgtt->base.dev);
+		if (ret)
 			goto unwind_out;
-		}
 	}
 
-	/* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
-	 * "atomic" - for cleanup purposes.
-	 */
-	for (i = 0; i < max_pdp; i++)
-		ppgtt->gen8_pt_pages[i] = pt_pages[i];
-
 	return 0;
 
 unwind_out:
-	while (i--) {
-		gen8_free_page_tables(pt_pages[i]);
-		kfree(pt_pages[i]);
-	}
+	while (i--)
+		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
 
-	return ret;
+	return -ENOMEM;
 }
 
-static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+						const int max_pdp)
 {
 	int i;
 
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
-		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
-						     sizeof(dma_addr_t),
-						     GFP_KERNEL);
-		if (!ppgtt->gen8_pt_dma_addr[i])
-			return -ENOMEM;
+	for (i = 0; i < max_pdp; i++) {
+		ppgtt->pdp.page_directory[i] = alloc_pd_single();
+		if (IS_ERR(ppgtt->pdp.page_directory[i]))
+			goto unwind_out;
 	}
 
-	return 0;
-}
-
-static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
-						const int max_pdp)
-{
-	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
-	if (!ppgtt->pd_pages)
-		return -ENOMEM;
-
-	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+	ppgtt->num_pd_pages = max_pdp;
 	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
 
 	return 0;
+
+unwind_out:
+	while (i--)
+		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+
+	return -ENOMEM;
 }
 
 static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
@@ -555,18 +644,16 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
-	if (ret) {
-		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
-		return ret;
-	}
+	ret = gen8_ppgtt_allocate_page_tables(ppgtt);
+	if (ret)
+		goto err_out;
 
 	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
 
-	ret = gen8_ppgtt_allocate_dma(ppgtt);
-	if (ret)
-		gen8_ppgtt_free(ppgtt);
+	return 0;
 
+err_out:
+	gen8_ppgtt_free(ppgtt);
 	return ret;
 }
 
@@ -577,14 +664,14 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
-			       &ppgtt->pd_pages[pd], 0,
+			       ppgtt->pdp.page_directory[pd]->page, 0,
 			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
 	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
 	if (ret)
 		return ret;
 
-	ppgtt->pd_dma_addr[pd] = pd_addr;
+	ppgtt->pdp.page_directory[pd]->daddr = pd_addr;
 
 	return 0;
 }
@@ -594,17 +681,18 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
 					const int pt)
 {
 	dma_addr_t pt_addr;
-	struct page *p;
+	struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
+	struct i915_page_table_entry *ptab = pdir->page_table[pt];
+	struct page *p = ptab->page;
 	int ret;
 
-	p = ppgtt->gen8_pt_pages[pd][pt];
 	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
 			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
 	if (ret)
 		return ret;
 
-	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+	ptab->daddr = pt_addr;
 
 	return 0;
 }
@@ -657,10 +745,12 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	 * will never need to touch the PDEs again.
 	 */
 	for (i = 0; i < max_pdp; i++) {
+		struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
 		gen8_ppgtt_pde_t *pd_vaddr;
-		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
+		pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
 		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+			struct i915_page_table_entry *pt = pd->page_table[j];
+			dma_addr_t addr = pt->daddr;
 			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
 						      I915_CACHE_LLC);
 		}
@@ -703,14 +793,15 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
 	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
-		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+		ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
 
 	seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
-		   ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+		   ppgtt->pd.pd_offset,
+		   ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
 	for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
 		u32 expected;
 		gen6_gtt_pte_t *pt_vaddr;
-		dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
 		pd_entry = readl(pd_addr + pde);
 		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 
@@ -721,7 +812,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 				   expected);
 		seq_printf(m, "\tPDE: %x\n", pd_entry);
 
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
 		for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
 			unsigned long va =
 				(pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
@@ -754,13 +845,13 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 	uint32_t pd_entry;
 	int i;
 
-	WARN_ON(ppgtt->pd_offset & 0x3f);
+	WARN_ON(ppgtt->pd.pd_offset & 0x3f);
 	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
-		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+		ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 		dma_addr_t pt_addr;
 
-		pt_addr = ppgtt->pt_dma_addr[i];
+		pt_addr = ppgtt->pd.page_table[i]->daddr;
 		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
 		pd_entry |= GEN6_PDE_VALID;
 
@@ -771,9 +862,9 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
-	BUG_ON(ppgtt->pd_offset & 0x3f);
+	BUG_ON(ppgtt->pd.pd_offset & 0x3f);
 
-	return (ppgtt->pd_offset / 64) << 16;
+	return (ppgtt->pd.pd_offset / 64) << 16;
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
@@ -936,7 +1027,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 		if (last_pte > I915_PPGTT_PT_ENTRIES)
 			last_pte = I915_PPGTT_PT_ENTRIES;
 
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
 
 		for (i = first_pte; i < last_pte; i++)
 			pt_vaddr[i] = scratch_pte;
@@ -965,7 +1056,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
 
 		pt_vaddr[act_pte] =
 			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -986,22 +1077,20 @@ static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 {
 	int i;
 
-	if (ppgtt->pt_dma_addr) {
-		for (i = 0; i < ppgtt->num_pd_entries; i++)
-			pci_unmap_page(ppgtt->base.dev->pdev,
-				       ppgtt->pt_dma_addr[i],
-				       4096, PCI_DMA_BIDIRECTIONAL);
-	}
+	for (i = 0; i < ppgtt->num_pd_entries; i++)
+		pci_unmap_page(ppgtt->base.dev->pdev,
+			       ppgtt->pd.page_table[i]->daddr,
+			       4096, PCI_DMA_BIDIRECTIONAL);
 }
 
 static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
 	int i;
 
-	kfree(ppgtt->pt_dma_addr);
 	for (i = 0; i < ppgtt->num_pd_entries; i++)
-		__free_page(ppgtt->pt_pages[i]);
-	kfree(ppgtt->pt_pages);
+		unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
+
+	unmap_and_free_pd(&ppgtt->pd);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1056,27 +1145,6 @@ alloc:
 	return 0;
 }
 
-static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
-{
-	int i;
-
-	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
-				  GFP_KERNEL);
-
-	if (!ppgtt->pt_pages)
-		return -ENOMEM;
-
-	for (i = 0; i < ppgtt->num_pd_entries; i++) {
-		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
-		if (!ppgtt->pt_pages[i]) {
-			gen6_ppgtt_free(ppgtt);
-			return -ENOMEM;
-		}
-	}
-
-	return 0;
-}
-
 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
 {
 	int ret;
@@ -1085,20 +1153,14 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
-	ret = gen6_ppgtt_allocate_page_tables(ppgtt);
+	ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+			     ppgtt->base.dev);
+
 	if (ret) {
 		drm_mm_remove_node(&ppgtt->node);
 		return ret;
 	}
 
-	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
-				     GFP_KERNEL);
-	if (!ppgtt->pt_dma_addr) {
-		drm_mm_remove_node(&ppgtt->node);
-		gen6_ppgtt_free(ppgtt);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
@@ -1108,9 +1170,11 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
 	int i;
 
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		struct page *page;
 		dma_addr_t pt_addr;
 
-		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+		page = ppgtt->pd.page_table[i]->page;
+		pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
 				       PCI_DMA_BIDIRECTIONAL);
 
 		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
@@ -1118,7 +1182,7 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
 			return -EIO;
 		}
 
-		ppgtt->pt_dma_addr[i] = pt_addr;
+		ppgtt->pd.page_table[i]->daddr = pt_addr;
 	}
 
 	return 0;
@@ -1157,10 +1221,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.start = 0;
 	ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 
-	ppgtt->pd_offset =
+	ppgtt->pd.pd_offset =
 		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
 
 	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
@@ -1171,7 +1235,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 
 	gen6_write_pdes(ppgtt);
 	DRM_DEBUG("Adding PPGTT at offset %x\n",
-		  ppgtt->pd_offset << 10);
+		  ppgtt->pd.pd_offset << 10);
 
 	return 0;
 }
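[Editor's note] The unwind idiom introduced by alloc_pt_range() above recurs throughout these patches: allocate slots [pde, pde+count), and on failure free exactly the slots this call populated, walking backwards from the failure point. The same idiom in miniature, as a plain, self-contained C sketch with illustrative names (not driver code):

#include <stdlib.h>

static int alloc_range(void **slot, int first, int count)
{
	int i;

	for (i = first; i < first + count; i++) {
		slot[i] = malloc(4096);		/* stand-in for alloc_pt_single() */
		if (!slot[i])
			goto err_out;
	}
	return 0;

err_out:
	while (i-- > first) {	/* frees only what this call allocated */
		free(slot[i]);
		slot[i] = NULL;
	}
	return -1;
}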
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8f7699016711..c9e93f5070bc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -187,6 +187,26 @@ struct i915_vma {
 		       u32 flags);
 };
 
+struct i915_page_table_entry {
+	struct page *page;
+	dma_addr_t daddr;
+};
+
+struct i915_page_directory_entry {
+	struct page *page; /* NULL for GEN6-GEN7 */
+	union {
+		uint32_t pd_offset;
+		dma_addr_t daddr;
+	};
+
+	struct i915_page_table_entry *page_table[GEN6_PPGTT_PD_ENTRIES]; /* PDEs */
+};
+
+struct i915_page_directory_pointer_entry {
+	/* struct page *page; */
+	struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES];
+};
+
 struct i915_address_space {
 	struct drm_mm mm;
 	struct drm_device *dev;
@@ -272,17 +292,8 @@ struct i915_hw_ppgtt {
 	unsigned num_pd_entries;
 	unsigned num_pd_pages; /* gen8+ */
 	union {
-		struct page **pt_pages;
-		struct page **gen8_pt_pages[GEN8_LEGACY_PDPES];
-	};
-	struct page *pd_pages;
-	union {
-		uint32_t pd_offset;
-		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPES];
-	};
-	union {
-		dma_addr_t *pt_dma_addr;
-		dma_addr_t *gen8_pt_dma_addr[GEN8_LEGACY_PDPES];
+		struct i915_page_directory_pointer_entry pdp;
+		struct i915_page_directory_entry pd;
 	};
 
 	struct drm_i915_file_private *file_priv;
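[Editor's note] The new structures replace several parallel arrays with one explicit hierarchy: pdp -> page_directory[] -> page_table[] -> page/daddr. A sketch of how a gen8 GPU virtual address walks that hierarchy, assuming 4KiB pages and the GEN8_LEGACY_PDPES layout above (the local shift constants are illustrative, chosen so 4 PDPEs x 512 PDEs x 512 PTEs x 4KiB covers 4GiB):

#define PDPE_SHIFT 30	/* each PDP entry spans 1GiB */
#define PDE_SHIFT  21	/* each PD entry spans 2MiB */

static dma_addr_t lookup_pt_daddr(struct i915_page_directory_pointer_entry *pdp,
				  uint64_t gva)
{
	unsigned pdpe = (gva >> PDPE_SHIFT) & 0x3;	/* 2 bits: which PD */
	unsigned pde  = (gva >> PDE_SHIFT) & 0x1ff;	/* 9 bits: which PT */
	struct i915_page_directory_entry *pd = pdp->page_directory[pdpe];
	struct i915_page_table_entry *pt = pd->page_table[pde];

	return pt->daddr;	/* bus address of the 4KiB page-table page */
}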
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 59401f3b902c..f1de95f7432c 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -253,7 +253,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_c
253 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 253 if (!drm_mm_initialized(&dev_priv->mm.stolen))
254 return -ENODEV; 254 return -ENODEV;
255 255
256 if (size < dev_priv->fbc.uncompressed_size) 256 if (size <= dev_priv->fbc.uncompressed_size)
257 return 0; 257 return 0;
258 258
259 /* Release any current block */ 259 /* Release any current block */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 90731195ab52..6ebea4614204 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -492,31 +492,6 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-/**
- * i915_pipe_enabled - check if a pipe is enabled
- * @dev: DRM device
- * @pipe: pipe to check
- *
- * Reading certain registers when the pipe is disabled can hang the chip.
- * Use this routine to make sure the PLL is running and the pipe is active
- * before reading such registers if unsure.
- */
-static int
-i915_pipe_enabled(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		/* Locking is horribly broken here, but whatever. */
-		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-		return intel_crtc->active;
-	} else {
-		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
-	}
-}
-
 /*
  * This timing diagram depicts the video signal in and
  * around the vertical blanking period.
@@ -582,34 +557,16 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	unsigned long high_frame;
 	unsigned long low_frame;
 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+	const struct drm_display_mode *mode =
+		&intel_crtc->config->base.adjusted_mode;
 
-	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
-				 "pipe %c\n", pipe_name(pipe));
-		return 0;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		struct intel_crtc *intel_crtc =
-			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-		const struct drm_display_mode *mode =
-			&intel_crtc->config->base.adjusted_mode;
-
-		htotal = mode->crtc_htotal;
-		hsync_start = mode->crtc_hsync_start;
-		vbl_start = mode->crtc_vblank_start;
-		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-			vbl_start = DIV_ROUND_UP(vbl_start, 2);
-	} else {
-		enum transcoder cpu_transcoder = (enum transcoder) pipe;
-
-		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
-		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
-		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
-		if ((I915_READ(PIPECONF(cpu_transcoder)) &
-		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
-			vbl_start = DIV_ROUND_UP(vbl_start, 2);
-	}
+	htotal = mode->crtc_htotal;
+	hsync_start = mode->crtc_hsync_start;
+	vbl_start = mode->crtc_vblank_start;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vbl_start = DIV_ROUND_UP(vbl_start, 2);
 
 	/* Convert to pixel count */
 	vbl_start *= htotal;
@@ -648,12 +605,6 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int reg = PIPE_FRMCOUNT_GM45(pipe);
 
-	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
-				 "pipe %c\n", pipe_name(pipe));
-		return 0;
-	}
-
 	return I915_READ(reg);
 }
 
@@ -840,7 +791,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 		return -EINVAL;
 	}
 
-	if (!crtc->enabled) {
+	if (!crtc->state->enable) {
 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
 		return -EBUSY;
 	}
@@ -2647,9 +2598,6 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 
-	if (!i915_pipe_enabled(dev, pipe))
-		return -EINVAL;
-
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (INTEL_INFO(dev)->gen >= 4)
 		i915_enable_pipestat(dev_priv, pipe,
@@ -2669,9 +2617,6 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
 						     DE_PIPE_VBLANK(pipe);
 
-	if (!i915_pipe_enabled(dev, pipe))
-		return -EINVAL;
-
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	ironlake_enable_display_irq(dev_priv, bit);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -2684,9 +2629,6 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 
-	if (!i915_pipe_enabled(dev, pipe))
-		return -EINVAL;
-
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	i915_enable_pipestat(dev_priv, pipe,
 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
@@ -2700,9 +2642,6 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 
-	if (!i915_pipe_enabled(dev, pipe))
-		return -EINVAL;
-
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -2754,9 +2693,6 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 
-	if (!i915_pipe_enabled(dev, pipe))
-		return;
-
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
@@ -4368,10 +4304,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	if (!IS_GEN2(dev_priv))
 		dev->vblank_disable_immediate = true;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
-		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
-	}
+	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
 
 	if (IS_CHERRYVIEW(dev_priv)) {
 		dev->driver->irq_handler = cherryview_irq_handler;
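[Editor's note] The vblank counter path kept above converts the start of vblank from a scanline to a pixel count so it can be compared against the hardware pixel counter, halving the line number (rounded up) for interlaced modes. The arithmetic reduced to a self-contained sketch, with illustrative names rather than driver code:

static unsigned vblank_start_in_pixels(unsigned crtc_vblank_start,
				       unsigned crtc_htotal,
				       int interlaced)
{
	unsigned vbl_start = crtc_vblank_start;

	if (interlaced)
		vbl_start = (vbl_start + 1) / 2;	/* DIV_ROUND_UP(x, 2) */

	return vbl_start * crtc_htotal;			/* scanlines -> pixels */
}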
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1dc91de7d2e6..55143cb36e74 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -139,6 +139,19 @@
 #define GEN8_RING_PDP_UDW(ring, n)	((ring)->mmio_base+0x270 + ((n) * 8 + 4))
 #define GEN8_RING_PDP_LDW(ring, n)	((ring)->mmio_base+0x270 + (n) * 8)
 
+#define GEN8_R_PWR_CLK_STATE		0x20C8
+#define   GEN8_RPCS_ENABLE		(1 << 31)
+#define   GEN8_RPCS_S_CNT_ENABLE	(1 << 18)
+#define   GEN8_RPCS_S_CNT_SHIFT		15
+#define   GEN8_RPCS_S_CNT_MASK		(0x7 << GEN8_RPCS_S_CNT_SHIFT)
+#define   GEN8_RPCS_SS_CNT_ENABLE	(1 << 11)
+#define   GEN8_RPCS_SS_CNT_SHIFT	8
+#define   GEN8_RPCS_SS_CNT_MASK		(0x7 << GEN8_RPCS_SS_CNT_SHIFT)
+#define   GEN8_RPCS_EU_MAX_SHIFT	4
+#define   GEN8_RPCS_EU_MAX_MASK		(0xf << GEN8_RPCS_EU_MAX_SHIFT)
+#define   GEN8_RPCS_EU_MIN_SHIFT	0
+#define   GEN8_RPCS_EU_MIN_MASK		(0xf << GEN8_RPCS_EU_MIN_SHIFT)
+
 #define GAM_ECOCHK			0x4090
 #define   BDW_DISABLE_HDC_INVALIDATION	(1<<25)
 #define   ECOCHK_SNB_BIT		(1<<10)
@@ -1025,6 +1038,16 @@ enum skl_disp_power_wells {
 #define   DPIO_CHV_PROP_COEFF_SHIFT	0
 #define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
 
+#define _CHV_PLL_DW8_CH0		0x8020
+#define _CHV_PLL_DW8_CH1		0x81A0
+#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
+
+#define _CHV_PLL_DW9_CH0		0x8024
+#define _CHV_PLL_DW9_CH1		0x81A4
+#define   DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT		1 /* 3 bits */
+#define   DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE	1 /* 1: coarse & 0 : fine */
+#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
+
 #define _CHV_CMN_DW5_CH0		0x8114
 #define   CHV_BUFRIGHTENA1_DISABLE	(0 << 20)
 #define   CHV_BUFRIGHTENA1_NORMAL	(1 << 20)
@@ -1328,6 +1351,8 @@ enum skl_disp_power_wells {
 #define   GEN6_WIZ_HASHING_16x4				GEN6_WIZ_HASHING(1, 0)
 #define   GEN6_WIZ_HASHING_MASK				GEN6_WIZ_HASHING(1, 1)
 #define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE		(1 << 5)
+#define   GEN9_IZ_HASHING_MASK(slice)			(0x3 << (slice * 2))
+#define   GEN9_IZ_HASHING(slice, val)			((val) << (slice * 2))
 
 #define GFX_MODE	0x02520
 #define GFX_MODE_GEN7	0x0229c
@@ -1506,6 +1531,17 @@ enum skl_disp_power_wells {
 #define   CHV_FGT_EU_DIS_SS1_R1_SHIFT	28
 #define   CHV_FGT_EU_DIS_SS1_R1_MASK	(0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
 
+#define GEN8_FUSE2			0x9120
+#define   GEN8_F2_S_ENA_SHIFT		25
+#define   GEN8_F2_S_ENA_MASK		(0x7 << GEN8_F2_S_ENA_SHIFT)
+
+#define   GEN9_F2_SS_DIS_SHIFT		20
+#define   GEN9_F2_SS_DIS_MASK		(0xf << GEN9_F2_SS_DIS_SHIFT)
+
+#define GEN8_EU_DISABLE0		0x9134
+#define GEN8_EU_DISABLE1		0x9138
+#define GEN8_EU_DISABLE2		0x913c
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
 #define   GEN6_BSD_SLEEP_MSG_DISABLE	(1 << 0)
 #define   GEN6_BSD_SLEEP_FLUSH_DISABLE	(1 << 2)
@@ -3880,6 +3916,7 @@ enum skl_disp_power_wells {
 #define   PIPECONF_INTERLACE_MODE_MASK		(7 << 21)
 #define   PIPECONF_EDP_RR_MODE_SWITCH		(1 << 20)
 #define   PIPECONF_CXSR_DOWNCLOCK	(1<<16)
+#define   PIPECONF_EDP_RR_MODE_SWITCH_VLV	(1 << 14)
 #define   PIPECONF_COLOR_RANGE_SELECT	(1 << 13)
 #define   PIPECONF_BPC_MASK	(0x7 << 5)
 #define   PIPECONF_8BPC		(0<<5)
@@ -5246,8 +5283,9 @@ enum skl_disp_power_wells {
 #define COMMON_SLICE_CHICKEN2			0x7014
 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE	(1<<0)
 
 #define HIZ_CHICKEN					0x7018
 # define CHV_HZ_8X8_MODE_IN_1X				(1<<15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE	(1<<3)
 
 #define GEN9_SLICE_COMMON_ECO_CHICKEN0		0x7308
 #define  DISABLE_PIXEL_MASK_CAMMING		(1<<14)
@@ -6187,6 +6225,26 @@ enum skl_disp_power_wells {
 #define GEN6_RC6			3
 #define GEN6_RC7			4
 
+#define GEN9_SLICE0_PGCTL_ACK		0x804c
+#define GEN9_SLICE1_PGCTL_ACK		0x8050
+#define GEN9_SLICE2_PGCTL_ACK		0x8054
+#define   GEN9_PGCTL_SLICE_ACK		(1 << 0)
+
+#define GEN9_SLICE0_SS01_EU_PGCTL_ACK	0x805c
+#define GEN9_SLICE0_SS23_EU_PGCTL_ACK	0x8060
+#define GEN9_SLICE1_SS01_EU_PGCTL_ACK	0x8064
+#define GEN9_SLICE1_SS23_EU_PGCTL_ACK	0x8068
+#define GEN9_SLICE2_SS01_EU_PGCTL_ACK	0x806c
+#define GEN9_SLICE2_SS23_EU_PGCTL_ACK	0x8070
+#define   GEN9_PGCTL_SSA_EU08_ACK	(1 << 0)
+#define   GEN9_PGCTL_SSA_EU19_ACK	(1 << 2)
+#define   GEN9_PGCTL_SSA_EU210_ACK	(1 << 4)
+#define   GEN9_PGCTL_SSA_EU311_ACK	(1 << 6)
+#define   GEN9_PGCTL_SSB_EU08_ACK	(1 << 8)
+#define   GEN9_PGCTL_SSB_EU19_ACK	(1 << 10)
+#define   GEN9_PGCTL_SSB_EU210_ACK	(1 << 12)
+#define   GEN9_PGCTL_SSB_EU311_ACK	(1 << 14)
+
 #define GEN7_MISCCPCTL			(0x9424)
 #define   GEN7_DOP_CLOCK_GATE_ENABLE	(1<<0)
 
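[Editor's note] The new registers above follow the usual shift/mask pattern for packed fields: mask out the field, then shift it down to bit 0. A self-contained sketch of decoding the slice count from GEN8_R_PWR_CLK_STATE (the macros are redefined locally so the snippet stands alone; 'rpcs' stands in for an I915_READ() result):

#include <stdint.h>

#define GEN8_RPCS_S_CNT_SHIFT	15
#define GEN8_RPCS_S_CNT_MASK	(0x7 << GEN8_RPCS_S_CNT_SHIFT)

static unsigned rpcs_slice_count(uint32_t rpcs)
{
	/* isolate bits 17:15, then move them down to bits 2:0 */
	return (rpcs & GEN8_RPCS_S_CNT_MASK) >> GEN8_RPCS_S_CNT_SHIFT;
}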
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 9f19ed38cdc3..cf67f82f7b7f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,166 +29,6 @@
29#include "intel_drv.h" 29#include "intel_drv.h"
30#include "i915_reg.h" 30#include "i915_reg.h"
31 31
32static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
33{
34 struct drm_i915_private *dev_priv = dev->dev_private;
35
36 I915_WRITE8(index_port, reg);
37 return I915_READ8(data_port);
38}
39
40static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
41{
42 struct drm_i915_private *dev_priv = dev->dev_private;
43
44 I915_READ8(st01);
45 I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
46 return I915_READ8(VGA_AR_DATA_READ);
47}
48
49static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
50{
51 struct drm_i915_private *dev_priv = dev->dev_private;
52
53 I915_READ8(st01);
54 I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
55 I915_WRITE8(VGA_AR_DATA_WRITE, val);
56}
57
58static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
59{
60 struct drm_i915_private *dev_priv = dev->dev_private;
61
62 I915_WRITE8(index_port, reg);
63 I915_WRITE8(data_port, val);
64}
65
66static void i915_save_vga(struct drm_device *dev)
67{
68 struct drm_i915_private *dev_priv = dev->dev_private;
69 int i;
70 u16 cr_index, cr_data, st01;
71
72 /* VGA state */
73 dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
74 dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
75 dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
76 dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
77
78 /* VGA color palette registers */
79 dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
80
81 /* MSR bits */
82 dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
83 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
84 cr_index = VGA_CR_INDEX_CGA;
85 cr_data = VGA_CR_DATA_CGA;
86 st01 = VGA_ST01_CGA;
87 } else {
88 cr_index = VGA_CR_INDEX_MDA;
89 cr_data = VGA_CR_DATA_MDA;
90 st01 = VGA_ST01_MDA;
91 }
92
93 /* CRT controller regs */
94 i915_write_indexed(dev, cr_index, cr_data, 0x11,
95 i915_read_indexed(dev, cr_index, cr_data, 0x11) &
96 (~0x80));
97 for (i = 0; i <= 0x24; i++)
98 dev_priv->regfile.saveCR[i] =
99 i915_read_indexed(dev, cr_index, cr_data, i);
100 /* Make sure we don't turn off CR group 0 writes */
101 dev_priv->regfile.saveCR[0x11] &= ~0x80;
102
103 /* Attribute controller registers */
104 I915_READ8(st01);
105 dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
106 for (i = 0; i <= 0x14; i++)
107 dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
108 I915_READ8(st01);
109 I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
110 I915_READ8(st01);
111
112 /* Graphics controller registers */
113 for (i = 0; i < 9; i++)
114 dev_priv->regfile.saveGR[i] =
115 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
116
117 dev_priv->regfile.saveGR[0x10] =
118 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
119 dev_priv->regfile.saveGR[0x11] =
120 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
121 dev_priv->regfile.saveGR[0x18] =
122 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
123
124 /* Sequencer registers */
125 for (i = 0; i < 8; i++)
126 dev_priv->regfile.saveSR[i] =
127 i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
128}
129
130static void i915_restore_vga(struct drm_device *dev)
131{
132 struct drm_i915_private *dev_priv = dev->dev_private;
133 int i;
134 u16 cr_index, cr_data, st01;
135
136 /* VGA state */
137 I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
138
139 I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
140 I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
141 I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
142 POSTING_READ(VGA_PD);
143 udelay(150);
144
145 /* MSR bits */
146 I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
147 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
148 cr_index = VGA_CR_INDEX_CGA;
149 cr_data = VGA_CR_DATA_CGA;
150 st01 = VGA_ST01_CGA;
151 } else {
152 cr_index = VGA_CR_INDEX_MDA;
153 cr_data = VGA_CR_DATA_MDA;
154 st01 = VGA_ST01_MDA;
155 }
156
157 /* Sequencer registers, don't write SR07 */
158 for (i = 0; i < 7; i++)
159 i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
160 dev_priv->regfile.saveSR[i]);
161
162 /* CRT controller regs */
163 /* Enable CR group 0 writes */
164 i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
165 for (i = 0; i <= 0x24; i++)
166 i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
167
168 /* Graphics controller regs */
169 for (i = 0; i < 9; i++)
170 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
171 dev_priv->regfile.saveGR[i]);
172
173 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
174 dev_priv->regfile.saveGR[0x10]);
175 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
176 dev_priv->regfile.saveGR[0x11]);
177 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
178 dev_priv->regfile.saveGR[0x18]);
179
180 /* Attribute controller registers */
181 I915_READ8(st01); /* switch back to index mode */
182 for (i = 0; i <= 0x14; i++)
183 i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
184 I915_READ8(st01); /* switch back to index mode */
185 I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
186 I915_READ8(st01);
187
188 /* VGA color palette registers */
189 I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
190}
191
192static void i915_save_display(struct drm_device *dev) 32static void i915_save_display(struct drm_device *dev)
193{ 33{
194 struct drm_i915_private *dev_priv = dev->dev_private; 34 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -197,11 +37,6 @@ static void i915_save_display(struct drm_device *dev)
197 if (INTEL_INFO(dev)->gen <= 4) 37 if (INTEL_INFO(dev)->gen <= 4)
198 dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); 38 dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
199 39
200 /* This is only meaningful in non-KMS mode */
201 /* Don't regfile.save them in KMS mode */
202 if (!drm_core_check_feature(dev, DRIVER_MODESET))
203 i915_save_display_reg(dev);
204
205 /* LVDS state */ 40 /* LVDS state */
206 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 41 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
207 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); 42 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
@@ -224,9 +59,6 @@ static void i915_save_display(struct drm_device *dev)
224 /* save FBC interval */ 59 /* save FBC interval */
225 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) 60 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
226 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); 61 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
227
228 if (!drm_core_check_feature(dev, DRIVER_MODESET))
229 i915_save_vga(dev);
230} 62}
231 63
232static void i915_restore_display(struct drm_device *dev) 64static void i915_restore_display(struct drm_device *dev)
@@ -238,11 +70,7 @@ static void i915_restore_display(struct drm_device *dev)
238 if (INTEL_INFO(dev)->gen <= 4) 70 if (INTEL_INFO(dev)->gen <= 4)
239 I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); 71 I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
240 72
241 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 73 mask = ~LVDS_PORT_EN;
242 i915_restore_display_reg(dev);
243
244 if (drm_core_check_feature(dev, DRIVER_MODESET))
245 mask = ~LVDS_PORT_EN;
246 74
247 /* LVDS state */ 75 /* LVDS state */
248 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 76 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -270,10 +98,7 @@ static void i915_restore_display(struct drm_device *dev)
270 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) 98 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
271 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); 99 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
272 100
273 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 101 i915_redisable_vga(dev);
274 i915_restore_vga(dev);
275 else
276 i915_redisable_vga(dev);
277} 102}
278 103
279int i915_save_state(struct drm_device *dev) 104int i915_save_state(struct drm_device *dev)
@@ -285,24 +110,6 @@ int i915_save_state(struct drm_device *dev)
 
 	i915_save_display(dev);
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		/* Interrupt state */
-		if (HAS_PCH_SPLIT(dev)) {
-			dev_priv->regfile.saveDEIER = I915_READ(DEIER);
-			dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
-			dev_priv->regfile.saveGTIER = I915_READ(GTIER);
-			dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
-			dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
-			dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
-			dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
-				I915_READ(RSTDBYCTL);
-			dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
-		} else {
-			dev_priv->regfile.saveIER = I915_READ(IER);
-			dev_priv->regfile.saveIMR = I915_READ(IMR);
-		}
-	}
-
 	if (IS_GEN4(dev))
 		pci_read_config_word(dev->pdev, GCDGMBUS,
 				     &dev_priv->regfile.saveGCDGMBUS);
@@ -341,24 +148,6 @@ int i915_restore_state(struct drm_device *dev)
 				      dev_priv->regfile.saveGCDGMBUS);
 	i915_restore_display(dev);
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		/* Interrupt state */
-		if (HAS_PCH_SPLIT(dev)) {
-			I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
-			I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
-			I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
-			I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
-			I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
-			I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
-			I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
-			I915_WRITE(RSTDBYCTL,
-				   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
-		} else {
-			I915_WRITE(IER, dev_priv->regfile.saveIER);
-			I915_WRITE(IMR, dev_priv->regfile.saveIMR);
-		}
-	}
-
 	/* Cache mode state */
 	if (INTEL_INFO(dev)->gen < 7)
 		I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index cdc9da001484..67bd07edcbb0 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -127,10 +127,19 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
+static ssize_t
+show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = dev_get_drvdata(kdev);
+	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+}
+
 static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
 static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
 static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
 static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);
 
 static struct attribute *rc6_attrs[] = {
 	&dev_attr_rc6_enable.attr,
@@ -153,6 +162,16 @@ static struct attribute_group rc6p_attr_group = {
 	.name = power_group_name,
 	.attrs = rc6p_attrs
 };
+
+static struct attribute *media_rc6_attrs[] = {
+	&dev_attr_media_rc6_residency_ms.attr,
+	NULL
+};
+
+static struct attribute_group media_rc6_attr_group = {
+	.name = power_group_name,
+	.attrs = media_rc6_attrs
+};
 #endif
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -487,38 +506,17 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val, rp_state_cap;
-	ssize_t ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
+	u32 val;
 
-	if (attr == &dev_attr_gt_RP0_freq_mhz) {
-		if (IS_VALLEYVIEW(dev))
-			val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
-		else
-			val = intel_gpu_freq(dev_priv,
-					     ((rp_state_cap & 0x0000ff) >> 0));
-	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
-		if (IS_VALLEYVIEW(dev))
-			val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
-		else
-			val = intel_gpu_freq(dev_priv,
-					     ((rp_state_cap & 0x00ff00) >> 8));
-	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
-		if (IS_VALLEYVIEW(dev))
-			val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
-		else
-			val = intel_gpu_freq(dev_priv,
-					     ((rp_state_cap & 0xff0000) >> 16));
-	} else {
+	if (attr == &dev_attr_gt_RP0_freq_mhz)
+		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+	else if (attr == &dev_attr_gt_RP1_freq_mhz)
+		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+	else if (attr == &dev_attr_gt_RPn_freq_mhz)
+		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+	else
 		BUG();
-	}
+
 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
@@ -627,6 +625,12 @@ void i915_setup_sysfs(struct drm_device *dev)
 		if (ret)
 			DRM_ERROR("RC6p residency sysfs setup failed\n");
 	}
+	if (IS_VALLEYVIEW(dev)) {
+		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+					&media_rc6_attr_group);
+		if (ret)
+			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+	}
 #endif
 	if (HAS_L3_DPF(dev)) {
 		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
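
The media RC6 counter above follows the same sysfs pattern as the existing rc6*_residency_ms files: a show() callback wrapped in DEVICE_ATTR(), gathered into an attribute_group whose .name is power_group_name so that sysfs_merge_group() folds it into the device's existing power/ directory instead of creating a second one. A minimal sketch of that pattern outside i915 (the demo_* names are invented for illustration, not driver API):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/sysfs.h>

/* stand-in for calc_residency(); invented for this sketch */
static u32 demo_read_residency(void)
{
	return 0;
}

static ssize_t show_demo_residency_ms(struct device *kdev,
				      struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", demo_read_residency());
}

static DEVICE_ATTR(demo_residency_ms, S_IRUGO, show_demo_residency_ms, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_residency_ms.attr,
	NULL
};

static struct attribute_group demo_attr_group = {
	.name = power_group_name,	/* merge into the existing power/ dir */
	.attrs = demo_attrs
};

/* setup:    sysfs_merge_group(&kdev->kobj, &demo_attr_group);
 * teardown: sysfs_unmerge_group(&kdev->kobj, &demo_attr_group); */

Merging rather than registering a new group is what lets media_rc6_residency_ms appear next to rc6_residency_ms even though the two attributes live in different attribute groups.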
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
deleted file mode 100644
index d10fe3e9c49f..000000000000
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- *
- * Copyright 2008 (c) Intel Corporation
- *   Jesse Barnes <jbarnes@virtuousgeek.org>
- * Copyright 2013 (c) Intel Corporation
- *   Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "intel_drv.h"
-#include "i915_reg.h"
-
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpll_reg;
-
-	/* On IVB, 3rd pipe shares PLL with another one */
-	if (pipe > 1)
-		return false;
-
-	if (HAS_PCH_SPLIT(dev))
-		dpll_reg = PCH_DPLL(pipe);
-	else
-		dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
-
-	return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
-	u32 *array;
-	int i;
-
-	if (!i915_pipe_enabled(dev, pipe))
-		return;
-
-	if (HAS_PCH_SPLIT(dev))
-		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
-	if (pipe == PIPE_A)
-		array = dev_priv->regfile.save_palette_a;
-	else
-		array = dev_priv->regfile.save_palette_b;
-
-	for (i = 0; i < 256; i++)
-		array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
-	u32 *array;
-	int i;
-
-	if (!i915_pipe_enabled(dev, pipe))
-		return;
-
-	if (HAS_PCH_SPLIT(dev))
-		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
-	if (pipe == PIPE_A)
-		array = dev_priv->regfile.save_palette_a;
-	else
-		array = dev_priv->regfile.save_palette_b;
-
-	for (i = 0; i < 256; i++)
-		I915_WRITE(reg + (i << 2), array[i]);
-}
-
-void i915_save_display_reg(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
-	/* Cursor state */
-	dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
-	dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
-	dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
-	dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
-	dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
-	dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
-	if (IS_GEN2(dev))
-		dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
-		dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
-	}
-
-	/* Pipe & plane A info */
-	dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
-	dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
-		dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
-		dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
-	} else {
-		dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
-		dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
-		dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
-	}
-	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
-	dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
-	dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
-	dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
-	dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
-	dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
-	dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
-	if (!HAS_PCH_SPLIT(dev))
-		dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
-		dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
-		dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
-		dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
-		dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
-		dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
-		dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
-		dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
-		dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
-		dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
-		dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
-		dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
-		dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
-		dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
-		dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
-		dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
-	}
-
-	dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
-	dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
-	dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
-	dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
-	dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
-	if (INTEL_INFO(dev)->gen >= 4) {
-		dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
-		dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
-	}
-	i915_save_palette(dev, PIPE_A);
-	dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
-
-	/* Pipe & plane B info */
-	dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
-	dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
-		dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
-		dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
-	} else {
-		dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
-		dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
-		dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
-	}
-	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
-	dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
-	dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
-	dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
-	dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
-	dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
-	dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
-	if (!HAS_PCH_SPLIT(dev))
-		dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
-		dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
-		dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
-		dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
-		dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
-		dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
-		dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
-		dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
-		dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
-		dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
-		dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
-		dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
-		dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
-		dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
-		dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
-		dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
-	}
-
-	dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
-	dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
-	dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
-	dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
-	dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
-	if (INTEL_INFO(dev)->gen >= 4) {
-		dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
-		dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
-	}
-	i915_save_palette(dev, PIPE_B);
-	dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
-
-	/* Fences */
-	switch (INTEL_INFO(dev)->gen) {
-	case 7:
-	case 6:
-		for (i = 0; i < 16; i++)
-			dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
-		break;
-	case 5:
-	case 4:
-		for (i = 0; i < 16; i++)
-			dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
-		break;
-	case 3:
-		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-			for (i = 0; i < 8; i++)
-				dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
-	case 2:
-		for (i = 0; i < 8; i++)
-			dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-		break;
-	}
-
-	/* CRT state */
-	if (HAS_PCH_SPLIT(dev))
-		dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
-	else
-		dev_priv->regfile.saveADPA = I915_READ(ADPA);
-
-	/* Display Port state */
-	if (SUPPORTS_INTEGRATED_DP(dev)) {
-		dev_priv->regfile.saveDP_B = I915_READ(DP_B);
-		dev_priv->regfile.saveDP_C = I915_READ(DP_C);
-		dev_priv->regfile.saveDP_D = I915_READ(DP_D);
-		dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
-		dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
-		dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
-		dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
-		dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
-		dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
-		dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
-		dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
-	}
-	/* FIXME: regfile.save TV & SDVO state */
-
-	/* Panel fitter */
-	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
-		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-	}
-
-	/* Backlight */
-	if (INTEL_INFO(dev)->gen <= 4)
-		pci_read_config_byte(dev->pdev, PCI_LBPC,
-				     &dev_priv->regfile.saveLBB);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
-		dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
-		dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
-		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
-	} else {
-		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-		if (INTEL_INFO(dev)->gen >= 4)
-			dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
-		dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
-	}
-
-	return;
-}
-
-void i915_restore_display_reg(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int dpll_a_reg, fpa0_reg, fpa1_reg;
-	int dpll_b_reg, fpb0_reg, fpb1_reg;
-	int i;
-
-	/* Backlight */
-	if (INTEL_INFO(dev)->gen <= 4)
-		pci_write_config_byte(dev->pdev, PCI_LBPC,
-				      dev_priv->regfile.saveLBB);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
-		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
-		/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
-		 * otherwise we get blank eDP screen after S3 on some machines
-		 */
-		I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
-		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
-	} else {
-		if (INTEL_INFO(dev)->gen >= 4)
-			I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
-		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
-		I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
-	}
-
-	/* Panel fitter */
-	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
-		I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
-	}
-
-	/* Display port ratios (must be done before clock is set) */
-	if (SUPPORTS_INTEGRATED_DP(dev)) {
-		I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
-		I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
-		I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
-		I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
-		I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
-		I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
-		I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
-		I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
-	}
-
-	/* Fences */
-	switch (INTEL_INFO(dev)->gen) {
-	case 7:
-	case 6:
-		for (i = 0; i < 16; i++)
-			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
-		break;
-	case 5:
-	case 4:
-		for (i = 0; i < 16; i++)
-			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
-		break;
-	case 3:
-	case 2:
-		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-			for (i = 0; i < 8; i++)
-				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
-		for (i = 0; i < 8; i++)
-			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
-		break;
-	}
-
-
-	if (HAS_PCH_SPLIT(dev)) {
-		dpll_a_reg = _PCH_DPLL_A;
-		dpll_b_reg = _PCH_DPLL_B;
-		fpa0_reg = _PCH_FPA0;
-		fpb0_reg = _PCH_FPB0;
-		fpa1_reg = _PCH_FPA1;
-		fpb1_reg = _PCH_FPB1;
-	} else {
-		dpll_a_reg = _DPLL_A;
-		dpll_b_reg = _DPLL_B;
-		fpa0_reg = _FPA0;
-		fpb0_reg = _FPB0;
-		fpa1_reg = _FPA1;
-		fpb1_reg = _FPB1;
-	}
-
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
-		I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
-	}
-
-	/* Pipe & plane A info */
-	/* Prime the clock */
-	if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
-		I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
-			   ~DPLL_VCO_ENABLE);
-		POSTING_READ(dpll_a_reg);
-		udelay(150);
-	}
-	I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
-	I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
-	/* Actually enable it */
-	I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
-	POSTING_READ(dpll_a_reg);
-	udelay(150);
-	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
-		POSTING_READ(_DPLL_A_MD);
-	}
-	udelay(150);
-
-	/* Restore mode */
-	I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
-	I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
-	I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
-	I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
-	I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
-	I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
-	if (!HAS_PCH_SPLIT(dev))
-		I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
-		I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
-		I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
-		I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
-
-		I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
-		I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
-
-		I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
-		I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
-		I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
-
-		I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
-		I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
-		I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
-		I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
-		I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
-		I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
-		I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
-	}
-
-	/* Restore plane info */
-	I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
-	I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
-	I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
-	I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
-	I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
-	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
-		I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
-	}
-
-	I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
-
-	i915_restore_palette(dev, PIPE_A);
-	/* Enable the plane */
-	I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
-	I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
-
-	/* Pipe & plane B info */
-	if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
-		I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
-			   ~DPLL_VCO_ENABLE);
-		POSTING_READ(dpll_b_reg);
-		udelay(150);
-	}
-	I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
-	I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
-	/* Actually enable it */
-	I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
-	POSTING_READ(dpll_b_reg);
-	udelay(150);
-	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
-		POSTING_READ(_DPLL_B_MD);
-	}
-	udelay(150);
-
-	/* Restore mode */
-	I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
-	I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
-	I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
-	I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
-	I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
-	I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
-	if (!HAS_PCH_SPLIT(dev))
-		I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
-		I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
-		I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
-		I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
-
-		I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
-		I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
-
-		I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
-		I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
-		I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
-
-		I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
-		I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
-		I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
-		I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
-		I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
-		I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
-		I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
-	}
-
-	/* Restore plane info */
-	I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
-	I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
-	I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
-	I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
-	I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
-	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
-		I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
-	}
-
-	I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
-
-	i915_restore_palette(dev, PIPE_B);
-	/* Enable the plane */
-	I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
-	I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
-
-	/* Cursor state */
-	I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
-	I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
-	I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
-	I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
-	I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
-	I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
-	if (IS_GEN2(dev))
-		I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
-
-	/* CRT state */
-	if (HAS_PCH_SPLIT(dev))
-		I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
-	else
-		I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
-
-	/* Display Port state */
-	if (SUPPORTS_INTEGRATED_DP(dev)) {
-		I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
-		I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
-		I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
-	}
-	/* FIXME: restore TV & SDVO state */
-
-	return;
-}
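
One detail worth flagging in the register save code deleted above: the fence switch in i915_save_display_reg() has no break after case 3 on purpose. 945/G33-class parts save the upper eight fence registers and then fall through into the gen-2 path for the lower eight; the restore side spells the same thing out with a merged case 3: case 2:. A standalone illustration of the pattern (plain C, not driver code):

#include <stdio.h>

int main(void)
{
	int gen = 3, i;

	switch (gen) {
	case 3:
		for (i = 8; i < 16; i++)
			printf("save fence %d from the 945_8 bank\n", i);
		/* deliberately no break: continue into the gen-2 path */
	case 2:
		for (i = 0; i < 8; i++)
			printf("save fence %d from the 830_0 bank\n", i);
		break;
	}
	return 0;
}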
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 3f178258d9f9..c684085cb56a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -662,6 +662,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 			      edp_link_params->vswing);
 		break;
 	}
+
+	if (bdb->version >= 173) {
+		uint8_t vswing;
+
+		vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+		dev_priv->vbt.edp_low_vswing = vswing == 0;
+	}
 }
 
 static void
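
For the VBT change above: from version 173 the eDP block packs one 4-bit vswing/pre-emphasis selector per panel into a single 64-bit field, so panel N's selector lives at bits [4N+3:4N], and selector value 0 means the panel wants the low-vswing eDP 1.4 tables. A standalone check of the extraction (the field value below is made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented example value: panel 0 -> 0, panel 1 -> 1, panel 2 -> 2 */
	uint64_t edp_vswing_preemph = 0x210;
	int panel_type;

	for (panel_type = 0; panel_type < 3; panel_type++) {
		uint8_t vswing = (edp_vswing_preemph >> (panel_type * 4)) & 0xF;
		printf("panel %d: selector %u, low_vswing=%d\n",
		       panel_type, vswing, vswing == 0);
	}
	return 0;
}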
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index a6a8710f665f..6afd5be33367 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -554,6 +554,7 @@ struct bdb_edp {
 	/* ith bit indicates enabled/disabled for (i+1)th panel */
 	u16 edp_s3d_feature;
 	u16 edp_t3_optimization;
+	u64 edp_vswing_preemph;		/* v173 */
 } __packed;
 
 struct psr_table {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f14e8a2a022d..985d531aaf9e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -139,6 +139,21 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 	{ 0x00004014, 0x00000087 },
 };
 
+/* eDP 1.4 low vswing translation parameters */
+static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000a8 },
+	{ 0x00002016, 0x000000ab },
+	{ 0x00006012, 0x000000a2 },
+	{ 0x00008010, 0x00000088 },
+	{ 0x00000018, 0x000000ab },
+	{ 0x00004014, 0x000000a2 },
+	{ 0x00006012, 0x000000a6 },
+	{ 0x00000018, 0x000000a2 },
+	{ 0x00005013, 0x0000009c },
+	{ 0x00000018, 0x00000088 },
+};
+
+
 static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
 				/* Idx	NT mV	T mV	db */
 	{ 0x00000018, 0x000000a0 },	/* 0:	400	400	0 */
@@ -187,7 +202,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
-	int i, n_hdmi_entries, hdmi_800mV_0dB;
+	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_800mV_0dB,
+	    size;
 	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
 	const struct ddi_buf_trans *ddi_translations_fdi;
 	const struct ddi_buf_trans *ddi_translations_dp;
@@ -198,7 +214,15 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 	if (IS_SKYLAKE(dev)) {
 		ddi_translations_fdi = NULL;
 		ddi_translations_dp = skl_ddi_translations_dp;
-		ddi_translations_edp = skl_ddi_translations_dp;
+		n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+		if (dev_priv->vbt.edp_low_vswing) {
+			ddi_translations_edp = skl_ddi_translations_edp;
+			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+		} else {
+			ddi_translations_edp = skl_ddi_translations_dp;
+			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+		}
+
 		ddi_translations_hdmi = skl_ddi_translations_hdmi;
 		n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
 		hdmi_800mV_0dB = 7;
@@ -207,6 +231,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations_dp = bdw_ddi_translations_dp;
 		ddi_translations_edp = bdw_ddi_translations_edp;
 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
 		hdmi_800mV_0dB = 7;
 	} else if (IS_HASWELL(dev)) {
@@ -214,6 +240,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations_dp = hsw_ddi_translations_dp;
 		ddi_translations_edp = hsw_ddi_translations_dp;
 		ddi_translations_hdmi = hsw_ddi_translations_hdmi;
+		n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
 		hdmi_800mV_0dB = 6;
 	} else {
@@ -222,6 +249,8 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
 		hdmi_800mV_0dB = 7;
 	}
@@ -229,29 +258,34 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 	switch (port) {
 	case PORT_A:
 		ddi_translations = ddi_translations_edp;
+		size = n_edp_entries;
 		break;
 	case PORT_B:
 	case PORT_C:
 		ddi_translations = ddi_translations_dp;
+		size = n_dp_entries;
 		break;
 	case PORT_D:
-		if (intel_dp_is_edp(dev, PORT_D))
+		if (intel_dp_is_edp(dev, PORT_D)) {
 			ddi_translations = ddi_translations_edp;
-		else
+			size = n_edp_entries;
+		} else {
 			ddi_translations = ddi_translations_dp;
+			size = n_dp_entries;
+		}
 		break;
 	case PORT_E:
 		if (ddi_translations_fdi)
 			ddi_translations = ddi_translations_fdi;
 		else
 			ddi_translations = ddi_translations_dp;
+		size = n_dp_entries;
 		break;
 	default:
 		BUG();
 	}
 
-	for (i = 0, reg = DDI_BUF_TRANS(port);
-	     i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+	for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
 		I915_WRITE(reg, ddi_translations[i].trans1);
 		reg += 4;
 		I915_WRITE(reg, ddi_translations[i].trans2);
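
The loop rewrite at the end of this hunk matters more than it looks: the old bound, ARRAY_SIZE(hsw_ddi_translations_fdi), was only correct while every translation table happened to have the same number of entries, and the new ten-entry Skylake eDP table breaks that assumption, so the entry count now travels with the chosen table. A standalone sketch of the safe pattern (tables and names invented for illustration):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct buf_trans {
	unsigned int trans1, trans2;
};

static const struct buf_trans dp_table[] = {
	{ 0x18, 0xa0 }, { 0x16, 0xab }, { 0x12, 0xa2 },
};
static const struct buf_trans edp_table[] = {
	{ 0x18, 0xa8 }, { 0x16, 0xab },
};

/* one register pair per entry, as in the DDI_BUF_TRANS loop */
static void program(const struct buf_trans *t, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		printf("write 0x%x / 0x%x\n", t[i].trans1, t[i].trans2);
}

int main(void)
{
	/* the size is taken from the table actually being programmed */
	program(dp_table, ARRAY_SIZE(dp_table));
	program(edp_table, ARRAY_SIZE(edp_table));
	return 0;
}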
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c234af0379fc..dbd817928d0d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -390,7 +390,7 @@ static const intel_limit_t intel_limits_chv = {
  * them would make no difference.
  */
 	.dot = { .min = 25000 * 5, .max = 540000 * 5},
-	.vco = { .min = 4860000, .max = 6700000 },
+	.vco = { .min = 4860000, .max = 6480000 },
 	.n = { .min = 1, .max = 1 },
 	.m1 = { .min = 2, .max = 2 },
 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
@@ -2195,9 +2195,44 @@ intel_fb_align_height(struct drm_device *dev, int height,
 		      uint64_t fb_format_modifier)
 {
 	int tile_height;
+	uint32_t bits_per_pixel;
 
-	tile_height = fb_format_modifier == I915_FORMAT_MOD_X_TILED ?
-		(IS_GEN2(dev) ? 16 : 8) : 1;
+	switch (fb_format_modifier) {
+	case DRM_FORMAT_MOD_NONE:
+		tile_height = 1;
+		break;
+	case I915_FORMAT_MOD_X_TILED:
+		tile_height = IS_GEN2(dev) ? 16 : 8;
+		break;
+	case I915_FORMAT_MOD_Y_TILED:
+		tile_height = 32;
+		break;
+	case I915_FORMAT_MOD_Yf_TILED:
+		bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
+		switch (bits_per_pixel) {
+		default:
+		case 8:
+			tile_height = 64;
+			break;
+		case 16:
+		case 32:
+			tile_height = 32;
+			break;
+		case 64:
+			tile_height = 16;
+			break;
+		case 128:
+			WARN_ONCE(1,
+				  "128-bit pixels are not supported for display!");
+			tile_height = 16;
+			break;
+		}
+		break;
+	default:
+		MISSING_CASE(fb_format_modifier);
+		tile_height = 1;
+		break;
+	}
 
 	return ALIGN(height, tile_height);
 }
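
On the tile heights chosen above: linear scanout needs no vertical alignment, X tiles are 8 rows (16 on gen2), Y tiles are 32 rows, and Yf tiles hold a fixed number of bytes, so their row count shrinks as pixels get wider. A standalone version of the rounding this feeds into (the table is copied from the switch above; treat it as a sketch, not the driver interface):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Yf tile heights by bits per pixel, as in the switch above */
static int yf_tile_height(int bits_per_pixel)
{
	switch (bits_per_pixel) {
	case 16:
	case 32:
		return 32;
	case 64:
	case 128:
		return 16;
	case 8:
	default:
		return 64;
	}
}

int main(void)
{
	/* a 1080-row, 32bpp Yf surface must be allocated 1088 rows tall */
	printf("%d\n", ALIGN(1080, yf_tile_height(32)));
	return 0;
}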
@@ -2235,8 +2270,12 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 		}
 		break;
 	case I915_FORMAT_MOD_Y_TILED:
-		WARN(1, "Y tiled bo slipped through, driver bug!\n");
-		return -EINVAL;
+	case I915_FORMAT_MOD_Yf_TILED:
+		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
+			      "Y tiling bo slipped through, driver bug!\n"))
+			return -EINVAL;
+		alignment = 1 * 1024 * 1024;
+		break;
 	default:
 		MISSING_CASE(fb->modifier[0]);
 		return -EINVAL;
@@ -2728,6 +2767,40 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 	POSTING_READ(reg);
 }
 
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+			      uint32_t pixel_format)
+{
+	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
+
+	/*
+	 * The stride is either expressed as a multiple of 64 bytes
+	 * chunks for linear buffers or in number of tiles for tiled
+	 * buffers.
+	 */
+	switch (fb_modifier) {
+	case DRM_FORMAT_MOD_NONE:
+		return 64;
+	case I915_FORMAT_MOD_X_TILED:
+		if (INTEL_INFO(dev)->gen == 2)
+			return 128;
+		return 512;
+	case I915_FORMAT_MOD_Y_TILED:
+		/* No need to check for old gens and Y tiling since this is
+		 * about the display engine and those will be blocked before
+		 * we get here.
+		 */
+		return 128;
+	case I915_FORMAT_MOD_Yf_TILED:
+		if (bits_per_pixel == 8)
+			return 64;
+		else
+			return 128;
+	default:
+		MISSING_CASE(fb_modifier);
+		return 64;
+	}
+}
+
 static void skylake_update_primary_plane(struct drm_crtc *crtc,
 					 struct drm_framebuffer *fb,
 					 int x, int y)
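
A note on the stride helper introduced above: PLANE_STRIDE is not programmed in bytes. Linear surfaces are measured in 64-byte chunks and tiled surfaces in tiles, so the helper returns the byte width of one unit and the plane update code divides the byte pitch by it. Worked standalone example (unit table copied from the helper; assumes gen >= 3 for the X-tile value):

#include <stdio.h>

enum modifier { LINEAR, X_TILED, Y_TILED, YF_TILED };

static unsigned int stride_unit(enum modifier m, int bits_per_pixel)
{
	switch (m) {
	case X_TILED:
		return 512;	/* one X tile row is 512 bytes */
	case Y_TILED:
		return 128;	/* one Y tile row is 128 bytes */
	case YF_TILED:
		return bits_per_pixel == 8 ? 64 : 128;
	case LINEAR:
	default:
		return 64;	/* linear strides count 64-byte chunks */
	}
}

int main(void)
{
	unsigned int pitch = 1920 * 4;	/* 32bpp, 1920 pixels per row */

	/* values that would land in PLANE_STRIDE for each layout */
	printf("linear: %u\n", pitch / stride_unit(LINEAR, 32));	/* 120 */
	printf("Y:      %u\n", pitch / stride_unit(Y_TILED, 32));	/*  60 */
	return 0;
}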
@@ -2735,10 +2808,9 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
 	int pipe = intel_crtc->pipe;
-	u32 plane_ctl, stride;
+	u32 plane_ctl, stride_div;
 
 	if (!intel_crtc->primary_enabled) {
 		I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -2773,29 +2845,30 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
 		BUG();
 	}
 
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
-	/*
-	 * The stride is either expressed as a multiple of 64 bytes chunks for
-	 * linear buffers or in number of tiles for tiled buffers.
-	 */
 	switch (fb->modifier[0]) {
 	case DRM_FORMAT_MOD_NONE:
-		stride = fb->pitches[0] >> 6;
 		break;
 	case I915_FORMAT_MOD_X_TILED:
 		plane_ctl |= PLANE_CTL_TILED_X;
-		stride = fb->pitches[0] >> 9;
+		break;
+	case I915_FORMAT_MOD_Y_TILED:
+		plane_ctl |= PLANE_CTL_TILED_Y;
+		break;
+	case I915_FORMAT_MOD_Yf_TILED:
+		plane_ctl |= PLANE_CTL_TILED_YF;
 		break;
 	default:
-		BUG();
+		MISSING_CASE(fb->modifier[0]);
 	}
 
 	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
 	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
 		plane_ctl |= PLANE_CTL_ROTATE_180;
 
+	obj = intel_fb_obj(fb);
+	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+					       fb->pixel_format);
+
 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
 
 	DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
@@ -2808,7 +2881,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
 	I915_WRITE(PLANE_SIZE(pipe, 0),
 		   (intel_crtc->config->pipe_src_h - 1) << 16 |
 		   (intel_crtc->config->pipe_src_w - 1));
-	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+	I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
 	I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
 
 	POSTING_READ(PLANE_SURF(pipe, 0));
@@ -3062,7 +3135,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 
 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
 {
-	return crtc->base.enabled && crtc->active &&
+	return crtc->base.state->enable && crtc->active &&
 	       crtc->config->has_pch_encoder;
 }
 
@@ -4200,7 +4273,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
 	bool reenable_ips = false;
 
 	/* The clocks have to be on to load the palette. */
-	if (!crtc->enabled || !intel_crtc->active)
+	if (!crtc->state->enable || !intel_crtc->active)
 		return;
 
 	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
@@ -4313,7 +4386,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->enabled);
+	WARN_ON(!crtc->state->enable);
 
 	if (intel_crtc->active)
 		return;
@@ -4322,7 +4395,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	intel_prepare_shared_dpll(intel_crtc);
 
 	if (intel_crtc->config->has_dp_encoder)
-		intel_dp_set_m_n(intel_crtc);
+		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
 
@@ -4421,7 +4494,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->enabled);
+	WARN_ON(!crtc->state->enable);
 
 	if (intel_crtc->active)
 		return;
@@ -4430,7 +4503,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	intel_enable_shared_dpll(intel_crtc);
 
 	if (intel_crtc->config->has_dp_encoder)
-		intel_dp_set_m_n(intel_crtc);
+		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
 
@@ -4768,7 +4841,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
 	for_each_intel_crtc(dev, crtc) {
 		enum intel_display_power_domain domain;
 
-		if (!crtc->base.enabled)
+		if (!crtc->base.state->enable)
 			continue;
 
 		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
@@ -4989,7 +5062,7 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
 
 	/* disable/enable all currently active pipes while we change cdclk */
 	for_each_intel_crtc(dev, intel_crtc)
-		if (intel_crtc->base.enabled)
+		if (intel_crtc->base.state->enable)
 			*prepare_pipes |= (1 << intel_crtc->pipe);
 }
4995 5068
@@ -5029,7 +5102,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
5029 int pipe = intel_crtc->pipe; 5102 int pipe = intel_crtc->pipe;
5030 bool is_dsi; 5103 bool is_dsi;
5031 5104
5032 WARN_ON(!crtc->enabled); 5105 WARN_ON(!crtc->state->enable);
5033 5106
5034 if (intel_crtc->active) 5107 if (intel_crtc->active)
5035 return; 5108 return;
@@ -5044,7 +5117,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 	}
 
 	if (intel_crtc->config->has_dp_encoder)
-		intel_dp_set_m_n(intel_crtc);
+		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
 
@@ -5112,7 +5185,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->enabled);
+	WARN_ON(!crtc->state->enable);
 
 	if (intel_crtc->active)
 		return;
@@ -5120,7 +5193,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	i9xx_set_pll_dividers(intel_crtc);
 
 	if (intel_crtc->config->has_dp_encoder)
-		intel_dp_set_m_n(intel_crtc);
+		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
 
@@ -5311,7 +5384,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* crtc should still be enabled when we disable it. */
-	WARN_ON(!crtc->enabled);
+	WARN_ON(!crtc->state->enable);
 
 	dev_priv->display.crtc_disable(crtc);
 	dev_priv->display.off(crtc);
@@ -5389,7 +5462,8 @@ static void intel_connector_check_state(struct intel_connector *connector)
 
 		crtc = encoder->base.crtc;
 
-		I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
+		I915_STATE_WARN(!crtc->state->enable,
+				"crtc not enabled\n");
 		I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
 		I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
 				"encoder active on the wrong pipe\n");
@@ -5576,7 +5650,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	 * - LVDS dual channel mode
 	 * - Double wide pipe
 	 */
-	if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+	if ((intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
 	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
 		pipe_config->pipe_src_w &= ~1;
 
@@ -5879,7 +5953,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 	 * for gen < 8) and if DRRS is supported (to make sure the
 	 * registers are not unnecessarily accessed).
 	 */
-	if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+	if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
 	    crtc->config->has_drrs) {
 		I915_WRITE(PIPE_DATA_M2(transcoder),
 			   TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
@@ -5895,13 +5969,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 	}
 }
 
-void intel_dp_set_m_n(struct intel_crtc *crtc)
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
 {
+	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+
+	if (m_n == M1_N1) {
+		dp_m_n = &crtc->config->dp_m_n;
+		dp_m2_n2 = &crtc->config->dp_m2_n2;
+	} else if (m_n == M2_N2) {
+
+		/*
+		 * M2_N2 registers are not supported. Hence m2_n2 divider value
+		 * needs to be programmed into M1_N1.
+		 */
+		dp_m_n = &crtc->config->dp_m2_n2;
+	} else {
+		DRM_ERROR("Unsupported divider value\n");
+		return;
+	}
+
 	if (crtc->config->has_pch_encoder)
 		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
 	else
-		intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n,
-					     &crtc->config->dp_m2_n2);
+		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
 }
 
 static void vlv_update_pll(struct intel_crtc *crtc,
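
Context for the new link_m_n_set parameter: DRRS keeps two precomputed divider sets, one for the full refresh rate (dp_m_n) and one for the downclocked rate (dp_m2_n2). Hardware with real M2_N2 registers gets both programmed during the modeset (the M1_N1 branch); CHV and gen8+ have no M2_N2 registers, so a later refresh-rate switch reprograms M1_N1 with the m2_n2 values instead (the M2_N2 branch). A standalone model of that selection, using invented types rather than the driver's:

#include <stdio.h>

enum link_m_n_set { M1_N1, M2_N2 };

struct m_n { int m, n; };
struct cfg { struct m_n dp_m_n, dp_m2_n2; };

static void set_m_n(const struct cfg *c, enum link_m_n_set which)
{
	const struct m_n *m1 = NULL, *m2 = NULL;

	if (which == M1_N1) {
		m1 = &c->dp_m_n;
		m2 = &c->dp_m2_n2;	/* hardware has M2_N2 registers */
	} else {
		m1 = &c->dp_m2_n2;	/* no M2_N2: reuse M1_N1 */
	}
	printf("M1_N1 <- %d/%d, M2_N2 %s\n", m1->m, m1->n,
	       m2 ? "programmed" : "skipped");
}

int main(void)
{
	struct cfg c = { { 100, 240 }, { 50, 240 } };

	set_m_n(&c, M1_N1);	/* modeset path */
	set_m_n(&c, M2_N2);	/* DRRS downclock on CHV / gen8+ */
	return 0;
}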
@@ -7650,7 +7740,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val, base, offset, stride_mult;
+	u32 val, base, offset, stride_mult, tiling;
 	int pipe = crtc->pipe;
 	int fourcc, pixel_format;
 	int aligned_height;
@@ -7669,11 +7759,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	if (!(val & PLANE_CTL_ENABLE))
 		goto error;
 
-	if (val & PLANE_CTL_TILED_MASK) {
-		plane_config->tiling = I915_TILING_X;
-		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
-	}
-
 	pixel_format = val & PLANE_CTL_FORMAT_MASK;
 	fourcc = skl_format_to_fourcc(pixel_format,
 				      val & PLANE_CTL_ORDER_RGBX,
@@ -7681,6 +7766,26 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->pixel_format = fourcc;
 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
 
+	tiling = val & PLANE_CTL_TILED_MASK;
+	switch (tiling) {
+	case PLANE_CTL_TILED_LINEAR:
+		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
+		break;
+	case PLANE_CTL_TILED_X:
+		plane_config->tiling = I915_TILING_X;
+		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+		break;
+	case PLANE_CTL_TILED_Y:
+		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
+		break;
+	case PLANE_CTL_TILED_YF:
+		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
+		break;
+	default:
+		MISSING_CASE(tiling);
+		goto error;
+	}
+
 	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
 	plane_config->base = base;
 
@@ -7691,17 +7796,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->width = ((val >> 0) & 0x1fff) + 1;
 
 	val = I915_READ(PLANE_STRIDE(pipe, 0));
-	switch (plane_config->tiling) {
-	case I915_TILING_NONE:
-		stride_mult = 64;
-		break;
-	case I915_TILING_X:
-		stride_mult = 512;
-		break;
-	default:
-		MISSING_CASE(plane_config->tiling);
-		goto error;
-	}
+	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
+						fb->pixel_format);
 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
 
 	aligned_height = intel_fb_align_height(dev, fb->height,
@@ -8686,7 +8782,7 @@ retry:
 		i++;
 		if (!(encoder->possible_crtcs & (1 << i)))
 			continue;
-		if (possible_crtc->enabled)
+		if (possible_crtc->state->enable)
 			continue;
 		/* This can occur when applying the pipe A quirk on resume. */
 		if (to_intel_crtc(possible_crtc)->new_enabled)
@@ -8754,7 +8850,7 @@ retry:
 	return true;
 
  fail:
-	intel_crtc->new_enabled = crtc->enabled;
+	intel_crtc->new_enabled = crtc->state->enable;
 	if (intel_crtc->new_enabled)
 		intel_crtc->new_config = intel_crtc->config;
 	else
@@ -9661,10 +9757,10 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
 	    !i915_gem_request_completed(work->flip_queued_req, true))
 		return false;
 
-	work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+	work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
 	}
 
-	if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3)
+	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
 		return false;
 
 	/* Potential stall - if we see that the flip has happened,
@@ -9695,7 +9791,8 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
9695 spin_lock(&dev->event_lock); 9791 spin_lock(&dev->event_lock);
9696 if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) { 9792 if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
9697 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", 9793 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
9698 intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe)); 9794 intel_crtc->unpin_work->flip_queued_vblank,
9795 drm_vblank_count(dev, pipe));
9699 page_flip_completed(intel_crtc); 9796 page_flip_completed(intel_crtc);
9700 } 9797 }
9701 spin_unlock(&dev->event_lock); 9798 spin_unlock(&dev->event_lock);
@@ -9837,7 +9934,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9837 intel_ring_get_request(ring)); 9934 intel_ring_get_request(ring));
9838 } 9935 }
9839 9936
9840 work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe); 9937 work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
9841 work->enable_stall_check = true; 9938 work->enable_stall_check = true;
9842 9939
9843 i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, 9940 i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
@@ -9913,7 +10010,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9913 } 10010 }
9914 10011
9915 for_each_intel_crtc(dev, crtc) { 10012 for_each_intel_crtc(dev, crtc) {
9916 crtc->new_enabled = crtc->base.enabled; 10013 crtc->new_enabled = crtc->base.state->enable;
9917 10014
9918 if (crtc->new_enabled) 10015 if (crtc->new_enabled)
9919 crtc->new_config = crtc->config; 10016 crtc->new_config = crtc->config;
@@ -9943,6 +10040,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
9943 } 10040 }
9944 10041
9945 for_each_intel_crtc(dev, crtc) { 10042 for_each_intel_crtc(dev, crtc) {
10043 crtc->base.state->enable = crtc->new_enabled;
9946 crtc->base.enabled = crtc->new_enabled; 10044 crtc->base.enabled = crtc->new_enabled;
9947 } 10045 }
9948} 10046}
@@ -10206,6 +10304,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
10206 if (!pipe_config) 10304 if (!pipe_config)
10207 return ERR_PTR(-ENOMEM); 10305 return ERR_PTR(-ENOMEM);
10208 10306
10307 pipe_config->base.crtc = crtc;
10209 drm_mode_copy(&pipe_config->base.adjusted_mode, mode); 10308 drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
10210 drm_mode_copy(&pipe_config->base.mode, mode); 10309 drm_mode_copy(&pipe_config->base.mode, mode);
10211 10310
@@ -10354,7 +10453,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
10354 10453
10355 /* Check for pipes that will be enabled/disabled ... */ 10454 /* Check for pipes that will be enabled/disabled ... */
10356 for_each_intel_crtc(dev, intel_crtc) { 10455 for_each_intel_crtc(dev, intel_crtc) {
10357 if (intel_crtc->base.enabled == intel_crtc->new_enabled) 10456 if (intel_crtc->base.state->enable == intel_crtc->new_enabled)
10358 continue; 10457 continue;
10359 10458
10360 if (!intel_crtc->new_enabled) 10459 if (!intel_crtc->new_enabled)
@@ -10429,10 +10528,10 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10429 10528
10430 /* Double check state. */ 10529 /* Double check state. */
10431 for_each_intel_crtc(dev, intel_crtc) { 10530 for_each_intel_crtc(dev, intel_crtc) {
10432 WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base)); 10531 WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
10433 WARN_ON(intel_crtc->new_config && 10532 WARN_ON(intel_crtc->new_config &&
10434 intel_crtc->new_config != intel_crtc->config); 10533 intel_crtc->new_config != intel_crtc->config);
10435 WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config); 10534 WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config);
10436 } 10535 }
10437 10536
10438 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 10537 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -10819,7 +10918,7 @@ check_crtc_state(struct drm_device *dev)
10819 DRM_DEBUG_KMS("[CRTC:%d]\n", 10918 DRM_DEBUG_KMS("[CRTC:%d]\n",
10820 crtc->base.base.id); 10919 crtc->base.base.id);
10821 10920
10822 I915_STATE_WARN(crtc->active && !crtc->base.enabled, 10921 I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
10823 "active crtc, but not enabled in sw tracking\n"); 10922 "active crtc, but not enabled in sw tracking\n");
10824 10923
10825 for_each_intel_encoder(dev, encoder) { 10924 for_each_intel_encoder(dev, encoder) {
@@ -10833,9 +10932,10 @@ check_crtc_state(struct drm_device *dev)
10833 I915_STATE_WARN(active != crtc->active, 10932 I915_STATE_WARN(active != crtc->active,
10834 "crtc's computed active state doesn't match tracked active state " 10933 "crtc's computed active state doesn't match tracked active state "
10835 "(expected %i, found %i)\n", active, crtc->active); 10934 "(expected %i, found %i)\n", active, crtc->active);
10836 I915_STATE_WARN(enabled != crtc->base.enabled, 10935 I915_STATE_WARN(enabled != crtc->base.state->enable,
10837 "crtc's computed enabled state doesn't match tracked enabled state " 10936 "crtc's computed enabled state doesn't match tracked enabled state "
10838 "(expected %i, found %i)\n", enabled, crtc->base.enabled); 10937 "(expected %i, found %i)\n", enabled,
10938 crtc->base.state->enable);
10839 10939
10840 active = dev_priv->display.get_pipe_config(crtc, 10940 active = dev_priv->display.get_pipe_config(crtc,
10841 &pipe_config); 10941 &pipe_config);
@@ -10899,7 +10999,7 @@ check_shared_dpll_state(struct drm_device *dev)
10899 pll->on, active); 10999 pll->on, active);
10900 11000
10901 for_each_intel_crtc(dev, crtc) { 11001 for_each_intel_crtc(dev, crtc) {
10902 if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll) 11002 if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
10903 enabled_crtcs++; 11003 enabled_crtcs++;
10904 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) 11004 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10905 active_crtcs++; 11005 active_crtcs++;
@@ -11085,7 +11185,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
11085 intel_crtc_disable(&intel_crtc->base); 11185 intel_crtc_disable(&intel_crtc->base);
11086 11186
11087 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { 11187 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
11088 if (intel_crtc->base.enabled) 11188 if (intel_crtc->base.state->enable)
11089 dev_priv->display.crtc_disable(&intel_crtc->base); 11189 dev_priv->display.crtc_disable(&intel_crtc->base);
11090 } 11190 }
11091 11191
@@ -11141,7 +11241,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
11141 11241
11142 /* FIXME: add subpixel order */ 11242 /* FIXME: add subpixel order */
11143done: 11243done:
11144 if (ret && crtc->enabled) 11244 if (ret && crtc->state->enable)
11145 crtc->mode = *saved_mode; 11245 crtc->mode = *saved_mode;
11146 11246
11147 kfree(saved_mode); 11247 kfree(saved_mode);
@@ -11237,7 +11337,7 @@ static int intel_set_config_save_state(struct drm_device *dev,
11237 */ 11337 */
11238 count = 0; 11338 count = 0;
11239 for_each_crtc(dev, crtc) { 11339 for_each_crtc(dev, crtc) {
11240 config->save_crtc_enabled[count++] = crtc->enabled; 11340 config->save_crtc_enabled[count++] = crtc->state->enable;
11241 } 11341 }
11242 11342
11243 count = 0; 11343 count = 0;
@@ -11471,7 +11571,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
11471 } 11571 }
11472 } 11572 }
11473 11573
11474 if (crtc->new_enabled != crtc->base.enabled) { 11574 if (crtc->new_enabled != crtc->base.state->enable) {
11475 DRM_DEBUG_KMS("crtc %sabled, full mode switch\n", 11575 DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
11476 crtc->new_enabled ? "en" : "dis"); 11576 crtc->new_enabled ? "en" : "dis");
11477 config->mode_changed = true; 11577 config->mode_changed = true;
@@ -11907,6 +12007,12 @@ intel_check_primary_plane(struct drm_plane *plane,
11907 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); 12007 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
11908 12008
11909 intel_crtc->atomic.update_fbc = true; 12009 intel_crtc->atomic.update_fbc = true;
12010
12011 /* Update watermarks on tiling changes. */
12012 if (!plane->state->fb || !state->base.fb ||
12013 plane->state->fb->modifier[0] !=
12014 state->base.fb->modifier[0])
12015 intel_crtc->atomic.update_wm = true;
11910 } 12016 }
11911 12017
11912 return 0; 12018 return 0;
@@ -12297,6 +12403,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
12297 if (!crtc_state) 12403 if (!crtc_state)
12298 goto fail; 12404 goto fail;
12299 intel_crtc_set_state(intel_crtc, crtc_state); 12405 intel_crtc_set_state(intel_crtc, crtc_state);
12406 crtc_state->base.crtc = &intel_crtc->base;
12300 12407
12301 primary = intel_primary_plane_create(dev, pipe); 12408 primary = intel_primary_plane_create(dev, pipe);
12302 if (!primary) 12409 if (!primary)
@@ -12374,9 +12481,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
12374 struct drm_crtc *drmmode_crtc; 12481 struct drm_crtc *drmmode_crtc;
12375 struct intel_crtc *crtc; 12482 struct intel_crtc *crtc;
12376 12483
12377 if (!drm_core_check_feature(dev, DRIVER_MODESET))
12378 return -ENODEV;
12379
12380 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 12484 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
12381 12485
12382 if (!drmmode_crtc) { 12486 if (!drmmode_crtc) {
@@ -12649,14 +12753,43 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
12649 .create_handle = intel_user_framebuffer_create_handle, 12753 .create_handle = intel_user_framebuffer_create_handle,
12650}; 12754};
12651 12755
12756static
12757u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
12758 uint32_t pixel_format)
12759{
12760 u32 gen = INTEL_INFO(dev)->gen;
12761
12762 if (gen >= 9) {
12763 /* "The stride in bytes must not exceed the of the size of 8K
12764 * pixels and 32K bytes."
12765 */
12766 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
12767 } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
12768 return 32*1024;
12769 } else if (gen >= 4) {
12770 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
12771 return 16*1024;
12772 else
12773 return 32*1024;
12774 } else if (gen >= 3) {
12775 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
12776 return 8*1024;
12777 else
12778 return 16*1024;
12779 } else {
12780 /* XXX DSPC is limited to 4k tiled */
12781 return 8*1024;
12782 }
12783}
12784
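As a worked example of the new helper: on gen9 a 4-byte-per-pixel format gives min(8192 * 4, 32768) = 32768, so the 8K-pixel and 32K-byte caps coincide, while a 2-byte-per-pixel format drops the limit to min(8192 * 2, 32768) = 16384 bytes.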
12652static int intel_framebuffer_init(struct drm_device *dev, 12785static int intel_framebuffer_init(struct drm_device *dev,
12653 struct intel_framebuffer *intel_fb, 12786 struct intel_framebuffer *intel_fb,
12654 struct drm_mode_fb_cmd2 *mode_cmd, 12787 struct drm_mode_fb_cmd2 *mode_cmd,
12655 struct drm_i915_gem_object *obj) 12788 struct drm_i915_gem_object *obj)
12656{ 12789{
12657 int aligned_height; 12790 int aligned_height;
12658 int pitch_limit;
12659 int ret; 12791 int ret;
12792 u32 pitch_limit, stride_alignment;
12660 12793
12661 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 12794 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
12662 12795
@@ -12677,36 +12810,37 @@ static int intel_framebuffer_init(struct drm_device *dev,
12677 } 12810 }
12678 } 12811 }
12679 12812
12680	 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED) {	12813	 /* Sanity-check the passed-in modifier. */
12681 DRM_DEBUG("hardware does not support tiling Y\n"); 12814 switch (mode_cmd->modifier[0]) {
12815 case I915_FORMAT_MOD_Y_TILED:
12816 case I915_FORMAT_MOD_Yf_TILED:
12817 if (INTEL_INFO(dev)->gen < 9) {
12818 DRM_DEBUG("Unsupported tiling 0x%llx!\n",
12819 mode_cmd->modifier[0]);
12820 return -EINVAL;
12821 }
12822 case DRM_FORMAT_MOD_NONE:
12823 case I915_FORMAT_MOD_X_TILED:
12824 break;
12825 default:
12826 DRM_ERROR("Unsupported fb modifier 0x%llx!\n",
12827 mode_cmd->modifier[0]);
12682 return -EINVAL; 12828 return -EINVAL;
12683 } 12829 }
12684 12830
12685 if (mode_cmd->pitches[0] & 63) { 12831 stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
12686 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", 12832 mode_cmd->pixel_format);
12687 mode_cmd->pitches[0]); 12833 if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
12834 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
12835 mode_cmd->pitches[0], stride_alignment);
12688 return -EINVAL; 12836 return -EINVAL;
12689 } 12837 }
12690 12838
12691 if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) { 12839 pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
12692 pitch_limit = 32*1024; 12840 mode_cmd->pixel_format);
12693 } else if (INTEL_INFO(dev)->gen >= 4) {
12694 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)
12695 pitch_limit = 16*1024;
12696 else
12697 pitch_limit = 32*1024;
12698 } else if (INTEL_INFO(dev)->gen >= 3) {
12699 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)
12700 pitch_limit = 8*1024;
12701 else
12702 pitch_limit = 16*1024;
12703 } else
12704 /* XXX DSPC is limited to 4k tiled */
12705 pitch_limit = 8*1024;
12706
12707 if (mode_cmd->pitches[0] > pitch_limit) { 12841 if (mode_cmd->pitches[0] > pitch_limit) {
12708 DRM_DEBUG("%s pitch (%d) must be at less than %d\n", 12842 DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
12709 mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED ? 12843 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
12710 "tiled" : "linear", 12844 "tiled" : "linear",
12711 mode_cmd->pitches[0], pitch_limit); 12845 mode_cmd->pitches[0], pitch_limit);
12712 return -EINVAL; 12846 return -EINVAL;
@@ -13318,11 +13452,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
13318 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 13452 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
13319 13453
13320 /* restore vblank interrupts to correct state */ 13454 /* restore vblank interrupts to correct state */
13455 drm_crtc_vblank_reset(&crtc->base);
13321 if (crtc->active) { 13456 if (crtc->active) {
13322 update_scanline_offset(crtc); 13457 update_scanline_offset(crtc);
13323 drm_vblank_on(dev, crtc->pipe); 13458 drm_crtc_vblank_on(&crtc->base);
13324 } else 13459 }
13325 drm_vblank_off(dev, crtc->pipe);
13326 13460
13327 /* We need to sanitize the plane -> pipe mapping first because this will 13461 /* We need to sanitize the plane -> pipe mapping first because this will
13328 * disable the crtc (and hence change the state) if it is wrong. Note 13462 * disable the crtc (and hence change the state) if it is wrong. Note
@@ -13362,6 +13496,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
13362 } 13496 }
13363 13497
13364 WARN_ON(crtc->active); 13498 WARN_ON(crtc->active);
13499 crtc->base.state->enable = false;
13365 crtc->base.enabled = false; 13500 crtc->base.enabled = false;
13366 } 13501 }
13367 13502
@@ -13378,7 +13513,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
13378 * have active connectors/encoders. */ 13513 * have active connectors/encoders. */
13379 intel_crtc_update_dpms(&crtc->base); 13514 intel_crtc_update_dpms(&crtc->base);
13380 13515
13381 if (crtc->active != crtc->base.enabled) { 13516 if (crtc->active != crtc->base.state->enable) {
13382 struct intel_encoder *encoder; 13517 struct intel_encoder *encoder;
13383 13518
13384 /* This can happen either due to bugs in the get_hw_state 13519 /* This can happen either due to bugs in the get_hw_state
@@ -13386,9 +13521,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
13386 * pipe A quirk. */ 13521 * pipe A quirk. */
13387 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", 13522 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
13388 crtc->base.base.id, 13523 crtc->base.base.id,
13389 crtc->base.enabled ? "enabled" : "disabled", 13524 crtc->base.state->enable ? "enabled" : "disabled",
13390 crtc->active ? "enabled" : "disabled"); 13525 crtc->active ? "enabled" : "disabled");
13391 13526
13527 crtc->base.state->enable = crtc->active;
13392 crtc->base.enabled = crtc->active; 13528 crtc->base.enabled = crtc->active;
13393 13529
13394 /* Because we only establish the connector -> encoder -> 13530 /* Because we only establish the connector -> encoder ->
@@ -13525,6 +13661,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
13525 crtc->active = dev_priv->display.get_pipe_config(crtc, 13661 crtc->active = dev_priv->display.get_pipe_config(crtc,
13526 crtc->config); 13662 crtc->config);
13527 13663
13664 crtc->base.state->enable = crtc->active;
13528 crtc->base.enabled = crtc->active; 13665 crtc->base.enabled = crtc->active;
13529 crtc->primary_enabled = primary_get_hw_state(crtc); 13666 crtc->primary_enabled = primary_get_hw_state(crtc);
13530 13667
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 868a07ba5e59..d1141d37e205 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2691,11 +2691,14 @@ static uint8_t
2691intel_dp_voltage_max(struct intel_dp *intel_dp) 2691intel_dp_voltage_max(struct intel_dp *intel_dp)
2692{ 2692{
2693 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2693 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2694 struct drm_i915_private *dev_priv = dev->dev_private;
2694 enum port port = dp_to_dig_port(intel_dp)->port; 2695 enum port port = dp_to_dig_port(intel_dp)->port;
2695 2696
2696 if (INTEL_INFO(dev)->gen >= 9) 2697 if (INTEL_INFO(dev)->gen >= 9) {
2698 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2699 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2697 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 2700 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2698 else if (IS_VALLEYVIEW(dev)) 2701 } else if (IS_VALLEYVIEW(dev))
2699 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 2702 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2700 else if (IS_GEN7(dev) && port == PORT_A) 2703 else if (IS_GEN7(dev) && port == PORT_A)
2701 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 2704 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2719,6 +2722,8 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2719 return DP_TRAIN_PRE_EMPH_LEVEL_2; 2722 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2720 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 2723 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2721 return DP_TRAIN_PRE_EMPH_LEVEL_1; 2724 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2725 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2726 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2722 default: 2727 default:
2723 return DP_TRAIN_PRE_EMPH_LEVEL_0; 2728 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2724 } 2729 }
@@ -3201,6 +3206,9 @@ intel_hsw_signal_levels(uint8_t train_set)
3201 return DDI_BUF_TRANS_SELECT(7); 3206 return DDI_BUF_TRANS_SELECT(7);
3202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 3207 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3203 return DDI_BUF_TRANS_SELECT(8); 3208 return DDI_BUF_TRANS_SELECT(8);
3209
3210 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3211 return DDI_BUF_TRANS_SELECT(9);
3204 default: 3212 default:
3205 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 3213 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3206 "0x%x\n", signal_levels); 3214 "0x%x\n", signal_levels);
@@ -4736,6 +4744,18 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4736 I915_READ(pp_div_reg)); 4744 I915_READ(pp_div_reg));
4737} 4745}
4738 4746
4747/**
4748 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4749 * @dev: DRM device
4750 * @refresh_rate: RR to be programmed
4751 *
4752 * This function gets called when refresh rate (RR) has to be changed from
4753 * one frequency to another. Switches can be between high and low RR
4754 * supported by the panel or to any other RR based on media playback (in
4755 * this case, RR value needs to be passed from user space).
4756 *
4757 * The caller of this function needs to take a lock on dev_priv->drrs.
4758 */
4739static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) 4759static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4740{ 4760{
4741 struct drm_i915_private *dev_priv = dev->dev_private; 4761 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4793,14 +4813,32 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4793 return; 4813 return;
4794 } 4814 }
4795 4815
4796 if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) { 4816 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
4817 switch (index) {
4818 case DRRS_HIGH_RR:
4819 intel_dp_set_m_n(intel_crtc, M1_N1);
4820 break;
4821 case DRRS_LOW_RR:
4822 intel_dp_set_m_n(intel_crtc, M2_N2);
4823 break;
4824 case DRRS_MAX_RR:
4825 default:
4826 DRM_ERROR("Unsupported refreshrate type\n");
4827 }
4828 } else if (INTEL_INFO(dev)->gen > 6) {
4797 reg = PIPECONF(intel_crtc->config->cpu_transcoder); 4829 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
4798 val = I915_READ(reg); 4830 val = I915_READ(reg);
4831
4799 if (index > DRRS_HIGH_RR) { 4832 if (index > DRRS_HIGH_RR) {
4800 val |= PIPECONF_EDP_RR_MODE_SWITCH; 4833 if (IS_VALLEYVIEW(dev))
4801 intel_dp_set_m_n(intel_crtc); 4834 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4835 else
4836 val |= PIPECONF_EDP_RR_MODE_SWITCH;
4802 } else { 4837 } else {
4803 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 4838 if (IS_VALLEYVIEW(dev))
4839 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4840 else
4841 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4804 } 4842 }
4805 I915_WRITE(reg, val); 4843 I915_WRITE(reg, val);
4806 } 4844 }
@@ -4810,6 +4848,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4810 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate); 4848 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
4811} 4849}
4812 4850
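A sketch of how a caller might drive the refresh-rate switch under the lock the kernel-doc below requires; intel_dp_set_drrs_state() and dev_priv->drrs are from this patch, while downclock_mode is assumed to be the mode returned by intel_dp_drrs_init():

        /* illustrative caller: switch the panel to its low refresh rate */
        mutex_lock(&dev_priv->drrs.mutex);
        if (dev_priv->drrs.dp)
                intel_dp_set_drrs_state(dev, downclock_mode->vrefresh);
        mutex_unlock(&dev_priv->drrs.mutex);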
4851/**
4852 * intel_edp_drrs_enable - init drrs struct if supported
4853 * @intel_dp: DP struct
4854 *
4855 * Initializes frontbuffer_bits and drrs.dp
4856 */
4813void intel_edp_drrs_enable(struct intel_dp *intel_dp) 4857void intel_edp_drrs_enable(struct intel_dp *intel_dp)
4814{ 4858{
4815 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4859 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4837,6 +4881,11 @@ unlock:
4837 mutex_unlock(&dev_priv->drrs.mutex); 4881 mutex_unlock(&dev_priv->drrs.mutex);
4838} 4882}
4839 4883
4884/**
4885 * intel_edp_drrs_disable - Disable DRRS
4886 * @intel_dp: DP struct
4887 *
4888 */
4840void intel_edp_drrs_disable(struct intel_dp *intel_dp) 4889void intel_edp_drrs_disable(struct intel_dp *intel_dp)
4841{ 4890{
4842 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4891 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -4896,6 +4945,17 @@ unlock:
4896 mutex_unlock(&dev_priv->drrs.mutex); 4945 mutex_unlock(&dev_priv->drrs.mutex);
4897} 4946}
4898 4947
4948/**
4949 * intel_edp_drrs_invalidate - Invalidate DRRS
4950 * @dev: DRM device
4951 * @frontbuffer_bits: frontbuffer plane tracking bits
4952 *
4953 * When there is a disturbance on screen (due to cursor movement/time
4954	 * update, etc.), DRRS needs to be invalidated, i.e. we need to switch to
4955 * high RR.
4956 *
4957 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
4958 */
4899void intel_edp_drrs_invalidate(struct drm_device *dev, 4959void intel_edp_drrs_invalidate(struct drm_device *dev,
4900 unsigned frontbuffer_bits) 4960 unsigned frontbuffer_bits)
4901{ 4961{
@@ -4923,6 +4983,17 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
4923 mutex_unlock(&dev_priv->drrs.mutex); 4983 mutex_unlock(&dev_priv->drrs.mutex);
4924} 4984}
4925 4985
4986/**
4987 * intel_edp_drrs_flush - Flush DRRS
4988 * @dev: DRM device
4989 * @frontbuffer_bits: frontbuffer plane tracking bits
4990 *
4991 * When there is no movement on screen, DRRS work can be scheduled.
4992 * This DRRS work is responsible for setting relevant registers after a
4993 * timeout of 1 second.
4994 *
4995 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
4996 */
4926void intel_edp_drrs_flush(struct drm_device *dev, 4997void intel_edp_drrs_flush(struct drm_device *dev,
4927 unsigned frontbuffer_bits) 4998 unsigned frontbuffer_bits)
4928{ 4999{
@@ -4947,6 +5018,56 @@ void intel_edp_drrs_flush(struct drm_device *dev,
4947 mutex_unlock(&dev_priv->drrs.mutex); 5018 mutex_unlock(&dev_priv->drrs.mutex);
4948} 5019}
4949 5020
5021/**
5022 * DOC: Display Refresh Rate Switching (DRRS)
5023 *
5024 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5025	 * which enables switching between low and high refresh rates
5026	 * dynamically, based on the usage scenario. This feature is applicable
5027 * for internal panels.
5028 *
5029 * Indication that the panel supports DRRS is given by the panel EDID, which
5030 * would list multiple refresh rates for one resolution.
5031 *
5032	 * DRRS has two variants: static and seamless.
5033	 * Static DRRS involves changing the refresh rate (RR) via a full modeset
5034	 * (which may appear as a blink on screen) and is used in dock/undock scenarios.
5035 * Seamless DRRS involves changing RR without any visual effect to the user
5036 * and can be used during normal system usage. This is done by programming
5037 * certain registers.
5038 *
5039 * Support for static/seamless DRRS may be indicated in the VBT based on
5040 * inputs from the panel spec.
5041 *
5042 * DRRS saves power by switching to low RR based on usage scenarios.
5043 *
5044	 * eDP DRRS:
5045 * The implementation is based on frontbuffer tracking implementation.
5046 * When there is a disturbance on the screen triggered by user activity or a
5047 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5048 * When there is no movement on screen, after a timeout of 1 second, a switch
5049 * to low RR is made.
5050 * For integration with frontbuffer tracking code,
5051 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5052 *
5053 * DRRS can be further extended to support other internal panels and also
5054 * the scenario of video playback wherein RR is set based on the rate
5055 * requested by userspace.
5056 */
5057
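A minimal sketch of the frontbuffer-tracking hook-up the DOC block describes; the two DRRS entry points are from this patch, the surrounding call sites are assumptions:

        /* on any write to a scanout buffer: kick back to the high RR */
        intel_edp_drrs_invalidate(dev, frontbuffer_bits);

        /* once the activity completes: re-arm the 1 s downclock work */
        intel_edp_drrs_flush(dev, frontbuffer_bits);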
5058/**
5059 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5060 * @intel_connector: eDP connector
5061 * @fixed_mode: preferred mode of panel
5062 *
5063 * This function is called only once at driver load to initialize basic
5064 * DRRS stuff.
5065 *
5066 * Returns:
5067 * Downclock mode if panel supports it, else return NULL.
5068 * DRRS support is determined by the presence of downclock mode (apart
5069 * from VBT setting).
5070 */
4950static struct drm_display_mode * 5071static struct drm_display_mode *
4951intel_dp_drrs_init(struct intel_connector *intel_connector, 5072intel_dp_drrs_init(struct intel_connector *intel_connector,
4952 struct drm_display_mode *fixed_mode) 5073 struct drm_display_mode *fixed_mode)
@@ -4970,7 +5091,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
4970 (dev, fixed_mode, connector); 5091 (dev, fixed_mode, connector);
4971 5092
4972 if (!downclock_mode) { 5093 if (!downclock_mode) {
4973 DRM_DEBUG_KMS("DRRS not supported\n"); 5094 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4974 return NULL; 5095 return NULL;
4975 } 5096 }
4976 5097
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 58d11a8066d4..f4aa849b243e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -501,6 +501,7 @@ struct intel_plane_wm_parameters {
501 uint8_t bytes_per_pixel; 501 uint8_t bytes_per_pixel;
502 bool enabled; 502 bool enabled;
503 bool scaled; 503 bool scaled;
504 u64 tiling;
504}; 505};
505 506
506struct intel_plane { 507struct intel_plane {
@@ -593,6 +594,26 @@ struct intel_hdmi {
593struct intel_dp_mst_encoder; 594struct intel_dp_mst_encoder;
594#define DP_MAX_DOWNSTREAM_PORTS 0x10 595#define DP_MAX_DOWNSTREAM_PORTS 0x10
595 596
597/*
598 * enum link_m_n_set:
599	 * When the platform provides two sets of M_N registers for DP, we can
600	 * program them and switch between them in case of DRRS.
601	 * But when only one such register set is provided, we have to program the
602	 * required divider value on that register itself, based on the DRRS state.
603	 *
604	 * M1_N1: program dp_m_n on the M1_N1 registers,
605	 *        dp_m2_n2 on the M2_N2 registers (if supported)
606	 *
607	 * M2_N2: program dp_m2_n2 on the M1_N1 registers;
608	 *        the M2_N2 registers are not supported
609 */
610
611enum link_m_n_set {
612 /* Sets the m1_n1 and m2_n2 */
613 M1_N1 = 0,
614 M2_N2
615};
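As used by the DRRS hunk in intel_dp.c earlier in this diff, the enum picks which precomputed divider set is programmed, for example:

        /* high refresh rate: program dp_m_n (and dp_m2_n2 if present) */
        intel_dp_set_m_n(intel_crtc, M1_N1);

        /* low refresh rate: land the dp_m2_n2 dividers in M1_N1 */
        intel_dp_set_m_n(intel_crtc, M2_N2);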
616
596struct intel_dp { 617struct intel_dp {
597 uint32_t output_reg; 618 uint32_t output_reg;
598 uint32_t aux_ch_ctl_reg; 619 uint32_t aux_ch_ctl_reg;
@@ -883,6 +904,8 @@ int intel_fb_align_height(struct drm_device *dev, int height,
883 uint64_t fb_format_modifier); 904 uint64_t fb_format_modifier);
884void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire); 905void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
885 906
907u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
908 uint32_t pixel_format);
886 909
887/* intel_audio.c */ 910/* intel_audio.c */
888void intel_init_audio(struct drm_device *dev); 911void intel_init_audio(struct drm_device *dev);
@@ -996,7 +1019,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
996void hsw_disable_pc8(struct drm_i915_private *dev_priv); 1019void hsw_disable_pc8(struct drm_i915_private *dev_priv);
997void intel_dp_get_m_n(struct intel_crtc *crtc, 1020void intel_dp_get_m_n(struct intel_crtc *crtc,
998 struct intel_crtc_state *pipe_config); 1021 struct intel_crtc_state *pipe_config);
999void intel_dp_set_m_n(struct intel_crtc *crtc); 1022void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
1000int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); 1023int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
1001void 1024void
1002ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config, 1025ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index ee65731baaf7..618f7bdab0ba 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -473,6 +473,43 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
473 return true; 473 return true;
474} 474}
475 475
476static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
477{
478 struct drm_crtc *crtc = NULL, *tmp_crtc;
479 enum pipe pipe;
480 bool pipe_a_only = false, one_pipe_only = false;
481
482 if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
483 pipe_a_only = true;
484 else if (INTEL_INFO(dev_priv)->gen <= 4)
485 one_pipe_only = true;
486
487 for_each_pipe(dev_priv, pipe) {
488 tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
489
490 if (intel_crtc_active(tmp_crtc) &&
491 to_intel_crtc(tmp_crtc)->primary_enabled) {
492 if (one_pipe_only && crtc) {
493 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
494 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
495 return NULL;
496 }
497 crtc = tmp_crtc;
498 }
499
500 if (pipe_a_only)
501 break;
502 }
503
504 if (!crtc || crtc->primary->fb == NULL) {
505 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
506 DRM_DEBUG_KMS("no output, disabling\n");
507 return NULL;
508 }
509
510 return crtc;
511}
512
476/** 513/**
477 * intel_fbc_update - enable/disable FBC as needed 514 * intel_fbc_update - enable/disable FBC as needed
478 * @dev: the drm_device 515 * @dev: the drm_device
@@ -495,7 +532,7 @@ static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
495void intel_fbc_update(struct drm_device *dev) 532void intel_fbc_update(struct drm_device *dev)
496{ 533{
497 struct drm_i915_private *dev_priv = dev->dev_private; 534 struct drm_i915_private *dev_priv = dev->dev_private;
498 struct drm_crtc *crtc = NULL, *tmp_crtc; 535 struct drm_crtc *crtc = NULL;
499 struct intel_crtc *intel_crtc; 536 struct intel_crtc *intel_crtc;
500 struct drm_framebuffer *fb; 537 struct drm_framebuffer *fb;
501 struct drm_i915_gem_object *obj; 538 struct drm_i915_gem_object *obj;
@@ -530,23 +567,9 @@ void intel_fbc_update(struct drm_device *dev)
530 * - new fb is too large to fit in compressed buffer 567 * - new fb is too large to fit in compressed buffer
531 * - going to an unsupported config (interlace, pixel multiply, etc.) 568 * - going to an unsupported config (interlace, pixel multiply, etc.)
532 */ 569 */
533 for_each_crtc(dev, tmp_crtc) { 570 crtc = intel_fbc_find_crtc(dev_priv);
534 if (intel_crtc_active(tmp_crtc) && 571 if (!crtc)
535 to_intel_crtc(tmp_crtc)->primary_enabled) {
536 if (crtc) {
537 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
538 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
539 goto out_disable;
540 }
541 crtc = tmp_crtc;
542 }
543 }
544
545 if (!crtc || crtc->primary->fb == NULL) {
546 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
547 DRM_DEBUG_KMS("no output, disabling\n");
548 goto out_disable; 572 goto out_disable;
549 }
550 573
551 intel_crtc = to_intel_crtc(crtc); 574 intel_crtc = to_intel_crtc(crtc);
552 fb = crtc->primary->fb; 575 fb = crtc->primary->fb;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index aafcef3b6b23..a2dc9a11a248 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -620,7 +620,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
620 * @vmas: list of vmas. 620 * @vmas: list of vmas.
621 * @batch_obj: the batchbuffer to submit. 621 * @batch_obj: the batchbuffer to submit.
622 * @exec_start: batchbuffer start virtual address pointer. 622 * @exec_start: batchbuffer start virtual address pointer.
623 * @flags: translated execbuffer call flags. 623 * @dispatch_flags: translated execbuffer call flags.
624 * 624 *
625 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts 625 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
626 * away the submission details of the execbuffer ioctl call. 626 * away the submission details of the execbuffer ioctl call.
@@ -633,7 +633,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
633 struct drm_i915_gem_execbuffer2 *args, 633 struct drm_i915_gem_execbuffer2 *args,
634 struct list_head *vmas, 634 struct list_head *vmas,
635 struct drm_i915_gem_object *batch_obj, 635 struct drm_i915_gem_object *batch_obj,
636 u64 exec_start, u32 flags) 636 u64 exec_start, u32 dispatch_flags)
637{ 637{
638 struct drm_i915_private *dev_priv = dev->dev_private; 638 struct drm_i915_private *dev_priv = dev->dev_private;
639 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; 639 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
@@ -706,10 +706,12 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
706 dev_priv->relative_constants_mode = instp_mode; 706 dev_priv->relative_constants_mode = instp_mode;
707 } 707 }
708 708
709 ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags); 709 ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
710 if (ret) 710 if (ret)
711 return ret; 711 return ret;
712 712
713 trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
714
713 i915_gem_execbuffer_move_to_active(vmas, ring); 715 i915_gem_execbuffer_move_to_active(vmas, ring);
714 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 716 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
715 717
@@ -886,12 +888,9 @@ static int logical_ring_alloc_request(struct intel_engine_cs *ring,
886 return ret; 888 return ret;
887 } 889 }
888 890
889 /* Hold a reference to the context this request belongs to
890 * (we will need it when the time comes to emit/retire the
891 * request).
892 */
893 request->ctx = ctx; 891 request->ctx = ctx;
894 i915_gem_context_reference(request->ctx); 892 i915_gem_context_reference(request->ctx);
893 request->ringbuf = ctx->engine[ring->id].ringbuf;
895 894
896 ring->outstanding_lazy_request = request; 895 ring->outstanding_lazy_request = request;
897 return 0; 896 return 0;
@@ -1163,9 +1162,9 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
1163 1162
1164static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf, 1163static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
1165 struct intel_context *ctx, 1164 struct intel_context *ctx,
1166 u64 offset, unsigned flags) 1165 u64 offset, unsigned dispatch_flags)
1167{ 1166{
1168 bool ppgtt = !(flags & I915_DISPATCH_SECURE); 1167 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
1169 int ret; 1168 int ret;
1170 1169
1171 ret = intel_logical_ring_begin(ringbuf, ctx, 4); 1170 ret = intel_logical_ring_begin(ringbuf, ctx, 4);
@@ -1638,6 +1637,49 @@ cleanup_render_ring:
1638 return ret; 1637 return ret;
1639} 1638}
1640 1639
1640static u32
1641make_rpcs(struct drm_device *dev)
1642{
1643 u32 rpcs = 0;
1644
1645 /*
1646 * No explicit RPCS request is needed to ensure full
1647 * slice/subslice/EU enablement prior to Gen9.
1648 */
1649 if (INTEL_INFO(dev)->gen < 9)
1650 return 0;
1651
1652 /*
1653 * Starting in Gen9, render power gating can leave
1654 * slice/subslice/EU in a partially enabled state. We
1655 * must make an explicit request through RPCS for full
1656 * enablement.
1657 */
1658 if (INTEL_INFO(dev)->has_slice_pg) {
1659 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
1660 rpcs |= INTEL_INFO(dev)->slice_total <<
1661 GEN8_RPCS_S_CNT_SHIFT;
1662 rpcs |= GEN8_RPCS_ENABLE;
1663 }
1664
1665 if (INTEL_INFO(dev)->has_subslice_pg) {
1666 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
1667 rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
1668 GEN8_RPCS_SS_CNT_SHIFT;
1669 rpcs |= GEN8_RPCS_ENABLE;
1670 }
1671
1672 if (INTEL_INFO(dev)->has_eu_pg) {
1673 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
1674 GEN8_RPCS_EU_MIN_SHIFT;
1675 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
1676 GEN8_RPCS_EU_MAX_SHIFT;
1677 rpcs |= GEN8_RPCS_ENABLE;
1678 }
1679
1680 return rpcs;
1681}
1682
1641static int 1683static int
1642populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj, 1684populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
1643 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) 1685 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
@@ -1731,18 +1773,18 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
1731 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1); 1773 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
1732 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0); 1774 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
1733 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0); 1775 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
1734 reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]); 1776 reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[3]->daddr);
1735 reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]); 1777 reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[3]->daddr);
1736 reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]); 1778 reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[2]->daddr);
1737 reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]); 1779 reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[2]->daddr);
1738 reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]); 1780 reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[1]->daddr);
1739 reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]); 1781 reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[1]->daddr);
1740 reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]); 1782 reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[0]->daddr);
1741 reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]); 1783 reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[0]->daddr);
1742 if (ring->id == RCS) { 1784 if (ring->id == RCS) {
1743 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 1785 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
1744 reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8; 1786 reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
1745 reg_state[CTX_R_PWR_CLK_STATE+1] = 0; 1787 reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
1746 } 1788 }
1747 1789
1748 kunmap_atomic(reg_state); 1790 kunmap_atomic(reg_state);
@@ -1950,3 +1992,38 @@ error_unpin_ctx:
1950 drm_gem_object_unreference(&ctx_obj->base); 1992 drm_gem_object_unreference(&ctx_obj->base);
1951 return ret; 1993 return ret;
1952} 1994}
1995
1996void intel_lr_context_reset(struct drm_device *dev,
1997 struct intel_context *ctx)
1998{
1999 struct drm_i915_private *dev_priv = dev->dev_private;
2000 struct intel_engine_cs *ring;
2001 int i;
2002
2003 for_each_ring(ring, dev_priv, i) {
2004 struct drm_i915_gem_object *ctx_obj =
2005 ctx->engine[ring->id].state;
2006 struct intel_ringbuffer *ringbuf =
2007 ctx->engine[ring->id].ringbuf;
2008 uint32_t *reg_state;
2009 struct page *page;
2010
2011 if (!ctx_obj)
2012 continue;
2013
2014 if (i915_gem_object_get_pages(ctx_obj)) {
2015 WARN(1, "Failed get_pages for context obj\n");
2016 continue;
2017 }
2018 page = i915_gem_object_get_page(ctx_obj, 1);
2019 reg_state = kmap_atomic(page);
2020
2021 reg_state[CTX_RING_HEAD+1] = 0;
2022 reg_state[CTX_RING_TAIL+1] = 0;
2023
2024 kunmap_atomic(reg_state);
2025
2026 ringbuf->head = 0;
2027 ringbuf->tail = 0;
2028 }
2029}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index f635735df8a1..adb731e49c57 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -73,6 +73,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
73 struct intel_engine_cs *ring); 73 struct intel_engine_cs *ring);
74void intel_lr_context_unpin(struct intel_engine_cs *ring, 74void intel_lr_context_unpin(struct intel_engine_cs *ring,
75 struct intel_context *ctx); 75 struct intel_context *ctx);
76void intel_lr_context_reset(struct drm_device *dev,
77 struct intel_context *ctx);
76 78
77/* Execlists */ 79/* Execlists */
78int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); 80int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -82,7 +84,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
82 struct drm_i915_gem_execbuffer2 *args, 84 struct drm_i915_gem_execbuffer2 *args,
83 struct list_head *vmas, 85 struct list_head *vmas,
84 struct drm_i915_gem_object *batch_obj, 86 struct drm_i915_gem_object *batch_obj,
85 u64 exec_start, u32 flags); 87 u64 exec_start, u32 dispatch_flags);
86u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj); 88u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
87 89
88void intel_lrc_irq_handler(struct intel_engine_cs *ring); 90void intel_lrc_irq_handler(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 071b96d6e146..24e8730dc189 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -509,7 +509,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
509 intel_connector->panel.fitting_mode = value; 509 intel_connector->panel.fitting_mode = value;
510 510
511 crtc = intel_attached_encoder(connector)->base.crtc; 511 crtc = intel_attached_encoder(connector)->base.crtc;
512 if (crtc && crtc->enabled) { 512 if (crtc && crtc->state->enable) {
513 /* 513 /*
514 * If the CRTC is enabled, the display will be changed 514 * If the CRTC is enabled, the display will be changed
515 * according to the new panel fitting mode. 515 * according to the new panel fitting mode.
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d8de1d5140a7..71e87abdcae7 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -744,10 +744,8 @@ void intel_opregion_init(struct drm_device *dev)
744 return; 744 return;
745 745
746 if (opregion->acpi) { 746 if (opregion->acpi) {
747 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 747 intel_didl_outputs(dev);
748 intel_didl_outputs(dev); 748 intel_setup_cadls(dev);
749 intel_setup_cadls(dev);
750 }
751 749
752 /* Notify BIOS we are ready to handle ACPI video ext notifs. 750 /* Notify BIOS we are ready to handle ACPI video ext notifs.
753 * Right now, all the events are handled by the ACPI video module. 751 * Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index f93dfc174495..823d1d97a000 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1065,7 +1065,6 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1065 struct put_image_params *params; 1065 struct put_image_params *params;
1066 int ret; 1066 int ret;
1067 1067
1068 /* No need to check for DRIVER_MODESET - we don't set it up then. */
1069 overlay = dev_priv->overlay; 1068 overlay = dev_priv->overlay;
1070 if (!overlay) { 1069 if (!overlay) {
1071 DRM_DEBUG("userspace bug: no overlay\n"); 1070 DRM_DEBUG("userspace bug: no overlay\n");
@@ -1261,7 +1260,6 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1261 struct overlay_registers __iomem *regs; 1260 struct overlay_registers __iomem *regs;
1262 int ret; 1261 int ret;
1263 1262
1264 /* No need to check for DRIVER_MODESET - we don't set it up then. */
1265 overlay = dev_priv->overlay; 1263 overlay = dev_priv->overlay;
1266 if (!overlay) { 1264 if (!overlay) {
1267 DRM_DEBUG("userspace bug: no overlay\n"); 1265 DRM_DEBUG("userspace bug: no overlay\n");
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f7c993843be8..542cf6844dc3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2522,6 +2522,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2522 enum pipe pipe = intel_crtc->pipe; 2522 enum pipe pipe = intel_crtc->pipe;
2523 struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; 2523 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
2524 uint16_t alloc_size, start, cursor_blocks; 2524 uint16_t alloc_size, start, cursor_blocks;
2525 uint16_t minimum[I915_MAX_PLANES];
2525 unsigned int total_data_rate; 2526 unsigned int total_data_rate;
2526 int plane; 2527 int plane;
2527 2528
@@ -2540,9 +2541,21 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2540 alloc_size -= cursor_blocks; 2541 alloc_size -= cursor_blocks;
2541 alloc->end -= cursor_blocks; 2542 alloc->end -= cursor_blocks;
2542 2543
2544	 /* 1. Allocate the minimum required blocks for each active plane */
2545 for_each_plane(pipe, plane) {
2546 const struct intel_plane_wm_parameters *p;
2547
2548 p = &params->plane[plane];
2549 if (!p->enabled)
2550 continue;
2551
2552 minimum[plane] = 8;
2553 alloc_size -= minimum[plane];
2554 }
2555
2543 /* 2556 /*
2544 * Each active plane get a portion of the remaining space, in 2557 * 2. Distribute the remaining space in proportion to the amount of
2545 * proportion to the amount of data they need to fetch from memory. 2558 * data each plane needs to fetch from memory.
2546 * 2559 *
2547 * FIXME: we may not allocate every single block here. 2560 * FIXME: we may not allocate every single block here.
2548 */ 2561 */
@@ -2564,8 +2577,9 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2564 * promote the expression to 64 bits to avoid overflowing, the 2577 * promote the expression to 64 bits to avoid overflowing, the
2565 * result is < available as data_rate / total_data_rate < 1 2578 * result is < available as data_rate / total_data_rate < 1
2566 */ 2579 */
2567 plane_blocks = div_u64((uint64_t)alloc_size * data_rate, 2580 plane_blocks = minimum[plane];
2568 total_data_rate); 2581 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
2582 total_data_rate);
2569 2583
2570 ddb->plane[pipe][plane].start = start; 2584 ddb->plane[pipe][plane].start = start;
2571 ddb->plane[pipe][plane].end = start + plane_blocks; 2585 ddb->plane[pipe][plane].end = start + plane_blocks;
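A worked example of the two-step allocation, with illustrative numbers: say 200 blocks remain after the cursor and two planes are enabled with a 3:1 data-rate ratio. Step 1 reserves the minimum 8 blocks per plane, leaving alloc_size = 184; step 2 then gives the hotter plane 8 + 184 * 3/4 = 146 blocks and the other 8 + 184 * 1/4 = 54, which together exactly consume the 200 blocks.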
@@ -2595,7 +2609,7 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2595 if (latency == 0) 2609 if (latency == 0)
2596 return UINT_MAX; 2610 return UINT_MAX;
2597 2611
2598 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel; 2612 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
2599 ret = DIV_ROUND_UP(wm_intermediate_val, 1000); 2613 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
2600 2614
2601 return ret; 2615 return ret;
@@ -2603,17 +2617,29 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2603 2617
2604static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, 2618static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2605 uint32_t horiz_pixels, uint8_t bytes_per_pixel, 2619 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2606 uint32_t latency) 2620 uint64_t tiling, uint32_t latency)
2607{ 2621{
2608 uint32_t ret, plane_bytes_per_line, wm_intermediate_val; 2622 uint32_t ret;
2623 uint32_t plane_bytes_per_line, plane_blocks_per_line;
2624 uint32_t wm_intermediate_val;
2609 2625
2610 if (latency == 0) 2626 if (latency == 0)
2611 return UINT_MAX; 2627 return UINT_MAX;
2612 2628
2613 plane_bytes_per_line = horiz_pixels * bytes_per_pixel; 2629 plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
2630
2631 if (tiling == I915_FORMAT_MOD_Y_TILED ||
2632 tiling == I915_FORMAT_MOD_Yf_TILED) {
2633 plane_bytes_per_line *= 4;
2634 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2635 plane_blocks_per_line /= 4;
2636 } else {
2637 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2638 }
2639
2614 wm_intermediate_val = latency * pixel_rate; 2640 wm_intermediate_val = latency * pixel_rate;
2615 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) * 2641 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
2616 plane_bytes_per_line; 2642 plane_blocks_per_line;
2617 2643
2618 return ret; 2644 return ret;
2619} 2645}
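Units sketch for the reworked formulas, assuming pixel_rate in kHz and latency in microseconds (which is what the /1000 divisors imply): with latency = 5, pixel_rate = 150000 and 4 bytes per pixel, method1 becomes DIV_ROUND_UP(5 * 150000 * 4 / 512, 1000) = DIV_ROUND_UP(5859, 1000) = 6 DDB blocks; the new /512 here, and the plane_blocks_per_line rounding in method2, convert the old byte-based results into the 512-byte block units the rest of the SKL watermark code now works in.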
@@ -2662,6 +2688,7 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
2662 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2688 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2663 enum pipe pipe = intel_crtc->pipe; 2689 enum pipe pipe = intel_crtc->pipe;
2664 struct drm_plane *plane; 2690 struct drm_plane *plane;
2691 struct drm_framebuffer *fb;
2665 int i = 1; /* Index for sprite planes start */ 2692 int i = 1; /* Index for sprite planes start */
2666 2693
2667 p->active = intel_crtc_active(crtc); 2694 p->active = intel_crtc_active(crtc);
@@ -2677,6 +2704,14 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
2677 crtc->primary->fb->bits_per_pixel / 8; 2704 crtc->primary->fb->bits_per_pixel / 8;
2678 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; 2705 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
2679 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; 2706 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
2707 p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
2708 fb = crtc->primary->state->fb;
2709 /*
2710 * Framebuffer can be NULL on plane disable, but it does not
2711 * matter for watermarks if we assume no tiling in that case.
2712 */
2713 if (fb)
2714 p->plane[0].tiling = fb->modifier[0];
2680 2715
2681 p->cursor.enabled = true; 2716 p->cursor.enabled = true;
2682 p->cursor.bytes_per_pixel = 4; 2717 p->cursor.bytes_per_pixel = 4;
@@ -2693,41 +2728,60 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
2693 } 2728 }
2694} 2729}
2695 2730
2696static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p, 2731static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
2732 struct skl_pipe_wm_parameters *p,
2697 struct intel_plane_wm_parameters *p_params, 2733 struct intel_plane_wm_parameters *p_params,
2698 uint16_t ddb_allocation, 2734 uint16_t ddb_allocation,
2699 uint32_t mem_value, 2735 int level,
2700 uint16_t *out_blocks, /* out */ 2736 uint16_t *out_blocks, /* out */
2701 uint8_t *out_lines /* out */) 2737 uint8_t *out_lines /* out */)
2702{ 2738{
2703 uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines; 2739 uint32_t latency = dev_priv->wm.skl_latency[level];
2704 uint32_t result_bytes; 2740 uint32_t method1, method2;
2741 uint32_t plane_bytes_per_line, plane_blocks_per_line;
2742 uint32_t res_blocks, res_lines;
2743 uint32_t selected_result;
2705 2744
2706 if (mem_value == 0 || !p->active || !p_params->enabled) 2745 if (latency == 0 || !p->active || !p_params->enabled)
2707 return false; 2746 return false;
2708 2747
2709 method1 = skl_wm_method1(p->pixel_rate, 2748 method1 = skl_wm_method1(p->pixel_rate,
2710 p_params->bytes_per_pixel, 2749 p_params->bytes_per_pixel,
2711 mem_value); 2750 latency);
2712 method2 = skl_wm_method2(p->pixel_rate, 2751 method2 = skl_wm_method2(p->pixel_rate,
2713 p->pipe_htotal, 2752 p->pipe_htotal,
2714 p_params->horiz_pixels, 2753 p_params->horiz_pixels,
2715 p_params->bytes_per_pixel, 2754 p_params->bytes_per_pixel,
2716 mem_value); 2755 p_params->tiling,
2756 latency);
2717 2757
2718 plane_bytes_per_line = p_params->horiz_pixels * 2758 plane_bytes_per_line = p_params->horiz_pixels *
2719 p_params->bytes_per_pixel; 2759 p_params->bytes_per_pixel;
2760 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2720 2761
2721 /* For now xtile and linear */ 2762 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
2722 if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1) 2763 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
2723 result_bytes = min(method1, method2); 2764 uint32_t y_tile_minimum = plane_blocks_per_line * 4;
2724 else 2765 selected_result = max(method2, y_tile_minimum);
2725 result_bytes = method1; 2766 } else {
2767 if ((ddb_allocation / plane_blocks_per_line) >= 1)
2768 selected_result = min(method1, method2);
2769 else
2770 selected_result = method1;
2771 }
2726 2772
2727 res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1; 2773 res_blocks = selected_result + 1;
2728 res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line); 2774 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
2729 2775
2730 if (res_blocks > ddb_allocation || res_lines > 31) 2776 if (level >= 1 && level <= 7) {
2777 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
2778 p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
2779 res_lines += 4;
2780 else
2781 res_blocks++;
2782 }
2783
2784 if (res_blocks >= ddb_allocation || res_lines > 31)
2731 return false; 2785 return false;
2732 2786
2733 *out_blocks = res_blocks; 2787 *out_blocks = res_blocks;
@@ -2744,23 +2798,24 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
2744 int num_planes, 2798 int num_planes,
2745 struct skl_wm_level *result) 2799 struct skl_wm_level *result)
2746{ 2800{
2747 uint16_t latency = dev_priv->wm.skl_latency[level];
2748 uint16_t ddb_blocks; 2801 uint16_t ddb_blocks;
2749 int i; 2802 int i;
2750 2803
2751 for (i = 0; i < num_planes; i++) { 2804 for (i = 0; i < num_planes; i++) {
2752 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); 2805 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
2753 2806
2754 result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i], 2807 result->plane_en[i] = skl_compute_plane_wm(dev_priv,
2808 p, &p->plane[i],
2755 ddb_blocks, 2809 ddb_blocks,
2756 latency, 2810 level,
2757 &result->plane_res_b[i], 2811 &result->plane_res_b[i],
2758 &result->plane_res_l[i]); 2812 &result->plane_res_l[i]);
2759 } 2813 }
2760 2814
2761 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]); 2815 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
2762 result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks, 2816 result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
2763 latency, &result->cursor_res_b, 2817 ddb_blocks, level,
2818 &result->cursor_res_b,
2764 &result->cursor_res_l); 2819 &result->cursor_res_l);
2765} 2820}
2766 2821
@@ -3153,12 +3208,20 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
               int pixel_size, bool enabled, bool scaled)
 {
     struct intel_plane *intel_plane = to_intel_plane(plane);
+    struct drm_framebuffer *fb = plane->state->fb;
 
     intel_plane->wm.enabled = enabled;
     intel_plane->wm.scaled = scaled;
     intel_plane->wm.horiz_pixels = sprite_width;
     intel_plane->wm.vert_pixels = sprite_height;
     intel_plane->wm.bytes_per_pixel = pixel_size;
+    intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
+    /*
+     * Framebuffer can be NULL on plane disable, but it does not
+     * matter for watermarks if we assume no tiling in that case.
+     */
+    if (fb)
+        intel_plane->wm.tiling = fb->modifier[0];
 
     skl_update_wm(crtc);
 }
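
Note: the sprite watermark state now carries the fb tiling modifier, defaulting to linear when the plane is being disabled and has no fb. A minimal sketch of that NULL-safe pattern (the types are stand-ins; only DRM_FORMAT_MOD_NONE's value of 0 is real):

    #include <stddef.h>
    #include <stdint.h>

    #define DRM_FORMAT_MOD_NONE 0   /* linear; 0 as in drm_fourcc.h */

    struct fb { uint64_t modifier0; };   /* stand-in for drm_framebuffer */

    /* Default to linear so a disabled plane (NULL fb) still yields sane
     * watermark inputs. */
    static uint64_t wm_tiling(const struct fb *fb)
    {
        return fb ? fb->modifier0 : DRM_FORMAT_MOD_NONE;
    }

    int main(void)
    {
        struct fb yf = { .modifier0 = 3 };   /* made-up modifier value */

        return (int)(wm_tiling(&yf) + wm_tiling(NULL));
    }
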
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d17e76d32e03..cd79c3843452 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1002,11 +1002,62 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
     return 0;
 }
 
+static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+{
+    struct drm_device *dev = ring->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    u8 vals[3] = { 0, 0, 0 };
+    unsigned int i;
+
+    for (i = 0; i < 3; i++) {
+        u8 ss;
+
+        /*
+         * Only consider slices where one, and only one, subslice has 7
+         * EUs
+         */
+        if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
+            continue;
+
+        /*
+         * subslice_7eu[i] != 0 (because of the check above) and
+         * ss_max == 4 (maximum number of subslices possible per slice)
+         *
+         * ->    0 <= ss <= 3;
+         */
+        ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+        vals[i] = 3 - ss;
+    }
+
+    if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+        return 0;
+
+    /* Tune IZ hashing. See intel_device_info_runtime_init() */
+    WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+                GEN9_IZ_HASHING_MASK(2) |
+                GEN9_IZ_HASHING_MASK(1) |
+                GEN9_IZ_HASHING_MASK(0),
+                GEN9_IZ_HASHING(2, vals[2]) |
+                GEN9_IZ_HASHING(1, vals[1]) |
+                GEN9_IZ_HASHING(0, vals[0]));
+
+    return 0;
+}
+
+
 static int skl_init_workarounds(struct intel_engine_cs *ring)
 {
+    struct drm_device *dev = ring->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+
     gen9_init_workarounds(ring);
 
-    return 0;
+    /* WaDisablePowerCompilerClockGating:skl */
+    if (INTEL_REVID(dev) == SKL_REVID_B0)
+        WA_SET_BIT_MASKED(HIZ_CHICKEN,
+                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+
+    return skl_tune_iz_hashing(ring);
 }
 
 int init_workarounds_ring(struct intel_engine_cs *ring)
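
Note: skl_tune_iz_hashing() maps each slice that has exactly one 7-EU subslice to a hashing value of 3 - ss, where ss is that subslice's bit index. A standalone sketch of the computation with a made-up fuse readout:

    #include <stdio.h>
    #include <strings.h>   /* ffs() */

    int main(void)
    {
        /* Made-up fuse values: slice 0 has exactly one 7-EU subslice
         * (bit 2), slice 1 has two (skipped), slice 2 has none. */
        unsigned int subslice_7eu[3] = { 0x04, 0x06, 0x00 };
        unsigned int vals[3] = { 0, 0, 0 };
        int i;

        for (i = 0; i < 3; i++) {
            /* hweight8() equivalent: count the set bits. */
            if (__builtin_popcount(subslice_7eu[i]) != 1)
                continue;
            /* ffs() is 1-based, so ss is the subslice's bit index. */
            vals[i] = 3 - (ffs(subslice_7eu[i]) - 1);
        }
        printf("%u %u %u\n", vals[0], vals[1], vals[2]);   /* 1 0 0 */
        return 0;
    }
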
@@ -1690,7 +1741,7 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
 static int
 i965_dispatch_execbuffer(struct intel_engine_cs *ring,
              u64 offset, u32 length,
-             unsigned flags)
+             unsigned dispatch_flags)
 {
     int ret;
 
@@ -1701,7 +1752,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
     intel_ring_emit(ring,
             MI_BATCH_BUFFER_START |
             MI_BATCH_GTT |
-            (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+            (dispatch_flags & I915_DISPATCH_SECURE ?
+             0 : MI_BATCH_NON_SECURE_I965));
     intel_ring_emit(ring, offset);
     intel_ring_advance(ring);
 
@@ -1714,8 +1766,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
              u64 offset, u32 len,
-             unsigned flags)
+             unsigned dispatch_flags)
 {
     u32 cs_offset = ring->scratch.gtt_offset;
     int ret;
@@ -1733,7 +1785,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
     intel_ring_emit(ring, MI_NOOP);
     intel_ring_advance(ring);
 
-    if ((flags & I915_DISPATCH_PINNED) == 0) {
+    if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
         if (len > I830_BATCH_LIMIT)
             return -ENOSPC;
 
@@ -1765,7 +1817,8 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
         return ret;
 
     intel_ring_emit(ring, MI_BATCH_BUFFER);
-    intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+    intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+                    0 : MI_BATCH_NON_SECURE));
     intel_ring_emit(ring, offset + len - 8);
     intel_ring_emit(ring, MI_NOOP);
     intel_ring_advance(ring);
@@ -1776,7 +1829,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 static int
 i915_dispatch_execbuffer(struct intel_engine_cs *ring,
              u64 offset, u32 len,
-             unsigned flags)
+             unsigned dispatch_flags)
 {
     int ret;
 
@@ -1785,7 +1838,8 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
         return ret;
 
     intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-    intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+    intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+                    0 : MI_BATCH_NON_SECURE));
     intel_ring_advance(ring);
 
     return 0;
@@ -2176,6 +2230,7 @@ intel_ring_alloc_request(struct intel_engine_cs *ring)
 
     kref_init(&request->ref);
     request->ring = ring;
+    request->ringbuf = ring->buffer;
     request->uniq = dev_private->request_uniq++;
 
     ret = i915_gem_get_seqno(ring->dev, &request->seqno);
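
Note: requests now cache the ringbuffer they were allocated against instead of leaving callers to re-derive it from the engine. A sketch of the pattern with stand-in types:

    struct ringbuf { int dummy; };
    struct ring { struct ringbuf *buffer; };
    struct request { struct ring *ring; struct ringbuf *ringbuf; };

    static void init_request(struct request *rq, struct ring *ring)
    {
        rq->ring = ring;
        rq->ringbuf = ring->buffer;   /* cached at allocation time */
    }

    int main(void)
    {
        struct ringbuf rb = { 0 };
        struct ring ring = { .buffer = &rb };
        struct request rq;

        init_request(&rq, &ring);
        return rq.ringbuf == &rb ? 0 : 1;
    }
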
@@ -2352,9 +2407,10 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
 static int
 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
                   u64 offset, u32 len,
-                  unsigned flags)
+                  unsigned dispatch_flags)
 {
-    bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
+    bool ppgtt = USES_PPGTT(ring->dev) &&
+        !(dispatch_flags & I915_DISPATCH_SECURE);
     int ret;
 
     ret = intel_ring_begin(ring, 4);
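
Note: on gen8 the batch executes from the PPGTT only when the platform uses PPGTT and the batch is not a secure one; secure batches are dispatched from the global GTT. A sketch of the predicate (I915_DISPATCH_SECURE's value comes from intel_ringbuffer.h below):

    #include <stdbool.h>

    #define I915_DISPATCH_SECURE 0x1

    static bool use_ppgtt(bool platform_has_ppgtt, unsigned int dispatch_flags)
    {
        return platform_has_ppgtt && !(dispatch_flags & I915_DISPATCH_SECURE);
    }

    int main(void)
    {
        return use_ppgtt(true, I915_DISPATCH_SECURE) ? 1 : 0;   /* 0 */
    }
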
@@ -2373,8 +2429,8 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 static int
 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
              u64 offset, u32 len,
-             unsigned flags)
+             unsigned dispatch_flags)
 {
     int ret;
 
@@ -2384,7 +2440,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
     intel_ring_emit(ring,
             MI_BATCH_BUFFER_START |
-            (flags & I915_DISPATCH_SECURE ?
+            (dispatch_flags & I915_DISPATCH_SECURE ?
              0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
     /* bit0-7 is the length on GEN6+ */
     intel_ring_emit(ring, offset);
@@ -2396,7 +2452,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 static int
 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
                   u64 offset, u32 len,
-                  unsigned flags)
+                  unsigned dispatch_flags)
 {
     int ret;
 
@@ -2406,7 +2462,8 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
     intel_ring_emit(ring,
             MI_BATCH_BUFFER_START |
-            (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+            (dispatch_flags & I915_DISPATCH_SECURE ?
+             0 : MI_BATCH_NON_SECURE_I965));
     /* bit0-7 is the length on GEN6+ */
     intel_ring_emit(ring, offset);
     intel_ring_advance(ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b6c484fe7a59..8f3b49a23ccf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -164,7 +164,7 @@ struct intel_engine_cs {
                    u32 seqno);
     int     (*dispatch_execbuffer)(struct intel_engine_cs *ring,
                        u64 offset, u32 length,
-                       unsigned flags);
+                       unsigned dispatch_flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
     void        (*cleanup)(struct intel_engine_cs *ring);
@@ -242,7 +242,7 @@ struct intel_engine_cs {
               u32 flush_domains);
     int     (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
                  struct intel_context *ctx,
-                 u64 offset, unsigned flags);
+                 u64 offset, unsigned dispatch_flags);
 
     /**
      * List of objects currently involved in rendering from the
@@ -373,11 +373,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
  * 0x06: ring 2 head pointer (915-class)
  * 0x10-0x1b: Context status DWords (GM45)
  * 0x1f: Last written status offset. (GM45)
+ * 0x20-0x2f: Reserved (Gen6+)
  *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
+ * The area from dword 0x30 to 0x3ff is available for driver usage.
  */
-#define I915_GEM_HWS_INDEX      0x20
-#define I915_GEM_HWS_SCRATCH_INDEX  0x30
+#define I915_GEM_HWS_INDEX      0x30
+#define I915_GEM_HWS_SCRATCH_INDEX  0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
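
Note: with dwords 0x20-0x2f now reserved on Gen6+, both driver-owned indices move up by 0x10. Assuming MI_STORE_DWORD_INDEX_SHIFT is 2 (dword index to byte offset), the new byte offsets work out as below:

    #include <stdio.h>

    #define MI_STORE_DWORD_INDEX_SHIFT 2      /* assumed value */
    #define I915_GEM_HWS_INDEX         0x30
    #define I915_GEM_HWS_SCRATCH_INDEX 0x40

    int main(void)
    {
        /* dword 0x30 -> byte 0xc0, dword 0x40 -> byte 0x100 */
        printf("HWS index at byte 0x%x, scratch at byte 0x%x\n",
               I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT,
               I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        return 0;
    }
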
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index f2d408dd7c15..7051da7015d3 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -98,7 +98,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
     if (min <= 0 || max <= 0)
         return false;
 
-    if (WARN_ON(drm_vblank_get(dev, pipe)))
+    if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
         return false;
 
     local_irq_disable();
@@ -132,7 +132,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
 
     finish_wait(wq, &wait);
 
-    drm_vblank_put(dev, pipe);
+    drm_crtc_vblank_put(&crtc->base);
 
     *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
 
@@ -189,7 +189,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
     struct intel_plane *intel_plane = to_intel_plane(drm_plane);
     const int pipe = intel_plane->pipe;
     const int plane = intel_plane->plane + 1;
-    u32 plane_ctl, stride;
+    u32 plane_ctl, stride_div;
     int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
     plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
@@ -247,15 +247,20 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
 
     switch (fb->modifier[0]) {
     case DRM_FORMAT_MOD_NONE:
-        stride = fb->pitches[0] >> 6;
         break;
     case I915_FORMAT_MOD_X_TILED:
         plane_ctl |= PLANE_CTL_TILED_X;
-        stride = fb->pitches[0] >> 9;
+        break;
+    case I915_FORMAT_MOD_Y_TILED:
+        plane_ctl |= PLANE_CTL_TILED_Y;
+        break;
+    case I915_FORMAT_MOD_Yf_TILED:
+        plane_ctl |= PLANE_CTL_TILED_YF;
         break;
     default:
-        BUG();
+        MISSING_CASE(fb->modifier[0]);
     }
+
     if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
         plane_ctl |= PLANE_CTL_ROTATE_180;
 
@@ -266,6 +271,9 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
                   pixel_size, true,
                   src_w != crtc_w || src_h != crtc_h);
 
+    stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+                           fb->pixel_format);
+
     /* Sizes are 0 based */
     src_w--;
     src_h--;
@@ -273,7 +281,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
     crtc_h--;
 
     I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
-    I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+    I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div);
     I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
     I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
     I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
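
Note: PLANE_STRIDE is now programmed as pitch divided by a per-modifier alignment rather than via hard-coded shifts. If intel_fb_stride_alignment() returns 64 for linear and 512 for X-tiled, this reproduces the removed >> 6 and >> 9 exactly; the Y/Yf value in the sketch is purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed return values: 64 and 512 match the removed shifts,
     * the Y/Yf value here is hypothetical. */
    static uint32_t stride_alignment(int tiled)
    {
        switch (tiled) {
        case 0:  return 64;    /* linear:  pitches[0] >> 6 */
        case 1:  return 512;   /* X-tiled: pitches[0] >> 9 */
        default: return 128;   /* Y/Yf: illustrative only  */
        }
    }

    int main(void)
    {
        uint32_t pitch = 7680;  /* 1920 px * 4 bytes/px */

        printf("linear stride reg:  %u\n", pitch / stride_alignment(0)); /* 120 */
        printf("x-tiled stride reg: %u\n", pitch / stride_alignment(1)); /* 15  */
        return 0;
    }
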
@@ -1248,6 +1256,12 @@ finish:
 
     if (!intel_crtc->primary_enabled && !state->hides_primary)
         intel_crtc->atomic.post_enable_primary = true;
+
+    /* Update watermarks on tiling changes. */
+    if (!plane->state->fb || !state->base.fb ||
+        plane->state->fb->modifier[0] !=
+        state->base.fb->modifier[0])
+        intel_crtc->atomic.update_wm = true;
     }
 
     return 0;
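
Note: watermarks are flagged for recomputation whenever the tiling modifier differs across an atomic plane update, including the cases where an fb appears or disappears. A sketch of the check with stand-in types:

    #include <stdbool.h>
    #include <stdint.h>

    struct fb { uint64_t modifier0; };

    static bool needs_wm_update(const struct fb *old_fb, const struct fb *new_fb)
    {
        return !old_fb || !new_fb || old_fb->modifier0 != new_fb->modifier0;
    }

    int main(void)
    {
        struct fb x = { .modifier0 = 1 }, y = { .modifier0 = 2 };

        return needs_wm_update(&x, &y) && !needs_wm_update(&x, &x) ? 0 : 1;
    }
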
@@ -1301,9 +1315,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
     struct intel_plane *intel_plane;
     int ret = 0;
 
-    if (!drm_core_check_feature(dev, DRIVER_MODESET))
-        return -ENODEV;
-
     /* Make sure we don't try to enable both src & dest simultaneously */
     if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
         return -EINVAL;
@@ -1332,9 +1343,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
     struct intel_plane *intel_plane;
     int ret = 0;
 
-    if (!drm_core_check_feature(dev, DRIVER_MODESET))
-        return -ENODEV;
-
     drm_modeset_lock_all(dev);
 
     plane = drm_plane_find(dev, get->plane_id);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 876e06360c36..8879f17770aa 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -211,6 +211,13 @@ static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
     gen6_gt_check_fifodbg(dev_priv);
 }
 
+static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
+{
+    u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
+
+    return count & GT_FIFO_FREE_ENTRIES_MASK;
+}
+
 static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 {
     int ret = 0;
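
Note: three open-coded "read GTFIFOCTL, mask off the count" sequences below collapse into this helper. A compilable sketch of the same factoring with stand-in register access (the 0x7f mask value is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    #define GT_FIFO_FREE_ENTRIES_MASK 0x7f   /* assumed mask value */

    /* Stand-in for __raw_i915_read32(dev_priv, GTFIFOCTL). */
    static uint32_t read_gtfifoctl(void)
    {
        return 0x1234;   /* fake register value for the sketch */
    }

    static inline uint32_t fifo_free_entries(void)
    {
        return read_gtfifoctl() & GT_FIFO_FREE_ENTRIES_MASK;
    }

    int main(void)
    {
        printf("free entries: %u\n", fifo_free_entries());   /* 0x34 */
        return 0;
    }
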
@@ -218,16 +225,15 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
     /* On VLV, FIFO will be shared by both SW and HW.
      * So, we need to read the FREE_ENTRIES everytime */
     if (IS_VALLEYVIEW(dev_priv->dev))
-        dev_priv->uncore.fifo_count =
-            __raw_i915_read32(dev_priv, GTFIFOCTL) &
-                        GT_FIFO_FREE_ENTRIES_MASK;
+        dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
 
     if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
         int loop = 500;
-        u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+        u32 fifo = fifo_free_entries(dev_priv);
+
         while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
             udelay(10);
-            fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+            fifo = fifo_free_entries(dev_priv);
         }
         if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
             ++ret;
@@ -315,8 +321,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
 
         if (IS_GEN6(dev) || IS_GEN7(dev))
             dev_priv->uncore.fifo_count =
-                __raw_i915_read32(dev_priv, GTFIFOCTL) &
-                            GT_FIFO_FREE_ENTRIES_MASK;
+                fifo_free_entries(dev_priv);
     }
 
     if (!restore)