commit    a8cbd4597799ade2b8a656dac7768c352b58e43a
tree      e40a5ccc9fc808735f3426f5387e54e291ad4351
parent    b727735732d5b2fde0a88911210215e03e190d11
parent    07c338ce98263a5af631b991dd8f96cff6ca2548
author    Daniel Vetter <daniel.vetter@ffwll.ch>  2014-10-21 08:32:55 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2014-10-21 08:42:30 -0400
Merge branch 'drm-intel-next-fixes' into drm-intel-next
So I've sent the first pull request to Dave and I expect his request
for a merge tree any second now ;-) More seriously I have some pending
patches for 3.19 that depend upon both trees, hence backmerge.

Conflicts are all trivial.

Conflicts:
	drivers/gpu/drm/i915/i915_irq.c
	drivers/gpu/drm/i915/intel_display.c

v2: Of course I've forgotten the fixup script for the silent conflict.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |  22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c  |  31
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          |  41
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h          |   7
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     |  51
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h         |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c       |   5
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c          | 243
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  |  15
9 files changed, 118 insertions(+), 300 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 821ba26438fb..9962da202456 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -968,23 +968,6 @@ struct intel_rps_ei {
 	u32 media_c0;
 };
 
-struct intel_rps_bdw_cal {
-	u32 it_threshold_pct; /* interrupt, in percentage */
-	u32 eval_interval; /* evaluation interval, in us */
-	u32 last_ts;
-	u32 last_c0;
-	bool is_up;
-};
-
-struct intel_rps_bdw_turbo {
-	struct intel_rps_bdw_cal up;
-	struct intel_rps_bdw_cal down;
-	struct timer_list flip_timer;
-	u32 timeout;
-	atomic_t flip_received;
-	struct work_struct work_max_freq;
-};
-
 struct intel_gen6_power_mgmt {
 	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
@@ -1018,9 +1001,6 @@ struct intel_gen6_power_mgmt {
 	bool enabled;
 	struct delayed_work delayed_resume_work;
 
-	bool is_bdw_sw_turbo; /* Switch of BDW software turbo */
-	struct intel_rps_bdw_turbo sw_turbo; /* Calculate RP interrupt timing */
-
 	/* manual wa residency calculations */
 	struct intel_rps_ei up_ei, down_ei;
 
@@ -2857,8 +2837,6 @@ extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void bdw_software_turbo(struct drm_device *dev);
-extern void gen8_flip_interrupt(struct drm_device *dev);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d38413997379..d182058383a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -293,15 +293,23 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 static struct i915_mmu_notifier *
 i915_mmu_notifier_find(struct i915_mm_struct *mm)
 {
-	if (mm->mn == NULL) {
-		down_write(&mm->mm->mmap_sem);
-		mutex_lock(&to_i915(mm->dev)->mm_lock);
-		if (mm->mn == NULL)
-			mm->mn = i915_mmu_notifier_create(mm->mm);
-		mutex_unlock(&to_i915(mm->dev)->mm_lock);
-		up_write(&mm->mm->mmap_sem);
+	struct i915_mmu_notifier *mn = mm->mn;
+
+	mn = mm->mn;
+	if (mn)
+		return mn;
+
+	down_write(&mm->mm->mmap_sem);
+	mutex_lock(&to_i915(mm->dev)->mm_lock);
+	if ((mn = mm->mn) == NULL) {
+		mn = i915_mmu_notifier_create(mm->mm);
+		if (!IS_ERR(mn))
+			mm->mn = mn;
 	}
-	return mm->mn;
+	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+	up_write(&mm->mm->mmap_sem);
+
+	return mn;
 }
 
 static int
@@ -681,16 +689,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 static void
 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 {
-	struct scatterlist *sg;
-	int i;
+	struct sg_page_iter sg_iter;
 
 	BUG_ON(obj->userptr.work != NULL);
 
 	if (obj->madv != I915_MADV_WILLNEED)
 		obj->dirty = 0;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page = sg_page(sg);
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (obj->dirty)
 			set_page_dirty(page);
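
The i915_mmu_notifier_find() rewrite above is double-checked locking: an
unlocked fast path reads mm->mn, and only on a miss does it take mmap_sem
and mm_lock, re-check, and create. Caching mn only when !IS_ERR(mn) means
a failed i915_mmu_notifier_create() is no longer stored, so a later call
can retry. A minimal userspace sketch of the same shape, with pthreads
standing in for the kernel locks and malloc() standing in for the create
call (all names here are illustrative, not driver API):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *cached; /* set at most once, then read without the lock */

    static void *get_or_create(void)
    {
    	void *obj = cached;        /* unlocked fast path */
    	if (obj)
    		return obj;

    	pthread_mutex_lock(&lock);
    	obj = cached;              /* re-check under the lock */
    	if (!obj) {
    		obj = malloc(64);  /* stand-in for i915_mmu_notifier_create() */
    		if (obj)           /* cache only on success, like !IS_ERR(mn) */
    			cached = obj;
    	}
    	pthread_mutex_unlock(&lock);
    	return obj;
    }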
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 737b23982b95..f17bbf3ac136 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1716,7 +1716,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 #define HPD_STORM_DETECT_PERIOD 1000
 #define HPD_STORM_THRESHOLD 5
 
-static int ilk_port_to_hotplug_shift(enum port port)
+static int pch_port_to_hotplug_shift(enum port port)
 {
 	switch (port) {
 	case PORT_A:
@@ -1732,7 +1732,7 @@ static int ilk_port_to_hotplug_shift(enum port port)
 	}
 }
 
-static int g4x_port_to_hotplug_shift(enum port port)
+static int i915_port_to_hotplug_shift(enum port port)
 {
 	switch (port) {
 	case PORT_A:
@@ -1790,12 +1790,12 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 		if (port && dev_priv->hpd_irq_port[port]) {
 			bool long_hpd;
 
-			if (IS_G4X(dev)) {
-				dig_shift = g4x_port_to_hotplug_shift(port);
-				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
-			} else {
-				dig_shift = ilk_port_to_hotplug_shift(port);
+			if (HAS_PCH_SPLIT(dev)) {
+				dig_shift = pch_port_to_hotplug_shift(port);
 				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+			} else {
+				dig_shift = i915_port_to_hotplug_shift(port);
+				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
 			}
 
 			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
@@ -1984,27 +1984,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
 			 res1, res2);
 }
 
-void gen8_flip_interrupt(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		return;
-
-	if(atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
-		mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
-				usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
-	}
-	else {
-		dev_priv->rps.sw_turbo.flip_timer.expires =
-				usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
-		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
-	}
-
-	bdw_software_turbo(dev);
-}
-
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
@@ -3494,11 +3473,13 @@ static void gen8_irq_reset(struct drm_device *dev)
 
 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
+	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
-			  ~dev_priv->de_irq_mask[PIPE_B]);
+			  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
 	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
-			  ~dev_priv->de_irq_mask[PIPE_C]);
+			  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
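
In the intel_hpd_irq_handler() hunk above, long-versus-short pulse
detection shifts a status word by a per-port amount and tests a
long-detect bit; the change only swaps which register and shift table
each platform family uses (dig_hotplug_reg with the PCH shifts on
PCH-split platforms, hotplug_trigger with the i915 shifts otherwise).
A reduced sketch of the decode, with made-up shift values rather than
the real register layout:

    #include <stdint.h>
    #include <stdio.h>

    #define LONG_DETECT 0x2 /* plays the role of PORTB_HOTPLUG_LONG_DETECT */

    /* Hypothetical per-port bit positions; the real tables differ per platform. */
    static int port_to_hotplug_shift(int port)
    {
    	switch (port) {
    	case 'B': return 0;
    	case 'C': return 8;
    	case 'D': return 16;
    	default: return -1;
    	}
    }

    int main(void)
    {
    	uint32_t status = (uint32_t)LONG_DETECT << 8; /* long pulse on port C */
    	int shift = port_to_hotplug_shift('C');
    	int long_hpd = (status >> shift) & LONG_DETECT;

    	printf("port C: %s pulse\n", long_hpd ? "long" : "short");
    	return 0;
    }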
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2ed02c391f3b..a56d9a7e7e0e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2474,6 +2474,7 @@ enum punit_power_well {
 #define _PIPEASRC	0x6001c
 #define _BCLRPAT_A	0x60020
 #define _VSYNCSHIFT_A	0x60028
+#define _PIPE_MULT_A	0x6002c
 
 /* Pipe B timing regs */
 #define _HTOTAL_B	0x61000
@@ -2485,6 +2486,7 @@ enum punit_power_well {
 #define _PIPEBSRC	0x6101c
 #define _BCLRPAT_B	0x61020
 #define _VSYNCSHIFT_B	0x61028
+#define _PIPE_MULT_B	0x6102c
 
 #define TRANSCODER_A_OFFSET 0x60000
 #define TRANSCODER_B_OFFSET 0x61000
@@ -2505,6 +2507,7 @@ enum punit_power_well {
 #define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
 #define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
 #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
+#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
 
 /* HSW+ eDP PSR registers */
 #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -5766,10 +5769,6 @@ enum punit_power_well {
 #define GEN8_UCGCTL6				0x9430
 #define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE	(1<<14)
 
-#define TIMESTAMP_CTR				0x44070
-#define FREQ_1_28_US(us)			(((us) * 100) >> 7)
-#define MCHBAR_PCU_C0				(MCHBAR_MIRROR_BASE_SNB + 0x5960)
-
 #define GEN6_GFXPAUSE				0xA000
 #define GEN6_RPNSWREQ				0xA008
 #define   GEN6_TURBO_DISABLE			(1<<31)
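
The new PIPE_MULT() macro follows the existing per-transcoder pattern:
_TRANSCODER2() rebases the transcoder A register onto the right
transcoder's mmio block, so _PIPE_MULT_A (0x6002c) and _PIPE_MULT_B
(0x6102c) differ exactly by TRANSCODER_B_OFFSET - TRANSCODER_A_OFFSET.
A simplified stand-alone sketch of that arithmetic, with the
_TRANSCODER2 plumbing reduced to an array lookup:

    #include <stdint.h>
    #include <stdio.h>

    /* TRANSCODER_A_OFFSET / TRANSCODER_B_OFFSET from the hunk above */
    static const uint32_t trans_offset[] = { 0x60000, 0x61000 };

    /* _PIPE_MULT_A == 0x6002c == transcoder A base + 0x2c */
    #define PIPE_MULT(trans) (trans_offset[(trans)] + 0x2c)

    int main(void)
    {
    	printf("PIPE_MULT(A) = 0x%x\n", PIPE_MULT(0)); /* 0x6002c */
    	printf("PIPE_MULT(B) = 0x%x\n", PIPE_MULT(1)); /* 0x6102c */
    	return 0;
    }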
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6fc77a100cc6..1fc05ffc4695 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -73,9 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
 	DRM_FORMAT_ARGB8888,
 };
 
-#define DIV_ROUND_CLOSEST_ULL(ll, d) \
-({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
-
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -4265,6 +4262,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
 	intel_set_pipe_timings(intel_crtc);
 
+	if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
+		I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
+			   intel_crtc->config.pixel_multiplier - 1);
+	}
+
 	if (intel_crtc->config.has_pch_encoder) {
 		intel_cpu_transcoder_set_m_n(intel_crtc,
 				     &intel_crtc->config.fdi_m_n, NULL);
@@ -7937,7 +7939,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
 		(I915_READ(IPS_CTL) & IPS_ENABLE);
 
-	pipe_config->pixel_multiplier = 1;
+	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+		pipe_config->pixel_multiplier =
+			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+	} else {
+		pipe_config->pixel_multiplier = 1;
+	}
 
 	return true;
 }
@@ -9773,9 +9780,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_engine_cs *ring;
 	int ret;
 
-	//trigger software GT busyness calculation
-	gen8_flip_interrupt(dev);
-
 	/*
 	 * drm_mode_page_flip_ioctl() should already catch this, but double
 	 * check to be safe. In the future we may enable pageflipping from
@@ -12223,27 +12227,36 @@ static void intel_setup_outputs(struct drm_device *dev)
 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
-		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+		/*
+		 * The DP_DETECTED bit is the latched state of the DDC
+		 * SDA pin at boot. However since eDP doesn't require DDC
+		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
+		 * eDP ports may have been muxed to an alternate function.
+		 * Thus we can't rely on the DP_DETECTED bit alone to detect
+		 * eDP ports. Consult the VBT as well as DP_DETECTED to
+		 * detect eDP ports.
+		 */
+		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
 					PORT_B);
-			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
-				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
-		}
+		if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
+		    intel_dp_is_edp(dev, PORT_B))
+			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
 
-		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
+		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
 					PORT_C);
-			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
-				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
-		}
+		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
+		    intel_dp_is_edp(dev, PORT_C))
+			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
 
 		if (IS_CHERRYVIEW(dev)) {
-			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
+			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
 				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
 						PORT_D);
-			if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
-				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
-		}
+			/* eDP not supported on port D, so don't check VBT */
+			if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
+				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
 		}
 
 	intel_dsi_init(dev);
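
The two PIPE_MULT hunks above are intentionally symmetric:
haswell_crtc_enable() writes pixel_multiplier - 1 into the register and
haswell_get_pipe_config() reads it back with + 1, so a multiplier of 1
round-trips through a raw value of 0. A tiny sketch of that
encode/decode invariant (helper names are hypothetical, not driver
functions):

    #include <assert.h>
    #include <stdint.h>

    /* The hardware field stores (multiplier - 1); raw 0 means x1. */
    static uint32_t pipe_mult_encode(uint32_t pixel_multiplier)
    {
    	return pixel_multiplier - 1;
    }

    static uint32_t pipe_mult_decode(uint32_t raw)
    {
    	return raw + 1;
    }

    int main(void)
    {
    	uint32_t m;

    	for (m = 1; m <= 4; m++)
    		assert(pipe_mult_decode(pipe_mult_encode(m)) == m);
    	return 0;
    }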
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ffbf38d9b536..94993d23e547 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -36,6 +36,9 @@
 #include <drm/drm_dp_mst_helper.h>
 #include <drm/drm_rect.h>
 
+#define DIV_ROUND_CLOSEST_ULL(ll, d) \
+({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+
 /**
  * _wait_for - magic (register) wait macro
  *
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 543e0f17ee62..e3def5ad4a77 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -419,9 +419,8 @@ static uint32_t scale(uint32_t source_val,
 	source_val = clamp(source_val, source_min, source_max);
 
 	/* avoid overflows */
-	target_val = (uint64_t)(source_val - source_min) *
-		     (target_max - target_min);
-	do_div(target_val, source_max - source_min);
+	target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
+			(target_max - target_min), source_max - source_min);
 	target_val += target_min;
 
 	return target_val;
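
With the hunk above, scale() rounds to the nearest target step instead
of truncating: DIV_ROUND_CLOSEST_ULL (added to intel_drv.h earlier in
this diff) adds half the divisor before dividing. A userspace
approximation, with do_div() replaced by the plain 64-bit division it
performs:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in: do_div(x, d) is just 64-bit division here. */
    #define DIV_ROUND_CLOSEST_ULL(ll, d) (((ll) + (d) / 2) / (d))

    int main(void)
    {
    	/* Scale 127 from a 0..255 range onto 0..100, as scale() would. */
    	uint64_t num = 127ULL * 100;

    	printf("truncated: %llu\n", (unsigned long long)(num / 255));
    	printf("rounded:   %llu\n",
    	       (unsigned long long)DIV_ROUND_CLOSEST_ULL(num, 255));
    	return 0; /* prints 49, then 50 */
    }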
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 79108059b79b..a14be5d56c6b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2320,6 +2320,7 @@ int ilk_wm_max_level(const struct drm_device *dev)
 	else
 		return 2;
 }
+
 static void intel_print_wm_latency(struct drm_device *dev,
 				   const char *name,
 				   const uint16_t wm[5])
@@ -3288,9 +3289,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 {
 	int new_power;
 
-	if (dev_priv->rps.is_bdw_sw_turbo)
-		return;
-
 	new_power = dev_priv->rps.power;
 	switch (dev_priv->rps.power) {
 	case LOW_POWER:
@@ -3498,11 +3496,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 		else if (IS_VALLEYVIEW(dev))
 			vlv_set_rps_idle(dev_priv);
-		else if (!dev_priv->rps.is_bdw_sw_turbo
-					|| atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
+		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
-		}
-
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3516,11 +3511,8 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		else if (!dev_priv->rps.is_bdw_sw_turbo
-					|| atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
+		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		}
-
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3534,17 +3526,18 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
 	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
-	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-			 dev_priv->rps.cur_freq,
-			 vlv_gpu_freq(dev_priv, val), val);
-
 	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
 		      "Odd GPU freq value\n"))
 		val &= ~1;
 
-	if (val != dev_priv->rps.cur_freq)
+	if (val != dev_priv->rps.cur_freq) {
+		DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
+				 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+				 dev_priv->rps.cur_freq,
+				 vlv_gpu_freq(dev_priv, val), val);
+
 		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+	}
 
 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
@@ -3555,26 +3548,21 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 static void gen8_disable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	if (IS_BROADWELL(dev) && dev_priv->rps.is_bdw_sw_turbo){
-		if (atomic_read(&dev_priv->rps.sw_turbo.flip_received))
-			del_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		dev_priv-> rps.is_bdw_sw_turbo = false;
-	} else {
-		I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-		I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
-			   ~dev_priv->pm_rps_events);
-		/* Complete PM interrupt masking here doesn't race with the rps work
-		 * item again unmasking PM interrupts because that is using a different
-		 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
-		 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
-		 * gen8_enable_rps will clean up. */
 
-		spin_lock_irq(&dev_priv->irq_lock);
-		dev_priv->rps.pm_iir = 0;
-		spin_unlock_irq(&dev_priv->irq_lock);
+	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
+		   ~dev_priv->pm_rps_events);
+	/* Complete PM interrupt masking here doesn't race with the rps work
+	 * item again unmasking PM interrupts because that is using a different
+	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
+	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
+	 * gen8_enable_rps will clean up. */
 
-		I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-	}
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
 }
 
 static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@ -3732,111 +3720,13 @@ static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_c
 	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
 }
 
-static void bdw_sw_calculate_freq(struct drm_device *dev,
-		struct intel_rps_bdw_cal *c, u32 *cur_time, u32 *c0)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 busy = 0;
-	u32 busyness_pct = 0;
-	u32 elapsed_time = 0;
-	u16 new_freq = 0;
-
-	if (!c || !cur_time || !c0)
-		return;
-
-	if (0 == c->last_c0)
-		goto out;
-
-	/* Check Evaluation interval */
-	elapsed_time = *cur_time - c->last_ts;
-	if (elapsed_time < c->eval_interval)
-		return;
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-
-	/*
-	 * c0 unit in 32*1.28 usec, elapsed_time unit in 1 usec.
-	 * Whole busyness_pct calculation should be
-	 *     busy = ((u64)(*c0 - c->last_c0) << 5 << 7) / 100;
-	 *     busyness_pct = (u32)(busy * 100 / elapsed_time);
-	 * The final formula is to simplify CPU calculation
-	 */
-	busy = (u64)(*c0 - c->last_c0) << 12;
-	do_div(busy, elapsed_time);
-	busyness_pct = (u32)busy;
-
-	if (c->is_up && busyness_pct >= c->it_threshold_pct)
-		new_freq = (u16)dev_priv->rps.cur_freq + 3;
-	if (!c->is_up && busyness_pct <= c->it_threshold_pct)
-		new_freq = (u16)dev_priv->rps.cur_freq - 1;
-
-	/* Adjust to new frequency busyness and compare with threshold */
-	if (0 != new_freq) {
-		if (new_freq > dev_priv->rps.max_freq_softlimit)
-			new_freq = dev_priv->rps.max_freq_softlimit;
-		else if (new_freq < dev_priv->rps.min_freq_softlimit)
-			new_freq = dev_priv->rps.min_freq_softlimit;
-
-		gen6_set_rps(dev, new_freq);
-	}
-
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
-out:
-	c->last_c0 = *c0;
-	c->last_ts = *cur_time;
-}
-
-static void gen8_set_frequency_RP0(struct work_struct *work)
-{
-	struct intel_rps_bdw_turbo *p_bdw_turbo =
-		container_of(work, struct intel_rps_bdw_turbo, work_max_freq);
-	struct intel_gen6_power_mgmt *p_power_mgmt =
-		container_of(p_bdw_turbo, struct intel_gen6_power_mgmt, sw_turbo);
-	struct drm_i915_private *dev_priv =
-		container_of(p_power_mgmt, struct drm_i915_private, rps);
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-	gen6_set_rps(dev_priv->dev, dev_priv->rps.rp0_freq);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void flip_active_timeout_handler(unsigned long var)
-{
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *) var;
-
-	del_timer(&dev_priv->rps.sw_turbo.flip_timer);
-	atomic_set(&dev_priv->rps.sw_turbo.flip_received, false);
-
-	queue_work(dev_priv->wq, &dev_priv->rps.sw_turbo.work_max_freq);
-}
-
-void bdw_software_turbo(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	u32 current_time = I915_READ(TIMESTAMP_CTR); /* unit in usec */
-	u32 current_c0 = I915_READ(MCHBAR_PCU_C0); /* unit in 32*1.28 usec */
-
-	bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.up,
-			&current_time, &current_c0);
-	bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.down,
-			&current_time, &current_c0);
-}
-
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	uint32_t rc6_mask = 0, rp_state_cap;
-	uint32_t threshold_up_pct, threshold_down_pct;
-	uint32_t ei_up, ei_down; /* up and down evaluation interval */
-	u32 rp_ctl_flag;
 	int unused;
 
-	/* Use software Turbo for BDW */
-	dev_priv->rps.is_bdw_sw_turbo = IS_BROADWELL(dev);
-
 	/* 1a: Software RC state - RC0 */
 	I915_WRITE(GEN6_RC_STATE, 0);
 
@@ -3880,74 +3770,35 @@ static void gen8_enable_rps(struct drm_device *dev)
 		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
 	I915_WRITE(GEN6_RC_VIDEO_FREQ,
 		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
-	ei_up = 84480; /* 84.48ms */
-	ei_down = 448000;
-	threshold_up_pct = 90; /* x percent busy */
-	threshold_down_pct = 70;
-
-	if (dev_priv->rps.is_bdw_sw_turbo) {
-		dev_priv->rps.sw_turbo.up.it_threshold_pct = threshold_up_pct;
-		dev_priv->rps.sw_turbo.up.eval_interval = ei_up;
-		dev_priv->rps.sw_turbo.up.is_up = true;
-		dev_priv->rps.sw_turbo.up.last_ts = 0;
-		dev_priv->rps.sw_turbo.up.last_c0 = 0;
-
-		dev_priv->rps.sw_turbo.down.it_threshold_pct = threshold_down_pct;
-		dev_priv->rps.sw_turbo.down.eval_interval = ei_down;
-		dev_priv->rps.sw_turbo.down.is_up = false;
-		dev_priv->rps.sw_turbo.down.last_ts = 0;
-		dev_priv->rps.sw_turbo.down.last_c0 = 0;
-
-		/* Start the timer to track if flip comes*/
-		dev_priv->rps.sw_turbo.timeout = 200*1000; /* in us */
-
-		init_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		dev_priv->rps.sw_turbo.flip_timer.function = flip_active_timeout_handler;
-		dev_priv->rps.sw_turbo.flip_timer.data = (unsigned long) dev_priv;
-		dev_priv->rps.sw_turbo.flip_timer.expires =
-			usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
-		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		INIT_WORK(&dev_priv->rps.sw_turbo.work_max_freq, gen8_set_frequency_RP0);
-
-		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
-	} else {
-		/* NB: Docs say 1s, and 1000000 - which aren't equivalent
-		 * 1 second timeout*/
-		I915_WRITE(GEN6_RP_DOWN_TIMEOUT, FREQ_1_28_US(1000000));
+	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
 
-		/* Docs recommend 900MHz, and 300 MHz respectively */
-		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-			   dev_priv->rps.max_freq_softlimit << 24 |
-			   dev_priv->rps.min_freq_softlimit << 16);
+	/* Docs recommend 900MHz, and 300 MHz respectively */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   dev_priv->rps.max_freq_softlimit << 24 |
+		   dev_priv->rps.min_freq_softlimit << 16);
 
-		I915_WRITE(GEN6_RP_UP_THRESHOLD,
-			FREQ_1_28_US(ei_up * threshold_up_pct / 100));
-		I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
-			FREQ_1_28_US(ei_down * threshold_down_pct / 100));
-		I915_WRITE(GEN6_RP_UP_EI,
-			FREQ_1_28_US(ei_up));
-		I915_WRITE(GEN6_RP_DOWN_EI,
-			FREQ_1_28_US(ei_down));
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
+	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
 
-		I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-	}
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
 	/* 5: Enable RPS */
-	rp_ctl_flag = GEN6_RP_MEDIA_TURBO |
-		      GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		      GEN6_RP_MEDIA_IS_GFX |
-		      GEN6_RP_UP_BUSY_AVG |
-		      GEN6_RP_DOWN_IDLE_AVG;
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		rp_ctl_flag |= GEN6_RP_ENABLE;
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
 
-	I915_WRITE(GEN6_RP_CONTROL, rp_ctl_flag);
+	/* 6: Ring frequency + overclocking (our driver does this later */
 
-	/* 6: Ring frequency + overclocking
-	 * (our driver does this later */
 	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		gen8_enable_rps_interrupts(dev);
+
+	gen8_enable_rps_interrupts(dev);
 
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5421,8 +5272,6 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 					     rps.delayed_resume_work.work);
 	struct drm_device *dev = dev_priv->dev;
 
-	dev_priv->rps.is_bdw_sw_turbo = false;
-
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_CHERRYVIEW(dev)) {
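
The RPS threshold writes in the gen8_enable_rps() hunk above are in
hardware units of 1.28 us. The removed FREQ_1_28_US(us) macro computed
(us * 100) >> 7, i.e. us / 1.28, and the new literals hard-code the
same quotients (100000000 / 128 for 1 s, 7600000 / 128 for 76 ms, and
so on). A quick check that the two spellings agree:

    #include <stdio.h>

    /* The removed i915_reg.h helper: microseconds -> 1.28 us hardware units. */
    #define FREQ_1_28_US(us) (((us) * 100) >> 7)

    int main(void)
    {
    	printf("%d %d\n", FREQ_1_28_US(1000000), 100000000 / 128); /* 781250 781250 */
    	printf("%d %d\n", FREQ_1_28_US(76000),   7600000 / 128);   /* 59375 59375 */
    	printf("%d %d\n", FREQ_1_28_US(313000),  31300000 / 128);  /* 244531 244531 */
    	return 0;
    }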
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cc50bf65d35a..7c0c28c65cb4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -707,7 +707,7 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
 	 * update the number of dwords required based on the
 	 * actual number of workarounds applied
 	 */
-	ret = intel_ring_begin(ring, 24);
+	ret = intel_ring_begin(ring, 18);
 	if (ret)
 		return ret;
 
@@ -722,19 +722,8 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
 	intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
-	/*
-	 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
-	 * pre-production hardware
-	 */
 	intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
-			   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS
-					      | GEN8_SAMPLER_POWER_BYPASS_DIS));
-
-	intel_ring_emit_wa(ring, GEN7_HALF_SLICE_CHICKEN1,
-			   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
-
-	intel_ring_emit_wa(ring, COMMON_SLICE_CHICKEN2,
-			   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
+			   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
 
 	/* Use Force Non-Coherent whenever executing a 3D context. This is a
 	 * workaround for for a possible hang in the unlikely event a TLB
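
The intel_ring_begin() budget shrinks from 24 to 18 dwords in step with
the two dropped intel_ring_emit_wa() calls: assuming each call emits
three dwords (a LOAD_REGISTER_IMM header, the register offset, and the
value), removing two calls frees 2 * 3 = 6 dwords. A sketch of that
accounting under the same assumption:

    #include <assert.h>

    /* One intel_ring_emit_wa(): LRI header + register offset + value. */
    #define DWORDS_PER_WA 3

    int main(void)
    {
    	int before = 24, removed_writes = 2;

    	assert(before - removed_writes * DWORDS_PER_WA == 18);
    	return 0;
    }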