author     Dave Airlie <airlied@redhat.com>    2014-07-19 02:43:41 -0400
committer  Dave Airlie <airlied@redhat.com>    2014-07-19 02:43:41 -0400
commit     c51f71679042a5f388d9580ffbede14c897f1e86 (patch)
tree       517d9f80b2251c71698e3abd2a7cb977d74e56c0 /drivers/gpu/drm/i915
parent     b957f457fbce30cc4901dc28f2b56f2b15dfe84a (diff)
parent     7b3c29f6fd374640266d82aafe6929761469e27b (diff)
Merge tag 'drm-intel-next-2014-07-11' of git://anongit.freedesktop.org/drm-intel into drm-next
- fbc improvements when stolen memory is tight (Ben)
- cdclk handling improvements for vlv/chv (Ville)
- proper fix for stuck primary planes on gmch platforms with cxsr (Imre & Egbert Eich)
- gen8 hw semaphore support (Ben)
- more execlist prep work from Oscar Mateo
- locking fixes for primary planes (Matt Roper)
- code rework to support runtime pm for dpms on hsw/bdw (Paulo, Imre & me), but not yet
  enabled because some fixes from Paulo haven't made the cut
- more gpu boost tuning from Chris
- as usual piles of little things all over

* tag 'drm-intel-next-2014-07-11' of git://anongit.freedesktop.org/drm-intel: (93 commits)
  drm/i915: Make the RPS interrupt generation mask handle the vlv wa
  drm/i915: Move RPS evaluation interval counters to i915->rps
  drm/i915: Don't cast a pointer to void* unnecessarily
  drm/i915: don't read LVDS regs at compute_config time
  drm/i915: check the power domains in intel_lvds_get_hw_state()
  drm/i915: check the power domains in ironlake_get_pipe_config()
  drm/i915: don't skip shared DPLL assertion on LPT
  drm/i915: Only touch WRPLL hw state in enable/disable hooks
  drm/i915: Switch to common shared dpll framework for WRPLLs
  drm/i915: ->enable hook for WRPLLs
  drm/i915: ->disable hook for WRPLLs
  drm/i915: State readout support for WRPLLs
  drm/i915: add POWER_DOMAIN_PLLS
  drm/i915: Document that the pll->mode_set hook is optional
  drm/i915: Basic shared dpll support for WRPLLs
  drm/i915: Precompute static ddi_pll_sel values in encoders
  drm/i915: BDW also has special-purpose DP DDI clocks
  drm/i915: State readout and cross-checking for ddi_pll_sel
  drm/i915: Move ddi_pll_sel into the pipe config
  drm/i915: Add a debugfs file for the shared dpll state
  ...
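Two of the changes merged here surface as new debugfs files (added in i915_debugfs.c below). A minimal userspace sketch for inspecting them, assuming debugfs is mounted at /sys/kernel/debug and the i915 device sits on dri minor 0 (both are assumptions about the local setup, not anything this merge guarantees):

#include <stdio.h>

/* Illustrative sketch, not part of the patch: dump the two debugfs
 * files this series adds. */
static void dump(const char *path)
{
	FILE *f = fopen(path, "r");
	char line[256];

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	dump("/sys/kernel/debug/dri/0/i915_semaphore_status");
	dump("/sys/kernel/debug/dri/0/i915_shared_dplls_info");
	return 0;
}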
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        | 301
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c            |  13
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  78
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c    | 142
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 300
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c     |  83
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c      |  79
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c            | 335
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h            |  39
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c           |  32
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c           | 350
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       | 475
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c            |  59
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h           |  25
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c         |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c           |  54
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c          |  18
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c            | 333
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c    | 420
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h    |  90
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c        |   2
23 files changed, 2000 insertions(+), 1246 deletions(-)
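Before the diff itself: the gen8 semaphore readout added to i915_debugfs.c below walks a single semaphore page holding one 8-byte seqno slot per (ring, peer) pair, with "signal" slots at i * I915_NUM_RINGS + j and "wait" slots at i + j * I915_NUM_RINGS. A standalone sketch of that indexing; NUM_RINGS = 5 here is an assumption standing in for the driver's I915_NUM_RINGS:

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch, not driver code: print the byte offsets that
 * i915_semaphore_status reads out of the BDW semaphore page. */
#define NUM_RINGS 5

int main(void)
{
	int i, j;

	for (i = 0; i < NUM_RINGS; i++)
		for (j = 0; j < NUM_RINGS; j++)
			printf("ring %d/peer %d: signal @0x%02llx, wait @0x%02llx\n",
			       i, j,
			       (unsigned long long)((i * NUM_RINGS + j) * 8),
			       (unsigned long long)((i + j * NUM_RINGS) * 8));
	return 0;
}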
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a93b3bfdad61..4a5b0f80e059 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -176,7 +176,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 
 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
 {
-	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 	seq_putc(m, ' ');
 }
@@ -994,29 +994,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 			i915_next_seqno_get, i915_next_seqno_set,
 			"0x%llx\n");
 
-static int i915_rstdby_delays(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 crstanddelay;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	crstanddelay = I915_READ16(CRSTANDVID);
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
-
-	return 0;
-}
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1158,61 +1135,6 @@ out:
 	return ret;
 }
 
-static int i915_delayfreq_table(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 delayfreq;
-	int ret, i;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	for (i = 0; i < 16; i++) {
-		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
-		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
-			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-static inline int MAP_TO_MV(int map)
-{
-	return 1250 - (map * 25);
-}
-
-static int i915_inttoext_table(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 inttoext;
-	int ret, i;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	for (i = 1; i <= 32; i++) {
-		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
-		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_info_node *node = m->private;
@@ -1523,10 +1445,17 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
-		seq_puts(m, "enabled\n");
-	else
-		seq_puts(m, "disabled\n");
+	seq_printf(m, "Enabled by kernel parameter: %s\n",
+		   yesno(i915.enable_ips));
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		seq_puts(m, "Currently: unknown\n");
+	} else {
+		if (I915_READ(IPS_CTL) & IPS_ENABLE)
+			seq_puts(m, "Currently: enabled\n");
+		else
			seq_puts(m, "Currently: disabled\n");
+	}
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -1630,26 +1559,6 @@ out:
 	return ret;
 }
 
-static int i915_gfxec(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static int i915_opregion(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1746,7 +1655,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	}
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		if (ctx->obj == NULL)
+		if (ctx->legacy_hw_ctx.rcs_state == NULL)
 			continue;
 
 		seq_puts(m, "HW context ");
@@ -1755,7 +1664,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		if (ring->default_context == ctx)
 			seq_printf(m, "(default context %s) ", ring->name);
 
-		describe_obj(m, ctx->obj);
+		describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
 		seq_putc(m, '\n');
 	}
 
@@ -1869,7 +1778,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
 	if (i915_gem_context_is_default(ctx))
 		seq_puts(m, " default context:\n");
 	else
-		seq_printf(m, " context %d:\n", ctx->id);
+		seq_printf(m, " context %d:\n", ctx->user_handle);
 	ppgtt->debug_dump(ppgtt, m);
 
 	return 0;
@@ -2134,6 +2043,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 		return "VGA";
 	case POWER_DOMAIN_AUDIO:
 		return "AUDIO";
+	case POWER_DOMAIN_PLLS:
+		return "PLLS";
 	case POWER_DOMAIN_INIT:
 		return "INIT";
 	default:
@@ -2358,17 +2269,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
 		bool active;
 		int x, y;
 
-		seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
 			   crtc->base.base.id, pipe_name(crtc->pipe),
-			   yesno(crtc->active));
+			   yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
 		if (crtc->active) {
 			intel_crtc_info(m, crtc);
 
 			active = cursor_position(dev, crtc->pipe, &x, &y);
-			seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
 				   yesno(crtc->cursor_base),
-				   x, y, crtc->cursor_addr,
-				   yesno(active));
+				   x, y, crtc->cursor_width, crtc->cursor_height,
+				   crtc->cursor_addr, yesno(active));
 		}
 
 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -2388,6 +2299,104 @@ static int i915_display_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_semaphore_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	int i, j, ret;
+
+	if (!i915_semaphore_is_enabled(dev)) {
+		seq_puts(m, "Semaphores are disabled\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	if (IS_BROADWELL(dev)) {
+		struct page *page;
+		uint64_t *seqno;
+
+		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+
+		seqno = (uint64_t *)kmap_atomic(page);
+		for_each_ring(ring, dev_priv, i) {
+			uint64_t offset;
+
+			seq_printf(m, "%s\n", ring->name);
+
+			seq_puts(m, " Last signal:");
+			for (j = 0; j < num_rings; j++) {
+				offset = i * I915_NUM_RINGS + j;
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+
+			seq_puts(m, " Last wait: ");
+			for (j = 0; j < num_rings; j++) {
+				offset = i + (j * I915_NUM_RINGS);
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+
+		}
+		kunmap_atomic(seqno);
+	} else {
+		seq_puts(m, " Last signal:");
+		for_each_ring(ring, dev_priv, i)
+			for (j = 0; j < num_rings; j++)
+				seq_printf(m, "0x%08x\n",
+					   I915_READ(ring->semaphore.mbox.signal[j]));
+		seq_putc(m, '\n');
+	}
+
+	seq_puts(m, "\nSync seqno:\n");
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < num_rings; j++) {
+			seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
+		}
+		seq_putc(m, '\n');
+	}
+	seq_putc(m, '\n');
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	drm_modeset_lock_all(dev);
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+		seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
+			   pll->active, yesno(pll->on));
+		seq_printf(m, " tracked hardware state:\n");
+		seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
+		seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
+		seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
+		seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
+		seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
+	}
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
@@ -2860,7 +2869,60 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 	return 0;
 }
 
-static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * If we use the eDP transcoder we need to make sure that we don't
+	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+	 * relevant on hsw with pipe A when using the always-on power well
+	 * routing.
+	 */
+	if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
+	    !crtc->config.pch_pfit.enabled) {
+		crtc->config.pch_pfit.force_thru = true;
+
+		intel_display_power_get(dev_priv,
+					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+
+		dev_priv->display.crtc_disable(&crtc->base);
+		dev_priv->display.crtc_enable(&crtc->base);
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * If we use the eDP transcoder we need to make sure that we don't
+	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+	 * relevant on hsw with pipe A when using the always-on power well
+	 * routing.
+	 */
+	if (crtc->config.pch_pfit.force_thru) {
+		crtc->config.pch_pfit.force_thru = false;
+
+		dev_priv->display.crtc_disable(&crtc->base);
+		dev_priv->display.crtc_enable(&crtc->base);
+
+		intel_display_power_put(dev_priv,
+					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
 				uint32_t *val)
 {
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -2874,6 +2936,9 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
+		if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev);
+
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -2906,11 +2971,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 	else if (INTEL_INFO(dev)->gen < 5)
 		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 	else if (IS_VALLEYVIEW(dev))
-		ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val);
+		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 	else if (IS_GEN5(dev) || IS_GEN6(dev))
 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
 	else
-		ret = ivb_pipe_crc_ctl_reg(&source, &val);
+		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 
 	if (ret != 0)
 		return ret;
@@ -2962,6 +3027,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 			g4x_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_VALLEYVIEW(dev))
 			vlv_undo_pipe_scramble_reset(dev, pipe);
+		else if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
 	}
 
 	return 0;
@@ -3815,14 +3882,10 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
-	{"i915_rstdby_delays", i915_rstdby_delays, 0},
 	{"i915_frequency_info", i915_frequency_info, 0},
-	{"i915_delayfreq_table", i915_delayfreq_table, 0},
-	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
-	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_ips_status", i915_ips_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
@@ -3839,6 +3902,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_pc8_status", i915_pc8_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
 	{"i915_display_info", i915_display_info, 0},
+	{"i915_semaphore_status", i915_semaphore_status, 0},
+	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 84b55665bd87..6df6506db919 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1593,7 +1593,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
-	dev->dev_private = (void *)dev_priv;
+	dev->dev_private = dev_priv;
 	dev_priv->dev = dev;
 
 	/* copy initial configuration to dev_priv->info */
@@ -1954,11 +1954,11 @@
 	i915_dma_cleanup(dev);
 }
 
-void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
 {
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_context_close(dev, file_priv);
-	i915_gem_release(dev, file_priv);
+	i915_gem_context_close(dev, file);
+	i915_gem_release(dev, file);
 	mutex_unlock(&dev->struct_mutex);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b0955fffca98..83cb43a24768 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -477,10 +477,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	if (i915.semaphores >= 0)
 		return i915.semaphores;
 
-	/* Until we get further testing... */
-	if (IS_GEN8(dev))
-		return false;
-
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -520,6 +516,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 		return error;
 	}
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	intel_runtime_pm_disable_interrupts(dev);
 
 	intel_suspend_gt_powersave(dev);
@@ -541,10 +539,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	i915_save_state(dev);
 
-	if (acpi_target_system_state() >= ACPI_STATE_S3)
-		opregion_target_state = PCI_D3cold;
-	else
+	opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+	if (acpi_target_system_state() < ACPI_STATE_S3)
 		opregion_target_state = PCI_D1;
+#endif
 	intel_opregion_notify_adapter(dev, opregion_target_state);
 
 	intel_uncore_forcewake_reset(dev, false);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6a1e990cb482..2dc3a922a3c8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -129,6 +129,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_PORT_OTHER,
 	POWER_DOMAIN_VGA,
 	POWER_DOMAIN_AUDIO,
+	POWER_DOMAIN_PLLS,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
@@ -184,8 +185,10 @@ struct i915_mmu_object;
 enum intel_dpll_id {
 	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
 	/* real shared dpll ids must be >= 0 */
-	DPLL_ID_PCH_PLL_A,
-	DPLL_ID_PCH_PLL_B,
+	DPLL_ID_PCH_PLL_A = 0,
+	DPLL_ID_PCH_PLL_B = 1,
+	DPLL_ID_WRPLL1 = 0,
+	DPLL_ID_WRPLL2 = 1,
 };
 #define I915_NUM_PLLS 2
 
@@ -194,6 +197,7 @@ struct intel_dpll_hw_state {
 	uint32_t dpll_md;
 	uint32_t fp0;
 	uint32_t fp1;
+	uint32_t wrpll;
 };
 
 struct intel_shared_dpll {
@@ -204,6 +208,8 @@ struct intel_shared_dpll {
 	/* should match the index in the dev_priv->shared_dplls array */
 	enum intel_dpll_id id;
 	struct intel_dpll_hw_state hw_state;
+	/* The mode_set hook is optional and should be used together with the
+	 * intel_prepare_shared_dpll function. */
 	void (*mode_set)(struct drm_i915_private *dev_priv,
 			 struct intel_shared_dpll *pll);
 	void (*enable)(struct drm_i915_private *dev_priv,
@@ -228,12 +234,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
 			    int pixel_clock, int link_clock,
 			    struct intel_link_m_n *m_n);
 
-struct intel_ddi_plls {
-	int spll_refcount;
-	int wrpll1_refcount;
-	int wrpll2_refcount;
-};
-
 /* Interface history:
  *
  * 1.1: Original.
@@ -324,6 +324,7 @@ struct drm_i915_error_state {
 	u64 fence[I915_MAX_NUM_FENCES];
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
+	struct drm_i915_error_object *semaphore_obj;
 
 	struct drm_i915_error_ring {
 		bool valid;
@@ -584,27 +585,48 @@ struct i915_ctx_hang_stats {
 };
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_ID 0
+#define DEFAULT_CONTEXT_HANDLE 0
+/**
+ * struct intel_context - as the name implies, represents a context.
+ * @ref: reference count.
+ * @user_handle: userspace tracking identity for this context.
+ * @remap_slice: l3 row remapping information.
+ * @file_priv: filp associated with this context (NULL for global default
+ *	       context).
+ * @hang_stats: information about the role of this context in possible GPU
+ *		hangs.
+ * @vm: virtual memory space used by this context.
+ * @legacy_hw_ctx: render context backing object and whether it is correctly
+ *		   initialized (legacy ring submission mechanism only).
+ * @link: link in the global list of contexts.
+ *
+ * Contexts are memory images used by the hardware to store copies of their
+ * internal state.
+ */
 struct intel_context {
 	struct kref ref;
-	int id;
-	bool is_initialized;
+	int user_handle;
 	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
-	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_address_space *vm;
 
+	struct {
+		struct drm_i915_gem_object *rcs_state;
+		bool initialized;
+	} legacy_hw_ctx;
+
 	struct list_head link;
 };
 
 struct i915_fbc {
 	unsigned long size;
+	unsigned threshold;
 	unsigned int fb_id;
 	enum plane plane;
 	int y;
 
-	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node compressed_fb;
 	struct drm_mm_node *compressed_llb;
 
 	struct intel_fbc_work {
@@ -880,6 +902,12 @@ struct vlv_s0ix_state {
 	u32 clock_gate_dis2;
 };
 
+struct intel_rps_ei {
+	u32 cz_clock;
+	u32 render_c0;
+	u32 media_c0;
+};
+
 struct intel_gen6_power_mgmt {
 	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
@@ -904,12 +932,17 @@ struct intel_gen6_power_mgmt {
 	u8 rp1_freq; /* "less than" RP0 power/freqency */
 	u8 rp0_freq; /* Non-overclocked max frequency. */
 
+	u32 ei_interrupt_count;
+
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
 	bool enabled;
 	struct delayed_work delayed_resume_work;
 
+	/* manual wa residency calculations */
+	struct intel_rps_ei up_ei, down_ei;
+
 	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
@@ -1374,6 +1407,7 @@ struct drm_i915_private {
 
 	struct pci_dev *bridge_dev;
 	struct intel_engine_cs ring[I915_NUM_RINGS];
+	struct drm_i915_gem_object *semaphore_obj;
 	uint32_t last_seqno, next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
@@ -1480,7 +1514,6 @@ struct drm_i915_private {
 
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
-	struct intel_ddi_plls ddi_plls;
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	/* Reclocking support */
@@ -1557,6 +1590,11 @@ struct drm_i915_private {
 
 	struct i915_runtime_pm pm;
 
+	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
+	u32 long_hpd_port_mask;
+	u32 short_hpd_port_mask;
+	struct work_struct dig_port_work;
+
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
@@ -2097,12 +2135,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev);
 extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
 extern void i915_driver_lastclose(struct drm_device * dev);
 extern void i915_driver_preclose(struct drm_device *dev,
-				 struct drm_file *file_priv);
+				 struct drm_file *file);
 extern void i915_driver_postclose(struct drm_device *dev,
-				  struct drm_file *file_priv);
+				  struct drm_file *file);
 extern int i915_driver_device_is_agp(struct drm_device * dev);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -2457,7 +2495,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx)
 
 static inline bool i915_gem_context_is_default(const struct intel_context *c)
 {
-	return c->id == DEFAULT_CONTEXT_ID;
+	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -2488,7 +2526,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
 void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
@@ -2647,6 +2685,8 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
 extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f6d123828926..e5d4d73a9844 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1168,7 +1168,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 
 	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
-	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
 		gen6_rps_boost(dev_priv);
 		if (file_priv)
 			mod_delayed_work(dev_priv->wq,
@@ -2330,7 +2330,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	u32 request_ring_position, request_start;
 	int ret;
 
-	request_start = intel_ring_get_tail(ring);
+	request_start = intel_ring_get_tail(ring->buffer);
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2351,7 +2351,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	request_ring_position = intel_ring_get_tail(ring);
+	request_ring_position = intel_ring_get_tail(ring->buffer);
 
 	ret = ring->add_request(ring);
 	if (ret)
@@ -2842,6 +2842,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	idx = intel_ring_sync_index(from, to);
 
 	seqno = obj->last_read_seqno;
+	/* Optimization: Avoid semaphore sync when we are sure we already
+	 * waited for an object with higher seqno */
 	if (seqno <= from->semaphore.sync_seqno[idx])
 		return 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 0d2c75bde96e..de72a2859f32 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -182,14 +182,14 @@ void i915_gem_context_free(struct kref *ctx_ref)
 			       typeof(*ctx), ref);
 	struct i915_hw_ppgtt *ppgtt = NULL;
 
-	if (ctx->obj) {
+	if (ctx->legacy_hw_ctx.rcs_state) {
 		/* We refcount even the aliasing PPGTT to keep the code symmetric */
-		if (USES_PPGTT(ctx->obj->base.dev))
+		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
 			ppgtt = ctx_to_ppgtt(ctx);
 
 		/* XXX: Free up the object before tearing down the address space, in
 		 * case we're bound in the PPGTT */
-		drm_gem_object_unreference(&ctx->obj->base);
+		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	}
 
 	if (ppgtt)
@@ -198,6 +198,36 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	kfree(ctx);
 }
 
+static struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, size);
+	if (obj == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Try to make the context utilize L3 as well as LLC.
+	 *
+	 * On VLV we don't have L3 controls in the PTEs so we
+	 * shouldn't touch the cache level, especially as that
+	 * would make the object snooped which might have a
+	 * negative performance impact.
+	 */
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+		/* Failure shouldn't ever happen this early */
+		if (WARN_ON(ret)) {
+			drm_gem_object_unreference(&obj->base);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return obj;
+}
+
 static struct i915_hw_ppgtt *
 create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
 {
@@ -234,40 +264,26 @@ __create_hw_context(struct drm_device *dev,
 	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	if (dev_priv->hw_context_size) {
-		ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
-		if (ctx->obj == NULL) {
-			ret = -ENOMEM;
+		struct drm_i915_gem_object *obj =
+				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+		if (IS_ERR(obj)) {
+			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-
-		/*
-		 * Try to make the context utilize L3 as well as LLC.
-		 *
-		 * On VLV we don't have L3 controls in the PTEs so we
-		 * shouldn't touch the cache level, especially as that
-		 * would make the object snooped which might have a
-		 * negative performance impact.
-		 */
-		if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
-			ret = i915_gem_object_set_cache_level(ctx->obj,
-							      I915_CACHE_L3_LLC);
-			/* Failure shouldn't ever happen this early */
-			if (WARN_ON(ret))
-				goto err_out;
-		}
+		ctx->legacy_hw_ctx.rcs_state = obj;
 	}
 
 	/* Default context will never have a file_priv */
 	if (file_priv != NULL) {
 		ret = idr_alloc(&file_priv->context_idr, ctx,
-				DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
 		if (ret < 0)
 			goto err_out;
 	} else
-		ret = DEFAULT_CONTEXT_ID;
+		ret = DEFAULT_CONTEXT_HANDLE;
 
 	ctx->file_priv = file_priv;
-	ctx->id = ret;
+	ctx->user_handle = ret;
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
 	 * is no remap info, it will be a NOP. */
@@ -301,7 +317,7 @@ i915_gem_create_context(struct drm_device *dev,
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (is_global_default_ctx && ctx->obj) {
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
 		/* We may need to do things with the shrinker which
 		 * require us to immediately switch back to the default
 		 * context. This can cause a problem as pinning the
@@ -309,7 +325,7 @@ i915_gem_create_context(struct drm_device *dev,
 		 * be available. To avoid this we always pin the default
 		 * context.
 		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->obj,
+		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(dev), 0);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -349,8 +365,8 @@ i915_gem_create_context(struct drm_device *dev,
 	return ctx;
 
 err_unpin:
-	if (is_global_default_ctx && ctx->obj)
-		i915_gem_object_ggtt_unpin(ctx->obj);
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
@@ -366,23 +382,27 @@ void i915_gem_context_reset(struct drm_device *dev)
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
 		struct intel_context *dctx = ring->default_context;
+		struct intel_context *lctx = ring->last_context;
 
 		/* Do a fake switch to the default context */
-		if (ring->last_context == dctx)
+		if (lctx == dctx)
 			continue;
 
-		if (!ring->last_context)
+		if (!lctx)
 			continue;
 
-		if (dctx->obj && i == RCS) {
-			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
+			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
 						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
-			dctx->obj->base.write_domain = 0;
-			dctx->obj->active = 0;
+			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
+			dctx->legacy_hw_ctx.rcs_state->active = 0;
 		}
 
-		i915_gem_context_unreference(ring->last_context);
+		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+		i915_gem_context_unreference(lctx);
 		i915_gem_context_reference(dctx);
 		ring->last_context = dctx;
 	}
@@ -429,7 +449,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
 	int i;
 
-	if (dctx->obj) {
+	if (dctx->legacy_hw_ctx.rcs_state) {
 		/* The only known way to stop the gpu from accessing the hw context is
 		 * to reset it. Do this as the very last operation to avoid confusing
 		 * other code, leading to spurious errors. */
@@ -444,13 +464,13 @@ void i915_gem_context_fini(struct drm_device *dev)
 		WARN_ON(!dev_priv->ring[RCS].last_context);
 		if (dev_priv->ring[RCS].last_context == dctx) {
 			/* Fake switch to NULL context */
-			WARN_ON(dctx->obj->active);
-			i915_gem_object_ggtt_unpin(dctx->obj);
+			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 			i915_gem_context_unreference(dctx);
 			dev_priv->ring[RCS].last_context = NULL;
 		}
 
-		i915_gem_object_ggtt_unpin(dctx->obj);
+		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -570,7 +590,7 @@ mi_set_context(struct intel_engine_cs *ring,
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
@@ -602,8 +622,8 @@ static int do_switch(struct intel_engine_cs *ring,
 	int ret, i;
 
 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
-		BUG_ON(from->obj == NULL);
-		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}
 
 	if (from == to && !to->remap_slice)
@@ -611,7 +631,7 @@ static int do_switch(struct intel_engine_cs *ring,
 
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
-		ret = i915_gem_obj_ggtt_pin(to->obj,
+		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(ring->dev), 0);
 		if (ret)
 			return ret;
@@ -644,17 +664,17 @@ static int do_switch(struct intel_engine_cs *ring,
 	 *
 	 * XXX: We need a real interface to do this instead of trickery.
 	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
 	if (ret)
 		goto unpin_out;
 
-	if (!to->obj->has_global_gtt_mapping) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
 							    &dev_priv->gtt.base);
-		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
 	}
 
-	if (!to->is_initialized || i915_gem_context_is_default(to))
+	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
 
 	ret = mi_set_context(ring, to, hw_flags);
@@ -680,8 +700,8 @@ static int do_switch(struct intel_engine_cs *ring,
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -689,16 +709,16 @@ static int do_switch(struct intel_engine_cs *ring,
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->obj->dirty = 1;
-		BUG_ON(from->obj->ring != ring);
+		from->legacy_hw_ctx.rcs_state->dirty = 1;
+		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->obj);
+		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
 		i915_gem_context_unreference(from);
 	}
 
-	uninitialized = !to->is_initialized && from == NULL;
-	to->is_initialized = true;
+	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+	to->legacy_hw_ctx.initialized = true;
 
 done:
 	i915_gem_context_reference(to);
@@ -714,7 +734,7 @@ done:
 
 unpin_out:
 	if (ring->id == RCS)
-		i915_gem_object_ggtt_unpin(to->obj);
+		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
 	return ret;
 }
 
@@ -735,7 +755,7 @@ int i915_switch_context(struct intel_engine_cs *ring,
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (to->obj == NULL) { /* We have the fake context */
+	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
 		if (to != ring->last_context) {
 			i915_gem_context_reference(to);
 			if (ring->last_context)
@@ -773,7 +793,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	args->ctx_id = ctx->id;
+	args->ctx_id = ctx->user_handle;
 	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
 
 	return 0;
@@ -787,7 +807,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	struct intel_context *ctx;
 	int ret;
 
-	if (args->ctx_id == DEFAULT_CONTEXT_ID)
+	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
 		return -ENOENT;
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -800,7 +820,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 		return PTR_ERR(ctx);
 	}
 
-	idr_remove(&ctx->file_priv->context_idr, ctx->id);
+	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
 	i915_gem_context_unreference(ctx);
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d815ef51a5ea..60998fc4e5b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
938 struct intel_context *ctx = NULL; 938 struct intel_context *ctx = NULL;
939 struct i915_ctx_hang_stats *hs; 939 struct i915_ctx_hang_stats *hs;
940 940
941 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID) 941 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
942 return ERR_PTR(-EINVAL); 942 return ERR_PTR(-EINVAL);
943 943
944 ctx = i915_gem_context_get(file->driver_priv, ctx_id); 944 ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -1026,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
1026 return 0; 1026 return 0;
1027} 1027}
1028 1028
1029static int
1030legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1031 struct intel_engine_cs *ring,
1032 struct intel_context *ctx,
1033 struct drm_i915_gem_execbuffer2 *args,
1034 struct list_head *vmas,
1035 struct drm_i915_gem_object *batch_obj,
1036 u64 exec_start, u32 flags)
1037{
1038 struct drm_clip_rect *cliprects = NULL;
1039 struct drm_i915_private *dev_priv = dev->dev_private;
1040 u64 exec_len;
1041 int instp_mode;
1042 u32 instp_mask;
1043 int i, ret = 0;
1044
1045 if (args->num_cliprects != 0) {
1046 if (ring != &dev_priv->ring[RCS]) {
1047 DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1048 return -EINVAL;
1049 }
1050
1051 if (INTEL_INFO(dev)->gen >= 5) {
1052 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1053 return -EINVAL;
1054 }
1055
1056 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1057 DRM_DEBUG("execbuf with %u cliprects\n",
1058 args->num_cliprects);
1059 return -EINVAL;
1060 }
1061
1062 cliprects = kcalloc(args->num_cliprects,
1063 sizeof(*cliprects),
1064 GFP_KERNEL);
1065 if (cliprects == NULL) {
1066 ret = -ENOMEM;
1067 goto error;
1068 }
1069
1070 if (copy_from_user(cliprects,
1071 to_user_ptr(args->cliprects_ptr),
1072 sizeof(*cliprects)*args->num_cliprects)) {
1073 ret = -EFAULT;
1074 goto error;
1075 }
1076 } else {
1077 if (args->DR4 == 0xffffffff) {
1078 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1079 args->DR4 = 0;
1080 }
1081
1082 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
1083 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
1084 return -EINVAL;
1085 }
1086 }
1087
1088 ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
1089 if (ret)
1090 goto error;
1091
1092 ret = i915_switch_context(ring, ctx);
1093 if (ret)
1094 goto error;
1095
1096 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1097 instp_mask = I915_EXEC_CONSTANTS_MASK;
1098 switch (instp_mode) {
1099 case I915_EXEC_CONSTANTS_REL_GENERAL:
1100 case I915_EXEC_CONSTANTS_ABSOLUTE:
1101 case I915_EXEC_CONSTANTS_REL_SURFACE:
1102 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
1103 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1104 ret = -EINVAL;
1105 goto error;
1106 }
1107
1108 if (instp_mode != dev_priv->relative_constants_mode) {
1109 if (INTEL_INFO(dev)->gen < 4) {
1110 DRM_DEBUG("no rel constants on pre-gen4\n");
1111 ret = -EINVAL;
1112 goto error;
1113 }
1114
1115 if (INTEL_INFO(dev)->gen > 5 &&
1116 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1117 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1118 ret = -EINVAL;
1119 goto error;
1120 }
1121
1122 /* The HW changed the meaning of this bit on gen6 */
1123 if (INTEL_INFO(dev)->gen >= 6)
1124 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1125 }
1126 break;
1127 default:
1128 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1129 ret = -EINVAL;
1130 goto error;
1131 }
1132
1133 if (ring == &dev_priv->ring[RCS] &&
1134 instp_mode != dev_priv->relative_constants_mode) {
1135 ret = intel_ring_begin(ring, 4);
1136 if (ret)
1137 goto error;
1138
1139 intel_ring_emit(ring, MI_NOOP);
1140 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1141 intel_ring_emit(ring, INSTPM);
1142 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1143 intel_ring_advance(ring);
1144
1145 dev_priv->relative_constants_mode = instp_mode;
1146 }
1147
1148 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1149 ret = i915_reset_gen7_sol_offsets(dev, ring);
1150 if (ret)
1151 goto error;
1152 }
1153
1154 exec_len = args->batch_len;
1155 if (cliprects) {
1156 for (i = 0; i < args->num_cliprects; i++) {
1157 ret = i915_emit_box(dev, &cliprects[i],
1158 args->DR1, args->DR4);
1159 if (ret)
1160 goto error;
1161
1162 ret = ring->dispatch_execbuffer(ring,
1163 exec_start, exec_len,
1164 flags);
1165 if (ret)
1166 goto error;
1167 }
1168 } else {
1169 ret = ring->dispatch_execbuffer(ring,
1170 exec_start, exec_len,
1171 flags);
1172 if (ret)
1173 return ret;
1174 }
1175
1176 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1177
1178 i915_gem_execbuffer_move_to_active(vmas, ring);
1179 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1180
1181error:
1182 kfree(cliprects);
1183 return ret;
1184}
1185
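[Editor's note: the INSTPM write emitted above ("instp_mask << 16 | instp_mode") relies on the gen6+ masked-register convention, where the high 16 bits of the dword act as a per-bit write enable for the low 16 bits, leaving unselected INSTPM bits untouched. A minimal sketch of that encoding; the helper name is illustrative, not part of this patch:]

	/* Sketch: gen6+ masked MMIO write payload, as used for INSTPM above. */
	static inline u32 masked_reg_value(u16 mask, u16 value)
	{
		/* bits 31:16 = write-enable mask, bits 15:0 = new bit values */
		return ((u32)mask << 16) | value;
	}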
1029/** 1186/**
1030 * Find one BSD ring to dispatch the corresponding BSD command. 1187 * Find one BSD ring to dispatch the corresponding BSD command.
1031 * The Ring ID is returned. 1188 * The Ring ID is returned.
@@ -1085,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1085 struct drm_i915_private *dev_priv = dev->dev_private; 1242 struct drm_i915_private *dev_priv = dev->dev_private;
1086 struct eb_vmas *eb; 1243 struct eb_vmas *eb;
1087 struct drm_i915_gem_object *batch_obj; 1244 struct drm_i915_gem_object *batch_obj;
1088 struct drm_clip_rect *cliprects = NULL;
1089 struct intel_engine_cs *ring; 1245 struct intel_engine_cs *ring;
1090 struct intel_context *ctx; 1246 struct intel_context *ctx;
1091 struct i915_address_space *vm; 1247 struct i915_address_space *vm;
1092 const u32 ctx_id = i915_execbuffer2_get_context_id(*args); 1248 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1093 u64 exec_start = args->batch_start_offset, exec_len; 1249 u64 exec_start = args->batch_start_offset;
1094 u32 mask, flags; 1250 u32 flags;
1095 int ret, mode, i; 1251 int ret;
1096 bool need_relocs; 1252 bool need_relocs;
1097 1253
1098 if (!i915_gem_check_execbuffer(args)) 1254 if (!i915_gem_check_execbuffer(args))
@@ -1136,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1136 return -EINVAL; 1292 return -EINVAL;
1137 } 1293 }
1138 1294
1139 mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1140 mask = I915_EXEC_CONSTANTS_MASK;
1141 switch (mode) {
1142 case I915_EXEC_CONSTANTS_REL_GENERAL:
1143 case I915_EXEC_CONSTANTS_ABSOLUTE:
1144 case I915_EXEC_CONSTANTS_REL_SURFACE:
1145 if (mode != 0 && ring != &dev_priv->ring[RCS]) {
1146 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1147 return -EINVAL;
1148 }
1149
1150 if (mode != dev_priv->relative_constants_mode) {
1151 if (INTEL_INFO(dev)->gen < 4) {
1152 DRM_DEBUG("no rel constants on pre-gen4\n");
1153 return -EINVAL;
1154 }
1155
1156 if (INTEL_INFO(dev)->gen > 5 &&
1157 mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1158 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1159 return -EINVAL;
1160 }
1161
1162 /* The HW changed the meaning on this bit on gen6 */
1163 if (INTEL_INFO(dev)->gen >= 6)
1164 mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1165 }
1166 break;
1167 default:
1168 DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
1169 return -EINVAL;
1170 }
1171
1172 if (args->buffer_count < 1) { 1295 if (args->buffer_count < 1) {
1173 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); 1296 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1174 return -EINVAL; 1297 return -EINVAL;
1175 } 1298 }
1176 1299
1177 if (args->num_cliprects != 0) {
1178 if (ring != &dev_priv->ring[RCS]) {
1179 DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1180 return -EINVAL;
1181 }
1182
1183 if (INTEL_INFO(dev)->gen >= 5) {
1184 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1185 return -EINVAL;
1186 }
1187
1188 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1189 DRM_DEBUG("execbuf with %u cliprects\n",
1190 args->num_cliprects);
1191 return -EINVAL;
1192 }
1193
1194 cliprects = kcalloc(args->num_cliprects,
1195 sizeof(*cliprects),
1196 GFP_KERNEL);
1197 if (cliprects == NULL) {
1198 ret = -ENOMEM;
1199 goto pre_mutex_err;
1200 }
1201
1202 if (copy_from_user(cliprects,
1203 to_user_ptr(args->cliprects_ptr),
1204 sizeof(*cliprects)*args->num_cliprects)) {
1205 ret = -EFAULT;
1206 goto pre_mutex_err;
1207 }
1208 } else {
1209 if (args->DR4 == 0xffffffff) {
1210 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1211 args->DR4 = 0;
1212 }
1213
1214 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
1215 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
1216 return -EINVAL;
1217 }
1218 }
1219
1220 intel_runtime_pm_get(dev_priv); 1300 intel_runtime_pm_get(dev_priv);
1221 1301
1222 ret = i915_mutex_lock_interruptible(dev); 1302 ret = i915_mutex_lock_interruptible(dev);
@@ -1320,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1320 else 1400 else
1321 exec_start += i915_gem_obj_offset(batch_obj, vm); 1401 exec_start += i915_gem_obj_offset(batch_obj, vm);
1322 1402
1323 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas); 1403 ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
1404 args, &eb->vmas, batch_obj, exec_start, flags);
1324 if (ret) 1405 if (ret)
1325 goto err; 1406 goto err;
1326 1407
1327 ret = i915_switch_context(ring, ctx);
1328 if (ret)
1329 goto err;
1330
1331 if (ring == &dev_priv->ring[RCS] &&
1332 mode != dev_priv->relative_constants_mode) {
1333 ret = intel_ring_begin(ring, 4);
1334 if (ret)
1335 goto err;
1336
1337 intel_ring_emit(ring, MI_NOOP);
1338 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1339 intel_ring_emit(ring, INSTPM);
1340 intel_ring_emit(ring, mask << 16 | mode);
1341 intel_ring_advance(ring);
1342
1343 dev_priv->relative_constants_mode = mode;
1344 }
1345
1346 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1347 ret = i915_reset_gen7_sol_offsets(dev, ring);
1348 if (ret)
1349 goto err;
1350 }
1351
1352
1353 exec_len = args->batch_len;
1354 if (cliprects) {
1355 for (i = 0; i < args->num_cliprects; i++) {
1356 ret = i915_emit_box(dev, &cliprects[i],
1357 args->DR1, args->DR4);
1358 if (ret)
1359 goto err;
1360
1361 ret = ring->dispatch_execbuffer(ring,
1362 exec_start, exec_len,
1363 flags);
1364 if (ret)
1365 goto err;
1366 }
1367 } else {
1368 ret = ring->dispatch_execbuffer(ring,
1369 exec_start, exec_len,
1370 flags);
1371 if (ret)
1372 goto err;
1373 }
1374
1375 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1376
1377 i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
1378 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1379
1380err: 1408err:
1381 /* the request owns the ref now */ 1409 /* the request owns the ref now */
1382 i915_gem_context_unreference(ctx); 1410 i915_gem_context_unreference(ctx);
@@ -1385,8 +1413,6 @@ err:
1385 mutex_unlock(&dev->struct_mutex); 1413 mutex_unlock(&dev->struct_mutex);
1386 1414
1387pre_mutex_err: 1415pre_mutex_err:
1388 kfree(cliprects);
1389
1390 /* intel_gpu_busy should also get a ref, so it will free when the device 1416 /* intel_gpu_busy should also get a ref, so it will free when the device
1391 * is really idle. */ 1417 * is really idle. */
1392 intel_runtime_pm_put(dev_priv); 1418 intel_runtime_pm_put(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 644117855e01..b695d184c487 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -103,30 +103,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
103 return base; 103 return base;
104} 104}
105 105
106static int i915_setup_compression(struct drm_device *dev, int size) 106static int find_compression_threshold(struct drm_device *dev,
107 struct drm_mm_node *node,
108 int size,
109 int fb_cpp)
107{ 110{
108 struct drm_i915_private *dev_priv = dev->dev_private; 111 struct drm_i915_private *dev_priv = dev->dev_private;
109 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); 112 int compression_threshold = 1;
110 int ret; 113 int ret;
111 114
112 compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL); 115 /* HACK: This code depends on what we will do in *_enable_fbc. If that
113 if (!compressed_fb) 116 * code changes, this code needs to change as well.
114 goto err_llb; 117 *
118 * The enable_fbc code will attempt to use one of our 2 compression
119 * thresholds, so in that case we only have one fallback left.
120 */
115 121
116 /* Try to over-allocate to reduce reallocations and fragmentation */ 122 /* Try to over-allocate to reduce reallocations and fragmentation. */
117 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, 123 ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
118 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT); 124 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
119 if (ret) 125 if (ret == 0)
120 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, 126 return compression_threshold;
121 size >>= 1, 4096, 127
122 DRM_MM_SEARCH_DEFAULT); 128again:
123 if (ret) 129 /* HW's ability to limit the CFB is 1:4 */
130 if (compression_threshold > 4 ||
131 (fb_cpp == 2 && compression_threshold == 2))
132 return 0;
133
134 ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
135 size >>= 1, 4096,
136 DRM_MM_SEARCH_DEFAULT);
137 if (ret && INTEL_INFO(dev)->gen <= 4) {
138 return 0;
139 } else if (ret) {
140 compression_threshold <<= 1;
141 goto again;
142 } else {
143 return compression_threshold;
144 }
145}
146
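[Editor's note: find_compression_threshold() trades CFB size against compression ratio: each failed drm_mm allocation halves the requested size and doubles the threshold, capped at the hardware's 1:4 limit (1:2 once fb_cpp == 2), and gen4 and older bail out after the first fallback. An illustrative walk-through of the attempts, not taken from the patch:]

	/*
	 * With a requested size S and fb_cpp == 4, the allocations tried are:
	 *   2*S at threshold 1:1   (the initial over-allocation)
	 *   S   at threshold 1:1
	 *   S/2 at threshold 1:2
	 *   S/4 at threshold 1:4
	 * after which the threshold would exceed 4 and the function returns 0.
	 * With fb_cpp == 2 the search stops as soon as the threshold reaches 2.
	 */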
147static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
148{
149 struct drm_i915_private *dev_priv = dev->dev_private;
150 struct drm_mm_node *uninitialized_var(compressed_llb);
151 int ret;
152
153 ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
154 size, fb_cpp);
155 if (!ret)
124 goto err_llb; 156 goto err_llb;
157 else if (ret > 1) {
158 DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
159
160 }
161
162 dev_priv->fbc.threshold = ret;
125 163
126 if (HAS_PCH_SPLIT(dev)) 164 if (HAS_PCH_SPLIT(dev))
127 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 165 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
128 else if (IS_GM45(dev)) { 166 else if (IS_GM45(dev)) {
129 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 167 I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
130 } else { 168 } else {
131 compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); 169 compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
132 if (!compressed_llb) 170 if (!compressed_llb)
@@ -140,13 +178,12 @@ static int i915_setup_compression(struct drm_device *dev, int size)
140 dev_priv->fbc.compressed_llb = compressed_llb; 178 dev_priv->fbc.compressed_llb = compressed_llb;
141 179
142 I915_WRITE(FBC_CFB_BASE, 180 I915_WRITE(FBC_CFB_BASE,
143 dev_priv->mm.stolen_base + compressed_fb->start); 181 dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
144 I915_WRITE(FBC_LL_BASE, 182 I915_WRITE(FBC_LL_BASE,
145 dev_priv->mm.stolen_base + compressed_llb->start); 183 dev_priv->mm.stolen_base + compressed_llb->start);
146 } 184 }
147 185
148 dev_priv->fbc.compressed_fb = compressed_fb; 186 dev_priv->fbc.size = size / dev_priv->fbc.threshold;
149 dev_priv->fbc.size = size;
150 187
151 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", 188 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
152 size); 189 size);
@@ -155,14 +192,13 @@ static int i915_setup_compression(struct drm_device *dev, int size)
155 192
156err_fb: 193err_fb:
157 kfree(compressed_llb); 194 kfree(compressed_llb);
158 drm_mm_remove_node(compressed_fb); 195 drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
159err_llb: 196err_llb:
160 kfree(compressed_fb);
161 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); 197 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
162 return -ENOSPC; 198 return -ENOSPC;
163} 199}
164 200
165int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) 201int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
166{ 202{
167 struct drm_i915_private *dev_priv = dev->dev_private; 203 struct drm_i915_private *dev_priv = dev->dev_private;
168 204
@@ -175,7 +211,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
175 /* Release any current block */ 211 /* Release any current block */
176 i915_gem_stolen_cleanup_compression(dev); 212 i915_gem_stolen_cleanup_compression(dev);
177 213
178 return i915_setup_compression(dev, size); 214 return i915_setup_compression(dev, size, fb_cpp);
179} 215}
180 216
181void i915_gem_stolen_cleanup_compression(struct drm_device *dev) 217void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
@@ -185,10 +221,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
185 if (dev_priv->fbc.size == 0) 221 if (dev_priv->fbc.size == 0)
186 return; 222 return;
187 223
188 if (dev_priv->fbc.compressed_fb) { 224 drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
189 drm_mm_remove_node(dev_priv->fbc.compressed_fb);
190 kfree(dev_priv->fbc.compressed_fb);
191 }
192 225
193 if (dev_priv->fbc.compressed_llb) { 226 if (dev_priv->fbc.compressed_llb) {
194 drm_mm_remove_node(dev_priv->fbc.compressed_llb); 227 drm_mm_remove_node(dev_priv->fbc.compressed_llb);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 66cf41765bf9..45b6191efb58 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
327 struct drm_device *dev = error_priv->dev; 327 struct drm_device *dev = error_priv->dev;
328 struct drm_i915_private *dev_priv = dev->dev_private; 328 struct drm_i915_private *dev_priv = dev->dev_private;
329 struct drm_i915_error_state *error = error_priv->error; 329 struct drm_i915_error_state *error = error_priv->error;
330 struct drm_i915_error_object *obj;
330 int i, j, offset, elt; 331 int i, j, offset, elt;
331 int max_hangcheck_score; 332 int max_hangcheck_score;
332 333
@@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
395 error->pinned_bo_count[0]); 396 error->pinned_bo_count[0]);
396 397
397 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 398 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
398 struct drm_i915_error_object *obj;
399
400 obj = error->ring[i].batchbuffer; 399 obj = error->ring[i].batchbuffer;
401 if (obj) { 400 if (obj) {
402 err_puts(m, dev_priv->ring[i].name); 401 err_puts(m, dev_priv->ring[i].name);
@@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
459 } 458 }
460 } 459 }
461 460
461 if ((obj = error->semaphore_obj)) {
462 err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
463 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
464 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
465 elt * 4,
466 obj->pages[0][elt],
467 obj->pages[0][elt+1],
468 obj->pages[0][elt+2],
469 obj->pages[0][elt+3]);
470 }
471 }
472
462 if (error->overlay) 473 if (error->overlay)
463 intel_overlay_print_error_state(m, error->overlay); 474 intel_overlay_print_error_state(m, error->overlay);
464 475
@@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref)
529 kfree(error->ring[i].requests); 540 kfree(error->ring[i].requests);
530 } 541 }
531 542
543 i915_error_object_free(error->semaphore_obj);
532 kfree(error->active_bo); 544 kfree(error->active_bo);
533 kfree(error->overlay); 545 kfree(error->overlay);
534 kfree(error->display); 546 kfree(error->display);
@@ -746,7 +758,52 @@ static void i915_gem_record_fences(struct drm_device *dev,
746 } 758 }
747} 759}
748 760
761
762static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
763 struct drm_i915_error_state *error,
764 struct intel_engine_cs *ring,
765 struct drm_i915_error_ring *ering)
766{
767 struct intel_engine_cs *useless;
768 int i;
769
770 if (!i915_semaphore_is_enabled(dev_priv->dev))
771 return;
772
773 if (!error->semaphore_obj)
774 error->semaphore_obj =
775 i915_error_object_create(dev_priv,
776 dev_priv->semaphore_obj,
777 &dev_priv->gtt.base);
778
779 for_each_ring(useless, dev_priv, i) {
780 u16 signal_offset =
781 (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
782 u32 *tmp = error->semaphore_obj->pages[0];
783
784 ering->semaphore_mboxes[i] = tmp[signal_offset];
785 ering->semaphore_seqno[i] = ring->semaphore.sync_seqno[i];
786 }
787}
788
789static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
790 struct intel_engine_cs *ring,
791 struct drm_i915_error_ring *ering)
792{
793 ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
794 ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
795 ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
796 ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
797
798 if (HAS_VEBOX(dev_priv->dev)) {
799 ering->semaphore_mboxes[2] =
800 I915_READ(RING_SYNC_2(ring->mmio_base));
801 ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
802 }
803}
804
749static void i915_record_ring_state(struct drm_device *dev, 805static void i915_record_ring_state(struct drm_device *dev,
806 struct drm_i915_error_state *error,
750 struct intel_engine_cs *ring, 807 struct intel_engine_cs *ring,
751 struct drm_i915_error_ring *ering) 808 struct drm_i915_error_ring *ering)
752{ 809{
@@ -755,18 +812,10 @@ static void i915_record_ring_state(struct drm_device *dev,
755 if (INTEL_INFO(dev)->gen >= 6) { 812 if (INTEL_INFO(dev)->gen >= 6) {
756 ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); 813 ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
757 ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); 814 ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
758 ering->semaphore_mboxes[0] 815 if (INTEL_INFO(dev)->gen >= 8)
759 = I915_READ(RING_SYNC_0(ring->mmio_base)); 816 gen8_record_semaphore_state(dev_priv, error, ring, ering);
760 ering->semaphore_mboxes[1] 817 else
761 = I915_READ(RING_SYNC_1(ring->mmio_base)); 818 gen6_record_semaphore_state(dev_priv, ring, ering);
762 ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
763 ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
764 }
765
766 if (HAS_VEBOX(dev)) {
767 ering->semaphore_mboxes[2] =
768 I915_READ(RING_SYNC_2(ring->mmio_base));
769 ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
770 } 819 }
771 820
772 if (INTEL_INFO(dev)->gen >= 4) { 821 if (INTEL_INFO(dev)->gen >= 4) {
@@ -895,7 +944,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
895 944
896 error->ring[i].valid = true; 945 error->ring[i].valid = true;
897 946
898 i915_record_ring_state(dev, ring, &error->ring[i]); 947 i915_record_ring_state(dev, error, ring, &error->ring[i]);
899 948
900 request = i915_gem_find_active_request(ring); 949 request = i915_gem_find_active_request(ring);
901 if (request) { 950 if (request) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c0d7674c45cd..026f0a3f3b90 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
1090 return true; 1090 return true;
1091} 1091}
1092 1092
1093static void i915_digport_work_func(struct work_struct *work)
1094{
1095 struct drm_i915_private *dev_priv =
1096 container_of(work, struct drm_i915_private, dig_port_work);
1097 unsigned long irqflags;
1098 u32 long_port_mask, short_port_mask;
1099 struct intel_digital_port *intel_dig_port;
1100 int i, ret;
1101 u32 old_bits = 0;
1102
1103 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1104 long_port_mask = dev_priv->long_hpd_port_mask;
1105 dev_priv->long_hpd_port_mask = 0;
1106 short_port_mask = dev_priv->short_hpd_port_mask;
1107 dev_priv->short_hpd_port_mask = 0;
1108 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1109
1110 for (i = 0; i < I915_MAX_PORTS; i++) {
1111 bool valid = false;
1112 bool long_hpd = false;
1113 intel_dig_port = dev_priv->hpd_irq_port[i];
1114 if (!intel_dig_port || !intel_dig_port->hpd_pulse)
1115 continue;
1116
1117 if (long_port_mask & (1 << i)) {
1118 valid = true;
1119 long_hpd = true;
1120 } else if (short_port_mask & (1 << i))
1121 valid = true;
1122
1123 if (valid) {
1124 ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
1125 if (ret) {
1126 /* the handler returned true: fall back to old-school hpd */
1127 old_bits |= (1 << intel_dig_port->base.hpd_pin);
1128 }
1129 }
1130 }
1131
1132 if (old_bits) {
1133 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1134 dev_priv->hpd_event_bits |= old_bits;
1135 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1136 schedule_work(&dev_priv->hotplug_work);
1137 }
1138}
1139
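[Editor's note: i915_digport_work_func() uses the standard deferred-work idiom: snapshot and clear the IRQ-written port masks while holding irq_lock, then walk the snapshot with the lock dropped so the ->hpd_pulse() hooks are free to sleep. A generic sketch of the idiom; the helper is illustrative:]

	/* Sketch: claim the pending mask atomically, process it unlocked. */
	static u32 claim_pending_mask(struct drm_i915_private *dev_priv, u32 *maskp)
	{
		unsigned long irqflags;
		u32 pending;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		pending = *maskp;	/* e.g. &dev_priv->long_hpd_port_mask */
		*maskp = 0;		/* later interrupts start a fresh batch */
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		return pending;		/* now private to the worker thread */
	}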
1093/* 1140/*
1094 * Handle hotplug events outside the interrupt handler proper. 1141 * Handle hotplug events outside the interrupt handler proper.
1095 */ 1142 */
@@ -1221,6 +1268,131 @@ static void notify_ring(struct drm_device *dev,
1221 i915_queue_hangcheck(dev); 1268 i915_queue_hangcheck(dev);
1222} 1269}
1223 1270
1271static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
1272 struct intel_rps_ei *rps_ei)
1273{
1274 u32 cz_ts, cz_freq_khz;
1275 u32 render_count, media_count;
1276 u32 elapsed_render, elapsed_media, elapsed_time;
1277 u32 residency = 0;
1278
1279 cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1280 cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
1281
1282 render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
1283 media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
1284
1285 if (rps_ei->cz_clock == 0) {
1286 rps_ei->cz_clock = cz_ts;
1287 rps_ei->render_c0 = render_count;
1288 rps_ei->media_c0 = media_count;
1289
1290 return dev_priv->rps.cur_freq;
1291 }
1292
1293 elapsed_time = cz_ts - rps_ei->cz_clock;
1294 rps_ei->cz_clock = cz_ts;
1295
1296 elapsed_render = render_count - rps_ei->render_c0;
1297 rps_ei->render_c0 = render_count;
1298
1299 elapsed_media = media_count - rps_ei->media_c0;
1300 rps_ei->media_c0 = media_count;
1301
1302 /* Convert all the counters into the common unit of milliseconds */
1303 elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
1304 elapsed_render /= cz_freq_khz;
1305 elapsed_media /= cz_freq_khz;
1306
1307 /*
1308 * Calculate overall C0 residency percentage
1309 * only if the elapsed time is non-zero
1310 */
1311 if (elapsed_time) {
1312 residency =
1313 ((max(elapsed_render, elapsed_media) * 100)
1314 / elapsed_time);
1315 }
1316
1317 return residency;
1318}
1319
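[Editor's note: the divisions in vlv_c0_residency() normalise everything to milliseconds: the CZ timestamp delta is divided by VLV_CZ_CLOCK_TO_MILLI_SEC (100000 ticks per ms, i.e. a 100 MHz timestamp), and the C0 counter deltas are divided by cz_freq_khz. A worked example with made-up numbers:]

	/*
	 * Say the CZ timestamp advanced by 10,000,000 ticks -> 100 ms, the
	 * render C0 counter advanced by 90 ms worth of cz_freq_khz cycles and
	 * the media counter by 40 ms worth. Then:
	 *   residency = max(90, 40) * 100 / 100 = 90 (%)
	 * which meets VLV_RP_UP_EI_THRESHOLD (90), so the caller ramps up.
	 */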
1320/**
1321 * vlv_calc_delay_from_C0_counters - increase/decrease the frequency based on
1322 * GPU busyness calculated from the C0 counters of the render & media power wells
1323 * @dev_priv: DRM device private
1324 *
1325 */
1326static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1327{
1328 u32 residency_C0_up = 0, residency_C0_down = 0;
1329 u8 new_delay, adj;
1330
1331 dev_priv->rps.ei_interrupt_count++;
1332
1333 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
1334
1335
1336 if (dev_priv->rps.up_ei.cz_clock == 0) {
1337 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
1338 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
1339 return dev_priv->rps.cur_freq;
1340 }
1341
1342
1343 /*
1344 * To down throttle, C0 residency should be less than down threshold
1345 * for consecutive EI intervals. So calculate the down EI counters
1346 * only once every VLV_INT_COUNT_FOR_DOWN_EI interrupts
1347 */
1348 if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
1349
1350 dev_priv->rps.ei_interrupt_count = 0;
1351
1352 residency_C0_down = vlv_c0_residency(dev_priv,
1353 &dev_priv->rps.down_ei);
1354 } else {
1355 residency_C0_up = vlv_c0_residency(dev_priv,
1356 &dev_priv->rps.up_ei);
1357 }
1358
1359 new_delay = dev_priv->rps.cur_freq;
1360
1361 adj = dev_priv->rps.last_adj;
1362 /* C0 residency is greater than the up threshold. Increase frequency. */
1363 if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
1364 if (adj > 0)
1365 adj *= 2;
1366 else
1367 adj = 1;
1368
1369 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
1370 new_delay = dev_priv->rps.cur_freq + adj;
1371
1372 /*
1373 * For better performance, jump directly
1374 * to RPe if we're below it.
1375 */
1376 if (new_delay < dev_priv->rps.efficient_freq)
1377 new_delay = dev_priv->rps.efficient_freq;
1378
1379 } else if (!dev_priv->rps.ei_interrupt_count &&
1380 (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
1381 if (adj < 0)
1382 adj *= 2;
1383 else
1384 adj = -1;
1385 /*
1386 * This means C0 residency has stayed below the down threshold for
1387 * VLV_INT_COUNT_FOR_DOWN_EI consecutive intervals, so reduce the frequency
1388 */
1389 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
1390 new_delay = dev_priv->rps.cur_freq + adj;
1391 }
1392
1393 return new_delay;
1394}
1395
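[Editor's note: a compact restatement of the ramp policy above; rps.last_adj doubles while the busy/idle trend persists, and the down branch is only evaluated once every VLV_INT_COUNT_FOR_DOWN_EI (5) EI interrupts. Sketch only, assuming nothing beyond what the function shows:]

	/* Sketch: frequency steps grow geometrically while the trend holds. */
	static int next_rps_adj(int last_adj, bool ramp_up)
	{
		if (ramp_up)
			return last_adj > 0 ? last_adj * 2 : 1;	/* +1, +2, +4, ... */
		return last_adj < 0 ? last_adj * 2 : -1;	/* -1, -2, -4, ... */
	}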
1224static void gen6_pm_rps_work(struct work_struct *work) 1396static void gen6_pm_rps_work(struct work_struct *work)
1225{ 1397{
1226 struct drm_i915_private *dev_priv = 1398 struct drm_i915_private *dev_priv =
@@ -1269,6 +1441,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
1269 else 1441 else
1270 new_delay = dev_priv->rps.min_freq_softlimit; 1442 new_delay = dev_priv->rps.min_freq_softlimit;
1271 adj = 0; 1443 adj = 0;
1444 } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1445 new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
1272 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1446 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1273 if (adj < 0) 1447 if (adj < 0)
1274 adj *= 2; 1448 adj *= 2;
@@ -1517,23 +1691,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1517#define HPD_STORM_DETECT_PERIOD 1000 1691#define HPD_STORM_DETECT_PERIOD 1000
1518#define HPD_STORM_THRESHOLD 5 1692#define HPD_STORM_THRESHOLD 5
1519 1693
1694static int ilk_port_to_hotplug_shift(enum port port)
1695{
1696 switch (port) {
1697 case PORT_A:
1698 case PORT_E:
1699 default:
1700 return -1;
1701 case PORT_B:
1702 return 0;
1703 case PORT_C:
1704 return 8;
1705 case PORT_D:
1706 return 16;
1707 }
1708}
1709
1710static int g4x_port_to_hotplug_shift(enum port port)
1711{
1712 switch (port) {
1713 case PORT_A:
1714 case PORT_E:
1715 default:
1716 return -1;
1717 case PORT_B:
1718 return 17;
1719 case PORT_C:
1720 return 19;
1721 case PORT_D:
1722 return 21;
1723 }
1724}
1725
1726static inline enum port get_port_from_pin(enum hpd_pin pin)
1727{
1728 switch (pin) {
1729 case HPD_PORT_B:
1730 return PORT_B;
1731 case HPD_PORT_C:
1732 return PORT_C;
1733 case HPD_PORT_D:
1734 return PORT_D;
1735 default:
1736 return PORT_A; /* no hpd */
1737 }
1738}
1739
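[Editor's note: the shift tables exist because each port's long/short pulse status bit sits at a different offset, and g4x reports it in the hotplug status itself while the PCH platforms report it in PCH_PORT_HOTPLUG. A hedged sketch of the decode the handler below performs inline; the helper is illustrative:]

	static bool hpd_pulse_is_long(struct drm_device *dev, enum port port,
				      u32 hotplug_trigger, u32 dig_hotplug_reg)
	{
		int shift;
		u32 status;

		if (IS_G4X(dev)) {
			shift = g4x_port_to_hotplug_shift(port);
			status = hotplug_trigger;
		} else {
			shift = ilk_port_to_hotplug_shift(port);
			status = dig_hotplug_reg;
		}

		/* ports with no pulse-duration field return shift == -1 */
		return shift >= 0 &&
			((status >> shift) & PORTB_HOTPLUG_LONG_DETECT);
	}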
1520static inline void intel_hpd_irq_handler(struct drm_device *dev, 1740static inline void intel_hpd_irq_handler(struct drm_device *dev,
1521 u32 hotplug_trigger, 1741 u32 hotplug_trigger,
1742 u32 dig_hotplug_reg,
1522 const u32 *hpd) 1743 const u32 *hpd)
1523{ 1744{
1524 struct drm_i915_private *dev_priv = dev->dev_private; 1745 struct drm_i915_private *dev_priv = dev->dev_private;
1525 int i; 1746 int i;
1747 enum port port;
1526 bool storm_detected = false; 1748 bool storm_detected = false;
1749 bool queue_dig = false, queue_hp = false;
1750 u32 dig_shift;
1751 u32 dig_port_mask = 0;
1527 1752
1528 if (!hotplug_trigger) 1753 if (!hotplug_trigger)
1529 return; 1754 return;
1530 1755
1531 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1756 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1532 hotplug_trigger); 1757 hotplug_trigger, dig_hotplug_reg);
1533 1758
1534 spin_lock(&dev_priv->irq_lock); 1759 spin_lock(&dev_priv->irq_lock);
1535 for (i = 1; i < HPD_NUM_PINS; i++) { 1760 for (i = 1; i < HPD_NUM_PINS; i++) {
1761 if (!(hpd[i] & hotplug_trigger))
1762 continue;
1763
1764 port = get_port_from_pin(i);
1765 if (port && dev_priv->hpd_irq_port[port]) {
1766 bool long_hpd;
1767
1768 if (IS_G4X(dev)) {
1769 dig_shift = g4x_port_to_hotplug_shift(port);
1770 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1771 } else {
1772 dig_shift = ilk_port_to_hotplug_shift(port);
1773 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1774 }
1536 1775
1776 DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
1777 /* for long HPD pulses we want to have the digital queue happen,
1778 but we still want HPD storm detection to function. */
1779 if (long_hpd) {
1780 dev_priv->long_hpd_port_mask |= (1 << port);
1781 dig_port_mask |= hpd[i];
1782 } else {
1783 /* for short HPD just trigger the digital queue */
1784 dev_priv->short_hpd_port_mask |= (1 << port);
1785 hotplug_trigger &= ~hpd[i];
1786 }
1787 queue_dig = true;
1788 }
1789 }
1790
1791 for (i = 1; i < HPD_NUM_PINS; i++) {
1537 if (hpd[i] & hotplug_trigger && 1792 if (hpd[i] & hotplug_trigger &&
1538 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { 1793 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1539 /* 1794 /*
@@ -1553,7 +1808,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1553 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1808 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1554 continue; 1809 continue;
1555 1810
1556 dev_priv->hpd_event_bits |= (1 << i); 1811 if (!(dig_port_mask & hpd[i])) {
1812 dev_priv->hpd_event_bits |= (1 << i);
1813 queue_hp = true;
1814 }
1815
1557 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1816 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1558 dev_priv->hpd_stats[i].hpd_last_jiffies 1817 dev_priv->hpd_stats[i].hpd_last_jiffies
1559 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1818 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@@ -1582,7 +1841,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1582 * queue for otherwise the flush_work in the pageflip code will 1841 * queue for otherwise the flush_work in the pageflip code will
1583 * deadlock. 1842 * deadlock.
1584 */ 1843 */
1585 schedule_work(&dev_priv->hotplug_work); 1844 if (queue_dig)
1845 schedule_work(&dev_priv->dig_port_work);
1846 if (queue_hp)
1847 schedule_work(&dev_priv->hotplug_work);
1586} 1848}
1587 1849
1588static void gmbus_irq_handler(struct drm_device *dev) 1850static void gmbus_irq_handler(struct drm_device *dev)
@@ -1823,11 +2085,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
1823 if (IS_G4X(dev)) { 2085 if (IS_G4X(dev)) {
1824 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2086 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1825 2087
1826 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x); 2088 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
1827 } else { 2089 } else {
1828 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2090 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1829 2091
1830 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 2092 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
1831 } 2093 }
1832 2094
1833 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && 2095 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
@@ -1925,8 +2187,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1925 struct drm_i915_private *dev_priv = dev->dev_private; 2187 struct drm_i915_private *dev_priv = dev->dev_private;
1926 int pipe; 2188 int pipe;
1927 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2189 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2190 u32 dig_hotplug_reg;
2191
2192 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2193 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1928 2194
1929 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 2195 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
1930 2196
1931 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2197 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1932 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2198 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -2032,8 +2298,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2032 struct drm_i915_private *dev_priv = dev->dev_private; 2298 struct drm_i915_private *dev_priv = dev->dev_private;
2033 int pipe; 2299 int pipe;
2034 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2300 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2301 u32 dig_hotplug_reg;
2035 2302
2036 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 2303 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2304 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2305
2306 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2037 2307
2038 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2308 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2039 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2309 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2780,12 +3050,7 @@ static bool
2780ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 3050ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2781{ 3051{
2782 if (INTEL_INFO(dev)->gen >= 8) { 3052 if (INTEL_INFO(dev)->gen >= 8) {
2783 /* 3053 return (ipehr >> 23) == 0x1c;
2784 * FIXME: gen8 semaphore support - currently we don't emit
2785 * semaphores on bdw anyway, but this needs to be addressed when
2786 * we merge that code.
2787 */
2788 return false;
2789 } else { 3054 } else {
2790 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 3055 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2791 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 3056 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
@@ -2794,19 +3059,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2794} 3059}
2795 3060
2796static struct intel_engine_cs * 3061static struct intel_engine_cs *
2797semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) 3062semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2798{ 3063{
2799 struct drm_i915_private *dev_priv = ring->dev->dev_private; 3064 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2800 struct intel_engine_cs *signaller; 3065 struct intel_engine_cs *signaller;
2801 int i; 3066 int i;
2802 3067
2803 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 3068 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2804 /* 3069 for_each_ring(signaller, dev_priv, i) {
2805 * FIXME: gen8 semaphore support - currently we don't emit 3070 if (ring == signaller)
2806 * semaphores on bdw anyway, but this needs to be addressed when 3071 continue;
2807 * we merge that code. 3072
2808 */ 3073 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2809 return NULL; 3074 return signaller;
3075 }
2810 } else { 3076 } else {
2811 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 3077 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2812 3078
@@ -2819,8 +3085,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
2819 } 3085 }
2820 } 3086 }
2821 3087
2822 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n", 3088 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2823 ring->id, ipehr); 3089 ring->id, ipehr, offset);
2824 3090
2825 return NULL; 3091 return NULL;
2826} 3092}
@@ -2830,7 +3096,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2830{ 3096{
2831 struct drm_i915_private *dev_priv = ring->dev->dev_private; 3097 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2832 u32 cmd, ipehr, head; 3098 u32 cmd, ipehr, head;
2833 int i; 3099 u64 offset = 0;
3100 int i, backwards;
2834 3101
2835 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 3102 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2836 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 3103 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
@@ -2839,13 +3106,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2839 /* 3106 /*
2840 * HEAD is likely pointing to the dword after the actual command, 3107 * HEAD is likely pointing to the dword after the actual command,
2841 * so scan backwards until we find the MBOX. But limit it to just 3 3108 * so scan backwards until we find the MBOX. But limit it to just 3
2842 * dwords. Note that we don't care about ACTHD here since that might 3109 * or 4 dwords depending on the semaphore wait command size.
3110 * Note that we don't care about ACTHD here since that might
2843 * point at a batch, and semaphores are always emitted into the 3111 * point at a batch, and semaphores are always emitted into the
2844 * ringbuffer itself. 3112 * ringbuffer itself.
2845 */ 3113 */
2846 head = I915_READ_HEAD(ring) & HEAD_ADDR; 3114 head = I915_READ_HEAD(ring) & HEAD_ADDR;
3115 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2847 3116
2848 for (i = 4; i; --i) { 3117 for (i = backwards; i; --i) {
2849 /* 3118 /*
2850 * Be paranoid and presume the hw has gone off into the wild - 3119 * Be paranoid and presume the hw has gone off into the wild -
2851 * our ring is smaller than what the hardware (and hence 3120 * our ring is smaller than what the hardware (and hence
@@ -2865,7 +3134,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2865 return NULL; 3134 return NULL;
2866 3135
2867 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 3136 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2868 return semaphore_wait_to_signaller_ring(ring, ipehr); 3137 if (INTEL_INFO(ring->dev)->gen >= 8) {
3138 offset = ioread32(ring->buffer->virtual_start + head + 12);
3139 offset <<= 32;
3140 offset |= ioread32(ring->buffer->virtual_start + head + 8);
3141 }
3142 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2869} 3143}
2870 3144
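[Editor's note: the head+4/+8/+12 reads above follow the 4-dword gen8 MI_SEMAPHORE_WAIT (MI_INSTR(0x1c, 2)), versus the 3-dword gen6/7 MI_SEMAPHORE_MBOX, which is why the backwards scan grows from 4 to 5 dwords. The layout being parsed, assuming the wait is emitted with the POLL/SAD_GTE_SDD flags defined in the i915_reg.h hunk further down:]

	/*
	 * u32 slots relative to the matched command header:
	 *   head + 0 : MI_SEMAPHORE_WAIT | MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD
	 *   head + 4 : semaphore data (the seqno the waiter polls for)
	 *   head + 8 : semaphore GGTT address, low 32 bits
	 *   head +12 : semaphore GGTT address, high 32 bits
	 */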
2871static int semaphore_passed(struct intel_engine_cs *ring) 3145static int semaphore_passed(struct intel_engine_cs *ring)
@@ -4354,12 +4628,17 @@ void intel_irq_init(struct drm_device *dev)
4354 struct drm_i915_private *dev_priv = dev->dev_private; 4628 struct drm_i915_private *dev_priv = dev->dev_private;
4355 4629
4356 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 4630 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4631 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4357 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 4632 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4358 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4633 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4359 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4634 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4360 4635
4361 /* Let's track the enabled rps events */ 4636 /* Let's track the enabled rps events */
4362 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4637 if (IS_VALLEYVIEW(dev))
4638 /* WaGsvRC0ResidencyMethod:vlv */
4639 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4640 else
4641 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4363 4642
4364 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4643 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4365 i915_hangcheck_elapsed, 4644 i915_hangcheck_elapsed,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 348856787b7c..2d2c4deb3e87 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -240,7 +240,7 @@
240#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) 240#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
241#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) 241#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
242#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) 242#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
243#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ 243#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
244#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) 244#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
245#define MI_SEMAPHORE_UPDATE (1<<21) 245#define MI_SEMAPHORE_UPDATE (1<<21)
246#define MI_SEMAPHORE_COMPARE (1<<20) 246#define MI_SEMAPHORE_COMPARE (1<<20)
@@ -266,6 +266,11 @@
266#define MI_RESTORE_EXT_STATE_EN (1<<2) 266#define MI_RESTORE_EXT_STATE_EN (1<<2)
267#define MI_FORCE_RESTORE (1<<1) 267#define MI_FORCE_RESTORE (1<<1)
268#define MI_RESTORE_INHIBIT (1<<0) 268#define MI_RESTORE_INHIBIT (1<<0)
269#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
270#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
271#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
272#define MI_SEMAPHORE_POLL (1<<15)
273#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
269#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 274#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
270#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 275#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
271#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 276#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -360,6 +365,7 @@
360#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ 365#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
361#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) 366#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
362#define PIPE_CONTROL_NOTIFY (1<<8) 367#define PIPE_CONTROL_NOTIFY (1<<8)
368#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
363#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) 369#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
364#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) 370#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
365#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) 371#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
@@ -525,6 +531,7 @@ enum punit_power_well {
525#define PUNIT_REG_GPU_FREQ_STS 0xd8 531#define PUNIT_REG_GPU_FREQ_STS 0xd8
526#define GENFREQSTATUS (1<<0) 532#define GENFREQSTATUS (1<<0)
527#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc 533#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
534#define PUNIT_REG_CZ_TIMESTAMP 0xce
528 535
529#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ 536#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
530#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ 537#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
@@ -550,6 +557,11 @@ enum punit_power_well {
550#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 557#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
551#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 558#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
552 559
560#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
561#define VLV_RP_UP_EI_THRESHOLD 90
562#define VLV_RP_DOWN_EI_THRESHOLD 70
563#define VLV_INT_COUNT_FOR_DOWN_EI 5
564
553/* vlv2 north clock has */ 565/* vlv2 north clock has */
554#define CCK_FUSE_REG 0x8 566#define CCK_FUSE_REG 0x8
555#define CCK_FUSE_HPLL_FREQ_MASK 0x3 567#define CCK_FUSE_HPLL_FREQ_MASK 0x3
@@ -584,6 +596,11 @@ enum punit_power_well {
584#define DSI_PLL_M1_DIV_SHIFT 0 596#define DSI_PLL_M1_DIV_SHIFT 0
585#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) 597#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
586#define CCK_DISPLAY_CLOCK_CONTROL 0x6b 598#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
599#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
600#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
601#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
602#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
603#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
587 604
588/** 605/**
589 * DOC: DPIO 606 * DOC: DPIO
@@ -5383,6 +5400,7 @@ enum punit_power_well {
5383#define VLV_GTLC_ALLOWWAKEERR (1 << 1) 5400#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
5384#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) 5401#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
5385#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) 5402#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
5403#define VLV_GTLC_SURVIVABILITY_REG 0x130098
5386#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 5404#define FORCEWAKE_MT 0xa188 /* multi-threaded */
5387#define FORCEWAKE_KERNEL 0x1 5405#define FORCEWAKE_KERNEL 0x1
5388#define FORCEWAKE_USER 0x2 5406#define FORCEWAKE_USER 0x2
@@ -5530,6 +5548,8 @@ enum punit_power_well {
5530#define GEN6_GT_GFX_RC6_LOCKED 0x138104 5548#define GEN6_GT_GFX_RC6_LOCKED 0x138104
5531#define VLV_COUNTER_CONTROL 0x138104 5549#define VLV_COUNTER_CONTROL 0x138104
5532#define VLV_COUNT_RANGE_HIGH (1<<15) 5550#define VLV_COUNT_RANGE_HIGH (1<<15)
5551#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
5552#define VLV_RENDER_RC0_COUNT_EN (1<<4)
5533#define VLV_MEDIA_RC6_COUNT_EN (1<<1) 5553#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
5534#define VLV_RENDER_RC6_COUNT_EN (1<<0) 5554#define VLV_RENDER_RC6_COUNT_EN (1<<0)
5535#define GEN6_GT_GFX_RC6 0x138108 5555#define GEN6_GT_GFX_RC6 0x138108
@@ -5538,6 +5558,8 @@ enum punit_power_well {
5538 5558
5539#define GEN6_GT_GFX_RC6p 0x13810C 5559#define GEN6_GT_GFX_RC6p 0x13810C
5540#define GEN6_GT_GFX_RC6pp 0x138110 5560#define GEN6_GT_GFX_RC6pp 0x138110
5561#define VLV_RENDER_C0_COUNT_REG 0x138118
5562#define VLV_MEDIA_C0_COUNT_REG 0x13811C
5541 5563
5542#define GEN6_PCODE_MAILBOX 0x138124 5564#define GEN6_PCODE_MAILBOX 0x138124
5543#define GEN6_PCODE_READY (1<<31) 5565#define GEN6_PCODE_READY (1<<31)
@@ -5772,6 +5794,7 @@ enum punit_power_well {
5772#define TRANS_DDI_FUNC_ENABLE (1<<31) 5794#define TRANS_DDI_FUNC_ENABLE (1<<31)
5773/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ 5795/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
5774#define TRANS_DDI_PORT_MASK (7<<28) 5796#define TRANS_DDI_PORT_MASK (7<<28)
5797#define TRANS_DDI_PORT_SHIFT 28
5775#define TRANS_DDI_SELECT_PORT(x) ((x)<<28) 5798#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
5776#define TRANS_DDI_PORT_NONE (0<<28) 5799#define TRANS_DDI_PORT_NONE (0<<28)
5777#define TRANS_DDI_MODE_SELECT_MASK (7<<24) 5800#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
@@ -5899,10 +5922,12 @@ enum punit_power_well {
5899/* WRPLL */ 5922/* WRPLL */
5900#define WRPLL_CTL1 0x46040 5923#define WRPLL_CTL1 0x46040
5901#define WRPLL_CTL2 0x46060 5924#define WRPLL_CTL2 0x46060
5925#define WRPLL_CTL(pll) (pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
5902#define WRPLL_PLL_ENABLE (1<<31) 5926#define WRPLL_PLL_ENABLE (1<<31)
5903#define WRPLL_PLL_SELECT_SSC (0x01<<28) 5927#define WRPLL_PLL_SSC (1<<28)
5904#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) 5928#define WRPLL_PLL_NON_SSC (2<<28)
5905#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) 5929#define WRPLL_PLL_LCPLL (3<<28)
5930#define WRPLL_PLL_REF_MASK (3<<28)
5906/* WRPLL divider programming */ 5931/* WRPLL divider programming */
5907#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) 5932#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
5908#define WRPLL_DIVIDER_REF_MASK (0xff) 5933#define WRPLL_DIVIDER_REF_MASK (0xff)
@@ -5921,6 +5946,7 @@ enum punit_power_well {
5921#define PORT_CLK_SEL_LCPLL_1350 (1<<29) 5946#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
5922#define PORT_CLK_SEL_LCPLL_810 (2<<29) 5947#define PORT_CLK_SEL_LCPLL_810 (2<<29)
5923#define PORT_CLK_SEL_SPLL (3<<29) 5948#define PORT_CLK_SEL_SPLL (3<<29)
5949#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
5924#define PORT_CLK_SEL_WRPLL1 (4<<29) 5950#define PORT_CLK_SEL_WRPLL1 (4<<29)
5925#define PORT_CLK_SEL_WRPLL2 (5<<29) 5951#define PORT_CLK_SEL_WRPLL2 (5<<29)
5926#define PORT_CLK_SEL_NONE (7<<29) 5952#define PORT_CLK_SEL_NONE (7<<29)
@@ -5962,7 +5988,10 @@ enum punit_power_well {
5962#define LCPLL_CD_SOURCE_FCLK (1<<21) 5988#define LCPLL_CD_SOURCE_FCLK (1<<21)
5963#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19) 5989#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
5964 5990
5965#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) 5991/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
5992 * since on HSW we can't write to it using I915_WRITE. */
5993#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
5994#define D_COMP_BDW 0x138144
5966#define D_COMP_RCOMP_IN_PROGRESS (1<<9) 5995#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
5967#define D_COMP_COMP_FORCE (1<<8) 5996#define D_COMP_COMP_FORCE (1<<8)
5968#define D_COMP_COMP_DISABLE (1<<0) 5997#define D_COMP_COMP_DISABLE (1<<0)
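[Editor's note: the comment exists because on HSW the D_COMP value is routed through the pcode mailbox rather than plain MMIO, while BDW exposes it as an ordinary register at 0x138144. A hedged sketch of the split, modelled on the hsw_read_dcomp()/hsw_write_dcomp() helpers the comment names; treat the GEN6_PCODE_WRITE_D_COMP command used here as an assumption:]

	static void write_dcomp(struct drm_i915_private *dev_priv, u32 val)
	{
		if (IS_HASWELL(dev_priv->dev)) {
			/* plain I915_WRITE to D_COMP_HSW is ignored on HSW */
			if (sandybridge_pcode_write(dev_priv,
						    GEN6_PCODE_WRITE_D_COMP, val))
				DRM_ERROR("Failed to write to D_COMP\n");
		} else {
			I915_WRITE(D_COMP_BDW, val);
			POSTING_READ(D_COMP_BDW);
		}
	}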
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8da5ef9f4828..88db4b6b6884 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
137 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); 137 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
138} 138}
139 139
140static void hsw_crt_pre_enable(struct intel_encoder *encoder)
141{
142 struct drm_device *dev = encoder->base.dev;
143 struct drm_i915_private *dev_priv = dev->dev_private;
144
145 WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
146 I915_WRITE(SPLL_CTL,
147 SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
148 POSTING_READ(SPLL_CTL);
149 udelay(20);
150}
151
140/* Note: The caller is required to filter out dpms modes not supported by the 152/* Note: The caller is required to filter out dpms modes not supported by the
141 * platform. */ 153 * platform. */
142static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) 154static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder)
194 intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); 206 intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
195} 207}
196 208
209
210static void hsw_crt_post_disable(struct intel_encoder *encoder)
211{
212 struct drm_device *dev = encoder->base.dev;
213 struct drm_i915_private *dev_priv = dev->dev_private;
214 uint32_t val;
215
216 DRM_DEBUG_KMS("Disabling SPLL\n");
217 val = I915_READ(SPLL_CTL);
218 WARN_ON(!(val & SPLL_PLL_ENABLE));
219 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
220 POSTING_READ(SPLL_CTL);
221}
222
197static void intel_enable_crt(struct intel_encoder *encoder) 223static void intel_enable_crt(struct intel_encoder *encoder)
198{ 224{
199 struct intel_crt *crt = intel_encoder_to_crt(encoder); 225 struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
289 pipe_config->pipe_bpp = 24; 315 pipe_config->pipe_bpp = 24;
290 316
291 /* FDI must always be 2.7 GHz */ 317 /* FDI must always be 2.7 GHz */
292 if (HAS_DDI(dev)) 318 if (HAS_DDI(dev)) {
319 pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
293 pipe_config->port_clock = 135000 * 2; 320 pipe_config->port_clock = 135000 * 2;
321 }
294 322
295 return true; 323 return true;
296} 324}
@@ -860,6 +888,8 @@ void intel_crt_init(struct drm_device *dev)
860 if (HAS_DDI(dev)) { 888 if (HAS_DDI(dev)) {
861 crt->base.get_config = hsw_crt_get_config; 889 crt->base.get_config = hsw_crt_get_config;
862 crt->base.get_hw_state = intel_ddi_get_hw_state; 890 crt->base.get_hw_state = intel_ddi_get_hw_state;
891 crt->base.pre_enable = hsw_crt_pre_enable;
892 crt->base.post_disable = hsw_crt_post_disable;
863 } else { 893 } else {
864 crt->base.get_config = intel_crt_get_config; 894 crt->base.get_config = intel_crt_get_config;
865 crt->base.get_hw_state = intel_crt_get_hw_state; 895 crt->base.get_hw_state = intel_crt_get_hw_state;
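[Editor's note: with these hooks the CRT encoder owns SPLL directly, replacing the refcounted ddi_plls bookkeeping removed from intel_ddi.c below; on HSW the only SPLL consumer here is the FDI link feeding the CRT. The ordering that makes the hooks safe, restated as a sketch:]

	/*
	 * enable:  ->pre_enable()  turns SPLL on and waits ~20 us for lock,
	 *          so the pipe/port are brought up against a running PLL;
	 * disable: pipe/port go down first, then ->post_disable() turns
	 *          SPLL off, so the PLL never disappears under an active port.
	 */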
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ded60139820e..b2267249c1c0 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -277,7 +277,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
277 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 277 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
278 278
279 /* Configure Port Clock Select */ 279 /* Configure Port Clock Select */
280 I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel); 280 I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
281 WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
281 282
282 /* Start the training iterating through available voltages and emphasis, 283 /* Start the training iterating through available voltages and emphasis,
283 * testing each value twice. */ 284 * testing each value twice. */
@@ -385,53 +386,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
385 return ret; 386 return ret;
386} 387}
387 388
388void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
389{
390 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
391 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
392 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
393 uint32_t val;
394
395 switch (intel_crtc->ddi_pll_sel) {
396 case PORT_CLK_SEL_SPLL:
397 plls->spll_refcount--;
398 if (plls->spll_refcount == 0) {
399 DRM_DEBUG_KMS("Disabling SPLL\n");
400 val = I915_READ(SPLL_CTL);
401 WARN_ON(!(val & SPLL_PLL_ENABLE));
402 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
403 POSTING_READ(SPLL_CTL);
404 }
405 break;
406 case PORT_CLK_SEL_WRPLL1:
407 plls->wrpll1_refcount--;
408 if (plls->wrpll1_refcount == 0) {
409 DRM_DEBUG_KMS("Disabling WRPLL 1\n");
410 val = I915_READ(WRPLL_CTL1);
411 WARN_ON(!(val & WRPLL_PLL_ENABLE));
412 I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
413 POSTING_READ(WRPLL_CTL1);
414 }
415 break;
416 case PORT_CLK_SEL_WRPLL2:
417 plls->wrpll2_refcount--;
418 if (plls->wrpll2_refcount == 0) {
419 DRM_DEBUG_KMS("Disabling WRPLL 2\n");
420 val = I915_READ(WRPLL_CTL2);
421 WARN_ON(!(val & WRPLL_PLL_ENABLE));
422 I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
423 POSTING_READ(WRPLL_CTL2);
424 }
425 break;
426 }
427
428 WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
429 WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
430 WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
431
432 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
433}
434
435#define LC_FREQ 2700 389#define LC_FREQ 2700
436#define LC_FREQ_2K (LC_FREQ * 2000) 390#define LC_FREQ_2K (LC_FREQ * 2000)
437 391
@@ -592,9 +546,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
592 u32 wrpll; 546 u32 wrpll;
593 547
594 wrpll = I915_READ(reg); 548 wrpll = I915_READ(reg);
595 switch (wrpll & SPLL_PLL_REF_MASK) { 549 switch (wrpll & WRPLL_PLL_REF_MASK) {
596 case SPLL_PLL_SSC: 550 case WRPLL_PLL_SSC:
597 case SPLL_PLL_NON_SSC: 551 case WRPLL_PLL_NON_SSC:
598 /* 552 /*
599 * We could calculate spread here, but our checking 553 * We could calculate spread here, but our checking
600 * code only cares about 5% accuracy, and spread is a max of 554 * code only cares about 5% accuracy, and spread is a max of
@@ -602,7 +556,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
 		 */
 		refclk = 135;
 		break;
-	case SPLL_PLL_LCPLL:
+	case WRPLL_PLL_LCPLL:
 		refclk = LC_FREQ;
 		break;
 	default:
@@ -622,11 +576,10 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
 				 struct intel_crtc_config *pipe_config)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-	enum port port = intel_ddi_get_encoder_port(encoder);
 	int link_clock = 0;
 	u32 val, pll;
 
-	val = I915_READ(PORT_CLK_SEL(port));
+	val = pipe_config->ddi_pll_sel;
 	switch (val & PORT_CLK_SEL_MASK) {
 	case PORT_CLK_SEL_LCPLL_810:
 		link_clock = 81000;
@@ -750,173 +703,37 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
-	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
 	int type = intel_encoder->type;
-	enum pipe pipe = intel_crtc->pipe;
 	int clock = intel_crtc->config.port_clock;
 
-	intel_ddi_put_crtc_pll(crtc);
-
-	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		switch (intel_dp->link_bw) {
-		case DP_LINK_BW_1_62:
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
-			break;
-		case DP_LINK_BW_2_7:
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
-			break;
-		case DP_LINK_BW_5_4:
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
-			break;
-		default:
-			DRM_ERROR("Link bandwidth %d unsupported\n",
-				  intel_dp->link_bw);
-			return false;
-		}
+	intel_put_shared_dpll(intel_crtc);
 
-	} else if (type == INTEL_OUTPUT_HDMI) {
-		uint32_t reg, val;
+	if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_shared_dpll *pll;
+		uint32_t val;
 		unsigned p, n2, r2;
 
 		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
-		val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+		val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
 		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
 		      WRPLL_DIVIDER_POST(p);
 
-		if (val == I915_READ(WRPLL_CTL1)) {
-			DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL1;
-		} else if (val == I915_READ(WRPLL_CTL2)) {
-			DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL2;
-		} else if (plls->wrpll1_refcount == 0) {
-			DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL1;
-		} else if (plls->wrpll2_refcount == 0) {
-			DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL2;
-		} else {
-			DRM_ERROR("No WRPLLs available!\n");
-			return false;
-		}
-
-		DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
-			      clock, p, n2, r2);
-
-		if (reg == WRPLL_CTL1) {
-			plls->wrpll1_refcount++;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
-		} else {
-			plls->wrpll2_refcount++;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
-		}
+		intel_crtc->config.dpll_hw_state.wrpll = val;
 
-	} else if (type == INTEL_OUTPUT_ANALOG) {
-		if (plls->spll_refcount == 0) {
-			DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
-				      pipe_name(pipe));
-			plls->spll_refcount++;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
-		} else {
-			DRM_ERROR("SPLL already in use\n");
+		pll = intel_get_shared_dpll(intel_crtc);
+		if (pll == NULL) {
+			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+					 pipe_name(intel_crtc->pipe));
 			return false;
 		}
 
-	} else {
-		WARN(1, "Invalid DDI encoder type %d\n", type);
-		return false;
+		intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
 	}
 
 	return true;
 }
 
-/*
- * To be called after intel_ddi_pll_select(). That one selects the PLL to be
- * used, this one actually enables the PLL.
- */
-void intel_ddi_pll_enable(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
-	int clock = crtc->config.port_clock;
-	uint32_t reg, cur_val, new_val;
-	int refcount;
-	const char *pll_name;
-	uint32_t enable_bit = (1 << 31);
-	unsigned int p, n2, r2;
-
-	BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
-	BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
-
-	switch (crtc->ddi_pll_sel) {
-	case PORT_CLK_SEL_LCPLL_2700:
-	case PORT_CLK_SEL_LCPLL_1350:
-	case PORT_CLK_SEL_LCPLL_810:
-		/*
-		 * LCPLL should always be enabled at this point of the mode set
-		 * sequence, so nothing to do.
-		 */
-		return;
-
-	case PORT_CLK_SEL_SPLL:
-		pll_name = "SPLL";
-		reg = SPLL_CTL;
-		refcount = plls->spll_refcount;
-		new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
-			  SPLL_PLL_SSC;
-		break;
-
-	case PORT_CLK_SEL_WRPLL1:
-	case PORT_CLK_SEL_WRPLL2:
-		if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
-			pll_name = "WRPLL1";
-			reg = WRPLL_CTL1;
-			refcount = plls->wrpll1_refcount;
-		} else {
-			pll_name = "WRPLL2";
-			reg = WRPLL_CTL2;
-			refcount = plls->wrpll2_refcount;
-		}
-
-		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
-		new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
-			  WRPLL_DIVIDER_REFERENCE(r2) |
-			  WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
-
-		break;
-
-	case PORT_CLK_SEL_NONE:
-		WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
-		return;
-	default:
-		WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
-		return;
-	}
-
-	cur_val = I915_READ(reg);
-
-	WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
-	if (refcount == 1) {
-		WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
-		I915_WRITE(reg, new_val);
-		POSTING_READ(reg);
-		udelay(20);
-	} else {
-		WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
-	}
-}
-
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
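
The hunks above split PLL handling into two phases: intel_ddi_pll_select() now only computes the WRPLL control value and stores it in the pipe config (config.dpll_hw_state.wrpll), while the actual register write happens later in the shared-DPLL enable hook. A minimal standalone sketch of that compute-then-commit split, assuming made-up bit positions (the XWRPLL_* macros below are placeholders, not the real WRPLL_CTL layout):

#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout for illustration; the real one is in i915_reg.h. */
#define XWRPLL_PLL_ENABLE		(1u << 31)
#define XWRPLL_PLL_LCPLL		(1u << 28)
#define XWRPLL_DIVIDER_REFERENCE(r2)	((uint32_t)(r2) << 16)
#define XWRPLL_DIVIDER_FEEDBACK(n2)	((uint32_t)(n2) << 8)
#define XWRPLL_DIVIDER_POST(p)		((uint32_t)(p) << 0)

int main(void)
{
	unsigned p = 2, n2 = 60, r2 = 27;	/* example divider values */

	/* Phase 1: compute state only; nothing touches the hardware. */
	uint32_t wrpll = XWRPLL_PLL_ENABLE | XWRPLL_PLL_LCPLL |
			 XWRPLL_DIVIDER_REFERENCE(r2) |
			 XWRPLL_DIVIDER_FEEDBACK(n2) |
			 XWRPLL_DIVIDER_POST(p);

	/* Phase 2 is pll->enable() writing the precomputed value. */
	printf("precomputed wrpll state: 0x%08x\n", wrpll);
	return 0;
}

Keeping the computed value in the pipe config also lets the state checker compare software and hardware state; see the PIPE_CONF_CHECK_X(dpll_hw_state.wrpll) hunk further down.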
@@ -995,7 +812,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 		 * eDP when not using the panel fitter, and when not
 		 * using motion blur mitigation (which we don't
 		 * support). */
-		if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
+		if (IS_HASWELL(dev) &&
+		    (intel_crtc->config.pch_pfit.enabled ||
+		     intel_crtc->config.pch_pfit.force_thru))
 			temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
 		else
 			temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1146,76 +965,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 	return false;
 }
 
-static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
-				       enum pipe pipe)
-{
-	uint32_t temp, ret;
-	enum port port = I915_MAX_PORTS;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
-	int i;
-
-	if (cpu_transcoder == TRANSCODER_EDP) {
-		port = PORT_A;
-	} else {
-		temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
-		temp &= TRANS_DDI_PORT_MASK;
-
-		for (i = PORT_B; i <= PORT_E; i++)
-			if (temp == TRANS_DDI_SELECT_PORT(i))
-				port = i;
-	}
-
-	if (port == I915_MAX_PORTS) {
-		WARN(1, "Pipe %c enabled on an unknown port\n",
-		     pipe_name(pipe));
-		ret = PORT_CLK_SEL_NONE;
-	} else {
-		ret = I915_READ(PORT_CLK_SEL(port));
-		DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
-			      "0x%08x\n", pipe_name(pipe), port_name(port),
-			      ret);
-	}
-
-	return ret;
-}
-
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum pipe pipe;
-	struct intel_crtc *intel_crtc;
-
-	dev_priv->ddi_plls.spll_refcount = 0;
-	dev_priv->ddi_plls.wrpll1_refcount = 0;
-	dev_priv->ddi_plls.wrpll2_refcount = 0;
-
-	for_each_pipe(pipe) {
-		intel_crtc =
-			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
-		if (!intel_crtc->active) {
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
-			continue;
-		}
-
-		intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
-								 pipe);
-
-		switch (intel_crtc->ddi_pll_sel) {
-		case PORT_CLK_SEL_SPLL:
-			dev_priv->ddi_plls.spll_refcount++;
-			break;
-		case PORT_CLK_SEL_WRPLL1:
-			dev_priv->ddi_plls.wrpll1_refcount++;
-			break;
-		case PORT_CLK_SEL_WRPLL2:
-			dev_priv->ddi_plls.wrpll2_refcount++;
-			break;
-		}
-	}
-}
-
 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
@@ -1261,8 +1010,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 		intel_edp_panel_on(intel_dp);
 	}
 
-	WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
-	I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
+	WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+	I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
 
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1418,10 +1167,60 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 	}
 }
 
+static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+			       struct intel_shared_dpll *pll)
+{
+	I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+	POSTING_READ(WRPLL_CTL(pll->id));
+	udelay(20);
+}
+
+static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(WRPLL_CTL(pll->id));
+	I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+	POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+				     struct intel_shared_dpll *pll,
+				     struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(WRPLL_CTL(pll->id));
+	hw_state->wrpll = val;
+
+	return val & WRPLL_PLL_ENABLE;
+}
+
+static char *hsw_ddi_pll_names[] = {
+	"WRPLL 1",
+	"WRPLL 2",
+};
+
 void intel_ddi_pll_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t val = I915_READ(LCPLL_CTL);
+	int i;
+
+	dev_priv->num_shared_dpll = 2;
+
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		dev_priv->shared_dplls[i].id = i;
+		dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+		dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
+		dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+		dev_priv->shared_dplls[i].get_hw_state =
+			hsw_ddi_pll_get_hw_state;
+	}
 
 	/* The LCPLL register should be turned on by the BIOS. For now let's
 	 * just check its state and print errors in case something is wrong.
@@ -1705,6 +1504,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	intel_encoder->cloneable = 0;
 	intel_encoder->hot_plug = intel_ddi_hot_plug;
 
+	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+	dev_priv->hpd_irq_port[port] = intel_dig_port;
+
 	if (init_dp)
 		dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
 
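
The new hsw_ddi_pll_* functions plug WRPLL 1/2 into the shared-DPLL framework as a small table of hooks, so common code can enable, disable and read back any PLL without knowing which kind it is. A self-contained sketch of that hook-table pattern, with all names invented for illustration (fake_dpll and friends are not driver types):

#include <stdbool.h>
#include <stdio.h>

struct fake_dpll {
	int id;
	const char *name;
	bool on;
	void (*enable)(struct fake_dpll *pll);
	void (*disable)(struct fake_dpll *pll);
};

static void fake_enable(struct fake_dpll *pll)
{
	/* The real hook writes WRPLL_CTL(pll->id) and waits ~20us. */
	pll->on = true;
	printf("enabling %s\n", pll->name);
}

static void fake_disable(struct fake_dpll *pll)
{
	/* The real hook clears WRPLL_PLL_ENABLE in WRPLL_CTL(pll->id). */
	pll->on = false;
	printf("disabling %s\n", pll->name);
}

static const char *fake_names[] = { "WRPLL 1", "WRPLL 2" };

int main(void)
{
	struct fake_dpll plls[2];
	int i;

	/* Same shape as the init loop in intel_ddi_pll_init() above. */
	for (i = 0; i < 2; i++) {
		plls[i].id = i;
		plls[i].name = fake_names[i];
		plls[i].on = false;
		plls[i].enable = fake_enable;
		plls[i].disable = fake_disable;
	}

	plls[0].enable(&plls[0]);
	plls[0].disable(&plls[0]);
	return 0;
}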
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8796eb18c9a4..e07e6b5dee35 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1094,11 +1094,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 	bool cur_state;
 	struct intel_dpll_hw_state hw_state;
 
-	if (HAS_PCH_LPT(dev_priv->dev)) {
-		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
-		return;
-	}
-
 	if (WARN (!pll,
 		  "asserting DPLL %s with no DPLL\n", state_string(state)))
 		return;
@@ -1514,9 +1509,6 @@ static void intel_reset_dpio(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_VALLEYVIEW(dev))
-		return;
-
 	if (IS_CHERRYVIEW(dev)) {
 		enum dpio_phy phy;
 		u32 val;
@@ -1538,26 +1530,6 @@ static void intel_reset_dpio(struct drm_device *dev)
 			I915_WRITE(DISPLAY_PHY_CONTROL,
 				   PHY_COM_LANE_RESET_DEASSERT(phy, val));
 		}
-
-	} else {
-		/*
-		 * If DPIO has already been reset, e.g. by BIOS, just skip all
-		 * this.
-		 */
-		if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
-			return;
-
-		/*
-		 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
-		 * Need to assert and de-assert PHY SB reset by gating the
-		 * common lane power, then un-gating it.
-		 * Simply ungating isn't enough to reset the PHY enough to get
-		 * ports and lanes running.
-		 */
-		__vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
-				     false);
-		__vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
-				     true);
 	}
 }
 
@@ -1837,12 +1809,14 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
 	}
 	WARN_ON(pll->on);
 
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
 	DRM_DEBUG_KMS("enabling %s\n", pll->name);
 	pll->enable(dev_priv, pll);
 	pll->on = true;
 }
 
-static void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1873,6 +1847,8 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
 	DRM_DEBUG_KMS("disabling %s\n", pll->name);
 	pll->disable(dev_priv, pll);
 	pll->on = false;
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
 }
 
 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -2219,6 +2195,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	u32 alignment;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	switch (obj->tiling_mode) {
 	case I915_TILING_NONE:
 		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2275,6 +2253,8 @@ err_interruptible:
 
 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 {
+	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+
 	i915_gem_object_unpin_fence(obj);
 	i915_gem_object_unpin_from_display_plane(obj);
 }
@@ -2379,7 +2359,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_crtc *c;
 	struct intel_crtc *i;
-	struct intel_framebuffer *fb;
+	struct drm_i915_gem_object *obj;
 
 	if (!intel_crtc->base.primary->fb)
 		return;
@@ -2400,14 +2380,17 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
 		if (c == &intel_crtc->base)
 			continue;
 
-		if (!i->active || !c->primary->fb)
+		if (!i->active)
 			continue;
 
-		fb = to_intel_framebuffer(c->primary->fb);
-		if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+		obj = intel_fb_obj(c->primary->fb);
+		if (obj == NULL)
+			continue;
+
+		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
 			drm_framebuffer_reference(c->primary->fb);
 			intel_crtc->base.primary->fb = c->primary->fb;
-			fb->obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 			break;
 		}
 	}
@@ -2420,16 +2403,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int plane = intel_crtc->plane;
 	unsigned long linear_offset;
 	u32 dspcntr;
 	u32 reg;
 
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
 	reg = DSPCNTR(plane);
 	dspcntr = I915_READ(reg);
 	/* Mask out pixel format bits in case we change it */
@@ -2510,16 +2489,12 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int plane = intel_crtc->plane;
 	unsigned long linear_offset;
 	u32 dspcntr;
 	u32 reg;
 
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
 	reg = DSPCNTR(plane);
 	dspcntr = I915_READ(reg);
 	/* Mask out pixel format bits in case we change it */
@@ -2650,7 +2625,7 @@ void intel_display_handle_reset(struct drm_device *dev)
 static int
 intel_finish_fb(struct drm_framebuffer *old_fb)
 {
-	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	bool was_interruptible = dev_priv->mm.interruptible;
 	int ret;
@@ -2697,8 +2672,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	struct drm_framebuffer *old_fb;
-	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+	struct drm_framebuffer *old_fb = crtc->primary->fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
 	int ret;
 
 	if (intel_crtc_has_pending_flip(crtc)) {
@@ -2719,12 +2695,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		return -EINVAL;
 	}
 
-	old_fb = crtc->primary->fb;
-
 	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 	if (ret == 0)
-		i915_gem_track_fb(to_intel_framebuffer(old_fb)->obj, obj,
+		i915_gem_track_fb(old_obj, obj,
 				  INTEL_FRONTBUFFER_PRIMARY(pipe));
 	mutex_unlock(&dev->struct_mutex);
 	if (ret != 0) {
@@ -2776,7 +2750,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		if (intel_crtc->active && old_fb != fb)
 			intel_wait_for_vblank(dev, intel_crtc->pipe);
 		mutex_lock(&dev->struct_mutex);
-		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+		intel_unpin_fb_obj(old_obj);
 		mutex_unlock(&dev->struct_mutex);
 	}
 
@@ -3642,7 +3616,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
-static void intel_put_shared_dpll(struct intel_crtc *crtc)
+void intel_put_shared_dpll(struct intel_crtc *crtc)
 {
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
@@ -3662,7 +3636,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
 	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
 }
 
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3915,30 +3889,6 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 	 */
 }
 
-/**
- * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
- * cursor plane briefly if not already running after enabling the display
- * plane.
- * This workaround avoids occasional blank screens when self refresh is
- * enabled.
- */
-static void
-g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-	u32 cntl = I915_READ(CURCNTR(pipe));
-
-	if ((cntl & CURSOR_MODE) == 0) {
-		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
-
-		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
-		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
-		intel_wait_for_vblank(dev_priv->dev, pipe);
-		I915_WRITE(CURCNTR(pipe), cntl);
-		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
-		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
-	}
-}
-
 static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3951,9 +3901,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 
 	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
-	/* The fixup needs to happen before cursor is enabled */
-	if (IS_G4X(dev))
-		g4x_fixup_plane(dev_priv, pipe);
 	intel_crtc_update_cursor(crtc, true);
 	intel_crtc_dpms_overlay(intel_crtc, true);
 
@@ -4128,6 +4075,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	if (intel_crtc->active)
 		return;
 
+	if (intel_crtc_to_shared_dpll(intel_crtc))
+		intel_enable_shared_dpll(intel_crtc);
+
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
 
@@ -4152,16 +4102,15 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	intel_crtc->active = true;
 
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
-	if (intel_crtc->config.has_pch_encoder)
-		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
-
-	if (intel_crtc->config.has_pch_encoder)
-		dev_priv->display.fdi_link_train(crtc);
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_enable)
 			encoder->pre_enable(encoder);
 
+	if (intel_crtc->config.has_pch_encoder) {
+		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+		dev_priv->display.fdi_link_train(crtc);
+	}
+
 	intel_ddi_enable_pipe_clock(intel_crtc);
 
 	ironlake_pfit_enable(intel_crtc);
@@ -4299,22 +4248,25 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
 	intel_ddi_disable_pipe_clock(intel_crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->post_disable)
-			encoder->post_disable(encoder);
-
 	if (intel_crtc->config.has_pch_encoder) {
 		lpt_disable_pch_transcoder(dev_priv);
 		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
 		intel_ddi_fdi_disable(crtc);
 	}
 
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
+
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
 	mutex_unlock(&dev->struct_mutex);
+
+	if (intel_crtc_to_shared_dpll(intel_crtc))
+		intel_disable_shared_dpll(intel_crtc);
 }
 
 static void ironlake_crtc_off(struct drm_crtc *crtc)
@@ -4323,10 +4275,6 @@ static void ironlake_crtc_off(struct drm_crtc *crtc)
 	intel_put_shared_dpll(intel_crtc);
 }
 
-static void haswell_crtc_off(struct drm_crtc *crtc)
-{
-	intel_ddi_put_crtc_pll(crtc);
-}
-
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
 {
@@ -4398,7 +4346,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
 	struct intel_encoder *intel_encoder;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
 	unsigned long mask;
 	enum transcoder transcoder;
 
@@ -4406,7 +4353,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
 
 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
 	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
-	if (pfit_enabled)
+	if (intel_crtc->config.pch_pfit.enabled ||
+	    intel_crtc->config.pch_pfit.force_thru)
 		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -4463,7 +4411,8 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
 	intel_display_set_init_power(dev_priv, false);
 }
 
-int valleyview_get_vco(struct drm_i915_private *dev_priv)
+/* returns HPLL frequency in kHz */
+static int valleyview_get_vco(struct drm_i915_private *dev_priv)
 {
 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
 
@@ -4473,7 +4422,23 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv)
 			CCK_FUSE_HPLL_FREQ_MASK;
 	mutex_unlock(&dev_priv->dpio_lock);
 
-	return vco_freq[hpll_freq];
+	return vco_freq[hpll_freq] * 1000;
+}
+
+static void vlv_update_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
+			 dev_priv->vlv_cdclk_freq);
+
+	/*
+	 * Program the gmbus_freq based on the cdclk frequency.
+	 * BSpec erroneously claims we should aim for 4MHz, but
+	 * in fact 1MHz is the correct frequency.
+	 */
+	I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
 }
 
 /* Adjust CDclk dividers to allow high res or save power if possible */
@@ -4482,12 +4447,11 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, cmd;
 
-	WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
-	dev_priv->vlv_cdclk_freq = cdclk;
+	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
 
-	if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
 		cmd = 2;
-	else if (cdclk == 266)
+	else if (cdclk == 266667)
 		cmd = 1;
 	else
 		cmd = 0;
@@ -4504,18 +4468,23 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	if (cdclk == 400) {
+	if (cdclk == 400000) {
 		u32 divider, vco;
 
 		vco = valleyview_get_vco(dev_priv);
-		divider = ((vco << 1) / cdclk) - 1;
+		divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
 
 		mutex_lock(&dev_priv->dpio_lock);
 		/* adjust cdclk divider */
 		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
-		val &= ~0xf;
+		val &= ~DISPLAY_FREQUENCY_VALUES;
 		val |= divider;
 		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+
+		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+			     50))
+			DRM_ERROR("timed out waiting for CDclk change\n");
 		mutex_unlock(&dev_priv->dpio_lock);
 	}
 
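
Two things change in the divider computation above: cdclk is now tracked in kHz rather than MHz, and the truncating division is replaced by DIV_ROUND_CLOSEST, which matters once the 333 MHz bin (where 2*vco does not divide evenly) comes into play. A tiny standalone check of the arithmetic, with the kernel macro inlined for positive operands:

#include <stdio.h>

/* Same result as the kernel's DIV_ROUND_CLOSEST() for positive values. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	int vco = 1600000;	/* example HPLL frequency in kHz */
	int cdclk = 400000;	/* target CDclk in kHz */

	/* (3200000 + 200000) / 400000 - 1 = 8 - 1 = 7 */
	printf("divider field: %d\n",
	       DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1);

	/* 2*vco/cdclk = 14.9999...: truncation yields quotient 14,
	 * rounding the closer 15, i.e. divider fields 13 vs 14. */
	vco = 2000000;
	cdclk = 266667;
	printf("rounded: %d, truncated: %d\n",
	       DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1,
	       (vco << 1) / cdclk - 1);
	return 0;
}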
@@ -4528,54 +4497,43 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 	 * For high bandwidth configs, we set a higher latency in the bunit
 	 * so that the core display fetch happens in time to avoid underruns.
 	 */
-	if (cdclk == 400)
+	if (cdclk == 400000)
 		val |= 4500 / 250; /* 4.5 usec */
 	else
 		val |= 3000 / 250; /* 3.0 usec */
 	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
 	mutex_unlock(&dev_priv->dpio_lock);
 
-	/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
-	intel_i2c_reset(dev);
-}
-
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
-{
-	int cur_cdclk, vco;
-	int divider;
-
-	vco = valleyview_get_vco(dev_priv);
-
-	mutex_lock(&dev_priv->dpio_lock);
-	divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
-	mutex_unlock(&dev_priv->dpio_lock);
-
-	divider &= 0xf;
-
-	cur_cdclk = (vco << 1) / (divider + 1);
-
-	return cur_cdclk;
+	vlv_update_cdclk(dev);
 }
 
 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
 				 int max_pixclk)
 {
+	int vco = valleyview_get_vco(dev_priv);
+	int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+
 	/*
 	 * Really only a few cases to deal with, as only 4 CDclks are supported:
 	 * 200MHz
 	 * 267MHz
-	 * 320MHz
+	 * 320/333MHz (depends on HPLL freq)
 	 * 400MHz
 	 * So we check to see whether we're above 90% of the lower bin and
 	 * adjust if needed.
+	 *
+	 * We seem to get an unstable or solid color picture at 200MHz.
+	 * Not sure what's wrong. For now use 200MHz only when all pipes
+	 * are off.
 	 */
-	if (max_pixclk > 288000) {
-		return 400;
-	} else if (max_pixclk > 240000) {
-		return 320;
-	} else
-		return 266;
-	/* Looks like the 200MHz CDclk freq doesn't work on some configs */
+	if (max_pixclk > freq_320*9/10)
+		return 400000;
+	else if (max_pixclk > 266667*9/10)
+		return freq_320;
+	else if (max_pixclk > 0)
+		return 266667;
+	else
+		return 200000;
 }
 
 /* compute the max pixel clock for new configuration */
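
valleyview_calc_cdclk() now works in kHz and adds the HPLL-dependent 320/333 MHz bin; a candidate bin is rejected as soon as the pixel clock exceeds 90% of it. A standalone sketch of the same selection, safe to compile and poke at (calc_cdclk() here is made up; the constants mirror the hunk above):

#include <stdio.h>

static int calc_cdclk(int vco_khz, int max_pixclk)
{
	/* 320 MHz is only exact when 2*vco divides evenly; else 333 MHz. */
	int freq_320 = (vco_khz << 1) % 320000 != 0 ? 333333 : 320000;

	if (max_pixclk > freq_320 * 9 / 10)
		return 400000;
	else if (max_pixclk > 266667 * 9 / 10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;	/* 200 MHz only when all pipes are off */
}

int main(void)
{
	/* 1080p60 (148500 kHz) fits comfortably in the 266 MHz bin. */
	printf("%d\n", calc_cdclk(1600000, 148500));	/* -> 266667 */
	printf("%d\n", calc_cdclk(1600000, 280000));	/* -> 320000 */
	printf("%d\n", calc_cdclk(1600000, 0));		/* -> 200000 */
	return 0;
}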
@@ -4829,6 +4787,16 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	if (IS_GEN2(dev))
 		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 
+	/*
+	 * Vblank time updates from the shadow to live plane control register
+	 * are blocked if the memory self-refresh mode is active at that
+	 * moment. So to make sure the plane gets truly disabled, disable
+	 * first the self-refresh mode. The self-refresh enable bit in turn
+	 * will be checked/applied by the HW only at the next frame start
+	 * event which is after the vblank start event, so we need to have a
+	 * wait-for-vblank between disabling the plane and the pipe.
+	 */
+	intel_set_memory_cxsr(dev_priv, false);
 	intel_crtc_disable_planes(crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4837,9 +4805,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	/*
 	 * On gen2 planes are double buffered but the pipe isn't, so we must
 	 * wait for planes to fully turn off before disabling the pipe.
+	 * We also need to wait on all gmch platforms because of the
+	 * self-refresh mode constraint explained above.
 	 */
-	if (IS_GEN2(dev))
-		intel_wait_for_vblank(dev, pipe);
+	intel_wait_for_vblank(dev, pipe);
 
 	intel_disable_pipe(dev_priv, pipe);
 
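
The ordering that the new comment describes is easy to get wrong, so here it is reduced to its four steps. The helpers below are printf stubs standing in for intel_set_memory_cxsr(), intel_crtc_disable_planes(), intel_wait_for_vblank() and intel_disable_pipe(); only the sequence is the point:

#include <stdio.h>

static void disable_self_refresh(void) { puts("1: cxsr off"); }
static void disable_planes(void)       { puts("2: planes off"); }
static void wait_for_vblank(void)      { puts("3: wait for vblank"); }
static void disable_pipe(void)         { puts("4: pipe off"); }

int main(void)
{
	/*
	 * Self-refresh blocks shadow-to-live plane register updates, so
	 * it must be disabled first; the vblank wait then lets the plane
	 * disable actually latch before the pipe is shut down.
	 */
	disable_self_refresh();
	disable_planes();
	wait_for_vblank();
	disable_pipe();
	return 0;
}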
@@ -4956,7 +4925,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_connector *connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *old_obj;
+	struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
 	/* crtc should still be enabled when we disable it. */
@@ -4971,7 +4940,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 	assert_pipe_disabled(dev->dev_private, pipe);
 
 	if (crtc->primary->fb) {
-		old_obj = to_intel_framebuffer(crtc->primary->fb)->obj;
 		mutex_lock(&dev->struct_mutex);
 		intel_unpin_fb_obj(old_obj);
 		i915_gem_track_fb(old_obj, NULL,
@@ -5253,9 +5221,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	if (HAS_IPS(dev))
 		hsw_compute_ips_config(crtc, pipe_config);
 
-	/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
-	 * clock survives for now. */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+	/*
+	 * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
+	 * old clock survives for now.
+	 */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
 		pipe_config->shared_dpll = crtc->config.shared_dpll;
 
 	if (pipe_config->has_pch_encoder)
@@ -5266,7 +5236,22 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 
 static int valleyview_get_display_clock_speed(struct drm_device *dev)
 {
-	return 400000; /* FIXME */
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int vco = valleyview_get_vco(dev_priv);
+	u32 val;
+	int divider;
+
+	mutex_lock(&dev_priv->dpio_lock);
+	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	divider = val & DISPLAY_FREQUENCY_VALUES;
+
+	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
+	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+	     "cdclk change in progress\n");
+
+	return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
 }
 
 static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -6217,8 +6202,8 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
 	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
 					    plane_config->tiled);
 
-	plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
-				   aligned_height, PAGE_SIZE);
+	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+					aligned_height);
 
 	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 		      pipe, plane, crtc->base.primary->fb->width,
@@ -7237,8 +7222,8 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
 	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
 					    plane_config->tiled);
 
-	plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
-				   aligned_height, PAGE_SIZE);
+	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+					aligned_height);
 
 	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 		      pipe, plane, crtc->base.primary->fb->width,
@@ -7255,6 +7240,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
+	if (!intel_display_power_enabled(dev_priv,
+					 POWER_DOMAIN_PIPE(crtc->pipe)))
+		return false;
+
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
@@ -7329,7 +7318,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
-	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
 	struct intel_crtc *crtc;
 
 	for_each_intel_crtc(dev, crtc)
@@ -7337,9 +7325,9 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 		     pipe_name(crtc->pipe));
 
 	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
-	WARN(plls->spll_refcount, "SPLL enabled\n");
-	WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
-	WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+	WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+	WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
 	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
 	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
 	     "CPU PWM1 enabled\n");
@@ -7360,6 +7348,16 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 	WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
 }
 
+static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (IS_HASWELL(dev))
+		return I915_READ(D_COMP_HSW);
+	else
+		return I915_READ(D_COMP_BDW);
+}
+
 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 {
 	struct drm_device *dev = dev_priv->dev;
@@ -7368,12 +7366,12 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
 					    val))
-			DRM_ERROR("Failed to disable D_COMP\n");
+			DRM_ERROR("Failed to write to D_COMP\n");
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	} else {
-		I915_WRITE(D_COMP, val);
+		I915_WRITE(D_COMP_BDW, val);
+		POSTING_READ(D_COMP_BDW);
 	}
-	POSTING_READ(D_COMP);
 }
 
 /*
@@ -7411,12 +7409,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
 		DRM_ERROR("LCPLL still locked\n");
 
-	val = I915_READ(D_COMP);
+	val = hsw_read_dcomp(dev_priv);
 	val |= D_COMP_COMP_DISABLE;
 	hsw_write_dcomp(dev_priv, val);
 	ndelay(100);
 
-	if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
+		     1))
 		DRM_ERROR("D_COMP RCOMP still in progress\n");
 
 	if (allow_power_down) {
@@ -7465,7 +7464,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 		POSTING_READ(LCPLL_CTL);
 	}
 
-	val = I915_READ(D_COMP);
+	val = hsw_read_dcomp(dev_priv);
 	val |= D_COMP_COMP_FORCE;
 	val &= ~D_COMP_COMP_DISABLE;
 	hsw_write_dcomp(dev_priv, val);
@@ -7571,13 +7570,59 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
 
 	if (!intel_ddi_pll_select(intel_crtc))
 		return -EINVAL;
-	intel_ddi_pll_enable(intel_crtc);
 
 	intel_crtc->lowfreq_avail = false;
 
 	return 0;
 }
 
+static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
+				       struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_shared_dpll *pll;
+	enum port port;
+	uint32_t tmp;
+
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+
+	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+	switch (pipe_config->ddi_pll_sel) {
+	case PORT_CLK_SEL_WRPLL1:
+		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+		break;
+	case PORT_CLK_SEL_WRPLL2:
+		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+		break;
+	}
+
+	if (pipe_config->shared_dpll >= 0) {
+		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+
+		WARN_ON(!pll->get_hw_state(dev_priv, pll,
+					   &pipe_config->dpll_hw_state));
+	}
+
+	/*
+	 * Haswell has only FDI/PCH transcoder A. It is connected to DDI E,
+	 * so just check whether this pipe is wired to DDI E and whether
+	 * the PCH transcoder is on.
+	 */
+	if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+		pipe_config->has_pch_encoder = true;
+
+		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+		ironlake_get_fdi_m_n_config(crtc, pipe_config);
+	}
+}
+
 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 				    struct intel_crtc_config *pipe_config)
 {
@@ -7623,22 +7668,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	if (!(tmp & PIPECONF_ENABLE))
 		return false;
 
-	/*
-	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
-	 * DDI E. So just check whether this pipe is wired to DDI E and whether
-	 * the PCH transcoder is on.
-	 */
-	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
-	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
-	    I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
-		pipe_config->has_pch_encoder = true;
-
-		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
-		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
-					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
-		ironlake_get_fdi_m_n_config(crtc, pipe_config);
-	}
+	haswell_get_ddi_port_state(crtc, pipe_config);
 
 	intel_get_pipe_timings(crtc, pipe_config);
 
@@ -8326,7 +8356,7 @@ static u32
 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
 {
 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
-	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
+	return PAGE_ALIGN(pitch * mode->vdisplay);
 }
 
 static struct drm_framebuffer *
@@ -9447,6 +9477,9 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 	 * So using MMIO flips there would disrupt this mechanism.
 	 */
 
+	if (ring == NULL)
+		return true;
+
 	if (INTEL_INFO(ring->dev)->gen < 5)
 		return false;
 
@@ -9595,7 +9628,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *old_fb = crtc->primary->fb;
-	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	struct intel_unpin_work *work;
@@ -9603,6 +9636,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	int ret;
 
+	/*
+	 * drm_mode_page_flip_ioctl() should already catch this, but double
+	 * check to be safe. In the future we may enable pageflipping from
+	 * a disabled primary plane.
+	 */
+	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
+		return -EBUSY;
+
 	/* Can't change pixel format via MI display flips. */
 	if (fb->pixel_format != crtc->primary->fb->pixel_format)
 		return -EINVAL;
@@ -9625,7 +9666,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	work->event = event;
 	work->crtc = crtc;
-	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+	work->old_fb_obj = intel_fb_obj(old_fb);
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
 	ret = drm_crtc_vblank_get(crtc);
@@ -9670,6 +9711,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	if (IS_VALLEYVIEW(dev)) {
 		ring = &dev_priv->ring[BCS];
+		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
+			/* vlv: DISPLAY_FLIP fails to change tiling */
+			ring = NULL;
+	} else if (IS_IVYBRIDGE(dev)) {
+		ring = &dev_priv->ring[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
 		ring = obj->ring;
 		if (ring == NULL || ring->id != RCS)
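
The ring choice above ties into the earlier use_mmio_flip() hunk: returning a NULL ring on vlv when the flip would change tiling forces the MMIO path, since DISPLAY_FLIP cannot change tiling there. A simplified standalone model of the decision (pick_flip_ring() and the enum are invented for illustration; the driver works with struct intel_engine_cs pointers):

#include <stdbool.h>
#include <stdio.h>

enum ring_id { RING_NONE, RING_RCS, RING_BCS };

static enum ring_id pick_flip_ring(bool is_vlv, bool is_ivb, int gen,
				   bool tiling_changes, enum ring_id obj_ring)
{
	if (is_vlv) {
		/* vlv: DISPLAY_FLIP fails to change tiling -> MMIO flip */
		return tiling_changes ? RING_NONE : RING_BCS;
	} else if (is_ivb) {
		return RING_BCS;
	} else if (gen >= 7) {
		/* reuse the object's ring unless it isn't the render ring */
		if (obj_ring == RING_NONE || obj_ring != RING_RCS)
			return RING_BCS;
		return obj_ring;
	}
	return RING_RCS;
}

int main(void)
{
	/* Tiling-changing flip on vlv: no ring (0), i.e. use MMIO. */
	printf("%d\n", pick_flip_ring(true, false, 7, true, RING_RCS));
	return 0;
}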
@@ -10401,11 +10447,14 @@ intel_pipe_config_compare(struct drm_device *dev,
 
 	PIPE_CONF_CHECK_I(double_wide);
 
+	PIPE_CONF_CHECK_X(ddi_pll_sel);
+
 	PIPE_CONF_CHECK_I(shared_dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
 
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10762,10 +10811,9 @@ static int __intel_set_mode(struct drm_crtc *crtc,
 	 * on the DPLL.
 	 */
 	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
-		struct drm_framebuffer *old_fb;
-		struct drm_i915_gem_object *old_obj = NULL;
-		struct drm_i915_gem_object *obj =
-			to_intel_framebuffer(fb)->obj;
+		struct drm_framebuffer *old_fb = crtc->primary->fb;
+		struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
+		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
 		mutex_lock(&dev->struct_mutex);
 		ret = intel_pin_and_fence_fb_obj(dev,
@@ -10776,11 +10824,8 @@ static int __intel_set_mode(struct drm_crtc *crtc,
10776 mutex_unlock(&dev->struct_mutex); 10824 mutex_unlock(&dev->struct_mutex);
10777 goto done; 10825 goto done;
10778 } 10826 }
10779 old_fb = crtc->primary->fb; 10827 if (old_fb)
10780 if (old_fb) {
10781 old_obj = to_intel_framebuffer(old_fb)->obj;
10782 intel_unpin_fb_obj(old_obj); 10828 intel_unpin_fb_obj(old_obj);
10783 }
10784 i915_gem_track_fb(old_obj, obj, 10829 i915_gem_track_fb(old_obj, obj,
10785 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 10830 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
10786 mutex_unlock(&dev->struct_mutex); 10831 mutex_unlock(&dev->struct_mutex);
@@ -11266,18 +11311,15 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
11266 .page_flip = intel_crtc_page_flip, 11311 .page_flip = intel_crtc_page_flip,
11267}; 11312};
11268 11313
11269static void intel_cpu_pll_init(struct drm_device *dev)
11270{
11271 if (HAS_DDI(dev))
11272 intel_ddi_pll_init(dev);
11273}
11274
11275static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, 11314static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
11276 struct intel_shared_dpll *pll, 11315 struct intel_shared_dpll *pll,
11277 struct intel_dpll_hw_state *hw_state) 11316 struct intel_dpll_hw_state *hw_state)
11278{ 11317{
11279 uint32_t val; 11318 uint32_t val;
11280 11319
11320 if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
11321 return false;
11322
11281 val = I915_READ(PCH_DPLL(pll->id)); 11323 val = I915_READ(PCH_DPLL(pll->id));
11282 hw_state->dpll = val; 11324 hw_state->dpll = val;
11283 hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); 11325 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
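[The POWER_DOMAIN_PLLS check makes hw state readout safe while the well is down. A minimal model of the guard-then-read pattern, with stubs standing in for the driver helpers and an illustrative enable bit:

        #include <stdbool.h>
        #include <stdio.h>

        /* Stub for intel_display_power_enabled(); the driver consults its
         * power-well tracking here. */
        static bool power_enabled(void) { return true; }

        static unsigned int read_reg(unsigned int reg) { (void)reg; return 0x80000000u; }

        /* If the well may be off, report "disabled" without touching the
         * register, whose contents would be garbage anyway. */
        static bool pll_get_hw_state(unsigned int reg, unsigned int *val)
        {
                if (!power_enabled())
                        return false;
                *val = read_reg(reg);
                return (*val & 0x80000000u) != 0;
        }

        int main(void)
        {
                unsigned int val = 0;

                printf("on=%d val=0x%08x\n", pll_get_hw_state(0xc6014, &val), val);
                return 0;
        }
]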
@@ -11359,7 +11401,9 @@ static void intel_shared_dpll_init(struct drm_device *dev)
11359{ 11401{
11360 struct drm_i915_private *dev_priv = dev->dev_private; 11402 struct drm_i915_private *dev_priv = dev->dev_private;
11361 11403
11362 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 11404 if (HAS_DDI(dev))
11405 intel_ddi_pll_init(dev);
11406 else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
11363 ibx_pch_dpll_init(dev); 11407 ibx_pch_dpll_init(dev);
11364 else 11408 else
11365 dev_priv->num_shared_dpll = 0; 11409 dev_priv->num_shared_dpll = 0;
@@ -11398,9 +11442,11 @@ intel_primary_plane_disable(struct drm_plane *plane)
11398 intel_disable_primary_hw_plane(dev_priv, intel_plane->plane, 11442 intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
11399 intel_plane->pipe); 11443 intel_plane->pipe);
11400disable_unpin: 11444disable_unpin:
11401 i915_gem_track_fb(to_intel_framebuffer(plane->fb)->obj, NULL, 11445 mutex_lock(&dev->struct_mutex);
11446 i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
11402 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 11447 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11403 intel_unpin_fb_obj(to_intel_framebuffer(plane->fb)->obj); 11448 intel_unpin_fb_obj(intel_fb_obj(plane->fb));
11449 mutex_unlock(&dev->struct_mutex);
11404 plane->fb = NULL; 11450 plane->fb = NULL;
11405 11451
11406 return 0; 11452 return 0;
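[Frontbuffer tracking and the unpin both mutate GEM object state, hence the struct_mutex now wrapped around them. A toy model of the locking shape, with a pthread mutex in place of dev->struct_mutex (build with -pthread):

        #include <pthread.h>
        #include <stdio.h>

        struct gem_obj {
                int pin_count;
                unsigned int frontbuffer_bits;
        };

        static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

        static void track_fb(struct gem_obj *old, struct gem_obj *new, unsigned int bit)
        {
                if (old)
                        old->frontbuffer_bits &= ~bit;
                if (new)
                        new->frontbuffer_bits |= bit;
        }

        /* Tracking update and unpin sit under one critical section, as the
         * hunk now arranges. */
        static void plane_disable(struct gem_obj *obj, unsigned int bit)
        {
                pthread_mutex_lock(&struct_mutex);
                track_fb(obj, NULL, bit);
                obj->pin_count--;
                pthread_mutex_unlock(&struct_mutex);
        }

        int main(void)
        {
                struct gem_obj fb = { .pin_count = 1, .frontbuffer_bits = 0x1 };

                plane_disable(&fb, 0x1);
                printf("pins=%d bits=%#x\n", fb.pin_count, fb.frontbuffer_bits);
                return 0;
        }
]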
@@ -11417,7 +11463,8 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11417 struct drm_i915_private *dev_priv = dev->dev_private; 11463 struct drm_i915_private *dev_priv = dev->dev_private;
11418 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11464 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11419 struct intel_plane *intel_plane = to_intel_plane(plane); 11465 struct intel_plane *intel_plane = to_intel_plane(plane);
11420 struct drm_i915_gem_object *obj, *old_obj = NULL; 11466 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11467 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11421 struct drm_rect dest = { 11468 struct drm_rect dest = {
11422 /* integer pixels */ 11469 /* integer pixels */
11423 .x1 = crtc_x, 11470 .x1 = crtc_x,
@@ -11449,10 +11496,6 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11449 if (ret) 11496 if (ret)
11450 return ret; 11497 return ret;
11451 11498
11452 if (plane->fb)
11453 old_obj = to_intel_framebuffer(plane->fb)->obj;
11454 obj = to_intel_framebuffer(fb)->obj;
11455
11456 /* 11499 /*
11457 * If the CRTC isn't enabled, we're just pinning the framebuffer, 11500 * If the CRTC isn't enabled, we're just pinning the framebuffer,
11458 * updating the fb pointer, and returning without touching the 11501 * updating the fb pointer, and returning without touching the
@@ -11460,6 +11503,8 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11460 * turn on the display with all planes setup as desired. 11503 * turn on the display with all planes setup as desired.
11461 */ 11504 */
11462 if (!crtc->enabled) { 11505 if (!crtc->enabled) {
11506 mutex_lock(&dev->struct_mutex);
11507
11463 /* 11508 /*
11464 * If we already called setplane while the crtc was disabled, 11509 * If we already called setplane while the crtc was disabled,
11465 * we may have an fb pinned; unpin it. 11510 * we may have an fb pinned; unpin it.
@@ -11471,7 +11516,10 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11471 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 11516 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11472 11517
11473 /* Pin and return without programming hardware */ 11518 /* Pin and return without programming hardware */
11474 return intel_pin_and_fence_fb_obj(dev, obj, NULL); 11519 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
11520 mutex_unlock(&dev->struct_mutex);
11521
11522 return ret;
11475 } 11523 }
11476 11524
11477 intel_crtc_wait_for_pending_flips(crtc); 11525 intel_crtc_wait_for_pending_flips(crtc);
@@ -11483,14 +11531,18 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11483 * because plane->fb still gets set and pinned. 11531 * because plane->fb still gets set and pinned.
11484 */ 11532 */
11485 if (!visible) { 11533 if (!visible) {
11534 mutex_lock(&dev->struct_mutex);
11535
11486 /* 11536 /*
11487 * Try to pin the new fb first so that we can bail out if we 11537 * Try to pin the new fb first so that we can bail out if we
11488 * fail. 11538 * fail.
11489 */ 11539 */
11490 if (plane->fb != fb) { 11540 if (plane->fb != fb) {
11491 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 11541 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
11492 if (ret) 11542 if (ret) {
11543 mutex_unlock(&dev->struct_mutex);
11493 return ret; 11544 return ret;
11545 }
11494 } 11546 }
11495 11547
11496 i915_gem_track_fb(old_obj, obj, 11548 i915_gem_track_fb(old_obj, obj,
@@ -11506,6 +11558,8 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11506 if (plane->fb) 11558 if (plane->fb)
11507 intel_unpin_fb_obj(old_obj); 11559 intel_unpin_fb_obj(old_obj);
11508 11560
11561 mutex_unlock(&dev->struct_mutex);
11562
11509 return 0; 11563 return 0;
11510 } 11564 }
11511 11565
@@ -12159,7 +12213,7 @@ static void intel_init_display(struct drm_device *dev)
12159 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 12213 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
12160 dev_priv->display.crtc_enable = haswell_crtc_enable; 12214 dev_priv->display.crtc_enable = haswell_crtc_enable;
12161 dev_priv->display.crtc_disable = haswell_crtc_disable; 12215 dev_priv->display.crtc_disable = haswell_crtc_disable;
12162 dev_priv->display.off = haswell_crtc_off; 12216 dev_priv->display.off = ironlake_crtc_off;
12163 dev_priv->display.update_primary_plane = 12217 dev_priv->display.update_primary_plane =
12164 ironlake_update_primary_plane; 12218 ironlake_update_primary_plane;
12165 } else if (HAS_PCH_SPLIT(dev)) { 12219 } else if (HAS_PCH_SPLIT(dev)) {
@@ -12426,6 +12480,9 @@ void intel_modeset_init_hw(struct drm_device *dev)
12426{ 12480{
12427 intel_prepare_ddi(dev); 12481 intel_prepare_ddi(dev);
12428 12482
12483 if (IS_VALLEYVIEW(dev))
12484 vlv_update_cdclk(dev);
12485
12429 intel_init_clock_gating(dev); 12486 intel_init_clock_gating(dev);
12430 12487
12431 intel_reset_dpio(dev); 12488 intel_reset_dpio(dev);
@@ -12502,7 +12559,6 @@ void intel_modeset_init(struct drm_device *dev)
12502 intel_init_dpio(dev); 12559 intel_init_dpio(dev);
12503 intel_reset_dpio(dev); 12560 intel_reset_dpio(dev);
12504 12561
12505 intel_cpu_pll_init(dev);
12506 intel_shared_dpll_init(dev); 12562 intel_shared_dpll_init(dev);
12507 12563
12508 /* Just disable it once at startup */ 12564 /* Just disable it once at startup */
@@ -12811,10 +12867,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
12811 crtc->active ? "enabled" : "disabled"); 12867 crtc->active ? "enabled" : "disabled");
12812 } 12868 }
12813 12869
12814 /* FIXME: Smash this into the new shared dpll infrastructure. */
12815 if (HAS_DDI(dev))
12816 intel_ddi_setup_hw_pll_state(dev);
12817
12818 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 12870 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12819 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 12871 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12820 12872
@@ -12828,6 +12880,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
12828 12880
12829 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", 12881 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
12830 pll->name, pll->refcount, pll->on); 12882 pll->name, pll->refcount, pll->on);
12883
12884 if (pll->refcount)
12885 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
12831 } 12886 }
12832 12887
12833 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 12888 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
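[Every PLL that leaves BIOS takeover with a nonzero refcount now pins POWER_DOMAIN_PLLS, so the domain refcount matches the hardware until the first real modeset releases it. Modeled:

        #include <stdio.h>

        struct pll {
                const char *name;
                int refcount;
        };

        static int plls_power_refs;     /* stand-in for the POWER_DOMAIN_PLLS count */

        static void readout_plls(const struct pll *plls, int n)
        {
                int i;

                for (i = 0; i < n; i++)
                        if (plls[i].refcount)
                                plls_power_refs++;      /* intel_display_power_get() */
        }

        int main(void)
        {
                const struct pll plls[] = { { "WRPLL 1", 1 }, { "WRPLL 2", 0 } };

                readout_plls(plls, 2);
                printf("POWER_DOMAIN_PLLS refs: %d\n", plls_power_refs);
                return 0;
        }
]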
@@ -12945,7 +13000,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
12945void intel_modeset_gem_init(struct drm_device *dev) 13000void intel_modeset_gem_init(struct drm_device *dev)
12946{ 13001{
12947 struct drm_crtc *c; 13002 struct drm_crtc *c;
12948 struct intel_framebuffer *fb; 13003 struct drm_i915_gem_object *obj;
12949 13004
12950 mutex_lock(&dev->struct_mutex); 13005 mutex_lock(&dev->struct_mutex);
12951 intel_init_gt_powersave(dev); 13006 intel_init_gt_powersave(dev);
@@ -12962,11 +13017,11 @@ void intel_modeset_gem_init(struct drm_device *dev)
12962 */ 13017 */
12963 mutex_lock(&dev->struct_mutex); 13018 mutex_lock(&dev->struct_mutex);
12964 for_each_crtc(dev, c) { 13019 for_each_crtc(dev, c) {
12965 if (!c->primary->fb) 13020 obj = intel_fb_obj(c->primary->fb);
13021 if (obj == NULL)
12966 continue; 13022 continue;
12967 13023
12968 fb = to_intel_framebuffer(c->primary->fb); 13024 if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
12969 if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
12970 DRM_ERROR("failed to pin boot fb on pipe %d\n", 13025 DRM_ERROR("failed to pin boot fb on pipe %d\n",
12971 to_intel_crtc(c)->pipe); 13026 to_intel_crtc(c)->pipe);
12972 drm_framebuffer_unreference(c->primary->fb); 13027 drm_framebuffer_unreference(c->primary->fb);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index b5ec48913b47..c06a62a405b6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -746,6 +746,22 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
746} 746}
747 747
748static void 748static void
749hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
750{
751 switch (link_bw) {
752 case DP_LINK_BW_1_62:
753 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
754 break;
755 case DP_LINK_BW_2_7:
756 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
757 break;
758 case DP_LINK_BW_5_4:
759 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
760 break;
761 }
762}
763
764static void
749intel_dp_set_clock(struct intel_encoder *encoder, 765intel_dp_set_clock(struct intel_encoder *encoder,
750 struct intel_crtc_config *pipe_config, int link_bw) 766 struct intel_crtc_config *pipe_config, int link_bw)
751{ 767{
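[Each DP link rate maps 1:1 onto an LCPLL-derived DDI clock; the select names track half the per-lane bit rate in MHz (2.7 Gbps -> 1350). A standalone version of the mapping above; the DPCD codes are the DisplayPort spec values, but the select constants are placeholders for the real PORT_CLK_SEL_LCPLL_* register values:

        #include <stdio.h>

        /* DPCD link-bw codes, in units of 0.27 Gbps per the DP spec. */
        enum { DP_LINK_BW_1_62 = 0x06, DP_LINK_BW_2_7 = 0x0a, DP_LINK_BW_5_4 = 0x14 };

        enum lcpll_sel { LCPLL_810, LCPLL_1350, LCPLL_2700, LCPLL_INVALID };

        static enum lcpll_sel ddi_pll_sel_for_bw(int link_bw)
        {
                switch (link_bw) {
                case DP_LINK_BW_1_62: return LCPLL_810;
                case DP_LINK_BW_2_7:  return LCPLL_1350;
                case DP_LINK_BW_5_4:  return LCPLL_2700;
                default:              return LCPLL_INVALID;
                }
        }

        int main(void)
        {
                printf("%d\n", ddi_pll_sel_for_bw(DP_LINK_BW_2_7)); /* 1 */
                return 0;
        }
]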
@@ -756,8 +772,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
756 if (IS_G4X(dev)) { 772 if (IS_G4X(dev)) {
757 divisor = gen4_dpll; 773 divisor = gen4_dpll;
758 count = ARRAY_SIZE(gen4_dpll); 774 count = ARRAY_SIZE(gen4_dpll);
759 } else if (IS_HASWELL(dev)) {
760 /* Haswell has special-purpose DP DDI clocks. */
761 } else if (HAS_PCH_SPLIT(dev)) { 775 } else if (HAS_PCH_SPLIT(dev)) {
762 divisor = pch_dpll; 776 divisor = pch_dpll;
763 count = ARRAY_SIZE(pch_dpll); 777 count = ARRAY_SIZE(pch_dpll);
@@ -928,7 +942,10 @@ found:
928 &pipe_config->dp_m2_n2); 942 &pipe_config->dp_m2_n2);
929 } 943 }
930 944
931 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 945 if (HAS_DDI(dev))
946 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
947 else
948 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
932 949
933 return true; 950 return true;
934} 951}
@@ -1316,8 +1333,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
1316 1333
1317 DRM_DEBUG_KMS("Turn eDP power off\n"); 1334 DRM_DEBUG_KMS("Turn eDP power off\n");
1318 1335
1319 edp_wait_backlight_off(intel_dp);
1320
1321 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 1336 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
1322 1337
1323 pp = ironlake_get_pp_control(intel_dp); 1338 pp = ironlake_get_pp_control(intel_dp);
@@ -1353,6 +1368,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
1353 return; 1368 return;
1354 1369
1355 DRM_DEBUG_KMS("\n"); 1370 DRM_DEBUG_KMS("\n");
1371
1372 intel_panel_enable_backlight(intel_dp->attached_connector);
1373
1356 /* 1374 /*
1357 * If we enable the backlight right away following a panel power 1375 * If we enable the backlight right away following a panel power
1358 * on, we may see slight flicker as the panel syncs with the eDP 1376 * on, we may see slight flicker as the panel syncs with the eDP
@@ -1367,8 +1385,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
1367 1385
1368 I915_WRITE(pp_ctrl_reg, pp); 1386 I915_WRITE(pp_ctrl_reg, pp);
1369 POSTING_READ(pp_ctrl_reg); 1387 POSTING_READ(pp_ctrl_reg);
1370
1371 intel_panel_enable_backlight(intel_dp->attached_connector);
1372} 1388}
1373 1389
1374void intel_edp_backlight_off(struct intel_dp *intel_dp) 1390void intel_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1381,8 +1397,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
1381 if (!is_edp(intel_dp)) 1397 if (!is_edp(intel_dp))
1382 return; 1398 return;
1383 1399
1384 intel_panel_disable_backlight(intel_dp->attached_connector);
1385
1386 DRM_DEBUG_KMS("\n"); 1400 DRM_DEBUG_KMS("\n");
1387 pp = ironlake_get_pp_control(intel_dp); 1401 pp = ironlake_get_pp_control(intel_dp);
1388 pp &= ~EDP_BLC_ENABLE; 1402 pp &= ~EDP_BLC_ENABLE;
@@ -1392,6 +1406,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
1392 I915_WRITE(pp_ctrl_reg, pp); 1406 I915_WRITE(pp_ctrl_reg, pp);
1393 POSTING_READ(pp_ctrl_reg); 1407 POSTING_READ(pp_ctrl_reg);
1394 intel_dp->last_backlight_off = jiffies; 1408 intel_dp->last_backlight_off = jiffies;
1409
1410 edp_wait_backlight_off(intel_dp);
1411
1412 intel_panel_disable_backlight(intel_dp->attached_connector);
1395} 1413}
1396 1414
1397static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 1415static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
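[Taken together, the backlight hunks reorder the off path: clear the BLC enable bit in the PP register, let the panel's backlight-off delay elapse, and only then drop the PWM backlight (with the mirror-image ordering on enable). A model of the off sequence, with illustrative bit positions and delays:

        #include <stdio.h>

        static void pp_write(unsigned int val) { printf("PP_CONTROL <- %#x\n", val); }
        static void wait_ms(int ms)            { printf("wait %d ms\n", ms); }
        static void pwm_disable(void)          { printf("PWM off\n"); }

        /* The panel never sees the PWM drop while its eDP backlight enable
         * is still asserted. */
        static void edp_backlight_off(unsigned int pp, int backlight_off_delay_ms)
        {
                pp &= ~(1u << 2);       /* EDP_BLC_ENABLE, illustrative bit */
                pp_write(pp);
                wait_ms(backlight_off_delay_ms);
                pwm_disable();
        }

        int main(void)
        {
                edp_backlight_off(0x7, 200);
                return 0;
        }
]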
@@ -1751,7 +1769,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1751 struct drm_i915_private *dev_priv = dev->dev_private; 1769 struct drm_i915_private *dev_priv = dev->dev_private;
1752 struct drm_crtc *crtc = dig_port->base.base.crtc; 1770 struct drm_crtc *crtc = dig_port->base.base.crtc;
1753 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1771 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1754 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj; 1772 struct drm_i915_gem_object *obj = intel_fb_obj(crtc->primary->fb);
1755 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 1773 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1756 1774
1757 dev_priv->psr.source_ok = false; 1775 dev_priv->psr.source_ok = false;
@@ -1784,7 +1802,6 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1784 return false; 1802 return false;
1785 } 1803 }
1786 1804
1787 obj = to_intel_framebuffer(crtc->primary->fb)->obj;
1788 if (obj->tiling_mode != I915_TILING_X || 1805 if (obj->tiling_mode != I915_TILING_X ||
1789 obj->fence_reg == I915_FENCE_REG_NONE) { 1806 obj->fence_reg == I915_FENCE_REG_NONE) {
1790 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); 1807 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
@@ -3815,6 +3832,22 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
3815 intel_dp_check_link_status(intel_dp); 3832 intel_dp_check_link_status(intel_dp);
3816} 3833}
3817 3834
3835bool
3836intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
3837{
3838 struct intel_dp *intel_dp = &intel_dig_port->dp;
3839
3840 if (long_hpd)
3841 return true;
3842
3843 /*
3844 * we'll check the link status via the normal hot plug path later -
3845 * but for short hpds we should check it now
3846 */
3847 intel_dp_check_link_status(intel_dp);
3848 return false;
3849}
3850
3818/* Return which DP Port should be selected for Transcoder DP control */ 3851/* Return which DP Port should be selected for Transcoder DP control */
3819int 3852int
3820intel_trans_dp_port_sel(struct drm_crtc *crtc) 3853intel_trans_dp_port_sel(struct drm_crtc *crtc)
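[The new hpd_pulse hook lets the IRQ code hand short pulses to the port for an inline link check and only schedule full detection for long pulses. A sketch of the dispatch this callback shape enables; the IRQ-side caller here is a model, not the driver's:

        #include <stdbool.h>
        #include <stdio.h>

        struct dig_port;
        typedef bool (*hpd_pulse_fn)(struct dig_port *, bool long_hpd);

        struct dig_port { hpd_pulse_fn hpd_pulse; };

        static bool dp_hpd_pulse(struct dig_port *port, bool long_hpd)
        {
                (void)port;
                if (long_hpd)
                        return true;    /* caller should run the full detect path */
                printf("short pulse: check link status now\n");
                return false;
        }

        /* Only queue the heavy hotplug work when the port asks for it. */
        static void handle_hpd(struct dig_port *port, bool long_hpd)
        {
                if (port->hpd_pulse && port->hpd_pulse(port, long_hpd))
                        printf("queue full hotplug processing\n");
        }

        int main(void)
        {
                struct dig_port port = { .hpd_pulse = dp_hpd_pulse };

                handle_hpd(&port, false);
                handle_hpd(&port, true);
                return 0;
        }
]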
@@ -4387,6 +4420,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4387void 4420void
4388intel_dp_init(struct drm_device *dev, int output_reg, enum port port) 4421intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4389{ 4422{
4423 struct drm_i915_private *dev_priv = dev->dev_private;
4390 struct intel_digital_port *intel_dig_port; 4424 struct intel_digital_port *intel_dig_port;
4391 struct intel_encoder *intel_encoder; 4425 struct intel_encoder *intel_encoder;
4392 struct drm_encoder *encoder; 4426 struct drm_encoder *encoder;
@@ -4443,6 +4477,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4443 intel_encoder->cloneable = 0; 4477 intel_encoder->cloneable = 0;
4444 intel_encoder->hot_plug = intel_dp_hot_plug; 4478 intel_encoder->hot_plug = intel_dp_hot_plug;
4445 4479
4480 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
4481 dev_priv->hpd_irq_port[port] = intel_dig_port;
4482
4446 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { 4483 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
4447 drm_encoder_cleanup(encoder); 4484 drm_encoder_cleanup(encoder);
4448 kfree(intel_dig_port); 4485 kfree(intel_dig_port);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5f7c7bd94d90..fa19744ed6c0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -307,6 +307,9 @@ struct intel_crtc_config {
307 /* Selected dpll when shared or DPLL_ID_PRIVATE. */ 307 /* Selected dpll when shared or DPLL_ID_PRIVATE. */
308 enum intel_dpll_id shared_dpll; 308 enum intel_dpll_id shared_dpll;
309 309
310 /* PORT_CLK_SEL for DDI ports. */
311 uint32_t ddi_pll_sel;
312
310 /* Actual register state of the dpll, for shared dpll cross-checking. */ 313 /* Actual register state of the dpll, for shared dpll cross-checking. */
311 struct intel_dpll_hw_state dpll_hw_state; 314 struct intel_dpll_hw_state dpll_hw_state;
312 315
@@ -338,6 +341,7 @@ struct intel_crtc_config {
338 u32 pos; 341 u32 pos;
339 u32 size; 342 u32 size;
340 bool enabled; 343 bool enabled;
344 bool force_thru;
341 } pch_pfit; 345 } pch_pfit;
342 346
343 /* FDI configuration, only valid if has_pch_encoder is set. */ 347 /* FDI configuration, only valid if has_pch_encoder is set. */
@@ -398,8 +402,6 @@ struct intel_crtc {
398 struct intel_crtc_config *new_config; 402 struct intel_crtc_config *new_config;
399 bool new_enabled; 403 bool new_enabled;
400 404
401 uint32_t ddi_pll_sel;
402
403 /* reset counter value when the last flip was submitted */ 405 /* reset counter value when the last flip was submitted */
404 unsigned int reset_counter; 406 unsigned int reset_counter;
405 407
@@ -485,6 +487,7 @@ struct cxsr_latency {
485#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) 487#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
486#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 488#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
487#define to_intel_plane(x) container_of(x, struct intel_plane, base) 489#define to_intel_plane(x) container_of(x, struct intel_plane, base)
490#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
488 491
489struct intel_hdmi { 492struct intel_hdmi {
490 u32 hdmi_reg; 493 u32 hdmi_reg;
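[intel_fb_obj() folds the repeated "if (fb) obj = to_intel_framebuffer(fb)->obj" dance into a NULL-propagating accessor, which is what enables all the conversions in this series. An equivalent static-inline rendering of the same idea; the cast stands in for container_of and works here because base is the first member:

        #include <stddef.h>
        #include <stdio.h>

        struct gem_obj { int id; };
        struct framebuffer { int dummy; };
        struct intel_framebuffer { struct framebuffer base; struct gem_obj *obj; };

        /* NULL in, NULL out, so callers can chain it on a possibly-absent
         * plane->fb. */
        static inline struct gem_obj *fb_obj(struct framebuffer *fb)
        {
                return fb ? ((struct intel_framebuffer *)fb)->obj : NULL;
        }

        int main(void)
        {
                struct gem_obj o = { 42 };
                struct intel_framebuffer ifb = { .obj = &o };

                printf("%p %d\n", (void *)fb_obj(NULL), fb_obj(&ifb.base)->id);
                return 0;
        }
]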
@@ -567,6 +570,7 @@ struct intel_digital_port {
567 u32 saved_port_bits; 570 u32 saved_port_bits;
568 struct intel_dp dp; 571 struct intel_dp dp;
569 struct intel_hdmi hdmi; 572 struct intel_hdmi hdmi;
573 bool (*hpd_pulse)(struct intel_digital_port *, bool);
570}; 574};
571 575
572static inline int 576static inline int
@@ -706,10 +710,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
706 enum transcoder cpu_transcoder); 710 enum transcoder cpu_transcoder);
707void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); 711void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
708void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); 712void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
709void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
710bool intel_ddi_pll_select(struct intel_crtc *crtc); 713bool intel_ddi_pll_select(struct intel_crtc *crtc);
711void intel_ddi_pll_enable(struct intel_crtc *crtc);
712void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
713void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); 714void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
714void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); 715void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
715bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); 716bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -722,7 +723,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
722const char *intel_output_name(int output); 723const char *intel_output_name(int output);
723bool intel_has_pending_fb_unpin(struct drm_device *dev); 724bool intel_has_pending_fb_unpin(struct drm_device *dev);
724int intel_pch_rawclk(struct drm_device *dev); 725int intel_pch_rawclk(struct drm_device *dev);
725int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
726void intel_mark_busy(struct drm_device *dev); 726void intel_mark_busy(struct drm_device *dev);
727void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, 727void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
728 struct intel_engine_cs *ring); 728 struct intel_engine_cs *ring);
@@ -793,12 +793,18 @@ __intel_framebuffer_create(struct drm_device *dev,
793void intel_prepare_page_flip(struct drm_device *dev, int plane); 793void intel_prepare_page_flip(struct drm_device *dev, int plane);
794void intel_finish_page_flip(struct drm_device *dev, int pipe); 794void intel_finish_page_flip(struct drm_device *dev, int pipe);
795void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 795void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
796
797/* shared dpll functions */
796struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); 798struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
797void assert_shared_dpll(struct drm_i915_private *dev_priv, 799void assert_shared_dpll(struct drm_i915_private *dev_priv,
798 struct intel_shared_dpll *pll, 800 struct intel_shared_dpll *pll,
799 bool state); 801 bool state);
800#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) 802#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
801#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) 803#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
804struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
805void intel_put_shared_dpll(struct intel_crtc *crtc);
806
807/* modesetting asserts */
802void assert_pll(struct drm_i915_private *dev_priv, 808void assert_pll(struct drm_i915_private *dev_priv,
803 enum pipe pipe, bool state); 809 enum pipe pipe, bool state);
804#define assert_pll_enabled(d, p) assert_pll(d, p, true) 810#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -831,7 +837,6 @@ void hsw_disable_ips(struct intel_crtc *crtc);
831void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); 837void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
832enum intel_display_power_domain 838enum intel_display_power_domain
833intel_display_port_power_domain(struct intel_encoder *intel_encoder); 839intel_display_port_power_domain(struct intel_encoder *intel_encoder);
834int valleyview_get_vco(struct drm_i915_private *dev_priv);
835void intel_mode_from_pipe_config(struct drm_display_mode *mode, 840void intel_mode_from_pipe_config(struct drm_display_mode *mode,
836 struct intel_crtc_config *pipe_config); 841 struct intel_crtc_config *pipe_config);
837int intel_format_to_fourcc(int format); 842int intel_format_to_fourcc(int format);
@@ -852,6 +857,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
852bool intel_dp_compute_config(struct intel_encoder *encoder, 857bool intel_dp_compute_config(struct intel_encoder *encoder,
853 struct intel_crtc_config *pipe_config); 858 struct intel_crtc_config *pipe_config);
854bool intel_dp_is_edp(struct drm_device *dev, enum port port); 859bool intel_dp_is_edp(struct drm_device *dev, enum port port);
860bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
861 bool long_hpd);
855void intel_edp_backlight_on(struct intel_dp *intel_dp); 862void intel_edp_backlight_on(struct intel_dp *intel_dp);
856void intel_edp_backlight_off(struct intel_dp *intel_dp); 863void intel_edp_backlight_off(struct intel_dp *intel_dp);
857void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); 864void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -863,7 +870,6 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
863void intel_edp_psr_exit(struct drm_device *dev); 870void intel_edp_psr_exit(struct drm_device *dev);
864void intel_edp_psr_init(struct drm_device *dev); 871void intel_edp_psr_init(struct drm_device *dev);
865 872
866
867/* intel_dsi.c */ 873/* intel_dsi.c */
868void intel_dsi_init(struct drm_device *dev); 874void intel_dsi_init(struct drm_device *dev);
869 875
@@ -1005,8 +1011,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1005void intel_init_runtime_pm(struct drm_i915_private *dev_priv); 1011void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
1006void intel_fini_runtime_pm(struct drm_i915_private *dev_priv); 1012void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
1007void ilk_wm_get_hw_state(struct drm_device *dev); 1013void ilk_wm_get_hw_state(struct drm_device *dev);
1008void __vlv_set_power_well(struct drm_i915_private *dev_priv, 1014
1009 enum punit_power_well power_well_id, bool enable);
1010 1015
1011/* intel_sdvo.c */ 1016/* intel_sdvo.c */
1012bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); 1017bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 44e17fd781b8..f475414671d8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -107,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
107 sizes->surface_depth); 107 sizes->surface_depth);
108 108
109 size = mode_cmd.pitches[0] * mode_cmd.height; 109 size = mode_cmd.pitches[0] * mode_cmd.height;
110 size = ALIGN(size, PAGE_SIZE); 110 size = PAGE_ALIGN(size);
111 obj = i915_gem_object_create_stolen(dev, size); 111 obj = i915_gem_object_create_stolen(dev, size);
112 if (obj == NULL) 112 if (obj == NULL)
113 obj = i915_gem_alloc_object(dev, size); 113 obj = i915_gem_alloc_object(dev, size);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d33b61d0dd33..b31088a551f2 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,11 +34,6 @@
34#include <drm/i915_drm.h> 34#include <drm/i915_drm.h>
35#include "i915_drv.h" 35#include "i915_drv.h"
36 36
37enum disp_clk {
38 CDCLK,
39 CZCLK
40};
41
42struct gmbus_port { 37struct gmbus_port {
43 const char *name; 38 const char *name;
44 int reg; 39 int reg;
@@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c)
63 return container_of(i2c, struct intel_gmbus, adapter); 58 return container_of(i2c, struct intel_gmbus, adapter);
64} 59}
65 60
66static int get_disp_clk_div(struct drm_i915_private *dev_priv,
67 enum disp_clk clk)
68{
69 u32 reg_val;
70 int clk_ratio;
71
72 reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
73
74 if (clk == CDCLK)
75 clk_ratio =
76 ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
77 else
78 clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
79
80 return clk_ratio;
81}
82
83static void gmbus_set_freq(struct drm_i915_private *dev_priv)
84{
85 int vco, gmbus_freq = 0, cdclk_div;
86
87 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
88
89 vco = valleyview_get_vco(dev_priv);
90
91 /* Get the CDCLK divide ratio */
92 cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
93
94 /*
95 * Program the gmbus_freq based on the cdclk frequency.
96 * BSpec erroneously claims we should aim for 4MHz, but
97 * in fact 1MHz is the correct frequency.
98 */
99 if (cdclk_div)
100 gmbus_freq = (vco << 1) / cdclk_div;
101
102 if (WARN_ON(gmbus_freq == 0))
103 return;
104
105 I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
106}
107
108void 61void
109intel_i2c_reset(struct drm_device *dev) 62intel_i2c_reset(struct drm_device *dev)
110{ 63{
111 struct drm_i915_private *dev_priv = dev->dev_private; 64 struct drm_i915_private *dev_priv = dev->dev_private;
112 65
113 /*
114 * In BIOS-less system, program the correct gmbus frequency
115 * before reading edid.
116 */
117 if (IS_VALLEYVIEW(dev))
118 gmbus_set_freq(dev_priv);
119
120 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 66 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
121 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); 67 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
122} 68}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4d29a83fd163..c511287bbb86 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,6 +51,7 @@ struct intel_lvds_encoder {
51 51
52 bool is_dual_link; 52 bool is_dual_link;
53 u32 reg; 53 u32 reg;
54 u32 a3_power;
54 55
55 struct intel_lvds_connector *attached_connector; 56 struct intel_lvds_connector *attached_connector;
56}; 57};
@@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
71 struct drm_device *dev = encoder->base.dev; 72 struct drm_device *dev = encoder->base.dev;
72 struct drm_i915_private *dev_priv = dev->dev_private; 73 struct drm_i915_private *dev_priv = dev->dev_private;
73 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 74 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
75 enum intel_display_power_domain power_domain;
74 u32 tmp; 76 u32 tmp;
75 77
78 power_domain = intel_display_port_power_domain(encoder);
79 if (!intel_display_power_enabled(dev_priv, power_domain))
80 return false;
81
76 tmp = I915_READ(lvds_encoder->reg); 82 tmp = I915_READ(lvds_encoder->reg);
77 83
78 if (!(tmp & LVDS_PORT_EN)) 84 if (!(tmp & LVDS_PORT_EN))
@@ -165,8 +171,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
165 171
166 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 172 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
167 * appropriately here, but we need to look more thoroughly into how 173 * appropriately here, but we need to look more thoroughly into how
168 * panels behave in the two modes. 174 * panels behave in the two modes. For now, let's just maintain the
175 * value we got from the BIOS.
169 */ 176 */
177 temp &= ~LVDS_A3_POWER_MASK;
178 temp |= lvds_encoder->a3_power;
170 179
171 /* Set the dithering flag on LVDS as needed, note that there is no 180 /* Set the dithering flag on LVDS as needed, note that there is no
172 * special lvds dither control bit on pch-split platforms, dithering is 181 * special lvds dither control bit on pch-split platforms, dithering is
@@ -264,7 +273,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
264 struct intel_crtc_config *pipe_config) 273 struct intel_crtc_config *pipe_config)
265{ 274{
266 struct drm_device *dev = intel_encoder->base.dev; 275 struct drm_device *dev = intel_encoder->base.dev;
267 struct drm_i915_private *dev_priv = dev->dev_private;
268 struct intel_lvds_encoder *lvds_encoder = 276 struct intel_lvds_encoder *lvds_encoder =
269 to_lvds_encoder(&intel_encoder->base); 277 to_lvds_encoder(&intel_encoder->base);
270 struct intel_connector *intel_connector = 278 struct intel_connector *intel_connector =
@@ -279,8 +287,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
279 return false; 287 return false;
280 } 288 }
281 289
282 if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) == 290 if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
283 LVDS_A3_POWER_UP)
284 lvds_bpp = 8*3; 291 lvds_bpp = 8*3;
285 else 292 else
286 lvds_bpp = 6*3; 293 lvds_bpp = 6*3;
@@ -1081,6 +1088,9 @@ out:
1081 DRM_DEBUG_KMS("detected %s-link lvds configuration\n", 1088 DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
1082 lvds_encoder->is_dual_link ? "dual" : "single"); 1089 lvds_encoder->is_dual_link ? "dual" : "single");
1083 1090
1091 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
1092 LVDS_A3_POWER_MASK;
1093
1084 /* 1094 /*
1085 * Unlock registers and just 1095 * Unlock registers and just
1086 * leave them unlocked 1096 * leave them unlocked
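[Caching a3_power at init means compute_config no longer reads the LVDS register, possibly with its power well down, just to learn the BIOS bpp choice. Modeled below, assuming the usual (3 << 6) A3 field layout; everything else is illustrative:

        #include <stdio.h>

        #define LVDS_A3_POWER_MASK (3u << 6)
        #define LVDS_A3_POWER_UP   (3u << 6)

        struct lvds { unsigned int a3_power; };

        /* Sample the BIOS-programmed A3 bits once and reuse the cache. */
        static void lvds_init(struct lvds *l, unsigned int reg_val)
        {
                l->a3_power = reg_val & LVDS_A3_POWER_MASK;
        }

        static int lvds_bpp(const struct lvds *l)
        {
                return l->a3_power == LVDS_A3_POWER_UP ? 8 * 3 : 6 * 3;
        }

        int main(void)
        {
                struct lvds l;

                lvds_init(&l, 0x800000c0);      /* pretend BIOS value */
                printf("bpp=%d\n", lvds_bpp(&l));
                return 0;
        }
]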
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b6e09f226230..780c3ab26f4f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
93 struct drm_device *dev = crtc->dev; 93 struct drm_device *dev = crtc->dev;
94 struct drm_i915_private *dev_priv = dev->dev_private; 94 struct drm_i915_private *dev_priv = dev->dev_private;
95 struct drm_framebuffer *fb = crtc->primary->fb; 95 struct drm_framebuffer *fb = crtc->primary->fb;
96 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 96 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
97 struct drm_i915_gem_object *obj = intel_fb->obj;
98 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 97 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
99 int cfb_pitch; 98 int cfb_pitch;
100 int i; 99 int i;
@@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
150 struct drm_device *dev = crtc->dev; 149 struct drm_device *dev = crtc->dev;
151 struct drm_i915_private *dev_priv = dev->dev_private; 150 struct drm_i915_private *dev_priv = dev->dev_private;
152 struct drm_framebuffer *fb = crtc->primary->fb; 151 struct drm_framebuffer *fb = crtc->primary->fb;
153 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 152 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
154 struct drm_i915_gem_object *obj = intel_fb->obj;
155 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 153 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
156 u32 dpfc_ctl; 154 u32 dpfc_ctl;
157 155
@@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
222 struct drm_device *dev = crtc->dev; 220 struct drm_device *dev = crtc->dev;
223 struct drm_i915_private *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
224 struct drm_framebuffer *fb = crtc->primary->fb; 222 struct drm_framebuffer *fb = crtc->primary->fb;
225 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 223 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
226 struct drm_i915_gem_object *obj = intel_fb->obj;
227 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 224 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
228 u32 dpfc_ctl; 225 u32 dpfc_ctl;
229 226
230 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); 227 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
231 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 228 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
229 dev_priv->fbc.threshold++;
230
231 switch (dev_priv->fbc.threshold) {
232 case 4:
233 case 3:
234 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
235 break;
236 case 2:
232 dpfc_ctl |= DPFC_CTL_LIMIT_2X; 237 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
233 else 238 break;
239 case 1:
234 dpfc_ctl |= DPFC_CTL_LIMIT_1X; 240 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
241 break;
242 }
235 dpfc_ctl |= DPFC_CTL_FENCE_EN; 243 dpfc_ctl |= DPFC_CTL_FENCE_EN;
236 if (IS_GEN5(dev)) 244 if (IS_GEN5(dev))
237 dpfc_ctl |= obj->fence_reg; 245 dpfc_ctl |= obj->fence_reg;
@@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
278 struct drm_device *dev = crtc->dev; 286 struct drm_device *dev = crtc->dev;
279 struct drm_i915_private *dev_priv = dev->dev_private; 287 struct drm_i915_private *dev_priv = dev->dev_private;
280 struct drm_framebuffer *fb = crtc->primary->fb; 288 struct drm_framebuffer *fb = crtc->primary->fb;
281 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 289 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
282 struct drm_i915_gem_object *obj = intel_fb->obj;
283 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 290 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
284 u32 dpfc_ctl; 291 u32 dpfc_ctl;
285 292
286 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); 293 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
287 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 294 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
295 dev_priv->fbc.threshold++;
296
297 switch (dev_priv->fbc.threshold) {
298 case 4:
299 case 3:
300 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
301 break;
302 case 2:
288 dpfc_ctl |= DPFC_CTL_LIMIT_2X; 303 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
289 else 304 break;
305 case 1:
290 dpfc_ctl |= DPFC_CTL_LIMIT_1X; 306 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
307 break;
308 }
309
291 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; 310 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
292 311
293 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 312 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
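[Both FBC paths now share one idea: the stolen-memory setup picks a compression threshold (1, 2 or 4, by how far the CFB had to shrink), 16bpp formats bump it one step, and thresholds 3 and 4 collapse onto the 4x limit. The same mapping as a pure function:

        #include <stdio.h>

        enum dpfc_limit { LIMIT_1X = 1, LIMIT_2X = 2, LIMIT_4X = 4 }; /* illustrative */

        static enum dpfc_limit fbc_limit(int threshold, int bytes_per_pixel)
        {
                if (bytes_per_pixel == 2)
                        threshold++;
                switch (threshold) {
                case 4:
                case 3:  return LIMIT_4X;
                case 2:  return LIMIT_2X;
                default: return LIMIT_1X;
                }
        }

        int main(void)
        {
                printf("%d\n", fbc_limit(1, 4)); /* 32bpp, full-size CFB -> 1x */
                printf("%d\n", fbc_limit(2, 2)); /* 16bpp, half-size CFB  -> 4x */
                return 0;
        }
]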
@@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev)
462 struct drm_crtc *crtc = NULL, *tmp_crtc; 481 struct drm_crtc *crtc = NULL, *tmp_crtc;
463 struct intel_crtc *intel_crtc; 482 struct intel_crtc *intel_crtc;
464 struct drm_framebuffer *fb; 483 struct drm_framebuffer *fb;
465 struct intel_framebuffer *intel_fb;
466 struct drm_i915_gem_object *obj; 484 struct drm_i915_gem_object *obj;
467 const struct drm_display_mode *adjusted_mode; 485 const struct drm_display_mode *adjusted_mode;
468 unsigned int max_width, max_height; 486 unsigned int max_width, max_height;
@@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev)
507 525
508 intel_crtc = to_intel_crtc(crtc); 526 intel_crtc = to_intel_crtc(crtc);
509 fb = crtc->primary->fb; 527 fb = crtc->primary->fb;
510 intel_fb = to_intel_framebuffer(fb); 528 obj = intel_fb_obj(fb);
511 obj = intel_fb->obj;
512 adjusted_mode = &intel_crtc->config.adjusted_mode; 529 adjusted_mode = &intel_crtc->config.adjusted_mode;
513 530
514 if (i915.enable_fbc < 0) { 531 if (i915.enable_fbc < 0) {
@@ -566,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev)
566 if (in_dbg_master()) 583 if (in_dbg_master())
567 goto out_disable; 584 goto out_disable;
568 585
569 if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { 586 if (i915_gem_stolen_setup_compression(dev, obj->base.size,
587 drm_format_plane_cpp(fb->pixel_format, 0))) {
570 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL)) 588 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
571 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); 589 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
572 goto out_disable; 590 goto out_disable;
@@ -792,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
792 return NULL; 810 return NULL;
793} 811}
794 812
795static void pineview_disable_cxsr(struct drm_device *dev) 813void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
796{ 814{
797 struct drm_i915_private *dev_priv = dev->dev_private; 815 struct drm_device *dev = dev_priv->dev;
816 u32 val;
798 817
799 /* deactivate cxsr */ 818 if (IS_VALLEYVIEW(dev)) {
800 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); 819 I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
820 } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
821 I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
822 } else if (IS_PINEVIEW(dev)) {
823 val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
824 val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
825 I915_WRITE(DSPFW3, val);
826 } else if (IS_I945G(dev) || IS_I945GM(dev)) {
827 val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
828 _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
829 I915_WRITE(FW_BLC_SELF, val);
830 } else if (IS_I915GM(dev)) {
831 val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
832 _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
833 I915_WRITE(INSTPM, val);
834 } else {
835 return;
836 }
837
838 DRM_DEBUG_KMS("memory self-refresh is %s\n",
839 enable ? "enabled" : "disabled");
801} 840}
802 841
803/* 842/*
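[intel_set_memory_cxsr() folds five per-platform self-refresh toggles behind one bool, and the watermark updaters below all use it in the same drop-reprogram-reenable shape. Sketched:

        #include <stdbool.h>
        #include <stdio.h>

        static void set_memory_cxsr(bool enable)
        {
                printf("memory self-refresh %s\n", enable ? "enabled" : "disabled");
        }

        /* Compute the self-refresh watermark, leave CxSR if the result is
         * unusable, rewrite the FIFO watermarks, then re-enter CxSR only
         * when the new configuration supports it. */
        static void update_wm(bool sr_wm_valid)
        {
                bool cxsr_enabled = sr_wm_valid;

                if (!cxsr_enabled)
                        set_memory_cxsr(false);

                printf("write DSPFW1..3\n");    /* reprogram watermarks */

                if (cxsr_enabled)
                        set_memory_cxsr(true);
        }

        int main(void)
        {
                update_wm(true);
                update_wm(false);
                return 0;
        }
]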
@@ -1036,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
1036 dev_priv->fsb_freq, dev_priv->mem_freq); 1075 dev_priv->fsb_freq, dev_priv->mem_freq);
1037 if (!latency) { 1076 if (!latency) {
1038 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 1077 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1039 pineview_disable_cxsr(dev); 1078 intel_set_memory_cxsr(dev_priv, false);
1040 return; 1079 return;
1041 } 1080 }
1042 1081
@@ -1087,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
1087 I915_WRITE(DSPFW3, reg); 1126 I915_WRITE(DSPFW3, reg);
1088 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); 1127 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1089 1128
1090 /* activate cxsr */ 1129 intel_set_memory_cxsr(dev_priv, true);
1091 I915_WRITE(DSPFW3,
1092 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1093 DRM_DEBUG_KMS("Self-refresh is enabled\n");
1094 } else { 1130 } else {
1095 pineview_disable_cxsr(dev); 1131 intel_set_memory_cxsr(dev_priv, false);
1096 DRM_DEBUG_KMS("Self-refresh is disabled\n");
1097 } 1132 }
1098} 1133}
1099 1134
@@ -1319,6 +1354,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
1319 int plane_sr, cursor_sr; 1354 int plane_sr, cursor_sr;
1320 int ignore_plane_sr, ignore_cursor_sr; 1355 int ignore_plane_sr, ignore_cursor_sr;
1321 unsigned int enabled = 0; 1356 unsigned int enabled = 0;
1357 bool cxsr_enabled;
1322 1358
1323 vlv_update_drain_latency(dev); 1359 vlv_update_drain_latency(dev);
1324 1360
@@ -1345,10 +1381,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
1345 &valleyview_wm_info, 1381 &valleyview_wm_info,
1346 &valleyview_cursor_wm_info, 1382 &valleyview_cursor_wm_info,
1347 &ignore_plane_sr, &cursor_sr)) { 1383 &ignore_plane_sr, &cursor_sr)) {
1348 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); 1384 cxsr_enabled = true;
1349 } else { 1385 } else {
1350 I915_WRITE(FW_BLC_SELF_VLV, 1386 cxsr_enabled = false;
1351 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); 1387 intel_set_memory_cxsr(dev_priv, false);
1352 plane_sr = cursor_sr = 0; 1388 plane_sr = cursor_sr = 0;
1353 } 1389 }
1354 1390
@@ -1368,6 +1404,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
1368 I915_WRITE(DSPFW3, 1404 I915_WRITE(DSPFW3,
1369 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | 1405 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1370 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1406 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1407
1408 if (cxsr_enabled)
1409 intel_set_memory_cxsr(dev_priv, true);
1371} 1410}
1372 1411
1373static void g4x_update_wm(struct drm_crtc *crtc) 1412static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1378,6 +1417,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1378 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1417 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1379 int plane_sr, cursor_sr; 1418 int plane_sr, cursor_sr;
1380 unsigned int enabled = 0; 1419 unsigned int enabled = 0;
1420 bool cxsr_enabled;
1381 1421
1382 if (g4x_compute_wm0(dev, PIPE_A, 1422 if (g4x_compute_wm0(dev, PIPE_A,
1383 &g4x_wm_info, latency_ns, 1423 &g4x_wm_info, latency_ns,
@@ -1397,10 +1437,10 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1397 &g4x_wm_info, 1437 &g4x_wm_info,
1398 &g4x_cursor_wm_info, 1438 &g4x_cursor_wm_info,
1399 &plane_sr, &cursor_sr)) { 1439 &plane_sr, &cursor_sr)) {
1400 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 1440 cxsr_enabled = true;
1401 } else { 1441 } else {
1402 I915_WRITE(FW_BLC_SELF, 1442 cxsr_enabled = false;
1403 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); 1443 intel_set_memory_cxsr(dev_priv, false);
1404 plane_sr = cursor_sr = 0; 1444 plane_sr = cursor_sr = 0;
1405 } 1445 }
1406 1446
@@ -1421,6 +1461,9 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1421 I915_WRITE(DSPFW3, 1461 I915_WRITE(DSPFW3,
1422 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | 1462 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1423 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1463 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1464
1465 if (cxsr_enabled)
1466 intel_set_memory_cxsr(dev_priv, true);
1424} 1467}
1425 1468
1426static void i965_update_wm(struct drm_crtc *unused_crtc) 1469static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1430,6 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1430 struct drm_crtc *crtc; 1473 struct drm_crtc *crtc;
1431 int srwm = 1; 1474 int srwm = 1;
1432 int cursor_sr = 16; 1475 int cursor_sr = 16;
1476 bool cxsr_enabled;
1433 1477
1434 /* Calc sr entries for one plane configs */ 1478 /* Calc sr entries for one plane configs */
1435 crtc = single_enabled_crtc(dev); 1479 crtc = single_enabled_crtc(dev);
@@ -1471,13 +1515,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1471 DRM_DEBUG_KMS("self-refresh watermark: display plane %d " 1515 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1472 "cursor %d\n", srwm, cursor_sr); 1516 "cursor %d\n", srwm, cursor_sr);
1473 1517
1474 if (IS_CRESTLINE(dev)) 1518 cxsr_enabled = true;
1475 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1476 } else { 1519 } else {
1520 cxsr_enabled = false;
1477 /* Turn off self refresh if both pipes are enabled */ 1521 /* Turn off self refresh if both pipes are enabled */
1478 if (IS_CRESTLINE(dev)) 1522 intel_set_memory_cxsr(dev_priv, false);
1479 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1480 & ~FW_BLC_SELF_EN);
1481 } 1523 }
1482 1524
1483 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 1525 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1489,6 +1531,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1489 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 1531 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1490 /* update cursor SR watermark */ 1532 /* update cursor SR watermark */
1491 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1533 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1534
1535 if (cxsr_enabled)
1536 intel_set_memory_cxsr(dev_priv, true);
1492} 1537}
1493 1538
1494static void i9xx_update_wm(struct drm_crtc *unused_crtc) 1539static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1548,12 +1593,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1548 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 1593 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1549 1594
1550 if (IS_I915GM(dev) && enabled) { 1595 if (IS_I915GM(dev) && enabled) {
1551 struct intel_framebuffer *fb; 1596 struct drm_i915_gem_object *obj;
1552 1597
1553 fb = to_intel_framebuffer(enabled->primary->fb); 1598 obj = intel_fb_obj(enabled->primary->fb);
1554 1599
1555 /* self-refresh seems busted with untiled */ 1600 /* self-refresh seems busted with untiled */
1556 if (fb->obj->tiling_mode == I915_TILING_NONE) 1601 if (obj->tiling_mode == I915_TILING_NONE)
1557 enabled = NULL; 1602 enabled = NULL;
1558 } 1603 }
1559 1604
@@ -1563,10 +1608,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1563 cwm = 2; 1608 cwm = 2;
1564 1609
1565 /* Play safe and disable self-refresh before adjusting watermarks. */ 1610 /* Play safe and disable self-refresh before adjusting watermarks. */
1566 if (IS_I945G(dev) || IS_I945GM(dev)) 1611 intel_set_memory_cxsr(dev_priv, false);
1567 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1568 else if (IS_I915GM(dev))
1569 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
1570 1612
1571 /* Calc sr entries for one plane configs */ 1613 /* Calc sr entries for one plane configs */
1572 if (HAS_FW_BLC(dev) && enabled) { 1614 if (HAS_FW_BLC(dev) && enabled) {
@@ -1612,17 +1654,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1612 I915_WRITE(FW_BLC, fwater_lo); 1654 I915_WRITE(FW_BLC, fwater_lo);
1613 I915_WRITE(FW_BLC2, fwater_hi); 1655 I915_WRITE(FW_BLC2, fwater_hi);
1614 1656
1615 if (HAS_FW_BLC(dev)) { 1657 if (enabled)
1616 if (enabled) { 1658 intel_set_memory_cxsr(dev_priv, true);
1617 if (IS_I945G(dev) || IS_I945GM(dev))
1618 I915_WRITE(FW_BLC_SELF,
1619 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1620 else if (IS_I915GM(dev))
1621 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
1622 DRM_DEBUG_KMS("memory self refresh enabled\n");
1623 } else
1624 DRM_DEBUG_KMS("memory self refresh disabled\n");
1625 }
1626} 1659}
1627 1660
1628static void i845_update_wm(struct drm_crtc *unused_crtc) 1661static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -3150,6 +3183,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3150 if (val < dev_priv->rps.max_freq_softlimit) 3183 if (val < dev_priv->rps.max_freq_softlimit)
3151 mask |= GEN6_PM_RP_UP_THRESHOLD; 3184 mask |= GEN6_PM_RP_UP_THRESHOLD;
3152 3185
3186 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
3187 mask &= dev_priv->pm_rps_events;
3188
3153 /* IVB and SNB hard hangs on looping batchbuffer 3189 /* IVB and SNB hard hangs on looping batchbuffer
3154 * if GEN6_PM_UP_EI_EXPIRED is masked. 3190 * if GEN6_PM_UP_EI_EXPIRED is masked.
3155 */ 3191 */
@@ -3493,15 +3529,23 @@ static void gen8_enable_rps(struct drm_device *dev)
3493 for_each_ring(ring, dev_priv, unused) 3529 for_each_ring(ring, dev_priv, unused)
3494 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 3530 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3495 I915_WRITE(GEN6_RC_SLEEP, 0); 3531 I915_WRITE(GEN6_RC_SLEEP, 0);
3496 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 3532 if (IS_BROADWELL(dev))
3533 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
3534 else
3535 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3497 3536
3498 /* 3: Enable RC6 */ 3537 /* 3: Enable RC6 */
3499 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 3538 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3500 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 3539 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3501 intel_print_rc6_info(dev, rc6_mask); 3540 intel_print_rc6_info(dev, rc6_mask);
3502 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 3541 if (IS_BROADWELL(dev))
3503 GEN6_RC_CTL_EI_MODE(1) | 3542 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3504 rc6_mask); 3543 GEN7_RC_CTL_TO_MODE |
3544 rc6_mask);
3545 else
3546 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3547 GEN6_RC_CTL_EI_MODE(1) |
3548 rc6_mask);
3505 3549
3506 /* 4 Program defaults and thresholds for RPS*/ 3550 /* 4 Program defaults and thresholds for RPS*/
3507 I915_WRITE(GEN6_RPNSWREQ, 3551 I915_WRITE(GEN6_RPNSWREQ,
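[The BDW-only threshold of 625 appears to be 800us expressed in the counter's 1.28us tick, matching the hunk's "800us/1.28 for TO" comment; checked:

        #include <stdio.h>

        int main(void)
        {
                /* 800 us in 1.28 us counter ticks, per the hunk's comment. */
                printf("%.0f\n", 800.0 / 1.28); /* 625 */
                return 0;
        }
]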
@@ -4078,6 +4122,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
4078 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 4122 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4079 4123
4080 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 4124 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4125 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
4081 4126
4082 I915_WRITE(GEN6_RP_CONTROL, 4127 I915_WRITE(GEN6_RP_CONTROL,
4083 GEN6_RP_MEDIA_TURBO | 4128 GEN6_RP_MEDIA_TURBO |
@@ -4098,9 +4143,11 @@ static void valleyview_enable_rps(struct drm_device *dev)
4098 4143
4099 /* allows RC6 residency counter to work */ 4144 /* allows RC6 residency counter to work */
4100 I915_WRITE(VLV_COUNTER_CONTROL, 4145 I915_WRITE(VLV_COUNTER_CONTROL,
4101 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | 4146 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
4147 VLV_RENDER_RC0_COUNT_EN |
4102 VLV_MEDIA_RC6_COUNT_EN | 4148 VLV_MEDIA_RC6_COUNT_EN |
4103 VLV_RENDER_RC6_COUNT_EN)); 4149 VLV_RENDER_RC6_COUNT_EN));
4150
4104 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 4151 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4105 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 4152 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
4106 4153
@@ -5328,7 +5375,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
5328 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE)); 5375 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5329 5376
5330 I915_WRITE(_3D_CHICKEN3, 5377 I915_WRITE(_3D_CHICKEN3,
5331 _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)); 5378 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
5332 5379
5333 I915_WRITE(COMMON_SLICE_CHICKEN2, 5380 I915_WRITE(COMMON_SLICE_CHICKEN2,
5334 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE)); 5381 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5563,10 +5610,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
5563 } 5610 }
5564 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); 5611 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5565 5612
5566 dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
5567 DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
5568 dev_priv->vlv_cdclk_freq);
5569
5570 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); 5613 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5571 5614
5572 /* WaDisableEarlyCull:vlv */ 5615 /* WaDisableEarlyCull:vlv */
@@ -5982,34 +6025,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
5982 return true; 6025 return true;
5983} 6026}
5984 6027
5985void __vlv_set_power_well(struct drm_i915_private *dev_priv, 6028static void vlv_set_power_well(struct drm_i915_private *dev_priv,
5986 enum punit_power_well power_well_id, bool enable) 6029 struct i915_power_well *power_well, bool enable)
5987{ 6030{
5988 struct drm_device *dev = dev_priv->dev; 6031 enum punit_power_well power_well_id = power_well->data;
5989 u32 mask; 6032 u32 mask;
5990 u32 state; 6033 u32 state;
5991 u32 ctrl; 6034 u32 ctrl;
5992 enum pipe pipe;
5993
5994 if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
5995 if (enable) {
5996 /*
5997 * Enable the CRI clock source so we can get at the
5998 * display and the reference clock for VGA
5999 * hotplug / manual detection.
6000 */
6001 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6002 DPLL_REFA_CLK_ENABLE_VLV |
6003 DPLL_INTEGRATED_CRI_CLK_VLV);
6004 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6005 } else {
6006 for_each_pipe(pipe)
6007 assert_pll_disabled(dev_priv, pipe);
6008 /* Assert common reset */
6009 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
6010 ~DPIO_CMNRST);
6011 }
6012 }
6013 6035
6014 mask = PUNIT_PWRGT_MASK(power_well_id); 6036 mask = PUNIT_PWRGT_MASK(power_well_id);
6015 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : 6037 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -6037,28 +6059,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv,
6037 6059
6038out: 6060out:
6039 mutex_unlock(&dev_priv->rps.hw_lock); 6061 mutex_unlock(&dev_priv->rps.hw_lock);
6040
6041 /*
6042 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
6043 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
6044 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
6045 * b. The other bits such as sfr settings / modesel may all
6046 * be set to 0.
6047 *
6048 * This should only be done on init and resume from S3 with
6049 * both PLLs disabled, or we risk losing DPIO and PLL
6050 * synchronization.
6051 */
6052 if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
6053 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
6054}
6055
6056static void vlv_set_power_well(struct drm_i915_private *dev_priv,
6057 struct i915_power_well *power_well, bool enable)
6058{
6059 enum punit_power_well power_well_id = power_well->data;
6060
6061 __vlv_set_power_well(dev_priv, power_well_id, enable);
6062} 6062}
6063 6063
6064static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv, 6064static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -6150,6 +6150,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
6150 vlv_set_power_well(dev_priv, power_well, false); 6150 vlv_set_power_well(dev_priv, power_well, false);
6151} 6151}
6152 6152
6153static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6154 struct i915_power_well *power_well)
6155{
6156 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6157
6158 /*
6159 * Enable the CRI clock source so we can get at the
6160 * display and the reference clock for VGA
6161 * hotplug / manual detection.
6162 */
6163 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6164 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6165 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6166
6167 vlv_set_power_well(dev_priv, power_well, true);
6168
6169 /*
6170 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
6171 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
6172 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
6173 * b. The other bits such as sfr settings / modesel may all
6174 * be set to 0.
6175 *
6176 * This should only be done on init and resume from S3 with
6177 * both PLLs disabled, or we risk losing DPIO and PLL
6178 * synchronization.
6179 */
6180 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
6181}
6182
6183static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6184 struct i915_power_well *power_well)
6185{
6186 struct drm_device *dev = dev_priv->dev;
6187 enum pipe pipe;
6188
6189 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6190
6191 for_each_pipe(pipe)
6192 assert_pll_disabled(dev_priv, pipe);
6193
6194 /* Assert common reset */
6195 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
6196
6197 vlv_set_power_well(dev_priv, power_well, false);
6198}
6199
6153static void check_power_well_state(struct drm_i915_private *dev_priv, 6200static void check_power_well_state(struct drm_i915_private *dev_priv,
6154 struct i915_power_well *power_well) 6201 struct i915_power_well *power_well)
6155{ 6202{
@@ -6299,6 +6346,7 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
6299 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 6346 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6300 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ 6347 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6301 BIT(POWER_DOMAIN_PORT_CRT) | \ 6348 BIT(POWER_DOMAIN_PORT_CRT) | \
6349 BIT(POWER_DOMAIN_PLLS) | \
6302 BIT(POWER_DOMAIN_INIT)) 6350 BIT(POWER_DOMAIN_INIT))
6303#define HSW_DISPLAY_POWER_DOMAINS ( \ 6351#define HSW_DISPLAY_POWER_DOMAINS ( \
6304 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ 6352 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -6398,6 +6446,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = {
6398 .is_enabled = vlv_power_well_enabled, 6446 .is_enabled = vlv_power_well_enabled,
6399}; 6447};
6400 6448
6449static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
6450 .sync_hw = vlv_power_well_sync_hw,
6451 .enable = vlv_dpio_cmn_power_well_enable,
6452 .disable = vlv_dpio_cmn_power_well_disable,
6453 .is_enabled = vlv_power_well_enabled,
6454};
6455
6401static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 6456static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
6402 .sync_hw = vlv_power_well_sync_hw, 6457 .sync_hw = vlv_power_well_sync_hw,
6403 .enable = vlv_power_well_enable, 6458 .enable = vlv_power_well_enable,
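The power-well rework in the hunks above replaces id-based special casing inside __vlv_set_power_well() with dedicated enable/disable hooks selected through a per-well ops table, so the DPIO common lane gets its own sequencing functions. A minimal sketch of that dispatch shape, with illustrative types rather than the driver's own:

#include <stdbool.h>

struct power_well;

struct power_well_ops {
	void (*enable)(struct power_well *well);
	void (*disable)(struct power_well *well);
	bool (*is_enabled)(struct power_well *well);
};

struct power_well {
	const char *name;
	int id;				/* e.g. a punit well id */
	const struct power_well_ops *ops;
};

static void power_well_set(struct power_well *well, bool enable)
{
	/* no id checks here: special sequencing (CRI clock enable,
	 * cmnreset assert/de-assert) lives in the well's own hooks */
	if (enable)
		well->ops->enable(well);
	else
		well->ops->disable(well);
}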
@@ -6458,10 +6513,25 @@ static struct i915_power_well vlv_power_wells[] = {
6458 .name = "dpio-common", 6513 .name = "dpio-common",
6459 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 6514 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
6460 .data = PUNIT_POWER_WELL_DPIO_CMN_BC, 6515 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6461 .ops = &vlv_dpio_power_well_ops, 6516 .ops = &vlv_dpio_cmn_power_well_ops,
6462 }, 6517 },
6463}; 6518};
6464 6519
6520static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
6521 enum punit_power_well power_well_id)
6522{
6523 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6524 struct i915_power_well *power_well;
6525 int i;
6526
6527 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6528 if (power_well->data == power_well_id)
6529 return power_well;
6530 }
6531
6532 return NULL;
6533}
6534
6465#define set_power_wells(power_domains, __power_wells) ({ \ 6535#define set_power_wells(power_domains, __power_wells) ({ \
6466 (power_domains)->power_wells = (__power_wells); \ 6536 (power_domains)->power_wells = (__power_wells); \
6467 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ 6537 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -6512,11 +6582,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
6512 mutex_unlock(&power_domains->lock); 6582 mutex_unlock(&power_domains->lock);
6513} 6583}
6514 6584
6585static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
6586{
6587 struct i915_power_well *cmn =
6588 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
6589 struct i915_power_well *disp2d =
6590 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
6591
6592 /* nothing to do if common lane is already off */
6593 if (!cmn->ops->is_enabled(dev_priv, cmn))
6594 return;
6595
6596 /* If the display might be already active skip this */
6597 if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
6598 I915_READ(DPIO_CTL) & DPIO_CMNRST)
6599 return;
6600
6601 DRM_DEBUG_KMS("toggling display PHY side reset\n");
6602
6603 /* cmnlane needs DPLL registers */
6604 disp2d->ops->enable(dev_priv, disp2d);
6605
6606 /*
6607 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
6608 * Need to assert and de-assert PHY SB reset by gating the
6609 * common lane power, then un-gating it.
6610 * Simply ungating isn't enough to reset the PHY enough to get
6611 * ports and lanes running.
6612 */
6613 cmn->ops->disable(dev_priv, cmn);
6614}
6615
6515void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) 6616void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
6516{ 6617{
6618 struct drm_device *dev = dev_priv->dev;
6517 struct i915_power_domains *power_domains = &dev_priv->power_domains; 6619 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6518 6620
6519 power_domains->initializing = true; 6621 power_domains->initializing = true;
6622
6623 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
6624 mutex_lock(&power_domains->lock);
6625 vlv_cmnlane_wa(dev_priv);
6626 mutex_unlock(&power_domains->lock);
6627 }
6628
6520 /* For now, we need the power well to be always enabled. */ 6629 /* For now, we need the power well to be always enabled. */
6521 intel_display_set_init_power(dev_priv, true); 6630 intel_display_set_init_power(dev_priv, true);
6522 intel_power_domains_resume(dev_priv); 6631 intel_power_domains_resume(dev_priv);
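vlv_cmnlane_wa() above only forces the PHY side reset when it cannot prove the BIOS already sequenced it: the common lane must be powered, and either the display well is down or DPIO_CMNRST is still asserted. The same decision restated in plain C with stand-in state and helpers (all names here are assumptions of the sketch, not driver API):

#include <stdbool.h>

struct phy_state {
	bool cmn_powered;	/* DPIO common lane well is on */
	bool disp2d_powered;	/* display 2D well is on */
	bool cmnrst_deasserted;	/* DPIO_CMNRST already de-asserted */
};

static void power_on_disp2d(struct phy_state *phy)
{
	phy->disp2d_powered = true;
}

static void power_off_cmnlane(struct phy_state *phy)
{
	phy->cmn_powered = false;	/* asserts the PHY side reset */
}

static void cmnlane_wa(struct phy_state *phy)
{
	if (!phy->cmn_powered)
		return;	/* off: the next enable performs the reset anyway */

	if (phy->disp2d_powered && phy->cmnrst_deasserted)
		return;	/* BIOS left a consistent, running state */

	/* the cmnlane sequencing touches DPLL registers, which need
	 * the display well up first */
	power_on_disp2d(phy);

	/* gating the common lane asserts the PHY sideband reset; the
	 * later re-enable de-asserts it with full sequencing */
	power_off_cmnlane(phy);
}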
@@ -6689,7 +6798,7 @@ void intel_init_pm(struct drm_device *dev)
6689 (dev_priv->is_ddr3 == 1) ? "3" : "2", 6798 (dev_priv->is_ddr3 == 1) ? "3" : "2",
6690 dev_priv->fsb_freq, dev_priv->mem_freq); 6799 dev_priv->fsb_freq, dev_priv->mem_freq);
6691 /* Disable CxSR and never update its watermark again */ 6800 /* Disable CxSR and never update its watermark again */
6692 pineview_disable_cxsr(dev); 6801 intel_set_memory_cxsr(dev_priv, false);
6693 dev_priv->display.update_wm = NULL; 6802 dev_priv->display.update_wm = NULL;
6694 } else 6803 } else
6695 dev_priv->display.update_wm = pineview_update_wm; 6804 dev_priv->display.update_wm = pineview_update_wm;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2faef2605e97..599709e80a16 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,9 +48,8 @@ static inline int __ring_space(int head, int tail, int size)
48 return space; 48 return space;
49} 49}
50 50
51static inline int ring_space(struct intel_engine_cs *ring) 51static inline int ring_space(struct intel_ringbuffer *ringbuf)
52{ 52{
53 struct intel_ringbuffer *ringbuf = ring->buffer;
54 return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size); 53 return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
55} 54}
56 55
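ring_space() now takes the ringbuffer directly, which is what __ring_space() already operated on. The underlying computation is the usual circular-buffer free-space formula with a small guard gap between tail and head; a self-contained sketch, where RING_FREE_SPACE is a stand-in constant rather than the driver's value:

#include <stdio.h>

#define RING_FREE_SPACE 64	/* assumed guard gap for the example */

static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + RING_FREE_SPACE);
	if (space < 0)
		space += size;	/* producer has wrapped past the consumer */
	return space;
}

int main(void)
{
	/* 4 KiB ring, hardware head at 1024, software tail at 3072 */
	printf("%d\n", ring_space(1024, 3072, 4096));	/* -> 1984 */
	return 0;
}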
@@ -545,7 +544,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
545 else { 544 else {
546 ringbuf->head = I915_READ_HEAD(ring); 545 ringbuf->head = I915_READ_HEAD(ring);
547 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 546 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
548 ringbuf->space = ring_space(ring); 547 ringbuf->space = ring_space(ringbuf);
549 ringbuf->last_retired_head = -1; 548 ringbuf->last_retired_head = -1;
550 } 549 }
551 550
@@ -660,6 +659,13 @@ static int init_render_ring(struct intel_engine_cs *ring)
660static void render_ring_cleanup(struct intel_engine_cs *ring) 659static void render_ring_cleanup(struct intel_engine_cs *ring)
661{ 660{
662 struct drm_device *dev = ring->dev; 661 struct drm_device *dev = ring->dev;
662 struct drm_i915_private *dev_priv = dev->dev_private;
663
664 if (dev_priv->semaphore_obj) {
665 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
666 drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
667 dev_priv->semaphore_obj = NULL;
668 }
663 669
664 if (ring->scratch.obj == NULL) 670 if (ring->scratch.obj == NULL)
665 return; 671 return;
@@ -673,29 +679,96 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
673 ring->scratch.obj = NULL; 679 ring->scratch.obj = NULL;
674} 680}
675 681
682static int gen8_rcs_signal(struct intel_engine_cs *signaller,
683 unsigned int num_dwords)
684{
685#define MBOX_UPDATE_DWORDS 8
686 struct drm_device *dev = signaller->dev;
687 struct drm_i915_private *dev_priv = dev->dev_private;
688 struct intel_engine_cs *waiter;
689 int i, ret, num_rings;
690
691 num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
692 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
693#undef MBOX_UPDATE_DWORDS
694
695 ret = intel_ring_begin(signaller, num_dwords);
696 if (ret)
697 return ret;
698
699 for_each_ring(waiter, dev_priv, i) {
700 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
701 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
702 continue;
703
704 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
705 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
706 PIPE_CONTROL_QW_WRITE |
707 PIPE_CONTROL_FLUSH_ENABLE);
708 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
709 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
710 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
711 intel_ring_emit(signaller, 0);
712 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
713 MI_SEMAPHORE_TARGET(waiter->id));
714 intel_ring_emit(signaller, 0);
715 }
716
717 return 0;
718}
719
720static int gen8_xcs_signal(struct intel_engine_cs *signaller,
721 unsigned int num_dwords)
722{
723#define MBOX_UPDATE_DWORDS 6
724 struct drm_device *dev = signaller->dev;
725 struct drm_i915_private *dev_priv = dev->dev_private;
726 struct intel_engine_cs *waiter;
727 int i, ret, num_rings;
728
729 num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
730 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
731#undef MBOX_UPDATE_DWORDS
732
733 ret = intel_ring_begin(signaller, num_dwords);
734 if (ret)
735 return ret;
736
737 for_each_ring(waiter, dev_priv, i) {
738 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
739 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
740 continue;
741
742 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
743 MI_FLUSH_DW_OP_STOREDW);
744 intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
745 MI_FLUSH_DW_USE_GTT);
746 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
747 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
748 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
749 MI_SEMAPHORE_TARGET(waiter->id));
750 intel_ring_emit(signaller, 0);
751 }
752
753 return 0;
754}
755
676static int gen6_signal(struct intel_engine_cs *signaller, 756static int gen6_signal(struct intel_engine_cs *signaller,
677 unsigned int num_dwords) 757 unsigned int num_dwords)
678{ 758{
679 struct drm_device *dev = signaller->dev; 759 struct drm_device *dev = signaller->dev;
680 struct drm_i915_private *dev_priv = dev->dev_private; 760 struct drm_i915_private *dev_priv = dev->dev_private;
681 struct intel_engine_cs *useless; 761 struct intel_engine_cs *useless;
682 int i, ret; 762 int i, ret, num_rings;
683 763
684 /* NB: In order to be able to do semaphore MBOX updates for varying 764#define MBOX_UPDATE_DWORDS 3
685 * number of rings, it's easiest if we round up each individual update 765 num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
686 * to a multiple of 2 (since ring updates must always be a multiple of 766 num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
687 * 2) even though the actual update only requires 3 dwords. 767#undef MBOX_UPDATE_DWORDS
688 */
689#define MBOX_UPDATE_DWORDS 4
690 if (i915_semaphore_is_enabled(dev))
691 num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
692 else
693 return intel_ring_begin(signaller, num_dwords);
694 768
695 ret = intel_ring_begin(signaller, num_dwords); 769 ret = intel_ring_begin(signaller, num_dwords);
696 if (ret) 770 if (ret)
697 return ret; 771 return ret;
698#undef MBOX_UPDATE_DWORDS
699 772
700 for_each_ring(useless, dev_priv, i) { 773 for_each_ring(useless, dev_priv, i) {
701 u32 mbox_reg = signaller->semaphore.mbox.signal[i]; 774 u32 mbox_reg = signaller->semaphore.mbox.signal[i];
@@ -703,15 +776,13 @@ static int gen6_signal(struct intel_engine_cs *signaller,
703 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); 776 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
704 intel_ring_emit(signaller, mbox_reg); 777 intel_ring_emit(signaller, mbox_reg);
705 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); 778 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
706 intel_ring_emit(signaller, MI_NOOP);
707 } else {
708 intel_ring_emit(signaller, MI_NOOP);
709 intel_ring_emit(signaller, MI_NOOP);
710 intel_ring_emit(signaller, MI_NOOP);
711 intel_ring_emit(signaller, MI_NOOP);
712 } 779 }
713 } 780 }
714 781
782 /* If num_dwords was rounded, make sure the tail pointer is correct */
783 if (num_rings % 2 == 0)
784 intel_ring_emit(signaller, MI_NOOP);
785
715 return 0; 786 return 0;
716} 787}
717 788
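The rewritten gen6_signal() accounts 3 dwords per cross-ring mailbox update (LRI opcode, register, seqno), rounds the added total up to an even count because ring emission must advance in multiples of 2 dwords, and pads with a single trailing MI_NOOP when the rounding created a slot. The arithmetic, worked through for a five-ring device (the ring count is an assumption of the example):

#include <stdio.h>

#define NUM_RINGS		5	/* assumed ring count */
#define MBOX_UPDATE_DWORDS	3	/* LRI, mbox register, seqno */

static unsigned int signal_dwords(unsigned int own, unsigned int num_rings)
{
	unsigned int mbox = (num_rings - 1) * MBOX_UPDATE_DWORDS;

	/* round the mailbox block up to an even dword count; an odd
	 * total is what the trailing MI_NOOP pad compensates for */
	return own + ((mbox + 1) & ~1u);
}

int main(void)
{
	/* gen6_add_request() asks for 4 dwords of its own:
	 * 4 + round_up(4 * 3, 2) = 16 */
	printf("%u\n", signal_dwords(4, NUM_RINGS));
	return 0;
}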
@@ -729,7 +800,11 @@ gen6_add_request(struct intel_engine_cs *ring)
729{ 800{
730 int ret; 801 int ret;
731 802
732 ret = ring->semaphore.signal(ring, 4); 803 if (ring->semaphore.signal)
804 ret = ring->semaphore.signal(ring, 4);
805 else
806 ret = intel_ring_begin(ring, 4);
807
733 if (ret) 808 if (ret)
734 return ret; 809 return ret;
735 810
@@ -756,6 +831,32 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
756 * @signaller - ring which has, or will signal 831 * @signaller - ring which has, or will signal
757 * @seqno - seqno which the waiter will block on 832 * @seqno - seqno which the waiter will block on
758 */ 833 */
834
835static int
836gen8_ring_sync(struct intel_engine_cs *waiter,
837 struct intel_engine_cs *signaller,
838 u32 seqno)
839{
840 struct drm_i915_private *dev_priv = waiter->dev->dev_private;
841 int ret;
842
843 ret = intel_ring_begin(waiter, 4);
844 if (ret)
845 return ret;
846
847 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
848 MI_SEMAPHORE_GLOBAL_GTT |
849 MI_SEMAPHORE_POLL |
850 MI_SEMAPHORE_SAD_GTE_SDD);
851 intel_ring_emit(waiter, seqno);
852 intel_ring_emit(waiter,
853 lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
854 intel_ring_emit(waiter,
855 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
856 intel_ring_advance(waiter);
857 return 0;
858}
859
759static int 860static int
760gen6_ring_sync(struct intel_engine_cs *waiter, 861gen6_ring_sync(struct intel_engine_cs *waiter,
761 struct intel_engine_cs *signaller, 862 struct intel_engine_cs *signaller,
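gen8_ring_sync() programs MI_SEMAPHORE_WAIT in polling mode with a greater-than-or-equal comparison (MI_SEMAPHORE_SAD_GTE_SDD) against the waiter's slot in the shared semaphore page. In software terms the command streamer blocks on roughly this predicate (a hedged restatement that ignores seqno wraparound):

#include <stdbool.h>
#include <stdint.h>

/* the waiter unblocks once the signaller's qword slot holds a seqno
 * at least as new as the one being waited for */
static bool semaphore_signalled(const volatile uint64_t *slot, uint32_t seqno)
{
	return (uint32_t)*slot >= seqno;
}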
@@ -1331,6 +1432,7 @@ static int init_status_page(struct intel_engine_cs *ring)
1331 struct drm_i915_gem_object *obj; 1432 struct drm_i915_gem_object *obj;
1332 1433
1333 if ((obj = ring->status_page.obj) == NULL) { 1434 if ((obj = ring->status_page.obj) == NULL) {
1435 unsigned flags;
1334 int ret; 1436 int ret;
1335 1437
1336 obj = i915_gem_alloc_object(ring->dev, 4096); 1438 obj = i915_gem_alloc_object(ring->dev, 4096);
@@ -1343,7 +1445,20 @@ static int init_status_page(struct intel_engine_cs *ring)
1343 if (ret) 1445 if (ret)
1344 goto err_unref; 1446 goto err_unref;
1345 1447
1346 ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); 1448 flags = 0;
1449 if (!HAS_LLC(ring->dev))
1450 /* On g33, we cannot place HWS above 256MiB, so
1451 * restrict its pinning to the low mappable arena.
1452 * Though this restriction is not documented for
1453 * gen4, gen5, or byt, they also behave similarly
1454 * and hang if the HWS is placed at the top of the
1455 * GTT. To generalise, it appears that all !llc
1456 * platforms have issues with us placing the HWS
1457 * above the mappable region (even though we never
1458 * actually map it).

1459 */
1460 flags |= PIN_MAPPABLE;
1461 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
1347 if (ret) { 1462 if (ret) {
1348err_unref: 1463err_unref:
1349 drm_gem_object_unreference(&obj->base); 1464 drm_gem_object_unreference(&obj->base);
@@ -1380,15 +1495,25 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
1380 return 0; 1495 return 0;
1381} 1496}
1382 1497
1383static int allocate_ring_buffer(struct intel_engine_cs *ring) 1498static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1499{
1500 if (!ringbuf->obj)
1501 return;
1502
1503 iounmap(ringbuf->virtual_start);
1504 i915_gem_object_ggtt_unpin(ringbuf->obj);
1505 drm_gem_object_unreference(&ringbuf->obj->base);
1506 ringbuf->obj = NULL;
1507}
1508
1509static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1510 struct intel_ringbuffer *ringbuf)
1384{ 1511{
1385 struct drm_device *dev = ring->dev;
1386 struct drm_i915_private *dev_priv = to_i915(dev); 1512 struct drm_i915_private *dev_priv = to_i915(dev);
1387 struct intel_ringbuffer *ringbuf = ring->buffer;
1388 struct drm_i915_gem_object *obj; 1513 struct drm_i915_gem_object *obj;
1389 int ret; 1514 int ret;
1390 1515
1391 if (intel_ring_initialized(ring)) 1516 if (ringbuf->obj)
1392 return 0; 1517 return 0;
1393 1518
1394 obj = NULL; 1519 obj = NULL;
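Splitting allocate_ring_buffer() into intel_alloc_ringbuffer_obj() and intel_destroy_ringbuffer_obj() also makes both ends idempotent: alloc returns early when the buffer is already backed, destroy when it is not, so shared error and teardown paths can call them unconditionally. The shape of that pairing, reduced to a sketch with illustrative names:

#include <stdlib.h>

struct ringbuf {
	void *obj;	/* backing object, NULL when unbacked */
	size_t size;
};

static void ringbuf_destroy_obj(struct ringbuf *rb)
{
	if (!rb->obj)
		return;		/* already torn down */
	free(rb->obj);
	rb->obj = NULL;
}

static int ringbuf_alloc_obj(struct ringbuf *rb)
{
	if (rb->obj)
		return 0;	/* already backed */
	rb->obj = malloc(rb->size);
	return rb->obj ? 0 : -1;
}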
@@ -1460,7 +1585,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1460 goto error; 1585 goto error;
1461 } 1586 }
1462 1587
1463 ret = allocate_ring_buffer(ring); 1588 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1464 if (ret) { 1589 if (ret) {
1465 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); 1590 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
1466 goto error; 1591 goto error;
@@ -1501,11 +1626,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1501 intel_stop_ring_buffer(ring); 1626 intel_stop_ring_buffer(ring);
1502 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); 1627 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
1503 1628
1504 iounmap(ringbuf->virtual_start); 1629 intel_destroy_ringbuffer_obj(ringbuf);
1505
1506 i915_gem_object_ggtt_unpin(ringbuf->obj);
1507 drm_gem_object_unreference(&ringbuf->obj->base);
1508 ringbuf->obj = NULL;
1509 ring->preallocated_lazy_request = NULL; 1630 ring->preallocated_lazy_request = NULL;
1510 ring->outstanding_lazy_seqno = 0; 1631 ring->outstanding_lazy_seqno = 0;
1511 1632
@@ -1531,7 +1652,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1531 ringbuf->head = ringbuf->last_retired_head; 1652 ringbuf->head = ringbuf->last_retired_head;
1532 ringbuf->last_retired_head = -1; 1653 ringbuf->last_retired_head = -1;
1533 1654
1534 ringbuf->space = ring_space(ring); 1655 ringbuf->space = ring_space(ringbuf);
1535 if (ringbuf->space >= n) 1656 if (ringbuf->space >= n)
1536 return 0; 1657 return 0;
1537 } 1658 }
@@ -1554,7 +1675,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1554 ringbuf->head = ringbuf->last_retired_head; 1675 ringbuf->head = ringbuf->last_retired_head;
1555 ringbuf->last_retired_head = -1; 1676 ringbuf->last_retired_head = -1;
1556 1677
1557 ringbuf->space = ring_space(ring); 1678 ringbuf->space = ring_space(ringbuf);
1558 return 0; 1679 return 0;
1559} 1680}
1560 1681
@@ -1583,7 +1704,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1583 trace_i915_ring_wait_begin(ring); 1704 trace_i915_ring_wait_begin(ring);
1584 do { 1705 do {
1585 ringbuf->head = I915_READ_HEAD(ring); 1706 ringbuf->head = I915_READ_HEAD(ring);
1586 ringbuf->space = ring_space(ring); 1707 ringbuf->space = ring_space(ringbuf);
1587 if (ringbuf->space >= n) { 1708 if (ringbuf->space >= n) {
1588 ret = 0; 1709 ret = 0;
1589 break; 1710 break;
@@ -1635,7 +1756,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
1635 iowrite32(MI_NOOP, virt++); 1756 iowrite32(MI_NOOP, virt++);
1636 1757
1637 ringbuf->tail = 0; 1758 ringbuf->tail = 0;
1638 ringbuf->space = ring_space(ring); 1759 ringbuf->space = ring_space(ringbuf);
1639 1760
1640 return 0; 1761 return 0;
1641} 1762}
@@ -1947,45 +2068,74 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1947{ 2068{
1948 struct drm_i915_private *dev_priv = dev->dev_private; 2069 struct drm_i915_private *dev_priv = dev->dev_private;
1949 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; 2070 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2071 struct drm_i915_gem_object *obj;
2072 int ret;
1950 2073
1951 ring->name = "render ring"; 2074 ring->name = "render ring";
1952 ring->id = RCS; 2075 ring->id = RCS;
1953 ring->mmio_base = RENDER_RING_BASE; 2076 ring->mmio_base = RENDER_RING_BASE;
1954 2077
1955 if (INTEL_INFO(dev)->gen >= 6) { 2078 if (INTEL_INFO(dev)->gen >= 8) {
2079 if (i915_semaphore_is_enabled(dev)) {
2080 obj = i915_gem_alloc_object(dev, 4096);
2081 if (obj == NULL) {
2082 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2083 i915.semaphores = 0;
2084 } else {
2085 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2086 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2087 if (ret != 0) {
2088 drm_gem_object_unreference(&obj->base);
2089 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2090 i915.semaphores = 0;
2091 } else
2092 dev_priv->semaphore_obj = obj;
2093 }
2094 }
2095 ring->add_request = gen6_add_request;
2096 ring->flush = gen8_render_ring_flush;
2097 ring->irq_get = gen8_ring_get_irq;
2098 ring->irq_put = gen8_ring_put_irq;
2099 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2100 ring->get_seqno = gen6_ring_get_seqno;
2101 ring->set_seqno = ring_set_seqno;
2102 if (i915_semaphore_is_enabled(dev)) {
2103 WARN_ON(!dev_priv->semaphore_obj);
2104 ring->semaphore.sync_to = gen8_ring_sync;
2105 ring->semaphore.signal = gen8_rcs_signal;
2106 GEN8_RING_SEMAPHORE_INIT;
2107 }
2108 } else if (INTEL_INFO(dev)->gen >= 6) {
1956 ring->add_request = gen6_add_request; 2109 ring->add_request = gen6_add_request;
1957 ring->flush = gen7_render_ring_flush; 2110 ring->flush = gen7_render_ring_flush;
1958 if (INTEL_INFO(dev)->gen == 6) 2111 if (INTEL_INFO(dev)->gen == 6)
1959 ring->flush = gen6_render_ring_flush; 2112 ring->flush = gen6_render_ring_flush;
1960 if (INTEL_INFO(dev)->gen >= 8) { 2113 ring->irq_get = gen6_ring_get_irq;
1961 ring->flush = gen8_render_ring_flush; 2114 ring->irq_put = gen6_ring_put_irq;
1962 ring->irq_get = gen8_ring_get_irq;
1963 ring->irq_put = gen8_ring_put_irq;
1964 } else {
1965 ring->irq_get = gen6_ring_get_irq;
1966 ring->irq_put = gen6_ring_put_irq;
1967 }
1968 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2115 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1969 ring->get_seqno = gen6_ring_get_seqno; 2116 ring->get_seqno = gen6_ring_get_seqno;
1970 ring->set_seqno = ring_set_seqno; 2117 ring->set_seqno = ring_set_seqno;
1971 ring->semaphore.sync_to = gen6_ring_sync; 2118 if (i915_semaphore_is_enabled(dev)) {
1972 ring->semaphore.signal = gen6_signal; 2119 ring->semaphore.sync_to = gen6_ring_sync;
1973 /* 2120 ring->semaphore.signal = gen6_signal;
1974 * The current semaphore is only applied on pre-gen8 platform. 2121 /*
1975 * And there is no VCS2 ring on the pre-gen8 platform. So the 2122 * The current semaphore is only applied on pre-gen8
1976 * semaphore between RCS and VCS2 is initialized as INVALID. 2123 * platform. And there is no VCS2 ring on the pre-gen8
1977 * Gen8 will initialize the sema between VCS2 and RCS later. 2124 * platform. So the semaphore between RCS and VCS2 is
1978 */ 2125 * initialized as INVALID. Gen8 will initialize the
1979 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; 2126 * sema between VCS2 and RCS later.
1980 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; 2127 */
1981 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; 2128 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1982 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; 2129 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
1983 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2130 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
1984 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; 2131 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
1985 ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; 2132 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
1986 ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; 2133 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
1987 ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; 2134 ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
1988 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2135 ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2136 ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2137 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2138 }
1989 } else if (IS_GEN5(dev)) { 2139 } else if (IS_GEN5(dev)) {
1990 ring->add_request = pc_render_add_request; 2140 ring->add_request = pc_render_add_request;
1991 ring->flush = gen4_render_ring_flush; 2141 ring->flush = gen4_render_ring_flush;
@@ -2013,6 +2163,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2013 ring->irq_enable_mask = I915_USER_INTERRUPT; 2163 ring->irq_enable_mask = I915_USER_INTERRUPT;
2014 } 2164 }
2015 ring->write_tail = ring_write_tail; 2165 ring->write_tail = ring_write_tail;
2166
2016 if (IS_HASWELL(dev)) 2167 if (IS_HASWELL(dev))
2017 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 2168 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2018 else if (IS_GEN8(dev)) 2169 else if (IS_GEN8(dev))
@@ -2030,9 +2181,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2030 2181
2031 /* Workaround batchbuffer to combat CS tlb bug. */ 2182 /* Workaround batchbuffer to combat CS tlb bug. */
2032 if (HAS_BROKEN_CS_TLB(dev)) { 2183 if (HAS_BROKEN_CS_TLB(dev)) {
2033 struct drm_i915_gem_object *obj;
2034 int ret;
2035
2036 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); 2184 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
2037 if (obj == NULL) { 2185 if (obj == NULL) {
2038 DRM_ERROR("Failed to allocate batch bo\n"); 2186 DRM_ERROR("Failed to allocate batch bo\n");
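Note how the gen8 branch above treats the semaphore page as optional: if the object cannot be allocated or pinned, it logs, clears i915.semaphores, and carries on with driver init rather than failing. A generic sketch of that degrade-instead-of-fail pattern (the helper names are placeholders, not kernel API):

#include <stdbool.h>
#include <stddef.h>

struct feature_obj;
struct feature_obj *feature_alloc(void);	/* assumed helpers */
int feature_pin(struct feature_obj *obj);
void feature_release(struct feature_obj *obj);

static struct feature_obj *try_enable_feature(bool *enabled)
{
	struct feature_obj *obj = feature_alloc();

	if (!obj) {
		*enabled = false;	/* degrade, don't fail init */
		return NULL;
	}
	if (feature_pin(obj)) {
		feature_release(obj);
		*enabled = false;
		return NULL;
	}
	return obj;
}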
@@ -2163,31 +2311,32 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2163 ring->irq_put = gen8_ring_put_irq; 2311 ring->irq_put = gen8_ring_put_irq;
2164 ring->dispatch_execbuffer = 2312 ring->dispatch_execbuffer =
2165 gen8_ring_dispatch_execbuffer; 2313 gen8_ring_dispatch_execbuffer;
2314 if (i915_semaphore_is_enabled(dev)) {
2315 ring->semaphore.sync_to = gen8_ring_sync;
2316 ring->semaphore.signal = gen8_xcs_signal;
2317 GEN8_RING_SEMAPHORE_INIT;
2318 }
2166 } else { 2319 } else {
2167 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2320 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2168 ring->irq_get = gen6_ring_get_irq; 2321 ring->irq_get = gen6_ring_get_irq;
2169 ring->irq_put = gen6_ring_put_irq; 2322 ring->irq_put = gen6_ring_put_irq;
2170 ring->dispatch_execbuffer = 2323 ring->dispatch_execbuffer =
2171 gen6_ring_dispatch_execbuffer; 2324 gen6_ring_dispatch_execbuffer;
2325 if (i915_semaphore_is_enabled(dev)) {
2326 ring->semaphore.sync_to = gen6_ring_sync;
2327 ring->semaphore.signal = gen6_signal;
2328 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2329 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2330 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2331 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2332 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2333 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2334 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2335 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2336 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2337 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2338 }
2172 } 2339 }
2173 ring->semaphore.sync_to = gen6_ring_sync;
2174 ring->semaphore.signal = gen6_signal;
2175 /*
2176 * The current semaphore is only applied on pre-gen8 platform.
2177 * And there is no VCS2 ring on the pre-gen8 platform. So the
2178 * semaphore between VCS and VCS2 is initialized as INVALID.
2179 * Gen8 will initialize the sema between VCS2 and VCS later.
2180 */
2181 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2182 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2183 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2184 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2185 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2186 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2187 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2188 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2189 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2190 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2191 } else { 2340 } else {
2192 ring->mmio_base = BSD_RING_BASE; 2341 ring->mmio_base = BSD_RING_BASE;
2193 ring->flush = bsd_ring_flush; 2342 ring->flush = bsd_ring_flush;
@@ -2224,7 +2373,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2224 return -EINVAL; 2373 return -EINVAL;
2225 } 2374 }
2226 2375
2227 ring->name = "bds2_ring"; 2376 ring->name = "bsd2 ring";
2228 ring->id = VCS2; 2377 ring->id = VCS2;
2229 2378
2230 ring->write_tail = ring_write_tail; 2379 ring->write_tail = ring_write_tail;
@@ -2239,25 +2388,11 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2239 ring->irq_put = gen8_ring_put_irq; 2388 ring->irq_put = gen8_ring_put_irq;
2240 ring->dispatch_execbuffer = 2389 ring->dispatch_execbuffer =
2241 gen8_ring_dispatch_execbuffer; 2390 gen8_ring_dispatch_execbuffer;
2242 ring->semaphore.sync_to = gen6_ring_sync; 2391 if (i915_semaphore_is_enabled(dev)) {
2243 ring->semaphore.signal = gen6_signal; 2392 ring->semaphore.sync_to = gen8_ring_sync;
2244 /* 2393 ring->semaphore.signal = gen8_xcs_signal;
2245 * The current semaphore is only applied on the pre-gen8. And there 2394 GEN8_RING_SEMAPHORE_INIT;
2246 * is no bsd2 ring on the pre-gen8. So now the semaphore_register 2395 }
2247 * between VCS2 and other ring is initialized as invalid.
2248 * Gen8 will initialize the sema between VCS2 and other ring later.
2249 */
2250 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2251 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2252 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2253 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2254 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2255 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2256 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2257 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2258 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2259 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2260
2261 ring->init = init_ring_common; 2396 ring->init = init_ring_common;
2262 2397
2263 return intel_init_ring_buffer(dev, ring); 2398 return intel_init_ring_buffer(dev, ring);
@@ -2283,30 +2418,38 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
2283 ring->irq_get = gen8_ring_get_irq; 2418 ring->irq_get = gen8_ring_get_irq;
2284 ring->irq_put = gen8_ring_put_irq; 2419 ring->irq_put = gen8_ring_put_irq;
2285 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2420 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2421 if (i915_semaphore_is_enabled(dev)) {
2422 ring->semaphore.sync_to = gen8_ring_sync;
2423 ring->semaphore.signal = gen8_xcs_signal;
2424 GEN8_RING_SEMAPHORE_INIT;
2425 }
2286 } else { 2426 } else {
2287 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2427 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2288 ring->irq_get = gen6_ring_get_irq; 2428 ring->irq_get = gen6_ring_get_irq;
2289 ring->irq_put = gen6_ring_put_irq; 2429 ring->irq_put = gen6_ring_put_irq;
2290 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2430 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2431 if (i915_semaphore_is_enabled(dev)) {
2432 ring->semaphore.signal = gen6_signal;
2433 ring->semaphore.sync_to = gen6_ring_sync;
2434 /*
2435 * The current semaphore is only applied on pre-gen8
2436 * platform. And there is no VCS2 ring on the pre-gen8
2437 * platform. So the semaphore between BCS and VCS2 is
2438 * initialized as INVALID. Gen8 will initialize the
2439 * sema between BCS and VCS2 later.
2440 */
2441 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
2442 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
2443 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2444 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
2445 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2446 ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
2447 ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
2448 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2449 ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
2450 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2451 }
2291 } 2452 }
2292 ring->semaphore.sync_to = gen6_ring_sync;
2293 ring->semaphore.signal = gen6_signal;
2294 /*
2295 * The current semaphore is only applied on pre-gen8 platform. And
2296 * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
2297 * between BCS and VCS2 is initialized as INVALID.
2298 * Gen8 will initialize the sema between BCS and VCS2 later.
2299 */
2300 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
2301 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
2302 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2303 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
2304 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2305 ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
2306 ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
2307 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2308 ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
2309 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2310 ring->init = init_ring_common; 2453 ring->init = init_ring_common;
2311 2454
2312 return intel_init_ring_buffer(dev, ring); 2455 return intel_init_ring_buffer(dev, ring);
@@ -2333,24 +2476,31 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
2333 ring->irq_get = gen8_ring_get_irq; 2476 ring->irq_get = gen8_ring_get_irq;
2334 ring->irq_put = gen8_ring_put_irq; 2477 ring->irq_put = gen8_ring_put_irq;
2335 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2478 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2479 if (i915_semaphore_is_enabled(dev)) {
2480 ring->semaphore.sync_to = gen8_ring_sync;
2481 ring->semaphore.signal = gen8_xcs_signal;
2482 GEN8_RING_SEMAPHORE_INIT;
2483 }
2336 } else { 2484 } else {
2337 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 2485 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2338 ring->irq_get = hsw_vebox_get_irq; 2486 ring->irq_get = hsw_vebox_get_irq;
2339 ring->irq_put = hsw_vebox_put_irq; 2487 ring->irq_put = hsw_vebox_put_irq;
2340 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2488 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2489 if (i915_semaphore_is_enabled(dev)) {
2490 ring->semaphore.sync_to = gen6_ring_sync;
2491 ring->semaphore.signal = gen6_signal;
2492 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
2493 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
2494 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
2495 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2496 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2497 ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
2498 ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
2499 ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
2500 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2501 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2502 }
2341 } 2503 }
2342 ring->semaphore.sync_to = gen6_ring_sync;
2343 ring->semaphore.signal = gen6_signal;
2344 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
2345 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
2346 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
2347 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2348 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2349 ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
2350 ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
2351 ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
2352 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2353 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2354 ring->init = init_ring_common; 2504 ring->init = init_ring_common;
2355 2505
2356 return intel_init_ring_buffer(dev, ring); 2506 return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e72017bdcd7f..ed5941078f92 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -40,6 +40,32 @@ struct intel_hw_status_page {
40#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) 40#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
41#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) 41#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
42 42
43/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
44 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
45 */
46#define i915_semaphore_seqno_size sizeof(uint64_t)
47#define GEN8_SIGNAL_OFFSET(__ring, to) \
48 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
49 ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
50 (i915_semaphore_seqno_size * (to)))
51
52#define GEN8_WAIT_OFFSET(__ring, from) \
53 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
54 ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
55 (i915_semaphore_seqno_size * (__ring)->id))
56
57#define GEN8_RING_SEMAPHORE_INIT do { \
58 if (!dev_priv->semaphore_obj) { \
59 break; \
60 } \
61 ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
62 ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
63 ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
64 ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
65 ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
66 ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
67 } while(0)
68
43enum intel_ring_hangcheck_action { 69enum intel_ring_hangcheck_action {
44 HANGCHECK_IDLE = 0, 70 HANGCHECK_IDLE = 0,
45 HANGCHECK_WAIT, 71 HANGCHECK_WAIT,
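The two offset macros encode a 5x5 table of 8-byte slots in the semaphore page: the slot ring X signals for waiter Y lives at X*NUM_RINGS*8 + Y*8, and the wait side is the transpose of the signal side. Checked numerically against the table documented in the next hunk (ring ids RCS=0 through VCS2=4 are assumed):

#include <stdio.h>

#define NUM_RINGS	5
#define SLOT_SIZE	8	/* one qword per seqno, for MI_FLUSH_DW alignment */

static unsigned int signal_offset(int signaller, int waiter)
{
	return signaller * NUM_RINGS * SLOT_SIZE + waiter * SLOT_SIZE;
}

int main(void)
{
	/* matches the signal table: RCS->VCS at 0x08, RCS->VCS2 at 0x20 */
	printf("0x%02x 0x%02x\n", signal_offset(0, 1), signal_offset(0, 4));
	/* the wait side is the transpose: RCS waiting on VCS reads 0x28 */
	printf("0x%02x\n", signal_offset(1, 0));
	return 0;
}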
@@ -127,15 +153,55 @@ struct intel_engine_cs {
127#define I915_DISPATCH_PINNED 0x2 153#define I915_DISPATCH_PINNED 0x2
128 void (*cleanup)(struct intel_engine_cs *ring); 154 void (*cleanup)(struct intel_engine_cs *ring);
129 155
156 /* GEN8 signal/wait table - never trust comments!
157 * signal to signal to signal to signal to signal to
158 * RCS VCS BCS VECS VCS2
159 * --------------------------------------------------------------------
160 * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
161 * |-------------------------------------------------------------------
162 * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
163 * |-------------------------------------------------------------------
164 * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
165 * |-------------------------------------------------------------------
166 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
167 * |-------------------------------------------------------------------
168 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
169 * |-------------------------------------------------------------------
170 *
171 * Generalization:
172 * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
173 * ie. transpose of g(x, y)
174 *
175 * sync from sync from sync from sync from sync from
176 * RCS VCS BCS VECS VCS2
177 * --------------------------------------------------------------------
178 * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
179 * |-------------------------------------------------------------------
180 * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
181 * |-------------------------------------------------------------------
182 * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
183 * |-------------------------------------------------------------------
184 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
185 * |-------------------------------------------------------------------
186 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
187 * |-------------------------------------------------------------------
188 *
189 * Generalization:
190 * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
191 * ie. transpose of f(x, y)
192 */
130 struct { 193 struct {
131 u32 sync_seqno[I915_NUM_RINGS-1]; 194 u32 sync_seqno[I915_NUM_RINGS-1];
132 195
133 struct { 196 union {
134 /* our mbox written by others */ 197 struct {
135 u32 wait[I915_NUM_RINGS]; 198 /* our mbox written by others */
136 /* mboxes this ring signals to */ 199 u32 wait[I915_NUM_RINGS];
137 u32 signal[I915_NUM_RINGS]; 200 /* mboxes this ring signals to */
138 } mbox; 201 u32 signal[I915_NUM_RINGS];
202 } mbox;
203 u64 signal_ggtt[I915_NUM_RINGS];
204 };
139 205
140 /* AKA wait() */ 206 /* AKA wait() */
141 int (*sync_to)(struct intel_engine_cs *ring, 207 int (*sync_to)(struct intel_engine_cs *ring,
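The union introduced here lets the gen6-style mailbox registers and the gen8-style GGTT addresses share the same storage, since any given ring only ever uses one of the two schemes. Its shape, reduced to a sketch (the ring count of 5 is assumed):

#include <stdint.h>

#define NUM_RINGS 5

struct semaphore_slots {
	union {
		struct {
			uint32_t wait[NUM_RINGS];	/* gen6: our mbox, written by others */
			uint32_t signal[NUM_RINGS];	/* gen6: mbox registers we write */
		} mbox;
		uint64_t signal_ggtt[NUM_RINGS];	/* gen8: GGTT slots we write to */
	};
};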
@@ -238,9 +304,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
238 int idx; 304 int idx;
239 305
240 /* 306 /*
241 * cs -> 0 = vcs, 1 = bcs 307 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
242 * vcs -> 0 = bcs, 1 = cs, 308 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
243 * bcs -> 0 = cs, 1 = vcs. 309 * bcs -> 0 = vecs, 1 = vcs2. 2 = rcs, 3 = vcs;
310 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
311 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
244 */ 312 */
245 313
246 idx = (other - ring) - 1; 314 idx = (other - ring) - 1;
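The expanded comment enumerates exactly what the pointer arithmetic below it computes: the index of `other` within `ring`'s sync arrays, i.e. ((other - ring) - 1) modulo the ring count. The same mapping with plain ids, checked against one row of the comment:

#include <stdio.h>

#define NUM_RINGS 5

static int ring_sync_index(int ring_id, int other_id)
{
	int idx = (other_id - ring_id) - 1;

	if (idx < 0)
		idx += NUM_RINGS;	/* wrap around the ring array */
	return idx;
}

int main(void)
{
	/* vcs(1) syncing to rcs(0) -> index 3, matching "3 = rcs" above */
	printf("%d\n", ring_sync_index(1, 0));
	return 0;
}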
@@ -318,9 +386,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
318u64 intel_ring_get_active_head(struct intel_engine_cs *ring); 386u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
319void intel_ring_setup_status_page(struct intel_engine_cs *ring); 387void intel_ring_setup_status_page(struct intel_engine_cs *ring);
320 388
321static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring) 389static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
322{ 390{
323 return ring->buffer->tail; 391 return ringbuf->tail;
324} 392}
325 393
326static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring) 394static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 29145df8ef64..e0f0843569a6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1010,7 +1010,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
1010 if (args->flags || args->pad) 1010 if (args->flags || args->pad)
1011 return -EINVAL; 1011 return -EINVAL;
1012 1012
1013 if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN)) 1013 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1014 return -EPERM; 1014 return -EPERM;
1015 1015
1016 ret = mutex_lock_interruptible(&dev->struct_mutex); 1016 ret = mutex_lock_interruptible(&dev->struct_mutex);