Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c     |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c         | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c         | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c        | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c       |  8
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c         | 64
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c       | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h  |  5
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c         | 53
11 files changed, 197 insertions, 101 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index eb2b3c25b9e1..5363e9c66c27 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2032,6 +2032,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
                                 1, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
                                 1, minor);
+       drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+                                1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c1e5c66553df..288d7b8f49ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2063,10 +2063,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        if (obj->gtt_space == NULL)
                return 0;
 
-       if (obj->pin_count != 0) {
-               DRM_ERROR("Attempting to unbind pinned buffer\n");
-               return -EINVAL;
-       }
+       if (obj->pin_count)
+               return -EBUSY;
 
        ret = i915_gem_object_finish_gpu(obj);
        if (ret)
@@ -3293,6 +3291,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
+       u32 mask;
 
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
@@ -3303,8 +3302,15 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                return NULL;
        }
 
+       mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+       if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+               /* 965gm cannot relocate objects above 4GiB. */
+               mask &= ~__GFP_HIGHMEM;
+               mask |= __GFP_DMA32;
+       }
+
        mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-       mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+       mapping_set_gfp_mask(mapping, mask);
 
        i915_gem_info_add_obj(dev_priv, size);
 
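
Note on the i915_gem.c change above: Broadwater/Crestline (965G/965GM) cannot relocate GEM objects above 4GiB, so on those platforms the object's shmem mapping is now restricted to allocate below that boundary. A minimal sketch of the mask construction, with the IS_CRESTLINE()/IS_BROADWATER() platform check reduced to a bool parameter (the helper name is illustrative, not part of the patch):

    #include <linux/gfp.h>

    /* Build the gfp mask installed on the object's shmem mapping;
     * needs_dma32 stands in for IS_CRESTLINE(dev) || IS_BROADWATER(dev). */
    static gfp_t gem_object_gfp_mask(bool needs_dma32)
    {
            gfp_t mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;

            if (needs_dma32) {
                    /* 965g/gm cannot relocate objects above 4GiB. */
                    mask &= ~__GFP_HIGHMEM;   /* no highmem pages */
                    mask |= __GFP_DMA32;      /* allocate below 4GiB */
            }
            return mask;
    }
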
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index cc4a63307611..1417660a93ec 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps_work);
-       u8 new_delay = dev_priv->cur_delay;
        u32 pm_iir, pm_imr;
+       u8 new_delay;
 
        spin_lock_irq(&dev_priv->rps_lock);
        pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
        I915_WRITE(GEN6_PMIMR, 0);
        spin_unlock_irq(&dev_priv->rps_lock);
 
-       if (!pm_iir)
+       if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                return;
 
        mutex_lock(&dev_priv->dev->struct_mutex);
-       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-               if (dev_priv->cur_delay != dev_priv->max_delay)
-                       new_delay = dev_priv->cur_delay + 1;
-               if (new_delay > dev_priv->max_delay)
-                       new_delay = dev_priv->max_delay;
-       } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-               gen6_gt_force_wake_get(dev_priv);
-               if (dev_priv->cur_delay != dev_priv->min_delay)
-                       new_delay = dev_priv->cur_delay - 1;
-               if (new_delay < dev_priv->min_delay) {
-                       new_delay = dev_priv->min_delay;
-                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-                                  ((new_delay << 16) & 0x3f0000));
-               } else {
-                       /* Make sure we continue to get down interrupts
-                        * until we hit the minimum frequency */
-                       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                  I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-               }
-               gen6_gt_force_wake_put(dev_priv);
-       }
+
+       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+               new_delay = dev_priv->cur_delay + 1;
+       else
+               new_delay = dev_priv->cur_delay - 1;
 
        gen6_set_rps(dev_priv->dev, new_delay);
-       dev_priv->cur_delay = new_delay;
 
-       /*
-        * rps_lock not held here because clearing is non-destructive. There is
-        * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-        * by holding struct_mutex for the duration of the write.
-        */
        mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
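
Note on the i915_irq.c change above: the RPS work handler no longer clamps the delay or programs GEN6_RP_INTERRUPT_LIMITS itself; it only steps the delay up or down and relies on the reworked gen6_set_rps() (see the intel_pm.c hunks below) for clamping and the register writes. A sketch of the resulting split, with a stub struct standing in for drm_i915_private:

    /* Stub for illustration only; the real field lives in drm_i915_private. */
    struct rps_state {
            u8 cur_delay;
    };

    /* The IRQ work now only picks a direction; gen6_set_rps() owns the
     * min/max clamping, so deliberately no bounds check here. */
    static u8 rps_next_delay(const struct rps_state *rps, bool up_event)
    {
            return up_event ? rps->cur_delay + 1 : rps->cur_delay - 1;
    }
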
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ee61ad1e642b..914789420906 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -910,9 +910,10 @@ static void assert_pll(struct drm_i915_private *dev_priv,
 
 /* For ILK+ */
 static void assert_pch_pll(struct drm_i915_private *dev_priv,
-                          struct intel_crtc *intel_crtc, bool state)
+                          struct intel_pch_pll *pll,
+                          struct intel_crtc *crtc,
+                          bool state)
 {
-       int reg;
        u32 val;
        bool cur_state;
 
@@ -921,30 +922,37 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
                return;
        }
 
-       if (!intel_crtc->pch_pll) {
-               WARN(1, "asserting PCH PLL enabled with no PLL\n");
+       if (WARN (!pll,
+                 "asserting PCH PLL %s with no PLL\n", state_string(state)))
                return;
-       }
 
-       if (HAS_PCH_CPT(dev_priv->dev)) {
+       val = I915_READ(pll->pll_reg);
+       cur_state = !!(val & DPLL_VCO_ENABLE);
+       WARN(cur_state != state,
+            "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+            pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+       /* Make sure the selected PLL is correctly attached to the transcoder */
+       if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
                u32 pch_dpll;
 
                pch_dpll = I915_READ(PCH_DPLL_SEL);
-
-               /* Make sure the selected PLL is enabled to the transcoder */
-               WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
-                    "transcoder %d PLL not enabled\n", intel_crtc->pipe);
+               cur_state = pll->pll_reg == _PCH_DPLL_B;
+               if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+                         "PLL[%d] not attached to this transcoder %d: %08x\n",
+                         cur_state, crtc->pipe, pch_dpll)) {
+                       cur_state = !!(val >> (4*crtc->pipe + 3));
+                       WARN(cur_state != state,
+                            "PLL[%d] not %s on this transcoder %d: %08x\n",
+                            pll->pll_reg == _PCH_DPLL_B,
+                            state_string(state),
+                            crtc->pipe,
+                            val);
+               }
        }
-
-       reg = intel_crtc->pch_pll->pll_reg;
-       val = I915_READ(reg);
-       cur_state = !!(val & DPLL_VCO_ENABLE);
-       WARN(cur_state != state,
-            "PCH PLL state assertion failure (expected %s, current %s)\n",
-            state_string(state), state_string(cur_state));
 }
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
@@ -1424,7 +1432,7 @@ static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
        assert_pch_refclk_enabled(dev_priv);
 
        if (pll->active++ && pll->on) {
-               assert_pch_pll_enabled(dev_priv, intel_crtc);
+               assert_pch_pll_enabled(dev_priv, pll, NULL);
                return;
        }
 
@@ -1460,12 +1468,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
                      intel_crtc->base.base.id);
 
        if (WARN_ON(pll->active == 0)) {
-               assert_pch_pll_disabled(dev_priv, intel_crtc);
+               assert_pch_pll_disabled(dev_priv, pll, NULL);
                return;
        }
 
        if (--pll->active) {
-               assert_pch_pll_enabled(dev_priv, intel_crtc);
+               assert_pch_pll_enabled(dev_priv, pll, NULL);
                return;
        }
 
@@ -1495,7 +1503,9 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
        BUG_ON(dev_priv->info->gen < 5);
 
        /* Make sure PCH DPLL is enabled */
-       assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
+       assert_pch_pll_enabled(dev_priv,
+                              to_intel_crtc(crtc)->pch_pll,
+                              to_intel_crtc(crtc));
 
        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, pipe);
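
Note on the assert_pch_pll() rework above: both the old and new checks decode PCH_DPLL_SEL as one 4-bit group per transcoder, where (as the two WARN conditions imply) bit 0 of the group selects DPLL A vs. B and bit 3 is the enable. A sketch of that decoding under those assumptions (helper names are illustrative):

    /* Within PCH_DPLL_SEL, each transcoder owns a 4-bit group at (4 * pipe). */
    static bool transcoder_uses_pll_b(u32 pch_dpll_sel, int pipe)
    {
            return (pch_dpll_sel >> (4 * pipe)) & 1;  /* bit 0: A or B */
    }

    static bool transcoder_pll_sel_enabled(u32 pch_dpll_sel, int pipe)
    {
            return (pch_dpll_sel >> (4 * pipe)) & 8;  /* bit 3: enable */
    }
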
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 71c7096e3869..296cfc201a81 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -266,6 +266,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+               return MODE_H_ILLEGAL;
+
        return MODE_OK;
 }
 
@@ -702,6 +705,9 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                mode->clock = intel_dp->panel_fixed_mode->clock;
        }
 
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+               return false;
+
        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %02x pixel clock %iKHz\n",
                      max_lane_count, bws[max_clock], mode->clock);
@@ -1154,11 +1160,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
        DRM_DEBUG_KMS("Turn eDP power off\n");
 
-       WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
-       ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+       WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
        pp = ironlake_get_pp_control(dev_priv);
-       pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+       pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
 
@@ -1266,18 +1271,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+
+       /* Make sure the panel is off before trying to change the mode. But also
+        * ensure that we have vdd while we switch off the panel. */
+       ironlake_edp_panel_vdd_on(intel_dp);
        ironlake_edp_backlight_off(intel_dp);
        ironlake_edp_panel_off(intel_dp);
 
-       /* Wake up the sink first */
-       ironlake_edp_panel_vdd_on(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_link_down(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, false);
-
-       /* Make sure the panel is off before trying to
-        * change the mode
-        */
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1309,10 +1312,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
        if (mode != DRM_MODE_DPMS_ON) {
+               /* Switching the panel off requires vdd. */
+               ironlake_edp_panel_vdd_on(intel_dp);
                ironlake_edp_backlight_off(intel_dp);
                ironlake_edp_panel_off(intel_dp);
 
-               ironlake_edp_panel_vdd_on(intel_dp);
                intel_dp_sink_dpms(intel_dp, mode);
                intel_dp_link_down(intel_dp);
                ironlake_edp_panel_vdd_off(intel_dp, false);
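
Note on the intel_dp.c changes above: powering the eDP panel off now requires VDD to be up first (the WARN in ironlake_edp_panel_off() is inverted accordingly), so both intel_dp_prepare() and intel_dp_dpms() raise VDD before touching the backlight and panel. The resulting ordering, condensed from the two hunks (function names as in the patch; not a complete mode-set path):

    static void edp_power_down(struct intel_dp *intel_dp)
    {
            ironlake_edp_panel_vdd_on(intel_dp);    /* panel off needs vdd */
            ironlake_edp_backlight_off(intel_dp);
            ironlake_edp_panel_off(intel_dp);       /* now WARNs if vdd is off */

            intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
            intel_dp_link_down(intel_dp);
            ironlake_edp_panel_vdd_off(intel_dp, false);
    }
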
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 4a9707dd0f9c..1991a4408cf9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -396,11 +396,22 @@ clear_err:
         * Wait for bus to IDLE before clearing NAK.
         * If we clear the NAK while bus is still active, then it will stay
         * active and the next transaction may fail.
+        *
+        * If no ACK is received during the address phase of a transaction, the
+        * adapter must report -ENXIO. It is not clear what to return if no ACK
+        * is received at other times. But we have to be careful to not return
+        * spurious -ENXIO because that will prevent i2c and drm edid functions
+        * from retrying. So return -ENXIO only when gmbus properly quiescents -
+        * timing out seems to happen when there _is_ a ddc chip present, but
+        * it's slow responding and only answers on the 2nd retry.
         */
+       ret = -ENXIO;
        if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
-                    10))
+                    10)) {
                DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
                              adapter->name);
+               ret = -ETIMEDOUT;
+       }
 
        /* Toggle the Software Clear Interrupt bit. This has the effect
         * of resetting the GMBUS controller and so clearing the
@@ -414,14 +425,6 @@ clear_err:
                      adapter->name, msgs[i].addr,
                      (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
-       /*
-        * If no ACK is received during the address phase of a transaction,
-        * the adapter must report -ENXIO.
-        * It is not clear what to return if no ACK is received at other times.
-        * So, we always return -ENXIO in all NAK cases, to ensure we send
-        * it at least during the one case that is specified.
-        */
-       ret = -ENXIO;
        goto out;
 
 timeout:
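
Note on the intel_i2c.c change above: a NAK now maps to -ENXIO ("no device") only when GMBUS quiesces cleanly, while a bus that stays active maps to -ETIMEDOUT, so the i2c core and DRM EDID helpers keep retrying slow DDC slaves instead of giving up on a spurious -ENXIO. The decision in isolation (a sketch; the predicate stands in for the wait_for() on GMBUS_ACTIVE):

    #include <linux/errno.h>

    /* -ENXIO stops retries in the i2c/EDID layers; -ETIMEDOUT keeps
     * their retry loops alive for slow-responding DDC chips. */
    static int gmbus_errno_after_nak(bool bus_went_idle)
    {
            return bus_went_idle ? -ENXIO : -ETIMEDOUT;
    }
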
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9dee82350def..08eb04c787e8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -747,6 +747,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
        },
        {
                .callback = intel_no_lvds_dmi_callback,
+               .ident = "Hewlett-Packard HP t5740e Thin Client",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+               },
+       },
+       {
+               .callback = intel_no_lvds_dmi_callback,
                .ident = "Hewlett-Packard t5745",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8e79ff67ec98..d0ce2a5b1d3f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2270,10 +2270,33 @@ void ironlake_disable_drps(struct drm_device *dev)
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 swreq;
+       u32 limits;
 
-       swreq = (val & 0x3ff) << 25;
-       I915_WRITE(GEN6_RPNSWREQ, swreq);
+       limits = 0;
+       if (val >= dev_priv->max_delay)
+               val = dev_priv->max_delay;
+       else
+               limits |= dev_priv->max_delay << 24;
+
+       if (val <= dev_priv->min_delay)
+               val = dev_priv->min_delay;
+       else
+               limits |= dev_priv->min_delay << 16;
+
+       if (val == dev_priv->cur_delay)
+               return;
+
+       I915_WRITE(GEN6_RPNSWREQ,
+                  GEN6_FREQUENCY(val) |
+                  GEN6_OFFSET(0) |
+                  GEN6_AGGRESSIVE_TURBO);
+
+       /* Make sure we continue to get interrupts
+        * until we hit the minimum or maximum frequencies.
+        */
+       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+       dev_priv->cur_delay = val;
 }
 
 void gen6_disable_rps(struct drm_device *dev)
@@ -2327,11 +2350,10 @@ int intel_enable_rc6(const struct drm_device *dev)
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
        struct intel_ring_buffer *ring;
-       u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-       u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+       u32 rp_state_cap;
+       u32 gt_perf_status;
        u32 pcu_mbox, rc6_mask = 0;
        u32 gtfifodbg;
-       int cur_freq, min_freq, max_freq;
        int rc6_mode;
        int i;
 
@@ -2352,6 +2374,14 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
        gen6_gt_force_wake_get(dev_priv);
 
+       rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+       gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+       /* In units of 100MHz */
+       dev_priv->max_delay = rp_state_cap & 0xff;
+       dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+       dev_priv->cur_delay = 0;
+
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
 
@@ -2399,8 +2429,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                  18 << 24 |
-                  6 << 16);
+                  dev_priv->max_delay << 24 |
+                  dev_priv->min_delay << 16);
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
        I915_WRITE(GEN6_RP_UP_EI, 100000);
@@ -2408,7 +2438,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
-                  GEN6_RP_MEDIA_HW_MODE |
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
                   GEN6_RP_MEDIA_IS_GFX |
                   GEN6_RP_ENABLE |
                   GEN6_RP_UP_BUSY_AVG |
@@ -2426,10 +2456,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
                     500))
                DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 
-       min_freq = (rp_state_cap & 0xff0000) >> 16;
-       max_freq = rp_state_cap & 0xff;
-       cur_freq = (gt_perf_status & 0xff00) >> 8;
-
        /* Check for overclock support */
        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
                     500))
@@ -2440,14 +2466,11 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
                     500))
                DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
        if (pcu_mbox & (1<<31)) { /* OC supported */
-               max_freq = pcu_mbox & 0xff;
+               dev_priv->max_delay = pcu_mbox & 0xff;
                DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
        }
 
-       /* In units of 100MHz */
-       dev_priv->max_delay = max_freq;
-       dev_priv->min_delay = min_freq;
-       dev_priv->cur_delay = cur_freq;
+       gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
        /* requires MSI enabled */
        I915_WRITE(GEN6_PMIER,
@@ -3580,8 +3603,9 @@ static void gen6_sanitize_pm(struct drm_device *dev)
        limits |= (dev_priv->min_delay & 0x3f) << 16;
 
        if (old != limits) {
-               DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
-                         limits, old);
+               /* Note that the known failure case is to read back 0. */
+               DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+                                "expected %08x, was %08x\n", limits, old);
                I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
        }
 
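
Note on the intel_pm.c changes above: gen6_set_rps() is now the single place that clamps the requested delay and programs GEN6_RP_INTERRUPT_LIMITS, so up/down interrupts keep firing only while the corresponding bound has not been reached. A sketch of that bookkeeping, with plain integers standing in for the dev_priv fields:

    /* Clamp *val to [min_delay, max_delay] and build the interrupt-limits
     * value as the hunk above does: request an up limit (bits 31:24) or
     * down limit (bits 23:16) only for a bound not yet hit. */
    static u32 rps_clamp_and_limits(u8 *val, u8 min_delay, u8 max_delay)
    {
            u32 limits = 0;

            if (*val >= max_delay)
                    *val = max_delay;             /* at the top: no up irqs */
            else
                    limits |= max_delay << 24;    /* keep up irqs coming */

            if (*val <= min_delay)
                    *val = min_delay;             /* at the bottom: no down irqs */
            else
                    limits |= min_delay << 16;    /* keep down irqs coming */

            return limits;
    }
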
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a949b73880c8..b6a9d45fc3c6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -783,10 +783,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
                ((v_sync_len & 0x30) >> 4);
 
        dtd->part2.dtd_flags = 0x18;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
        if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-               dtd->part2.dtd_flags |= 0x2;
+               dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-               dtd->part2.dtd_flags |= 0x4;
+               dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
        dtd->part2.sdvo_flags = 0;
        dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -820,9 +822,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
        mode->clock = dtd->part1.clock * 10;
 
        mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-       if (dtd->part2.dtd_flags & 0x2)
+       if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
                mode->flags |= DRM_MODE_FLAG_PHSYNC;
-       if (dtd->part2.dtd_flags & 0x4)
+       if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
                mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 6b7b22f4d63e..9d030142ee43 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
        u16 output_flags;
 } __attribute__((packed));
 
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
 /** This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
        struct {
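
Note on the SDVO changes above: the magic 0x2/0x4 DTD flag values are replaced by the named constants just defined, which line up with the EDID misc flag bits, and interlace is now round-tripped as well. A sketch of the mode-to-DTD direction using those constants (mirrors intel_sdvo_get_dtd_from_mode(); 0x18 is the base value the driver always sets):

    static u8 dtd_flags_from_mode(unsigned int drm_mode_flags)
    {
            u8 flags = 0x18;   /* base value, as in the driver */

            if (drm_mode_flags & DRM_MODE_FLAG_INTERLACE)
                    flags |= DTD_FLAG_INTERLACE;
            if (drm_mode_flags & DRM_MODE_FLAG_PHSYNC)
                    flags |= DTD_FLAG_HSYNC_POSITIVE;
            if (drm_mode_flags & DRM_MODE_FLAG_PVSYNC)
                    flags |= DTD_FLAG_VSYNC_POSITIVE;
            return flags;
    }
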
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 3346612d2953..a233a51fd7e6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -674,6 +674,54 @@ static const struct tv_mode tv_modes[] = {
                .filter_table = filter_table,
        },
        {
+               .name = "480p",
+               .clock = 107520,
+               .refresh = 59940,
+               .oversample = TV_OVERSAMPLE_4X,
+               .component_only = 1,
+
+               .hsync_end = 64, .hblank_end = 122,
+               .hblank_start = 842, .htotal = 857,
+
+               .progressive = true, .trilevel_sync = false,
+
+               .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+               .vsync_len = 12,
+
+               .veq_ena = false,
+
+               .vi_end_f1 = 44, .vi_end_f2 = 44,
+               .nbr_end = 479,
+
+               .burst_ena = false,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name = "576p",
+               .clock = 107520,
+               .refresh = 50000,
+               .oversample = TV_OVERSAMPLE_4X,
+               .component_only = 1,
+
+               .hsync_end = 64, .hblank_end = 139,
+               .hblank_start = 859, .htotal = 863,
+
+               .progressive = true, .trilevel_sync = false,
+
+               .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+               .vsync_len = 10,
+
+               .veq_ena = false,
+
+               .vi_end_f1 = 48, .vi_end_f2 = 48,
+               .nbr_end = 575,
+
+               .burst_ena = false,
+
+               .filter_table = filter_table,
+       },
+       {
                .name = "720p@60Hz",
                .clock = 148800,
                .refresh = 60000,
@@ -1194,6 +1242,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 
        I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
        I915_WRITE(TV_CTL, save_tv_ctl);
+       POSTING_READ(TV_CTL);
+
+       /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+       intel_wait_for_vblank(intel_tv->base.base.dev,
+                             to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
        /* Restore interrupt config */
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {