author		Ville Syrjälä <ville.syrjala@linux.intel.com>	2014-08-19 06:24:25 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-09-03 05:04:02 -0400
commit		1e0560e05db2830f61465ce98b995564d33dfbcc (patch)
tree		d10fbb87577ba9d0804a33106db93b1e86f82ab5
parent		d337a341532d028920fc49832213c6dd2ce8289c (diff)
drm/i915: Rename edp vdd funcs for consistency
edp_* are now the lower level functions and intel_edp_* the higher level ones. One should use them in pairs.

v2: Don't return void (Jani)

Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
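To make the convention concrete, here is a minimal sketch of how a caller pairs the higher-level helpers after this rename, modelled on the intel_edp_init_connector() hunk below; intel_some_probe_path() is a hypothetical name used only for illustration, not a function added by this patch.

	/* Hypothetical caller sketching the intended pairing of the
	 * higher-level intel_edp_* helpers after this rename. */
	static void intel_some_probe_path(struct intel_dp *intel_dp)
	{
		/* Take the VDD reference before any AUX/DPCD access... */
		intel_edp_panel_vdd_on(intel_dp);

		intel_dp_get_dpcd(intel_dp);

		/* ...and drop it with the matching higher-level helper rather
		 * than calling the lower-level edp_panel_vdd_off() directly. */
		intel_edp_panel_vdd_off(intel_dp, false);
	}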
 drivers/gpu/drm/i915/intel_dp.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 90b2d9f7c4e8..190e617f128e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -111,7 +111,7 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 }
 
 static void intel_dp_link_down(struct intel_dp *intel_dp);
-static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
+static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 
 int
@@ -533,7 +533,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	bool has_aux_irq = HAS_AUX_IRQ(dev);
 	bool vdd;
 
-	vdd = _edp_panel_vdd_on(intel_dp);
+	vdd = edp_panel_vdd_on(intel_dp);
 
 	/* dp aux is extremely sensitive to irq latency, hence request the
 	 * lowest possible wakeup latency and so prevent the cpu from going into
@@ -1165,7 +1165,7 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
 	return control;
 }
 
-static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
+static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -1216,7 +1216,7 @@ static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
 	if (is_edp(intel_dp)) {
-		bool vdd = _edp_panel_vdd_on(intel_dp);
+		bool vdd = edp_panel_vdd_on(intel_dp);
 
 		WARN(!vdd, "eDP VDD already requested on\n");
 	}
@@ -1299,6 +1299,11 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 	edp_panel_vdd_schedule_off(intel_dp);
 }
 
+static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+{
+	edp_panel_vdd_off(intel_dp, sync);
+}
+
 void intel_edp_panel_on(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -2136,7 +2141,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	intel_dp_start_link_train(intel_dp);
 	intel_edp_panel_on(intel_dp);
-	edp_panel_vdd_off(intel_dp, true);
+	intel_edp_panel_vdd_off(intel_dp, true);
 	intel_dp_complete_link_train(intel_dp);
 	intel_dp_stop_link_train(intel_dp);
 }
@@ -3416,7 +3421,7 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
 	DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
 		      buf[0], buf[1], buf[2]);
 
-	edp_panel_vdd_off(intel_dp, false);
+	intel_edp_panel_vdd_off(intel_dp, false);
 }
 
 static bool
@@ -3440,7 +3445,7 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
 			intel_dp->is_mst = false;
 		}
 	}
-	edp_panel_vdd_off(intel_dp, false);
+	intel_edp_panel_vdd_off(intel_dp, false);
 
 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
 	return intel_dp->is_mst;
@@ -4560,7 +4565,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	/* Cache DPCD and EDID for edp. */
 	intel_edp_panel_vdd_on(intel_dp);
 	has_dpcd = intel_dp_get_dpcd(intel_dp);
-	edp_panel_vdd_off(intel_dp, false);
+	intel_edp_panel_vdd_off(intel_dp, false);
 
 	if (has_dpcd) {
 		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
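For comparison, a sketch of how a lower-level user inside intel_dp.c, such as the intel_dp_aux_ch() hunk above, might consume the boolean return of the renamed edp_panel_vdd_on(); the some_aux_transfer() name and the if (vdd) guard are assumptions for illustration and are not part of the hunks shown here.

	/* Hypothetical low-level caller: the boolean return says whether this
	 * call actually switched VDD on, so the off call can be balanced. */
	static void some_aux_transfer(struct intel_dp *intel_dp)
	{
		bool vdd;

		vdd = edp_panel_vdd_on(intel_dp);

		/* ... perform the AUX transaction ... */

		if (vdd)
			edp_panel_vdd_off(intel_dp, false);
	}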