author	Rodrigo Vivi <rodrigo.vivi@intel.com>	2014-11-14 11:52:28 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-11-17 13:12:28 -0500
commit	0bc12bcb1b9686d7011f16410ba17ed0740167c3 (patch)
tree	f0897e5ec63e7a9d69ea53917888dffe9774a906 /drivers
parent	a4f1289eaacdc2651355201b1ddded91710c89a2 (diff)
drm/i915: Introduce intel_psr.c
No functional changes; just cleaning and reorganizing the code.

v2: Rebase it, placing it at the beginning of the PSR rework. This makes it easy to blame at least the latest changes.

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
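The rename only changes the prefix of the PSR entry points; roughly, a call site moves over like this (illustrative sketch only, the authoritative changes are in the diff below):

	/* before: PSR code lived in intel_dp.c under the eDP prefix */
	intel_edp_psr_enable(intel_dp);
	intel_edp_psr_disable(intel_dp);

	/* after: the same functions, now implemented in intel_psr.c */
	intel_psr_enable(intel_dp);
	intel_psr_disable(intel_dp);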
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/Makefile	1
-rw-r--r--	drivers/gpu/drm/i915/intel_ddi.c	4
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	381
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h	21
-rw-r--r--	drivers/gpu/drm/i915/intel_frontbuffer.c	4
-rw-r--r--	drivers/gpu/drm/i915/intel_psr.c	408
7 files changed, 428 insertions(+), 393 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 891e584e97ea..e4083e41a600 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -51,6 +51,7 @@ i915-y += intel_audio.o \
 	intel_frontbuffer.o \
 	intel_modes.o \
 	intel_overlay.o \
+	intel_psr.o \
 	intel_sideband.o \
 	intel_sprite.o
 i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ca33ee9477f1..3e4c63cfa415 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1583,7 +1583,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 		intel_dp_stop_link_train(intel_dp);
 
 		intel_edp_backlight_on(intel_dp);
-		intel_edp_psr_enable(intel_dp);
+		intel_psr_enable(intel_dp);
 	}
 
 	if (intel_crtc->config.has_audio) {
@@ -1609,7 +1609,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-		intel_edp_psr_disable(intel_dp);
+		intel_psr_disable(intel_dp);
 		intel_edp_backlight_off(intel_dp);
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7d58a64195d6..2632f2adc319 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12449,7 +12449,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 	if (SUPPORTS_TV(dev))
 		intel_tv_init(dev);
 
-	intel_edp_psr_init(dev);
+	intel_psr_init(dev);
 
 	for_each_intel_encoder(dev, encoder) {
 		encoder->base.possible_crtcs = encoder->crtc_mask;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a89585d809be..7369e6f10117 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2066,385 +2066,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 	}
 }
 
-static bool is_edp_psr(struct intel_dp *intel_dp)
-{
-	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
-}
-
-static bool intel_edp_is_psr_enabled(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!HAS_PSR(dev))
-		return false;
-
-	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
-}
-
-static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
-				    struct edp_vsc_psr *vsc_psr)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
-	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
-	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
-	uint32_t *data = (uint32_t *) vsc_psr;
-	unsigned int i;
-
-	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
-	   the video DIP being updated before program video DIP data buffer
-	   registers for DIP being updated. */
-	I915_WRITE(ctl_reg, 0);
-	POSTING_READ(ctl_reg);
-
-	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
-		if (i < sizeof(struct edp_vsc_psr))
-			I915_WRITE(data_reg + i, *data++);
-		else
-			I915_WRITE(data_reg + i, 0);
-	}
-
-	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
-	POSTING_READ(ctl_reg);
-}
-
-static void intel_edp_psr_setup_vsc(struct intel_dp *intel_dp)
-{
-	struct edp_vsc_psr psr_vsc;
-
-	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
-	memset(&psr_vsc, 0, sizeof(psr_vsc));
-	psr_vsc.sdp_header.HB0 = 0;
-	psr_vsc.sdp_header.HB1 = 0x7;
-	psr_vsc.sdp_header.HB2 = 0x2;
-	psr_vsc.sdp_header.HB3 = 0x8;
-	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
-}
-
-static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t aux_clock_divider;
-	int precharge = 0x3;
-	bool only_standby = false;
-	static const uint8_t aux_msg[] = {
-		[0] = DP_AUX_NATIVE_WRITE << 4,
-		[1] = DP_SET_POWER >> 8,
-		[2] = DP_SET_POWER & 0xff,
-		[3] = 1 - 1,
-		[4] = DP_SET_POWER_D0,
-	};
-	int i;
-
-	BUILD_BUG_ON(sizeof(aux_msg) > 20);
-
-	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
-
-	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
-		only_standby = true;
-
-	/* Enable PSR in sink */
-	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
-		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
-	else
-		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-
-	/* Setup AUX registers */
-	for (i = 0; i < sizeof(aux_msg); i += 4)
-		I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
-			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
-
-	I915_WRITE(EDP_PSR_AUX_CTL(dev),
-		   DP_AUX_CH_CTL_TIME_OUT_400us |
-		   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
-		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
-}
-
-static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t max_sleep_time = 0x1f;
-	uint32_t idle_frames = 1;
-	uint32_t val = 0x0;
-	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
-	bool only_standby = false;
-
-	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
-		only_standby = true;
-
-	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
-		val |= EDP_PSR_LINK_STANDBY;
-		val |= EDP_PSR_TP2_TP3_TIME_0us;
-		val |= EDP_PSR_TP1_TIME_0us;
-		val |= EDP_PSR_SKIP_AUX_EXIT;
-		val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
-	} else
-		val |= EDP_PSR_LINK_DISABLE;
-
-	I915_WRITE(EDP_PSR_CTL(dev), val |
-		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
-		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
-		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
-		   EDP_PSR_ENABLE);
-}
-
-static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = dig_port->base.base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	lockdep_assert_held(&dev_priv->psr.lock);
-	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
-	dev_priv->psr.source_ok = false;
-
-	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
-		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
-		return false;
-	}
-
-	if (!i915.enable_psr) {
-		DRM_DEBUG_KMS("PSR disable by flag\n");
-		return false;
-	}
-
-	/* Below limitations aren't valid for Broadwell */
-	if (IS_BROADWELL(dev))
-		goto out;
-
-	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
-	    S3D_ENABLE) {
-		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
-		return false;
-	}
-
-	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
-		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
-		return false;
-	}
-
- out:
-	dev_priv->psr.source_ok = true;
-	return true;
-}
-
-static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
-	WARN_ON(dev_priv->psr.active);
-	lockdep_assert_held(&dev_priv->psr.lock);
-
-	/* Enable/Re-enable PSR on the host */
-	intel_edp_psr_enable_source(intel_dp);
-
-	dev_priv->psr.active = true;
-}
-
-void intel_edp_psr_enable(struct intel_dp *intel_dp)
-{
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!HAS_PSR(dev)) {
-		DRM_DEBUG_KMS("PSR not supported on this platform\n");
-		return;
-	}
-
-	if (!is_edp_psr(intel_dp)) {
-		DRM_DEBUG_KMS("PSR not supported by this panel\n");
-		return;
-	}
-
-	mutex_lock(&dev_priv->psr.lock);
-	if (dev_priv->psr.enabled) {
-		DRM_DEBUG_KMS("PSR already in use\n");
-		goto unlock;
-	}
-
-	if (!intel_edp_psr_match_conditions(intel_dp))
-		goto unlock;
-
-	dev_priv->psr.busy_frontbuffer_bits = 0;
-
-	intel_edp_psr_setup_vsc(intel_dp);
-
-	/* Avoid continuous PSR exit by masking memup and hpd */
-	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
-		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
-	/* Enable PSR on the panel */
-	intel_edp_psr_enable_sink(intel_dp);
-
-	dev_priv->psr.enabled = intel_dp;
-unlock:
-	mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_disable(struct intel_dp *intel_dp)
-{
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
-		mutex_unlock(&dev_priv->psr.lock);
-		return;
-	}
-
-	if (dev_priv->psr.active) {
-		I915_WRITE(EDP_PSR_CTL(dev),
-			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
-
-		/* Wait till PSR is idle */
-		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
-			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
-			DRM_ERROR("Timed out waiting for PSR Idle State\n");
-
-		dev_priv->psr.active = false;
-	} else {
-		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
-	}
-
-	dev_priv->psr.enabled = NULL;
-	mutex_unlock(&dev_priv->psr.lock);
-
-	cancel_delayed_work_sync(&dev_priv->psr.work);
-}
-
-static void intel_edp_psr_work(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), psr.work.work);
-	struct intel_dp *intel_dp = dev_priv->psr.enabled;
-
-	/* We have to make sure PSR is ready for re-enable
-	 * otherwise it keeps disabled until next full enable/disable cycle.
-	 * PSR might take some time to get fully disabled
-	 * and be ready for re-enable.
-	 */
-	if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
-		      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
-		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
-		return;
-	}
-
-	mutex_lock(&dev_priv->psr.lock);
-	intel_dp = dev_priv->psr.enabled;
-
-	if (!intel_dp)
-		goto unlock;
-
-	/*
-	 * The delayed work can race with an invalidate hence we need to
-	 * recheck. Since psr_flush first clears this and then reschedules we
-	 * won't ever miss a flush when bailing out here.
-	 */
-	if (dev_priv->psr.busy_frontbuffer_bits)
-		goto unlock;
-
-	intel_edp_psr_do_enable(intel_dp);
-unlock:
-	mutex_unlock(&dev_priv->psr.lock);
-}
-
-static void intel_edp_psr_do_exit(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->psr.active) {
-		u32 val = I915_READ(EDP_PSR_CTL(dev));
-
-		WARN_ON(!(val & EDP_PSR_ENABLE));
-
-		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
-
-		dev_priv->psr.active = false;
-	}
-
-}
-
-void intel_edp_psr_invalidate(struct drm_device *dev,
-			      unsigned frontbuffer_bits)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	enum pipe pipe;
-
-	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
-		mutex_unlock(&dev_priv->psr.lock);
-		return;
-	}
-
-	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
-	pipe = to_intel_crtc(crtc)->pipe;
-
-	intel_edp_psr_do_exit(dev);
-
-	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
-	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
-	mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_flush(struct drm_device *dev,
-			 unsigned frontbuffer_bits)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	enum pipe pipe;
-
-	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
-		mutex_unlock(&dev_priv->psr.lock);
-		return;
-	}
-
-	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
-	pipe = to_intel_crtc(crtc)->pipe;
-	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
-
-	/*
-	 * On Haswell sprite plane updates don't result in a psr invalidating
-	 * signal in the hardware. Which means we need to manually fake this in
-	 * software for all flushes, not just when we've seen a preceding
-	 * invalidation through frontbuffer rendering.
-	 */
-	if (IS_HASWELL(dev) &&
-	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
-		intel_edp_psr_do_exit(dev);
-
-	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(100));
-	mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
-	mutex_init(&dev_priv->psr.lock);
-}
-
 static void intel_disable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -5095,7 +4716,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 	 * hard to tell without seeing the user of this function of this code.
 	 * Check locking and ordering once that lands.
 	 */
-	if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
+	if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
 		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
 		return;
 	}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d67c59b15128..d1f9b639f0d1 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -999,21 +999,16 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp);
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
 void intel_edp_panel_on(struct intel_dp *intel_dp);
 void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_edp_psr_enable(struct intel_dp *intel_dp);
-void intel_edp_psr_disable(struct intel_dp *intel_dp);
 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-void intel_edp_psr_invalidate(struct drm_device *dev,
-			      unsigned frontbuffer_bits);
-void intel_edp_psr_flush(struct drm_device *dev,
-			 unsigned frontbuffer_bits);
-void intel_edp_psr_init(struct drm_device *dev);
-
 void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
 void intel_dp_mst_suspend(struct drm_device *dev);
 void intel_dp_mst_resume(struct drm_device *dev);
 int intel_dp_max_link_bw(struct intel_dp *intel_dp);
 void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
+void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
+
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1117,6 +1112,16 @@ void intel_backlight_register(struct drm_device *dev);
 void intel_backlight_unregister(struct drm_device *dev);
 
 
+/* intel_psr.c */
+bool intel_psr_is_enabled(struct drm_device *dev);
+void intel_psr_enable(struct intel_dp *intel_dp);
+void intel_psr_disable(struct intel_dp *intel_dp);
+void intel_psr_invalidate(struct drm_device *dev,
+			  unsigned frontbuffer_bits);
+void intel_psr_flush(struct drm_device *dev,
+		     unsigned frontbuffer_bits);
+void intel_psr_init(struct drm_device *dev);
+
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_fini(struct drm_i915_private *);
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 58cf2e6b78f4..79f6d72179c5 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -156,7 +156,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 
 	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
 
-	intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
+	intel_psr_invalidate(dev, obj->frontbuffer_bits);
 }
 
 /**
@@ -182,7 +182,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
 
 	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
 
-	intel_edp_psr_flush(dev, frontbuffer_bits);
+	intel_psr_flush(dev, frontbuffer_bits);
 
 	/*
 	 * FIXME: Unconditional fbc flushing here is a rather gross hack and
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
new file mode 100644
index 000000000000..7b3ed910bc48
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+bool intel_psr_is_enabled(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!HAS_PSR(dev))
+		return false;
+
+	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+}
+
+static void intel_psr_write_vsc(struct intel_dp *intel_dp,
+				struct edp_vsc_psr *vsc_psr)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+	uint32_t *data = (uint32_t *) vsc_psr;
+	unsigned int i;
+
+	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
+	   the video DIP being updated before program video DIP data buffer
+	   registers for DIP being updated. */
+	I915_WRITE(ctl_reg, 0);
+	POSTING_READ(ctl_reg);
+
+	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+		if (i < sizeof(struct edp_vsc_psr))
+			I915_WRITE(data_reg + i, *data++);
+		else
+			I915_WRITE(data_reg + i, 0);
+	}
+
+	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+	POSTING_READ(ctl_reg);
+}
+
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
+{
+	struct edp_vsc_psr psr_vsc;
+
+	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+	memset(&psr_vsc, 0, sizeof(psr_vsc));
+	psr_vsc.sdp_header.HB0 = 0;
+	psr_vsc.sdp_header.HB1 = 0x7;
+	psr_vsc.sdp_header.HB2 = 0x2;
+	psr_vsc.sdp_header.HB3 = 0x8;
+	intel_psr_write_vsc(intel_dp, &psr_vsc);
+}
+
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t aux_clock_divider;
+	int precharge = 0x3;
+	bool only_standby = false;
+	static const uint8_t aux_msg[] = {
+		[0] = DP_AUX_NATIVE_WRITE << 4,
+		[1] = DP_SET_POWER >> 8,
+		[2] = DP_SET_POWER & 0xff,
+		[3] = 1 - 1,
+		[4] = DP_SET_POWER_D0,
+	};
+	int i;
+
+	BUILD_BUG_ON(sizeof(aux_msg) > 20);
+
+	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+		only_standby = true;
+
+	/* Enable PSR in sink */
+	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+	else
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+
+	/* Setup AUX registers */
+	for (i = 0; i < sizeof(aux_msg); i += 4)
+		I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
+			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+
+	I915_WRITE(EDP_PSR_AUX_CTL(dev),
+		   DP_AUX_CH_CTL_TIME_OUT_400us |
+		   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
+
+static void intel_psr_enable_source(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t max_sleep_time = 0x1f;
+	uint32_t idle_frames = 1;
+	uint32_t val = 0x0;
+	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+	bool only_standby = false;
+
+	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+		only_standby = true;
+
+	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
+		val |= EDP_PSR_LINK_STANDBY;
+		val |= EDP_PSR_TP2_TP3_TIME_0us;
+		val |= EDP_PSR_TP1_TIME_0us;
+		val |= EDP_PSR_SKIP_AUX_EXIT;
+		val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
+	} else
+		val |= EDP_PSR_LINK_DISABLE;
+
+	I915_WRITE(EDP_PSR_CTL(dev), val |
+		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
+		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+		   EDP_PSR_ENABLE);
+}
+
+static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dig_port->base.base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	lockdep_assert_held(&dev_priv->psr.lock);
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+	dev_priv->psr.source_ok = false;
+
+	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
+		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+		return false;
+	}
+
+	if (!i915.enable_psr) {
+		DRM_DEBUG_KMS("PSR disable by flag\n");
+		return false;
+	}
+
+	/* Below limitations aren't valid for Broadwell */
+	if (IS_BROADWELL(dev))
+		goto out;
+
+	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+	    S3D_ENABLE) {
+		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+		return false;
+	}
+
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+		return false;
+	}
+
+ out:
+	dev_priv->psr.source_ok = true;
+	return true;
+}
+
+static void intel_psr_do_enable(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+	WARN_ON(dev_priv->psr.active);
+	lockdep_assert_held(&dev_priv->psr.lock);
+
+	/* Enable/Re-enable PSR on the host */
+	intel_psr_enable_source(intel_dp);
+
+	dev_priv->psr.active = true;
+}
+
+void intel_psr_enable(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!HAS_PSR(dev)) {
+		DRM_DEBUG_KMS("PSR not supported on this platform\n");
+		return;
+	}
+
+	if (!is_edp_psr(intel_dp)) {
+		DRM_DEBUG_KMS("PSR not supported by this panel\n");
+		return;
+	}
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (dev_priv->psr.enabled) {
+		DRM_DEBUG_KMS("PSR already in use\n");
+		goto unlock;
+	}
+
+	if (!intel_psr_match_conditions(intel_dp))
+		goto unlock;
+
+	dev_priv->psr.busy_frontbuffer_bits = 0;
+
+	intel_psr_setup_vsc(intel_dp);
+
+	/* Avoid continuous PSR exit by masking memup and hpd */
+	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+
+	/* Enable PSR on the panel */
+	intel_psr_enable_sink(intel_dp);
+
+	dev_priv->psr.enabled = intel_dp;
+unlock:
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_psr_disable(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	if (dev_priv->psr.active) {
+		I915_WRITE(EDP_PSR_CTL(dev),
+			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+		/* Wait till PSR is idle */
+		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+			DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
+		dev_priv->psr.active = false;
+	} else {
+		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+	}
+
+	dev_priv->psr.enabled = NULL;
+	mutex_unlock(&dev_priv->psr.lock);
+
+	cancel_delayed_work_sync(&dev_priv->psr.work);
+}
+
+static void intel_psr_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), psr.work.work);
+	struct intel_dp *intel_dp = dev_priv->psr.enabled;
+
+	/* We have to make sure PSR is ready for re-enable
+	 * otherwise it keeps disabled until next full enable/disable cycle.
+	 * PSR might take some time to get fully disabled
+	 * and be ready for re-enable.
+	 */
+	if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+		      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+		return;
+	}
+
+	mutex_lock(&dev_priv->psr.lock);
+	intel_dp = dev_priv->psr.enabled;
+
+	if (!intel_dp)
+		goto unlock;
+
+	/*
+	 * The delayed work can race with an invalidate hence we need to
+	 * recheck. Since psr_flush first clears this and then reschedules we
+	 * won't ever miss a flush when bailing out here.
+	 */
+	if (dev_priv->psr.busy_frontbuffer_bits)
+		goto unlock;
+
+	intel_psr_do_enable(intel_dp);
+unlock:
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_psr_exit(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->psr.active) {
+		u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+		WARN_ON(!(val & EDP_PSR_ENABLE));
+
+		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+		dev_priv->psr.active = false;
+	}
+
+}
+
+void intel_psr_invalidate(struct drm_device *dev,
+			  unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+
+	intel_psr_exit(dev);
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_psr_flush(struct drm_device *dev,
+		     unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+	/*
+	 * On Haswell sprite plane updates don't result in a psr invalidating
+	 * signal in the hardware. Which means we need to manually fake this in
+	 * software for all flushes, not just when we've seen a preceding
+	 * invalidation through frontbuffer rendering.
+	 */
+	if (IS_HASWELL(dev) &&
+	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+		intel_psr_exit(dev);
+
+	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+		schedule_delayed_work(&dev_priv->psr.work,
+				      msecs_to_jiffies(100));
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_psr_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
+	mutex_init(&dev_priv->psr.lock);
+}