author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-15 18:52:01 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-15 18:52:01 -0500
commit	988adfdffdd43cfd841df734664727993076d7cb (patch)
tree	6794f7bba8f595500c2b7d33376ad6614adcfaf2 /drivers/gpu/drm/i915/intel_psr.c
parent	26178ec11ef3c6c814bf16a0a2b9c2f7242e3c64 (diff)
parent	4e0cd68115620bc3236ff4e58e4c073948629b41 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "Highlights:

  - AMD KFD driver merge

    This is the AMD HSA interface for exposing a lowlevel interface for
    GPGPU use. They have an open source userspace built on top of this
    interface, and the code looks as good as it was going to get out of
    tree.

  - Initial atomic modesetting work

    The need for an atomic modesetting interface to allow userspace to
    try and send a complete set of modesetting state to the driver has
    arisen, and been suffering from neglect this past year. No more, the
    start of the common code and changes for msm driver to use it are in
    this tree. Ongoing work to get the userspace ioctl finished and the
    code clean will probably wait until next kernel.

  - DisplayID 1.3 and tiled monitor exposed to userspace.

    Tiled monitor property is now exposed for userspace to make use of.

  - Rockchip drm driver merged.

  - imx gpu driver moved out of staging

  Other stuff:

  - core:
      panel - MIPI DSI + new panels.
      expose suggested x/y properties for virtual GPUs

  - i915:
      Initial Skylake (SKL) support
      gen3/4 reset work
      start of dri1/ums removal
      infoframe tracking
      fixes for lots of things.

  - nouveau:
      tegra k1 voltage support
      GM204 modesetting support
      GT21x memory reclocking work

  - radeon:
      CI dpm fixes
      GPUVM improvements
      Initial DPM fan control

  - rcar-du:
      HDMI support added
      removed some support for old boards
      slave encoder driver for Analog Devices adv7511

  - exynos:
      Exynos4415 SoC support

  - msm:
      a4xx gpu support
      atomic helper conversion

  - tegra:
      iommu support
      universal plane support
      ganged-mode DSI support

  - sti:
      HDMI i2c improvements

  - vmwgfx:
      some late fixes.

  - qxl:
      use suggested x/y properties"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (969 commits)
  drm: sti: fix module compilation issue
  drm/i915: save/restore GMBUS freq across suspend/resume on gen4
  drm: sti: correctly cleanup CRTC and planes
  drm: sti: add HQVDP plane
  drm: sti: add cursor plane
  drm: sti: enable auxiliary CRTC
  drm: sti: fix delay in VTG programming
  drm: sti: prepare sti_tvout to support auxiliary crtc
  drm: sti: use drm_crtc_vblank_{on/off} instead of drm_vblank_{on/off}
  drm: sti: fix hdmi avi infoframe
  drm: sti: remove event lock while disabling vblank
  drm: sti: simplify gdp code
  drm: sti: clear all mixer control
  drm: sti: remove gpio for HDMI hot plug detection
  drm: sti: allow to change hdmi ddc i2c adapter
  drm/doc: Document drm_add_modes_noedid() usage
  drm/i915: Remove '& 0xffff' from the mask given to WA_REG()
  drm/i915: Invert the mask and val arguments in wa_add() and WA_REG()
  drm: Zero out DRM object memory upon cleanup
  drm/i915/bdw: Fix the write setting up the WIZ hashing mode
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/intel_psr.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_psr.c	481
1 file changed, 481 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
new file mode 100644
index 000000000000..716b8a961eea
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -0,0 +1,481 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24/**
25 * DOC: Panel Self Refresh (PSR/SRD)
26 *
27 * Since Haswell the display controller supports Panel Self-Refresh on
28 * display panels which have a remote frame buffer (RFB) implemented
29 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
30 * lower standby states when the system is idle but the display is on, as
31 * it eliminates display refresh requests to DDR memory completely as long
32 * as the frame buffer for that display is unchanged.
33 *
34 * Panel Self Refresh must be supported by both Hardware (source) and
35 * Panel (sink).
36 *
37 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
38 * to power down the link and memory controller. For DSI panels the same idea
39 * is called "manual mode".
40 *
41 * The implementation uses the hardware-based PSR support which automatically
42 * enters/exits self-refresh mode. The hardware takes care of sending the
43 * required DP aux message and could even retrain the link (that part isn't
44 * enabled yet though). The hardware also keeps track of any frontbuffer
45 * changes to know when to exit self-refresh mode again. Unfortunately that
46 * part doesn't work too well, which is why the i915 PSR support uses the
47 * software frontbuffer tracking to make sure it doesn't miss a screen
48 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
49 * get called by the frontbuffer tracking code. Note that because of locking
50 * issues the self-refresh re-enable code is done from a work queue, which
51 * must be correctly synchronized/cancelled when shutting down the pipe.
52 */
53
54#include <drm/drmP.h>
55
56#include "intel_drv.h"
57#include "i915_drv.h"
58
59static bool is_edp_psr(struct intel_dp *intel_dp)
60{
61 return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
62}
63
64bool intel_psr_is_enabled(struct drm_device *dev)
65{
66 struct drm_i915_private *dev_priv = dev->dev_private;
67
68 if (!HAS_PSR(dev))
69 return false;
70
71 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
72}
73
74static void intel_psr_write_vsc(struct intel_dp *intel_dp,
75 struct edp_vsc_psr *vsc_psr)
76{
77 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
78 struct drm_device *dev = dig_port->base.base.dev;
79 struct drm_i915_private *dev_priv = dev->dev_private;
80 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
81 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
82 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
83 uint32_t *data = (uint32_t *) vsc_psr;
84 unsigned int i;
85
86 /* As per BSpec (Pipe Video Data Island Packet), we need to disable
87 the video DIP being updated before programming the video DIP data
88 buffer registers for the DIP being updated. */
89 I915_WRITE(ctl_reg, 0);
90 POSTING_READ(ctl_reg);
91
92 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
93 if (i < sizeof(struct edp_vsc_psr))
94 I915_WRITE(data_reg + i, *data++);
95 else
96 I915_WRITE(data_reg + i, 0);
97 }
98
99 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
100 POSTING_READ(ctl_reg);
101}
102
103static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
104{
105 struct edp_vsc_psr psr_vsc;
106
107 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
108 memset(&psr_vsc, 0, sizeof(psr_vsc));
109 psr_vsc.sdp_header.HB0 = 0;
110 psr_vsc.sdp_header.HB1 = 0x7;
111 psr_vsc.sdp_header.HB2 = 0x2;
112 psr_vsc.sdp_header.HB3 = 0x8;
113 intel_psr_write_vsc(intel_dp, &psr_vsc);
114}
115
116static void intel_psr_enable_sink(struct intel_dp *intel_dp)
117{
118 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
119 struct drm_device *dev = dig_port->base.base.dev;
120 struct drm_i915_private *dev_priv = dev->dev_private;
121 uint32_t aux_clock_divider;
122 int precharge = 0x3;
123 bool only_standby = false;
124 static const uint8_t aux_msg[] = {
125 [0] = DP_AUX_NATIVE_WRITE << 4,
126 [1] = DP_SET_POWER >> 8,
127 [2] = DP_SET_POWER & 0xff,
128 [3] = 1 - 1,
129 [4] = DP_SET_POWER_D0,
130 };
131 int i;
132
133 BUILD_BUG_ON(sizeof(aux_msg) > 20);
134
135 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
136
137 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
138 only_standby = true;
139
140 /* Enable PSR in sink */
141 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
142 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
143 DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
144 else
145 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
146 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
147
148 /* Setup AUX registers */
149 for (i = 0; i < sizeof(aux_msg); i += 4)
150 I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
151 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
152
153 I915_WRITE(EDP_PSR_AUX_CTL(dev),
154 DP_AUX_CH_CTL_TIME_OUT_400us |
155 (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
156 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
157 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
158}
159
160static void intel_psr_enable_source(struct intel_dp *intel_dp)
161{
162 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
163 struct drm_device *dev = dig_port->base.base.dev;
164 struct drm_i915_private *dev_priv = dev->dev_private;
165 uint32_t max_sleep_time = 0x1f;
166 uint32_t idle_frames = 1;
167 uint32_t val = 0x0;
168 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
169 bool only_standby = false;
170
171 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
172 only_standby = true;
173
174 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
175 val |= EDP_PSR_LINK_STANDBY;
176 val |= EDP_PSR_TP2_TP3_TIME_0us;
177 val |= EDP_PSR_TP1_TIME_0us;
178 val |= EDP_PSR_SKIP_AUX_EXIT;
179 val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
180 } else
181 val |= EDP_PSR_LINK_DISABLE;
182
183 I915_WRITE(EDP_PSR_CTL(dev), val |
184 (IS_BROADWELL(dev) ? 0 : link_entry_time) |
185 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
186 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
187 EDP_PSR_ENABLE);
188}
189
190static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
191{
192 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
193 struct drm_device *dev = dig_port->base.base.dev;
194 struct drm_i915_private *dev_priv = dev->dev_private;
195 struct drm_crtc *crtc = dig_port->base.base.crtc;
196 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
197
198 lockdep_assert_held(&dev_priv->psr.lock);
199 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
200 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
201
202 dev_priv->psr.source_ok = false;
203
204 if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
205 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
206 return false;
207 }
208
209 if (!i915.enable_psr) {
210 DRM_DEBUG_KMS("PSR disabled by flag\n");
211 return false;
212 }
213
214 /* Below limitations aren't valid for Broadwell */
215 if (IS_BROADWELL(dev))
216 goto out;
217
218 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
219 S3D_ENABLE) {
220 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
221 return false;
222 }
223
224 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
225 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
226 return false;
227 }
228
229 out:
230 dev_priv->psr.source_ok = true;
231 return true;
232}
233
234static void intel_psr_do_enable(struct intel_dp *intel_dp)
235{
236 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
237 struct drm_device *dev = intel_dig_port->base.base.dev;
238 struct drm_i915_private *dev_priv = dev->dev_private;
239
240 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
241 WARN_ON(dev_priv->psr.active);
242 lockdep_assert_held(&dev_priv->psr.lock);
243
244 /* Enable/Re-enable PSR on the host */
245 intel_psr_enable_source(intel_dp);
246
247 dev_priv->psr.active = true;
248}
249
250/**
251 * intel_psr_enable - Enable PSR
252 * @intel_dp: Intel DP
253 *
254 * This function can only be called after the pipe is fully trained and enabled.
255 */
256void intel_psr_enable(struct intel_dp *intel_dp)
257{
258 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
259 struct drm_device *dev = intel_dig_port->base.base.dev;
260 struct drm_i915_private *dev_priv = dev->dev_private;
261
262 if (!HAS_PSR(dev)) {
263 DRM_DEBUG_KMS("PSR not supported on this platform\n");
264 return;
265 }
266
267 if (!is_edp_psr(intel_dp)) {
268 DRM_DEBUG_KMS("PSR not supported by this panel\n");
269 return;
270 }
271
272 mutex_lock(&dev_priv->psr.lock);
273 if (dev_priv->psr.enabled) {
274 DRM_DEBUG_KMS("PSR already in use\n");
275 goto unlock;
276 }
277
278 if (!intel_psr_match_conditions(intel_dp))
279 goto unlock;
280
281 dev_priv->psr.busy_frontbuffer_bits = 0;
282
283 intel_psr_setup_vsc(intel_dp);
284
285 /* Avoid continuous PSR exit by masking memup and hpd */
286 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
287 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
288
289 /* Enable PSR on the panel */
290 intel_psr_enable_sink(intel_dp);
291
292 dev_priv->psr.enabled = intel_dp;
293unlock:
294 mutex_unlock(&dev_priv->psr.lock);
295}
296
297/**
298 * intel_psr_disable - Disable PSR
299 * @intel_dp: Intel DP
300 *
301 * This function needs to be called before disabling the pipe.
302 */
303void intel_psr_disable(struct intel_dp *intel_dp)
304{
305 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
306 struct drm_device *dev = intel_dig_port->base.base.dev;
307 struct drm_i915_private *dev_priv = dev->dev_private;
308
309 mutex_lock(&dev_priv->psr.lock);
310 if (!dev_priv->psr.enabled) {
311 mutex_unlock(&dev_priv->psr.lock);
312 return;
313 }
314
315 if (dev_priv->psr.active) {
316 I915_WRITE(EDP_PSR_CTL(dev),
317 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
318
319 /* Wait till PSR is idle */
320 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
321 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
322 DRM_ERROR("Timed out waiting for PSR Idle State\n");
323
324 dev_priv->psr.active = false;
325 } else {
326 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
327 }
328
329 dev_priv->psr.enabled = NULL;
330 mutex_unlock(&dev_priv->psr.lock);
331
332 cancel_delayed_work_sync(&dev_priv->psr.work);
333}
334
335static void intel_psr_work(struct work_struct *work)
336{
337 struct drm_i915_private *dev_priv =
338 container_of(work, typeof(*dev_priv), psr.work.work);
339 struct intel_dp *intel_dp = dev_priv->psr.enabled;
340
341 /* We have to make sure PSR is ready for re-enable,
342 * otherwise it stays disabled until the next full enable/disable cycle.
343 * PSR might take some time to get fully disabled
344 * and be ready for re-enable.
345 */
346 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
347 EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
348 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
349 return;
350 }
351
352 mutex_lock(&dev_priv->psr.lock);
353 intel_dp = dev_priv->psr.enabled;
354
355 if (!intel_dp)
356 goto unlock;
357
358 /*
359 * The delayed work can race with an invalidate hence we need to
360 * recheck. Since psr_flush first clears this and then reschedules we
361 * won't ever miss a flush when bailing out here.
362 */
363 if (dev_priv->psr.busy_frontbuffer_bits)
364 goto unlock;
365
366 intel_psr_do_enable(intel_dp);
367unlock:
368 mutex_unlock(&dev_priv->psr.lock);
369}
370
371static void intel_psr_exit(struct drm_device *dev)
372{
373 struct drm_i915_private *dev_priv = dev->dev_private;
374
375 if (dev_priv->psr.active) {
376 u32 val = I915_READ(EDP_PSR_CTL(dev));
377
378 WARN_ON(!(val & EDP_PSR_ENABLE));
379
380 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
381
382 dev_priv->psr.active = false;
383 }
384
385}
386
387/**
388 * intel_psr_invalidate - Invalidate PSR
389 * @dev: DRM device
390 * @frontbuffer_bits: frontbuffer plane tracking bits
391 *
392 * Since the hardware frontbuffer tracking has gaps we need to integrate
393 * with the software frontbuffer tracking. This function gets called every
394 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
395 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
396 *
397 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
398 */
399void intel_psr_invalidate(struct drm_device *dev,
400 unsigned frontbuffer_bits)
401{
402 struct drm_i915_private *dev_priv = dev->dev_private;
403 struct drm_crtc *crtc;
404 enum pipe pipe;
405
406 mutex_lock(&dev_priv->psr.lock);
407 if (!dev_priv->psr.enabled) {
408 mutex_unlock(&dev_priv->psr.lock);
409 return;
410 }
411
412 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
413 pipe = to_intel_crtc(crtc)->pipe;
414
415 intel_psr_exit(dev);
416
417 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
418
419 dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
420 mutex_unlock(&dev_priv->psr.lock);
421}
422
423/**
424 * intel_psr_flush - Flush PSR
425 * @dev: DRM device
426 * @frontbuffer_bits: frontbuffer plane tracking bits
427 *
428 * Since the hardware frontbuffer tracking has gaps we need to integrate
429 * with the software frontbuffer tracking. This function gets called every
430 * time frontbuffer rendering has completed and flushed out to memory. PSR
431 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
432 *
433 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
434 */
435void intel_psr_flush(struct drm_device *dev,
436 unsigned frontbuffer_bits)
437{
438 struct drm_i915_private *dev_priv = dev->dev_private;
439 struct drm_crtc *crtc;
440 enum pipe pipe;
441
442 mutex_lock(&dev_priv->psr.lock);
443 if (!dev_priv->psr.enabled) {
444 mutex_unlock(&dev_priv->psr.lock);
445 return;
446 }
447
448 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
449 pipe = to_intel_crtc(crtc)->pipe;
450 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
451
452 /*
453 * On Haswell sprite plane updates don't result in a psr invalidating
454 * signal in the hardware, which means we need to manually fake this in
455 * software for all flushes, not just when we've seen a preceding
456 * invalidation through frontbuffer rendering.
457 */
458 if (IS_HASWELL(dev) &&
459 (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
460 intel_psr_exit(dev);
461
462 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
463 schedule_delayed_work(&dev_priv->psr.work,
464 msecs_to_jiffies(100));
465 mutex_unlock(&dev_priv->psr.lock);
466}
467
468/**
469 * intel_psr_init - Init basic PSR work and mutex.
470 * @dev: DRM device
471 *
472 * This function is called only once at driver load to initialize basic
473 * PSR stuff.
474 */
475void intel_psr_init(struct drm_device *dev)
476{
477 struct drm_i915_private *dev_priv = dev->dev_private;
478
479 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
480 mutex_init(&dev_priv->psr.lock);
481}
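
The DOC comment above notes that intel_psr_invalidate() and intel_psr_flush() are meant to be called by the software frontbuffer tracking code around frontbuffer writes. As a rough illustration of that call pattern (not part of this patch), the sketch below shows a hypothetical caller bracketing a CPU frontbuffer write with the two hooks. The helper name example_frontbuffer_write is invented for illustration; the PSR entry points are the ones added in this file, and INTEL_FRONTBUFFER_PRIMARY() is the i915 frontbuffer-bit macro assumed to be available from intel_drv.h.

/* Illustrative sketch only: a hypothetical frontbuffer-tracking caller. */
static void example_frontbuffer_write(struct drm_device *dev, enum pipe pipe)
{
	/* Track the primary plane of this pipe. */
	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);

	/* Rendering is about to dirty the frontbuffer: force a PSR exit. */
	intel_psr_invalidate(dev, frontbuffer_bits);

	/* ... CPU writes to the frontbuffer happen here ... */

	/* Writes have been flushed to memory: allow PSR to be re-armed. */
	intel_psr_flush(dev, frontbuffer_bits);
}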