Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
 -rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 1663
 1 file changed, 615 insertions(+), 1048 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 26c29c173221..d77cc81900f9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,7 +30,9 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
+#include <linux/vgaarb.h>
 #include <drm/i915_powerwell.h>
+#include <linux/pm_runtime.h>
 
 /**
  * RC6 is a special power stage which allows the GPU to enter an very
@@ -86,7 +88,7 @@ static void i8xx_disable_fbc(struct drm_device *dev)
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void i8xx_enable_fbc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -96,32 +98,40 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int cfb_pitch;
 	int plane, i;
-	u32 fbc_ctl, fbc_ctl2;
+	u32 fbc_ctl;
 
 	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
 	if (fb->pitches[0] < cfb_pitch)
 		cfb_pitch = fb->pitches[0];
 
-	/* FBC_CTL wants 64B units */
-	cfb_pitch = (cfb_pitch / 64) - 1;
+	/* FBC_CTL wants 32B or 64B units */
+	if (IS_GEN2(dev))
+		cfb_pitch = (cfb_pitch / 32) - 1;
+	else
+		cfb_pitch = (cfb_pitch / 64) - 1;
 	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
 	/* Clear old tags */
 	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
 		I915_WRITE(FBC_TAG + (i * 4), 0);
 
-	/* Set it up... */
-	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-	fbc_ctl2 |= plane;
-	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-	I915_WRITE(FBC_FENCE_OFF, crtc->y);
+	if (IS_GEN4(dev)) {
+		u32 fbc_ctl2;
+
+		/* Set it up... */
+		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+		fbc_ctl2 |= plane;
+		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+		I915_WRITE(FBC_FENCE_OFF, crtc->y);
+	}
 
 	/* enable it... */
-	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+	fbc_ctl = I915_READ(FBC_CONTROL);
+	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
 	if (IS_I945GM(dev))
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
 	fbc_ctl |= obj->fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
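This hunk folds two behavioural changes into i8xx_enable_fbc(): the compressed-buffer pitch is now programmed in 32B units on gen2 (64B on later gens), and the caller-supplied interval is replaced by a read-modify-write of FBC_CTL that keeps whatever interval was already programmed. A standalone sketch of the pitch conversion, with an illustrative pitch value:

#include <stdio.h>

/* Hypothetical helper mirroring the hunk: the FBC_CTL stride field
 * counts 32B units on gen2 and 64B units on gen3/4, minus one. */
static int cfb_pitch_to_reg(int pitch_bytes, int is_gen2)
{
	return (pitch_bytes / (is_gen2 ? 32 : 64)) - 1;
}

int main(void)
{
	printf("gen2: %d\n", cfb_pitch_to_reg(2048, 1));	/* 63 */
	printf("gen4: %d\n", cfb_pitch_to_reg(2048, 0));	/* 31 */
	return 0;
}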
@@ -136,7 +146,7 @@ static bool i8xx_fbc_enabled(struct drm_device *dev)
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void g4x_enable_fbc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -145,16 +155,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
-	unsigned long stall_watermark = 200;
 	u32 dpfc_ctl;
 
 	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
 	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 
-	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
-		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
-		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
 
 	/* enable it... */
@@ -191,7 +197,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 	u32 blt_ecoskpd;
 
 	/* Make sure blitter notifies FBC of writes */
-	gen6_gt_force_wake_get(dev_priv);
+
+	/* Blitter is part of Media powerwell on VLV. No impact of
+	 * this param in other platforms for now */
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
+
 	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
 		GEN6_BLITTER_LOCK_SHIFT;
@@ -202,10 +212,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 		GEN6_BLITTER_LOCK_SHIFT);
 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 	POSTING_READ(GEN6_BLITTER_ECOSKPD);
-	gen6_gt_force_wake_put(dev_priv);
+
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
 }
 
-static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void ironlake_enable_fbc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
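Both force-wake call sites now name the power well they need. A minimal standalone sketch of the bracketing pattern, using stand-in types and functions rather than the real i915 API:

#include <stdio.h>

/* Stand-ins for the i915 force-wake API; only the shape matches. */
enum forcewake_domain { FORCEWAKE_RENDER, FORCEWAKE_MEDIA };

static void force_wake_get(enum forcewake_domain d) { printf("wake %d\n", d); }
static void force_wake_put(enum forcewake_domain d) { printf("idle %d\n", d); }

static void update_blitter_notify(void)
{
	force_wake_get(FORCEWAKE_MEDIA);	/* wake only the owning well */
	/* ...GEN6_BLITTER_ECOSKPD read-modify-write would go here... */
	force_wake_put(FORCEWAKE_MEDIA);	/* every get has a matching put */
}

int main(void)
{
	update_blitter_notify();
	return 0;
}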
@@ -214,7 +225,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
-	unsigned long stall_watermark = 200;
 	u32 dpfc_ctl;
 
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
@@ -222,12 +232,11 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
 	/* Set persistent mode for front-buffer rendering, ala X. */
 	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
-	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+	dpfc_ctl |= DPFC_CTL_FENCE_EN;
+	if (IS_GEN5(dev))
+		dpfc_ctl |= obj->fence_reg;
 	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 
-	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
-		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
-		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
 	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
 	/* enable it... */
@@ -265,7 +274,7 @@ static bool ironlake_fbc_enabled(struct drm_device *dev)
 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void gen7_enable_fbc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -295,7 +304,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
 	sandybridge_blit_fbc_update(dev);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
 }
 
 bool intel_fbc_enabled(struct drm_device *dev)
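The debug message now prints the plane as a letter via plane_name(). A minimal equivalent, assuming the usual offset-from-'A' definition of that macro:

#include <stdio.h>

enum plane { PLANE_A, PLANE_B, PLANE_C };

/* Assumed to mirror i915's macro: plane 0 -> 'A', 1 -> 'B', ... */
#define plane_name(p) ((p) + 'A')

int main(void)
{
	printf("enabled fbc on plane %c\n", plane_name(PLANE_B)); /* plane B */
	return 0;
}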
@@ -322,8 +331,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
 	 * the prior work.
 	 */
 	if (work->crtc->fb == work->fb) {
-		dev_priv->display.enable_fbc(work->crtc,
-					     work->interval);
+		dev_priv->display.enable_fbc(work->crtc);
 
 		dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
 		dev_priv->fbc.fb_id = work->crtc->fb->base.id;
@@ -360,7 +368,7 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
 	dev_priv->fbc.fbc_work = NULL;
 }
 
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc)
 {
 	struct intel_fbc_work *work;
 	struct drm_device *dev = crtc->dev;
@@ -374,13 +382,12 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL) {
 		DRM_ERROR("Failed to allocate FBC work structure\n");
-		dev_priv->display.enable_fbc(crtc, interval);
+		dev_priv->display.enable_fbc(crtc);
 		return;
 	}
 
 	work->crtc = crtc;
 	work->fb = crtc->fb;
-	work->interval = interval;
 	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
 	dev_priv->fbc.fbc_work = work;
@@ -454,7 +461,7 @@ void intel_update_fbc(struct drm_device *dev)
 	const struct drm_display_mode *adjusted_mode;
 	unsigned int max_width, max_height;
 
-	if (!I915_HAS_FBC(dev)) {
+	if (!HAS_FBC(dev)) {
 		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
 		return;
 	}
@@ -530,10 +537,10 @@ void intel_update_fbc(struct drm_device *dev)
 		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 		goto out_disable;
 	}
-	if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
-	    intel_crtc->plane != 0) {
+	if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
+	    intel_crtc->plane != PLANE_A) {
 		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
-			DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+			DRM_DEBUG_KMS("plane not A, disabling compression\n");
 		goto out_disable;
 	}
 
@@ -595,7 +602,7 @@ void intel_update_fbc(struct drm_device *dev)
 		intel_disable_fbc(dev);
 	}
 
-	intel_enable_fbc(crtc, 500);
+	intel_enable_fbc(crtc);
 	dev_priv->fbc.no_fbc_reason = FBC_OK;
 	return;
 
@@ -817,7 +824,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 	return size;
 }
 
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t dsparb = I915_READ(DSPARB);
@@ -850,21 +857,6 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
 	return size;
 }
 
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dsparb = I915_READ(DSPARB);
-	int size;
-
-	size = dsparb & 0x7f;
-	size >>= 1; /* Convert to cachelines */
-
-	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-		      plane ? "B" : "A", size);
-
-	return size;
-}
-
 /* Pineview has different values for various configs */
 static const struct intel_watermark_params pineview_display_wm = {
 	PINEVIEW_DISPLAY_FIFO,
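The deleted i830_get_fifo_size() decoded the DSPARB FIFO split; for reference, a standalone rerun of that decoding with a made-up register value (the low 7 bits give the plane A boundary, and i830-class parts count it in half-cachelines, hence the extra shift):

#include <stdio.h>

int main(void)
{
	unsigned int dsparb = 0x00000050;	/* illustrative readout */
	int size;

	size = dsparb & 0x7f;	/* low 7 bits: 80 */
	size >>= 1;		/* convert to cachelines: 40 */

	printf("FIFO size - (0x%08x) A: %d\n", dsparb, size);
	return 0;
}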
@@ -943,14 +935,14 @@ static const struct intel_watermark_params i915_wm_info = {
 	2,
 	I915_FIFO_LINE_SIZE
 };
-static const struct intel_watermark_params i855_wm_info = {
+static const struct intel_watermark_params i830_wm_info = {
 	I855GM_FIFO_SIZE,
 	I915_MAX_WM,
 	1,
 	2,
 	I830_FIFO_LINE_SIZE
 };
-static const struct intel_watermark_params i830_wm_info = {
+static const struct intel_watermark_params i845_wm_info = {
 	I830_FIFO_SIZE,
 	I915_MAX_WM,
 	1,
@@ -958,65 +950,6 @@ static const struct intel_watermark_params i830_wm_info = {
 	I830_FIFO_LINE_SIZE
 };
 
-static const struct intel_watermark_params ironlake_display_wm_info = {
-	ILK_DISPLAY_FIFO,
-	ILK_DISPLAY_MAXWM,
-	ILK_DISPLAY_DFTWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_wm_info = {
-	ILK_CURSOR_FIFO,
-	ILK_CURSOR_MAXWM,
-	ILK_CURSOR_DFTWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_display_srwm_info = {
-	ILK_DISPLAY_SR_FIFO,
-	ILK_DISPLAY_MAX_SRWM,
-	ILK_DISPLAY_DFT_SRWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_srwm_info = {
-	ILK_CURSOR_SR_FIFO,
-	ILK_CURSOR_MAX_SRWM,
-	ILK_CURSOR_DFT_SRWM,
-	2,
-	ILK_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params sandybridge_display_wm_info = {
-	SNB_DISPLAY_FIFO,
-	SNB_DISPLAY_MAXWM,
-	SNB_DISPLAY_DFTWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_wm_info = {
-	SNB_CURSOR_FIFO,
-	SNB_CURSOR_MAXWM,
-	SNB_CURSOR_DFTWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_display_srwm_info = {
-	SNB_DISPLAY_SR_FIFO,
-	SNB_DISPLAY_MAX_SRWM,
-	SNB_DISPLAY_DFT_SRWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
-	SNB_CURSOR_SR_FIFO,
-	SNB_CURSOR_MAX_SRWM,
-	SNB_CURSOR_DFT_SRWM,
-	2,
-	SNB_FIFO_LINE_SIZE
-};
-
-
 /**
  * intel_calculate_wm - calculate watermark level
  * @clock_in_khz: pixel clock
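These parameter tables feed intel_calculate_wm(). A rough standalone sketch of the usual method-1 calculation they support (bytes fetched during the memory latency window, rounded up to cachelines, then subtracted from the FIFO along with a guard band); every number below is illustrative, not taken from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clock_in_khz = 148500;	/* 1080p60 pixel clock */
	unsigned long latency_ns = 4000;	/* 4 us memory latency */
	int pixel_size = 4;			/* bytes per pixel */
	int fifo_size = 96, cacheline_size = 64, guard_size = 2;

	/* pixels/us * bytes/pixel * latency_ns / 1000 = bytes in flight */
	unsigned long entries =
		((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000;
	entries = DIV_ROUND_UP(entries, cacheline_size);

	long wm_size = fifo_size - (entries + guard_size);
	printf("entries=%lu wm=%ld\n", entries, wm_size); /* 37 and 57 */
	return 0;
}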
@@ -1567,7 +1500,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	else if (!IS_GEN2(dev))
 		wm_info = &i915_wm_info;
 	else
-		wm_info = &i855_wm_info;
+		wm_info = &i830_wm_info;
 
 	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
 	crtc = intel_get_crtc_for_plane(dev, 0);
@@ -1615,7 +1548,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	if (IS_I945G(dev) || IS_I945GM(dev))
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
 	else if (IS_I915GM(dev))
-		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
 
 	/* Calc sr entries for one plane configs */
 	if (HAS_FW_BLC(dev) && enabled) {
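INSTPM is a masked register: the upper 16 bits of a write select which of the lower 16 bits take effect, which is why the read-modify-write can be dropped. A standalone sketch, with macro bodies assumed to match i915's definitions and an illustrative bit position:

#include <stdio.h>

/* Assumed to match i915's masked-write helpers. */
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)	((a) << 16)

int main(void)
{
	unsigned int INSTPM_SELF_EN = 1u << 12;	/* illustrative bit */

	/* high half = write mask, low half = value; no read needed */
	printf("enable:  0x%08x\n", _MASKED_BIT_ENABLE(INSTPM_SELF_EN));  /* 0x10001000 */
	printf("disable: 0x%08x\n", _MASKED_BIT_DISABLE(INSTPM_SELF_EN)); /* 0x10000000 */
	return 0;
}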
@@ -1667,14 +1600,14 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 				I915_WRITE(FW_BLC_SELF,
 					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
 			else if (IS_I915GM(dev))
-				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
 			DRM_DEBUG_KMS("memory self refresh enabled\n");
 		} else
 			DRM_DEBUG_KMS("memory self refresh disabled\n");
 	}
 }
 
-static void i830_update_wm(struct drm_crtc *unused_crtc)
+static void i845_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1689,7 +1622,7 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
 
 	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
-				       &i830_wm_info,
+				       &i845_wm_info,
 				       dev_priv->display.get_fifo_size(dev, 0),
 				       4, latency_ns);
 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1700,423 +1633,6 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
 	I915_WRITE(FW_BLC, fwater_lo);
 }
 
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool ironlake_check_srwm(struct drm_device *dev, int level,
-				int fbc_wm, int display_wm, int cursor_wm,
-				const struct intel_watermark_params *display,
-				const struct intel_watermark_params *cursor)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
-		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
-	if (fbc_wm > SNB_FBC_MAX_SRWM) {
-		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
-			      fbc_wm, SNB_FBC_MAX_SRWM, level);
-
-		/* fbc has it's own way to disable FBC WM */
-		I915_WRITE(DISP_ARB_CTL,
-			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
-		return false;
-	} else if (INTEL_INFO(dev)->gen >= 6) {
-		/* enable FBC WM (except on ILK, where it must remain off) */
-		I915_WRITE(DISP_ARB_CTL,
-			   I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
-	}
-
-	if (display_wm > display->max_wm) {
-		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
-			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
-		return false;
-	}
-
-	if (cursor_wm > cursor->max_wm) {
-		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
-			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
-		return false;
-	}
-
-	if (!(fbc_wm || display_wm || cursor_wm)) {
-		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
-		return false;
-	}
-
-	return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
-				  int latency_ns,
-				  const struct intel_watermark_params *display,
-				  const struct intel_watermark_params *cursor,
-				  int *fbc_wm, int *display_wm, int *cursor_wm)
-{
-	struct drm_crtc *crtc;
-	const struct drm_display_mode *adjusted_mode;
-	unsigned long line_time_us;
-	int hdisplay, htotal, pixel_size, clock;
-	int line_count, line_size;
-	int small, large;
-	int entries;
-
-	if (!latency_ns) {
-		*fbc_wm = *display_wm = *cursor_wm = 0;
-		return false;
-	}
-
-	crtc = intel_get_crtc_for_plane(dev, plane);
-	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
-	clock = adjusted_mode->crtc_clock;
-	htotal = adjusted_mode->crtc_htotal;
-	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
-	pixel_size = crtc->fb->bits_per_pixel / 8;
-
-	line_time_us = (htotal * 1000) / clock;
-	line_count = (latency_ns / line_time_us + 1000) / 1000;
-	line_size = hdisplay * pixel_size;
-
-	/* Use the minimum of the small and large buffer method for primary */
-	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-	large = line_count * line_size;
-
-	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
-	*display_wm = entries + display->guard_size;
-
-	/*
-	 * Spec says:
-	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
-	 */
-	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
-	/* calculate the self-refresh watermark for display cursor */
-	entries = line_count * pixel_size * 64;
-	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
-	*cursor_wm = entries + cursor->guard_size;
-
-	return ironlake_check_srwm(dev, level,
-				   *fbc_wm, *display_wm, *cursor_wm,
-				   display, cursor);
-}
-
-static void ironlake_update_wm(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fbc_wm, plane_wm, cursor_wm;
-	unsigned int enabled;
-
-	enabled = 0;
-	if (g4x_compute_wm0(dev, PIPE_A,
-			    &ironlake_display_wm_info,
-			    dev_priv->wm.pri_latency[0] * 100,
-			    &ironlake_cursor_wm_info,
-			    dev_priv->wm.cur_latency[0] * 100,
-			    &plane_wm, &cursor_wm)) {
-		I915_WRITE(WM0_PIPEA_ILK,
-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-			      " plane %d, " "cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_A;
-	}
-
-	if (g4x_compute_wm0(dev, PIPE_B,
-			    &ironlake_display_wm_info,
-			    dev_priv->wm.pri_latency[0] * 100,
-			    &ironlake_cursor_wm_info,
-			    dev_priv->wm.cur_latency[0] * 100,
-			    &plane_wm, &cursor_wm)) {
-		I915_WRITE(WM0_PIPEB_ILK,
-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_B;
-	}
-
-	/*
-	 * Calculate and update the self-refresh watermark only when one
-	 * display plane is used.
-	 */
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	if (!single_plane_enabled(enabled))
-		return;
-	enabled = ffs(enabled) - 1;
-
-	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   dev_priv->wm.pri_latency[1] * 500,
-				   &ironlake_display_srwm_info,
-				   &ironlake_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM1_LP_ILK,
-		   WM1_LP_SR_EN |
-		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   dev_priv->wm.pri_latency[2] * 500,
-				   &ironlake_display_srwm_info,
-				   &ironlake_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM2_LP_ILK,
-		   WM2_LP_EN |
-		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/*
-	 * WM3 is unsupported on ILK, probably because we don't have latency
-	 * data for that power state
-	 */
-}
-
-static void sandybridge_update_wm(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
-	u32 val;
-	int fbc_wm, plane_wm, cursor_wm;
-	unsigned int enabled;
-
-	enabled = 0;
-	if (g4x_compute_wm0(dev, PIPE_A,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEA_ILK);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEA_ILK, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-			      " plane %d, " "cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_A;
-	}
-
-	if (g4x_compute_wm0(dev, PIPE_B,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEB_ILK);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEB_ILK, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_B;
-	}
-
-	/*
-	 * Calculate and update the self-refresh watermark only when one
-	 * display plane is used.
-	 *
-	 * SNB support 3 levels of watermark.
-	 *
-	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
-	 * and disabled in the descending order
-	 *
-	 */
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	if (!single_plane_enabled(enabled) ||
-	    dev_priv->sprite_scaling_enabled)
-		return;
-	enabled = ffs(enabled) - 1;
-
-	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   dev_priv->wm.pri_latency[1] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM1_LP_ILK,
-		   WM1_LP_SR_EN |
-		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   dev_priv->wm.pri_latency[2] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM2_LP_ILK,
-		   WM2_LP_EN |
-		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM3 */
-	if (!ironlake_compute_srwm(dev, 3, enabled,
-				   dev_priv->wm.pri_latency[3] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM3_LP_ILK,
-		   WM3_LP_EN |
-		   (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-}
-
-static void ivybridge_update_wm(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
-	u32 val;
-	int fbc_wm, plane_wm, cursor_wm;
-	int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
-	unsigned int enabled;
-
-	enabled = 0;
-	if (g4x_compute_wm0(dev, PIPE_A,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEA_ILK);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEA_ILK, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-			      " plane %d, " "cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_A;
-	}
-
-	if (g4x_compute_wm0(dev, PIPE_B,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEB_ILK);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEB_ILK, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_B;
-	}
-
-	if (g4x_compute_wm0(dev, PIPE_C,
-			    &sandybridge_display_wm_info, latency,
-			    &sandybridge_cursor_wm_info, latency,
-			    &plane_wm, &cursor_wm)) {
-		val = I915_READ(WM0_PIPEC_IVB);
-		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEC_IVB, val |
-			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
-			      " plane %d, cursor: %d\n",
-			      plane_wm, cursor_wm);
-		enabled |= 1 << PIPE_C;
-	}
-
-	/*
-	 * Calculate and update the self-refresh watermark only when one
-	 * display plane is used.
-	 *
-	 * SNB support 3 levels of watermark.
-	 *
-	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
-	 * and disabled in the descending order
-	 *
-	 */
-	I915_WRITE(WM3_LP_ILK, 0);
-	I915_WRITE(WM2_LP_ILK, 0);
-	I915_WRITE(WM1_LP_ILK, 0);
-
-	if (!single_plane_enabled(enabled) ||
-	    dev_priv->sprite_scaling_enabled)
-		return;
-	enabled = ffs(enabled) - 1;
-
-	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, enabled,
-				   dev_priv->wm.pri_latency[1] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM1_LP_ILK,
-		   WM1_LP_SR_EN |
-		   (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2, enabled,
-				   dev_priv->wm.pri_latency[2] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM2_LP_ILK,
-		   WM2_LP_EN |
-		   (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-
-	/* WM3, note we have to correct the cursor latency */
-	if (!ironlake_compute_srwm(dev, 3, enabled,
-				   dev_priv->wm.pri_latency[3] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
-	    !ironlake_compute_srwm(dev, 3, enabled,
-				   dev_priv->wm.cur_latency[3] * 500,
-				   &sandybridge_display_srwm_info,
-				   &sandybridge_cursor_srwm_info,
-				   &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
-		return;
-
-	I915_WRITE(WM3_LP_ILK,
-		   WM3_LP_EN |
-		   (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
-		   (fbc_wm << WM1_LP_FBC_SHIFT) |
-		   (plane_wm << WM1_LP_SR_SHIFT) |
-		   cursor_wm);
-}
-
 static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
 				    struct drm_crtc *crtc)
 {
@@ -2185,7 +1701,7 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
 	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
 }
 
-struct hsw_pipe_wm_parameters {
+struct ilk_pipe_wm_parameters {
 	bool active;
 	uint32_t pipe_htotal;
 	uint32_t pixel_rate;
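For reference, a worked run of the FBC watermark rule visible in ilk_wm_fbc() above, FBC WM = (primary WM * 64) / bytes-per-line + 2, with illustrative inputs:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pri_val = 32;		/* primary WM, illustrative */
	unsigned int horiz_pixels = 1920;
	unsigned int bytes_per_pixel = 4;

	unsigned int fbc_wm =
		DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;

	printf("fbc_wm = %u\n", fbc_wm);	/* DIV_ROUND_UP(2048, 7680) + 2 = 3 */
	return 0;
}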
@@ -2194,7 +1710,7 @@ struct hsw_pipe_wm_parameters {
 	struct intel_plane_wm_parameters cur;
 };
 
-struct hsw_wm_maximums {
+struct ilk_wm_maximums {
 	uint16_t pri;
 	uint16_t spr;
 	uint16_t cur;
@@ -2212,7 +1728,7 @@ struct intel_wm_config {
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
 				   uint32_t mem_value,
 				   bool is_lp)
 {
@@ -2241,7 +1757,7 @@ static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
 */
-static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	uint32_t method1, method2;
@@ -2264,7 +1780,7 @@ static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
-static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	if (!params->active || !params->cur.enabled)
@@ -2278,7 +1794,7 @@ static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
 				   uint32_t pri_val)
 {
 	if (!params->active || !params->pri.enabled)
@@ -2383,7 +1899,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
 				    int level,
 				    const struct intel_wm_config *config,
 				    enum intel_ddb_partitioning ddb_partitioning,
-				    struct hsw_wm_maximums *max)
+				    struct ilk_wm_maximums *max)
 {
 	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
 	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2392,7 +1908,7 @@
 }
 
 static bool ilk_validate_wm_level(int level,
-				  const struct hsw_wm_maximums *max,
+				  const struct ilk_wm_maximums *max,
 				  struct intel_wm_level *result)
 {
 	bool ret;
@@ -2434,7 +1950,7 @@ static bool ilk_validate_wm_level(int level,
 
 static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 				 int level,
-				 const struct hsw_pipe_wm_parameters *p,
+				 const struct ilk_pipe_wm_parameters *p,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2482,7 +1998,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (IS_HASWELL(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
 		wm[0] = (sskpd >> 56) & 0xFF;
@@ -2530,7 +2046,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
 static int ilk_wm_max_level(const struct drm_device *dev)
 {
 	/* how many WM levels are we expecting */
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		return 4;
 	else if (INTEL_INFO(dev)->gen >= 6)
 		return 3;
@@ -2582,8 +2098,8 @@ static void intel_setup_wm_latency(struct drm_device *dev)
 	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
 }
 
-static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
-				      struct hsw_pipe_wm_parameters *p,
+static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
+				      struct ilk_pipe_wm_parameters *p,
 				      struct intel_wm_config *config)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2593,7 +2109,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
 
 	p->active = intel_crtc_active(crtc);
 	if (p->active) {
-		p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
+		p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
 		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
 		p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
 		p->cur.bytes_per_pixel = 4;
@@ -2620,7 +2136,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
 
 /* Compute new watermarks for the pipe */
 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
-				  const struct hsw_pipe_wm_parameters *params,
+				  const struct ilk_pipe_wm_parameters *params,
 				  struct intel_pipe_wm *pipe_wm)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2632,16 +2148,25 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
 		.sprites_enabled = params->spr.enabled,
 		.sprites_scaled = params->spr.scaled,
 	};
-	struct hsw_wm_maximums max;
+	struct ilk_wm_maximums max;
 
 	/* LP0 watermarks always use 1/2 DDB partitioning */
 	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
+	/* ILK/SNB: LP2+ watermarks only w/o sprites */
+	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
+		max_level = 1;
+
+	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+	if (params->spr.scaled)
+		max_level = 0;
+
 	for (level = 0; level <= max_level; level++)
 		ilk_compute_wm_level(dev_priv, level, params,
 				     &pipe_wm->wm[level]);
 
-	pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
 
 	/* At least LP0 must be valid */
 	return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
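A standalone restatement of the level clamping this hunk adds, useful for checking the platform matrix; the helper itself is hypothetical, and its comments mirror the ones in the patch:

#include <stdio.h>

static int clamp_max_level(int max_level, int gen,
			   int sprites_enabled, int sprites_scaled)
{
	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (gen <= 6 && sprites_enabled)
		max_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (sprites_scaled)
		max_level = 0;

	return max_level;
}

int main(void)
{
	printf("%d\n", clamp_max_level(3, 6, 1, 0)); /* SNB + sprite: 1 */
	printf("%d\n", clamp_max_level(3, 7, 1, 1)); /* IVB + scaled sprite: 0 */
	return 0;
}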
@@ -2676,12 +2201,19 @@ static void ilk_merge_wm_level(struct drm_device *dev,
  * Merge all low power watermarks for all active pipes.
  */
 static void ilk_wm_merge(struct drm_device *dev,
-			 const struct hsw_wm_maximums *max,
+			 const struct intel_wm_config *config,
+			 const struct ilk_wm_maximums *max,
 			 struct intel_pipe_wm *merged)
 {
 	int level, max_level = ilk_wm_max_level(dev);
 
-	merged->fbc_wm_enabled = true;
+	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
+	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+	    config->num_pipes_active > 1)
+		return;
+
+	/* ILK: FBC WM must be disabled always */
+	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
 
 	/* merge each WM1+ level */
 	for (level = 1; level <= max_level; level++) {
@@ -2701,6 +2233,20 @@ static void ilk_wm_merge(struct drm_device *dev,
 			wm->fbc_val = 0;
 		}
 	}
+
+	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
+	/*
+	 * FIXME this is racy. FBC might get enabled later.
+	 * What we should check here is whether FBC can be
+	 * enabled sometime later.
+	 */
+	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+		for (level = 2; level <= max_level; level++) {
+			struct intel_wm_level *wm = &merged->wm[level];
+
+			wm->enable = false;
+		}
+	}
 }
 
 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
@@ -2709,10 +2255,21 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
 	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
 }
 
-static void hsw_compute_wm_results(struct drm_device *dev,
+/* The value we need to program into the WM_LPx latency field */
+static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		return 2 * level;
+	else
+		return dev_priv->wm.pri_latency[level];
+}
+
+static void ilk_compute_wm_results(struct drm_device *dev,
 				   const struct intel_pipe_wm *merged,
 				   enum intel_ddb_partitioning partitioning,
-				   struct hsw_wm_values *results)
+				   struct ilk_wm_values *results)
 {
 	struct intel_crtc *intel_crtc;
 	int level, wm_lp;
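ilk_wm_lp_latency() unifies what goes into the WM_LPx latency field: HSW/BDW encode twice the WM level, earlier generations encode the measured latency itself. A standalone sketch with a made-up latency table:

#include <stdio.h>

/* Stand-in for the helper above; the latency values are illustrative. */
static unsigned int wm_lp_latency(int is_hsw_or_bdw, int level,
				  const unsigned short *pri_latency)
{
	if (is_hsw_or_bdw)
		return 2 * level;	/* field holds 2x the WM level */
	return pri_latency[level];	/* field holds the raw latency */
}

int main(void)
{
	const unsigned short pri_latency[5] = { 7, 35, 70, 120, 0 };

	printf("hsw wm3: %u\n", wm_lp_latency(1, 3, pri_latency)); /* 6 */
	printf("snb wm3: %u\n", wm_lp_latency(0, 3, pri_latency)); /* 120 */
	return 0;
}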
@@ -2731,7 +2288,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 			break;
 
 		results->wm_lp[wm_lp - 1] = WM3_LP_EN |
-			((level * 2) << WM1_LP_LATENCY_SHIFT) |
+			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
 			(r->pri_val << WM1_LP_SR_SHIFT) |
 			r->cur_val;
 
@@ -2742,7 +2299,11 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 		results->wm_lp[wm_lp - 1] |=
 			r->fbc_val << WM1_LP_FBC_SHIFT;
 
-		results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+			WARN_ON(wm_lp != 1);
+			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
+		} else
+			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
 	}
 
 	/* LP0 register values */
@@ -2765,7 +2326,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
 						  struct intel_pipe_wm *r1,
 						  struct intel_pipe_wm *r2)
 {
@@ -2800,8 +2361,8 @@ static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
 #define WM_DIRTY_DDB (1 << 25)
 
 static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
-					 const struct hsw_wm_values *old,
-					 const struct hsw_wm_values *new)
+					 const struct ilk_wm_values *old,
+					 const struct ilk_wm_values *new)
 {
 	unsigned int dirty = 0;
 	enum pipe pipe;
@@ -2851,27 +2412,53 @@ static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
 	return dirty;
 }
 
+static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+			       unsigned int dirty)
+{
+	struct ilk_wm_values *previous = &dev_priv->wm.hw;
+	bool changed = false;
+
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
+		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
+		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+		changed = true;
+	}
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
+		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
+		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+		changed = true;
+	}
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
+		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
+		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+		changed = true;
+	}
+
+	/*
+	 * Don't touch WM1S_LP_EN here.
+	 * Doing so could cause underruns.
+	 */
+
+	return changed;
+}
+
 /*
  * The spec says we shouldn't write when we don't need, because every write
  * causes WMs to be re-evaluated, expending some power.
 */
-static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
-				struct hsw_wm_values *results)
+static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+				struct ilk_wm_values *results)
 {
-	struct hsw_wm_values *previous = &dev_priv->wm.hw;
+	struct drm_device *dev = dev_priv->dev;
+	struct ilk_wm_values *previous = &dev_priv->wm.hw;
 	unsigned int dirty;
 	uint32_t val;
 
-	dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+	dirty = ilk_compute_wm_dirty(dev, previous, results);
 	if (!dirty)
 		return;
 
-	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
-		I915_WRITE(WM3_LP_ILK, 0);
-	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
-		I915_WRITE(WM2_LP_ILK, 0);
-	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
-		I915_WRITE(WM1_LP_ILK, 0);
+	_ilk_disable_lp_wm(dev_priv, dirty);
 
 	if (dirty & WM_DIRTY_PIPE(PIPE_A))
 		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
@@ -2888,12 +2475,21 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
 	if (dirty & WM_DIRTY_DDB) {
-		val = I915_READ(WM_MISC);
-		if (results->partitioning == INTEL_DDB_PART_1_2)
-			val &= ~WM_MISC_DATA_PARTITION_5_6;
-		else
-			val |= WM_MISC_DATA_PARTITION_5_6;
-		I915_WRITE(WM_MISC, val);
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+			val = I915_READ(WM_MISC);
+			if (results->partitioning == INTEL_DDB_PART_1_2)
+				val &= ~WM_MISC_DATA_PARTITION_5_6;
+			else
+				val |= WM_MISC_DATA_PARTITION_5_6;
+			I915_WRITE(WM_MISC, val);
+		} else {
+			val = I915_READ(DISP_ARB_CTL2);
+			if (results->partitioning == INTEL_DDB_PART_1_2)
+				val &= ~DISP_DATA_PARTITION_5_6;
+			else
+				val |= DISP_DATA_PARTITION_5_6;
+			I915_WRITE(DISP_ARB_CTL2, val);
+		}
 	}
 
 	if (dirty & WM_DIRTY_FBC) {
@@ -2905,37 +2501,48 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
 		I915_WRITE(DISP_ARB_CTL, val);
 	}
 
-	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
+	if (dirty & WM_DIRTY_LP(1) &&
+	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
 		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
-	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
-		I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
-	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
-		I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
 
-	if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
+	if (INTEL_INFO(dev)->gen >= 7) {
+		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
+			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2512 | I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); | ||
2513 | } | ||
2514 | |||
2515 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) | ||
2916 | I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); | 2516 | I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); |
2917 | if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0) | 2517 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) |
2918 | I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); | 2518 | I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); |
2919 | if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0) | 2519 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) |
2920 | I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); | 2520 | I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); |
2921 | 2521 | ||
2922 | dev_priv->wm.hw = *results; | 2522 | dev_priv->wm.hw = *results; |
2923 | } | 2523 | } |
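The pattern throughout ilk_write_wm_values() is "write only on change": per the spec comment, every write makes the hardware re-evaluate watermarks and costs power, so each register is compared against the cached previous value first, and the cache is refreshed in one shot at the end. A generic sketch of that pattern, with a hypothetical helper and a toy register file:

#include <stdint.h>
#include <stdio.h>

static uint32_t mmio[8]; /* toy register file */

static void reg_write(unsigned int reg, uint32_t val)
{
	mmio[reg] = val;
	/* in hardware, every such write re-triggers WM evaluation */
	printf("write reg[%u] = 0x%08x\n", reg, val);
}

/* Write the register only when the cached copy differs, then re-cache. */
static void write_if_changed(unsigned int reg, uint32_t *cached, uint32_t next)
{
	if (*cached == next)
		return;
	reg_write(reg, next);
	*cached = next;
}

int main(void)
{
	uint32_t cached = 0;

	write_if_changed(0, &cached, 0x1234); /* writes */
	write_if_changed(0, &cached, 0x1234); /* skipped: no change */
	return 0;
}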
2924 | 2524 | ||
2925 | static void haswell_update_wm(struct drm_crtc *crtc) | 2525 | static bool ilk_disable_lp_wm(struct drm_device *dev) |
2526 | { | ||
2527 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2528 | |||
2529 | return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); | ||
2530 | } | ||
2531 | |||
2532 | static void ilk_update_wm(struct drm_crtc *crtc) | ||
2926 | { | 2533 | { |
2927 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2534 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2928 | struct drm_device *dev = crtc->dev; | 2535 | struct drm_device *dev = crtc->dev; |
2929 | struct drm_i915_private *dev_priv = dev->dev_private; | 2536 | struct drm_i915_private *dev_priv = dev->dev_private; |
2930 | struct hsw_wm_maximums max; | 2537 | struct ilk_wm_maximums max; |
2931 | struct hsw_pipe_wm_parameters params = {}; | 2538 | struct ilk_pipe_wm_parameters params = {}; |
2932 | struct hsw_wm_values results = {}; | 2539 | struct ilk_wm_values results = {}; |
2933 | enum intel_ddb_partitioning partitioning; | 2540 | enum intel_ddb_partitioning partitioning; |
2934 | struct intel_pipe_wm pipe_wm = {}; | 2541 | struct intel_pipe_wm pipe_wm = {}; |
2935 | struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; | 2542 | struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; |
2936 | struct intel_wm_config config = {}; | 2543 | struct intel_wm_config config = {}; |
2937 | 2544 | ||
2938 | hsw_compute_wm_parameters(crtc, ¶ms, &config); | 2545 | ilk_compute_wm_parameters(crtc, ¶ms, &config); |
2939 | 2546 | ||
2940 | intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm); | 2547 | intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm); |
2941 | 2548 | ||
@@ -2945,15 +2552,15 @@ static void haswell_update_wm(struct drm_crtc *crtc) | |||
2945 | intel_crtc->wm.active = pipe_wm; | 2552 | intel_crtc->wm.active = pipe_wm; |
2946 | 2553 | ||
2947 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); | 2554 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); |
2948 | ilk_wm_merge(dev, &max, &lp_wm_1_2); | 2555 | ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); |
2949 | 2556 | ||
2950 | /* 5/6 split only in single pipe config on IVB+ */ | 2557 | /* 5/6 split only in single pipe config on IVB+ */ |
2951 | if (INTEL_INFO(dev)->gen >= 7 && | 2558 | if (INTEL_INFO(dev)->gen >= 7 && |
2952 | config.num_pipes_active == 1 && config.sprites_enabled) { | 2559 | config.num_pipes_active == 1 && config.sprites_enabled) { |
2953 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); | 2560 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); |
2954 | ilk_wm_merge(dev, &max, &lp_wm_5_6); | 2561 | ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); |
2955 | 2562 | ||
2956 | best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); | 2563 | best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); |
2957 | } else { | 2564 | } else { |
2958 | best_lp_wm = &lp_wm_1_2; | 2565 | best_lp_wm = &lp_wm_1_2; |
2959 | } | 2566 | } |
@@ -2961,16 +2568,17 @@ static void haswell_update_wm(struct drm_crtc *crtc) | |||
2961 | partitioning = (best_lp_wm == &lp_wm_1_2) ? | 2568 | partitioning = (best_lp_wm == &lp_wm_1_2) ? |
2962 | INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; | 2569 | INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; |
2963 | 2570 | ||
2964 | hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results); | 2571 | ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); |
2965 | 2572 | ||
2966 | hsw_write_wm_values(dev_priv, &results); | 2573 | ilk_write_wm_values(dev_priv, &results); |
2967 | } | 2574 | } |
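ilk_update_wm() computes the LP merge twice, once per DDB partitioning mode, but only considers the 5/6 split when it is usable at all: gen 7+, a single active pipe, and sprites enabled. A sketch of just that decision; the integer "score" is invented for illustration, where the driver compares full intel_pipe_wm results via ilk_find_best_result():

#include <stdbool.h>
#include <stdio.h>

enum ddb_part { DDB_PART_1_2, DDB_PART_5_6 };

struct wm_config {
	int gen;
	int num_pipes_active;
	bool sprites_enabled;
};

static enum ddb_part pick_partitioning(const struct wm_config *cfg,
				       int score_1_2, int score_5_6)
{
	/* 5/6 split only in single pipe config on IVB+ */
	if (cfg->gen >= 7 && cfg->num_pipes_active == 1 &&
	    cfg->sprites_enabled && score_5_6 > score_1_2)
		return DDB_PART_5_6;
	return DDB_PART_1_2;
}

int main(void)
{
	struct wm_config cfg = { .gen = 7, .num_pipes_active = 1,
				 .sprites_enabled = true };

	printf("%s split\n",
	       pick_partitioning(&cfg, 2, 3) == DDB_PART_5_6 ? "5/6" : "1/2");
	return 0;
}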
2968 | 2575 | ||
2969 | static void haswell_update_sprite_wm(struct drm_plane *plane, | 2576 | static void ilk_update_sprite_wm(struct drm_plane *plane, |
2970 | struct drm_crtc *crtc, | 2577 | struct drm_crtc *crtc, |
2971 | uint32_t sprite_width, int pixel_size, | 2578 | uint32_t sprite_width, int pixel_size, |
2972 | bool enabled, bool scaled) | 2579 | bool enabled, bool scaled) |
2973 | { | 2580 | { |
2581 | struct drm_device *dev = plane->dev; | ||
2974 | struct intel_plane *intel_plane = to_intel_plane(plane); | 2582 | struct intel_plane *intel_plane = to_intel_plane(plane); |
2975 | 2583 | ||
2976 | intel_plane->wm.enabled = enabled; | 2584 | intel_plane->wm.enabled = enabled; |
@@ -2978,176 +2586,24 @@ static void haswell_update_sprite_wm(struct drm_plane *plane, | |||
2978 | intel_plane->wm.horiz_pixels = sprite_width; | 2586 | intel_plane->wm.horiz_pixels = sprite_width; |
2979 | intel_plane->wm.bytes_per_pixel = pixel_size; | 2587 | intel_plane->wm.bytes_per_pixel = pixel_size; |
2980 | 2588 | ||
2981 | haswell_update_wm(crtc); | 2589 | /* |
2982 | } | 2590 | * IVB workaround: must disable low power watermarks for at least |
2983 | 2591 | * one frame before enabling scaling. LP watermarks can be re-enabled | |
2984 | static bool | 2592 | * when scaling is disabled. |
2985 | sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, | 2593 | * |
2986 | uint32_t sprite_width, int pixel_size, | 2594 | * WaCxSRDisabledForSpriteScaling:ivb |
2987 | const struct intel_watermark_params *display, | 2595 | */ |
2988 | int display_latency_ns, int *sprite_wm) | 2596 | if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev)) |
2989 | { | 2597 | intel_wait_for_vblank(dev, intel_plane->pipe); |
2990 | struct drm_crtc *crtc; | ||
2991 | int clock; | ||
2992 | int entries, tlb_miss; | ||
2993 | |||
2994 | crtc = intel_get_crtc_for_plane(dev, plane); | ||
2995 | if (!intel_crtc_active(crtc)) { | ||
2996 | *sprite_wm = display->guard_size; | ||
2997 | return false; | ||
2998 | } | ||
2999 | |||
3000 | clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; | ||
3001 | |||
3002 | /* Use the small buffer method to calculate the sprite watermark */ | ||
3003 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; | ||
3004 | tlb_miss = display->fifo_size*display->cacheline_size - | ||
3005 | sprite_width * 8; | ||
3006 | if (tlb_miss > 0) | ||
3007 | entries += tlb_miss; | ||
3008 | entries = DIV_ROUND_UP(entries, display->cacheline_size); | ||
3009 | *sprite_wm = entries + display->guard_size; | ||
3010 | if (*sprite_wm > (int)display->max_wm) | ||
3011 | *sprite_wm = display->max_wm; | ||
3012 | |||
3013 | return true; | ||
3014 | } | ||
3015 | |||
3016 | static bool | ||
3017 | sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, | ||
3018 | uint32_t sprite_width, int pixel_size, | ||
3019 | const struct intel_watermark_params *display, | ||
3020 | int latency_ns, int *sprite_wm) | ||
3021 | { | ||
3022 | struct drm_crtc *crtc; | ||
3023 | unsigned long line_time_us; | ||
3024 | int clock; | ||
3025 | int line_count, line_size; | ||
3026 | int small, large; | ||
3027 | int entries; | ||
3028 | |||
3029 | if (!latency_ns) { | ||
3030 | *sprite_wm = 0; | ||
3031 | return false; | ||
3032 | } | ||
3033 | |||
3034 | crtc = intel_get_crtc_for_plane(dev, plane); | ||
3035 | clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; | ||
3036 | if (!clock) { | ||
3037 | *sprite_wm = 0; | ||
3038 | return false; | ||
3039 | } | ||
3040 | |||
3041 | line_time_us = (sprite_width * 1000) / clock; | ||
3042 | if (!line_time_us) { | ||
3043 | *sprite_wm = 0; | ||
3044 | return false; | ||
3045 | } | ||
3046 | |||
3047 | line_count = (latency_ns / line_time_us + 1000) / 1000; | ||
3048 | line_size = sprite_width * pixel_size; | ||
3049 | |||
3050 | /* Use the minimum of the small and large buffer method for primary */ | ||
3051 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; | ||
3052 | large = line_count * line_size; | ||
3053 | |||
3054 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); | ||
3055 | *sprite_wm = entries + display->guard_size; | ||
3056 | |||
3057 | return *sprite_wm > 0x3ff ? false : true; | ||
3058 | } | ||
3059 | |||
3060 | static void sandybridge_update_sprite_wm(struct drm_plane *plane, | ||
3061 | struct drm_crtc *crtc, | ||
3062 | uint32_t sprite_width, int pixel_size, | ||
3063 | bool enabled, bool scaled) | ||
3064 | { | ||
3065 | struct drm_device *dev = plane->dev; | ||
3066 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3067 | int pipe = to_intel_plane(plane)->pipe; | ||
3068 | int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */ | ||
3069 | u32 val; | ||
3070 | int sprite_wm, reg; | ||
3071 | int ret; | ||
3072 | |||
3073 | if (!enabled) | ||
3074 | return; | ||
3075 | |||
3076 | switch (pipe) { | ||
3077 | case 0: | ||
3078 | reg = WM0_PIPEA_ILK; | ||
3079 | break; | ||
3080 | case 1: | ||
3081 | reg = WM0_PIPEB_ILK; | ||
3082 | break; | ||
3083 | case 2: | ||
3084 | reg = WM0_PIPEC_IVB; | ||
3085 | break; | ||
3086 | default: | ||
3087 | return; /* bad pipe */ | ||
3088 | } | ||
3089 | |||
3090 | ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size, | ||
3091 | &sandybridge_display_wm_info, | ||
3092 | latency, &sprite_wm); | ||
3093 | if (!ret) { | ||
3094 | DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n", | ||
3095 | pipe_name(pipe)); | ||
3096 | return; | ||
3097 | } | ||
3098 | |||
3099 | val = I915_READ(reg); | ||
3100 | val &= ~WM0_PIPE_SPRITE_MASK; | ||
3101 | I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); | ||
3102 | DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm); | ||
3103 | |||
3104 | |||
3105 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, | ||
3106 | pixel_size, | ||
3107 | &sandybridge_display_srwm_info, | ||
3108 | dev_priv->wm.spr_latency[1] * 500, | ||
3109 | &sprite_wm); | ||
3110 | if (!ret) { | ||
3111 | DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n", | ||
3112 | pipe_name(pipe)); | ||
3113 | return; | ||
3114 | } | ||
3115 | I915_WRITE(WM1S_LP_ILK, sprite_wm); | ||
3116 | |||
3117 | /* Only IVB has two more LP watermarks for sprite */ | ||
3118 | if (!IS_IVYBRIDGE(dev)) | ||
3119 | return; | ||
3120 | |||
3121 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, | ||
3122 | pixel_size, | ||
3123 | &sandybridge_display_srwm_info, | ||
3124 | dev_priv->wm.spr_latency[2] * 500, | ||
3125 | &sprite_wm); | ||
3126 | if (!ret) { | ||
3127 | DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n", | ||
3128 | pipe_name(pipe)); | ||
3129 | return; | ||
3130 | } | ||
3131 | I915_WRITE(WM2S_LP_IVB, sprite_wm); | ||
3132 | 2598 | ||
3133 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, | 2599 | ilk_update_wm(crtc); |
3134 | pixel_size, | ||
3135 | &sandybridge_display_srwm_info, | ||
3136 | dev_priv->wm.spr_latency[3] * 500, | ||
3137 | &sprite_wm); | ||
3138 | if (!ret) { | ||
3139 | DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n", | ||
3140 | pipe_name(pipe)); | ||
3141 | return; | ||
3142 | } | ||
3143 | I915_WRITE(WM3S_LP_IVB, sprite_wm); | ||
3144 | } | 2600 | } |
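The control flow of the WaCxSRDisabledForSpriteScaling:ivb hunk above is worth spelling out: the one-frame vblank wait is taken only when ilk_disable_lp_wm() reports that it actually turned an LP watermark off, so the common case where they are already disabled costs nothing. A condensed sketch with the two helpers stubbed:

#include <stdbool.h>
#include <stdio.h>

static bool lp_wm_enabled = true;

static bool disable_lp_wm(void) /* true if it changed anything */
{
	bool was = lp_wm_enabled;

	lp_wm_enabled = false;
	return was;
}

static void wait_for_vblank(void)
{
	printf("waiting one frame with LP watermarks off\n");
}

static void update_sprite(bool is_ivb, bool scaled)
{
	/* Only IVB needs a frame between disabling LP WMs and scaling. */
	if (is_ivb && scaled && disable_lp_wm())
		wait_for_vblank();
	printf("update watermarks\n");
}

int main(void)
{
	update_sprite(true, true); /* first enable of scaling: waits */
	update_sprite(true, true); /* LP WMs already off: no wait */
	return 0;
}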
3145 | 2601 | ||
3146 | static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | 2602 | static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) |
3147 | { | 2603 | { |
3148 | struct drm_device *dev = crtc->dev; | 2604 | struct drm_device *dev = crtc->dev; |
3149 | struct drm_i915_private *dev_priv = dev->dev_private; | 2605 | struct drm_i915_private *dev_priv = dev->dev_private; |
3150 | struct hsw_wm_values *hw = &dev_priv->wm.hw; | 2606 | struct ilk_wm_values *hw = &dev_priv->wm.hw; |
3151 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2607 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3152 | struct intel_pipe_wm *active = &intel_crtc->wm.active; | 2608 | struct intel_pipe_wm *active = &intel_crtc->wm.active; |
3153 | enum pipe pipe = intel_crtc->pipe; | 2609 | enum pipe pipe = intel_crtc->pipe; |
@@ -3158,7 +2614,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |||
3158 | }; | 2614 | }; |
3159 | 2615 | ||
3160 | hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); | 2616 | hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); |
3161 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); | 2617 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
2618 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); | ||
3162 | 2619 | ||
3163 | if (intel_crtc_active(crtc)) { | 2620 | if (intel_crtc_active(crtc)) { |
3164 | u32 tmp = hw->wm_pipe[pipe]; | 2621 | u32 tmp = hw->wm_pipe[pipe]; |
@@ -3190,7 +2647,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |||
3190 | void ilk_wm_get_hw_state(struct drm_device *dev) | 2647 | void ilk_wm_get_hw_state(struct drm_device *dev) |
3191 | { | 2648 | { |
3192 | struct drm_i915_private *dev_priv = dev->dev_private; | 2649 | struct drm_i915_private *dev_priv = dev->dev_private; |
3193 | struct hsw_wm_values *hw = &dev_priv->wm.hw; | 2650 | struct ilk_wm_values *hw = &dev_priv->wm.hw; |
3194 | struct drm_crtc *crtc; | 2651 | struct drm_crtc *crtc; |
3195 | 2652 | ||
3196 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | 2653 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
@@ -3204,8 +2661,12 @@ void ilk_wm_get_hw_state(struct drm_device *dev) | |||
3204 | hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); | 2661 | hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); |
3205 | hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); | 2662 | hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); |
3206 | 2663 | ||
3207 | hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? | 2664 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
3208 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | 2665 | hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? |
2666 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | ||
2667 | else if (IS_IVYBRIDGE(dev)) | ||
2668 | hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? | ||
2669 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | ||
3209 | 2670 | ||
3210 | hw->enable_fbc_wm = | 2671 | hw->enable_fbc_wm = |
3211 | !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); | 2672 | !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); |
@@ -3430,26 +2891,19 @@ static void ironlake_disable_drps(struct drm_device *dev) | |||
3430 | * ourselves, instead of doing a rmw cycle (which might result in us clearing | 2891 | * ourselves, instead of doing a rmw cycle (which might result in us clearing |
3431 | * all limits and the gpu stuck at whatever frequency it is at atm). | 2892 | * all limits and the gpu stuck at whatever frequency it is at atm). |
3432 | */ | 2893 | */ |
3433 | static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val) | 2894 | static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) |
3434 | { | 2895 | { |
3435 | u32 limits; | 2896 | u32 limits; |
3436 | 2897 | ||
3437 | limits = 0; | ||
3438 | |||
3439 | if (*val >= dev_priv->rps.max_delay) | ||
3440 | *val = dev_priv->rps.max_delay; | ||
3441 | limits |= dev_priv->rps.max_delay << 24; | ||
3442 | |||
3443 | /* Only set the down limit when we've reached the lowest level to avoid | 2898 | /* Only set the down limit when we've reached the lowest level to avoid |
3444 | * getting more interrupts, otherwise leave this clear. This prevents a | 2899 | * getting more interrupts, otherwise leave this clear. This prevents a |
3445 | * race in the hw when coming out of rc6: There's a tiny window where | 2900 | * race in the hw when coming out of rc6: There's a tiny window where |
3446 | * the hw runs at the minimal clock before selecting the desired | 2901 | * the hw runs at the minimal clock before selecting the desired |
3447 | * frequency, if the down threshold expires in that window we will not | 2902 | * frequency, if the down threshold expires in that window we will not |
3448 | * receive a down interrupt. */ | 2903 | * receive a down interrupt. */ |
3449 | if (*val <= dev_priv->rps.min_delay) { | 2904 | limits = dev_priv->rps.max_delay << 24; |
3450 | *val = dev_priv->rps.min_delay; | 2905 | if (val <= dev_priv->rps.min_delay) |
3451 | limits |= dev_priv->rps.min_delay << 16; | 2906 | limits |= dev_priv->rps.min_delay << 16; |
3452 | } | ||
3453 | 2907 | ||
3454 | return limits; | 2908 | return limits; |
3455 | } | 2909 | } |
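The rework also makes gen6_rps_limits() pure: it no longer clamps *val in place, it just derives the GEN6_RP_INTERRUPT_LIMITS value. A worked example of the bit packing from the code above (the delay codes are illustrative, the shifts are the ones in the diff): with max_delay 0x20 and min_delay 0x0b, an intermediate frequency yields 0x20000000 (up limit only), while sitting at the floor yields 0x200b0000, arming the down-interrupt limit as well.

#include <stdint.h>
#include <stdio.h>

static uint32_t rps_limits(uint8_t max_delay, uint8_t min_delay, uint8_t val)
{
	/* up limit always set; down limit only once we sit at the floor */
	uint32_t limits = (uint32_t)max_delay << 24;

	if (val <= min_delay)
		limits |= (uint32_t)min_delay << 16;
	return limits;
}

int main(void)
{
	printf("0x%08x\n", rps_limits(0x20, 0x0b, 0x18)); /* 0x20000000 */
	printf("0x%08x\n", rps_limits(0x20, 0x0b, 0x0b)); /* 0x200b0000 */
	return 0;
}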
@@ -3549,7 +3003,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) | |||
3549 | void gen6_set_rps(struct drm_device *dev, u8 val) | 3003 | void gen6_set_rps(struct drm_device *dev, u8 val) |
3550 | { | 3004 | { |
3551 | struct drm_i915_private *dev_priv = dev->dev_private; | 3005 | struct drm_i915_private *dev_priv = dev->dev_private; |
3552 | u32 limits = gen6_rps_limits(dev_priv, &val); | ||
3553 | 3006 | ||
3554 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 3007 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
3555 | WARN_ON(val > dev_priv->rps.max_delay); | 3008 | WARN_ON(val > dev_priv->rps.max_delay); |
@@ -3572,7 +3025,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | |||
3572 | /* Make sure we continue to get interrupts | 3025 | /* Make sure we continue to get interrupts |
3573 | * until we hit the minimum or maximum frequencies. | 3026 | * until we hit the minimum or maximum frequencies. |
3574 | */ | 3027 | */ |
3575 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); | 3028 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, |
3029 | gen6_rps_limits(dev_priv, val)); | ||
3576 | 3030 | ||
3577 | POSTING_READ(GEN6_RPNSWREQ); | 3031 | POSTING_READ(GEN6_RPNSWREQ); |
3578 | 3032 | ||
@@ -3583,9 +3037,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | |||
3583 | 3037 | ||
3584 | void gen6_rps_idle(struct drm_i915_private *dev_priv) | 3038 | void gen6_rps_idle(struct drm_i915_private *dev_priv) |
3585 | { | 3039 | { |
3040 | struct drm_device *dev = dev_priv->dev; | ||
3041 | |||
3586 | mutex_lock(&dev_priv->rps.hw_lock); | 3042 | mutex_lock(&dev_priv->rps.hw_lock); |
3587 | if (dev_priv->rps.enabled) { | 3043 | if (dev_priv->rps.enabled) { |
3588 | if (dev_priv->info->is_valleyview) | 3044 | if (IS_VALLEYVIEW(dev)) |
3589 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); | 3045 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); |
3590 | else | 3046 | else |
3591 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); | 3047 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); |
@@ -3596,9 +3052,11 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) | |||
3596 | 3052 | ||
3597 | void gen6_rps_boost(struct drm_i915_private *dev_priv) | 3053 | void gen6_rps_boost(struct drm_i915_private *dev_priv) |
3598 | { | 3054 | { |
3055 | struct drm_device *dev = dev_priv->dev; | ||
3056 | |||
3599 | mutex_lock(&dev_priv->rps.hw_lock); | 3057 | mutex_lock(&dev_priv->rps.hw_lock); |
3600 | if (dev_priv->rps.enabled) { | 3058 | if (dev_priv->rps.enabled) { |
3601 | if (dev_priv->info->is_valleyview) | 3059 | if (IS_VALLEYVIEW(dev)) |
3602 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); | 3060 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); |
3603 | else | 3061 | else |
3604 | gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); | 3062 | gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); |
@@ -3607,48 +3065,18 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv) | |||
3607 | mutex_unlock(&dev_priv->rps.hw_lock); | 3065 | mutex_unlock(&dev_priv->rps.hw_lock); |
3608 | } | 3066 | } |
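gen6_rps_idle() and gen6_rps_boost() are mirror images: under the rps lock, clamp to the minimum or maximum delay respectively, dispatching to the VLV or gen6 setter by platform. A compressed model of the pair, with a pthread mutex standing in for hw_lock and the setter stubbed:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct rps {
	pthread_mutex_t lock;
	bool enabled;
	unsigned char cur, min, max;
};

static void set_rps(struct rps *rps, unsigned char val)
{
	rps->cur = val;
	printf("freq code -> %u\n", val);
}

static void rps_clamp(struct rps *rps, bool boost)
{
	pthread_mutex_lock(&rps->lock);
	if (rps->enabled) /* no-op until RPS has been brought up */
		set_rps(rps, boost ? rps->max : rps->min);
	pthread_mutex_unlock(&rps->lock);
}

int main(void)
{
	struct rps rps = { PTHREAD_MUTEX_INITIALIZER, true, 10, 5, 20 };

	rps_clamp(&rps, true);  /* boost: 20 */
	rps_clamp(&rps, false); /* idle: 5 */
	return 0;
}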
3609 | 3067 | ||
3610 | /* | ||
3611 | * Wait until the previous freq change has completed, | ||
3612 | * or the timeout elapsed, and then update our notion | ||
3613 | * of the current GPU frequency. | ||
3614 | */ | ||
3615 | static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv) | ||
3616 | { | ||
3617 | u32 pval; | ||
3618 | |||
3619 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | ||
3620 | |||
3621 | if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10)) | ||
3622 | DRM_DEBUG_DRIVER("timed out waiting for Punit\n"); | ||
3623 | |||
3624 | pval >>= 8; | ||
3625 | |||
3626 | if (pval != dev_priv->rps.cur_delay) | ||
3627 | DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n", | ||
3628 | vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay), | ||
3629 | dev_priv->rps.cur_delay, | ||
3630 | vlv_gpu_freq(dev_priv->mem_freq, pval), pval); | ||
3631 | |||
3632 | dev_priv->rps.cur_delay = pval; | ||
3633 | } | ||
3634 | |||
3635 | void valleyview_set_rps(struct drm_device *dev, u8 val) | 3068 | void valleyview_set_rps(struct drm_device *dev, u8 val) |
3636 | { | 3069 | { |
3637 | struct drm_i915_private *dev_priv = dev->dev_private; | 3070 | struct drm_i915_private *dev_priv = dev->dev_private; |
3638 | 3071 | ||
3639 | gen6_rps_limits(dev_priv, &val); | ||
3640 | |||
3641 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 3072 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
3642 | WARN_ON(val > dev_priv->rps.max_delay); | 3073 | WARN_ON(val > dev_priv->rps.max_delay); |
3643 | WARN_ON(val < dev_priv->rps.min_delay); | 3074 | WARN_ON(val < dev_priv->rps.min_delay); |
3644 | 3075 | ||
3645 | vlv_update_rps_cur_delay(dev_priv); | ||
3646 | |||
3647 | DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", | 3076 | DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", |
3648 | vlv_gpu_freq(dev_priv->mem_freq, | 3077 | vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), |
3649 | dev_priv->rps.cur_delay), | ||
3650 | dev_priv->rps.cur_delay, | 3078 | dev_priv->rps.cur_delay, |
3651 | vlv_gpu_freq(dev_priv->mem_freq, val), val); | 3079 | vlv_gpu_freq(dev_priv, val), val); |
3652 | 3080 | ||
3653 | if (val == dev_priv->rps.cur_delay) | 3081 | if (val == dev_priv->rps.cur_delay) |
3654 | return; | 3082 | return; |
@@ -3657,7 +3085,7 @@ void valleyview_set_rps(struct drm_device *dev, u8 val) | |||
3657 | 3085 | ||
3658 | dev_priv->rps.cur_delay = val; | 3086 | dev_priv->rps.cur_delay = val; |
3659 | 3087 | ||
3660 | trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val)); | 3088 | trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val)); |
3661 | } | 3089 | } |
3662 | 3090 | ||
3663 | static void gen6_disable_rps_interrupts(struct drm_device *dev) | 3091 | static void gen6_disable_rps_interrupts(struct drm_device *dev) |
@@ -3775,7 +3203,7 @@ static void gen8_enable_rps(struct drm_device *dev) | |||
3775 | 3203 | ||
3776 | /* 1c & 1d: Get forcewake during program sequence. Although the driver | 3204 | /* 1c & 1d: Get forcewake during program sequence. Although the driver |
3777 | * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ | 3205 | * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ |
3778 | gen6_gt_force_wake_get(dev_priv); | 3206 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); |
3779 | 3207 | ||
3780 | /* 2a: Disable RC states. */ | 3208 | /* 2a: Disable RC states. */ |
3781 | I915_WRITE(GEN6_RC_CONTROL, 0); | 3209 | I915_WRITE(GEN6_RC_CONTROL, 0); |
@@ -3832,7 +3260,7 @@ static void gen8_enable_rps(struct drm_device *dev) | |||
3832 | 3260 | ||
3833 | gen6_enable_rps_interrupts(dev); | 3261 | gen6_enable_rps_interrupts(dev); |
3834 | 3262 | ||
3835 | gen6_gt_force_wake_put(dev_priv); | 3263 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
3836 | } | 3264 | } |
3837 | 3265 | ||
3838 | static void gen6_enable_rps(struct drm_device *dev) | 3266 | static void gen6_enable_rps(struct drm_device *dev) |
@@ -3862,7 +3290,7 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
3862 | I915_WRITE(GTFIFODBG, gtfifodbg); | 3290 | I915_WRITE(GTFIFODBG, gtfifodbg); |
3863 | } | 3291 | } |
3864 | 3292 | ||
3865 | gen6_gt_force_wake_get(dev_priv); | 3293 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); |
3866 | 3294 | ||
3867 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 3295 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
3868 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | 3296 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
@@ -3954,7 +3382,7 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
3954 | DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); | 3382 | DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); |
3955 | } | 3383 | } |
3956 | 3384 | ||
3957 | gen6_gt_force_wake_put(dev_priv); | 3385 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
3958 | } | 3386 | } |
3959 | 3387 | ||
3960 | void gen6_update_ring_freq(struct drm_device *dev) | 3388 | void gen6_update_ring_freq(struct drm_device *dev) |
@@ -4116,7 +3544,8 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
4116 | 3544 | ||
4117 | valleyview_setup_pctx(dev); | 3545 | valleyview_setup_pctx(dev); |
4118 | 3546 | ||
4119 | gen6_gt_force_wake_get(dev_priv); | 3547 | /* If VLV, forcewake all wells; otherwise this redirects to the regular path */ |
3548 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); | ||
4120 | 3549 | ||
4121 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); | 3550 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); |
4122 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); | 3551 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); |
@@ -4140,7 +3569,7 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
4140 | for_each_ring(ring, dev_priv, i) | 3569 | for_each_ring(ring, dev_priv, i) |
4141 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); | 3570 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); |
4142 | 3571 | ||
4143 | I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350); | 3572 | I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); |
4144 | 3573 | ||
4145 | /* allows RC6 residency counter to work */ | 3574 | /* allows RC6 residency counter to work */ |
4146 | I915_WRITE(VLV_COUNTER_CONTROL, | 3575 | I915_WRITE(VLV_COUNTER_CONTROL, |
@@ -4148,65 +3577,47 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
4148 | VLV_MEDIA_RC6_COUNT_EN | | 3577 | VLV_MEDIA_RC6_COUNT_EN | |
4149 | VLV_RENDER_RC6_COUNT_EN)); | 3578 | VLV_RENDER_RC6_COUNT_EN)); |
4150 | if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) | 3579 | if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) |
4151 | rc6_mode = GEN7_RC_CTL_TO_MODE; | 3580 | rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; |
4152 | 3581 | ||
4153 | intel_print_rc6_info(dev, rc6_mode); | 3582 | intel_print_rc6_info(dev, rc6_mode); |
4154 | 3583 | ||
4155 | I915_WRITE(GEN6_RC_CONTROL, rc6_mode); | 3584 | I915_WRITE(GEN6_RC_CONTROL, rc6_mode); |
4156 | 3585 | ||
4157 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | 3586 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); |
4158 | switch ((val >> 6) & 3) { | ||
4159 | case 0: | ||
4160 | case 1: | ||
4161 | dev_priv->mem_freq = 800; | ||
4162 | break; | ||
4163 | case 2: | ||
4164 | dev_priv->mem_freq = 1066; | ||
4165 | break; | ||
4166 | case 3: | ||
4167 | dev_priv->mem_freq = 1333; | ||
4168 | break; | ||
4169 | } | ||
4170 | DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); | ||
4171 | 3587 | ||
4172 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); | 3588 | DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); |
4173 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); | 3589 | DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); |
4174 | 3590 | ||
4175 | dev_priv->rps.cur_delay = (val >> 8) & 0xff; | 3591 | dev_priv->rps.cur_delay = (val >> 8) & 0xff; |
4176 | DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", | 3592 | DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", |
4177 | vlv_gpu_freq(dev_priv->mem_freq, | 3593 | vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), |
4178 | dev_priv->rps.cur_delay), | ||
4179 | dev_priv->rps.cur_delay); | 3594 | dev_priv->rps.cur_delay); |
4180 | 3595 | ||
4181 | dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv); | 3596 | dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv); |
4182 | dev_priv->rps.hw_max = dev_priv->rps.max_delay; | 3597 | dev_priv->rps.hw_max = dev_priv->rps.max_delay; |
4183 | DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", | 3598 | DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", |
4184 | vlv_gpu_freq(dev_priv->mem_freq, | 3599 | vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay), |
4185 | dev_priv->rps.max_delay), | ||
4186 | dev_priv->rps.max_delay); | 3600 | dev_priv->rps.max_delay); |
4187 | 3601 | ||
4188 | dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv); | 3602 | dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv); |
4189 | DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", | 3603 | DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", |
4190 | vlv_gpu_freq(dev_priv->mem_freq, | 3604 | vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), |
4191 | dev_priv->rps.rpe_delay), | ||
4192 | dev_priv->rps.rpe_delay); | 3605 | dev_priv->rps.rpe_delay); |
4193 | 3606 | ||
4194 | dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv); | 3607 | dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv); |
4195 | DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", | 3608 | DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", |
4196 | vlv_gpu_freq(dev_priv->mem_freq, | 3609 | vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay), |
4197 | dev_priv->rps.min_delay), | ||
4198 | dev_priv->rps.min_delay); | 3610 | dev_priv->rps.min_delay); |
4199 | 3611 | ||
4200 | DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", | 3612 | DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", |
4201 | vlv_gpu_freq(dev_priv->mem_freq, | 3613 | vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), |
4202 | dev_priv->rps.rpe_delay), | ||
4203 | dev_priv->rps.rpe_delay); | 3614 | dev_priv->rps.rpe_delay); |
4204 | 3615 | ||
4205 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); | 3616 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); |
4206 | 3617 | ||
4207 | gen6_enable_rps_interrupts(dev); | 3618 | gen6_enable_rps_interrupts(dev); |
4208 | 3619 | ||
4209 | gen6_gt_force_wake_put(dev_priv); | 3620 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); |
4210 | } | 3621 | } |
4211 | 3622 | ||
4212 | void ironlake_teardown_rc6(struct drm_device *dev) | 3623 | void ironlake_teardown_rc6(struct drm_device *dev) |
@@ -5019,6 +4430,20 @@ static void g4x_disable_trickle_feed(struct drm_device *dev) | |||
5019 | } | 4430 | } |
5020 | } | 4431 | } |
5021 | 4432 | ||
4433 | static void ilk_init_lp_watermarks(struct drm_device *dev) | ||
4434 | { | ||
4435 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4436 | |||
4437 | I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); | ||
4438 | I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); | ||
4439 | I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); | ||
4440 | |||
4441 | /* | ||
4442 | * Don't touch WM1S_LP_EN here. | ||
4443 | * Doing so could cause underruns. | ||
4444 | */ | ||
4445 | } | ||
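Note what ilk_init_lp_watermarks() changes relative to the old I915_WRITE(WMn_LP_ILK, 0) calls it replaces below: a read-modify-write that clears only WM1_LP_SR_EN keeps the programmed watermark fields intact, so nothing is lost when the level is re-enabled later. A two-line worked example of the difference (the field value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define WM1_LP_SR_EN (1u << 31)

int main(void)
{
	uint32_t reg = WM1_LP_SR_EN | 0x00120304; /* enable bit + WM fields */

	printf("write 0:   0x%08x\n", 0u);                  /* fields lost */
	printf("clear bit: 0x%08x\n", reg & ~WM1_LP_SR_EN); /* 0x00120304 */
	return 0;
}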
4446 | |||
5022 | static void ironlake_init_clock_gating(struct drm_device *dev) | 4447 | static void ironlake_init_clock_gating(struct drm_device *dev) |
5023 | { | 4448 | { |
5024 | struct drm_i915_private *dev_priv = dev->dev_private; | 4449 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -5052,9 +4477,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev) | |||
5052 | I915_WRITE(DISP_ARB_CTL, | 4477 | I915_WRITE(DISP_ARB_CTL, |
5053 | (I915_READ(DISP_ARB_CTL) | | 4478 | (I915_READ(DISP_ARB_CTL) | |
5054 | DISP_FBC_WM_DIS)); | 4479 | DISP_FBC_WM_DIS)); |
5055 | I915_WRITE(WM3_LP_ILK, 0); | 4480 | |
5056 | I915_WRITE(WM2_LP_ILK, 0); | 4481 | ilk_init_lp_watermarks(dev); |
5057 | I915_WRITE(WM1_LP_ILK, 0); | ||
5058 | 4482 | ||
5059 | /* | 4483 | /* |
5060 | * Based on the document from hardware guys the following bits | 4484 | * Based on the document from hardware guys the following bits |
@@ -5161,9 +4585,7 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
5161 | I915_WRITE(GEN6_GT_MODE, | 4585 | I915_WRITE(GEN6_GT_MODE, |
5162 | _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); | 4586 | _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); |
5163 | 4587 | ||
5164 | I915_WRITE(WM3_LP_ILK, 0); | 4588 | ilk_init_lp_watermarks(dev); |
5165 | I915_WRITE(WM2_LP_ILK, 0); | ||
5166 | I915_WRITE(WM1_LP_ILK, 0); | ||
5167 | 4589 | ||
5168 | I915_WRITE(CACHE_MODE_0, | 4590 | I915_WRITE(CACHE_MODE_0, |
5169 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); | 4591 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); |
@@ -5304,28 +4726,40 @@ static void gen8_init_clock_gating(struct drm_device *dev) | |||
5304 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, | 4726 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, |
5305 | _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE)); | 4727 | _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE)); |
5306 | 4728 | ||
5307 | /* WaSwitchSolVfFArbitrationPriority */ | 4729 | /* WaSwitchSolVfFArbitrationPriority:bdw */ |
5308 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); | 4730 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); |
5309 | 4731 | ||
5310 | /* WaPsrDPAMaskVBlankInSRD */ | 4732 | /* WaPsrDPAMaskVBlankInSRD:bdw */ |
5311 | I915_WRITE(CHICKEN_PAR1_1, | 4733 | I915_WRITE(CHICKEN_PAR1_1, |
5312 | I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); | 4734 | I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); |
5313 | 4735 | ||
5314 | /* WaPsrDPRSUnmaskVBlankInSRD */ | 4736 | /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ |
5315 | for_each_pipe(i) { | 4737 | for_each_pipe(i) { |
5316 | I915_WRITE(CHICKEN_PIPESL_1(i), | 4738 | I915_WRITE(CHICKEN_PIPESL_1(i), |
5317 | I915_READ(CHICKEN_PIPESL_1(i) | | 4739 | I915_READ(CHICKEN_PIPESL_1(i) | |
5318 | DPRS_MASK_VBLANK_SRD)); | 4740 | DPRS_MASK_VBLANK_SRD)); |
5319 | } | 4741 | } |
4742 | |||
4743 | /* Use Force Non-Coherent whenever executing a 3D context. This is a | ||
4744 | * workaround for a possible hang in the unlikely event a TLB | ||
4745 | * invalidation occurs during a PSD flush. | ||
4746 | */ | ||
4747 | I915_WRITE(HDC_CHICKEN0, | ||
4748 | I915_READ(HDC_CHICKEN0) | | ||
4749 | _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT)); | ||
4750 | |||
4751 | /* WaVSRefCountFullforceMissDisable:bdw */ | ||
4752 | /* WaDSRefCountFullforceMissDisable:bdw */ | ||
4753 | I915_WRITE(GEN7_FF_THREAD_MODE, | ||
4754 | I915_READ(GEN7_FF_THREAD_MODE) & | ||
4755 | ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); | ||
5320 | } | 4756 | } |
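The _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() writes in this function rely on i915's masked registers: the top 16 bits of the written word select which of the low 16 bits the hardware actually updates, so no read-modify-write cycle is needed. A standalone model of that convention; the macro names are shortened and apply_masked_write() mimics what the hardware does with such a write:

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a)) /* mask + value */
#define MASKED_BIT_DISABLE(a)	((a) << 16)         /* mask, value 0 */

/* Bits whose mask bit is clear keep their old state. */
static uint16_t apply_masked_write(uint16_t old, uint32_t wr)
{
	uint16_t mask = wr >> 16;
	uint16_t val  = wr & 0xffff;

	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0x00f0;

	reg = apply_masked_write(reg, MASKED_BIT_ENABLE(0x0001));
	printf("0x%04x\n", reg); /* 0x00f1: other bits untouched */
	reg = apply_masked_write(reg, MASKED_BIT_DISABLE(0x0010));
	printf("0x%04x\n", reg); /* 0x00e1 */
	return 0;
}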
5321 | 4757 | ||
5322 | static void haswell_init_clock_gating(struct drm_device *dev) | 4758 | static void haswell_init_clock_gating(struct drm_device *dev) |
5323 | { | 4759 | { |
5324 | struct drm_i915_private *dev_priv = dev->dev_private; | 4760 | struct drm_i915_private *dev_priv = dev->dev_private; |
5325 | 4761 | ||
5326 | I915_WRITE(WM3_LP_ILK, 0); | 4762 | ilk_init_lp_watermarks(dev); |
5327 | I915_WRITE(WM2_LP_ILK, 0); | ||
5328 | I915_WRITE(WM1_LP_ILK, 0); | ||
5329 | 4763 | ||
5330 | /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. | 4764 | /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. |
5331 | * This implements the WaDisableRCZUnitClockGating:hsw workaround. | 4765 | * This implements the WaDisableRCZUnitClockGating:hsw workaround. |
@@ -5374,9 +4808,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
5374 | struct drm_i915_private *dev_priv = dev->dev_private; | 4808 | struct drm_i915_private *dev_priv = dev->dev_private; |
5375 | uint32_t snpcr; | 4809 | uint32_t snpcr; |
5376 | 4810 | ||
5377 | I915_WRITE(WM3_LP_ILK, 0); | 4811 | ilk_init_lp_watermarks(dev); |
5378 | I915_WRITE(WM2_LP_ILK, 0); | ||
5379 | I915_WRITE(WM1_LP_ILK, 0); | ||
5380 | 4812 | ||
5381 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); | 4813 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); |
5382 | 4814 | ||
@@ -5463,6 +4895,26 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
5463 | static void valleyview_init_clock_gating(struct drm_device *dev) | 4895 | static void valleyview_init_clock_gating(struct drm_device *dev) |
5464 | { | 4896 | { |
5465 | struct drm_i915_private *dev_priv = dev->dev_private; | 4897 | struct drm_i915_private *dev_priv = dev->dev_private; |
4898 | u32 val; | ||
4899 | |||
4900 | mutex_lock(&dev_priv->rps.hw_lock); | ||
4901 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | ||
4902 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
4903 | switch ((val >> 6) & 3) { | ||
4904 | case 0: | ||
4905 | dev_priv->mem_freq = 800; | ||
4906 | break; | ||
4907 | case 1: | ||
4908 | dev_priv->mem_freq = 1066; | ||
4909 | break; | ||
4910 | case 2: | ||
4911 | dev_priv->mem_freq = 1333; | ||
4912 | break; | ||
4913 | case 3: | ||
4914 | dev_priv->mem_freq = 1333; | ||
4915 | break; | ||
4916 | } | ||
4917 | DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); | ||
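The moved block reads PUNIT_REG_GPU_FREQ_STS once under the rps lock and decodes bits 7:6 into a DDR speed; note that encodings 2 and 3 both map to 1333 MHz in this version. The switch reduces to a table lookup, as in this sketch (the sample register value is made up):

#include <stdint.h>
#include <stdio.h>

/* bits 7:6 of PUNIT_REG_GPU_FREQ_STS -> DDR MHz, per the switch above */
static const int vlv_ddr_mhz[4] = { 800, 1066, 1333, 1333 };

int main(void)
{
	uint32_t val = 0x80; /* sample register value: field == 2 */

	printf("DDR speed: %d MHz\n", vlv_ddr_mhz[(val >> 6) & 3]);
	return 0;
}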
5466 | 4918 | ||
5467 | I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); | 4919 | I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); |
5468 | 4920 | ||
@@ -5642,50 +5094,133 @@ void intel_suspend_hw(struct drm_device *dev) | |||
5642 | lpt_suspend_hw(dev); | 5094 | lpt_suspend_hw(dev); |
5643 | } | 5095 | } |
5644 | 5096 | ||
5645 | static bool is_always_on_power_domain(struct drm_device *dev, | 5097 | #define for_each_power_well(i, power_well, domain_mask, power_domains) \ |
5646 | enum intel_display_power_domain domain) | 5098 | for (i = 0; \ |
5647 | { | 5099 | i < (power_domains)->power_well_count && \ |
5648 | unsigned long always_on_domains; | 5100 | ((power_well) = &(power_domains)->power_wells[i]); \ |
5101 | i++) \ | ||
5102 | if ((power_well)->domains & (domain_mask)) | ||
5649 | 5103 | ||
5650 | BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK); | 5104 | #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \ |
5651 | 5105 | for (i = (power_domains)->power_well_count - 1; \ | |
5652 | if (IS_BROADWELL(dev)) { | 5106 | i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\ |
5653 | always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS; | 5107 | i--) \ |
5654 | } else if (IS_HASWELL(dev)) { | 5108 | if ((power_well)->domains & (domain_mask)) |
5655 | always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS; | ||
5656 | } else { | ||
5657 | WARN_ON(1); | ||
5658 | return true; | ||
5659 | } | ||
5660 | |||
5661 | return BIT(domain) & always_on_domains; | ||
5662 | } | ||
5663 | 5109 | ||
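The two iterator macros above filter the platform's power-well array by a domain mask, walking forward for enable and backward for disable. They drop into a standalone program almost unchanged; here the forward variant is shown with a toy power_domains layout (field names match the diff, the well table is invented):

#include <stdio.h>

struct power_well {
	const char *name;
	unsigned long domains;
};

struct power_domains {
	int power_well_count;
	struct power_well *power_wells;
};

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		     ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

int main(void)
{
	struct power_well wells[] = {
		{ "always-on", ~0ul },
		{ "display", 0x6 },
	};
	struct power_domains pd = { 2, wells };
	struct power_well *w;
	int i;

	for_each_power_well(i, w, 0x4, &pd) /* mask matches both wells */
		printf("enable %s\n", w->name);
	return 0;
}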
5664 | /** | 5110 | /** |
5665 | * We should only use the power well if we explicitly asked the hardware to | 5111 | * We should only use the power well if we explicitly asked the hardware to |
5666 | * enable it, so check if it's enabled and also check if we've requested it to | 5112 | * enable it, so check if it's enabled and also check if we've requested it to |
5667 | * be enabled. | 5113 | * be enabled. |
5668 | */ | 5114 | */ |
5115 | static bool hsw_power_well_enabled(struct drm_device *dev, | ||
5116 | struct i915_power_well *power_well) | ||
5117 | { | ||
5118 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5119 | |||
5120 | return I915_READ(HSW_PWR_WELL_DRIVER) == | ||
5121 | (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); | ||
5122 | } | ||
5123 | |||
5124 | bool intel_display_power_enabled_sw(struct drm_device *dev, | ||
5125 | enum intel_display_power_domain domain) | ||
5126 | { | ||
5127 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5128 | struct i915_power_domains *power_domains; | ||
5129 | |||
5130 | power_domains = &dev_priv->power_domains; | ||
5131 | |||
5132 | return power_domains->domain_use_count[domain]; | ||
5133 | } | ||
5134 | |||
5669 | bool intel_display_power_enabled(struct drm_device *dev, | 5135 | bool intel_display_power_enabled(struct drm_device *dev, |
5670 | enum intel_display_power_domain domain) | 5136 | enum intel_display_power_domain domain) |
5671 | { | 5137 | { |
5672 | struct drm_i915_private *dev_priv = dev->dev_private; | 5138 | struct drm_i915_private *dev_priv = dev->dev_private; |
5139 | struct i915_power_domains *power_domains; | ||
5140 | struct i915_power_well *power_well; | ||
5141 | bool is_enabled; | ||
5142 | int i; | ||
5673 | 5143 | ||
5674 | if (!HAS_POWER_WELL(dev)) | 5144 | power_domains = &dev_priv->power_domains; |
5675 | return true; | ||
5676 | 5145 | ||
5677 | if (is_always_on_power_domain(dev, domain)) | 5146 | is_enabled = true; |
5678 | return true; | ||
5679 | 5147 | ||
5680 | return I915_READ(HSW_PWR_WELL_DRIVER) == | 5148 | mutex_lock(&power_domains->lock); |
5681 | (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); | 5149 | for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { |
5150 | if (power_well->always_on) | ||
5151 | continue; | ||
5152 | |||
5153 | if (!power_well->is_enabled(dev, power_well)) { | ||
5154 | is_enabled = false; | ||
5155 | break; | ||
5156 | } | ||
5157 | } | ||
5158 | mutex_unlock(&power_domains->lock); | ||
5159 | |||
5160 | return is_enabled; | ||
5161 | } | ||
5162 | |||
5163 | static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) | ||
5164 | { | ||
5165 | struct drm_device *dev = dev_priv->dev; | ||
5166 | unsigned long irqflags; | ||
5167 | |||
5168 | /* | ||
5169 | * After we re-enable the power well, if we touch VGA register 0x3d5 | ||
5170 | * we'll get unclaimed register interrupts. This stops after we write | ||
5171 | * anything to the VGA MSR register. The vgacon module uses this | ||
5172 | * register all the time, so if we unbind our driver and, as a | ||
5173 | * consequence, bind vgacon, we'll get stuck in an infinite loop at | ||
5174 | * console_unlock(). So here we touch the VGA MSR register, making | ||
5175 | * sure vgacon can keep working normally without triggering interrupts | ||
5176 | * and error messages. | ||
5177 | */ | ||
5178 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
5179 | outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); | ||
5180 | vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
5181 | |||
5182 | if (IS_BROADWELL(dev)) { | ||
5183 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
5184 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B), | ||
5185 | dev_priv->de_irq_mask[PIPE_B]); | ||
5186 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B), | ||
5187 | ~dev_priv->de_irq_mask[PIPE_B] | | ||
5188 | GEN8_PIPE_VBLANK); | ||
5189 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C), | ||
5190 | dev_priv->de_irq_mask[PIPE_C]); | ||
5191 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C), | ||
5192 | ~dev_priv->de_irq_mask[PIPE_C] | | ||
5193 | GEN8_PIPE_VBLANK); | ||
5194 | POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C)); | ||
5195 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
5196 | } | ||
5682 | } | 5197 | } |
5683 | 5198 | ||
5684 | static void __intel_set_power_well(struct drm_device *dev, bool enable) | 5199 | static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv) |
5200 | { | ||
5201 | struct drm_device *dev = dev_priv->dev; | ||
5202 | enum pipe p; | ||
5203 | unsigned long irqflags; | ||
5204 | |||
5205 | /* | ||
5206 | * After this, the registers on the pipes that are part of the power | ||
5207 | * well will become zero, so we have to adjust our counters according to | ||
5208 | * that. | ||
5209 | * | ||
5210 | * FIXME: Should we do this in general in drm_vblank_post_modeset? | ||
5211 | */ | ||
5212 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
5213 | for_each_pipe(p) | ||
5214 | if (p != PIPE_A) | ||
5215 | dev->vblank[p].last = 0; | ||
5216 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
5217 | } | ||
5218 | |||
5219 | static void hsw_set_power_well(struct drm_device *dev, | ||
5220 | struct i915_power_well *power_well, bool enable) | ||
5685 | { | 5221 | { |
5686 | struct drm_i915_private *dev_priv = dev->dev_private; | 5222 | struct drm_i915_private *dev_priv = dev->dev_private; |
5687 | bool is_enabled, enable_requested; | 5223 | bool is_enabled, enable_requested; |
5688 | unsigned long irqflags; | ||
5689 | uint32_t tmp; | 5224 | uint32_t tmp; |
5690 | 5225 | ||
5691 | WARN_ON(dev_priv->pc8.enabled); | 5226 | WARN_ON(dev_priv->pc8.enabled); |
@@ -5706,42 +5241,14 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
5706 | DRM_ERROR("Timeout enabling power well\n"); | 5241 | DRM_ERROR("Timeout enabling power well\n"); |
5707 | } | 5242 | } |
5708 | 5243 | ||
5709 | if (IS_BROADWELL(dev)) { | 5244 | hsw_power_well_post_enable(dev_priv); |
5710 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
5711 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B), | ||
5712 | dev_priv->de_irq_mask[PIPE_B]); | ||
5713 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B), | ||
5714 | ~dev_priv->de_irq_mask[PIPE_B] | | ||
5715 | GEN8_PIPE_VBLANK); | ||
5716 | I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C), | ||
5717 | dev_priv->de_irq_mask[PIPE_C]); | ||
5718 | I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C), | ||
5719 | ~dev_priv->de_irq_mask[PIPE_C] | | ||
5720 | GEN8_PIPE_VBLANK); | ||
5721 | POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C)); | ||
5722 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
5723 | } | ||
5724 | } else { | 5245 | } else { |
5725 | if (enable_requested) { | 5246 | if (enable_requested) { |
5726 | enum pipe p; | ||
5727 | |||
5728 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); | 5247 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); |
5729 | POSTING_READ(HSW_PWR_WELL_DRIVER); | 5248 | POSTING_READ(HSW_PWR_WELL_DRIVER); |
5730 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); | 5249 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); |
5731 | 5250 | ||
5732 | /* | 5251 | hsw_power_well_post_disable(dev_priv); |
5733 | * After this, the registers on the pipes that are part | ||
5734 | * of the power well will become zero, so we have to | ||
5735 | * adjust our counters according to that. | ||
5736 | * | ||
5737 | * FIXME: Should we do this in general in | ||
5738 | * drm_vblank_post_modeset? | ||
5739 | */ | ||
5740 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
5741 | for_each_pipe(p) | ||
5742 | if (p != PIPE_A) | ||
5743 | dev->vblank[p].last = 0; | ||
5744 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
5745 | } | 5252 | } |
5746 | } | 5253 | } |
5747 | } | 5254 | } |
@@ -5751,9 +5258,9 @@ static void __intel_power_well_get(struct drm_device *dev, | |||
5751 | { | 5258 | { |
5752 | struct drm_i915_private *dev_priv = dev->dev_private; | 5259 | struct drm_i915_private *dev_priv = dev->dev_private; |
5753 | 5260 | ||
5754 | if (!power_well->count++) { | 5261 | if (!power_well->count++ && power_well->set) { |
5755 | hsw_disable_package_c8(dev_priv); | 5262 | hsw_disable_package_c8(dev_priv); |
5756 | __intel_set_power_well(dev, true); | 5263 | power_well->set(dev, power_well, true); |
5757 | } | 5264 | } |
5758 | } | 5265 | } |
5759 | 5266 | ||
@@ -5763,8 +5270,10 @@ static void __intel_power_well_put(struct drm_device *dev, | |||
5763 | struct drm_i915_private *dev_priv = dev->dev_private; | 5270 | struct drm_i915_private *dev_priv = dev->dev_private; |
5764 | 5271 | ||
5765 | WARN_ON(!power_well->count); | 5272 | WARN_ON(!power_well->count); |
5766 | if (!--power_well->count && i915_disable_power_well) { | 5273 | |
5767 | __intel_set_power_well(dev, false); | 5274 | if (!--power_well->count && power_well->set && |
5275 | i915_disable_power_well) { | ||
5276 | power_well->set(dev, power_well, false); | ||
5768 | hsw_enable_package_c8(dev_priv); | 5277 | hsw_enable_package_c8(dev_priv); |
5769 | } | 5278 | } |
5770 | } | 5279 | } |
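__intel_power_well_get()/__intel_power_well_put() are a classic refcount pair: hardware is touched only on the 0-to-1 and 1-to-0 transitions, and only for wells that actually have a set() hook, which the always-on wells lack. A self-contained model of just that mechanism (assert() stands in for the driver's WARN_ON):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct power_well {
	const char *name;
	int count;
	void (*set)(struct power_well *well, bool enable); /* NULL: always-on */
};

static void hw_set(struct power_well *well, bool enable)
{
	printf("%s: hardware %s\n", well->name, enable ? "on" : "off");
}

static void well_get(struct power_well *well)
{
	if (!well->count++ && well->set)
		well->set(well, true);  /* only on 0 -> 1 */
}

static void well_put(struct power_well *well)
{
	assert(well->count);            /* WARN_ON in the driver */
	if (!--well->count && well->set)
		well->set(well, false); /* only on 1 -> 0 */
}

int main(void)
{
	struct power_well display = { "display", 0, hw_set };

	well_get(&display); /* powers on */
	well_get(&display); /* refcount only */
	well_put(&display); /* refcount only */
	well_put(&display); /* powers off */
	return 0;
}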
@@ -5774,17 +5283,18 @@ void intel_display_power_get(struct drm_device *dev, | |||
5774 | { | 5283 | { |
5775 | struct drm_i915_private *dev_priv = dev->dev_private; | 5284 | struct drm_i915_private *dev_priv = dev->dev_private; |
5776 | struct i915_power_domains *power_domains; | 5285 | struct i915_power_domains *power_domains; |
5777 | 5286 | struct i915_power_well *power_well; | |
5778 | if (!HAS_POWER_WELL(dev)) | 5287 | int i; |
5779 | return; | ||
5780 | |||
5781 | if (is_always_on_power_domain(dev, domain)) | ||
5782 | return; | ||
5783 | 5288 | ||
5784 | power_domains = &dev_priv->power_domains; | 5289 | power_domains = &dev_priv->power_domains; |
5785 | 5290 | ||
5786 | mutex_lock(&power_domains->lock); | 5291 | mutex_lock(&power_domains->lock); |
5787 | __intel_power_well_get(dev, &power_domains->power_wells[0]); | 5292 | |
5293 | for_each_power_well(i, power_well, BIT(domain), power_domains) | ||
5294 | __intel_power_well_get(dev, power_well); | ||
5295 | |||
5296 | power_domains->domain_use_count[domain]++; | ||
5297 | |||
5788 | mutex_unlock(&power_domains->lock); | 5298 | mutex_unlock(&power_domains->lock); |
5789 | } | 5299 | } |
5790 | 5300 | ||
@@ -5793,17 +5303,19 @@ void intel_display_power_put(struct drm_device *dev, | |||
5793 | { | 5303 | { |
5794 | struct drm_i915_private *dev_priv = dev->dev_private; | 5304 | struct drm_i915_private *dev_priv = dev->dev_private; |
5795 | struct i915_power_domains *power_domains; | 5305 | struct i915_power_domains *power_domains; |
5796 | 5306 | struct i915_power_well *power_well; | |
5797 | if (!HAS_POWER_WELL(dev)) | 5307 | int i; |
5798 | return; | ||
5799 | |||
5800 | if (is_always_on_power_domain(dev, domain)) | ||
5801 | return; | ||
5802 | 5308 | ||
5803 | power_domains = &dev_priv->power_domains; | 5309 | power_domains = &dev_priv->power_domains; |
5804 | 5310 | ||
5805 | mutex_lock(&power_domains->lock); | 5311 | mutex_lock(&power_domains->lock); |
5806 | __intel_power_well_put(dev, &power_domains->power_wells[0]); | 5312 | |
5313 | WARN_ON(!power_domains->domain_use_count[domain]); | ||
5314 | power_domains->domain_use_count[domain]--; | ||
5315 | |||
5316 | for_each_power_well_rev(i, power_well, BIT(domain), power_domains) | ||
5317 | __intel_power_well_put(dev, power_well); | ||
5318 | |||
5807 | mutex_unlock(&power_domains->lock); | 5319 | mutex_unlock(&power_domains->lock); |
5808 | } | 5320 | } |
5809 | 5321 | ||
@@ -5819,10 +5331,7 @@ void i915_request_power_well(void) | |||
5819 | 5331 | ||
5820 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, | 5332 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, |
5821 | power_domains); | 5333 | power_domains); |
5822 | 5334 | intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO); | |
5823 | mutex_lock(&hsw_pwr->lock); | ||
5824 | __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]); | ||
5825 | mutex_unlock(&hsw_pwr->lock); | ||
5826 | } | 5335 | } |
5827 | EXPORT_SYMBOL_GPL(i915_request_power_well); | 5336 | EXPORT_SYMBOL_GPL(i915_request_power_well); |
5828 | 5337 | ||
@@ -5836,24 +5345,71 @@ void i915_release_power_well(void) | |||
5836 | 5345 | ||
5837 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, | 5346 | dev_priv = container_of(hsw_pwr, struct drm_i915_private, |
5838 | power_domains); | 5347 | power_domains); |
5839 | 5348 | intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO); | |
5840 | mutex_lock(&hsw_pwr->lock); | ||
5841 | __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]); | ||
5842 | mutex_unlock(&hsw_pwr->lock); | ||
5843 | } | 5349 | } |
5844 | EXPORT_SYMBOL_GPL(i915_release_power_well); | 5350 | EXPORT_SYMBOL_GPL(i915_release_power_well); |
5845 | 5351 | ||
5352 | static struct i915_power_well i9xx_always_on_power_well[] = { | ||
5353 | { | ||
5354 | .name = "always-on", | ||
5355 | .always_on = 1, | ||
5356 | .domains = POWER_DOMAIN_MASK, | ||
5357 | }, | ||
5358 | }; | ||
5359 | |||
5360 | static struct i915_power_well hsw_power_wells[] = { | ||
5361 | { | ||
5362 | .name = "always-on", | ||
5363 | .always_on = 1, | ||
5364 | .domains = HSW_ALWAYS_ON_POWER_DOMAINS, | ||
5365 | }, | ||
5366 | { | ||
5367 | .name = "display", | ||
5368 | .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS, | ||
5369 | .is_enabled = hsw_power_well_enabled, | ||
5370 | .set = hsw_set_power_well, | ||
5371 | }, | ||
5372 | }; | ||
5373 | |||
5374 | static struct i915_power_well bdw_power_wells[] = { | ||
5375 | { | ||
5376 | .name = "always-on", | ||
5377 | .always_on = 1, | ||
5378 | .domains = BDW_ALWAYS_ON_POWER_DOMAINS, | ||
5379 | }, | ||
5380 | { | ||
5381 | .name = "display", | ||
5382 | .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS, | ||
5383 | .is_enabled = hsw_power_well_enabled, | ||
5384 | .set = hsw_set_power_well, | ||
5385 | }, | ||
5386 | }; | ||
5387 | |||
5388 | #define set_power_wells(power_domains, __power_wells) ({ \ | ||
5389 | (power_domains)->power_wells = (__power_wells); \ | ||
5390 | (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ | ||
5391 | }) | ||
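The per-platform tables turn the old if-ladder into data: each platform registers an ordered array of wells, and set_power_wells() records both the pointer and the element count. A sketch of the same selection; ARRAY_SIZE is the usual sizeof idiom, the platform flag is stubbed as an enum, and do-while replaces the kernel's statement-expression macro for portability:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct power_well { const char *name; int always_on; };

struct power_domains {
	const struct power_well *power_wells;
	int power_well_count;
};

#define set_power_wells(pd, wells) do {			\
	(pd)->power_wells = (wells);			\
	(pd)->power_well_count = ARRAY_SIZE(wells);	\
} while (0)

static const struct power_well i9xx_wells[] = {
	{ "always-on", 1 },
};

static const struct power_well hsw_wells[] = {
	{ "always-on", 1 },
	{ "display", 0 }, /* enabled low to high, disabled in reverse */
};

enum platform { PLAT_I9XX, PLAT_HSW };

int main(void)
{
	struct power_domains pd;
	enum platform plat = PLAT_HSW;

	if (plat == PLAT_HSW)
		set_power_wells(&pd, hsw_wells);
	else
		set_power_wells(&pd, i9xx_wells);

	printf("%d wells, first: %s\n", pd.power_well_count,
	       pd.power_wells[0].name);
	return 0;
}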
5392 | |||
5846 | int intel_power_domains_init(struct drm_device *dev) | 5393 | int intel_power_domains_init(struct drm_device *dev) |
5847 | { | 5394 | { |
5848 | struct drm_i915_private *dev_priv = dev->dev_private; | 5395 | struct drm_i915_private *dev_priv = dev->dev_private; |
5849 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 5396 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
5850 | struct i915_power_well *power_well; | ||
5851 | 5397 | ||
5852 | mutex_init(&power_domains->lock); | 5398 | mutex_init(&power_domains->lock); |
5853 | hsw_pwr = power_domains; | ||
5854 | 5399 | ||
5855 | power_well = &power_domains->power_wells[0]; | 5400 | /* |
5856 | power_well->count = 0; | 5401 | * The enabling order will be from lower to higher indexed wells; |
5402 | * the disabling order is reversed. | ||
5403 | */ | ||
5404 | if (IS_HASWELL(dev)) { | ||
5405 | set_power_wells(power_domains, hsw_power_wells); | ||
5406 | hsw_pwr = power_domains; | ||
5407 | } else if (IS_BROADWELL(dev)) { | ||
5408 | set_power_wells(power_domains, bdw_power_wells); | ||
5409 | hsw_pwr = power_domains; | ||
5410 | } else { | ||
5411 | set_power_wells(power_domains, i9xx_always_on_power_well); | ||
5412 | } | ||
5857 | 5413 | ||
5858 | return 0; | 5414 | return 0; |
5859 | } | 5415 | } |
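Each platform now describes its wells declaratively: a domain mask, an optional ->is_enabled/->set pair, and an always_on flag for wells the driver must never toggle. The for_each_power_well iterators used in the get/put paths are defined in a header outside this diff; a hypothetical expansion of the matching they are assumed to perform:

	/* Hypothetical sketch of the forward iteration; the real macro is
	 * not shown in this diff. */
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well = &power_domains->power_wells[i];

		if (power_well->domains & BIT(domain))
			__intel_power_well_get(dev, power_well);
	}
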
@@ -5868,15 +5424,13 @@ static void intel_power_domains_resume(struct drm_device *dev) | |||
5868 | struct drm_i915_private *dev_priv = dev->dev_private; | 5424 | struct drm_i915_private *dev_priv = dev->dev_private; |
5869 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 5425 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
5870 | struct i915_power_well *power_well; | 5426 | struct i915_power_well *power_well; |
5871 | 5427 | int i; | |
5872 | if (!HAS_POWER_WELL(dev)) | ||
5873 | return; | ||
5874 | 5428 | ||
5875 | mutex_lock(&power_domains->lock); | 5429 | mutex_lock(&power_domains->lock); |
5876 | 5430 | for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { | |
5877 | power_well = &power_domains->power_wells[0]; | 5431 | if (power_well->set) |
5878 | __intel_set_power_well(dev, power_well->count > 0); | 5432 | power_well->set(dev, power_well, power_well->count > 0); |
5879 | 5433 | } | |
5880 | mutex_unlock(&power_domains->lock); | 5434 | mutex_unlock(&power_domains->lock); |
5881 | } | 5435 | } |
5882 | 5436 | ||
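Resume simply re-drives every well to the state implied by its reference count, skipping always-on wells, which have no ->set hook. A condensed, hypothetical sketch of what a ->set implementation such as hsw_set_power_well() is assumed to look like, based on the HSW_PWR_WELL_* request bits used elsewhere in this patch:

	/* Condensed sketch only; the real implementation also polls the
	 * state bit with a timeout and handles IRQ rerouting. */
	static void hsw_set_power_well(struct drm_device *dev,
				       struct i915_power_well *power_well,
				       bool enable)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		if (enable)
			I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE_REQUEST);
		else
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
	}
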
@@ -5890,13 +5444,13 @@ void intel_power_domains_init_hw(struct drm_device *dev) | |||
5890 | { | 5444 | { |
5891 | struct drm_i915_private *dev_priv = dev->dev_private; | 5445 | struct drm_i915_private *dev_priv = dev->dev_private; |
5892 | 5446 | ||
5893 | if (!HAS_POWER_WELL(dev)) | ||
5894 | return; | ||
5895 | |||
5896 | /* For now, we need the power well to be always enabled. */ | 5447 | /* For now, we need the power well to be always enabled. */ |
5897 | intel_display_set_init_power(dev, true); | 5448 | intel_display_set_init_power(dev, true); |
5898 | intel_power_domains_resume(dev); | 5449 | intel_power_domains_resume(dev); |
5899 | 5450 | ||
5451 | if (!(IS_HASWELL(dev) || IS_BROADWELL(dev))) | ||
5452 | return; | ||
5453 | |||
5900 | /* We're taking over the BIOS, so clear any requests made by it since | 5454 | /* We're taking over the BIOS, so clear any requests made by it since |
5901 | * the driver is in charge now. */ | 5455 | * the driver is in charge now. */ |
5902 | if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) | 5456 | if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) |
@@ -5914,31 +5468,86 @@ void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) | |||
5914 | hsw_enable_package_c8(dev_priv); | 5468 | hsw_enable_package_c8(dev_priv); |
5915 | } | 5469 | } |
5916 | 5470 | ||
5471 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv) | ||
5472 | { | ||
5473 | struct drm_device *dev = dev_priv->dev; | ||
5474 | struct device *device = &dev->pdev->dev; | ||
5475 | |||
5476 | if (!HAS_RUNTIME_PM(dev)) | ||
5477 | return; | ||
5478 | |||
5479 | pm_runtime_get_sync(device); | ||
5480 | WARN(dev_priv->pm.suspended, "Device still suspended.\n"); | ||
5481 | } | ||
5482 | |||
5483 | void intel_runtime_pm_put(struct drm_i915_private *dev_priv) | ||
5484 | { | ||
5485 | struct drm_device *dev = dev_priv->dev; | ||
5486 | struct device *device = &dev->pdev->dev; | ||
5487 | |||
5488 | if (!HAS_RUNTIME_PM(dev)) | ||
5489 | return; | ||
5490 | |||
5491 | pm_runtime_mark_last_busy(device); | ||
5492 | pm_runtime_put_autosuspend(device); | ||
5493 | } | ||
5494 | |||
5495 | void intel_init_runtime_pm(struct drm_i915_private *dev_priv) | ||
5496 | { | ||
5497 | struct drm_device *dev = dev_priv->dev; | ||
5498 | struct device *device = &dev->pdev->dev; | ||
5499 | |||
5500 | dev_priv->pm.suspended = false; | ||
5501 | |||
5502 | if (!HAS_RUNTIME_PM(dev)) | ||
5503 | return; | ||
5504 | |||
5505 | pm_runtime_set_active(device); | ||
5506 | |||
5507 | pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ | ||
5508 | pm_runtime_mark_last_busy(device); | ||
5509 | pm_runtime_use_autosuspend(device); | ||
5510 | } | ||
5511 | |||
5512 | void intel_fini_runtime_pm(struct drm_i915_private *dev_priv) | ||
5513 | { | ||
5514 | struct drm_device *dev = dev_priv->dev; | ||
5515 | struct device *device = &dev->pdev->dev; | ||
5516 | |||
5517 | if (!HAS_RUNTIME_PM(dev)) | ||
5518 | return; | ||
5519 | |||
5520 | /* First make sure we're not suspended, then disable runtime PM. */ | ||
5521 | pm_runtime_get_sync(device); | ||
5522 | pm_runtime_disable(device); | ||
5523 | } | ||
5524 | |||
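The new helpers wrap the kernel's generic runtime-PM core: init marks the device active and arms a 10-second autosuspend window, get takes a synchronous reference (warning if the device somehow remained suspended), and put drops the reference through the autosuspend path so short bursts of activity do not bounce the device in and out of a suspended state. Typical bracketing by a caller:

	/* intel_runtime_pm_get() guarantees the device is resumed on return. */
	intel_runtime_pm_get(dev_priv);
	/* ... MMIO access that requires the device to be awake ... */
	intel_runtime_pm_put(dev_priv);	/* device may autosuspend 10s later */
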
5917 | /* Set up chip specific power management-related functions */ | 5525 | /* Set up chip specific power management-related functions */ |
5918 | void intel_init_pm(struct drm_device *dev) | 5526 | void intel_init_pm(struct drm_device *dev) |
5919 | { | 5527 | { |
5920 | struct drm_i915_private *dev_priv = dev->dev_private; | 5528 | struct drm_i915_private *dev_priv = dev->dev_private; |
5921 | 5529 | ||
5922 | if (I915_HAS_FBC(dev)) { | 5530 | if (HAS_FBC(dev)) { |
5923 | if (HAS_PCH_SPLIT(dev)) { | 5531 | if (INTEL_INFO(dev)->gen >= 7) { |
5924 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; | 5532 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
5925 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 5533 | dev_priv->display.enable_fbc = gen7_enable_fbc; |
5926 | dev_priv->display.enable_fbc = | 5534 | dev_priv->display.disable_fbc = ironlake_disable_fbc; |
5927 | gen7_enable_fbc; | 5535 | } else if (INTEL_INFO(dev)->gen >= 5) { |
5928 | else | 5536 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
5929 | dev_priv->display.enable_fbc = | 5537 | dev_priv->display.enable_fbc = ironlake_enable_fbc; |
5930 | ironlake_enable_fbc; | ||
5931 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | 5538 | dev_priv->display.disable_fbc = ironlake_disable_fbc; |
5932 | } else if (IS_GM45(dev)) { | 5539 | } else if (IS_GM45(dev)) { |
5933 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | 5540 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
5934 | dev_priv->display.enable_fbc = g4x_enable_fbc; | 5541 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
5935 | dev_priv->display.disable_fbc = g4x_disable_fbc; | 5542 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
5936 | } else if (IS_CRESTLINE(dev)) { | 5543 | } else { |
5937 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | 5544 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; |
5938 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | 5545 | dev_priv->display.enable_fbc = i8xx_enable_fbc; |
5939 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | 5546 | dev_priv->display.disable_fbc = i8xx_disable_fbc; |
5547 | |||
5548 | /* This value was pulled out of someone's hat */ | ||
5549 | I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); | ||
5940 | } | 5550 | } |
5941 | /* 855GM needs testing */ | ||
5942 | } | 5551 | } |
5943 | 5552 | ||
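FBC setup now keys on hardware generation instead of an explicit platform list: gen7+ takes gen7_enable_fbc, gen5-6 the Ironlake pair, GM45 the g4x pair, and everything older falls through to the i8xx functions, with the 8xx interval default programmed once here at init time. The selected hooks are consumed through the dev_priv->display table; an illustrative call site, assuming the enable hook takes only the crtc and that the real enable path defers through a work item:

	if (dev_priv->display.enable_fbc)
		dev_priv->display.enable_fbc(crtc);
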
5944 | /* For cxsr */ | 5553 | /* For cxsr */ |
@@ -5951,58 +5560,27 @@ void intel_init_pm(struct drm_device *dev) | |||
5951 | if (HAS_PCH_SPLIT(dev)) { | 5560 | if (HAS_PCH_SPLIT(dev)) { |
5952 | intel_setup_wm_latency(dev); | 5561 | intel_setup_wm_latency(dev); |
5953 | 5562 | ||
5954 | if (IS_GEN5(dev)) { | 5563 | if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && |
5955 | if (dev_priv->wm.pri_latency[1] && | 5564 | dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || |
5956 | dev_priv->wm.spr_latency[1] && | 5565 | (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && |
5957 | dev_priv->wm.cur_latency[1]) | 5566 | dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { |
5958 | dev_priv->display.update_wm = ironlake_update_wm; | 5567 | dev_priv->display.update_wm = ilk_update_wm; |
5959 | else { | 5568 | dev_priv->display.update_sprite_wm = ilk_update_sprite_wm; |
5960 | DRM_DEBUG_KMS("Failed to get proper latency. " | 5569 | } else { |
5961 | "Disable CxSR\n"); | 5570 | DRM_DEBUG_KMS("Failed to read display plane latency. " |
5962 | dev_priv->display.update_wm = NULL; | 5571 | "Disable CxSR\n"); |
5963 | } | 5572 | } |
5573 | |||
5574 | if (IS_GEN5(dev)) | ||
5964 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; | 5575 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; |
5965 | } else if (IS_GEN6(dev)) { | 5576 | else if (IS_GEN6(dev)) |
5966 | if (dev_priv->wm.pri_latency[0] && | ||
5967 | dev_priv->wm.spr_latency[0] && | ||
5968 | dev_priv->wm.cur_latency[0]) { | ||
5969 | dev_priv->display.update_wm = sandybridge_update_wm; | ||
5970 | dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; | ||
5971 | } else { | ||
5972 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
5973 | "Disable CxSR\n"); | ||
5974 | dev_priv->display.update_wm = NULL; | ||
5975 | } | ||
5976 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; | 5577 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; |
5977 | } else if (IS_IVYBRIDGE(dev)) { | 5578 | else if (IS_IVYBRIDGE(dev)) |
5978 | if (dev_priv->wm.pri_latency[0] && | ||
5979 | dev_priv->wm.spr_latency[0] && | ||
5980 | dev_priv->wm.cur_latency[0]) { | ||
5981 | dev_priv->display.update_wm = ivybridge_update_wm; | ||
5982 | dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; | ||
5983 | } else { | ||
5984 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
5985 | "Disable CxSR\n"); | ||
5986 | dev_priv->display.update_wm = NULL; | ||
5987 | } | ||
5988 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; | 5579 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; |
5989 | } else if (IS_HASWELL(dev)) { | 5580 | else if (IS_HASWELL(dev)) |
5990 | if (dev_priv->wm.pri_latency[0] && | ||
5991 | dev_priv->wm.spr_latency[0] && | ||
5992 | dev_priv->wm.cur_latency[0]) { | ||
5993 | dev_priv->display.update_wm = haswell_update_wm; | ||
5994 | dev_priv->display.update_sprite_wm = | ||
5995 | haswell_update_sprite_wm; | ||
5996 | } else { | ||
5997 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
5998 | "Disable CxSR\n"); | ||
5999 | dev_priv->display.update_wm = NULL; | ||
6000 | } | ||
6001 | dev_priv->display.init_clock_gating = haswell_init_clock_gating; | 5581 | dev_priv->display.init_clock_gating = haswell_init_clock_gating; |
6002 | } else if (INTEL_INFO(dev)->gen == 8) { | 5582 | else if (INTEL_INFO(dev)->gen == 8) |
6003 | dev_priv->display.init_clock_gating = gen8_init_clock_gating; | 5583 | dev_priv->display.init_clock_gating = gen8_init_clock_gating; |
6004 | } else | ||
6005 | dev_priv->display.update_wm = NULL; | ||
6006 | } else if (IS_VALLEYVIEW(dev)) { | 5584 | } else if (IS_VALLEYVIEW(dev)) { |
6007 | dev_priv->display.update_wm = valleyview_update_wm; | 5585 | dev_priv->display.update_wm = valleyview_update_wm; |
6008 | dev_priv->display.init_clock_gating = | 5586 | dev_priv->display.init_clock_gating = |
@@ -6036,21 +5614,21 @@ void intel_init_pm(struct drm_device *dev) | |||
6036 | dev_priv->display.update_wm = i9xx_update_wm; | 5614 | dev_priv->display.update_wm = i9xx_update_wm; |
6037 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | 5615 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; |
6038 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; | 5616 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
6039 | } else if (IS_I865G(dev)) { | 5617 | } else if (IS_GEN2(dev)) { |
6040 | dev_priv->display.update_wm = i830_update_wm; | 5618 | if (INTEL_INFO(dev)->num_pipes == 1) { |
6041 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | 5619 | dev_priv->display.update_wm = i845_update_wm; |
6042 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | ||
6043 | } else if (IS_I85X(dev)) { | ||
6044 | dev_priv->display.update_wm = i9xx_update_wm; | ||
6045 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | ||
6046 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | ||
6047 | } else { | ||
6048 | dev_priv->display.update_wm = i830_update_wm; | ||
6049 | dev_priv->display.init_clock_gating = i830_init_clock_gating; | ||
6050 | if (IS_845G(dev)) | ||
6051 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | 5620 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
6052 | else | 5621 | } else { |
5622 | dev_priv->display.update_wm = i9xx_update_wm; | ||
6053 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | 5623 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
5624 | } | ||
5625 | |||
5626 | if (IS_I85X(dev) || IS_I865G(dev)) | ||
5627 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | ||
5628 | else | ||
5629 | dev_priv->display.init_clock_gating = i830_init_clock_gating; | ||
5630 | } else { | ||
5631 | DRM_ERROR("unexpected fall-through in intel_init_pm\n"); | ||
6054 | } | 5632 | } |
6055 | } | 5633 | } |
6056 | 5634 | ||
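The watermark side of intel_init_pm() follows the same dispatch-table pattern: get_fifo_size reports how much of the display FIFO a plane owns, and update_wm recomputes and programs the watermarks from that budget. A sketch of how the two hooks are assumed to cooperate at a call site (signatures inferred from the assignments above, not confirmed by this diff):

	/* Hypothetical consumer of the vfuncs selected above. */
	int fifo_entries = dev_priv->display.get_fifo_size(dev, 0 /* plane A */);
	/* derive watermark levels from fifo_entries and latency, then: */
	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
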
@@ -6101,59 +5679,48 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) | |||
6101 | return 0; | 5679 | return 0; |
6102 | } | 5680 | } |
6103 | 5681 | ||
6104 | int vlv_gpu_freq(int ddr_freq, int val) | 5682 | int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) |
6105 | { | 5683 | { |
6106 | int mult, base; | 5684 | int div; |
6107 | 5685 | ||
6108 | switch (ddr_freq) { | 5686 | /* 4 x czclk */ |
5687 | switch (dev_priv->mem_freq) { | ||
6109 | case 800: | 5688 | case 800: |
6110 | mult = 20; | 5689 | div = 10; |
6111 | base = 120; | ||
6112 | break; | 5690 | break; |
6113 | case 1066: | 5691 | case 1066: |
6114 | mult = 22; | 5692 | div = 12; |
6115 | base = 133; | ||
6116 | break; | 5693 | break; |
6117 | case 1333: | 5694 | case 1333: |
6118 | mult = 21; | 5695 | div = 16; |
6119 | base = 125; | ||
6120 | break; | 5696 | break; |
6121 | default: | 5697 | default: |
6122 | return -1; | 5698 | return -1; |
6123 | } | 5699 | } |
6124 | 5700 | ||
6125 | return ((val - 0xbd) * mult) + base; | 5701 | return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div); |
6126 | } | 5702 | } |
6127 | 5703 | ||
6128 | int vlv_freq_opcode(int ddr_freq, int val) | 5704 | int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) |
6129 | { | 5705 | { |
6130 | int mult, base; | 5706 | int mul; |
6131 | 5707 | ||
6132 | switch (ddr_freq) { | 5708 | /* 4 x czclk */ |
5709 | switch (dev_priv->mem_freq) { | ||
6133 | case 800: | 5710 | case 800: |
6134 | mult = 20; | 5711 | mul = 10; |
6135 | base = 120; | ||
6136 | break; | 5712 | break; |
6137 | case 1066: | 5713 | case 1066: |
6138 | mult = 22; | 5714 | mul = 12; |
6139 | base = 133; | ||
6140 | break; | 5715 | break; |
6141 | case 1333: | 5716 | case 1333: |
6142 | mult = 21; | 5717 | mul = 16; |
6143 | base = 125; | ||
6144 | break; | 5718 | break; |
6145 | default: | 5719 | default: |
6146 | return -1; | 5720 | return -1; |
6147 | } | 5721 | } |
6148 | 5722 | ||
6149 | val /= mult; | 5723 | return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; |
6150 | val -= base / mult; | ||
6151 | val += 0xbd; | ||
6152 | |||
6153 | if (val > 0xea) | ||
6154 | val = 0xea; | ||
6155 | |||
6156 | return val; | ||
6157 | } | 5724 | } |
6158 | 5725 | ||
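Both conversion helpers are rewritten from a per-memory-frequency lookup (mult/base pairs) into one closed-form expression around 4 x czclk, with DIV_ROUND_CLOSEST giving correctly rounded results and the two directions now being mirror images of one another; note that the old clamp to 0xea in vlv_freq_opcode() is dropped. A quick numeric check that the closed form reproduces the old anchor points, for mem_freq = 1066 (div = 12):

	/*
	 * val = 0xbd, the old base point:
	 *   DIV_ROUND_CLOSEST(1066 * (0xbd + 6 - 0xbd), 4 * 12)
	 * = DIV_ROUND_CLOSEST(6396, 48) = 133 MHz          (old: base = 133)
	 * and each +1 step in val adds 1066/48 ~= 22.2 MHz (old: mult = 22),
	 * so the differences from the old table amount to rounding fixes.
	 */
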
6159 | void intel_pm_setup(struct drm_device *dev) | 5726 | void intel_pm_setup(struct drm_device *dev) |