diff options
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 1222 |
1 files changed, 930 insertions, 292 deletions
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fca523288aca..98967f3b7724 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -642,26 +642,23 @@ static const intel_limit_t intel_limits_ironlake_display_port = { | |||
642 | .find_pll = intel_find_pll_ironlake_dp, | 642 | .find_pll = intel_find_pll_ironlake_dp, |
643 | }; | 643 | }; |
644 | 644 | ||
645 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | 645 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, |
646 | int refclk) | ||
646 | { | 647 | { |
647 | struct drm_device *dev = crtc->dev; | 648 | struct drm_device *dev = crtc->dev; |
648 | struct drm_i915_private *dev_priv = dev->dev_private; | 649 | struct drm_i915_private *dev_priv = dev->dev_private; |
649 | const intel_limit_t *limit; | 650 | const intel_limit_t *limit; |
650 | int refclk = 120; | ||
651 | 651 | ||
652 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 652 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
653 | if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) | ||
654 | refclk = 100; | ||
655 | |||
656 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | 653 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == |
657 | LVDS_CLKB_POWER_UP) { | 654 | LVDS_CLKB_POWER_UP) { |
658 | /* LVDS dual channel */ | 655 | /* LVDS dual channel */ |
659 | if (refclk == 100) | 656 | if (refclk == 100000) |
660 | limit = &intel_limits_ironlake_dual_lvds_100m; | 657 | limit = &intel_limits_ironlake_dual_lvds_100m; |
661 | else | 658 | else |
662 | limit = &intel_limits_ironlake_dual_lvds; | 659 | limit = &intel_limits_ironlake_dual_lvds; |
663 | } else { | 660 | } else { |
664 | if (refclk == 100) | 661 | if (refclk == 100000) |
665 | limit = &intel_limits_ironlake_single_lvds_100m; | 662 | limit = &intel_limits_ironlake_single_lvds_100m; |
666 | else | 663 | else |
667 | limit = &intel_limits_ironlake_single_lvds; | 664 | limit = &intel_limits_ironlake_single_lvds; |
@@ -702,13 +699,13 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) | |||
702 | return limit; | 699 | return limit; |
703 | } | 700 | } |
704 | 701 | ||
705 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | 702 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) |
706 | { | 703 | { |
707 | struct drm_device *dev = crtc->dev; | 704 | struct drm_device *dev = crtc->dev; |
708 | const intel_limit_t *limit; | 705 | const intel_limit_t *limit; |
709 | 706 | ||
710 | if (HAS_PCH_SPLIT(dev)) | 707 | if (HAS_PCH_SPLIT(dev)) |
711 | limit = intel_ironlake_limit(crtc); | 708 | limit = intel_ironlake_limit(crtc, refclk); |
712 | else if (IS_G4X(dev)) { | 709 | else if (IS_G4X(dev)) { |
713 | limit = intel_g4x_limit(crtc); | 710 | limit = intel_g4x_limit(crtc); |
714 | } else if (IS_PINEVIEW(dev)) { | 711 | } else if (IS_PINEVIEW(dev)) { |
@@ -773,11 +770,10 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type) | |||
773 | * the given connectors. | 770 | * the given connectors. |
774 | */ | 771 | */ |
775 | 772 | ||
776 | static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | 773 | static bool intel_PLL_is_valid(struct drm_device *dev, |
774 | const intel_limit_t *limit, | ||
775 | const intel_clock_t *clock) | ||
777 | { | 776 | { |
778 | const intel_limit_t *limit = intel_limit (crtc); | ||
779 | struct drm_device *dev = crtc->dev; | ||
780 | |||
781 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) | 777 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
782 | INTELPllInvalid ("p1 out of range\n"); | 778 | INTELPllInvalid ("p1 out of range\n"); |
783 | if (clock->p < limit->p.min || limit->p.max < clock->p) | 779 | if (clock->p < limit->p.min || limit->p.max < clock->p) |
@@ -849,8 +845,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
849 | int this_err; | 845 | int this_err; |
850 | 846 | ||
851 | intel_clock(dev, refclk, &clock); | 847 | intel_clock(dev, refclk, &clock); |
852 | 848 | if (!intel_PLL_is_valid(dev, limit, | |
853 | if (!intel_PLL_is_valid(crtc, &clock)) | 849 | &clock)) |
854 | continue; | 850 | continue; |
855 | 851 | ||
856 | this_err = abs(clock.dot - target); | 852 | this_err = abs(clock.dot - target); |
@@ -912,9 +908,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
912 | int this_err; | 908 | int this_err; |
913 | 909 | ||
914 | intel_clock(dev, refclk, &clock); | 910 | intel_clock(dev, refclk, &clock); |
915 | if (!intel_PLL_is_valid(crtc, &clock)) | 911 | if (!intel_PLL_is_valid(dev, limit, |
912 | &clock)) | ||
916 | continue; | 913 | continue; |
917 | this_err = abs(clock.dot - target) ; | 914 | |
915 | this_err = abs(clock.dot - target); | ||
918 | if (this_err < err_most) { | 916 | if (this_err < err_most) { |
919 | *best_clock = clock; | 917 | *best_clock = clock; |
920 | err_most = this_err; | 918 | err_most = this_err; |
@@ -1066,13 +1064,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1066 | struct drm_i915_private *dev_priv = dev->dev_private; | 1064 | struct drm_i915_private *dev_priv = dev->dev_private; |
1067 | struct drm_framebuffer *fb = crtc->fb; | 1065 | struct drm_framebuffer *fb = crtc->fb; |
1068 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1066 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1069 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1067 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1070 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1068 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1071 | int plane, i; | 1069 | int plane, i; |
1072 | u32 fbc_ctl, fbc_ctl2; | 1070 | u32 fbc_ctl, fbc_ctl2; |
1073 | 1071 | ||
1074 | if (fb->pitch == dev_priv->cfb_pitch && | 1072 | if (fb->pitch == dev_priv->cfb_pitch && |
1075 | obj_priv->fence_reg == dev_priv->cfb_fence && | 1073 | obj->fence_reg == dev_priv->cfb_fence && |
1076 | intel_crtc->plane == dev_priv->cfb_plane && | 1074 | intel_crtc->plane == dev_priv->cfb_plane && |
1077 | I915_READ(FBC_CONTROL) & FBC_CTL_EN) | 1075 | I915_READ(FBC_CONTROL) & FBC_CTL_EN) |
1078 | return; | 1076 | return; |
@@ -1086,7 +1084,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1086 | 1084 | ||
1087 | /* FBC_CTL wants 64B units */ | 1085 | /* FBC_CTL wants 64B units */ |
1088 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1086 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1089 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1087 | dev_priv->cfb_fence = obj->fence_reg; |
1090 | dev_priv->cfb_plane = intel_crtc->plane; | 1088 | dev_priv->cfb_plane = intel_crtc->plane; |
1091 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; | 1089 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; |
1092 | 1090 | ||
@@ -1096,7 +1094,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1096 | 1094 | ||
1097 | /* Set it up... */ | 1095 | /* Set it up... */ |
1098 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; | 1096 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; |
1099 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1097 | if (obj->tiling_mode != I915_TILING_NONE) |
1100 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; | 1098 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; |
1101 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | 1099 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); |
1102 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | 1100 | I915_WRITE(FBC_FENCE_OFF, crtc->y); |
@@ -1107,7 +1105,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1107 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | 1105 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
1108 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 1106 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
1109 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | 1107 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
1110 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1108 | if (obj->tiling_mode != I915_TILING_NONE) |
1111 | fbc_ctl |= dev_priv->cfb_fence; | 1109 | fbc_ctl |= dev_priv->cfb_fence; |
1112 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 1110 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1113 | 1111 | ||
@@ -1150,7 +1148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1150 | struct drm_i915_private *dev_priv = dev->dev_private; | 1148 | struct drm_i915_private *dev_priv = dev->dev_private; |
1151 | struct drm_framebuffer *fb = crtc->fb; | 1149 | struct drm_framebuffer *fb = crtc->fb; |
1152 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1150 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1153 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1151 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1154 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1152 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1155 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; | 1153 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1156 | unsigned long stall_watermark = 200; | 1154 | unsigned long stall_watermark = 200; |
@@ -1159,7 +1157,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1159 | dpfc_ctl = I915_READ(DPFC_CONTROL); | 1157 | dpfc_ctl = I915_READ(DPFC_CONTROL); |
1160 | if (dpfc_ctl & DPFC_CTL_EN) { | 1158 | if (dpfc_ctl & DPFC_CTL_EN) { |
1161 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | 1159 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && |
1162 | dev_priv->cfb_fence == obj_priv->fence_reg && | 1160 | dev_priv->cfb_fence == obj->fence_reg && |
1163 | dev_priv->cfb_plane == intel_crtc->plane && | 1161 | dev_priv->cfb_plane == intel_crtc->plane && |
1164 | dev_priv->cfb_y == crtc->y) | 1162 | dev_priv->cfb_y == crtc->y) |
1165 | return; | 1163 | return; |
@@ -1170,12 +1168,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1170 | } | 1168 | } |
1171 | 1169 | ||
1172 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1170 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1173 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1171 | dev_priv->cfb_fence = obj->fence_reg; |
1174 | dev_priv->cfb_plane = intel_crtc->plane; | 1172 | dev_priv->cfb_plane = intel_crtc->plane; |
1175 | dev_priv->cfb_y = crtc->y; | 1173 | dev_priv->cfb_y = crtc->y; |
1176 | 1174 | ||
1177 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; | 1175 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; |
1178 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1176 | if (obj->tiling_mode != I915_TILING_NONE) { |
1179 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; | 1177 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; |
1180 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); | 1178 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); |
1181 | } else { | 1179 | } else { |
@@ -1221,7 +1219,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1221 | struct drm_i915_private *dev_priv = dev->dev_private; | 1219 | struct drm_i915_private *dev_priv = dev->dev_private; |
1222 | struct drm_framebuffer *fb = crtc->fb; | 1220 | struct drm_framebuffer *fb = crtc->fb; |
1223 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1221 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1224 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1222 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1225 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1223 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1226 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; | 1224 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1227 | unsigned long stall_watermark = 200; | 1225 | unsigned long stall_watermark = 200; |
@@ -1230,9 +1228,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1230 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | 1228 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
1231 | if (dpfc_ctl & DPFC_CTL_EN) { | 1229 | if (dpfc_ctl & DPFC_CTL_EN) { |
1232 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | 1230 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && |
1233 | dev_priv->cfb_fence == obj_priv->fence_reg && | 1231 | dev_priv->cfb_fence == obj->fence_reg && |
1234 | dev_priv->cfb_plane == intel_crtc->plane && | 1232 | dev_priv->cfb_plane == intel_crtc->plane && |
1235 | dev_priv->cfb_offset == obj_priv->gtt_offset && | 1233 | dev_priv->cfb_offset == obj->gtt_offset && |
1236 | dev_priv->cfb_y == crtc->y) | 1234 | dev_priv->cfb_y == crtc->y) |
1237 | return; | 1235 | return; |
1238 | 1236 | ||
@@ -1242,14 +1240,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1242 | } | 1240 | } |
1243 | 1241 | ||
1244 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1242 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1245 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1243 | dev_priv->cfb_fence = obj->fence_reg; |
1246 | dev_priv->cfb_plane = intel_crtc->plane; | 1244 | dev_priv->cfb_plane = intel_crtc->plane; |
1247 | dev_priv->cfb_offset = obj_priv->gtt_offset; | 1245 | dev_priv->cfb_offset = obj->gtt_offset; |
1248 | dev_priv->cfb_y = crtc->y; | 1246 | dev_priv->cfb_y = crtc->y; |
1249 | 1247 | ||
1250 | dpfc_ctl &= DPFC_RESERVED; | 1248 | dpfc_ctl &= DPFC_RESERVED; |
1251 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); | 1249 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); |
1252 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1250 | if (obj->tiling_mode != I915_TILING_NONE) { |
1253 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); | 1251 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); |
1254 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); | 1252 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); |
1255 | } else { | 1253 | } else { |
@@ -1260,10 +1258,16 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1260 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | 1258 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
1261 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | 1259 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); |
1262 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | 1260 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); |
1263 | I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); | 1261 | I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); |
1264 | /* enable it... */ | 1262 | /* enable it... */ |
1265 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | 1263 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
1266 | 1264 | ||
1265 | if (IS_GEN6(dev)) { | ||
1266 | I915_WRITE(SNB_DPFC_CTL_SA, | ||
1267 | SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); | ||
1268 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | ||
1269 | } | ||
1270 | |||
1267 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); | 1271 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
1268 | } | 1272 | } |
1269 | 1273 | ||
@@ -1345,7 +1349,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1345 | struct intel_crtc *intel_crtc; | 1349 | struct intel_crtc *intel_crtc; |
1346 | struct drm_framebuffer *fb; | 1350 | struct drm_framebuffer *fb; |
1347 | struct intel_framebuffer *intel_fb; | 1351 | struct intel_framebuffer *intel_fb; |
1348 | struct drm_i915_gem_object *obj_priv; | 1352 | struct drm_i915_gem_object *obj; |
1349 | 1353 | ||
1350 | DRM_DEBUG_KMS("\n"); | 1354 | DRM_DEBUG_KMS("\n"); |
1351 | 1355 | ||
@@ -1384,9 +1388,9 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1384 | intel_crtc = to_intel_crtc(crtc); | 1388 | intel_crtc = to_intel_crtc(crtc); |
1385 | fb = crtc->fb; | 1389 | fb = crtc->fb; |
1386 | intel_fb = to_intel_framebuffer(fb); | 1390 | intel_fb = to_intel_framebuffer(fb); |
1387 | obj_priv = to_intel_bo(intel_fb->obj); | 1391 | obj = intel_fb->obj; |
1388 | 1392 | ||
1389 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1393 | if (intel_fb->obj->base.size > dev_priv->cfb_size) { |
1390 | DRM_DEBUG_KMS("framebuffer too large, disabling " | 1394 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1391 | "compression\n"); | 1395 | "compression\n"); |
1392 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | 1396 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
@@ -1410,7 +1414,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1410 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; | 1414 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; |
1411 | goto out_disable; | 1415 | goto out_disable; |
1412 | } | 1416 | } |
1413 | if (obj_priv->tiling_mode != I915_TILING_X) { | 1417 | if (obj->tiling_mode != I915_TILING_X) { |
1414 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); | 1418 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); |
1415 | dev_priv->no_fbc_reason = FBC_NOT_TILED; | 1419 | dev_priv->no_fbc_reason = FBC_NOT_TILED; |
1416 | goto out_disable; | 1420 | goto out_disable; |
@@ -1433,14 +1437,13 @@ out_disable: | |||
1433 | 1437 | ||
1434 | int | 1438 | int |
1435 | intel_pin_and_fence_fb_obj(struct drm_device *dev, | 1439 | intel_pin_and_fence_fb_obj(struct drm_device *dev, |
1436 | struct drm_gem_object *obj, | 1440 | struct drm_i915_gem_object *obj, |
1437 | bool pipelined) | 1441 | struct intel_ring_buffer *pipelined) |
1438 | { | 1442 | { |
1439 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1440 | u32 alignment; | 1443 | u32 alignment; |
1441 | int ret; | 1444 | int ret; |
1442 | 1445 | ||
1443 | switch (obj_priv->tiling_mode) { | 1446 | switch (obj->tiling_mode) { |
1444 | case I915_TILING_NONE: | 1447 | case I915_TILING_NONE: |
1445 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | 1448 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1446 | alignment = 128 * 1024; | 1449 | alignment = 128 * 1024; |
@@ -1461,7 +1464,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
1461 | BUG(); | 1464 | BUG(); |
1462 | } | 1465 | } |
1463 | 1466 | ||
1464 | ret = i915_gem_object_pin(obj, alignment); | 1467 | ret = i915_gem_object_pin(obj, alignment, true); |
1465 | if (ret) | 1468 | if (ret) |
1466 | return ret; | 1469 | return ret; |
1467 | 1470 | ||
@@ -1474,9 +1477,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
1474 | * framebuffer compression. For simplicity, we always install | 1477 | * framebuffer compression. For simplicity, we always install |
1475 | * a fence as the cost is not that onerous. | 1478 | * a fence as the cost is not that onerous. |
1476 | */ | 1479 | */ |
1477 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && | 1480 | if (obj->tiling_mode != I915_TILING_NONE) { |
1478 | obj_priv->tiling_mode != I915_TILING_NONE) { | 1481 | ret = i915_gem_object_get_fence(obj, pipelined, false); |
1479 | ret = i915_gem_object_get_fence_reg(obj, false); | ||
1480 | if (ret) | 1482 | if (ret) |
1481 | goto err_unpin; | 1483 | goto err_unpin; |
1482 | } | 1484 | } |
@@ -1497,8 +1499,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1497 | struct drm_i915_private *dev_priv = dev->dev_private; | 1499 | struct drm_i915_private *dev_priv = dev->dev_private; |
1498 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1500 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1499 | struct intel_framebuffer *intel_fb; | 1501 | struct intel_framebuffer *intel_fb; |
1500 | struct drm_i915_gem_object *obj_priv; | 1502 | struct drm_i915_gem_object *obj; |
1501 | struct drm_gem_object *obj; | ||
1502 | int plane = intel_crtc->plane; | 1503 | int plane = intel_crtc->plane; |
1503 | unsigned long Start, Offset; | 1504 | unsigned long Start, Offset; |
1504 | u32 dspcntr; | 1505 | u32 dspcntr; |
@@ -1515,7 +1516,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1515 | 1516 | ||
1516 | intel_fb = to_intel_framebuffer(fb); | 1517 | intel_fb = to_intel_framebuffer(fb); |
1517 | obj = intel_fb->obj; | 1518 | obj = intel_fb->obj; |
1518 | obj_priv = to_intel_bo(obj); | ||
1519 | 1519 | ||
1520 | reg = DSPCNTR(plane); | 1520 | reg = DSPCNTR(plane); |
1521 | dspcntr = I915_READ(reg); | 1521 | dspcntr = I915_READ(reg); |
@@ -1540,7 +1540,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1540 | return -EINVAL; | 1540 | return -EINVAL; |
1541 | } | 1541 | } |
1542 | if (INTEL_INFO(dev)->gen >= 4) { | 1542 | if (INTEL_INFO(dev)->gen >= 4) { |
1543 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1543 | if (obj->tiling_mode != I915_TILING_NONE) |
1544 | dspcntr |= DISPPLANE_TILED; | 1544 | dspcntr |= DISPPLANE_TILED; |
1545 | else | 1545 | else |
1546 | dspcntr &= ~DISPPLANE_TILED; | 1546 | dspcntr &= ~DISPPLANE_TILED; |
@@ -1552,7 +1552,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1552 | 1552 | ||
1553 | I915_WRITE(reg, dspcntr); | 1553 | I915_WRITE(reg, dspcntr); |
1554 | 1554 | ||
1555 | Start = obj_priv->gtt_offset; | 1555 | Start = obj->gtt_offset; |
1556 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); | 1556 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); |
1557 | 1557 | ||
1558 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | 1558 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
@@ -1598,7 +1598,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1598 | mutex_lock(&dev->struct_mutex); | 1598 | mutex_lock(&dev->struct_mutex); |
1599 | ret = intel_pin_and_fence_fb_obj(dev, | 1599 | ret = intel_pin_and_fence_fb_obj(dev, |
1600 | to_intel_framebuffer(crtc->fb)->obj, | 1600 | to_intel_framebuffer(crtc->fb)->obj, |
1601 | false); | 1601 | NULL); |
1602 | if (ret != 0) { | 1602 | if (ret != 0) { |
1603 | mutex_unlock(&dev->struct_mutex); | 1603 | mutex_unlock(&dev->struct_mutex); |
1604 | return ret; | 1604 | return ret; |
@@ -1606,18 +1606,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1606 | 1606 | ||
1607 | if (old_fb) { | 1607 | if (old_fb) { |
1608 | struct drm_i915_private *dev_priv = dev->dev_private; | 1608 | struct drm_i915_private *dev_priv = dev->dev_private; |
1609 | struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; | 1609 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
1610 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1611 | 1610 | ||
1612 | wait_event(dev_priv->pending_flip_queue, | 1611 | wait_event(dev_priv->pending_flip_queue, |
1613 | atomic_read(&obj_priv->pending_flip) == 0); | 1612 | atomic_read(&obj->pending_flip) == 0); |
1614 | 1613 | ||
1615 | /* Big Hammer, we also need to ensure that any pending | 1614 | /* Big Hammer, we also need to ensure that any pending |
1616 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | 1615 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the |
1617 | * current scanout is retired before unpinning the old | 1616 | * current scanout is retired before unpinning the old |
1618 | * framebuffer. | 1617 | * framebuffer. |
1619 | */ | 1618 | */ |
1620 | ret = i915_gem_object_flush_gpu(obj_priv, false); | 1619 | ret = i915_gem_object_flush_gpu(obj, false); |
1621 | if (ret) { | 1620 | if (ret) { |
1622 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | 1621 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
1623 | mutex_unlock(&dev->struct_mutex); | 1622 | mutex_unlock(&dev->struct_mutex); |
@@ -1633,8 +1632,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1633 | return ret; | 1632 | return ret; |
1634 | } | 1633 | } |
1635 | 1634 | ||
1636 | if (old_fb) | 1635 | if (old_fb) { |
1636 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1637 | i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); | 1637 | i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); |
1638 | } | ||
1638 | 1639 | ||
1639 | mutex_unlock(&dev->struct_mutex); | 1640 | mutex_unlock(&dev->struct_mutex); |
1640 | 1641 | ||
@@ -1996,31 +1997,31 @@ static void intel_flush_display_plane(struct drm_device *dev, | |||
1996 | static void intel_clear_scanline_wait(struct drm_device *dev) | 1997 | static void intel_clear_scanline_wait(struct drm_device *dev) |
1997 | { | 1998 | { |
1998 | struct drm_i915_private *dev_priv = dev->dev_private; | 1999 | struct drm_i915_private *dev_priv = dev->dev_private; |
2000 | struct intel_ring_buffer *ring; | ||
1999 | u32 tmp; | 2001 | u32 tmp; |
2000 | 2002 | ||
2001 | if (IS_GEN2(dev)) | 2003 | if (IS_GEN2(dev)) |
2002 | /* Can't break the hang on i8xx */ | 2004 | /* Can't break the hang on i8xx */ |
2003 | return; | 2005 | return; |
2004 | 2006 | ||
2005 | tmp = I915_READ(PRB0_CTL); | 2007 | ring = LP_RING(dev_priv); |
2006 | if (tmp & RING_WAIT) { | 2008 | tmp = I915_READ_CTL(ring); |
2007 | I915_WRITE(PRB0_CTL, tmp); | 2009 | if (tmp & RING_WAIT) |
2008 | POSTING_READ(PRB0_CTL); | 2010 | I915_WRITE_CTL(ring, tmp); |
2009 | } | ||
2010 | } | 2011 | } |
2011 | 2012 | ||
2012 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | 2013 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
2013 | { | 2014 | { |
2014 | struct drm_i915_gem_object *obj_priv; | 2015 | struct drm_i915_gem_object *obj; |
2015 | struct drm_i915_private *dev_priv; | 2016 | struct drm_i915_private *dev_priv; |
2016 | 2017 | ||
2017 | if (crtc->fb == NULL) | 2018 | if (crtc->fb == NULL) |
2018 | return; | 2019 | return; |
2019 | 2020 | ||
2020 | obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); | 2021 | obj = to_intel_framebuffer(crtc->fb)->obj; |
2021 | dev_priv = crtc->dev->dev_private; | 2022 | dev_priv = crtc->dev->dev_private; |
2022 | wait_event(dev_priv->pending_flip_queue, | 2023 | wait_event(dev_priv->pending_flip_queue, |
2023 | atomic_read(&obj_priv->pending_flip) == 0); | 2024 | atomic_read(&obj->pending_flip) == 0); |
2024 | } | 2025 | } |
2025 | 2026 | ||
2026 | static void ironlake_crtc_enable(struct drm_crtc *crtc) | 2027 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
@@ -2850,6 +2851,39 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = { | |||
2850 | ILK_FIFO_LINE_SIZE | 2851 | ILK_FIFO_LINE_SIZE |
2851 | }; | 2852 | }; |
2852 | 2853 | ||
2854 | static struct intel_watermark_params sandybridge_display_wm_info = { | ||
2855 | SNB_DISPLAY_FIFO, | ||
2856 | SNB_DISPLAY_MAXWM, | ||
2857 | SNB_DISPLAY_DFTWM, | ||
2858 | 2, | ||
2859 | SNB_FIFO_LINE_SIZE | ||
2860 | }; | ||
2861 | |||
2862 | static struct intel_watermark_params sandybridge_cursor_wm_info = { | ||
2863 | SNB_CURSOR_FIFO, | ||
2864 | SNB_CURSOR_MAXWM, | ||
2865 | SNB_CURSOR_DFTWM, | ||
2866 | 2, | ||
2867 | SNB_FIFO_LINE_SIZE | ||
2868 | }; | ||
2869 | |||
2870 | static struct intel_watermark_params sandybridge_display_srwm_info = { | ||
2871 | SNB_DISPLAY_SR_FIFO, | ||
2872 | SNB_DISPLAY_MAX_SRWM, | ||
2873 | SNB_DISPLAY_DFT_SRWM, | ||
2874 | 2, | ||
2875 | SNB_FIFO_LINE_SIZE | ||
2876 | }; | ||
2877 | |||
2878 | static struct intel_watermark_params sandybridge_cursor_srwm_info = { | ||
2879 | SNB_CURSOR_SR_FIFO, | ||
2880 | SNB_CURSOR_MAX_SRWM, | ||
2881 | SNB_CURSOR_DFT_SRWM, | ||
2882 | 2, | ||
2883 | SNB_FIFO_LINE_SIZE | ||
2884 | }; | ||
2885 | |||
2886 | |||
2853 | /** | 2887 | /** |
2854 | * intel_calculate_wm - calculate watermark level | 2888 | * intel_calculate_wm - calculate watermark level |
2855 | * @clock_in_khz: pixel clock | 2889 | * @clock_in_khz: pixel clock |
@@ -3383,12 +3417,17 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | |||
3383 | 3417 | ||
3384 | static bool ironlake_compute_wm0(struct drm_device *dev, | 3418 | static bool ironlake_compute_wm0(struct drm_device *dev, |
3385 | int pipe, | 3419 | int pipe, |
3420 | const struct intel_watermark_params *display, | ||
3421 | int display_latency_ns, | ||
3422 | const struct intel_watermark_params *cursor, | ||
3423 | int cursor_latency_ns, | ||
3386 | int *plane_wm, | 3424 | int *plane_wm, |
3387 | int *cursor_wm) | 3425 | int *cursor_wm) |
3388 | { | 3426 | { |
3389 | struct drm_crtc *crtc; | 3427 | struct drm_crtc *crtc; |
3390 | int htotal, hdisplay, clock, pixel_size = 0; | 3428 | int htotal, hdisplay, clock, pixel_size; |
3391 | int line_time_us, line_count, entries; | 3429 | int line_time_us, line_count; |
3430 | int entries, tlb_miss; | ||
3392 | 3431 | ||
3393 | crtc = intel_get_crtc_for_pipe(dev, pipe); | 3432 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
3394 | if (crtc->fb == NULL || !crtc->enabled) | 3433 | if (crtc->fb == NULL || !crtc->enabled) |
@@ -3400,37 +3439,141 @@ static bool ironlake_compute_wm0(struct drm_device *dev, | |||
3400 | pixel_size = crtc->fb->bits_per_pixel / 8; | 3439 | pixel_size = crtc->fb->bits_per_pixel / 8; |
3401 | 3440 | ||
3402 | /* Use the small buffer method to calculate plane watermark */ | 3441 | /* Use the small buffer method to calculate plane watermark */ |
3403 | entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000; | 3442 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; |
3404 | entries = DIV_ROUND_UP(entries, | 3443 | tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; |
3405 | ironlake_display_wm_info.cacheline_size); | 3444 | if (tlb_miss > 0) |
3406 | *plane_wm = entries + ironlake_display_wm_info.guard_size; | 3445 | entries += tlb_miss; |
3407 | if (*plane_wm > (int)ironlake_display_wm_info.max_wm) | 3446 | entries = DIV_ROUND_UP(entries, display->cacheline_size); |
3408 | *plane_wm = ironlake_display_wm_info.max_wm; | 3447 | *plane_wm = entries + display->guard_size; |
3448 | if (*plane_wm > (int)display->max_wm) | ||
3449 | *plane_wm = display->max_wm; | ||
3409 | 3450 | ||
3410 | /* Use the large buffer method to calculate cursor watermark */ | 3451 | /* Use the large buffer method to calculate cursor watermark */ |
3411 | line_time_us = ((htotal * 1000) / clock); | 3452 | line_time_us = ((htotal * 1000) / clock); |
3412 | line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; | 3453 | line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; |
3413 | entries = line_count * 64 * pixel_size; | 3454 | entries = line_count * 64 * pixel_size; |
3414 | entries = DIV_ROUND_UP(entries, | 3455 | tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; |
3415 | ironlake_cursor_wm_info.cacheline_size); | 3456 | if (tlb_miss > 0) |
3416 | *cursor_wm = entries + ironlake_cursor_wm_info.guard_size; | 3457 | entries += tlb_miss; |
3417 | if (*cursor_wm > ironlake_cursor_wm_info.max_wm) | 3458 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); |
3418 | *cursor_wm = ironlake_cursor_wm_info.max_wm; | 3459 | *cursor_wm = entries + cursor->guard_size; |
3460 | if (*cursor_wm > (int)cursor->max_wm) | ||
3461 | *cursor_wm = (int)cursor->max_wm; | ||
3462 | |||
3463 | return true; | ||
3464 | } | ||
3465 | |||
3466 | /* | ||
3467 | * Check the wm result. | ||
3468 | * | ||
3469 | * If any calculated watermark value is larger than the maximum value that | ||
3470 | * can be programmed into the associated watermark register, that watermark | ||
3471 | * must be disabled. | ||
3472 | */ | ||
3473 | static bool ironlake_check_srwm(struct drm_device *dev, int level, | ||
3474 | int fbc_wm, int display_wm, int cursor_wm, | ||
3475 | const struct intel_watermark_params *display, | ||
3476 | const struct intel_watermark_params *cursor) | ||
3477 | { | ||
3478 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3479 | |||
3480 | DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," | ||
3481 | " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); | ||
3482 | |||
3483 | if (fbc_wm > SNB_FBC_MAX_SRWM) { | ||
3484 | DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", | ||
3485 | fbc_wm, SNB_FBC_MAX_SRWM, level); | ||
3486 | |||
3487 | /* fbc has its own way to disable FBC WM */ | ||
3488 | I915_WRITE(DISP_ARB_CTL, | ||
3489 | I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); | ||
3490 | return false; | ||
3491 | } | ||
3492 | |||
3493 | if (display_wm > display->max_wm) { | ||
3494 | DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", | ||
3495 | display_wm, SNB_DISPLAY_MAX_SRWM, level); | ||
3496 | return false; | ||
3497 | } | ||
3498 | |||
3499 | if (cursor_wm > cursor->max_wm) { | ||
3500 | DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", | ||
3501 | cursor_wm, SNB_CURSOR_MAX_SRWM, level); | ||
3502 | return false; | ||
3503 | } | ||
3504 | |||
3505 | if (!(fbc_wm || display_wm || cursor_wm)) { | ||
3506 | DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); | ||
3507 | return false; | ||
3508 | } | ||
3419 | 3509 | ||
3420 | return true; | 3510 | return true; |
3421 | } | 3511 | } |
3422 | 3512 | ||
3513 | /* | ||
3514 | * Compute watermark values of WM[1-3], | ||
3515 | */ | ||
3516 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, | ||
3517 | int hdisplay, int htotal, | ||
3518 | int pixel_size, int clock, int latency_ns, | ||
3519 | const struct intel_watermark_params *display, | ||
3520 | const struct intel_watermark_params *cursor, | ||
3521 | int *fbc_wm, int *display_wm, int *cursor_wm) | ||
3522 | { | ||
3523 | |||
3524 | unsigned long line_time_us; | ||
3525 | int line_count, line_size; | ||
3526 | int small, large; | ||
3527 | int entries; | ||
3528 | |||
3529 | if (!latency_ns) { | ||
3530 | *fbc_wm = *display_wm = *cursor_wm = 0; | ||
3531 | return false; | ||
3532 | } | ||
3533 | |||
3534 | line_time_us = (htotal * 1000) / clock; | ||
3535 | line_count = (latency_ns / line_time_us + 1000) / 1000; | ||
3536 | line_size = hdisplay * pixel_size; | ||
3537 | |||
3538 | /* Use the minimum of the small and large buffer method for primary */ | ||
3539 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; | ||
3540 | large = line_count * line_size; | ||
3541 | |||
3542 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); | ||
3543 | *display_wm = entries + display->guard_size; | ||
3544 | |||
3545 | /* | ||
3546 | * Spec says: | ||
3547 | * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 | ||
3548 | */ | ||
3549 | *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; | ||
3550 | |||
3551 | /* calculate the self-refresh watermark for display cursor */ | ||
3552 | entries = line_count * pixel_size * 64; | ||
3553 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
3554 | *cursor_wm = entries + cursor->guard_size; | ||
3555 | |||
3556 | return ironlake_check_srwm(dev, level, | ||
3557 | *fbc_wm, *display_wm, *cursor_wm, | ||
3558 | display, cursor); | ||
3559 | } | ||
3560 | |||
3423 | static void ironlake_update_wm(struct drm_device *dev, | 3561 | static void ironlake_update_wm(struct drm_device *dev, |
3424 | int planea_clock, int planeb_clock, | 3562 | int planea_clock, int planeb_clock, |
3425 | int sr_hdisplay, int sr_htotal, | 3563 | int hdisplay, int htotal, |
3426 | int pixel_size) | 3564 | int pixel_size) |
3427 | { | 3565 | { |
3428 | struct drm_i915_private *dev_priv = dev->dev_private; | 3566 | struct drm_i915_private *dev_priv = dev->dev_private; |
3429 | int plane_wm, cursor_wm, enabled; | 3567 | int fbc_wm, plane_wm, cursor_wm, enabled; |
3430 | int tmp; | 3568 | int clock; |
3431 | 3569 | ||
3432 | enabled = 0; | 3570 | enabled = 0; |
3433 | if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) { | 3571 | if (ironlake_compute_wm0(dev, 0, |
3572 | &ironlake_display_wm_info, | ||
3573 | ILK_LP0_PLANE_LATENCY, | ||
3574 | &ironlake_cursor_wm_info, | ||
3575 | ILK_LP0_CURSOR_LATENCY, | ||
3576 | &plane_wm, &cursor_wm)) { | ||
3434 | I915_WRITE(WM0_PIPEA_ILK, | 3577 | I915_WRITE(WM0_PIPEA_ILK, |
3435 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | 3578 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
3436 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | 3579 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
@@ -3439,7 +3582,12 @@ static void ironlake_update_wm(struct drm_device *dev, | |||
3439 | enabled++; | 3582 | enabled++; |
3440 | } | 3583 | } |
3441 | 3584 | ||
3442 | if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) { | 3585 | if (ironlake_compute_wm0(dev, 1, |
3586 | &ironlake_display_wm_info, | ||
3587 | ILK_LP0_PLANE_LATENCY, | ||
3588 | &ironlake_cursor_wm_info, | ||
3589 | ILK_LP0_CURSOR_LATENCY, | ||
3590 | &plane_wm, &cursor_wm)) { | ||
3443 | I915_WRITE(WM0_PIPEB_ILK, | 3591 | I915_WRITE(WM0_PIPEB_ILK, |
3444 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | 3592 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
3445 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | 3593 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
@@ -3452,57 +3600,151 @@ static void ironlake_update_wm(struct drm_device *dev, | |||
3452 | * Calculate and update the self-refresh watermark only when one | 3600 | * Calculate and update the self-refresh watermark only when one |
3453 | * display plane is used. | 3601 | * display plane is used. |
3454 | */ | 3602 | */ |
3455 | tmp = 0; | 3603 | I915_WRITE(WM3_LP_ILK, 0); |
3456 | if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) { | 3604 | I915_WRITE(WM2_LP_ILK, 0); |
3457 | unsigned long line_time_us; | 3605 | I915_WRITE(WM1_LP_ILK, 0); |
3458 | int small, large, plane_fbc; | ||
3459 | int sr_clock, entries; | ||
3460 | int line_count, line_size; | ||
3461 | /* Read the self-refresh latency. The unit is 0.5us */ | ||
3462 | int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; | ||
3463 | 3606 | ||
3464 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3607 | if (enabled != 1) |
3465 | line_time_us = (sr_htotal * 1000) / sr_clock; | 3608 | return; |
3466 | 3609 | ||
3467 | /* Use ns/us then divide to preserve precision */ | 3610 | clock = planea_clock ? planea_clock : planeb_clock; |
3468 | line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) | ||
3469 | / 1000; | ||
3470 | line_size = sr_hdisplay * pixel_size; | ||
3471 | 3611 | ||
3472 | /* Use the minimum of the small and large buffer method for primary */ | 3612 | /* WM1 */ |
3473 | small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000; | 3613 | if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, |
3474 | large = line_count * line_size; | 3614 | clock, ILK_READ_WM1_LATENCY() * 500, |
3615 | &ironlake_display_srwm_info, | ||
3616 | &ironlake_cursor_srwm_info, | ||
3617 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3618 | return; | ||
3475 | 3619 | ||
3476 | entries = DIV_ROUND_UP(min(small, large), | 3620 | I915_WRITE(WM1_LP_ILK, |
3477 | ironlake_display_srwm_info.cacheline_size); | 3621 | WM1_LP_SR_EN | |
3622 | (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3623 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3624 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3625 | cursor_wm); | ||
3626 | |||
3627 | /* WM2 */ | ||
3628 | if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size, | ||
3629 | clock, ILK_READ_WM2_LATENCY() * 500, | ||
3630 | &ironlake_display_srwm_info, | ||
3631 | &ironlake_cursor_srwm_info, | ||
3632 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3633 | return; | ||
3478 | 3634 | ||
3479 | plane_fbc = entries * 64; | 3635 | I915_WRITE(WM2_LP_ILK, |
3480 | plane_fbc = DIV_ROUND_UP(plane_fbc, line_size); | 3636 | WM2_LP_EN | |
3637 | (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3638 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3639 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3640 | cursor_wm); | ||
3481 | 3641 | ||
3482 | plane_wm = entries + ironlake_display_srwm_info.guard_size; | 3642 | /* |
3483 | if (plane_wm > (int)ironlake_display_srwm_info.max_wm) | 3643 | * WM3 is unsupported on ILK, probably because we don't have latency |
3484 | plane_wm = ironlake_display_srwm_info.max_wm; | 3644 | * data for that power state |
3645 | */ | ||
3646 | } | ||
3485 | 3647 | ||
3486 | /* calculate the self-refresh watermark for display cursor */ | 3648 | static void sandybridge_update_wm(struct drm_device *dev, |
3487 | entries = line_count * pixel_size * 64; | 3649 | int planea_clock, int planeb_clock, |
3488 | entries = DIV_ROUND_UP(entries, | 3650 | int hdisplay, int htotal, |
3489 | ironlake_cursor_srwm_info.cacheline_size); | 3651 | int pixel_size) |
3652 | { | ||
3653 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3654 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ | ||
3655 | int fbc_wm, plane_wm, cursor_wm, enabled; | ||
3656 | int clock; | ||
3490 | 3657 | ||
3491 | cursor_wm = entries + ironlake_cursor_srwm_info.guard_size; | 3658 | enabled = 0; |
3492 | if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm) | 3659 | if (ironlake_compute_wm0(dev, 0, |
3493 | cursor_wm = ironlake_cursor_srwm_info.max_wm; | 3660 | &sandybridge_display_wm_info, latency, |
3661 | &sandybridge_cursor_wm_info, latency, | ||
3662 | &plane_wm, &cursor_wm)) { | ||
3663 | I915_WRITE(WM0_PIPEA_ILK, | ||
3664 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
3665 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | ||
3666 | " plane %d, " "cursor: %d\n", | ||
3667 | plane_wm, cursor_wm); | ||
3668 | enabled++; | ||
3669 | } | ||
3494 | 3670 | ||
3495 | /* configure watermark and enable self-refresh */ | 3671 | if (ironlake_compute_wm0(dev, 1, |
3496 | tmp = (WM1_LP_SR_EN | | 3672 | &sandybridge_display_wm_info, latency, |
3497 | (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | | 3673 | &sandybridge_cursor_wm_info, latency, |
3498 | (plane_fbc << WM1_LP_FBC_SHIFT) | | 3674 | &plane_wm, &cursor_wm)) { |
3499 | (plane_wm << WM1_LP_SR_SHIFT) | | 3675 | I915_WRITE(WM0_PIPEB_ILK, |
3500 | cursor_wm); | 3676 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
3501 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d," | 3677 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
3502 | " cursor %d\n", plane_wm, plane_fbc, cursor_wm); | 3678 | " plane %d, cursor: %d\n", |
3679 | plane_wm, cursor_wm); | ||
3680 | enabled++; | ||
3503 | } | 3681 | } |
3504 | I915_WRITE(WM1_LP_ILK, tmp); | 3682 | |
3505 | /* XXX setup WM2 and WM3 */ | 3683 | /* |
3684 | * Calculate and update the self-refresh watermark only when one | ||
3685 | * display plane is used. | ||
3686 | * | ||
3687 | * SNB support 3 levels of watermark. | ||
3688 | * | ||
3689 | * WM1/WM2/WM3 watermarks have to be enabled in the ascending order, | ||
3690 | * and disabled in the descending order | ||
3691 | * | ||
3692 | */ | ||
3693 | I915_WRITE(WM3_LP_ILK, 0); | ||
3694 | I915_WRITE(WM2_LP_ILK, 0); | ||
3695 | I915_WRITE(WM1_LP_ILK, 0); | ||
3696 | |||
3697 | if (enabled != 1) | ||
3698 | return; | ||
3699 | |||
3700 | clock = planea_clock ? planea_clock : planeb_clock; | ||
3701 | |||
3702 | /* WM1 */ | ||
3703 | if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, | ||
3704 | clock, SNB_READ_WM1_LATENCY() * 500, | ||
3705 | &sandybridge_display_srwm_info, | ||
3706 | &sandybridge_cursor_srwm_info, | ||
3707 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3708 | return; | ||
3709 | |||
3710 | I915_WRITE(WM1_LP_ILK, | ||
3711 | WM1_LP_SR_EN | | ||
3712 | (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3713 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3714 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3715 | cursor_wm); | ||
3716 | |||
3717 | /* WM2 */ | ||
3718 | if (!ironlake_compute_srwm(dev, 2, | ||
3719 | hdisplay, htotal, pixel_size, | ||
3720 | clock, SNB_READ_WM2_LATENCY() * 500, | ||
3721 | &sandybridge_display_srwm_info, | ||
3722 | &sandybridge_cursor_srwm_info, | ||
3723 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3724 | return; | ||
3725 | |||
3726 | I915_WRITE(WM2_LP_ILK, | ||
3727 | WM2_LP_EN | | ||
3728 | (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3729 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3730 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3731 | cursor_wm); | ||
3732 | |||
3733 | /* WM3 */ | ||
3734 | if (!ironlake_compute_srwm(dev, 3, | ||
3735 | hdisplay, htotal, pixel_size, | ||
3736 | clock, SNB_READ_WM3_LATENCY() * 500, | ||
3737 | &sandybridge_display_srwm_info, | ||
3738 | &sandybridge_cursor_srwm_info, | ||
3739 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3740 | return; | ||
3741 | |||
3742 | I915_WRITE(WM3_LP_ILK, | ||
3743 | WM3_LP_EN | | ||
3744 | (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3745 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3746 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3747 | cursor_wm); | ||
3506 | } | 3748 | } |
3507 | 3749 | ||
3508 | /** | 3750 | /** |
@@ -3580,6 +3822,11 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
3580 | sr_hdisplay, sr_htotal, pixel_size); | 3822 | sr_hdisplay, sr_htotal, pixel_size); |
3581 | } | 3823 | } |
3582 | 3824 | ||
3825 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) | ||
3826 | { | ||
3827 | return dev_priv->lvds_use_ssc && i915_panel_use_ssc; | ||
3828 | } | ||
3829 | |||
3583 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | 3830 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
3584 | struct drm_display_mode *mode, | 3831 | struct drm_display_mode *mode, |
3585 | struct drm_display_mode *adjusted_mode, | 3832 | struct drm_display_mode *adjusted_mode, |
@@ -3642,7 +3889,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3642 | num_connectors++; | 3889 | num_connectors++; |
3643 | } | 3890 | } |
3644 | 3891 | ||
3645 | if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { | 3892 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
3646 | refclk = dev_priv->lvds_ssc_freq * 1000; | 3893 | refclk = dev_priv->lvds_ssc_freq * 1000; |
3647 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | 3894 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
3648 | refclk / 1000); | 3895 | refclk / 1000); |
@@ -3660,7 +3907,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3660 | * refclk, or FALSE. The returned values represent the clock equation: | 3907 | * refclk, or FALSE. The returned values represent the clock equation: |
3661 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | 3908 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
3662 | */ | 3909 | */ |
3663 | limit = intel_limit(crtc); | 3910 | limit = intel_limit(crtc, refclk); |
3664 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | 3911 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); |
3665 | if (!ok) { | 3912 | if (!ok) { |
3666 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 3913 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
@@ -3714,7 +3961,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3714 | int lane = 0, link_bw, bpp; | 3961 | int lane = 0, link_bw, bpp; |
3715 | /* CPU eDP doesn't require FDI link, so just set DP M/N | 3962 | /* CPU eDP doesn't require FDI link, so just set DP M/N |
3716 | according to current link config */ | 3963 | according to current link config */ |
3717 | if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) { | 3964 | if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3718 | target_clock = mode->clock; | 3965 | target_clock = mode->clock; |
3719 | intel_edp_link_config(has_edp_encoder, | 3966 | intel_edp_link_config(has_edp_encoder, |
3720 | &lane, &link_bw); | 3967 | &lane, &link_bw); |
@@ -3817,7 +4064,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3817 | udelay(200); | 4064 | udelay(200); |
3818 | 4065 | ||
3819 | if (has_edp_encoder) { | 4066 | if (has_edp_encoder) { |
3820 | if (dev_priv->lvds_use_ssc) { | 4067 | if (intel_panel_use_ssc(dev_priv)) { |
3821 | temp |= DREF_SSC1_ENABLE; | 4068 | temp |= DREF_SSC1_ENABLE; |
3822 | I915_WRITE(PCH_DREF_CONTROL, temp); | 4069 | I915_WRITE(PCH_DREF_CONTROL, temp); |
3823 | 4070 | ||
@@ -3828,13 +4075,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3828 | 4075 | ||
3829 | /* Enable CPU source on CPU attached eDP */ | 4076 | /* Enable CPU source on CPU attached eDP */ |
3830 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 4077 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3831 | if (dev_priv->lvds_use_ssc) | 4078 | if (intel_panel_use_ssc(dev_priv)) |
3832 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | 4079 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; |
3833 | else | 4080 | else |
3834 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | 4081 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; |
3835 | } else { | 4082 | } else { |
3836 | /* Enable SSC on PCH eDP if needed */ | 4083 | /* Enable SSC on PCH eDP if needed */ |
3837 | if (dev_priv->lvds_use_ssc) { | 4084 | if (intel_panel_use_ssc(dev_priv)) { |
3838 | DRM_ERROR("enabling SSC on PCH\n"); | 4085 | DRM_ERROR("enabling SSC on PCH\n"); |
3839 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | 4086 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; |
3840 | } | 4087 | } |
@@ -3857,6 +4104,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3857 | reduced_clock.m2; | 4104 | reduced_clock.m2; |
3858 | } | 4105 | } |
3859 | 4106 | ||
4107 | /* Enable autotuning of the PLL clock (if permissible) */ | ||
4108 | if (HAS_PCH_SPLIT(dev)) { | ||
4109 | int factor = 21; | ||
4110 | |||
4111 | if (is_lvds) { | ||
4112 | if ((intel_panel_use_ssc(dev_priv) && | ||
4113 | dev_priv->lvds_ssc_freq == 100) || | ||
4114 | (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) | ||
4115 | factor = 25; | ||
4116 | } else if (is_sdvo && is_tv) | ||
4117 | factor = 20; | ||
4118 | |||
4119 | if (clock.m1 < factor * clock.n) | ||
4120 | fp |= FP_CB_TUNE; | ||
4121 | } | ||
4122 | |||
3860 | dpll = 0; | 4123 | dpll = 0; |
3861 | if (!HAS_PCH_SPLIT(dev)) | 4124 | if (!HAS_PCH_SPLIT(dev)) |
3862 | dpll = DPLL_VGA_MODE_DIS; | 4125 | dpll = DPLL_VGA_MODE_DIS; |
@@ -3925,7 +4188,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3925 | /* XXX: just matching BIOS for now */ | 4188 | /* XXX: just matching BIOS for now */ |
3926 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | 4189 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
3927 | dpll |= 3; | 4190 | dpll |= 3; |
3928 | else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) | 4191 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
3929 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | 4192 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
3930 | else | 4193 | else |
3931 | dpll |= PLL_REF_INPUT_DREFCLK; | 4194 | dpll |= PLL_REF_INPUT_DREFCLK; |
@@ -4071,7 +4334,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4071 | } | 4334 | } |
4072 | 4335 | ||
4073 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 4336 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
4074 | I915_WRITE(fp_reg, fp); | ||
4075 | I915_WRITE(dpll_reg, dpll); | 4337 | I915_WRITE(dpll_reg, dpll); |
4076 | 4338 | ||
4077 | /* Wait for the clocks to stabilize. */ | 4339 | /* Wait for the clocks to stabilize. */ |
@@ -4089,13 +4351,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4089 | } | 4351 | } |
4090 | I915_WRITE(DPLL_MD(pipe), temp); | 4352 | I915_WRITE(DPLL_MD(pipe), temp); |
4091 | } else { | 4353 | } else { |
4092 | /* write it again -- the BIOS does, after all */ | 4354 | /* The pixel multiplier can only be updated once the |
4355 | * DPLL is enabled and the clocks are stable. | ||
4356 | * | ||
4357 | * So write it again. | ||
4358 | */ | ||
4093 | I915_WRITE(dpll_reg, dpll); | 4359 | I915_WRITE(dpll_reg, dpll); |
4094 | } | 4360 | } |
4095 | |||
4096 | /* Wait for the clocks to stabilize. */ | ||
4097 | POSTING_READ(dpll_reg); | ||
4098 | udelay(150); | ||
4099 | } | 4361 | } |
4100 | 4362 | ||
4101 | intel_crtc->lowfreq_avail = false; | 4363 | intel_crtc->lowfreq_avail = false; |
@@ -4331,15 +4593,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
4331 | } | 4593 | } |
4332 | 4594 | ||
4333 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | 4595 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
4334 | struct drm_file *file_priv, | 4596 | struct drm_file *file, |
4335 | uint32_t handle, | 4597 | uint32_t handle, |
4336 | uint32_t width, uint32_t height) | 4598 | uint32_t width, uint32_t height) |
4337 | { | 4599 | { |
4338 | struct drm_device *dev = crtc->dev; | 4600 | struct drm_device *dev = crtc->dev; |
4339 | struct drm_i915_private *dev_priv = dev->dev_private; | 4601 | struct drm_i915_private *dev_priv = dev->dev_private; |
4340 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4602 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4341 | struct drm_gem_object *bo; | 4603 | struct drm_i915_gem_object *obj; |
4342 | struct drm_i915_gem_object *obj_priv; | ||
4343 | uint32_t addr; | 4604 | uint32_t addr; |
4344 | int ret; | 4605 | int ret; |
4345 | 4606 | ||
@@ -4349,7 +4610,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4349 | if (!handle) { | 4610 | if (!handle) { |
4350 | DRM_DEBUG_KMS("cursor off\n"); | 4611 | DRM_DEBUG_KMS("cursor off\n"); |
4351 | addr = 0; | 4612 | addr = 0; |
4352 | bo = NULL; | 4613 | obj = NULL; |
4353 | mutex_lock(&dev->struct_mutex); | 4614 | mutex_lock(&dev->struct_mutex); |
4354 | goto finish; | 4615 | goto finish; |
4355 | } | 4616 | } |
@@ -4360,13 +4621,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4360 | return -EINVAL; | 4621 | return -EINVAL; |
4361 | } | 4622 | } |
4362 | 4623 | ||
4363 | bo = drm_gem_object_lookup(dev, file_priv, handle); | 4624 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
4364 | if (!bo) | 4625 | if (!obj) |
4365 | return -ENOENT; | 4626 | return -ENOENT; |
4366 | 4627 | ||
4367 | obj_priv = to_intel_bo(bo); | 4628 | if (obj->base.size < width * height * 4) { |
4368 | |||
4369 | if (bo->size < width * height * 4) { | ||
4370 | DRM_ERROR("buffer is to small\n"); | 4629 | DRM_ERROR("buffer is to small\n"); |
4371 | ret = -ENOMEM; | 4630 | ret = -ENOMEM; |
4372 | goto fail; | 4631 | goto fail; |
@@ -4375,29 +4634,41 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4375 | /* we only need to pin inside GTT if cursor is non-phy */ | 4634 | /* we only need to pin inside GTT if cursor is non-phy */ |
4376 | mutex_lock(&dev->struct_mutex); | 4635 | mutex_lock(&dev->struct_mutex); |
4377 | if (!dev_priv->info->cursor_needs_physical) { | 4636 | if (!dev_priv->info->cursor_needs_physical) { |
4378 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | 4637 | if (obj->tiling_mode) { |
4638 | DRM_ERROR("cursor cannot be tiled\n"); | ||
4639 | ret = -EINVAL; | ||
4640 | goto fail_locked; | ||
4641 | } | ||
4642 | |||
4643 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); | ||
4379 | if (ret) { | 4644 | if (ret) { |
4380 | DRM_ERROR("failed to pin cursor bo\n"); | 4645 | DRM_ERROR("failed to pin cursor bo\n"); |
4381 | goto fail_locked; | 4646 | goto fail_locked; |
4382 | } | 4647 | } |
4383 | 4648 | ||
4384 | ret = i915_gem_object_set_to_gtt_domain(bo, 0); | 4649 | ret = i915_gem_object_set_to_gtt_domain(obj, 0); |
4385 | if (ret) { | 4650 | if (ret) { |
4386 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | 4651 | DRM_ERROR("failed to move cursor bo into the GTT\n"); |
4387 | goto fail_unpin; | 4652 | goto fail_unpin; |
4388 | } | 4653 | } |
4389 | 4654 | ||
4390 | addr = obj_priv->gtt_offset; | 4655 | ret = i915_gem_object_put_fence(obj); |
4656 | if (ret) { | ||
4657 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | ||
4658 | goto fail_unpin; | ||
4659 | } | ||
4660 | |||
4661 | addr = obj->gtt_offset; | ||
4391 | } else { | 4662 | } else { |
4392 | int align = IS_I830(dev) ? 16 * 1024 : 256; | 4663 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
4393 | ret = i915_gem_attach_phys_object(dev, bo, | 4664 | ret = i915_gem_attach_phys_object(dev, obj, |
4394 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, | 4665 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, |
4395 | align); | 4666 | align); |
4396 | if (ret) { | 4667 | if (ret) { |
4397 | DRM_ERROR("failed to attach phys object\n"); | 4668 | DRM_ERROR("failed to attach phys object\n"); |
4398 | goto fail_locked; | 4669 | goto fail_locked; |
4399 | } | 4670 | } |
4400 | addr = obj_priv->phys_obj->handle->busaddr; | 4671 | addr = obj->phys_obj->handle->busaddr; |
4401 | } | 4672 | } |
4402 | 4673 | ||
4403 | if (IS_GEN2(dev)) | 4674 | if (IS_GEN2(dev)) |
@@ -4406,17 +4677,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4406 | finish: | 4677 | finish: |
4407 | if (intel_crtc->cursor_bo) { | 4678 | if (intel_crtc->cursor_bo) { |
4408 | if (dev_priv->info->cursor_needs_physical) { | 4679 | if (dev_priv->info->cursor_needs_physical) { |
4409 | if (intel_crtc->cursor_bo != bo) | 4680 | if (intel_crtc->cursor_bo != obj) |
4410 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 4681 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
4411 | } else | 4682 | } else |
4412 | i915_gem_object_unpin(intel_crtc->cursor_bo); | 4683 | i915_gem_object_unpin(intel_crtc->cursor_bo); |
4413 | drm_gem_object_unreference(intel_crtc->cursor_bo); | 4684 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); |
4414 | } | 4685 | } |
4415 | 4686 | ||
4416 | mutex_unlock(&dev->struct_mutex); | 4687 | mutex_unlock(&dev->struct_mutex); |
4417 | 4688 | ||
4418 | intel_crtc->cursor_addr = addr; | 4689 | intel_crtc->cursor_addr = addr; |
4419 | intel_crtc->cursor_bo = bo; | 4690 | intel_crtc->cursor_bo = obj; |
4420 | intel_crtc->cursor_width = width; | 4691 | intel_crtc->cursor_width = width; |
4421 | intel_crtc->cursor_height = height; | 4692 | intel_crtc->cursor_height = height; |
4422 | 4693 | ||
@@ -4424,11 +4695,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4424 | 4695 | ||
4425 | return 0; | 4696 | return 0; |
4426 | fail_unpin: | 4697 | fail_unpin: |
4427 | i915_gem_object_unpin(bo); | 4698 | i915_gem_object_unpin(obj); |
4428 | fail_locked: | 4699 | fail_locked: |
4429 | mutex_unlock(&dev->struct_mutex); | 4700 | mutex_unlock(&dev->struct_mutex); |
4430 | fail: | 4701 | fail: |
4431 | drm_gem_object_unreference_unlocked(bo); | 4702 | drm_gem_object_unreference_unlocked(&obj->base); |
4432 | return ret; | 4703 | return ret; |
4433 | } | 4704 | } |
4434 | 4705 | ||
@@ -4739,8 +5010,14 @@ static void intel_gpu_idle_timer(unsigned long arg) | |||
4739 | struct drm_device *dev = (struct drm_device *)arg; | 5010 | struct drm_device *dev = (struct drm_device *)arg; |
4740 | drm_i915_private_t *dev_priv = dev->dev_private; | 5011 | drm_i915_private_t *dev_priv = dev->dev_private; |
4741 | 5012 | ||
4742 | dev_priv->busy = false; | 5013 | if (!list_empty(&dev_priv->mm.active_list)) { |
5014 | /* Still processing requests, so just re-arm the timer. */ | ||
5015 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
5016 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
5017 | return; | ||
5018 | } | ||
4743 | 5019 | ||
5020 | dev_priv->busy = false; | ||
4744 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 5021 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
4745 | } | 5022 | } |
4746 | 5023 | ||
@@ -4751,9 +5028,17 @@ static void intel_crtc_idle_timer(unsigned long arg) | |||
4751 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; | 5028 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; |
4752 | struct drm_crtc *crtc = &intel_crtc->base; | 5029 | struct drm_crtc *crtc = &intel_crtc->base; |
4753 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; | 5030 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; |
5031 | struct intel_framebuffer *intel_fb; | ||
4754 | 5032 | ||
4755 | intel_crtc->busy = false; | 5033 | intel_fb = to_intel_framebuffer(crtc->fb); |
5034 | if (intel_fb && intel_fb->obj->active) { | ||
5035 | /* The framebuffer is still being accessed by the GPU. */ | ||
5036 | mod_timer(&intel_crtc->idle_timer, jiffies + | ||
5037 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | ||
5038 | return; | ||
5039 | } | ||
4756 | 5040 | ||
5041 | intel_crtc->busy = false; | ||
4757 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 5042 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
4758 | } | 5043 | } |
4759 | 5044 | ||
@@ -4763,8 +5048,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) | |||
4763 | drm_i915_private_t *dev_priv = dev->dev_private; | 5048 | drm_i915_private_t *dev_priv = dev->dev_private; |
4764 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5049 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4765 | int pipe = intel_crtc->pipe; | 5050 | int pipe = intel_crtc->pipe; |
4766 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 5051 | int dpll_reg = DPLL(pipe); |
4767 | int dpll = I915_READ(dpll_reg); | 5052 | int dpll; |
4768 | 5053 | ||
4769 | if (HAS_PCH_SPLIT(dev)) | 5054 | if (HAS_PCH_SPLIT(dev)) |
4770 | return; | 5055 | return; |
@@ -4772,17 +5057,19 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) | |||
4772 | if (!dev_priv->lvds_downclock_avail) | 5057 | if (!dev_priv->lvds_downclock_avail) |
4773 | return; | 5058 | return; |
4774 | 5059 | ||
5060 | dpll = I915_READ(dpll_reg); | ||
4775 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { | 5061 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
4776 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | 5062 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
4777 | 5063 | ||
4778 | /* Unlock panel regs */ | 5064 | /* Unlock panel regs */ |
4779 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | | 5065 | I915_WRITE(PP_CONTROL, |
4780 | PANEL_UNLOCK_REGS); | 5066 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); |
4781 | 5067 | ||
4782 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 5068 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
4783 | I915_WRITE(dpll_reg, dpll); | 5069 | I915_WRITE(dpll_reg, dpll); |
4784 | dpll = I915_READ(dpll_reg); | 5070 | POSTING_READ(dpll_reg); |
4785 | intel_wait_for_vblank(dev, pipe); | 5071 | intel_wait_for_vblank(dev, pipe); |
5072 | |||
4786 | dpll = I915_READ(dpll_reg); | 5073 | dpll = I915_READ(dpll_reg); |
4787 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | 5074 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
4788 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); | 5075 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
@@ -4888,7 +5175,7 @@ static void intel_idle_update(struct work_struct *work) | |||
4888 | * buffer), we'll also mark the display as busy, so we know to increase its | 5175 | * buffer), we'll also mark the display as busy, so we know to increase its |
4889 | * clock frequency. | 5176 | * clock frequency. |
4890 | */ | 5177 | */ |
4891 | void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | 5178 | void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) |
4892 | { | 5179 | { |
4893 | drm_i915_private_t *dev_priv = dev->dev_private; | 5180 | drm_i915_private_t *dev_priv = dev->dev_private; |
4894 | struct drm_crtc *crtc = NULL; | 5181 | struct drm_crtc *crtc = NULL; |
@@ -4969,8 +5256,9 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
4969 | 5256 | ||
4970 | mutex_lock(&work->dev->struct_mutex); | 5257 | mutex_lock(&work->dev->struct_mutex); |
4971 | i915_gem_object_unpin(work->old_fb_obj); | 5258 | i915_gem_object_unpin(work->old_fb_obj); |
4972 | drm_gem_object_unreference(work->pending_flip_obj); | 5259 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
4973 | drm_gem_object_unreference(work->old_fb_obj); | 5260 | drm_gem_object_unreference(&work->old_fb_obj->base); |
5261 | |||
4974 | mutex_unlock(&work->dev->struct_mutex); | 5262 | mutex_unlock(&work->dev->struct_mutex); |
4975 | kfree(work); | 5263 | kfree(work); |
4976 | } | 5264 | } |
@@ -4981,15 +5269,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
4981 | drm_i915_private_t *dev_priv = dev->dev_private; | 5269 | drm_i915_private_t *dev_priv = dev->dev_private; |
4982 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5270 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4983 | struct intel_unpin_work *work; | 5271 | struct intel_unpin_work *work; |
4984 | struct drm_i915_gem_object *obj_priv; | 5272 | struct drm_i915_gem_object *obj; |
4985 | struct drm_pending_vblank_event *e; | 5273 | struct drm_pending_vblank_event *e; |
4986 | struct timeval now; | 5274 | struct timeval tnow, tvbl; |
4987 | unsigned long flags; | 5275 | unsigned long flags; |
4988 | 5276 | ||
4989 | /* Ignore early vblank irqs */ | 5277 | /* Ignore early vblank irqs */ |
4990 | if (intel_crtc == NULL) | 5278 | if (intel_crtc == NULL) |
4991 | return; | 5279 | return; |
4992 | 5280 | ||
5281 | do_gettimeofday(&tnow); | ||
5282 | |||
4993 | spin_lock_irqsave(&dev->event_lock, flags); | 5283 | spin_lock_irqsave(&dev->event_lock, flags); |
4994 | work = intel_crtc->unpin_work; | 5284 | work = intel_crtc->unpin_work; |
4995 | if (work == NULL || !work->pending) { | 5285 | if (work == NULL || !work->pending) { |
@@ -4998,26 +5288,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
4998 | } | 5288 | } |
4999 | 5289 | ||
5000 | intel_crtc->unpin_work = NULL; | 5290 | intel_crtc->unpin_work = NULL; |
5001 | drm_vblank_put(dev, intel_crtc->pipe); | ||
5002 | 5291 | ||
5003 | if (work->event) { | 5292 | if (work->event) { |
5004 | e = work->event; | 5293 | e = work->event; |
5005 | do_gettimeofday(&now); | 5294 | e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); |
5006 | e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe); | 5295 | |
5007 | e->event.tv_sec = now.tv_sec; | 5296 | /* Called before vblank count and timestamps have |
5008 | e->event.tv_usec = now.tv_usec; | 5297 | * been updated for the vblank interval of flip |
5298 | * completion? Need to increment vblank count and | ||
5299 | * add one videorefresh duration to returned timestamp | ||
5300 | * to account for this. We assume this happened if we | ||
5301 | * get called over 0.9 frame durations after the last | ||
5302 | * timestamped vblank. | ||
5303 | * | ||
5304 | * This calculation can not be used with vrefresh rates | ||
5305 | * below 5Hz (10Hz to be on the safe side) without | ||
5306 | * promoting to 64 integers. | ||
5307 | */ | ||
5308 | if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > | ||
5309 | 9 * crtc->framedur_ns) { | ||
5310 | e->event.sequence++; | ||
5311 | tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + | ||
5312 | crtc->framedur_ns); | ||
5313 | } | ||
5314 | |||
5315 | e->event.tv_sec = tvbl.tv_sec; | ||
5316 | e->event.tv_usec = tvbl.tv_usec; | ||
5317 | |||
5009 | list_add_tail(&e->base.link, | 5318 | list_add_tail(&e->base.link, |
5010 | &e->base.file_priv->event_list); | 5319 | &e->base.file_priv->event_list); |
5011 | wake_up_interruptible(&e->base.file_priv->event_wait); | 5320 | wake_up_interruptible(&e->base.file_priv->event_wait); |
5012 | } | 5321 | } |
5013 | 5322 | ||
5323 | drm_vblank_put(dev, intel_crtc->pipe); | ||
5324 | |||
5014 | spin_unlock_irqrestore(&dev->event_lock, flags); | 5325 | spin_unlock_irqrestore(&dev->event_lock, flags); |
5015 | 5326 | ||
5016 | obj_priv = to_intel_bo(work->old_fb_obj); | 5327 | obj = work->old_fb_obj; |
5328 | |||
5017 | atomic_clear_mask(1 << intel_crtc->plane, | 5329 | atomic_clear_mask(1 << intel_crtc->plane, |
5018 | &obj_priv->pending_flip.counter); | 5330 | &obj->pending_flip.counter); |
5019 | if (atomic_read(&obj_priv->pending_flip) == 0) | 5331 | if (atomic_read(&obj->pending_flip) == 0) |
5020 | wake_up(&dev_priv->pending_flip_queue); | 5332 | wake_up(&dev_priv->pending_flip_queue); |
5333 | |||
5021 | schedule_work(&work->work); | 5334 | schedule_work(&work->work); |
5022 | 5335 | ||
5023 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); | 5336 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); |
@@ -5063,8 +5376,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5063 | struct drm_device *dev = crtc->dev; | 5376 | struct drm_device *dev = crtc->dev; |
5064 | struct drm_i915_private *dev_priv = dev->dev_private; | 5377 | struct drm_i915_private *dev_priv = dev->dev_private; |
5065 | struct intel_framebuffer *intel_fb; | 5378 | struct intel_framebuffer *intel_fb; |
5066 | struct drm_i915_gem_object *obj_priv; | 5379 | struct drm_i915_gem_object *obj; |
5067 | struct drm_gem_object *obj; | ||
5068 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5380 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5069 | struct intel_unpin_work *work; | 5381 | struct intel_unpin_work *work; |
5070 | unsigned long flags, offset; | 5382 | unsigned long flags, offset; |
@@ -5098,13 +5410,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5098 | obj = intel_fb->obj; | 5410 | obj = intel_fb->obj; |
5099 | 5411 | ||
5100 | mutex_lock(&dev->struct_mutex); | 5412 | mutex_lock(&dev->struct_mutex); |
5101 | ret = intel_pin_and_fence_fb_obj(dev, obj, true); | 5413 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); |
5102 | if (ret) | 5414 | if (ret) |
5103 | goto cleanup_work; | 5415 | goto cleanup_work; |
5104 | 5416 | ||
5105 | /* Reference the objects for the scheduled work. */ | 5417 | /* Reference the objects for the scheduled work. */ |
5106 | drm_gem_object_reference(work->old_fb_obj); | 5418 | drm_gem_object_reference(&work->old_fb_obj->base); |
5107 | drm_gem_object_reference(obj); | 5419 | drm_gem_object_reference(&obj->base); |
5108 | 5420 | ||
5109 | crtc->fb = fb; | 5421 | crtc->fb = fb; |
5110 | 5422 | ||
@@ -5112,22 +5424,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5112 | if (ret) | 5424 | if (ret) |
5113 | goto cleanup_objs; | 5425 | goto cleanup_objs; |
5114 | 5426 | ||
5115 | /* Block clients from rendering to the new back buffer until | ||
5116 | * the flip occurs and the object is no longer visible. | ||
5117 | */ | ||
5118 | atomic_add(1 << intel_crtc->plane, | ||
5119 | &to_intel_bo(work->old_fb_obj)->pending_flip); | ||
5120 | |||
5121 | work->pending_flip_obj = obj; | ||
5122 | obj_priv = to_intel_bo(obj); | ||
5123 | |||
5124 | if (IS_GEN3(dev) || IS_GEN2(dev)) { | 5427 | if (IS_GEN3(dev) || IS_GEN2(dev)) { |
5125 | u32 flip_mask; | 5428 | u32 flip_mask; |
5126 | 5429 | ||
5127 | /* Can't queue multiple flips, so wait for the previous | 5430 | /* Can't queue multiple flips, so wait for the previous |
5128 | * one to finish before executing the next. | 5431 | * one to finish before executing the next. |
5129 | */ | 5432 | */ |
5130 | BEGIN_LP_RING(2); | 5433 | ret = BEGIN_LP_RING(2); |
5434 | if (ret) | ||
5435 | goto cleanup_objs; | ||
5436 | |||
5131 | if (intel_crtc->plane) | 5437 | if (intel_crtc->plane) |
5132 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | 5438 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; |
5133 | else | 5439 | else |
@@ -5137,18 +5443,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5137 | ADVANCE_LP_RING(); | 5443 | ADVANCE_LP_RING(); |
5138 | } | 5444 | } |
5139 | 5445 | ||
5446 | work->pending_flip_obj = obj; | ||
5447 | |||
5140 | work->enable_stall_check = true; | 5448 | work->enable_stall_check = true; |
5141 | 5449 | ||
5142 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | 5450 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ |
5143 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; | 5451 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; |
5144 | 5452 | ||
5145 | BEGIN_LP_RING(4); | 5453 | ret = BEGIN_LP_RING(4); |
5146 | switch(INTEL_INFO(dev)->gen) { | 5454 | if (ret) |
5455 | goto cleanup_objs; | ||
5456 | |||
5457 | /* Block clients from rendering to the new back buffer until | ||
5458 | * the flip occurs and the object is no longer visible. | ||
5459 | */ | ||
5460 | atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); | ||
5461 | |||
5462 | switch (INTEL_INFO(dev)->gen) { | ||
5147 | case 2: | 5463 | case 2: |
5148 | OUT_RING(MI_DISPLAY_FLIP | | 5464 | OUT_RING(MI_DISPLAY_FLIP | |
5149 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5465 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5150 | OUT_RING(fb->pitch); | 5466 | OUT_RING(fb->pitch); |
5151 | OUT_RING(obj_priv->gtt_offset + offset); | 5467 | OUT_RING(obj->gtt_offset + offset); |
5152 | OUT_RING(MI_NOOP); | 5468 | OUT_RING(MI_NOOP); |
5153 | break; | 5469 | break; |
5154 | 5470 | ||
@@ -5156,7 +5472,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5156 | OUT_RING(MI_DISPLAY_FLIP_I915 | | 5472 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
5157 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5473 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5158 | OUT_RING(fb->pitch); | 5474 | OUT_RING(fb->pitch); |
5159 | OUT_RING(obj_priv->gtt_offset + offset); | 5475 | OUT_RING(obj->gtt_offset + offset); |
5160 | OUT_RING(MI_NOOP); | 5476 | OUT_RING(MI_NOOP); |
5161 | break; | 5477 | break; |
5162 | 5478 | ||
@@ -5169,7 +5485,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5169 | OUT_RING(MI_DISPLAY_FLIP | | 5485 | OUT_RING(MI_DISPLAY_FLIP | |
5170 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5486 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5171 | OUT_RING(fb->pitch); | 5487 | OUT_RING(fb->pitch); |
5172 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 5488 | OUT_RING(obj->gtt_offset | obj->tiling_mode); |
5173 | 5489 | ||
5174 | /* XXX Enabling the panel-fitter across page-flip is so far | 5490 | /* XXX Enabling the panel-fitter across page-flip is so far |
5175 | * untested on non-native modes, so ignore it for now. | 5491 | * untested on non-native modes, so ignore it for now. |
@@ -5183,8 +5499,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5183 | case 6: | 5499 | case 6: |
5184 | OUT_RING(MI_DISPLAY_FLIP | | 5500 | OUT_RING(MI_DISPLAY_FLIP | |
5185 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5501 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5186 | OUT_RING(fb->pitch | obj_priv->tiling_mode); | 5502 | OUT_RING(fb->pitch | obj->tiling_mode); |
5187 | OUT_RING(obj_priv->gtt_offset); | 5503 | OUT_RING(obj->gtt_offset); |
5188 | 5504 | ||
5189 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | 5505 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; |
5190 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | 5506 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; |
@@ -5200,8 +5516,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5200 | return 0; | 5516 | return 0; |
5201 | 5517 | ||
5202 | cleanup_objs: | 5518 | cleanup_objs: |
5203 | drm_gem_object_unreference(work->old_fb_obj); | 5519 | drm_gem_object_unreference(&work->old_fb_obj->base); |
5204 | drm_gem_object_unreference(obj); | 5520 | drm_gem_object_unreference(&obj->base); |
5205 | cleanup_work: | 5521 | cleanup_work: |
5206 | mutex_unlock(&dev->struct_mutex); | 5522 | mutex_unlock(&dev->struct_mutex); |
5207 | 5523 | ||
@@ -5338,7 +5654,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5338 | } | 5654 | } |
5339 | 5655 | ||
5340 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 5656 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
5341 | struct drm_file *file_priv) | 5657 | struct drm_file *file) |
5342 | { | 5658 | { |
5343 | drm_i915_private_t *dev_priv = dev->dev_private; | 5659 | drm_i915_private_t *dev_priv = dev->dev_private; |
5344 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; | 5660 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; |
@@ -5498,6 +5814,8 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
5498 | encoder->base.possible_clones = | 5814 | encoder->base.possible_clones = |
5499 | intel_encoder_clones(dev, encoder->clone_mask); | 5815 | intel_encoder_clones(dev, encoder->clone_mask); |
5500 | } | 5816 | } |
5817 | |||
5818 | intel_panel_setup_backlight(dev); | ||
5501 | } | 5819 | } |
5502 | 5820 | ||
5503 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 5821 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -5505,19 +5823,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
5505 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 5823 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
5506 | 5824 | ||
5507 | drm_framebuffer_cleanup(fb); | 5825 | drm_framebuffer_cleanup(fb); |
5508 | drm_gem_object_unreference_unlocked(intel_fb->obj); | 5826 | drm_gem_object_unreference_unlocked(&intel_fb->obj->base); |
5509 | 5827 | ||
5510 | kfree(intel_fb); | 5828 | kfree(intel_fb); |
5511 | } | 5829 | } |
5512 | 5830 | ||
5513 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, | 5831 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, |
5514 | struct drm_file *file_priv, | 5832 | struct drm_file *file, |
5515 | unsigned int *handle) | 5833 | unsigned int *handle) |
5516 | { | 5834 | { |
5517 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 5835 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
5518 | struct drm_gem_object *object = intel_fb->obj; | 5836 | struct drm_i915_gem_object *obj = intel_fb->obj; |
5519 | 5837 | ||
5520 | return drm_gem_handle_create(file_priv, object, handle); | 5838 | return drm_gem_handle_create(file, &obj->base, handle); |
5521 | } | 5839 | } |
5522 | 5840 | ||
5523 | static const struct drm_framebuffer_funcs intel_fb_funcs = { | 5841 | static const struct drm_framebuffer_funcs intel_fb_funcs = { |
@@ -5528,12 +5846,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { | |||
5528 | int intel_framebuffer_init(struct drm_device *dev, | 5846 | int intel_framebuffer_init(struct drm_device *dev, |
5529 | struct intel_framebuffer *intel_fb, | 5847 | struct intel_framebuffer *intel_fb, |
5530 | struct drm_mode_fb_cmd *mode_cmd, | 5848 | struct drm_mode_fb_cmd *mode_cmd, |
5531 | struct drm_gem_object *obj) | 5849 | struct drm_i915_gem_object *obj) |
5532 | { | 5850 | { |
5533 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
5534 | int ret; | 5851 | int ret; |
5535 | 5852 | ||
5536 | if (obj_priv->tiling_mode == I915_TILING_Y) | 5853 | if (obj->tiling_mode == I915_TILING_Y) |
5537 | return -EINVAL; | 5854 | return -EINVAL; |
5538 | 5855 | ||
5539 | if (mode_cmd->pitch & 63) | 5856 | if (mode_cmd->pitch & 63) |
@@ -5565,11 +5882,11 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
5565 | struct drm_file *filp, | 5882 | struct drm_file *filp, |
5566 | struct drm_mode_fb_cmd *mode_cmd) | 5883 | struct drm_mode_fb_cmd *mode_cmd) |
5567 | { | 5884 | { |
5568 | struct drm_gem_object *obj; | 5885 | struct drm_i915_gem_object *obj; |
5569 | struct intel_framebuffer *intel_fb; | 5886 | struct intel_framebuffer *intel_fb; |
5570 | int ret; | 5887 | int ret; |
5571 | 5888 | ||
5572 | obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); | 5889 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); |
5573 | if (!obj) | 5890 | if (!obj) |
5574 | return ERR_PTR(-ENOENT); | 5891 | return ERR_PTR(-ENOENT); |
5575 | 5892 | ||
@@ -5577,10 +5894,9 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
5577 | if (!intel_fb) | 5894 | if (!intel_fb) |
5578 | return ERR_PTR(-ENOMEM); | 5895 | return ERR_PTR(-ENOMEM); |
5579 | 5896 | ||
5580 | ret = intel_framebuffer_init(dev, intel_fb, | 5897 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); |
5581 | mode_cmd, obj); | ||
5582 | if (ret) { | 5898 | if (ret) { |
5583 | drm_gem_object_unreference_unlocked(obj); | 5899 | drm_gem_object_unreference_unlocked(&obj->base); |
5584 | kfree(intel_fb); | 5900 | kfree(intel_fb); |
5585 | return ERR_PTR(ret); | 5901 | return ERR_PTR(ret); |
5586 | } | 5902 | } |
@@ -5593,10 +5909,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
5593 | .output_poll_changed = intel_fb_output_poll_changed, | 5909 | .output_poll_changed = intel_fb_output_poll_changed, |
5594 | }; | 5910 | }; |
5595 | 5911 | ||
5596 | static struct drm_gem_object * | 5912 | static struct drm_i915_gem_object * |
5597 | intel_alloc_context_page(struct drm_device *dev) | 5913 | intel_alloc_context_page(struct drm_device *dev) |
5598 | { | 5914 | { |
5599 | struct drm_gem_object *ctx; | 5915 | struct drm_i915_gem_object *ctx; |
5600 | int ret; | 5916 | int ret; |
5601 | 5917 | ||
5602 | ctx = i915_gem_alloc_object(dev, 4096); | 5918 | ctx = i915_gem_alloc_object(dev, 4096); |
@@ -5606,7 +5922,7 @@ intel_alloc_context_page(struct drm_device *dev) | |||
5606 | } | 5922 | } |
5607 | 5923 | ||
5608 | mutex_lock(&dev->struct_mutex); | 5924 | mutex_lock(&dev->struct_mutex); |
5609 | ret = i915_gem_object_pin(ctx, 4096); | 5925 | ret = i915_gem_object_pin(ctx, 4096, true); |
5610 | if (ret) { | 5926 | if (ret) { |
5611 | DRM_ERROR("failed to pin power context: %d\n", ret); | 5927 | DRM_ERROR("failed to pin power context: %d\n", ret); |
5612 | goto err_unref; | 5928 | goto err_unref; |
@@ -5624,7 +5940,7 @@ intel_alloc_context_page(struct drm_device *dev) | |||
5624 | err_unpin: | 5940 | err_unpin: |
5625 | i915_gem_object_unpin(ctx); | 5941 | i915_gem_object_unpin(ctx); |
5626 | err_unref: | 5942 | err_unref: |
5627 | drm_gem_object_unreference(ctx); | 5943 | drm_gem_object_unreference(&ctx->base); |
5628 | mutex_unlock(&dev->struct_mutex); | 5944 | mutex_unlock(&dev->struct_mutex); |
5629 | return NULL; | 5945 | return NULL; |
5630 | } | 5946 | } |
@@ -5736,6 +6052,25 @@ void ironlake_disable_drps(struct drm_device *dev) | |||
5736 | 6052 | ||
5737 | } | 6053 | } |
5738 | 6054 | ||
6055 | void gen6_set_rps(struct drm_device *dev, u8 val) | ||
6056 | { | ||
6057 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6058 | u32 swreq; | ||
6059 | |||
6060 | swreq = (val & 0x3ff) << 25; | ||
6061 | I915_WRITE(GEN6_RPNSWREQ, swreq); | ||
6062 | } | ||
6063 | |||
6064 | void gen6_disable_rps(struct drm_device *dev) | ||
6065 | { | ||
6066 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6067 | |||
6068 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | ||
6069 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | ||
6070 | I915_WRITE(GEN6_PMIER, 0); | ||
6071 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); | ||
6072 | } | ||
6073 | |||
5739 | static unsigned long intel_pxfreq(u32 vidfreq) | 6074 | static unsigned long intel_pxfreq(u32 vidfreq) |
5740 | { | 6075 | { |
5741 | unsigned long freq; | 6076 | unsigned long freq; |
@@ -5822,7 +6157,123 @@ void intel_init_emon(struct drm_device *dev) | |||
5822 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); | 6157 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); |
5823 | } | 6158 | } |
5824 | 6159 | ||
5825 | void intel_init_clock_gating(struct drm_device *dev) | 6160 | void gen6_enable_rps(struct drm_i915_private *dev_priv) |
6161 | { | ||
6162 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | ||
6163 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | ||
6164 | u32 pcu_mbox; | ||
6165 | int cur_freq, min_freq, max_freq; | ||
6166 | int i; | ||
6167 | |||
6168 | /* Here begins a magic sequence of register writes to enable | ||
6169 | * auto-downclocking. | ||
6170 | * | ||
6171 | * Perhaps there might be some value in exposing these to | ||
6172 | * userspace... | ||
6173 | */ | ||
6174 | I915_WRITE(GEN6_RC_STATE, 0); | ||
6175 | __gen6_force_wake_get(dev_priv); | ||
6176 | |||
6177 | /* disable the counters and set deterministic thresholds */ | ||
6178 | I915_WRITE(GEN6_RC_CONTROL, 0); | ||
6179 | |||
6180 | I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); | ||
6181 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); | ||
6182 | I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); | ||
6183 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); | ||
6184 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); | ||
6185 | |||
6186 | for (i = 0; i < I915_NUM_RINGS; i++) | ||
6187 | I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); | ||
6188 | |||
6189 | I915_WRITE(GEN6_RC_SLEEP, 0); | ||
6190 | I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); | ||
6191 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); | ||
6192 | I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); | ||
6193 | I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ | ||
6194 | |||
6195 | I915_WRITE(GEN6_RC_CONTROL, | ||
6196 | GEN6_RC_CTL_RC6p_ENABLE | | ||
6197 | GEN6_RC_CTL_RC6_ENABLE | | ||
6198 | GEN6_RC_CTL_EI_MODE(1) | | ||
6199 | GEN6_RC_CTL_HW_ENABLE); | ||
6200 | |||
6201 | I915_WRITE(GEN6_RPNSWREQ, | ||
6202 | GEN6_FREQUENCY(10) | | ||
6203 | GEN6_OFFSET(0) | | ||
6204 | GEN6_AGGRESSIVE_TURBO); | ||
6205 | I915_WRITE(GEN6_RC_VIDEO_FREQ, | ||
6206 | GEN6_FREQUENCY(12)); | ||
6207 | |||
6208 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); | ||
6209 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||
6210 | 18 << 24 | | ||
6211 | 6 << 16); | ||
6212 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000); | ||
6213 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000); | ||
6214 | I915_WRITE(GEN6_RP_UP_EI, 100000); | ||
6215 | I915_WRITE(GEN6_RP_DOWN_EI, 300000); | ||
6216 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | ||
6217 | I915_WRITE(GEN6_RP_CONTROL, | ||
6218 | GEN6_RP_MEDIA_TURBO | | ||
6219 | GEN6_RP_USE_NORMAL_FREQ | | ||
6220 | GEN6_RP_MEDIA_IS_GFX | | ||
6221 | GEN6_RP_ENABLE | | ||
6222 | GEN6_RP_UP_BUSY_MAX | | ||
6223 | GEN6_RP_DOWN_BUSY_MIN); | ||
6224 | |||
6225 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
6226 | 500)) | ||
6227 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
6228 | |||
6229 | I915_WRITE(GEN6_PCODE_DATA, 0); | ||
6230 | I915_WRITE(GEN6_PCODE_MAILBOX, | ||
6231 | GEN6_PCODE_READY | | ||
6232 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | ||
6233 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
6234 | 500)) | ||
6235 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
6236 | |||
6237 | min_freq = (rp_state_cap & 0xff0000) >> 16; | ||
6238 | max_freq = rp_state_cap & 0xff; | ||
6239 | cur_freq = (gt_perf_status & 0xff00) >> 8; | ||
6240 | |||
6241 | /* Check for overclock support */ | ||
6242 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
6243 | 500)) | ||
6244 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
6245 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); | ||
6246 | pcu_mbox = I915_READ(GEN6_PCODE_DATA); | ||
6247 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
6248 | 500)) | ||
6249 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
6250 | if (pcu_mbox & (1<<31)) { /* OC supported */ | ||
6251 | max_freq = pcu_mbox & 0xff; | ||
6252 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100); | ||
6253 | } | ||
6254 | |||
6255 | /* In units of 100MHz */ | ||
6256 | dev_priv->max_delay = max_freq; | ||
6257 | dev_priv->min_delay = min_freq; | ||
6258 | dev_priv->cur_delay = cur_freq; | ||
6259 | |||
6260 | /* requires MSI enabled */ | ||
6261 | I915_WRITE(GEN6_PMIER, | ||
6262 | GEN6_PM_MBOX_EVENT | | ||
6263 | GEN6_PM_THERMAL_EVENT | | ||
6264 | GEN6_PM_RP_DOWN_TIMEOUT | | ||
6265 | GEN6_PM_RP_UP_THRESHOLD | | ||
6266 | GEN6_PM_RP_DOWN_THRESHOLD | | ||
6267 | GEN6_PM_RP_UP_EI_EXPIRED | | ||
6268 | GEN6_PM_RP_DOWN_EI_EXPIRED); | ||
6269 | I915_WRITE(GEN6_PMIMR, 0); | ||
6270 | /* enable all PM interrupts */ | ||
6271 | I915_WRITE(GEN6_PMINTRMSK, 0); | ||
6272 | |||
6273 | __gen6_force_wake_put(dev_priv); | ||
6274 | } | ||
6275 | |||
6276 | void intel_enable_clock_gating(struct drm_device *dev) | ||
5826 | { | 6277 | { |
5827 | struct drm_i915_private *dev_priv = dev->dev_private; | 6278 | struct drm_i915_private *dev_priv = dev->dev_private; |
5828 | 6279 | ||
@@ -5872,9 +6323,9 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5872 | I915_WRITE(DISP_ARB_CTL, | 6323 | I915_WRITE(DISP_ARB_CTL, |
5873 | (I915_READ(DISP_ARB_CTL) | | 6324 | (I915_READ(DISP_ARB_CTL) | |
5874 | DISP_FBC_WM_DIS)); | 6325 | DISP_FBC_WM_DIS)); |
5875 | I915_WRITE(WM3_LP_ILK, 0); | 6326 | I915_WRITE(WM3_LP_ILK, 0); |
5876 | I915_WRITE(WM2_LP_ILK, 0); | 6327 | I915_WRITE(WM2_LP_ILK, 0); |
5877 | I915_WRITE(WM1_LP_ILK, 0); | 6328 | I915_WRITE(WM1_LP_ILK, 0); |
5878 | } | 6329 | } |
5879 | /* | 6330 | /* |
5880 | * Based on the document from hardware guys the following bits | 6331 | * Based on the document from hardware guys the following bits |
@@ -5896,7 +6347,49 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5896 | ILK_DPFC_DIS2 | | 6347 | ILK_DPFC_DIS2 | |
5897 | ILK_CLK_FBC); | 6348 | ILK_CLK_FBC); |
5898 | } | 6349 | } |
5899 | return; | 6350 | |
6351 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
6352 | I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
6353 | ILK_ELPIN_409_SELECT); | ||
6354 | |||
6355 | if (IS_GEN5(dev)) { | ||
6356 | I915_WRITE(_3D_CHICKEN2, | ||
6357 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | | ||
6358 | _3D_CHICKEN2_WM_READ_PIPELINED); | ||
6359 | } | ||
6360 | |||
6361 | if (IS_GEN6(dev)) { | ||
6362 | I915_WRITE(WM3_LP_ILK, 0); | ||
6363 | I915_WRITE(WM2_LP_ILK, 0); | ||
6364 | I915_WRITE(WM1_LP_ILK, 0); | ||
6365 | |||
6366 | /* | ||
6367 | * According to the spec the following bits should be | ||
6368 | * set in order to enable memory self-refresh and fbc: | ||
6369 | * The bit21 and bit22 of 0x42000 | ||
6370 | * The bit21 and bit22 of 0x42004 | ||
6371 | * The bit5 and bit7 of 0x42020 | ||
6372 | * The bit14 of 0x70180 | ||
6373 | * The bit14 of 0x71180 | ||
6374 | */ | ||
6375 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | ||
6376 | I915_READ(ILK_DISPLAY_CHICKEN1) | | ||
6377 | ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); | ||
6378 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
6379 | I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
6380 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); | ||
6381 | I915_WRITE(ILK_DSPCLK_GATE, | ||
6382 | I915_READ(ILK_DSPCLK_GATE) | | ||
6383 | ILK_DPARB_CLK_GATE | | ||
6384 | ILK_DPFD_CLK_GATE); | ||
6385 | |||
6386 | I915_WRITE(DSPACNTR, | ||
6387 | I915_READ(DSPACNTR) | | ||
6388 | DISPPLANE_TRICKLE_FEED_DISABLE); | ||
6389 | I915_WRITE(DSPBCNTR, | ||
6390 | I915_READ(DSPBCNTR) | | ||
6391 | DISPPLANE_TRICKLE_FEED_DISABLE); | ||
6392 | } | ||
5900 | } else if (IS_G4X(dev)) { | 6393 | } else if (IS_G4X(dev)) { |
5901 | uint32_t dspclk_gate; | 6394 | uint32_t dspclk_gate; |
5902 | I915_WRITE(RENCLK_GATE_D1, 0); | 6395 | I915_WRITE(RENCLK_GATE_D1, 0); |
@@ -5934,55 +6427,84 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5934 | } else if (IS_I830(dev)) { | 6427 | } else if (IS_I830(dev)) { |
5935 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | 6428 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); |
5936 | } | 6429 | } |
6430 | } | ||
5937 | 6431 | ||
5938 | /* | 6432 | void intel_disable_clock_gating(struct drm_device *dev) |
5939 | * GPU can automatically power down the render unit if given a page | 6433 | { |
5940 | * to save state. | 6434 | struct drm_i915_private *dev_priv = dev->dev_private; |
5941 | */ | 6435 | |
5942 | if (IS_IRONLAKE_M(dev)) { | 6436 | if (dev_priv->renderctx) { |
5943 | if (dev_priv->renderctx == NULL) | 6437 | struct drm_i915_gem_object *obj = dev_priv->renderctx; |
5944 | dev_priv->renderctx = intel_alloc_context_page(dev); | 6438 | |
5945 | if (dev_priv->renderctx) { | 6439 | I915_WRITE(CCID, 0); |
5946 | struct drm_i915_gem_object *obj_priv; | 6440 | POSTING_READ(CCID); |
5947 | obj_priv = to_intel_bo(dev_priv->renderctx); | 6441 | |
5948 | if (obj_priv) { | 6442 | i915_gem_object_unpin(obj); |
5949 | BEGIN_LP_RING(4); | 6443 | drm_gem_object_unreference(&obj->base); |
5950 | OUT_RING(MI_SET_CONTEXT); | 6444 | dev_priv->renderctx = NULL; |
5951 | OUT_RING(obj_priv->gtt_offset | | ||
5952 | MI_MM_SPACE_GTT | | ||
5953 | MI_SAVE_EXT_STATE_EN | | ||
5954 | MI_RESTORE_EXT_STATE_EN | | ||
5955 | MI_RESTORE_INHIBIT); | ||
5956 | OUT_RING(MI_NOOP); | ||
5957 | OUT_RING(MI_FLUSH); | ||
5958 | ADVANCE_LP_RING(); | ||
5959 | } | ||
5960 | } else | ||
5961 | DRM_DEBUG_KMS("Failed to allocate render context." | ||
5962 | "Disable RC6\n"); | ||
5963 | } | 6445 | } |
5964 | 6446 | ||
5965 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { | 6447 | if (dev_priv->pwrctx) { |
5966 | struct drm_i915_gem_object *obj_priv = NULL; | 6448 | struct drm_i915_gem_object *obj = dev_priv->pwrctx; |
5967 | 6449 | ||
5968 | if (dev_priv->pwrctx) { | 6450 | I915_WRITE(PWRCTXA, 0); |
5969 | obj_priv = to_intel_bo(dev_priv->pwrctx); | 6451 | POSTING_READ(PWRCTXA); |
5970 | } else { | ||
5971 | struct drm_gem_object *pwrctx; | ||
5972 | 6452 | ||
5973 | pwrctx = intel_alloc_context_page(dev); | 6453 | i915_gem_object_unpin(obj); |
5974 | if (pwrctx) { | 6454 | drm_gem_object_unreference(&obj->base); |
5975 | dev_priv->pwrctx = pwrctx; | 6455 | dev_priv->pwrctx = NULL; |
5976 | obj_priv = to_intel_bo(pwrctx); | 6456 | } |
5977 | } | 6457 | } |
5978 | } | ||
5979 | 6458 | ||
5980 | if (obj_priv) { | 6459 | static void ironlake_disable_rc6(struct drm_device *dev) |
5981 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); | 6460 | { |
5982 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 6461 | struct drm_i915_private *dev_priv = dev->dev_private; |
5983 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | 6462 | |
5984 | } | 6463 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ |
6464 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | ||
6465 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | ||
6466 | 10); | ||
6467 | POSTING_READ(CCID); | ||
6468 | I915_WRITE(PWRCTXA, 0); | ||
6469 | POSTING_READ(PWRCTXA); | ||
6470 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||
6471 | POSTING_READ(RSTDBYCTL); | ||
6472 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6473 | drm_gem_object_unreference(&dev_priv->renderctx->base); | ||
6474 | dev_priv->renderctx = NULL; | ||
6475 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
6476 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | ||
6477 | dev_priv->pwrctx = NULL; | ||
6478 | } | ||
6479 | |||
6480 | void ironlake_enable_rc6(struct drm_device *dev) | ||
6481 | { | ||
6482 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6483 | int ret; | ||
6484 | |||
6485 | /* | ||
6486 | * GPU can automatically power down the render unit if given a page | ||
6487 | * to save state. | ||
6488 | */ | ||
6489 | ret = BEGIN_LP_RING(6); | ||
6490 | if (ret) { | ||
6491 | ironlake_disable_rc6(dev); | ||
6492 | return; | ||
5985 | } | 6493 | } |
6494 | OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); | ||
6495 | OUT_RING(MI_SET_CONTEXT); | ||
6496 | OUT_RING(dev_priv->renderctx->gtt_offset | | ||
6497 | MI_MM_SPACE_GTT | | ||
6498 | MI_SAVE_EXT_STATE_EN | | ||
6499 | MI_RESTORE_EXT_STATE_EN | | ||
6500 | MI_RESTORE_INHIBIT); | ||
6501 | OUT_RING(MI_SUSPEND_FLUSH); | ||
6502 | OUT_RING(MI_NOOP); | ||
6503 | OUT_RING(MI_FLUSH); | ||
6504 | ADVANCE_LP_RING(); | ||
6505 | |||
6506 | I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); | ||
6507 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||
5986 | } | 6508 | } |
5987 | 6509 | ||
5988 | /* Set up chip specific display functions */ | 6510 | /* Set up chip specific display functions */ |
@@ -5997,7 +6519,7 @@ static void intel_init_display(struct drm_device *dev) | |||
5997 | dev_priv->display.dpms = i9xx_crtc_dpms; | 6519 | dev_priv->display.dpms = i9xx_crtc_dpms; |
5998 | 6520 | ||
5999 | if (I915_HAS_FBC(dev)) { | 6521 | if (I915_HAS_FBC(dev)) { |
6000 | if (IS_IRONLAKE_M(dev)) { | 6522 | if (HAS_PCH_SPLIT(dev)) { |
6001 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; | 6523 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
6002 | dev_priv->display.enable_fbc = ironlake_enable_fbc; | 6524 | dev_priv->display.enable_fbc = ironlake_enable_fbc; |
6003 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | 6525 | dev_priv->display.disable_fbc = ironlake_disable_fbc; |
@@ -6046,6 +6568,14 @@ static void intel_init_display(struct drm_device *dev) | |||
6046 | "Disable CxSR\n"); | 6568 | "Disable CxSR\n"); |
6047 | dev_priv->display.update_wm = NULL; | 6569 | dev_priv->display.update_wm = NULL; |
6048 | } | 6570 | } |
6571 | } else if (IS_GEN6(dev)) { | ||
6572 | if (SNB_READ_WM0_LATENCY()) { | ||
6573 | dev_priv->display.update_wm = sandybridge_update_wm; | ||
6574 | } else { | ||
6575 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
6576 | "Disable CxSR\n"); | ||
6577 | dev_priv->display.update_wm = NULL; | ||
6578 | } | ||
6049 | } else | 6579 | } else |
6050 | dev_priv->display.update_wm = NULL; | 6580 | dev_priv->display.update_wm = NULL; |
6051 | } else if (IS_PINEVIEW(dev)) { | 6581 | } else if (IS_PINEVIEW(dev)) { |
@@ -6191,12 +6721,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
6191 | dev->mode_config.max_width = 8192; | 6721 | dev->mode_config.max_width = 8192; |
6192 | dev->mode_config.max_height = 8192; | 6722 | dev->mode_config.max_height = 8192; |
6193 | } | 6723 | } |
6194 | 6724 | dev->mode_config.fb_base = dev->agp->base; | |
6195 | /* set memory base */ | ||
6196 | if (IS_GEN2(dev)) | ||
6197 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); | ||
6198 | else | ||
6199 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); | ||
6200 | 6725 | ||
6201 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) | 6726 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) |
6202 | dev_priv->num_pipe = 2; | 6727 | dev_priv->num_pipe = 2; |
@@ -6211,7 +6736,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
6211 | 6736 | ||
6212 | intel_setup_outputs(dev); | 6737 | intel_setup_outputs(dev); |
6213 | 6738 | ||
6214 | intel_init_clock_gating(dev); | 6739 | intel_enable_clock_gating(dev); |
6215 | 6740 | ||
6216 | /* Just disable it once at startup */ | 6741 | /* Just disable it once at startup */ |
6217 | i915_disable_vga(dev); | 6742 | i915_disable_vga(dev); |
@@ -6221,6 +6746,24 @@ void intel_modeset_init(struct drm_device *dev) | |||
6221 | intel_init_emon(dev); | 6746 | intel_init_emon(dev); |
6222 | } | 6747 | } |
6223 | 6748 | ||
6749 | if (IS_GEN6(dev)) | ||
6750 | gen6_enable_rps(dev_priv); | ||
6751 | |||
6752 | if (IS_IRONLAKE_M(dev)) { | ||
6753 | dev_priv->renderctx = intel_alloc_context_page(dev); | ||
6754 | if (!dev_priv->renderctx) | ||
6755 | goto skip_rc6; | ||
6756 | dev_priv->pwrctx = intel_alloc_context_page(dev); | ||
6757 | if (!dev_priv->pwrctx) { | ||
6758 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6759 | drm_gem_object_unreference(&dev_priv->renderctx->base); | ||
6760 | dev_priv->renderctx = NULL; | ||
6761 | goto skip_rc6; | ||
6762 | } | ||
6763 | ironlake_enable_rc6(dev); | ||
6764 | } | ||
6765 | |||
6766 | skip_rc6: | ||
6224 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 6767 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
6225 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 6768 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
6226 | (unsigned long)dev); | 6769 | (unsigned long)dev); |
@@ -6252,28 +6795,13 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6252 | if (dev_priv->display.disable_fbc) | 6795 | if (dev_priv->display.disable_fbc) |
6253 | dev_priv->display.disable_fbc(dev); | 6796 | dev_priv->display.disable_fbc(dev); |
6254 | 6797 | ||
6255 | if (dev_priv->renderctx) { | ||
6256 | struct drm_i915_gem_object *obj_priv; | ||
6257 | |||
6258 | obj_priv = to_intel_bo(dev_priv->renderctx); | ||
6259 | I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); | ||
6260 | I915_READ(CCID); | ||
6261 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6262 | drm_gem_object_unreference(dev_priv->renderctx); | ||
6263 | } | ||
6264 | |||
6265 | if (dev_priv->pwrctx) { | ||
6266 | struct drm_i915_gem_object *obj_priv; | ||
6267 | |||
6268 | obj_priv = to_intel_bo(dev_priv->pwrctx); | ||
6269 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); | ||
6270 | I915_READ(PWRCTXA); | ||
6271 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
6272 | drm_gem_object_unreference(dev_priv->pwrctx); | ||
6273 | } | ||
6274 | |||
6275 | if (IS_IRONLAKE_M(dev)) | 6798 | if (IS_IRONLAKE_M(dev)) |
6276 | ironlake_disable_drps(dev); | 6799 | ironlake_disable_drps(dev); |
6800 | if (IS_GEN6(dev)) | ||
6801 | gen6_disable_rps(dev); | ||
6802 | |||
6803 | if (IS_IRONLAKE_M(dev)) | ||
6804 | ironlake_disable_rc6(dev); | ||
6277 | 6805 | ||
6278 | mutex_unlock(&dev->struct_mutex); | 6806 | mutex_unlock(&dev->struct_mutex); |
6279 | 6807 | ||
@@ -6325,3 +6853,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) | |||
6325 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); | 6853 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); |
6326 | return 0; | 6854 | return 0; |
6327 | } | 6855 | } |
6856 | |||
6857 | #ifdef CONFIG_DEBUG_FS | ||
6858 | #include <linux/seq_file.h> | ||
6859 | |||
6860 | struct intel_display_error_state { | ||
6861 | struct intel_cursor_error_state { | ||
6862 | u32 control; | ||
6863 | u32 position; | ||
6864 | u32 base; | ||
6865 | u32 size; | ||
6866 | } cursor[2]; | ||
6867 | |||
6868 | struct intel_pipe_error_state { | ||
6869 | u32 conf; | ||
6870 | u32 source; | ||
6871 | |||
6872 | u32 htotal; | ||
6873 | u32 hblank; | ||
6874 | u32 hsync; | ||
6875 | u32 vtotal; | ||
6876 | u32 vblank; | ||
6877 | u32 vsync; | ||
6878 | } pipe[2]; | ||
6879 | |||
6880 | struct intel_plane_error_state { | ||
6881 | u32 control; | ||
6882 | u32 stride; | ||
6883 | u32 size; | ||
6884 | u32 pos; | ||
6885 | u32 addr; | ||
6886 | u32 surface; | ||
6887 | u32 tile_offset; | ||
6888 | } plane[2]; | ||
6889 | }; | ||
6890 | |||
6891 | struct intel_display_error_state * | ||
6892 | intel_display_capture_error_state(struct drm_device *dev) | ||
6893 | { | ||
6894 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
6895 | struct intel_display_error_state *error; | ||
6896 | int i; | ||
6897 | |||
6898 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | ||
6899 | if (error == NULL) | ||
6900 | return NULL; | ||
6901 | |||
6902 | for (i = 0; i < 2; i++) { | ||
6903 | error->cursor[i].control = I915_READ(CURCNTR(i)); | ||
6904 | error->cursor[i].position = I915_READ(CURPOS(i)); | ||
6905 | error->cursor[i].base = I915_READ(CURBASE(i)); | ||
6906 | |||
6907 | error->plane[i].control = I915_READ(DSPCNTR(i)); | ||
6908 | error->plane[i].stride = I915_READ(DSPSTRIDE(i)); | ||
6909 | error->plane[i].size = I915_READ(DSPSIZE(i)); | ||
6910 | error->plane[i].pos= I915_READ(DSPPOS(i)); | ||
6911 | error->plane[i].addr = I915_READ(DSPADDR(i)); | ||
6912 | if (INTEL_INFO(dev)->gen >= 4) { | ||
6913 | error->plane[i].surface = I915_READ(DSPSURF(i)); | ||
6914 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); | ||
6915 | } | ||
6916 | |||
6917 | error->pipe[i].conf = I915_READ(PIPECONF(i)); | ||
6918 | error->pipe[i].source = I915_READ(PIPESRC(i)); | ||
6919 | error->pipe[i].htotal = I915_READ(HTOTAL(i)); | ||
6920 | error->pipe[i].hblank = I915_READ(HBLANK(i)); | ||
6921 | error->pipe[i].hsync = I915_READ(HSYNC(i)); | ||
6922 | error->pipe[i].vtotal = I915_READ(VTOTAL(i)); | ||
6923 | error->pipe[i].vblank = I915_READ(VBLANK(i)); | ||
6924 | error->pipe[i].vsync = I915_READ(VSYNC(i)); | ||
6925 | } | ||
6926 | |||
6927 | return error; | ||
6928 | } | ||
6929 | |||
6930 | void | ||
6931 | intel_display_print_error_state(struct seq_file *m, | ||
6932 | struct drm_device *dev, | ||
6933 | struct intel_display_error_state *error) | ||
6934 | { | ||
6935 | int i; | ||
6936 | |||
6937 | for (i = 0; i < 2; i++) { | ||
6938 | seq_printf(m, "Pipe [%d]:\n", i); | ||
6939 | seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); | ||
6940 | seq_printf(m, " SRC: %08x\n", error->pipe[i].source); | ||
6941 | seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); | ||
6942 | seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); | ||
6943 | seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); | ||
6944 | seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); | ||
6945 | seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); | ||
6946 | seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); | ||
6947 | |||
6948 | seq_printf(m, "Plane [%d]:\n", i); | ||
6949 | seq_printf(m, " CNTR: %08x\n", error->plane[i].control); | ||
6950 | seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); | ||
6951 | seq_printf(m, " SIZE: %08x\n", error->plane[i].size); | ||
6952 | seq_printf(m, " POS: %08x\n", error->plane[i].pos); | ||
6953 | seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); | ||
6954 | if (INTEL_INFO(dev)->gen >= 4) { | ||
6955 | seq_printf(m, " SURF: %08x\n", error->plane[i].surface); | ||
6956 | seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); | ||
6957 | } | ||
6958 | |||
6959 | seq_printf(m, "Cursor [%d]:\n", i); | ||
6960 | seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); | ||
6961 | seq_printf(m, " POS: %08x\n", error->cursor[i].position); | ||
6962 | seq_printf(m, " BASE: %08x\n", error->cursor[i].base); | ||
6963 | } | ||
6964 | } | ||
6965 | #endif | ||