Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
 drivers/gpu/drm/i915/intel_pm.c | 897 ++++++++++++++++++++++++++++++++--------
 1 file changed, 679 insertions(+), 218 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f1233f544f3e..c3bb925b2e65 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int cfb_pitch;
 	int i;
@@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
@@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+		dev_priv->fbc.threshold++;
+
+	switch (dev_priv->fbc.threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
 		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-	else
+		break;
+	case 1:
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
 	dpfc_ctl |= DPFC_CTL_FENCE_EN;
 	if (IS_GEN5(dev))
 		dpfc_ctl |= obj->fence_reg;
@@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+		dev_priv->fbc.threshold++;
+
+	switch (dev_priv->fbc.threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
 		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-	else
+		break;
+	case 1:
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
+
 	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev)
 	struct drm_crtc *crtc = NULL, *tmp_crtc;
 	struct intel_crtc *intel_crtc;
 	struct drm_framebuffer *fb;
-	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
 	const struct drm_display_mode *adjusted_mode;
 	unsigned int max_width, max_height;
@@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev)
 
 	intel_crtc = to_intel_crtc(crtc);
 	fb = crtc->primary->fb;
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
+	obj = intel_fb_obj(fb);
 	adjusted_mode = &intel_crtc->config.adjusted_mode;
 
 	if (i915.enable_fbc < 0) {
@@ -529,7 +546,10 @@ void intel_update_fbc(struct drm_device *dev)
 		goto out_disable;
 	}
 
-	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+		max_width = 4096;
+		max_height = 4096;
+	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		max_width = 4096;
 		max_height = 2048;
 	} else {
@@ -563,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev)
 	if (in_dbg_master())
 		goto out_disable;
 
-	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+					      drm_format_plane_cpp(fb->pixel_format, 0))) {
 		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
 			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
 		goto out_disable;
@@ -789,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
 	return NULL;
 }
 
-static void pineview_disable_cxsr(struct drm_device *dev)
+void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	u32 val;
+
+	if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+	} else if (IS_PINEVIEW(dev)) {
+		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+		I915_WRITE(DSPFW3, val);
+	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
+		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+		I915_WRITE(FW_BLC_SELF, val);
+	} else if (IS_I915GM(dev)) {
+		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+		I915_WRITE(INSTPM, val);
+	} else {
+		return;
+	}
 
-	/* deactivate cxsr */
-	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+	DRM_DEBUG_KMS("memory self-refresh is %s\n",
+		      enable ? "enabled" : "disabled");
 }
 
 /*
@@ -864,95 +906,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
 
 /* Pineview has different values for various configs */
 static const struct intel_watermark_params pineview_display_wm = {
-	PINEVIEW_DISPLAY_FIFO,
-	PINEVIEW_MAX_WM,
-	PINEVIEW_DFT_WM,
-	PINEVIEW_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE
+	.fifo_size = PINEVIEW_DISPLAY_FIFO,
+	.max_wm = PINEVIEW_MAX_WM,
+	.default_wm = PINEVIEW_DFT_WM,
+	.guard_size = PINEVIEW_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_display_hplloff_wm = {
-	PINEVIEW_DISPLAY_FIFO,
-	PINEVIEW_MAX_WM,
-	PINEVIEW_DFT_HPLLOFF_WM,
-	PINEVIEW_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE
+	.fifo_size = PINEVIEW_DISPLAY_FIFO,
+	.max_wm = PINEVIEW_MAX_WM,
+	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+	.guard_size = PINEVIEW_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_cursor_wm = {
-	PINEVIEW_CURSOR_FIFO,
-	PINEVIEW_CURSOR_MAX_WM,
-	PINEVIEW_CURSOR_DFT_WM,
-	PINEVIEW_CURSOR_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE,
+	.fifo_size = PINEVIEW_CURSOR_FIFO,
+	.max_wm = PINEVIEW_CURSOR_MAX_WM,
+	.default_wm = PINEVIEW_CURSOR_DFT_WM,
+	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
-	PINEVIEW_CURSOR_FIFO,
-	PINEVIEW_CURSOR_MAX_WM,
-	PINEVIEW_CURSOR_DFT_WM,
-	PINEVIEW_CURSOR_GUARD_WM,
-	PINEVIEW_FIFO_LINE_SIZE
+	.fifo_size = PINEVIEW_CURSOR_FIFO,
+	.max_wm = PINEVIEW_CURSOR_MAX_WM,
+	.default_wm = PINEVIEW_CURSOR_DFT_WM,
+	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params g4x_wm_info = {
-	G4X_FIFO_SIZE,
-	G4X_MAX_WM,
-	G4X_MAX_WM,
-	2,
-	G4X_FIFO_LINE_SIZE,
+	.fifo_size = G4X_FIFO_SIZE,
+	.max_wm = G4X_MAX_WM,
+	.default_wm = G4X_MAX_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params g4x_cursor_wm_info = {
-	I965_CURSOR_FIFO,
-	I965_CURSOR_MAX_WM,
-	I965_CURSOR_DFT_WM,
-	2,
-	G4X_FIFO_LINE_SIZE,
+	.fifo_size = I965_CURSOR_FIFO,
+	.max_wm = I965_CURSOR_MAX_WM,
+	.default_wm = I965_CURSOR_DFT_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params valleyview_wm_info = {
-	VALLEYVIEW_FIFO_SIZE,
-	VALLEYVIEW_MAX_WM,
-	VALLEYVIEW_MAX_WM,
-	2,
-	G4X_FIFO_LINE_SIZE,
+	.fifo_size = VALLEYVIEW_FIFO_SIZE,
+	.max_wm = VALLEYVIEW_MAX_WM,
+	.default_wm = VALLEYVIEW_MAX_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params valleyview_cursor_wm_info = {
-	I965_CURSOR_FIFO,
-	VALLEYVIEW_CURSOR_MAX_WM,
-	I965_CURSOR_DFT_WM,
-	2,
-	G4X_FIFO_LINE_SIZE,
+	.fifo_size = I965_CURSOR_FIFO,
+	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
+	.default_wm = I965_CURSOR_DFT_WM,
+	.guard_size = 2,
+	.cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i965_cursor_wm_info = {
-	I965_CURSOR_FIFO,
-	I965_CURSOR_MAX_WM,
-	I965_CURSOR_DFT_WM,
-	2,
-	I915_FIFO_LINE_SIZE,
+	.fifo_size = I965_CURSOR_FIFO,
+	.max_wm = I965_CURSOR_MAX_WM,
+	.default_wm = I965_CURSOR_DFT_WM,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i945_wm_info = {
-	I945_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I915_FIFO_LINE_SIZE
+	.fifo_size = I945_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i915_wm_info = {
-	I915_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I915_FIFO_LINE_SIZE
+	.fifo_size = I915_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i830_wm_info = {
-	I855GM_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I830_FIFO_LINE_SIZE
+	.fifo_size = I855GM_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I830_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i845_wm_info = {
-	I830_FIFO_SIZE,
-	I915_MAX_WM,
-	1,
-	2,
-	I830_FIFO_LINE_SIZE
+	.fifo_size = I830_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I830_FIFO_LINE_SIZE,
 };
 
 /**
@@ -1033,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
 					 dev_priv->fsb_freq, dev_priv->mem_freq);
 	if (!latency) {
 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-		pineview_disable_cxsr(dev);
+		intel_set_memory_cxsr(dev_priv, false);
 		return;
 	}
 
@@ -1084,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
 		I915_WRITE(DSPFW3, reg);
 		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
-		/* activate cxsr */
-		I915_WRITE(DSPFW3,
-			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
-		DRM_DEBUG_KMS("Self-refresh is enabled\n");
+		intel_set_memory_cxsr(dev_priv, true);
 	} else {
-		pineview_disable_cxsr(dev);
-		DRM_DEBUG_KMS("Self-refresh is disabled\n");
+		intel_set_memory_cxsr(dev_priv, false);
 	}
 }
 
@@ -1316,6 +1354,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
 	int plane_sr, cursor_sr;
 	int ignore_plane_sr, ignore_cursor_sr;
 	unsigned int enabled = 0;
+	bool cxsr_enabled;
 
 	vlv_update_drain_latency(dev);
 
@@ -1342,10 +1381,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
 			     &valleyview_wm_info,
 			     &valleyview_cursor_wm_info,
 			     &ignore_plane_sr, &cursor_sr)) {
-		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+		cxsr_enabled = true;
 	} else {
-		I915_WRITE(FW_BLC_SELF_VLV,
-			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+		cxsr_enabled = false;
+		intel_set_memory_cxsr(dev_priv, false);
 		plane_sr = cursor_sr = 0;
 	}
 
@@ -1365,6 +1404,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
 	I915_WRITE(DSPFW3,
 		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1375,6 +1417,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
 	int plane_sr, cursor_sr;
 	unsigned int enabled = 0;
+	bool cxsr_enabled;
 
 	if (g4x_compute_wm0(dev, PIPE_A,
 			    &g4x_wm_info, latency_ns,
@@ -1394,10 +1437,10 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 			     &g4x_wm_info,
 			     &g4x_cursor_wm_info,
 			     &plane_sr, &cursor_sr)) {
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+		cxsr_enabled = true;
 	} else {
-		I915_WRITE(FW_BLC_SELF,
-			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+		cxsr_enabled = false;
+		intel_set_memory_cxsr(dev_priv, false);
 		plane_sr = cursor_sr = 0;
 	}
 
@@ -1418,6 +1461,9 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 	I915_WRITE(DSPFW3,
 		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1427,6 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 	struct drm_crtc *crtc;
 	int srwm = 1;
 	int cursor_sr = 16;
+	bool cxsr_enabled;
 
 	/* Calc sr entries for one plane configs */
 	crtc = single_enabled_crtc(dev);
@@ -1468,13 +1515,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
 			      "cursor %d\n", srwm, cursor_sr);
 
-		if (IS_CRESTLINE(dev))
-			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+		cxsr_enabled = true;
 	} else {
+		cxsr_enabled = false;
 		/* Turn off self refresh if both pipes are enabled */
-		if (IS_CRESTLINE(dev))
-			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-				   & ~FW_BLC_SELF_EN);
+		intel_set_memory_cxsr(dev_priv, false);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1486,6 +1531,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
 	/* update cursor SR watermark */
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1545,12 +1593,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
 	if (IS_I915GM(dev) && enabled) {
-		struct intel_framebuffer *fb;
+		struct drm_i915_gem_object *obj;
 
-		fb = to_intel_framebuffer(enabled->primary->fb);
+		obj = intel_fb_obj(enabled->primary->fb);
 
 		/* self-refresh seems busted with untiled */
-		if (fb->obj->tiling_mode == I915_TILING_NONE)
+		if (obj->tiling_mode == I915_TILING_NONE)
 			enabled = NULL;
 	}
 
@@ -1560,10 +1608,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 		cwm = 2;
 
 	/* Play safe and disable self-refresh before adjusting watermarks. */
-	if (IS_I945G(dev) || IS_I945GM(dev))
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
-	else if (IS_I915GM(dev))
-		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+	intel_set_memory_cxsr(dev_priv, false);
 
 	/* Calc sr entries for one plane configs */
 	if (HAS_FW_BLC(dev) && enabled) {
@@ -1609,17 +1654,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	I915_WRITE(FW_BLC, fwater_lo);
 	I915_WRITE(FW_BLC2, fwater_hi);
 
-	if (HAS_FW_BLC(dev)) {
-		if (enabled) {
-			if (IS_I945G(dev) || IS_I945GM(dev))
-				I915_WRITE(FW_BLC_SELF,
-					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
-			else if (IS_I915GM(dev))
-				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
-			DRM_DEBUG_KMS("memory self refresh enabled\n");
-		} else
-			DRM_DEBUG_KMS("memory self refresh disabled\n");
-	}
+	if (enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -2707,10 +2743,11 @@ static void ilk_update_wm(struct drm_crtc *crtc)
 	ilk_write_wm_values(dev_priv, &results);
 }
 
-static void ilk_update_sprite_wm(struct drm_plane *plane,
-				 struct drm_crtc *crtc,
-				 uint32_t sprite_width, int pixel_size,
-				 bool enabled, bool scaled)
+static void
+ilk_update_sprite_wm(struct drm_plane *plane,
+		     struct drm_crtc *crtc,
+		     uint32_t sprite_width, uint32_t sprite_height,
+		     int pixel_size, bool enabled, bool scaled)
 {
 	struct drm_device *dev = plane->dev;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2718,6 +2755,7 @@ static void ilk_update_sprite_wm(struct drm_plane *plane,
 	intel_plane->wm.enabled = enabled;
 	intel_plane->wm.scaled = scaled;
 	intel_plane->wm.horiz_pixels = sprite_width;
+	intel_plane->wm.vert_pixels = sprite_width;
 	intel_plane->wm.bytes_per_pixel = pixel_size;
 
 	/*
@@ -2852,13 +2890,16 @@ void intel_update_watermarks(struct drm_crtc *crtc)
 
 void intel_update_sprite_watermarks(struct drm_plane *plane,
 				    struct drm_crtc *crtc,
-				    uint32_t sprite_width, int pixel_size,
+				    uint32_t sprite_width,
+				    uint32_t sprite_height,
+				    int pixel_size,
 				    bool enabled, bool scaled)
 {
 	struct drm_i915_private *dev_priv = plane->dev->dev_private;
 
 	if (dev_priv->display.update_sprite_wm)
-		dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+		dev_priv->display.update_sprite_wm(plane, crtc,
+						   sprite_width, sprite_height,
 						   pixel_size, enabled, scaled);
 }
 
@@ -3147,6 +3188,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 	if (val < dev_priv->rps.max_freq_softlimit)
 		mask |= GEN6_PM_RP_UP_THRESHOLD;
 
+	mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+	mask &= dev_priv->pm_rps_events;
+
 	/* IVB and SNB hard hangs on looping batchbuffer
 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
 	 */
@@ -3250,7 +3294,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
-		if (IS_VALLEYVIEW(dev))
+		if (IS_CHERRYVIEW(dev))
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+		else if (IS_VALLEYVIEW(dev))
 			vlv_set_rps_idle(dev_priv);
 		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -3348,6 +3394,15 @@ static void gen6_disable_rps(struct drm_device *dev)
 	gen6_disable_rps_interrupts(dev);
 }
 
+static void cherryview_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	gen8_disable_rps_interrupts(dev);
+}
+
 static void valleyview_disable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3419,7 +3474,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev)
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON(dev_priv->rps.pm_iir);
-	bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -3430,7 +3485,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON(dev_priv->rps.pm_iir);
-	snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -3483,15 +3538,23 @@ static void gen8_enable_rps(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, unused)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
-	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+	else
+		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
 	/* 3: Enable RC6 */
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
 	intel_print_rc6_info(dev, rc6_mask);
-	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-				    GEN6_RC_CTL_EI_MODE(1) |
-				    rc6_mask);
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				GEN7_RC_CTL_TO_MODE |
+				rc6_mask);
+	else
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				GEN6_RC_CTL_EI_MODE(1) |
+				rc6_mask);
 
 	/* 4 Program defaults and thresholds for RPS*/
 	I915_WRITE(GEN6_RPNSWREQ,
@@ -3727,7 +3790,57 @@ void gen6_update_ring_freq(struct drm_device *dev)
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp0;
+
+	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+	rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+	return rp0;
+}
+
+static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rpe;
+
+	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+
+	return rpe;
+}
+
+static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp1;
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+	return rp1;
+}
+
+static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rpn;
+
+	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+	rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+	return rpn;
+}
+
+static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp1;
+
+	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+	return rp1;
+}
+
+static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
 	u32 val, rp0;
 
@@ -3752,7 +3865,7 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
 	return rpe;
 }
 
-int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
 {
 	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
 }
@@ -3766,6 +3879,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
 			     dev_priv->vlv_pctx->stolen->start);
 }
 
+
+/* Check that the pcbr address is not empty. */
+static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+{
+	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+}
+
+static void cherryview_setup_pctx(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long pctx_paddr, paddr;
+	struct i915_gtt *gtt = &dev_priv->gtt;
+	u32 pcbr;
+	int pctx_size = 32*1024;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	pcbr = I915_READ(VLV_PCBR);
+	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+		paddr = (dev_priv->mm.stolen_base +
+			 (gtt->stolen_size - pctx_size));
+
+		pctx_paddr = (paddr & (~4095));
+		I915_WRITE(VLV_PCBR, pctx_paddr);
+	}
+}
+
 static void valleyview_setup_pctx(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3840,6 +3982,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
 			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
 			 dev_priv->rps.efficient_freq);
 
+	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+			 dev_priv->rps.rp1_freq);
+
 	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
 			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -3855,11 +4002,142 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
+static void cherryview_init_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	cherryview_setup_pctx(dev);
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+			 dev_priv->rps.max_freq);
+
+	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
+
+	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+			 dev_priv->rps.rp1_freq);
+
+	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+			 dev_priv->rps.min_freq);
+
+	/* Preserve min/max settings in case of re-init */
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
 {
 	valleyview_cleanup_pctx(dev);
 }
 
+static void cherryview_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+	int i;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	gtfifodbg = I915_READ(GTFIFODBG);
+	if (gtfifodbg) {
+		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+				 gtfifodbg);
+		I915_WRITE(GTFIFODBG, gtfifodbg);
+	}
+
+	cherryview_check_pctx(dev_priv);
+
+	/* 1a & 1b: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+	/* 2a: Program RC6 thresholds.*/
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+
+	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+	/* allows RC6 residency counter to work */
+	I915_WRITE(VLV_COUNTER_CONTROL,
+		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+				      VLV_MEDIA_RC6_COUNT_EN |
+				      VLV_RENDER_RC6_COUNT_EN));
+
+	/* For now we assume BIOS is allocating and populating the PCBR */
+	pcbr = I915_READ(VLV_PCBR);
+
+	DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
+
+	/* 3: Enable RC6 */
+	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
+		rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+	/* 4 Program defaults and thresholds for RPS*/
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+	I915_WRITE(GEN6_RP_UP_EI, 66000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	/* WaDisablePwrmtrEvent:chv (pre-production hw) */
+	I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+	I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
+	/* 5: Enable RPS */
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_AVG);
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+			 dev_priv->rps.cur_freq);
+
+	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
+
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+	gen8_enable_rps_interrupts(dev);
+
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
 static void valleyview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3886,6 +4164,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
 
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
@@ -3906,9 +4185,11 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
 	/* allows RC6 residency counter to work */
 	I915_WRITE(VLV_COUNTER_CONTROL,
-		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+				      VLV_RENDER_RC0_COUNT_EN |
 				      VLV_MEDIA_RC6_COUNT_EN |
 				      VLV_RENDER_RC6_COUNT_EN));
+
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
@@ -4666,33 +4947,60 @@ void intel_init_gt_powersave(struct drm_device *dev)
 {
 	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
 
-	if (IS_VALLEYVIEW(dev))
+	if (IS_CHERRYVIEW(dev))
+		cherryview_init_gt_powersave(dev);
+	else if (IS_VALLEYVIEW(dev))
 		valleyview_init_gt_powersave(dev);
 }
 
 void intel_cleanup_gt_powersave(struct drm_device *dev)
 {
-	if (IS_VALLEYVIEW(dev))
+	if (IS_CHERRYVIEW(dev))
+		return;
+	else if (IS_VALLEYVIEW(dev))
 		valleyview_cleanup_gt_powersave(dev);
 }
 
+/**
+ * intel_suspend_gt_powersave - suspend PM work and helper threads
+ * @dev: drm device
+ *
+ * We don't want to disable RC6 or other features here, we just want
+ * to make sure any work we've queued has finished and won't bother
+ * us while we're suspended.
+ */
+void intel_suspend_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Interrupts should be disabled already to avoid re-arming. */
+	WARN_ON(intel_irqs_enabled(dev_priv));
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+	cancel_work_sync(&dev_priv->rps.work);
+
+	/* Force GPU to min freq during suspend */
+	gen6_rps_idle(dev_priv);
+}
+
 void intel_disable_gt_powersave(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* Interrupts should be disabled already to avoid re-arming. */
-	WARN_ON(dev->irq_enabled);
+	WARN_ON(intel_irqs_enabled(dev_priv));
 
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_disable_drps(dev);
 		ironlake_disable_rc6(dev);
-	} else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
-		if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
-			intel_runtime_pm_put(dev_priv);
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		intel_suspend_gt_powersave(dev);
 
-		cancel_work_sync(&dev_priv->rps.work);
 		mutex_lock(&dev_priv->rps.hw_lock);
-		if (IS_VALLEYVIEW(dev))
+		if (IS_CHERRYVIEW(dev))
+			cherryview_disable_rps(dev);
+		else if (IS_VALLEYVIEW(dev))
 			valleyview_disable_rps(dev);
 		else
 			gen6_disable_rps(dev);
@@ -4710,7 +5018,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
-	if (IS_VALLEYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev)) {
+		cherryview_enable_rps(dev);
+	} else if (IS_VALLEYVIEW(dev)) {
 		valleyview_enable_rps(dev);
 	} else if (IS_BROADWELL(dev)) {
 		gen8_enable_rps(dev);
@@ -4735,7 +5045,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
 		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
 		mutex_unlock(&dev->struct_mutex);
-	} else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+	} else if (INTEL_INFO(dev)->gen >= 6) {
 		/*
 		 * PCU communication is slow and this doesn't need to be
 		 * done at any specific time, so do this out of our fast path
@@ -5108,7 +5418,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
 
 	I915_WRITE(_3D_CHICKEN3,
-		   _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
 
 	I915_WRITE(COMMON_SLICE_CHICKEN2,
 		   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5343,10 +5653,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 	}
 	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
-	dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
-	DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
-			 dev_priv->vlv_cdclk_freq);
-
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableEarlyCull:vlv */
@@ -5421,6 +5727,35 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
 static void cherryview_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	switch ((val >> 2) & 0x7) {
+	case 0:
+	case 1:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
+		dev_priv->mem_freq = 1600;
+		break;
+	case 2:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
+		dev_priv->mem_freq = 1600;
+		break;
+	case 3:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
+		dev_priv->mem_freq = 2000;
+		break;
+	case 4:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
+		dev_priv->mem_freq = 1600;
+		break;
+	case 5:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
+		dev_priv->mem_freq = 1600;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
@@ -5661,7 +5996,6 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
-	unsigned long irqflags;
 
 	/*
 	 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -5677,21 +6011,8 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
 	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
 
-	if (IS_BROADWELL(dev)) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
-			   dev_priv->de_irq_mask[PIPE_B]);
-		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
-			   ~dev_priv->de_irq_mask[PIPE_B] |
-			   GEN8_PIPE_VBLANK);
-		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
-			   dev_priv->de_irq_mask[PIPE_C]);
-		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
-			   ~dev_priv->de_irq_mask[PIPE_C] |
-			   GEN8_PIPE_VBLANK);
-		POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	if (IS_BROADWELL(dev))
+		gen8_irq_power_well_post_enable(dev_priv);
 }
 
 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -5762,34 +6083,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
 	return true;
 }
 
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
-			  enum punit_power_well power_well_id, bool enable)
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+			       struct i915_power_well *power_well, bool enable)
 {
-	struct drm_device *dev = dev_priv->dev;
+	enum punit_power_well power_well_id = power_well->data;
 	u32 mask;
 	u32 state;
 	u32 ctrl;
-	enum pipe pipe;
-
-	if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
-		if (enable) {
-			/*
-			 * Enable the CRI clock source so we can get at the
-			 * display and the reference clock for VGA
-			 * hotplug / manual detection.
-			 */
-			I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-				   DPLL_REFA_CLK_ENABLE_VLV |
-				   DPLL_INTEGRATED_CRI_CLK_VLV);
-			udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-		} else {
-			for_each_pipe(pipe)
-				assert_pll_disabled(dev_priv, pipe);
-			/* Assert common reset */
-			I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
-				   ~DPIO_CMNRST);
-		}
-	}
 
 	mask = PUNIT_PWRGT_MASK(power_well_id);
 	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -5817,28 +6117,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv, | |||
5817 | 6117 | ||
5818 | out: | 6118 | out: |
5819 | mutex_unlock(&dev_priv->rps.hw_lock); | 6119 | mutex_unlock(&dev_priv->rps.hw_lock); |
5820 | |||
5821 | /* | ||
5822 | * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - | ||
5823 | * 6. De-assert cmn_reset/side_reset. Same as VLV X0. | ||
5824 | * a. GUnit 0x2110 bit[0] set to 1 (def 0) | ||
5825 | * b. The other bits such as sfr settings / modesel may all | ||
5826 | * be set to 0. | ||
5827 | * | ||
5828 | * This should only be done on init and resume from S3 with | ||
5829 | * both PLLs disabled, or we risk losing DPIO and PLL | ||
5830 | * synchronization. | ||
5831 | */ | ||
5832 | if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable) | ||
5833 | I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); | ||
5834 | } | ||
5835 | |||
5836 | static void vlv_set_power_well(struct drm_i915_private *dev_priv, | ||
5837 | struct i915_power_well *power_well, bool enable) | ||
5838 | { | ||
5839 | enum punit_power_well power_well_id = power_well->data; | ||
5840 | |||
5841 | __vlv_set_power_well(dev_priv, power_well_id, enable); | ||
5842 | } | 6120 | } |
5843 | 6121 | ||
5844 | static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv, | 6122 | static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv, |
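The context lines elide the middle of vlv_set_power_well(); with the DPIO special-casing moved out, what remains is the plain Punit power-gating handshake. Based on the visible mask/state/ctrl setup and the out: label, it plausibly follows the usual request-then-poll shape (the PUNIT_REG_PWRGT_CTRL/STATUS register names and the wait loop are assumptions here, not taken from this hunk):

	mutex_lock(&dev_priv->rps.hw_lock);

	/* Skip the request if the well is already in the wanted state. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	if ((ctrl & mask) == state)
		goto out;

	/* Request the new state... */
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	/* ...and wait for the Punit to ack it in the status register. */
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) &
		      mask) == state, 100))
		DRM_ERROR("timeout setting power well state\n");

out:
	mutex_unlock(&dev_priv->rps.hw_lock);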
@@ -5930,6 +6208,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, | |||
5930 | vlv_set_power_well(dev_priv, power_well, false); | 6208 | vlv_set_power_well(dev_priv, power_well, false); |
5931 | } | 6209 | } |
5932 | 6210 | ||
6211 | static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, | ||
6212 | struct i915_power_well *power_well) | ||
6213 | { | ||
6214 | WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); | ||
6215 | |||
6216 | /* | ||
6217 | * Enable the CRI clock source so we can get at the | ||
6218 | * display and the reference clock for VGA | ||
6219 | * hotplug / manual detection. | ||
6220 | */ | ||
6221 | I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | | ||
6222 | DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); | ||
6223 | udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ | ||
6224 | |||
6225 | vlv_set_power_well(dev_priv, power_well, true); | ||
6226 | |||
6227 | /* | ||
6228 | * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - | ||
6229 | * 6. De-assert cmn_reset/side_reset. Same as VLV X0. | ||
6230 | * a. GUnit 0x2110 bit[0] set to 1 (def 0) | ||
6231 | * b. The other bits such as sfr settings / modesel may all | ||
6232 | * be set to 0. | ||
6233 | * | ||
6234 | * This should only be done on init and resume from S3 with | ||
6235 | * both PLLs disabled, or we risk losing DPIO and PLL | ||
6236 | * synchronization. | ||
6237 | */ | ||
6238 | I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); | ||
6239 | } | ||
6240 | |||
6241 | static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, | ||
6242 | struct i915_power_well *power_well) | ||
6243 | { | ||
6244 | struct drm_device *dev = dev_priv->dev; | ||
6245 | enum pipe pipe; | ||
6246 | |||
6247 | WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); | ||
6248 | |||
6249 | for_each_pipe(pipe) | ||
6250 | assert_pll_disabled(dev_priv, pipe); | ||
6251 | |||
6252 | /* Assert common reset */ | ||
6253 | I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); | ||
6254 | |||
6255 | vlv_set_power_well(dev_priv, power_well, false); | ||
6256 | } | ||
6257 | |||
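Note the asymmetry between the two new hooks: enable brings up the CRI clock source before ungating the well, while disable first verifies every pipe PLL is off and asserts cmnreset before gating. The assert_pll_disabled() check lives in intel_display.c; a sketch of the kind of check it performs (the exact form is an assumption):

	/* Sketch only: the real helper is in intel_display.c. */
	static void assert_pll_disabled(struct drm_i915_private *dev_priv,
					enum pipe pipe)
	{
		u32 val = I915_READ(DPLL(pipe));

		WARN(val & DPLL_VCO_ENABLE,
		     "PLL for pipe %c still enabled\n", pipe_name(pipe));
	}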
5933 | static void check_power_well_state(struct drm_i915_private *dev_priv, | 6258 | static void check_power_well_state(struct drm_i915_private *dev_priv, |
5934 | struct i915_power_well *power_well) | 6259 | struct i915_power_well *power_well) |
5935 | { | 6260 | { |
@@ -6079,6 +6404,7 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq); | |||
6079 | BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ | 6404 | BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ |
6080 | BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ | 6405 | BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ |
6081 | BIT(POWER_DOMAIN_PORT_CRT) | \ | 6406 | BIT(POWER_DOMAIN_PORT_CRT) | \ |
6407 | BIT(POWER_DOMAIN_PLLS) | \ | ||
6082 | BIT(POWER_DOMAIN_INIT)) | 6408 | BIT(POWER_DOMAIN_INIT)) |
6083 | #define HSW_DISPLAY_POWER_DOMAINS ( \ | 6409 | #define HSW_DISPLAY_POWER_DOMAINS ( \ |
6084 | (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ | 6410 | (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ |
@@ -6178,6 +6504,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = { | |||
6178 | .is_enabled = vlv_power_well_enabled, | 6504 | .is_enabled = vlv_power_well_enabled, |
6179 | }; | 6505 | }; |
6180 | 6506 | ||
6507 | static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { | ||
6508 | .sync_hw = vlv_power_well_sync_hw, | ||
6509 | .enable = vlv_dpio_cmn_power_well_enable, | ||
6510 | .disable = vlv_dpio_cmn_power_well_disable, | ||
6511 | .is_enabled = vlv_power_well_enabled, | ||
6512 | }; | ||
6513 | |||
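With a dedicated ops table for the DPIO common well, the power-domains core never needs to special-case it; every well is driven uniformly through its vtable. An illustrative (hypothetical) helper showing the dispatch pattern the table above enables:

	/* Hypothetical call-site sketch; not part of this diff. */
	static void power_well_toggle(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well,
				      bool enable)
	{
		if (enable)
			power_well->ops->enable(dev_priv, power_well);
		else
			power_well->ops->disable(dev_priv, power_well);
	}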
6181 | static const struct i915_power_well_ops vlv_dpio_power_well_ops = { | 6514 | static const struct i915_power_well_ops vlv_dpio_power_well_ops = { |
6182 | .sync_hw = vlv_power_well_sync_hw, | 6515 | .sync_hw = vlv_power_well_sync_hw, |
6183 | .enable = vlv_power_well_enable, | 6516 | .enable = vlv_power_well_enable, |
@@ -6238,10 +6571,25 @@ static struct i915_power_well vlv_power_wells[] = { | |||
6238 | .name = "dpio-common", | 6571 | .name = "dpio-common", |
6239 | .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, | 6572 | .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, |
6240 | .data = PUNIT_POWER_WELL_DPIO_CMN_BC, | 6573 | .data = PUNIT_POWER_WELL_DPIO_CMN_BC, |
6241 | .ops = &vlv_dpio_power_well_ops, | 6574 | .ops = &vlv_dpio_cmn_power_well_ops, |
6242 | }, | 6575 | }, |
6243 | }; | 6576 | }; |
6244 | 6577 | ||
6578 | static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, | ||
6579 | enum punit_power_well power_well_id) | ||
6580 | { | ||
6581 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | ||
6582 | struct i915_power_well *power_well; | ||
6583 | int i; | ||
6584 | |||
6585 | for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { | ||
6586 | if (power_well->data == power_well_id) | ||
6587 | return power_well; | ||
6588 | } | ||
6589 | |||
6590 | return NULL; | ||
6591 | } | ||
6592 | |||
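lookup_power_well() is a simple linear scan keyed on power_well->data (the Punit well id), iterated with an all-domains mask so no well is skipped. Example use, mirroring vlv_cmnlane_wa() further below:

	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);

	if (WARN_ON(!cmn))
		return;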
6245 | #define set_power_wells(power_domains, __power_wells) ({ \ | 6593 | #define set_power_wells(power_domains, __power_wells) ({ \ |
6246 | (power_domains)->power_wells = (__power_wells); \ | 6594 | (power_domains)->power_wells = (__power_wells); \ |
6247 | (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ | 6595 | (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ |
@@ -6292,11 +6640,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv) | |||
6292 | mutex_unlock(&power_domains->lock); | 6640 | mutex_unlock(&power_domains->lock); |
6293 | } | 6641 | } |
6294 | 6642 | ||
6643 | static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) | ||
6644 | { | ||
6645 | struct i915_power_well *cmn = | ||
6646 | lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); | ||
6647 | struct i915_power_well *disp2d = | ||
6648 | lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D); | ||
6649 | |||
6650 | /* nothing to do if common lane is already off */ | ||
6651 | if (!cmn->ops->is_enabled(dev_priv, cmn)) | ||
6652 | return; | ||
6653 | |||
6654 | /* If the display might already be active, skip this */ | ||
6655 | if (disp2d->ops->is_enabled(dev_priv, disp2d) && | ||
6656 | I915_READ(DPIO_CTL) & DPIO_CMNRST) | ||
6657 | return; | ||
6658 | |||
6659 | DRM_DEBUG_KMS("toggling display PHY side reset\n"); | ||
6660 | |||
6661 | /* cmnlane needs DPLL registers */ | ||
6662 | disp2d->ops->enable(dev_priv, disp2d); | ||
6663 | |||
6664 | /* | ||
6665 | * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: | ||
6666 | * Need to assert and de-assert PHY SB reset by gating the | ||
6667 | * common lane power, then un-gating it. | ||
6668 | * Simply ungating isn't enough to reset the PHY enough to get | ||
6669 | * ports and lanes running. | ||
6670 | */ | ||
6671 | cmn->ops->disable(dev_priv, cmn); | ||
6672 | } | ||
6673 | |||
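Note the asymmetry in vlv_cmnlane_wa(): it only disables the common-lane well (after first powering up DISP2D so the DPLL registers are accessible). Re-enabling is deliberately left to the normal power-domains resume path, visible in intel_power_domains_init_hw() below, which goes through vlv_dpio_cmn_power_well_enable() and thereby completes the assert/de-assert cycle the VBIOS notes call for.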
6295 | void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) | 6674 | void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) |
6296 | { | 6675 | { |
6676 | struct drm_device *dev = dev_priv->dev; | ||
6297 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 6677 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
6298 | 6678 | ||
6299 | power_domains->initializing = true; | 6679 | power_domains->initializing = true; |
6680 | |||
6681 | if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { | ||
6682 | mutex_lock(&power_domains->lock); | ||
6683 | vlv_cmnlane_wa(dev_priv); | ||
6684 | mutex_unlock(&power_domains->lock); | ||
6685 | } | ||
6686 | |||
6300 | /* For now, we need the power well to be always enabled. */ | 6687 | /* For now, we need the power well to be always enabled. */ |
6301 | intel_display_set_init_power(dev_priv, true); | 6688 | intel_display_set_init_power(dev_priv, true); |
6302 | intel_power_domains_resume(dev_priv); | 6689 | intel_power_domains_resume(dev_priv); |
@@ -6469,7 +6856,7 @@ void intel_init_pm(struct drm_device *dev) | |||
6469 | (dev_priv->is_ddr3 == 1) ? "3" : "2", | 6856 | (dev_priv->is_ddr3 == 1) ? "3" : "2", |
6470 | dev_priv->fsb_freq, dev_priv->mem_freq); | 6857 | dev_priv->fsb_freq, dev_priv->mem_freq); |
6471 | /* Disable CxSR and never update its watermark again */ | 6858 | /* Disable CxSR and never update its watermark again */ |
6472 | pineview_disable_cxsr(dev); | 6859 | intel_set_memory_cxsr(dev_priv, false); |
6473 | dev_priv->display.update_wm = NULL; | 6860 | dev_priv->display.update_wm = NULL; |
6474 | } else | 6861 | } else |
6475 | dev_priv->display.update_wm = pineview_update_wm; | 6862 | dev_priv->display.update_wm = pineview_update_wm; |
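pineview_disable_cxsr() is replaced by the cross-platform intel_set_memory_cxsr() helper. A minimal sketch of its Pineview leg, assuming the same DSPFW3 self-refresh bit the old per-platform helper used (other platform legs elided):

	/* Sketch, assuming the Pineview path toggles
	 * PINEVIEW_SELF_REFRESH_EN in DSPFW3. */
	void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
	{
		struct drm_device *dev = dev_priv->dev;
		u32 val;

		if (IS_PINEVIEW(dev)) {
			val = I915_READ(DSPFW3);
			if (enable)
				val |= PINEVIEW_SELF_REFRESH_EN;
			else
				val &= ~PINEVIEW_SELF_REFRESH_EN;
			I915_WRITE(DSPFW3, val);
		}
		/* ... other platforms elided ... */
	}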
@@ -6552,7 +6939,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) | |||
6552 | return 0; | 6939 | return 0; |
6553 | } | 6940 | } |
6554 | 6941 | ||
6555 | int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) | 6942 | static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) |
6556 | { | 6943 | { |
6557 | int div; | 6944 | int div; |
6558 | 6945 | ||
@@ -6574,7 +6961,7 @@ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) | |||
6574 | return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div); | 6961 | return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div); |
6575 | } | 6962 | } |
6576 | 6963 | ||
6577 | int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) | 6964 | static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) |
6578 | { | 6965 | { |
6579 | int mul; | 6966 | int mul; |
6580 | 6967 | ||
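The renamed byt_gpu_freq()/byt_freq_opcode() keep the old Baytrail conversion; only the final formula is visible above, the mem_freq-dependent divider selection is elided by the diff. A worked check, assuming a 1333 MHz memory frequency selects div == 16 (that pairing is an assumption, not shown here):

	int val = 0xc0;		/* example Punit opcode, 192 decimal */
	int freq = DIV_ROUND_CLOSEST(1333 * (val + 6 - 0xbd), 4 * 16);
	/* 1333 * 9 / 64 rounds to 187 MHz */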
@@ -6596,6 +6983,80 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) | |||
6596 | return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; | 6983 | return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; |
6597 | } | 6984 | } |
6598 | 6985 | ||
6986 | static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) | ||
6987 | { | ||
6988 | int div, freq; | ||
6989 | |||
6990 | switch (dev_priv->rps.cz_freq) { | ||
6991 | case 200: | ||
6992 | div = 5; | ||
6993 | break; | ||
6994 | case 267: | ||
6995 | div = 6; | ||
6996 | break; | ||
6997 | case 320: | ||
6998 | case 333: | ||
6999 | case 400: | ||
7000 | div = 8; | ||
7001 | break; | ||
7002 | default: | ||
7003 | return -1; | ||
7004 | } | ||
7005 | |||
7006 | freq = DIV_ROUND_CLOSEST(dev_priv->rps.cz_freq * val, 2 * div) / 2; | ||
7007 | |||
7008 | return freq; | ||
7009 | } | ||
7010 | |||
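A worked example straight from the code above: with cz_freq == 320 the switch selects div == 8, so the conversion collapses to freq = round(320 * val / 16) / 2 = 10 * val.

	int val = 50;						/* example opcode */
	int freq = DIV_ROUND_CLOSEST(320 * val, 2 * 8) / 2;	/* = 500 MHz */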
7011 | static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) | ||
7012 | { | ||
7013 | int mul, opcode; | ||
7014 | |||
7015 | switch (dev_priv->rps.cz_freq) { | ||
7016 | case 200: | ||
7017 | mul = 5; | ||
7018 | break; | ||
7019 | case 267: | ||
7020 | mul = 6; | ||
7021 | break; | ||
7022 | case 320: | ||
7023 | case 333: | ||
7024 | case 400: | ||
7025 | mul = 8; | ||
7026 | break; | ||
7027 | default: | ||
7028 | return -1; | ||
7029 | } | ||
7030 | |||
7031 | opcode = DIV_ROUND_CLOSEST(val * 2 * mul, dev_priv->rps.cz_freq) * 2; | ||
7032 | |||
7033 | return opcode; | ||
7034 | } | ||
7035 | |||
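chv_freq_opcode() is the inverse mapping, and the two round-trip for frequencies on the grid the divider implies (10 MHz steps at cz_freq == 320):

	int freq = 500;						/* MHz */
	int opcode = DIV_ROUND_CLOSEST(freq * 2 * 8, 320) * 2;	/* = 50 */
	/* chv_gpu_freq(dev_priv, 50) gives back 500 MHz. */

Off-grid inputs get rounded to the nearest representable opcode rather than rejected.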
7036 | int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) | ||
7037 | { | ||
7038 | int ret = -1; | ||
7039 | |||
7040 | if (IS_CHERRYVIEW(dev_priv->dev)) | ||
7041 | ret = chv_gpu_freq(dev_priv, val); | ||
7042 | else if (IS_VALLEYVIEW(dev_priv->dev)) | ||
7043 | ret = byt_gpu_freq(dev_priv, val); | ||
7044 | |||
7045 | return ret; | ||
7046 | } | ||
7047 | |||
7048 | int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) | ||
7049 | { | ||
7050 | int ret = -1; | ||
7051 | |||
7052 | if (IS_CHERRYVIEW(dev_priv->dev)) | ||
7053 | ret = chv_freq_opcode(dev_priv, val); | ||
7054 | else if (IS_VALLEYVIEW(dev_priv->dev)) | ||
7055 | ret = byt_freq_opcode(dev_priv, val); | ||
7056 | |||
7057 | return ret; | ||
7058 | } | ||
7059 | |||
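The exported vlv_gpu_freq()/vlv_freq_opcode() names are kept so existing callers need no change; they now just dispatch per platform. Checking IS_CHERRYVIEW() before IS_VALLEYVIEW() matters if the latter is also true on CherryView, as it was in the driver at this point. A typical caller pattern (the exact call site is not part of this diff):

	/* Hypothetical usage sketch, e.g. for a debug print: */
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq));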
6599 | void intel_pm_setup(struct drm_device *dev) | 7060 | void intel_pm_setup(struct drm_device *dev) |
6600 | { | 7061 | { |
6601 | struct drm_i915_private *dev_priv = dev->dev_private; | 7062 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -6606,5 +7067,5 @@ void intel_pm_setup(struct drm_device *dev) | |||
6606 | intel_gen6_powersave_work); | 7067 | intel_gen6_powersave_work); |
6607 | 7068 | ||
6608 | dev_priv->pm.suspended = false; | 7069 | dev_priv->pm.suspended = false; |
6609 | dev_priv->pm.irqs_disabled = false; | 7070 | dev_priv->pm._irqs_disabled = false; |
6610 | } | 7071 | } |