-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h		6
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c		179
2 files changed, 139 insertions, 46 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d9d07b70f05c..fb7fd7de2b76 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -324,6 +324,12 @@ struct i915_hotplug {
 			    &dev->mode_config.plane_list,	\
 			    base.head)
 
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask)	\
+	list_for_each_entry(intel_plane, &dev->mode_config.plane_list,	\
+			    base.head)					\
+		for_each_if ((plane_mask) &				\
+			     (1 << drm_plane_index(&intel_plane->base)))
+
 #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
 	list_for_each_entry(intel_plane,				\
 			    &(dev)->mode_config.plane_list,	\
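The new for_each_intel_plane_mask() iterator visits only the planes whose index bit is set in plane_mask. A minimal userspace sketch of that bitmask filter follows; NUM_PLANES, plane_idx and the printf harness are illustrative stand-ins, not kernel API:

/* Sketch of the filtering done by for_each_intel_plane_mask(): walk all
 * planes but skip any whose index bit is clear in plane_mask.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_PLANES 4	/* illustrative only */

int main(void)
{
	uint32_t plane_mask = (1u << 0) | (1u << 2);	/* planes 0 and 2 */
	int plane_idx;

	for (plane_idx = 0; plane_idx < NUM_PLANES; plane_idx++) {
		if (!(plane_mask & (1u << plane_idx)))
			continue;	/* what for_each_if() filters out */
		printf("visiting plane %d\n", plane_idx);
	}
	return 0;
}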
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 05b04a6ca37a..ca38f6c0e3c8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2849,13 +2849,25 @@ skl_wm_plane_id(const struct intel_plane *plane)
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 				   const struct intel_crtc_state *cstate,
-				   const struct intel_wm_config *config,
-				   struct skl_ddb_entry *alloc /* out */)
+				   struct intel_wm_config *config,
+				   struct skl_ddb_entry *alloc, /* out */
+				   int *num_active /* out */)
 {
+	struct drm_atomic_state *state = cstate->base.state;
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *for_crtc = cstate->base.crtc;
 	struct drm_crtc *crtc;
 	unsigned int pipe_size, ddb_size;
 	int nth_active_pipe;
+	int pipe = to_intel_crtc(for_crtc)->pipe;
+
+	if (intel_state && intel_state->active_pipe_changes)
+		*num_active = hweight32(intel_state->active_crtcs);
+	else if (intel_state)
+		*num_active = hweight32(dev_priv->active_crtcs);
+	else
+		*num_active = config->num_pipes_active;
 
 	if (!cstate->base.active) {
 		alloc->start = 0;
@@ -2870,25 +2882,56 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 
 	ddb_size -= 4; /* 4 blocks for bypass path allocation */
 
-	nth_active_pipe = 0;
-	for_each_crtc(dev, crtc) {
-		if (!to_intel_crtc(crtc)->active)
-			continue;
+	/*
+	 * FIXME: At the moment we may be called on either in-flight or fully
+	 * committed cstate's. Once we fully move DDB allocation in the check
+	 * phase, we'll only be called on in-flight states and the 'else'
+	 * branch here will go away.
+	 *
+	 * The 'else' branch is slightly racy here, but it was racy to begin
+	 * with; since it's going away soon, no effort is made to address that.
+	 */
+	if (state) {
+		/*
+		 * If the state doesn't change the active CRTC's, then there's
+		 * no need to recalculate; the existing pipe allocation limits
+		 * should remain unchanged.  Note that we're safe from racing
+		 * commits since any racing commit that changes the active CRTC
+		 * list would need to grab _all_ crtc locks, including the one
+		 * we currently hold.
+		 */
+		if (!intel_state->active_pipe_changes) {
+			*alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
+			return;
+		}
 
-		if (crtc == for_crtc)
-			break;
+		nth_active_pipe = hweight32(intel_state->active_crtcs &
+					    (drm_crtc_mask(for_crtc) - 1));
+		pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
+		alloc->start = nth_active_pipe * ddb_size / *num_active;
+		alloc->end = alloc->start + pipe_size;
+	} else {
+		nth_active_pipe = 0;
+		for_each_crtc(dev, crtc) {
+			if (!to_intel_crtc(crtc)->active)
+				continue;
 
-		nth_active_pipe++;
-	}
+			if (crtc == for_crtc)
+				break;
 
-	pipe_size = ddb_size / config->num_pipes_active;
-	alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
-	alloc->end = alloc->start + pipe_size;
+			nth_active_pipe++;
+		}
+
+		pipe_size = ddb_size / config->num_pipes_active;
+		alloc->start = nth_active_pipe * ddb_size /
+			       config->num_pipes_active;
+		alloc->end = alloc->start + pipe_size;
+	}
 }
 
-static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
+static unsigned int skl_cursor_allocation(int num_active)
 {
-	if (config->num_pipes_active == 1)
+	if (num_active == 1)
 		return 32;
 
 	return 8;
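The arithmetic of the new in-flight branch can be checked in isolation. The sketch below reproduces it in userspace with made-up numbers; the 896-block DDB size and the choice of active pipes are assumptions for illustration, not values taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for the kernel's hweight32() */
static int hweight32(uint32_t v) { return __builtin_popcount(v); }

int main(void)
{
	uint32_t active_crtcs = 0x5;		/* pipes A and C active (bits 0, 2) */
	uint32_t for_crtc_mask = 1u << 2;	/* allocating for pipe C */
	unsigned int ddb_size = 896 - 4;	/* assumed size, minus bypass blocks */
	int num_active = hweight32(active_crtcs);

	/* count the active pipes whose bit lies below this crtc's bit */
	int nth_active_pipe = hweight32(active_crtcs & (for_crtc_mask - 1));
	unsigned int pipe_size = ddb_size / num_active;
	unsigned int start = nth_active_pipe * ddb_size / num_active;

	printf("pipe C gets blocks [%u, %u)\n", start, start + pipe_size);
	return 0;
}

With two active pipes, pipe C is the second one (nth_active_pipe == 1) and receives the upper half of the space, [446, 892); counting set bits below the crtc's own bit is what lets the split be computed directly from the in-flight active_crtcs mask instead of walking every crtc.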
@@ -3054,33 +3097,44 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
 	return total_data_rate;
 }
 
-static void
+static int
 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		      struct skl_ddb_allocation *ddb /* out */)
 {
+	struct drm_atomic_state *state = cstate->base.state;
 	struct drm_crtc *crtc = cstate->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_wm_config *config = &dev_priv->wm.config;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_plane *intel_plane;
+	struct drm_plane *plane;
+	struct drm_plane_state *pstate;
 	enum pipe pipe = intel_crtc->pipe;
 	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
 	uint16_t alloc_size, start, cursor_blocks;
 	uint16_t *minimum = cstate->wm.skl.minimum_blocks;
 	uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
 	unsigned int total_data_rate;
+	int num_active;
+	int id, i;
 
-	skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
+	if (!cstate->base.active) {
+		ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
+		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+		memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+		return 0;
+	}
+
+	skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc,
+					   &num_active);
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0) {
 		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-		memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
-		       sizeof(ddb->plane[pipe][PLANE_CURSOR]));
-		return;
+		return 0;
 	}
 
-	cursor_blocks = skl_cursor_allocation(config);
+	cursor_blocks = skl_cursor_allocation(num_active);
 	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
 	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
 
@@ -3088,21 +3142,55 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	alloc->end -= cursor_blocks;
 
 	/* 1. Allocate the mininum required blocks for each active plane */
-	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-		struct drm_plane *plane = &intel_plane->base;
-		struct drm_framebuffer *fb = plane->state->fb;
-		int id = skl_wm_plane_id(intel_plane);
+	/*
+	 * TODO: Remove support for already-committed state once we
+	 * only allocate DDB on in-flight states.
+	 */
+	if (state) {
+		for_each_plane_in_state(state, plane, pstate, i) {
+			intel_plane = to_intel_plane(plane);
+			id = skl_wm_plane_id(intel_plane);
 
-		if (!to_intel_plane_state(plane->state)->visible)
-			continue;
+			if (intel_plane->pipe != pipe)
+				continue;
 
-		if (plane->type == DRM_PLANE_TYPE_CURSOR)
-			continue;
+			if (!to_intel_plane_state(pstate)->visible) {
+				minimum[id] = 0;
+				y_minimum[id] = 0;
+				continue;
+			}
+			if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+				minimum[id] = 0;
+				y_minimum[id] = 0;
+				continue;
+			}
+
+			minimum[id] = 8;
+			if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
+				y_minimum[id] = 8;
+			else
+				y_minimum[id] = 0;
+		}
+	} else {
+		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+			struct drm_plane *plane = &intel_plane->base;
+			struct drm_framebuffer *fb = plane->state->fb;
+			int id = skl_wm_plane_id(intel_plane);
+
+			if (!to_intel_plane_state(plane->state)->visible)
+				continue;
+
+			if (plane->type == DRM_PLANE_TYPE_CURSOR)
+				continue;
+
+			minimum[id] = 8;
+			y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
+		}
+	}
 
-		minimum[id] = 8;
-		alloc_size -= minimum[id];
-		y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
-		alloc_size -= y_minimum[id];
-	}
+	for (i = 0; i < PLANE_CURSOR; i++) {
+		alloc_size -= minimum[i];
+		alloc_size -= y_minimum[i];
+	}
 
 	/*
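Step 1 above reserves a fixed minimum for each visible, non-cursor plane and then removes those blocks from the pool in the new PLANE_CURSOR-bounded loop. A standalone sketch with invented plane state follows; the PLANE_CURSOR value and the alloc_size figure are illustrative assumptions:

#include <stdio.h>
#include <stdint.h>

#define PLANE_CURSOR 3	/* assumed: three regular planes, then the cursor */

int main(void)
{
	/* planes 0 and 1 visible; plane 0 scans out NV12, so it also needs Y blocks */
	uint16_t minimum[PLANE_CURSOR]   = { 8, 8, 0 };
	uint16_t y_minimum[PLANE_CURSOR] = { 8, 0, 0 };
	unsigned int alloc_size = 438;	/* assumed leftover after the cursor blocks */
	int i;

	for (i = 0; i < PLANE_CURSOR; i++) {
		alloc_size -= minimum[i];
		alloc_size -= y_minimum[i];
	}
	printf("blocks left for data-rate allocation: %u\n", alloc_size);
	return 0;
}

Because invisible and cursor planes now explicitly write 0 into minimum[] and y_minimum[], the separate subtraction loop charges nothing for them while still covering every plane index.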
@@ -3113,21 +3201,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	 */
 	total_data_rate = skl_get_total_relative_data_rate(cstate);
 	if (total_data_rate == 0)
-		return;
+		return 0;
 
 	start = alloc->start;
 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-		struct drm_plane *plane = &intel_plane->base;
-		struct drm_plane_state *pstate = intel_plane->base.state;
 		unsigned int data_rate, y_data_rate;
 		uint16_t plane_blocks, y_plane_blocks = 0;
 		int id = skl_wm_plane_id(intel_plane);
 
-		if (!to_intel_plane_state(pstate)->visible)
-			continue;
-		if (plane->type == DRM_PLANE_TYPE_CURSOR)
-			continue;
-
 		data_rate = cstate->wm.skl.plane_data_rate[id];
 
 		/*
@@ -3139,8 +3220,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
 					total_data_rate);
 
-		ddb->plane[pipe][id].start = start;
-		ddb->plane[pipe][id].end = start + plane_blocks;
+		/* Leave disabled planes at (0,0) */
+		if (data_rate) {
+			ddb->plane[pipe][id].start = start;
+			ddb->plane[pipe][id].end = start + plane_blocks;
+		}
 
 		start += plane_blocks;
 
@@ -3153,12 +3237,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
 					  total_data_rate);
 
-		ddb->y_plane[pipe][id].start = start;
-		ddb->y_plane[pipe][id].end = start + y_plane_blocks;
+		if (y_data_rate) {
+			ddb->y_plane[pipe][id].start = start;
+			ddb->y_plane[pipe][id].end = start + y_plane_blocks;
+		}
 
 		start += y_plane_blocks;
 	}
 
+	return 0;
 }
 
 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
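Step 2 then hands out the remaining blocks in proportion to each plane's relative data rate, on top of its reserved minimum; with this patch a plane whose data rate is zero is simply left at (0,0) instead of receiving a stale range. A userspace sketch with invented rates (the rate values and the three-plane layout are assumptions, not from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int alloc_size = 414;			/* from the previous sketch */
	uint16_t minimum[3] = { 8, 8, 0 };		/* reserved in step 1 */
	unsigned int data_rate[3] = { 300000, 100000, 0 };
	unsigned int total_data_rate = 400000;
	unsigned int start = 0;				/* alloc->start in the driver */
	int id;

	for (id = 0; id < 3; id++) {
		unsigned int plane_blocks = minimum[id];

		plane_blocks += (unsigned int)((uint64_t)alloc_size *
					       data_rate[id] / total_data_rate);

		if (data_rate[id])	/* disabled planes stay at (0,0) */
			printf("plane %d: blocks [%u, %u)\n",
			       id, start, start + plane_blocks);

		start += plane_blocks;
	}
	return 0;
}

Here plane 0 ends up with [0, 318) and plane 1 with [318, 429), while plane 2 keeps an empty entry; the running start value advances exactly as in skl_allocate_pipe_ddb().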
@@ -3649,7 +3736,7 @@ static bool skl_update_pipe_wm(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 
-	skl_allocate_pipe_ddb(cstate, ddb);
+	WARN_ON(skl_allocate_pipe_ddb(cstate, ddb) != 0);
 	skl_build_pipe_wm(cstate, ddb, pipe_wm);
 
 	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))