author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500
---|---|---
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/gpu/drm/i915/intel_display.c |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 6805 |
1 file changed, 4405 insertions, 2400 deletions
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 979228594599..0f1c799afea1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -43,8 +43,8 @@ | |||
43 | 43 | ||
44 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); | 44 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); |
45 | static void intel_update_watermarks(struct drm_device *dev); | 45 | static void intel_update_watermarks(struct drm_device *dev); |
46 | static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); | 46 | static void intel_increase_pllclock(struct drm_crtc *crtc); |
47 | static void intel_crtc_update_cursor(struct drm_crtc *crtc); | 47 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); |
48 | 48 | ||
49 | typedef struct { | 49 | typedef struct { |
50 | /* given values */ | 50 | /* given values */ |
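
The two prototype changes in this hunk ripple through the rest of the file: intel_increase_pllclock() drops its bool `schedule` flag and intel_crtc_update_cursor() gains an explicit visibility argument. A hedged sketch of how a call site changes; the call sites shown are illustrative, not taken from this diff:

```c
/* Illustrative call sites only -- the real callers are elsewhere in
 * intel_display.c and are updated by later hunks of this merge. */

/* before: */
intel_increase_pllclock(crtc, true);
intel_crtc_update_cursor(crtc);

/* after: */
intel_increase_pllclock(crtc);
intel_crtc_update_cursor(crtc, true);	/* new bool: cursor on/off */
```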
@@ -76,255 +76,6 @@ struct intel_limit { | |||
76 | int, int, intel_clock_t *); | 76 | int, int, intel_clock_t *); |
77 | }; | 77 | }; |
78 | 78 | ||
79 | #define I8XX_DOT_MIN 25000 | ||
80 | #define I8XX_DOT_MAX 350000 | ||
81 | #define I8XX_VCO_MIN 930000 | ||
82 | #define I8XX_VCO_MAX 1400000 | ||
83 | #define I8XX_N_MIN 3 | ||
84 | #define I8XX_N_MAX 16 | ||
85 | #define I8XX_M_MIN 96 | ||
86 | #define I8XX_M_MAX 140 | ||
87 | #define I8XX_M1_MIN 18 | ||
88 | #define I8XX_M1_MAX 26 | ||
89 | #define I8XX_M2_MIN 6 | ||
90 | #define I8XX_M2_MAX 16 | ||
91 | #define I8XX_P_MIN 4 | ||
92 | #define I8XX_P_MAX 128 | ||
93 | #define I8XX_P1_MIN 2 | ||
94 | #define I8XX_P1_MAX 33 | ||
95 | #define I8XX_P1_LVDS_MIN 1 | ||
96 | #define I8XX_P1_LVDS_MAX 6 | ||
97 | #define I8XX_P2_SLOW 4 | ||
98 | #define I8XX_P2_FAST 2 | ||
99 | #define I8XX_P2_LVDS_SLOW 14 | ||
100 | #define I8XX_P2_LVDS_FAST 7 | ||
101 | #define I8XX_P2_SLOW_LIMIT 165000 | ||
102 | |||
103 | #define I9XX_DOT_MIN 20000 | ||
104 | #define I9XX_DOT_MAX 400000 | ||
105 | #define I9XX_VCO_MIN 1400000 | ||
106 | #define I9XX_VCO_MAX 2800000 | ||
107 | #define PINEVIEW_VCO_MIN 1700000 | ||
108 | #define PINEVIEW_VCO_MAX 3500000 | ||
109 | #define I9XX_N_MIN 1 | ||
110 | #define I9XX_N_MAX 6 | ||
111 | /* Pineview's Ncounter is a ring counter */ | ||
112 | #define PINEVIEW_N_MIN 3 | ||
113 | #define PINEVIEW_N_MAX 6 | ||
114 | #define I9XX_M_MIN 70 | ||
115 | #define I9XX_M_MAX 120 | ||
116 | #define PINEVIEW_M_MIN 2 | ||
117 | #define PINEVIEW_M_MAX 256 | ||
118 | #define I9XX_M1_MIN 10 | ||
119 | #define I9XX_M1_MAX 22 | ||
120 | #define I9XX_M2_MIN 5 | ||
121 | #define I9XX_M2_MAX 9 | ||
122 | /* Pineview M1 is reserved, and must be 0 */ | ||
123 | #define PINEVIEW_M1_MIN 0 | ||
124 | #define PINEVIEW_M1_MAX 0 | ||
125 | #define PINEVIEW_M2_MIN 0 | ||
126 | #define PINEVIEW_M2_MAX 254 | ||
127 | #define I9XX_P_SDVO_DAC_MIN 5 | ||
128 | #define I9XX_P_SDVO_DAC_MAX 80 | ||
129 | #define I9XX_P_LVDS_MIN 7 | ||
130 | #define I9XX_P_LVDS_MAX 98 | ||
131 | #define PINEVIEW_P_LVDS_MIN 7 | ||
132 | #define PINEVIEW_P_LVDS_MAX 112 | ||
133 | #define I9XX_P1_MIN 1 | ||
134 | #define I9XX_P1_MAX 8 | ||
135 | #define I9XX_P2_SDVO_DAC_SLOW 10 | ||
136 | #define I9XX_P2_SDVO_DAC_FAST 5 | ||
137 | #define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000 | ||
138 | #define I9XX_P2_LVDS_SLOW 14 | ||
139 | #define I9XX_P2_LVDS_FAST 7 | ||
140 | #define I9XX_P2_LVDS_SLOW_LIMIT 112000 | ||
141 | |||
142 | /*The parameter is for SDVO on G4x platform*/ | ||
143 | #define G4X_DOT_SDVO_MIN 25000 | ||
144 | #define G4X_DOT_SDVO_MAX 270000 | ||
145 | #define G4X_VCO_MIN 1750000 | ||
146 | #define G4X_VCO_MAX 3500000 | ||
147 | #define G4X_N_SDVO_MIN 1 | ||
148 | #define G4X_N_SDVO_MAX 4 | ||
149 | #define G4X_M_SDVO_MIN 104 | ||
150 | #define G4X_M_SDVO_MAX 138 | ||
151 | #define G4X_M1_SDVO_MIN 17 | ||
152 | #define G4X_M1_SDVO_MAX 23 | ||
153 | #define G4X_M2_SDVO_MIN 5 | ||
154 | #define G4X_M2_SDVO_MAX 11 | ||
155 | #define G4X_P_SDVO_MIN 10 | ||
156 | #define G4X_P_SDVO_MAX 30 | ||
157 | #define G4X_P1_SDVO_MIN 1 | ||
158 | #define G4X_P1_SDVO_MAX 3 | ||
159 | #define G4X_P2_SDVO_SLOW 10 | ||
160 | #define G4X_P2_SDVO_FAST 10 | ||
161 | #define G4X_P2_SDVO_LIMIT 270000 | ||
162 | |||
163 | /*The parameter is for HDMI_DAC on G4x platform*/ | ||
164 | #define G4X_DOT_HDMI_DAC_MIN 22000 | ||
165 | #define G4X_DOT_HDMI_DAC_MAX 400000 | ||
166 | #define G4X_N_HDMI_DAC_MIN 1 | ||
167 | #define G4X_N_HDMI_DAC_MAX 4 | ||
168 | #define G4X_M_HDMI_DAC_MIN 104 | ||
169 | #define G4X_M_HDMI_DAC_MAX 138 | ||
170 | #define G4X_M1_HDMI_DAC_MIN 16 | ||
171 | #define G4X_M1_HDMI_DAC_MAX 23 | ||
172 | #define G4X_M2_HDMI_DAC_MIN 5 | ||
173 | #define G4X_M2_HDMI_DAC_MAX 11 | ||
174 | #define G4X_P_HDMI_DAC_MIN 5 | ||
175 | #define G4X_P_HDMI_DAC_MAX 80 | ||
176 | #define G4X_P1_HDMI_DAC_MIN 1 | ||
177 | #define G4X_P1_HDMI_DAC_MAX 8 | ||
178 | #define G4X_P2_HDMI_DAC_SLOW 10 | ||
179 | #define G4X_P2_HDMI_DAC_FAST 5 | ||
180 | #define G4X_P2_HDMI_DAC_LIMIT 165000 | ||
181 | |||
182 | /*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/ | ||
183 | #define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000 | ||
184 | #define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000 | ||
185 | #define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1 | ||
186 | #define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3 | ||
187 | #define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104 | ||
188 | #define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138 | ||
189 | #define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17 | ||
190 | #define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23 | ||
191 | #define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5 | ||
192 | #define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11 | ||
193 | #define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28 | ||
194 | #define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112 | ||
195 | #define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2 | ||
196 | #define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8 | ||
197 | #define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14 | ||
198 | #define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14 | ||
199 | #define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0 | ||
200 | |||
201 | /*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/ | ||
202 | #define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000 | ||
203 | #define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000 | ||
204 | #define G4X_N_DUAL_CHANNEL_LVDS_MIN 1 | ||
205 | #define G4X_N_DUAL_CHANNEL_LVDS_MAX 3 | ||
206 | #define G4X_M_DUAL_CHANNEL_LVDS_MIN 104 | ||
207 | #define G4X_M_DUAL_CHANNEL_LVDS_MAX 138 | ||
208 | #define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17 | ||
209 | #define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23 | ||
210 | #define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5 | ||
211 | #define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11 | ||
212 | #define G4X_P_DUAL_CHANNEL_LVDS_MIN 14 | ||
213 | #define G4X_P_DUAL_CHANNEL_LVDS_MAX 42 | ||
214 | #define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2 | ||
215 | #define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6 | ||
216 | #define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7 | ||
217 | #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 | ||
218 | #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 | ||
219 | |||
220 | /*The parameter is for DISPLAY PORT on G4x platform*/ | ||
221 | #define G4X_DOT_DISPLAY_PORT_MIN 161670 | ||
222 | #define G4X_DOT_DISPLAY_PORT_MAX 227000 | ||
223 | #define G4X_N_DISPLAY_PORT_MIN 1 | ||
224 | #define G4X_N_DISPLAY_PORT_MAX 2 | ||
225 | #define G4X_M_DISPLAY_PORT_MIN 97 | ||
226 | #define G4X_M_DISPLAY_PORT_MAX 108 | ||
227 | #define G4X_M1_DISPLAY_PORT_MIN 0x10 | ||
228 | #define G4X_M1_DISPLAY_PORT_MAX 0x12 | ||
229 | #define G4X_M2_DISPLAY_PORT_MIN 0x05 | ||
230 | #define G4X_M2_DISPLAY_PORT_MAX 0x06 | ||
231 | #define G4X_P_DISPLAY_PORT_MIN 10 | ||
232 | #define G4X_P_DISPLAY_PORT_MAX 20 | ||
233 | #define G4X_P1_DISPLAY_PORT_MIN 1 | ||
234 | #define G4X_P1_DISPLAY_PORT_MAX 2 | ||
235 | #define G4X_P2_DISPLAY_PORT_SLOW 10 | ||
236 | #define G4X_P2_DISPLAY_PORT_FAST 10 | ||
237 | #define G4X_P2_DISPLAY_PORT_LIMIT 0 | ||
238 | |||
239 | /* Ironlake / Sandybridge */ | ||
240 | /* as we calculate clock using (register_value + 2) for | ||
241 | N/M1/M2, so here the range value for them is (actual_value-2). | ||
242 | */ | ||
243 | #define IRONLAKE_DOT_MIN 25000 | ||
244 | #define IRONLAKE_DOT_MAX 350000 | ||
245 | #define IRONLAKE_VCO_MIN 1760000 | ||
246 | #define IRONLAKE_VCO_MAX 3510000 | ||
247 | #define IRONLAKE_M1_MIN 12 | ||
248 | #define IRONLAKE_M1_MAX 22 | ||
249 | #define IRONLAKE_M2_MIN 5 | ||
250 | #define IRONLAKE_M2_MAX 9 | ||
251 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ | ||
252 | |||
253 | /* We have parameter ranges for different type of outputs. */ | ||
254 | |||
255 | /* DAC & HDMI Refclk 120Mhz */ | ||
256 | #define IRONLAKE_DAC_N_MIN 1 | ||
257 | #define IRONLAKE_DAC_N_MAX 5 | ||
258 | #define IRONLAKE_DAC_M_MIN 79 | ||
259 | #define IRONLAKE_DAC_M_MAX 127 | ||
260 | #define IRONLAKE_DAC_P_MIN 5 | ||
261 | #define IRONLAKE_DAC_P_MAX 80 | ||
262 | #define IRONLAKE_DAC_P1_MIN 1 | ||
263 | #define IRONLAKE_DAC_P1_MAX 8 | ||
264 | #define IRONLAKE_DAC_P2_SLOW 10 | ||
265 | #define IRONLAKE_DAC_P2_FAST 5 | ||
266 | |||
267 | /* LVDS single-channel 120Mhz refclk */ | ||
268 | #define IRONLAKE_LVDS_S_N_MIN 1 | ||
269 | #define IRONLAKE_LVDS_S_N_MAX 3 | ||
270 | #define IRONLAKE_LVDS_S_M_MIN 79 | ||
271 | #define IRONLAKE_LVDS_S_M_MAX 118 | ||
272 | #define IRONLAKE_LVDS_S_P_MIN 28 | ||
273 | #define IRONLAKE_LVDS_S_P_MAX 112 | ||
274 | #define IRONLAKE_LVDS_S_P1_MIN 2 | ||
275 | #define IRONLAKE_LVDS_S_P1_MAX 8 | ||
276 | #define IRONLAKE_LVDS_S_P2_SLOW 14 | ||
277 | #define IRONLAKE_LVDS_S_P2_FAST 14 | ||
278 | |||
279 | /* LVDS dual-channel 120Mhz refclk */ | ||
280 | #define IRONLAKE_LVDS_D_N_MIN 1 | ||
281 | #define IRONLAKE_LVDS_D_N_MAX 3 | ||
282 | #define IRONLAKE_LVDS_D_M_MIN 79 | ||
283 | #define IRONLAKE_LVDS_D_M_MAX 127 | ||
284 | #define IRONLAKE_LVDS_D_P_MIN 14 | ||
285 | #define IRONLAKE_LVDS_D_P_MAX 56 | ||
286 | #define IRONLAKE_LVDS_D_P1_MIN 2 | ||
287 | #define IRONLAKE_LVDS_D_P1_MAX 8 | ||
288 | #define IRONLAKE_LVDS_D_P2_SLOW 7 | ||
289 | #define IRONLAKE_LVDS_D_P2_FAST 7 | ||
290 | |||
291 | /* LVDS single-channel 100Mhz refclk */ | ||
292 | #define IRONLAKE_LVDS_S_SSC_N_MIN 1 | ||
293 | #define IRONLAKE_LVDS_S_SSC_N_MAX 2 | ||
294 | #define IRONLAKE_LVDS_S_SSC_M_MIN 79 | ||
295 | #define IRONLAKE_LVDS_S_SSC_M_MAX 126 | ||
296 | #define IRONLAKE_LVDS_S_SSC_P_MIN 28 | ||
297 | #define IRONLAKE_LVDS_S_SSC_P_MAX 112 | ||
298 | #define IRONLAKE_LVDS_S_SSC_P1_MIN 2 | ||
299 | #define IRONLAKE_LVDS_S_SSC_P1_MAX 8 | ||
300 | #define IRONLAKE_LVDS_S_SSC_P2_SLOW 14 | ||
301 | #define IRONLAKE_LVDS_S_SSC_P2_FAST 14 | ||
302 | |||
303 | /* LVDS dual-channel 100Mhz refclk */ | ||
304 | #define IRONLAKE_LVDS_D_SSC_N_MIN 1 | ||
305 | #define IRONLAKE_LVDS_D_SSC_N_MAX 3 | ||
306 | #define IRONLAKE_LVDS_D_SSC_M_MIN 79 | ||
307 | #define IRONLAKE_LVDS_D_SSC_M_MAX 126 | ||
308 | #define IRONLAKE_LVDS_D_SSC_P_MIN 14 | ||
309 | #define IRONLAKE_LVDS_D_SSC_P_MAX 42 | ||
310 | #define IRONLAKE_LVDS_D_SSC_P1_MIN 2 | ||
311 | #define IRONLAKE_LVDS_D_SSC_P1_MAX 6 | ||
312 | #define IRONLAKE_LVDS_D_SSC_P2_SLOW 7 | ||
313 | #define IRONLAKE_LVDS_D_SSC_P2_FAST 7 | ||
314 | |||
315 | /* DisplayPort */ | ||
316 | #define IRONLAKE_DP_N_MIN 1 | ||
317 | #define IRONLAKE_DP_N_MAX 2 | ||
318 | #define IRONLAKE_DP_M_MIN 81 | ||
319 | #define IRONLAKE_DP_M_MAX 90 | ||
320 | #define IRONLAKE_DP_P_MIN 10 | ||
321 | #define IRONLAKE_DP_P_MAX 20 | ||
322 | #define IRONLAKE_DP_P2_FAST 10 | ||
323 | #define IRONLAKE_DP_P2_SLOW 10 | ||
324 | #define IRONLAKE_DP_P2_LIMIT 0 | ||
325 | #define IRONLAKE_DP_P1_MIN 1 | ||
326 | #define IRONLAKE_DP_P1_MAX 2 | ||
327 | |||
328 | /* FDI */ | 79 | /* FDI */ |
329 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ | 80 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ |
330 | 81 | ||
@@ -342,316 +93,284 @@ static bool | |||
342 | intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, | 93 | intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, |
343 | int target, int refclk, intel_clock_t *best_clock); | 94 | int target, int refclk, intel_clock_t *best_clock); |
344 | 95 | ||
96 | static inline u32 /* units of 100MHz */ | ||
97 | intel_fdi_link_freq(struct drm_device *dev) | ||
98 | { | ||
99 | if (IS_GEN5(dev)) { | ||
100 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
101 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; | ||
102 | } else | ||
103 | return 27; | ||
104 | } | ||
105 | |||
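
The new intel_fdi_link_freq() helper above returns the FDI link frequency in units of 100 MHz: on gen 5 it reads the BIOS-programmed feedback-clock field out of FDI_PLL_BIOS_0 and adds 2, otherwise it assumes the fixed 2.7 GHz link (matching the IRONLAKE_FDI_FREQ of 2700000 kHz kept by this diff). A standalone sketch of the same arithmetic; the mask width and the register value are assumptions for the example, not taken from this diff:

```c
#include <stdio.h>

#define FDI_PLL_FB_CLOCK_MASK 0xff	/* assumed width of the feedback field */

/* Mirrors intel_fdi_link_freq(): result is in units of 100 MHz. */
static unsigned int fdi_link_freq_100mhz(int is_gen5, unsigned int fdi_pll_bios_0)
{
	if (is_gen5)
		return (fdi_pll_bios_0 & FDI_PLL_FB_CLOCK_MASK) + 2;
	return 27;	/* non-gen5: fixed 2.7 GHz link */
}

int main(void)
{
	/* e.g. a BIOS-programmed field value of 25 -> (25 + 2) * 100 MHz = 2.7 GHz */
	printf("gen5:  %u00 MHz\n", fdi_link_freq_100mhz(1, 25));
	printf("other: %u00 MHz\n", fdi_link_freq_100mhz(0, 0));
	return 0;
}
```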
345 | static const intel_limit_t intel_limits_i8xx_dvo = { | 106 | static const intel_limit_t intel_limits_i8xx_dvo = { |
346 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 107 | .dot = { .min = 25000, .max = 350000 }, |
347 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, | 108 | .vco = { .min = 930000, .max = 1400000 }, |
348 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, | 109 | .n = { .min = 3, .max = 16 }, |
349 | .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, | 110 | .m = { .min = 96, .max = 140 }, |
350 | .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, | 111 | .m1 = { .min = 18, .max = 26 }, |
351 | .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, | 112 | .m2 = { .min = 6, .max = 16 }, |
352 | .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, | 113 | .p = { .min = 4, .max = 128 }, |
353 | .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, | 114 | .p1 = { .min = 2, .max = 33 }, |
354 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 115 | .p2 = { .dot_limit = 165000, |
355 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, | 116 | .p2_slow = 4, .p2_fast = 2 }, |
356 | .find_pll = intel_find_best_PLL, | 117 | .find_pll = intel_find_best_PLL, |
357 | }; | 118 | }; |
358 | 119 | ||
359 | static const intel_limit_t intel_limits_i8xx_lvds = { | 120 | static const intel_limit_t intel_limits_i8xx_lvds = { |
360 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 121 | .dot = { .min = 25000, .max = 350000 }, |
361 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, | 122 | .vco = { .min = 930000, .max = 1400000 }, |
362 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, | 123 | .n = { .min = 3, .max = 16 }, |
363 | .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, | 124 | .m = { .min = 96, .max = 140 }, |
364 | .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, | 125 | .m1 = { .min = 18, .max = 26 }, |
365 | .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, | 126 | .m2 = { .min = 6, .max = 16 }, |
366 | .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, | 127 | .p = { .min = 4, .max = 128 }, |
367 | .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, | 128 | .p1 = { .min = 1, .max = 6 }, |
368 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 129 | .p2 = { .dot_limit = 165000, |
369 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, | 130 | .p2_slow = 14, .p2_fast = 7 }, |
370 | .find_pll = intel_find_best_PLL, | 131 | .find_pll = intel_find_best_PLL, |
371 | }; | 132 | }; |
372 | 133 | ||
373 | static const intel_limit_t intel_limits_i9xx_sdvo = { | 134 | static const intel_limit_t intel_limits_i9xx_sdvo = { |
374 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 135 | .dot = { .min = 20000, .max = 400000 }, |
375 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, | 136 | .vco = { .min = 1400000, .max = 2800000 }, |
376 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, | 137 | .n = { .min = 1, .max = 6 }, |
377 | .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, | 138 | .m = { .min = 70, .max = 120 }, |
378 | .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, | 139 | .m1 = { .min = 10, .max = 22 }, |
379 | .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, | 140 | .m2 = { .min = 5, .max = 9 }, |
380 | .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, | 141 | .p = { .min = 5, .max = 80 }, |
381 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 142 | .p1 = { .min = 1, .max = 8 }, |
382 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 143 | .p2 = { .dot_limit = 200000, |
383 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 144 | .p2_slow = 10, .p2_fast = 5 }, |
384 | .find_pll = intel_find_best_PLL, | 145 | .find_pll = intel_find_best_PLL, |
385 | }; | 146 | }; |
386 | 147 | ||
387 | static const intel_limit_t intel_limits_i9xx_lvds = { | 148 | static const intel_limit_t intel_limits_i9xx_lvds = { |
388 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 149 | .dot = { .min = 20000, .max = 400000 }, |
389 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, | 150 | .vco = { .min = 1400000, .max = 2800000 }, |
390 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, | 151 | .n = { .min = 1, .max = 6 }, |
391 | .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, | 152 | .m = { .min = 70, .max = 120 }, |
392 | .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, | 153 | .m1 = { .min = 10, .max = 22 }, |
393 | .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, | 154 | .m2 = { .min = 5, .max = 9 }, |
394 | .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX }, | 155 | .p = { .min = 7, .max = 98 }, |
395 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 156 | .p1 = { .min = 1, .max = 8 }, |
396 | /* The single-channel range is 25-112Mhz, and dual-channel | 157 | .p2 = { .dot_limit = 112000, |
397 | * is 80-224Mhz. Prefer single channel as much as possible. | 158 | .p2_slow = 14, .p2_fast = 7 }, |
398 | */ | ||
399 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | ||
400 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, | ||
401 | .find_pll = intel_find_best_PLL, | 159 | .find_pll = intel_find_best_PLL, |
402 | }; | 160 | }; |
403 | 161 | ||
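
The comment deleted on the left still describes the intent of the `.p2` block kept on the right: single-channel LVDS covers roughly 25-112 MHz and dual-channel 80-224 MHz, so the table keeps `dot_limit = 112000` (kHz) with a slow post divider of 14 below that clock and a fast one of 7 above it. A minimal sketch of how the find_pll helpers (not shown in this hunk) consume these fields; simplified, since the LVDS dual-channel case is actually keyed off the LVDS register rather than the target clock:

```c
/* Simplified p2 selection based on the target dot clock (kHz). */
static int pick_p2(const intel_limit_t *limit, int target_khz)
{
	if (target_khz < limit->p2.dot_limit)
		return limit->p2.p2_slow;	/* e.g. 14: single-channel range */
	return limit->p2.p2_fast;		/* e.g. 7:  dual-channel range */
}
```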
404 | /* below parameter and function is for G4X Chipset Family*/ | 162 | |
405 | static const intel_limit_t intel_limits_g4x_sdvo = { | 163 | static const intel_limit_t intel_limits_g4x_sdvo = { |
406 | .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, | 164 | .dot = { .min = 25000, .max = 270000 }, |
407 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | 165 | .vco = { .min = 1750000, .max = 3500000}, |
408 | .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, | 166 | .n = { .min = 1, .max = 4 }, |
409 | .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX }, | 167 | .m = { .min = 104, .max = 138 }, |
410 | .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX }, | 168 | .m1 = { .min = 17, .max = 23 }, |
411 | .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX }, | 169 | .m2 = { .min = 5, .max = 11 }, |
412 | .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX }, | 170 | .p = { .min = 10, .max = 30 }, |
413 | .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX}, | 171 | .p1 = { .min = 1, .max = 3}, |
414 | .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT, | 172 | .p2 = { .dot_limit = 270000, |
415 | .p2_slow = G4X_P2_SDVO_SLOW, | 173 | .p2_slow = 10, |
416 | .p2_fast = G4X_P2_SDVO_FAST | 174 | .p2_fast = 10 |
417 | }, | 175 | }, |
418 | .find_pll = intel_g4x_find_best_PLL, | 176 | .find_pll = intel_g4x_find_best_PLL, |
419 | }; | 177 | }; |
420 | 178 | ||
421 | static const intel_limit_t intel_limits_g4x_hdmi = { | 179 | static const intel_limit_t intel_limits_g4x_hdmi = { |
422 | .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, | 180 | .dot = { .min = 22000, .max = 400000 }, |
423 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | 181 | .vco = { .min = 1750000, .max = 3500000}, |
424 | .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, | 182 | .n = { .min = 1, .max = 4 }, |
425 | .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX }, | 183 | .m = { .min = 104, .max = 138 }, |
426 | .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX }, | 184 | .m1 = { .min = 16, .max = 23 }, |
427 | .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX }, | 185 | .m2 = { .min = 5, .max = 11 }, |
428 | .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX }, | 186 | .p = { .min = 5, .max = 80 }, |
429 | .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX}, | 187 | .p1 = { .min = 1, .max = 8}, |
430 | .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT, | 188 | .p2 = { .dot_limit = 165000, |
431 | .p2_slow = G4X_P2_HDMI_DAC_SLOW, | 189 | .p2_slow = 10, .p2_fast = 5 }, |
432 | .p2_fast = G4X_P2_HDMI_DAC_FAST | ||
433 | }, | ||
434 | .find_pll = intel_g4x_find_best_PLL, | 190 | .find_pll = intel_g4x_find_best_PLL, |
435 | }; | 191 | }; |
436 | 192 | ||
437 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { | 193 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { |
438 | .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, | 194 | .dot = { .min = 20000, .max = 115000 }, |
439 | .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, | 195 | .vco = { .min = 1750000, .max = 3500000 }, |
440 | .vco = { .min = G4X_VCO_MIN, | 196 | .n = { .min = 1, .max = 3 }, |
441 | .max = G4X_VCO_MAX }, | 197 | .m = { .min = 104, .max = 138 }, |
442 | .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN, | 198 | .m1 = { .min = 17, .max = 23 }, |
443 | .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX }, | 199 | .m2 = { .min = 5, .max = 11 }, |
444 | .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN, | 200 | .p = { .min = 28, .max = 112 }, |
445 | .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX }, | 201 | .p1 = { .min = 2, .max = 8 }, |
446 | .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN, | 202 | .p2 = { .dot_limit = 0, |
447 | .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX }, | 203 | .p2_slow = 14, .p2_fast = 14 |
448 | .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN, | ||
449 | .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX }, | ||
450 | .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN, | ||
451 | .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX }, | ||
452 | .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN, | ||
453 | .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX }, | ||
454 | .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT, | ||
455 | .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW, | ||
456 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST | ||
457 | }, | 204 | }, |
458 | .find_pll = intel_g4x_find_best_PLL, | 205 | .find_pll = intel_g4x_find_best_PLL, |
459 | }; | 206 | }; |
460 | 207 | ||
461 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { | 208 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { |
462 | .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, | 209 | .dot = { .min = 80000, .max = 224000 }, |
463 | .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, | 210 | .vco = { .min = 1750000, .max = 3500000 }, |
464 | .vco = { .min = G4X_VCO_MIN, | 211 | .n = { .min = 1, .max = 3 }, |
465 | .max = G4X_VCO_MAX }, | 212 | .m = { .min = 104, .max = 138 }, |
466 | .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN, | 213 | .m1 = { .min = 17, .max = 23 }, |
467 | .max = G4X_N_DUAL_CHANNEL_LVDS_MAX }, | 214 | .m2 = { .min = 5, .max = 11 }, |
468 | .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN, | 215 | .p = { .min = 14, .max = 42 }, |
469 | .max = G4X_M_DUAL_CHANNEL_LVDS_MAX }, | 216 | .p1 = { .min = 2, .max = 6 }, |
470 | .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN, | 217 | .p2 = { .dot_limit = 0, |
471 | .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX }, | 218 | .p2_slow = 7, .p2_fast = 7 |
472 | .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN, | ||
473 | .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX }, | ||
474 | .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN, | ||
475 | .max = G4X_P_DUAL_CHANNEL_LVDS_MAX }, | ||
476 | .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN, | ||
477 | .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX }, | ||
478 | .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT, | ||
479 | .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW, | ||
480 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST | ||
481 | }, | 219 | }, |
482 | .find_pll = intel_g4x_find_best_PLL, | 220 | .find_pll = intel_g4x_find_best_PLL, |
483 | }; | 221 | }; |
484 | 222 | ||
485 | static const intel_limit_t intel_limits_g4x_display_port = { | 223 | static const intel_limit_t intel_limits_g4x_display_port = { |
486 | .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN, | 224 | .dot = { .min = 161670, .max = 227000 }, |
487 | .max = G4X_DOT_DISPLAY_PORT_MAX }, | 225 | .vco = { .min = 1750000, .max = 3500000}, |
488 | .vco = { .min = G4X_VCO_MIN, | 226 | .n = { .min = 1, .max = 2 }, |
489 | .max = G4X_VCO_MAX}, | 227 | .m = { .min = 97, .max = 108 }, |
490 | .n = { .min = G4X_N_DISPLAY_PORT_MIN, | 228 | .m1 = { .min = 0x10, .max = 0x12 }, |
491 | .max = G4X_N_DISPLAY_PORT_MAX }, | 229 | .m2 = { .min = 0x05, .max = 0x06 }, |
492 | .m = { .min = G4X_M_DISPLAY_PORT_MIN, | 230 | .p = { .min = 10, .max = 20 }, |
493 | .max = G4X_M_DISPLAY_PORT_MAX }, | 231 | .p1 = { .min = 1, .max = 2}, |
494 | .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN, | 232 | .p2 = { .dot_limit = 0, |
495 | .max = G4X_M1_DISPLAY_PORT_MAX }, | 233 | .p2_slow = 10, .p2_fast = 10 }, |
496 | .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN, | ||
497 | .max = G4X_M2_DISPLAY_PORT_MAX }, | ||
498 | .p = { .min = G4X_P_DISPLAY_PORT_MIN, | ||
499 | .max = G4X_P_DISPLAY_PORT_MAX }, | ||
500 | .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN, | ||
501 | .max = G4X_P1_DISPLAY_PORT_MAX}, | ||
502 | .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT, | ||
503 | .p2_slow = G4X_P2_DISPLAY_PORT_SLOW, | ||
504 | .p2_fast = G4X_P2_DISPLAY_PORT_FAST }, | ||
505 | .find_pll = intel_find_pll_g4x_dp, | 234 | .find_pll = intel_find_pll_g4x_dp, |
506 | }; | 235 | }; |
507 | 236 | ||
508 | static const intel_limit_t intel_limits_pineview_sdvo = { | 237 | static const intel_limit_t intel_limits_pineview_sdvo = { |
509 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, | 238 | .dot = { .min = 20000, .max = 400000}, |
510 | .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, | 239 | .vco = { .min = 1700000, .max = 3500000 }, |
511 | .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, | 240 | /* Pineview's Ncounter is a ring counter */ |
512 | .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, | 241 | .n = { .min = 3, .max = 6 }, |
513 | .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, | 242 | .m = { .min = 2, .max = 256 }, |
514 | .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, | 243 | /* Pineview only has one combined m divider, which we treat as m2. */ |
515 | .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, | 244 | .m1 = { .min = 0, .max = 0 }, |
516 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 245 | .m2 = { .min = 0, .max = 254 }, |
517 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 246 | .p = { .min = 5, .max = 80 }, |
518 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 247 | .p1 = { .min = 1, .max = 8 }, |
248 | .p2 = { .dot_limit = 200000, | ||
249 | .p2_slow = 10, .p2_fast = 5 }, | ||
519 | .find_pll = intel_find_best_PLL, | 250 | .find_pll = intel_find_best_PLL, |
520 | }; | 251 | }; |
521 | 252 | ||
522 | static const intel_limit_t intel_limits_pineview_lvds = { | 253 | static const intel_limit_t intel_limits_pineview_lvds = { |
523 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 254 | .dot = { .min = 20000, .max = 400000 }, |
524 | .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, | 255 | .vco = { .min = 1700000, .max = 3500000 }, |
525 | .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, | 256 | .n = { .min = 3, .max = 6 }, |
526 | .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, | 257 | .m = { .min = 2, .max = 256 }, |
527 | .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, | 258 | .m1 = { .min = 0, .max = 0 }, |
528 | .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, | 259 | .m2 = { .min = 0, .max = 254 }, |
529 | .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX }, | 260 | .p = { .min = 7, .max = 112 }, |
530 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 261 | .p1 = { .min = 1, .max = 8 }, |
531 | /* Pineview only supports single-channel mode. */ | 262 | .p2 = { .dot_limit = 112000, |
532 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 263 | .p2_slow = 14, .p2_fast = 14 }, |
533 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, | ||
534 | .find_pll = intel_find_best_PLL, | 264 | .find_pll = intel_find_best_PLL, |
535 | }; | 265 | }; |
536 | 266 | ||
267 | /* Ironlake / Sandybridge | ||
268 | * | ||
269 | * We calculate clock using (register_value + 2) for N/M1/M2, so here | ||
270 | * the range value for them is (actual_value - 2). | ||
271 | */ | ||
537 | static const intel_limit_t intel_limits_ironlake_dac = { | 272 | static const intel_limit_t intel_limits_ironlake_dac = { |
538 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 273 | .dot = { .min = 25000, .max = 350000 }, |
539 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 274 | .vco = { .min = 1760000, .max = 3510000 }, |
540 | .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX }, | 275 | .n = { .min = 1, .max = 5 }, |
541 | .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX }, | 276 | .m = { .min = 79, .max = 127 }, |
542 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 277 | .m1 = { .min = 12, .max = 22 }, |
543 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 278 | .m2 = { .min = 5, .max = 9 }, |
544 | .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX }, | 279 | .p = { .min = 5, .max = 80 }, |
545 | .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX }, | 280 | .p1 = { .min = 1, .max = 8 }, |
546 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 281 | .p2 = { .dot_limit = 225000, |
547 | .p2_slow = IRONLAKE_DAC_P2_SLOW, | 282 | .p2_slow = 10, .p2_fast = 5 }, |
548 | .p2_fast = IRONLAKE_DAC_P2_FAST }, | ||
549 | .find_pll = intel_g4x_find_best_PLL, | 283 | .find_pll = intel_g4x_find_best_PLL, |
550 | }; | 284 | }; |
551 | 285 | ||
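
The Ironlake/Sandybridge comment above deserves a worked example: the hardware clock calculation uses (register_value + 2) for N, M1 and M2, so the tables store (actual_value - 2). Taking the minimums from intel_limits_ironlake_dac just above, the effective dividers come out two higher than the stored fields. A tiny standalone check of that arithmetic:

```c
#include <stdio.h>

int main(void)
{
	/* Table stores register values; hardware uses (register + 2). */
	int n_reg = 1, m1_reg = 12, m2_reg = 5;	/* mins from ironlake_dac */

	printf("effective N  = %d\n", n_reg + 2);	/* 3  */
	printf("effective M1 = %d\n", m1_reg + 2);	/* 14 */
	printf("effective M2 = %d\n", m2_reg + 2);	/* 7  */
	return 0;
}
```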
552 | static const intel_limit_t intel_limits_ironlake_single_lvds = { | 286 | static const intel_limit_t intel_limits_ironlake_single_lvds = { |
553 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 287 | .dot = { .min = 25000, .max = 350000 }, |
554 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 288 | .vco = { .min = 1760000, .max = 3510000 }, |
555 | .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX }, | 289 | .n = { .min = 1, .max = 3 }, |
556 | .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX }, | 290 | .m = { .min = 79, .max = 118 }, |
557 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 291 | .m1 = { .min = 12, .max = 22 }, |
558 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 292 | .m2 = { .min = 5, .max = 9 }, |
559 | .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX }, | 293 | .p = { .min = 28, .max = 112 }, |
560 | .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX }, | 294 | .p1 = { .min = 2, .max = 8 }, |
561 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 295 | .p2 = { .dot_limit = 225000, |
562 | .p2_slow = IRONLAKE_LVDS_S_P2_SLOW, | 296 | .p2_slow = 14, .p2_fast = 14 }, |
563 | .p2_fast = IRONLAKE_LVDS_S_P2_FAST }, | ||
564 | .find_pll = intel_g4x_find_best_PLL, | 297 | .find_pll = intel_g4x_find_best_PLL, |
565 | }; | 298 | }; |
566 | 299 | ||
567 | static const intel_limit_t intel_limits_ironlake_dual_lvds = { | 300 | static const intel_limit_t intel_limits_ironlake_dual_lvds = { |
568 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 301 | .dot = { .min = 25000, .max = 350000 }, |
569 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 302 | .vco = { .min = 1760000, .max = 3510000 }, |
570 | .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX }, | 303 | .n = { .min = 1, .max = 3 }, |
571 | .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX }, | 304 | .m = { .min = 79, .max = 127 }, |
572 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 305 | .m1 = { .min = 12, .max = 22 }, |
573 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 306 | .m2 = { .min = 5, .max = 9 }, |
574 | .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX }, | 307 | .p = { .min = 14, .max = 56 }, |
575 | .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX }, | 308 | .p1 = { .min = 2, .max = 8 }, |
576 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 309 | .p2 = { .dot_limit = 225000, |
577 | .p2_slow = IRONLAKE_LVDS_D_P2_SLOW, | 310 | .p2_slow = 7, .p2_fast = 7 }, |
578 | .p2_fast = IRONLAKE_LVDS_D_P2_FAST }, | ||
579 | .find_pll = intel_g4x_find_best_PLL, | 311 | .find_pll = intel_g4x_find_best_PLL, |
580 | }; | 312 | }; |
581 | 313 | ||
314 | /* LVDS 100mhz refclk limits. */ | ||
582 | static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { | 315 | static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { |
583 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 316 | .dot = { .min = 25000, .max = 350000 }, |
584 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 317 | .vco = { .min = 1760000, .max = 3510000 }, |
585 | .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX }, | 318 | .n = { .min = 1, .max = 2 }, |
586 | .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX }, | 319 | .m = { .min = 79, .max = 126 }, |
587 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 320 | .m1 = { .min = 12, .max = 22 }, |
588 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 321 | .m2 = { .min = 5, .max = 9 }, |
589 | .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX }, | 322 | .p = { .min = 28, .max = 112 }, |
590 | .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX }, | 323 | .p1 = { .min = 2,.max = 8 }, |
591 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 324 | .p2 = { .dot_limit = 225000, |
592 | .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW, | 325 | .p2_slow = 14, .p2_fast = 14 }, |
593 | .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST }, | ||
594 | .find_pll = intel_g4x_find_best_PLL, | 326 | .find_pll = intel_g4x_find_best_PLL, |
595 | }; | 327 | }; |
596 | 328 | ||
597 | static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { | 329 | static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { |
598 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 330 | .dot = { .min = 25000, .max = 350000 }, |
599 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 331 | .vco = { .min = 1760000, .max = 3510000 }, |
600 | .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX }, | 332 | .n = { .min = 1, .max = 3 }, |
601 | .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX }, | 333 | .m = { .min = 79, .max = 126 }, |
602 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 334 | .m1 = { .min = 12, .max = 22 }, |
603 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 335 | .m2 = { .min = 5, .max = 9 }, |
604 | .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX }, | 336 | .p = { .min = 14, .max = 42 }, |
605 | .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX }, | 337 | .p1 = { .min = 2,.max = 6 }, |
606 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 338 | .p2 = { .dot_limit = 225000, |
607 | .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW, | 339 | .p2_slow = 7, .p2_fast = 7 }, |
608 | .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST }, | ||
609 | .find_pll = intel_g4x_find_best_PLL, | 340 | .find_pll = intel_g4x_find_best_PLL, |
610 | }; | 341 | }; |
611 | 342 | ||
612 | static const intel_limit_t intel_limits_ironlake_display_port = { | 343 | static const intel_limit_t intel_limits_ironlake_display_port = { |
613 | .dot = { .min = IRONLAKE_DOT_MIN, | 344 | .dot = { .min = 25000, .max = 350000 }, |
614 | .max = IRONLAKE_DOT_MAX }, | 345 | .vco = { .min = 1760000, .max = 3510000}, |
615 | .vco = { .min = IRONLAKE_VCO_MIN, | 346 | .n = { .min = 1, .max = 2 }, |
616 | .max = IRONLAKE_VCO_MAX}, | 347 | .m = { .min = 81, .max = 90 }, |
617 | .n = { .min = IRONLAKE_DP_N_MIN, | 348 | .m1 = { .min = 12, .max = 22 }, |
618 | .max = IRONLAKE_DP_N_MAX }, | 349 | .m2 = { .min = 5, .max = 9 }, |
619 | .m = { .min = IRONLAKE_DP_M_MIN, | 350 | .p = { .min = 10, .max = 20 }, |
620 | .max = IRONLAKE_DP_M_MAX }, | 351 | .p1 = { .min = 1, .max = 2}, |
621 | .m1 = { .min = IRONLAKE_M1_MIN, | 352 | .p2 = { .dot_limit = 0, |
622 | .max = IRONLAKE_M1_MAX }, | 353 | .p2_slow = 10, .p2_fast = 10 }, |
623 | .m2 = { .min = IRONLAKE_M2_MIN, | ||
624 | .max = IRONLAKE_M2_MAX }, | ||
625 | .p = { .min = IRONLAKE_DP_P_MIN, | ||
626 | .max = IRONLAKE_DP_P_MAX }, | ||
627 | .p1 = { .min = IRONLAKE_DP_P1_MIN, | ||
628 | .max = IRONLAKE_DP_P1_MAX}, | ||
629 | .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT, | ||
630 | .p2_slow = IRONLAKE_DP_P2_SLOW, | ||
631 | .p2_fast = IRONLAKE_DP_P2_FAST }, | ||
632 | .find_pll = intel_find_pll_ironlake_dp, | 354 | .find_pll = intel_find_pll_ironlake_dp, |
633 | }; | 355 | }; |
634 | 356 | ||
635 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | 357 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, |
358 | int refclk) | ||
636 | { | 359 | { |
637 | struct drm_device *dev = crtc->dev; | 360 | struct drm_device *dev = crtc->dev; |
638 | struct drm_i915_private *dev_priv = dev->dev_private; | 361 | struct drm_i915_private *dev_priv = dev->dev_private; |
639 | const intel_limit_t *limit; | 362 | const intel_limit_t *limit; |
640 | int refclk = 120; | ||
641 | 363 | ||
642 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 364 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
643 | if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) | ||
644 | refclk = 100; | ||
645 | |||
646 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | 365 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == |
647 | LVDS_CLKB_POWER_UP) { | 366 | LVDS_CLKB_POWER_UP) { |
648 | /* LVDS dual channel */ | 367 | /* LVDS dual channel */ |
649 | if (refclk == 100) | 368 | if (refclk == 100000) |
650 | limit = &intel_limits_ironlake_dual_lvds_100m; | 369 | limit = &intel_limits_ironlake_dual_lvds_100m; |
651 | else | 370 | else |
652 | limit = &intel_limits_ironlake_dual_lvds; | 371 | limit = &intel_limits_ironlake_dual_lvds; |
653 | } else { | 372 | } else { |
654 | if (refclk == 100) | 373 | if (refclk == 100000) |
655 | limit = &intel_limits_ironlake_single_lvds_100m; | 374 | limit = &intel_limits_ironlake_single_lvds_100m; |
656 | else | 375 | else |
657 | limit = &intel_limits_ironlake_single_lvds; | 376 | limit = &intel_limits_ironlake_single_lvds; |
@@ -692,25 +411,25 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) | |||
692 | return limit; | 411 | return limit; |
693 | } | 412 | } |
694 | 413 | ||
695 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | 414 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) |
696 | { | 415 | { |
697 | struct drm_device *dev = crtc->dev; | 416 | struct drm_device *dev = crtc->dev; |
698 | const intel_limit_t *limit; | 417 | const intel_limit_t *limit; |
699 | 418 | ||
700 | if (HAS_PCH_SPLIT(dev)) | 419 | if (HAS_PCH_SPLIT(dev)) |
701 | limit = intel_ironlake_limit(crtc); | 420 | limit = intel_ironlake_limit(crtc, refclk); |
702 | else if (IS_G4X(dev)) { | 421 | else if (IS_G4X(dev)) { |
703 | limit = intel_g4x_limit(crtc); | 422 | limit = intel_g4x_limit(crtc); |
704 | } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) { | ||
705 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | ||
706 | limit = &intel_limits_i9xx_lvds; | ||
707 | else | ||
708 | limit = &intel_limits_i9xx_sdvo; | ||
709 | } else if (IS_PINEVIEW(dev)) { | 423 | } else if (IS_PINEVIEW(dev)) { |
710 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 424 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
711 | limit = &intel_limits_pineview_lvds; | 425 | limit = &intel_limits_pineview_lvds; |
712 | else | 426 | else |
713 | limit = &intel_limits_pineview_sdvo; | 427 | limit = &intel_limits_pineview_sdvo; |
428 | } else if (!IS_GEN2(dev)) { | ||
429 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | ||
430 | limit = &intel_limits_i9xx_lvds; | ||
431 | else | ||
432 | limit = &intel_limits_i9xx_sdvo; | ||
714 | } else { | 433 | } else { |
715 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 434 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
716 | limit = &intel_limits_i8xx_lvds; | 435 | limit = &intel_limits_i8xx_lvds; |
@@ -744,20 +463,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock | |||
744 | /** | 463 | /** |
745 | * Returns whether any output on the specified pipe is of the specified type | 464 | * Returns whether any output on the specified pipe is of the specified type |
746 | */ | 465 | */ |
747 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | 466 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type) |
748 | { | 467 | { |
749 | struct drm_device *dev = crtc->dev; | 468 | struct drm_device *dev = crtc->dev; |
750 | struct drm_mode_config *mode_config = &dev->mode_config; | 469 | struct drm_mode_config *mode_config = &dev->mode_config; |
751 | struct drm_encoder *l_entry; | 470 | struct intel_encoder *encoder; |
752 | 471 | ||
753 | list_for_each_entry(l_entry, &mode_config->encoder_list, head) { | 472 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
754 | if (l_entry && l_entry->crtc == crtc) { | 473 | if (encoder->base.crtc == crtc && encoder->type == type) |
755 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry); | 474 | return true; |
756 | if (intel_encoder->type == type) | 475 | |
757 | return true; | 476 | return false; |
758 | } | ||
759 | } | ||
760 | return false; | ||
761 | } | 477 | } |
762 | 478 | ||
763 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) | 479 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
@@ -766,11 +482,10 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
766 | * the given connectors. | 482 | * the given connectors. |
767 | */ | 483 | */ |
768 | 484 | ||
769 | static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | 485 | static bool intel_PLL_is_valid(struct drm_device *dev, |
486 | const intel_limit_t *limit, | ||
487 | const intel_clock_t *clock) | ||
770 | { | 488 | { |
771 | const intel_limit_t *limit = intel_limit (crtc); | ||
772 | struct drm_device *dev = crtc->dev; | ||
773 | |||
774 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) | 489 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
775 | INTELPllInvalid ("p1 out of range\n"); | 490 | INTELPllInvalid ("p1 out of range\n"); |
776 | if (clock->p < limit->p.min || limit->p.max < clock->p) | 491 | if (clock->p < limit->p.min || limit->p.max < clock->p) |
@@ -842,8 +557,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
842 | int this_err; | 557 | int this_err; |
843 | 558 | ||
844 | intel_clock(dev, refclk, &clock); | 559 | intel_clock(dev, refclk, &clock); |
845 | 560 | if (!intel_PLL_is_valid(dev, limit, | |
846 | if (!intel_PLL_is_valid(crtc, &clock)) | 561 | &clock)) |
847 | continue; | 562 | continue; |
848 | 563 | ||
849 | this_err = abs(clock.dot - target); | 564 | this_err = abs(clock.dot - target); |
@@ -905,9 +620,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
905 | int this_err; | 620 | int this_err; |
906 | 621 | ||
907 | intel_clock(dev, refclk, &clock); | 622 | intel_clock(dev, refclk, &clock); |
908 | if (!intel_PLL_is_valid(crtc, &clock)) | 623 | if (!intel_PLL_is_valid(dev, limit, |
624 | &clock)) | ||
909 | continue; | 625 | continue; |
910 | this_err = abs(clock.dot - target) ; | 626 | |
627 | this_err = abs(clock.dot - target); | ||
911 | if (this_err < err_most) { | 628 | if (this_err < err_most) { |
912 | *best_clock = clock; | 629 | *best_clock = clock; |
913 | err_most = this_err; | 630 | err_most = this_err; |
@@ -928,10 +645,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
928 | struct drm_device *dev = crtc->dev; | 645 | struct drm_device *dev = crtc->dev; |
929 | intel_clock_t clock; | 646 | intel_clock_t clock; |
930 | 647 | ||
931 | /* return directly when it is eDP */ | ||
932 | if (HAS_eDP) | ||
933 | return true; | ||
934 | |||
935 | if (target < 200000) { | 648 | if (target < 200000) { |
936 | clock.n = 1; | 649 | clock.n = 1; |
937 | clock.p1 = 2; | 650 | clock.p1 = 2; |
@@ -955,26 +668,26 @@ static bool | |||
955 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | 668 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
956 | int target, int refclk, intel_clock_t *best_clock) | 669 | int target, int refclk, intel_clock_t *best_clock) |
957 | { | 670 | { |
958 | intel_clock_t clock; | 671 | intel_clock_t clock; |
959 | if (target < 200000) { | 672 | if (target < 200000) { |
960 | clock.p1 = 2; | 673 | clock.p1 = 2; |
961 | clock.p2 = 10; | 674 | clock.p2 = 10; |
962 | clock.n = 2; | 675 | clock.n = 2; |
963 | clock.m1 = 23; | 676 | clock.m1 = 23; |
964 | clock.m2 = 8; | 677 | clock.m2 = 8; |
965 | } else { | 678 | } else { |
966 | clock.p1 = 1; | 679 | clock.p1 = 1; |
967 | clock.p2 = 10; | 680 | clock.p2 = 10; |
968 | clock.n = 1; | 681 | clock.n = 1; |
969 | clock.m1 = 14; | 682 | clock.m1 = 14; |
970 | clock.m2 = 2; | 683 | clock.m2 = 2; |
971 | } | 684 | } |
972 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); | 685 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); |
973 | clock.p = (clock.p1 * clock.p2); | 686 | clock.p = (clock.p1 * clock.p2); |
974 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; | 687 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; |
975 | clock.vco = 0; | 688 | clock.vco = 0; |
976 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | 689 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
977 | return true; | 690 | return true; |
978 | } | 691 | } |
979 | 692 | ||
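
intel_find_pll_g4x_dp() above hard-codes the two DisplayPort dividers rather than searching; plugging its numbers back into its own formula, dot = 96000 * m / (n + 2) / p with m = 5 * (m1 + 2) + (m2 + 2), the target < 200000 branch evaluates to 162,000 kHz and the other branch to 268,800 kHz, tracking the 1.62 GHz and 2.7 GHz DP link rates (symbol clock, i.e. link rate divided by ten) given the 96 MHz refclk the formula assumes. A standalone check of that arithmetic:

```c
#include <stdio.h>

/* Same arithmetic as intel_find_pll_g4x_dp() in the hunk above. */
static int dp_dot_khz(int n, int p1, int p2, int m1, int m2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);
	int p = p1 * p2;

	return 96000 * m / (n + 2) / p;		/* refclk fixed at 96 MHz */
}

int main(void)
{
	printf("low  link: %d kHz\n", dp_dot_khz(2, 2, 10, 23, 8));	/* 162000 */
	printf("high link: %d kHz\n", dp_dot_khz(1, 1, 10, 14, 2));	/* 268800 */
	return 0;
}
```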
980 | /** | 693 | /** |
@@ -988,7 +701,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
988 | void intel_wait_for_vblank(struct drm_device *dev, int pipe) | 701 | void intel_wait_for_vblank(struct drm_device *dev, int pipe) |
989 | { | 702 | { |
990 | struct drm_i915_private *dev_priv = dev->dev_private; | 703 | struct drm_i915_private *dev_priv = dev->dev_private; |
991 | int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); | 704 | int pipestat_reg = PIPESTAT(pipe); |
992 | 705 | ||
993 | /* Clear existing vblank status. Note this will clear any other | 706 | /* Clear existing vblank status. Note this will clear any other |
994 | * sticky status fields as well. | 707 | * sticky status fields as well. |
@@ -1007,9 +720,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
1007 | I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); | 720 | I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); |
1008 | 721 | ||
1009 | /* Wait for vblank interrupt bit to set */ | 722 | /* Wait for vblank interrupt bit to set */ |
1010 | if (wait_for((I915_READ(pipestat_reg) & | 723 | if (wait_for(I915_READ(pipestat_reg) & |
1011 | PIPE_VBLANK_INTERRUPT_STATUS), | 724 | PIPE_VBLANK_INTERRUPT_STATUS, |
1012 | 50, 0)) | 725 | 50)) |
1013 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 726 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
1014 | } | 727 | } |
1015 | 728 | ||
@@ -1028,47 +741,664 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
1028 | * Otherwise: | 741 | * Otherwise: |
1029 | * wait for the display line value to settle (it usually | 742 | * wait for the display line value to settle (it usually |
1030 | * ends up stopping at the start of the next frame). | 743 | * ends up stopping at the start of the next frame). |
1031 | * | 744 | * |
1032 | */ | 745 | */ |
1033 | static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) | 746 | void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) |
1034 | { | 747 | { |
1035 | struct drm_i915_private *dev_priv = dev->dev_private; | 748 | struct drm_i915_private *dev_priv = dev->dev_private; |
1036 | 749 | ||
1037 | if (INTEL_INFO(dev)->gen >= 4) { | 750 | if (INTEL_INFO(dev)->gen >= 4) { |
1038 | int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF); | 751 | int reg = PIPECONF(pipe); |
1039 | 752 | ||
1040 | /* Wait for the Pipe State to go off */ | 753 | /* Wait for the Pipe State to go off */ |
1041 | if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, | 754 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, |
1042 | 100, 0)) | 755 | 100)) |
1043 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); | 756 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); |
1044 | } else { | 757 | } else { |
1045 | u32 last_line; | 758 | u32 last_line; |
1046 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | 759 | int reg = PIPEDSL(pipe); |
1047 | unsigned long timeout = jiffies + msecs_to_jiffies(100); | 760 | unsigned long timeout = jiffies + msecs_to_jiffies(100); |
1048 | 761 | ||
1049 | /* Wait for the display line to settle */ | 762 | /* Wait for the display line to settle */ |
1050 | do { | 763 | do { |
1051 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | 764 | last_line = I915_READ(reg) & DSL_LINEMASK; |
1052 | mdelay(5); | 765 | mdelay(5); |
1053 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | 766 | } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) && |
1054 | time_after(timeout, jiffies)); | 767 | time_after(timeout, jiffies)); |
1055 | if (time_after(jiffies, timeout)) | 768 | if (time_after(jiffies, timeout)) |
1056 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); | 769 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); |
1057 | } | 770 | } |
1058 | } | 771 | } |
1059 | 772 | ||
1060 | /* Parameters have changed, update FBC info */ | 773 | static const char *state_string(bool enabled) |
774 | { | ||
775 | return enabled ? "on" : "off"; | ||
776 | } | ||
777 | |||
778 | /* Only for pre-ILK configs */ | ||
779 | static void assert_pll(struct drm_i915_private *dev_priv, | ||
780 | enum pipe pipe, bool state) | ||
781 | { | ||
782 | int reg; | ||
783 | u32 val; | ||
784 | bool cur_state; | ||
785 | |||
786 | reg = DPLL(pipe); | ||
787 | val = I915_READ(reg); | ||
788 | cur_state = !!(val & DPLL_VCO_ENABLE); | ||
789 | WARN(cur_state != state, | ||
790 | "PLL state assertion failure (expected %s, current %s)\n", | ||
791 | state_string(state), state_string(cur_state)); | ||
792 | } | ||
793 | #define assert_pll_enabled(d, p) assert_pll(d, p, true) | ||
794 | #define assert_pll_disabled(d, p) assert_pll(d, p, false) | ||
795 | |||
796 | /* For ILK+ */ | ||
797 | static void assert_pch_pll(struct drm_i915_private *dev_priv, | ||
798 | enum pipe pipe, bool state) | ||
799 | { | ||
800 | int reg; | ||
801 | u32 val; | ||
802 | bool cur_state; | ||
803 | |||
804 | reg = PCH_DPLL(pipe); | ||
805 | val = I915_READ(reg); | ||
806 | cur_state = !!(val & DPLL_VCO_ENABLE); | ||
807 | WARN(cur_state != state, | ||
808 | "PCH PLL state assertion failure (expected %s, current %s)\n", | ||
809 | state_string(state), state_string(cur_state)); | ||
810 | } | ||
811 | #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true) | ||
812 | #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false) | ||
813 | |||
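
The new assert_pll()/assert_pch_pll() helpers (and the FDI, panel, pipe and plane variants that follow) all share one shape: read the control register, reduce the enable bit to a boolean with `!!`, and WARN when it disagrees with the state the caller expects, with `*_enabled`/`*_disabled` wrappers provided as macros. A hedged sketch of how such a check is typically used; the caller shown is illustrative, the real users being the intel_enable/disable helpers added further down in this diff:

```c
/* Before turning a pipe on, check that its DPLL is already running;
 * the WARN dumps a backtrace instead of silently misprogramming the
 * hardware.  (Illustrative caller, pre-ILK case only.) */
static void example_enable_pipe(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	assert_pll_enabled(dev_priv, pipe);	/* expands to assert_pll(..., true) */

	/* ... set PIPECONF_ENABLE in PIPECONF(pipe) here ... */
}
```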
814 | static void assert_fdi_tx(struct drm_i915_private *dev_priv, | ||
815 | enum pipe pipe, bool state) | ||
816 | { | ||
817 | int reg; | ||
818 | u32 val; | ||
819 | bool cur_state; | ||
820 | |||
821 | reg = FDI_TX_CTL(pipe); | ||
822 | val = I915_READ(reg); | ||
823 | cur_state = !!(val & FDI_TX_ENABLE); | ||
824 | WARN(cur_state != state, | ||
825 | "FDI TX state assertion failure (expected %s, current %s)\n", | ||
826 | state_string(state), state_string(cur_state)); | ||
827 | } | ||
828 | #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) | ||
829 | #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) | ||
830 | |||
831 | static void assert_fdi_rx(struct drm_i915_private *dev_priv, | ||
832 | enum pipe pipe, bool state) | ||
833 | { | ||
834 | int reg; | ||
835 | u32 val; | ||
836 | bool cur_state; | ||
837 | |||
838 | reg = FDI_RX_CTL(pipe); | ||
839 | val = I915_READ(reg); | ||
840 | cur_state = !!(val & FDI_RX_ENABLE); | ||
841 | WARN(cur_state != state, | ||
842 | "FDI RX state assertion failure (expected %s, current %s)\n", | ||
843 | state_string(state), state_string(cur_state)); | ||
844 | } | ||
845 | #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) | ||
846 | #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) | ||
847 | |||
848 | static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, | ||
849 | enum pipe pipe) | ||
850 | { | ||
851 | int reg; | ||
852 | u32 val; | ||
853 | |||
854 | /* ILK FDI PLL is always enabled */ | ||
855 | if (dev_priv->info->gen == 5) | ||
856 | return; | ||
857 | |||
858 | reg = FDI_TX_CTL(pipe); | ||
859 | val = I915_READ(reg); | ||
860 | WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); | ||
861 | } | ||
862 | |||
863 | static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, | ||
864 | enum pipe pipe) | ||
865 | { | ||
866 | int reg; | ||
867 | u32 val; | ||
868 | |||
869 | reg = FDI_RX_CTL(pipe); | ||
870 | val = I915_READ(reg); | ||
871 | WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); | ||
872 | } | ||
873 | |||
874 | static void assert_panel_unlocked(struct drm_i915_private *dev_priv, | ||
875 | enum pipe pipe) | ||
876 | { | ||
877 | int pp_reg, lvds_reg; | ||
878 | u32 val; | ||
879 | enum pipe panel_pipe = PIPE_A; | ||
880 | bool locked = true; | ||
881 | |||
882 | if (HAS_PCH_SPLIT(dev_priv->dev)) { | ||
883 | pp_reg = PCH_PP_CONTROL; | ||
884 | lvds_reg = PCH_LVDS; | ||
885 | } else { | ||
886 | pp_reg = PP_CONTROL; | ||
887 | lvds_reg = LVDS; | ||
888 | } | ||
889 | |||
890 | val = I915_READ(pp_reg); | ||
891 | if (!(val & PANEL_POWER_ON) || | ||
892 | ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) | ||
893 | locked = false; | ||
894 | |||
895 | if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) | ||
896 | panel_pipe = PIPE_B; | ||
897 | |||
898 | WARN(panel_pipe == pipe && locked, | ||
899 | "panel assertion failure, pipe %c regs locked\n", | ||
900 | pipe_name(pipe)); | ||
901 | } | ||
902 | |||
903 | static void assert_pipe(struct drm_i915_private *dev_priv, | ||
904 | enum pipe pipe, bool state) | ||
905 | { | ||
906 | int reg; | ||
907 | u32 val; | ||
908 | bool cur_state; | ||
909 | |||
910 | reg = PIPECONF(pipe); | ||
911 | val = I915_READ(reg); | ||
912 | cur_state = !!(val & PIPECONF_ENABLE); | ||
913 | WARN(cur_state != state, | ||
914 | "pipe %c assertion failure (expected %s, current %s)\n", | ||
915 | pipe_name(pipe), state_string(state), state_string(cur_state)); | ||
916 | } | ||
917 | #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) | ||
918 | #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) | ||
919 | |||
920 | static void assert_plane_enabled(struct drm_i915_private *dev_priv, | ||
921 | enum plane plane) | ||
922 | { | ||
923 | int reg; | ||
924 | u32 val; | ||
925 | |||
926 | reg = DSPCNTR(plane); | ||
927 | val = I915_READ(reg); | ||
928 | WARN(!(val & DISPLAY_PLANE_ENABLE), | ||
929 | "plane %c assertion failure, should be active but is disabled\n", | ||
930 | plane_name(plane)); | ||
931 | } | ||
932 | |||
933 | static void assert_planes_disabled(struct drm_i915_private *dev_priv, | ||
934 | enum pipe pipe) | ||
935 | { | ||
936 | int reg, i; | ||
937 | u32 val; | ||
938 | int cur_pipe; | ||
939 | |||
940 | /* Planes are fixed to pipes on ILK+ */ | ||
941 | if (HAS_PCH_SPLIT(dev_priv->dev)) | ||
942 | return; | ||
943 | |||
944 | /* Need to check both planes against the pipe */ | ||
945 | for (i = 0; i < 2; i++) { | ||
946 | reg = DSPCNTR(i); | ||
947 | val = I915_READ(reg); | ||
948 | cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> | ||
949 | DISPPLANE_SEL_PIPE_SHIFT; | ||
950 | WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, | ||
951 | "plane %c assertion failure, should be off on pipe %c but is still active\n", | ||
952 | plane_name(i), pipe_name(pipe)); | ||
953 | } | ||
954 | } | ||
955 | |||
956 | static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) | ||
957 | { | ||
958 | u32 val; | ||
959 | bool enabled; | ||
960 | |||
961 | val = I915_READ(PCH_DREF_CONTROL); | ||
962 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | | ||
963 | DREF_SUPERSPREAD_SOURCE_MASK)); | ||
964 | WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); | ||
965 | } | ||
966 | |||
967 | static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, | ||
968 | enum pipe pipe) | ||
969 | { | ||
970 | int reg; | ||
971 | u32 val; | ||
972 | bool enabled; | ||
973 | |||
974 | reg = TRANSCONF(pipe); | ||
975 | val = I915_READ(reg); | ||
976 | enabled = !!(val & TRANS_ENABLE); | ||
977 | WARN(enabled, | ||
978 | "transcoder assertion failed, should be off on pipe %c but is still active\n", | ||
979 | pipe_name(pipe)); | ||
980 | } | ||
981 | |||
982 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, | ||
983 | enum pipe pipe, int reg) | ||
984 | { | ||
985 | u32 val = I915_READ(reg); | ||
986 | WARN(DP_PIPE_ENABLED(val, pipe), | ||
987 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | ||
988 | reg, pipe_name(pipe)); | ||
989 | } | ||
990 | |||
991 | static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | ||
992 | enum pipe pipe, int reg) | ||
993 | { | ||
994 | u32 val = I915_READ(reg); | ||
995 | WARN(HDMI_PIPE_ENABLED(val, pipe), | ||
996 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | ||
997 | reg, pipe_name(pipe)); | ||
998 | } | ||
999 | |||
1000 | static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | ||
1001 | enum pipe pipe) | ||
1002 | { | ||
1003 | int reg; | ||
1004 | u32 val; | ||
1005 | |||
1006 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B); | ||
1007 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C); | ||
1008 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D); | ||
1009 | |||
1010 | reg = PCH_ADPA; | ||
1011 | val = I915_READ(reg); | ||
1012 | WARN(ADPA_PIPE_ENABLED(val, pipe), | ||
1013 | "PCH VGA enabled on transcoder %c, should be disabled\n", | ||
1014 | pipe_name(pipe)); | ||
1015 | |||
1016 | reg = PCH_LVDS; | ||
1017 | val = I915_READ(reg); | ||
1018 | WARN(LVDS_PIPE_ENABLED(val, pipe), | ||
1019 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | ||
1020 | pipe_name(pipe)); | ||
1021 | |||
1022 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); | ||
1023 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); | ||
1024 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); | ||
1025 | } | ||
1026 | |||
1027 | /** | ||
1028 | * intel_enable_pll - enable a PLL | ||
1029 | * @dev_priv: i915 private structure | ||
1030 | * @pipe: pipe PLL to enable | ||
1031 | * | ||
1032 | * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to | ||
1033 | * make sure the PLL reg is writable first though, since the panel write | ||
1034 | * protect mechanism may be enabled. | ||
1035 | * | ||
1036 | * Note! This is for pre-ILK only. | ||
1037 | */ | ||
1038 | static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | ||
1039 | { | ||
1040 | int reg; | ||
1041 | u32 val; | ||
1042 | |||
1043 | /* No really, not for ILK+ */ | ||
1044 | BUG_ON(dev_priv->info->gen >= 5); | ||
1045 | |||
1046 | /* PLL is protected by panel, make sure we can write it */ | ||
1047 | if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) | ||
1048 | assert_panel_unlocked(dev_priv, pipe); | ||
1049 | |||
1050 | reg = DPLL(pipe); | ||
1051 | val = I915_READ(reg); | ||
1052 | val |= DPLL_VCO_ENABLE; | ||
1053 | |||
1054 | /* We do this three times for luck */ | ||
1055 | I915_WRITE(reg, val); | ||
1056 | POSTING_READ(reg); | ||
1057 | udelay(150); /* wait for warmup */ | ||
1058 | I915_WRITE(reg, val); | ||
1059 | POSTING_READ(reg); | ||
1060 | udelay(150); /* wait for warmup */ | ||
1061 | I915_WRITE(reg, val); | ||
1062 | POSTING_READ(reg); | ||
1063 | udelay(150); /* wait for warmup */ | ||
1064 | } | ||
1065 | |||
1066 | /** | ||
1067 | * intel_disable_pll - disable a PLL | ||
1068 | * @dev_priv: i915 private structure | ||
1069 | * @pipe: pipe PLL to disable | ||
1070 | * | ||
1071 | * Disable the PLL for @pipe, making sure the pipe is off first. | ||
1072 | * | ||
1073 | * Note! This is for pre-ILK only. | ||
1074 | */ | ||
1075 | static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | ||
1076 | { | ||
1077 | int reg; | ||
1078 | u32 val; | ||
1079 | |||
1080 | /* Don't disable pipe A or pipe A PLLs if the pipe A force quirk is set */ | ||
1081 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
1082 | return; | ||
1083 | |||
1084 | /* Make sure the pipe isn't still relying on us */ | ||
1085 | assert_pipe_disabled(dev_priv, pipe); | ||
1086 | |||
1087 | reg = DPLL(pipe); | ||
1088 | val = I915_READ(reg); | ||
1089 | val &= ~DPLL_VCO_ENABLE; | ||
1090 | I915_WRITE(reg, val); | ||
1091 | POSTING_READ(reg); | ||
1092 | } | ||
1093 | |||
1094 | /** | ||
1095 | * intel_enable_pch_pll - enable PCH PLL | ||
1096 | * @dev_priv: i915 private structure | ||
1097 | * @pipe: pipe PLL to enable | ||
1098 | * | ||
1099 | * The PCH PLL needs to be enabled before the PCH transcoder, since it | ||
1100 | * drives the transcoder clock. | ||
1101 | */ | ||
1102 | static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, | ||
1103 | enum pipe pipe) | ||
1104 | { | ||
1105 | int reg; | ||
1106 | u32 val; | ||
1107 | |||
1108 | /* PCH only available on ILK+ */ | ||
1109 | BUG_ON(dev_priv->info->gen < 5); | ||
1110 | |||
1111 | /* PCH refclock must be enabled first */ | ||
1112 | assert_pch_refclk_enabled(dev_priv); | ||
1113 | |||
1114 | reg = PCH_DPLL(pipe); | ||
1115 | val = I915_READ(reg); | ||
1116 | val |= DPLL_VCO_ENABLE; | ||
1117 | I915_WRITE(reg, val); | ||
1118 | POSTING_READ(reg); | ||
1119 | udelay(200); | ||
1120 | } | ||
1121 | |||
1122 | static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, | ||
1123 | enum pipe pipe) | ||
1124 | { | ||
1125 | int reg; | ||
1126 | u32 val; | ||
1127 | |||
1128 | /* PCH only available on ILK+ */ | ||
1129 | BUG_ON(dev_priv->info->gen < 5); | ||
1130 | |||
1131 | /* Make sure transcoder isn't still depending on us */ | ||
1132 | assert_transcoder_disabled(dev_priv, pipe); | ||
1133 | |||
1134 | reg = PCH_DPLL(pipe); | ||
1135 | val = I915_READ(reg); | ||
1136 | val &= ~DPLL_VCO_ENABLE; | ||
1137 | I915_WRITE(reg, val); | ||
1138 | POSTING_READ(reg); | ||
1139 | udelay(200); | ||
1140 | } | ||
1141 | |||
1142 | static void intel_enable_transcoder(struct drm_i915_private *dev_priv, | ||
1143 | enum pipe pipe) | ||
1144 | { | ||
1145 | int reg; | ||
1146 | u32 val; | ||
1147 | |||
1148 | /* PCH only available on ILK+ */ | ||
1149 | BUG_ON(dev_priv->info->gen < 5); | ||
1150 | |||
1151 | /* Make sure PCH DPLL is enabled */ | ||
1152 | assert_pch_pll_enabled(dev_priv, pipe); | ||
1153 | |||
1154 | /* FDI must be feeding us bits for PCH ports */ | ||
1155 | assert_fdi_tx_enabled(dev_priv, pipe); | ||
1156 | assert_fdi_rx_enabled(dev_priv, pipe); | ||
1157 | |||
1158 | reg = TRANSCONF(pipe); | ||
1159 | val = I915_READ(reg); | ||
1160 | /* | ||
1161 | * make the BPC in the transcoder consistent with | ||
1162 | * that in the pipeconf reg. | ||
1163 | */ | ||
1164 | val &= ~PIPE_BPC_MASK; | ||
1165 | val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; | ||
1166 | I915_WRITE(reg, val | TRANS_ENABLE); | ||
1167 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | ||
1168 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | ||
1169 | } | ||
1170 | |||
1171 | static void intel_disable_transcoder(struct drm_i915_private *dev_priv, | ||
1172 | enum pipe pipe) | ||
1173 | { | ||
1174 | int reg; | ||
1175 | u32 val; | ||
1176 | |||
1177 | /* FDI relies on the transcoder */ | ||
1178 | assert_fdi_tx_disabled(dev_priv, pipe); | ||
1179 | assert_fdi_rx_disabled(dev_priv, pipe); | ||
1180 | |||
1181 | /* Ports must be off as well */ | ||
1182 | assert_pch_ports_disabled(dev_priv, pipe); | ||
1183 | |||
1184 | reg = TRANSCONF(pipe); | ||
1185 | val = I915_READ(reg); | ||
1186 | val &= ~TRANS_ENABLE; | ||
1187 | I915_WRITE(reg, val); | ||
1188 | /* wait for PCH transcoder off, transcoder state */ | ||
1189 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) | ||
1190 | DRM_ERROR("failed to disable transcoder\n"); | ||
1191 | } | ||
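Taken together, the PCH-side helpers above encode a fixed ordering through their assertions: the refclk and the PCH PLL come up before the transcoder, and on the way down the ports and FDI must be quiet before the transcoder and PLL are turned off. A minimal sketch of a caller that respects that ordering, assuming only the helpers in this file; the example_* wrappers (and the elided FDI steps) are illustrative, not the driver's actual code path:

/* Illustrative only -- not part of the patch above. */
static void example_pch_path_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	intel_enable_pch_pll(dev_priv, pipe);	 /* checks PCH refclk first */
	/* ...FDI link training would run here... */
	intel_enable_transcoder(dev_priv, pipe); /* asserts PLL + FDI are up */
}

static void example_pch_path_off(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	/* ...FDI TX/RX would be disabled here... */
	intel_disable_pch_ports(dev_priv, pipe);  /* defined further below */
	intel_disable_transcoder(dev_priv, pipe); /* asserts FDI + ports off */
	intel_disable_pch_pll(dev_priv, pipe);	  /* clock goes away last */
}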
1192 | |||
1193 | /** | ||
1194 | * intel_enable_pipe - enable a pipe, asserting requirements | ||
1195 | * @dev_priv: i915 private structure | ||
1196 | * @pipe: pipe to enable | ||
1197 | * @pch_port: on ILK+, is this pipe driving a PCH port or not | ||
1198 | * | ||
1199 | * Enable @pipe, making sure that various hardware specific requirements | ||
1200 | * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. | ||
1201 | * | ||
1202 | * @pipe should be %PIPE_A or %PIPE_B. | ||
1203 | * | ||
1204 | * Will wait until the pipe is actually running (i.e. first vblank) before | ||
1205 | * returning. | ||
1206 | */ | ||
1207 | static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, | ||
1208 | bool pch_port) | ||
1209 | { | ||
1210 | int reg; | ||
1211 | u32 val; | ||
1212 | |||
1213 | /* | ||
1214 | * A pipe without a PLL won't actually be able to drive bits from | ||
1215 | * a plane. On ILK+ the pipe PLLs are integrated, so we don't | ||
1216 | * need the check. | ||
1217 | */ | ||
1218 | if (!HAS_PCH_SPLIT(dev_priv->dev)) | ||
1219 | assert_pll_enabled(dev_priv, pipe); | ||
1220 | else { | ||
1221 | if (pch_port) { | ||
1222 | /* if driving the PCH, we need FDI enabled */ | ||
1223 | assert_fdi_rx_pll_enabled(dev_priv, pipe); | ||
1224 | assert_fdi_tx_pll_enabled(dev_priv, pipe); | ||
1225 | } | ||
1226 | /* FIXME: assert CPU port conditions for SNB+ */ | ||
1227 | } | ||
1228 | |||
1229 | reg = PIPECONF(pipe); | ||
1230 | val = I915_READ(reg); | ||
1231 | if (val & PIPECONF_ENABLE) | ||
1232 | return; | ||
1233 | |||
1234 | I915_WRITE(reg, val | PIPECONF_ENABLE); | ||
1235 | intel_wait_for_vblank(dev_priv->dev, pipe); | ||
1236 | } | ||
1237 | |||
1238 | /** | ||
1239 | * intel_disable_pipe - disable a pipe, asserting requirements | ||
1240 | * @dev_priv: i915 private structure | ||
1241 | * @pipe: pipe to disable | ||
1242 | * | ||
1243 | * Disable @pipe, making sure that various hardware specific requirements | ||
1244 | * are met, if applicable, e.g. plane disabled, panel fitter off, etc. | ||
1245 | * | ||
1246 | * @pipe should be %PIPE_A or %PIPE_B. | ||
1247 | * | ||
1248 | * Will wait until the pipe has shut down before returning. | ||
1249 | */ | ||
1250 | static void intel_disable_pipe(struct drm_i915_private *dev_priv, | ||
1251 | enum pipe pipe) | ||
1252 | { | ||
1253 | int reg; | ||
1254 | u32 val; | ||
1255 | |||
1256 | /* | ||
1257 | * Make sure planes won't keep trying to pump pixels to us, | ||
1258 | * or we might hang the display. | ||
1259 | */ | ||
1260 | assert_planes_disabled(dev_priv, pipe); | ||
1261 | |||
1262 | /* Don't disable pipe A or pipe A PLLs if the pipe A force quirk is set */ | ||
1263 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
1264 | return; | ||
1265 | |||
1266 | reg = PIPECONF(pipe); | ||
1267 | val = I915_READ(reg); | ||
1268 | if ((val & PIPECONF_ENABLE) == 0) | ||
1269 | return; | ||
1270 | |||
1271 | I915_WRITE(reg, val & ~PIPECONF_ENABLE); | ||
1272 | intel_wait_for_pipe_off(dev_priv->dev, pipe); | ||
1273 | } | ||
1274 | |||
1275 | /** | ||
1276 | * intel_enable_plane - enable a display plane on a given pipe | ||
1277 | * @dev_priv: i915 private structure | ||
1278 | * @plane: plane to enable | ||
1279 | * @pipe: pipe being fed | ||
1280 | * | ||
1281 | * Enable @plane on @pipe, making sure that @pipe is running first. | ||
1282 | */ | ||
1283 | static void intel_enable_plane(struct drm_i915_private *dev_priv, | ||
1284 | enum plane plane, enum pipe pipe) | ||
1285 | { | ||
1286 | int reg; | ||
1287 | u32 val; | ||
1288 | |||
1289 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ | ||
1290 | assert_pipe_enabled(dev_priv, pipe); | ||
1291 | |||
1292 | reg = DSPCNTR(plane); | ||
1293 | val = I915_READ(reg); | ||
1294 | if (val & DISPLAY_PLANE_ENABLE) | ||
1295 | return; | ||
1296 | |||
1297 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); | ||
1298 | intel_wait_for_vblank(dev_priv->dev, pipe); | ||
1299 | } | ||
1300 | |||
1301 | /* | ||
1302 | * Plane regs are double buffered, going from enabled->disabled needs a | ||
1303 | * trigger in order to latch. The display address reg provides this. | ||
1304 | */ | ||
1305 | static void intel_flush_display_plane(struct drm_i915_private *dev_priv, | ||
1306 | enum plane plane) | ||
1307 | { | ||
1308 | u32 reg = DSPADDR(plane); | ||
1309 | I915_WRITE(reg, I915_READ(reg)); | ||
1310 | } | ||
1311 | |||
1312 | /** | ||
1313 | * intel_disable_plane - disable a display plane | ||
1314 | * @dev_priv: i915 private structure | ||
1315 | * @plane: plane to disable | ||
1316 | * @pipe: pipe consuming the data | ||
1317 | * | ||
1318 | * Disable @plane; should be an independent operation. | ||
1319 | */ | ||
1320 | static void intel_disable_plane(struct drm_i915_private *dev_priv, | ||
1321 | enum plane plane, enum pipe pipe) | ||
1322 | { | ||
1323 | int reg; | ||
1324 | u32 val; | ||
1325 | |||
1326 | reg = DSPCNTR(plane); | ||
1327 | val = I915_READ(reg); | ||
1328 | if ((val & DISPLAY_PLANE_ENABLE) == 0) | ||
1329 | return; | ||
1330 | |||
1331 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); | ||
1332 | intel_flush_display_plane(dev_priv, plane); | ||
1333 | intel_wait_for_vblank(dev_priv->dev, pipe); | ||
1334 | } | ||
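Likewise, the pre-ILK pipe helpers above chain their requirements through assertions: the DPLL must run before the pipe, the pipe before the plane, and tear-down proceeds in the opposite direction. A minimal sketch of a caller honoring that order, assuming only the helpers above; the example_* wrappers are illustrative:

/* Illustrative only: enable bottom-up, disable top-down (pre-ILK). */
static void example_crtc_on(struct drm_i915_private *dev_priv,
			    enum pipe pipe, enum plane plane)
{
	intel_enable_pll(dev_priv, pipe);	   /* clock first */
	intel_enable_pipe(dev_priv, pipe, false);  /* no PCH port pre-ILK */
	intel_enable_plane(dev_priv, plane, pipe); /* now pixels can flow */
}

static void example_crtc_off(struct drm_i915_private *dev_priv,
			     enum pipe pipe, enum plane plane)
{
	intel_disable_plane(dev_priv, plane, pipe); /* stop feeding the pipe */
	intel_disable_pipe(dev_priv, pipe);	    /* asserts planes are off */
	intel_disable_pll(dev_priv, pipe);	    /* asserts the pipe is off */
}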
1335 | |||
1336 | static void disable_pch_dp(struct drm_i915_private *dev_priv, | ||
1337 | enum pipe pipe, int reg) | ||
1338 | { | ||
1339 | u32 val = I915_READ(reg); | ||
1340 | if (DP_PIPE_ENABLED(val, pipe)) | ||
1341 | I915_WRITE(reg, val & ~DP_PORT_EN); | ||
1342 | } | ||
1343 | |||
1344 | static void disable_pch_hdmi(struct drm_i915_private *dev_priv, | ||
1345 | enum pipe pipe, int reg) | ||
1346 | { | ||
1347 | u32 val = I915_READ(reg); | ||
1348 | if (HDMI_PIPE_ENABLED(val, pipe)) | ||
1349 | I915_WRITE(reg, val & ~PORT_ENABLE); | ||
1350 | } | ||
1351 | |||
1352 | /* Disable any ports connected to this transcoder */ | ||
1353 | static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | ||
1354 | enum pipe pipe) | ||
1355 | { | ||
1356 | u32 reg, val; | ||
1357 | |||
1358 | val = I915_READ(PCH_PP_CONTROL); | ||
1359 | I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); | ||
1360 | |||
1361 | disable_pch_dp(dev_priv, pipe, PCH_DP_B); | ||
1362 | disable_pch_dp(dev_priv, pipe, PCH_DP_C); | ||
1363 | disable_pch_dp(dev_priv, pipe, PCH_DP_D); | ||
1364 | |||
1365 | reg = PCH_ADPA; | ||
1366 | val = I915_READ(reg); | ||
1367 | if (ADPA_PIPE_ENABLED(val, pipe)) | ||
1368 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); | ||
1369 | |||
1370 | reg = PCH_LVDS; | ||
1371 | val = I915_READ(reg); | ||
1372 | if (LVDS_PIPE_ENABLED(val, pipe)) { | ||
1373 | I915_WRITE(reg, val & ~LVDS_PORT_EN); | ||
1374 | POSTING_READ(reg); | ||
1375 | udelay(100); | ||
1376 | } | ||
1377 | |||
1378 | disable_pch_hdmi(dev_priv, pipe, HDMIB); | ||
1379 | disable_pch_hdmi(dev_priv, pipe, HDMIC); | ||
1380 | disable_pch_hdmi(dev_priv, pipe, HDMID); | ||
1381 | } | ||
1382 | |||
1061 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1383 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1062 | { | 1384 | { |
1063 | struct drm_device *dev = crtc->dev; | 1385 | struct drm_device *dev = crtc->dev; |
1064 | struct drm_i915_private *dev_priv = dev->dev_private; | 1386 | struct drm_i915_private *dev_priv = dev->dev_private; |
1065 | struct drm_framebuffer *fb = crtc->fb; | 1387 | struct drm_framebuffer *fb = crtc->fb; |
1066 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1388 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1067 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1389 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1068 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1390 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1069 | int plane, i; | 1391 | int plane, i; |
1070 | u32 fbc_ctl, fbc_ctl2; | 1392 | u32 fbc_ctl, fbc_ctl2; |
1071 | 1393 | ||
1394 | if (fb->pitch == dev_priv->cfb_pitch && | ||
1395 | obj->fence_reg == dev_priv->cfb_fence && | ||
1396 | intel_crtc->plane == dev_priv->cfb_plane && | ||
1397 | I915_READ(FBC_CONTROL) & FBC_CTL_EN) | ||
1398 | return; | ||
1399 | |||
1400 | i8xx_disable_fbc(dev); | ||
1401 | |||
1072 | dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; | 1402 | dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; |
1073 | 1403 | ||
1074 | if (fb->pitch < dev_priv->cfb_pitch) | 1404 | if (fb->pitch < dev_priv->cfb_pitch) |
@@ -1076,7 +1406,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1076 | 1406 | ||
1077 | /* FBC_CTL wants 64B units */ | 1407 | /* FBC_CTL wants 64B units */ |
1078 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1408 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1079 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1409 | dev_priv->cfb_fence = obj->fence_reg; |
1080 | dev_priv->cfb_plane = intel_crtc->plane; | 1410 | dev_priv->cfb_plane = intel_crtc->plane; |
1081 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; | 1411 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; |
1082 | 1412 | ||
@@ -1086,7 +1416,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1086 | 1416 | ||
1087 | /* Set it up... */ | 1417 | /* Set it up... */ |
1088 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; | 1418 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; |
1089 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1419 | if (obj->tiling_mode != I915_TILING_NONE) |
1090 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; | 1420 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; |
1091 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | 1421 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); |
1092 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | 1422 | I915_WRITE(FBC_FENCE_OFF, crtc->y); |
@@ -1097,12 +1427,12 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1097 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | 1427 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
1098 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 1428 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
1099 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | 1429 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
1100 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1430 | if (obj->tiling_mode != I915_TILING_NONE) |
1101 | fbc_ctl |= dev_priv->cfb_fence; | 1431 | fbc_ctl |= dev_priv->cfb_fence; |
1102 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 1432 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1103 | 1433 | ||
1104 | DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ", | 1434 | DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ", |
1105 | dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); | 1435 | dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); |
1106 | } | 1436 | } |
1107 | 1437 | ||
1108 | void i8xx_disable_fbc(struct drm_device *dev) | 1438 | void i8xx_disable_fbc(struct drm_device *dev) |
@@ -1110,19 +1440,16 @@ void i8xx_disable_fbc(struct drm_device *dev) | |||
1110 | struct drm_i915_private *dev_priv = dev->dev_private; | 1440 | struct drm_i915_private *dev_priv = dev->dev_private; |
1111 | u32 fbc_ctl; | 1441 | u32 fbc_ctl; |
1112 | 1442 | ||
1113 | if (!I915_HAS_FBC(dev)) | ||
1114 | return; | ||
1115 | |||
1116 | if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN)) | ||
1117 | return; /* Already off, just return */ | ||
1118 | |||
1119 | /* Disable compression */ | 1443 | /* Disable compression */ |
1120 | fbc_ctl = I915_READ(FBC_CONTROL); | 1444 | fbc_ctl = I915_READ(FBC_CONTROL); |
1445 | if ((fbc_ctl & FBC_CTL_EN) == 0) | ||
1446 | return; | ||
1447 | |||
1121 | fbc_ctl &= ~FBC_CTL_EN; | 1448 | fbc_ctl &= ~FBC_CTL_EN; |
1122 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 1449 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1123 | 1450 | ||
1124 | /* Wait for compressing bit to clear */ | 1451 | /* Wait for compressing bit to clear */ |
1125 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) { | 1452 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { |
1126 | DRM_DEBUG_KMS("FBC idle timed out\n"); | 1453 | DRM_DEBUG_KMS("FBC idle timed out\n"); |
1127 | return; | 1454 | return; |
1128 | } | 1455 | } |
@@ -1143,26 +1470,37 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1143 | struct drm_i915_private *dev_priv = dev->dev_private; | 1470 | struct drm_i915_private *dev_priv = dev->dev_private; |
1144 | struct drm_framebuffer *fb = crtc->fb; | 1471 | struct drm_framebuffer *fb = crtc->fb; |
1145 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1472 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1146 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1473 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1147 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1474 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1148 | int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : | 1475 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1149 | DPFC_CTL_PLANEB); | ||
1150 | unsigned long stall_watermark = 200; | 1476 | unsigned long stall_watermark = 200; |
1151 | u32 dpfc_ctl; | 1477 | u32 dpfc_ctl; |
1152 | 1478 | ||
1479 | dpfc_ctl = I915_READ(DPFC_CONTROL); | ||
1480 | if (dpfc_ctl & DPFC_CTL_EN) { | ||
1481 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | ||
1482 | dev_priv->cfb_fence == obj->fence_reg && | ||
1483 | dev_priv->cfb_plane == intel_crtc->plane && | ||
1484 | dev_priv->cfb_y == crtc->y) | ||
1485 | return; | ||
1486 | |||
1487 | I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); | ||
1488 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1489 | } | ||
1490 | |||
1153 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1491 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1154 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1492 | dev_priv->cfb_fence = obj->fence_reg; |
1155 | dev_priv->cfb_plane = intel_crtc->plane; | 1493 | dev_priv->cfb_plane = intel_crtc->plane; |
1494 | dev_priv->cfb_y = crtc->y; | ||
1156 | 1495 | ||
1157 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; | 1496 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; |
1158 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1497 | if (obj->tiling_mode != I915_TILING_NONE) { |
1159 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; | 1498 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; |
1160 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); | 1499 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); |
1161 | } else { | 1500 | } else { |
1162 | I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); | 1501 | I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); |
1163 | } | 1502 | } |
1164 | 1503 | ||
1165 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | ||
1166 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | 1504 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | |
1167 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | 1505 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
1168 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | 1506 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); |
@@ -1181,10 +1519,12 @@ void g4x_disable_fbc(struct drm_device *dev) | |||
1181 | 1519 | ||
1182 | /* Disable compression */ | 1520 | /* Disable compression */ |
1183 | dpfc_ctl = I915_READ(DPFC_CONTROL); | 1521 | dpfc_ctl = I915_READ(DPFC_CONTROL); |
1184 | dpfc_ctl &= ~DPFC_CTL_EN; | 1522 | if (dpfc_ctl & DPFC_CTL_EN) { |
1185 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | 1523 | dpfc_ctl &= ~DPFC_CTL_EN; |
1524 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | ||
1186 | 1525 | ||
1187 | DRM_DEBUG_KMS("disabled FBC\n"); | 1526 | DRM_DEBUG_KMS("disabled FBC\n"); |
1527 | } | ||
1188 | } | 1528 | } |
1189 | 1529 | ||
1190 | static bool g4x_fbc_enabled(struct drm_device *dev) | 1530 | static bool g4x_fbc_enabled(struct drm_device *dev) |
@@ -1194,42 +1534,80 @@ static bool g4x_fbc_enabled(struct drm_device *dev) | |||
1194 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | 1534 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
1195 | } | 1535 | } |
1196 | 1536 | ||
1537 | static void sandybridge_blit_fbc_update(struct drm_device *dev) | ||
1538 | { | ||
1539 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1540 | u32 blt_ecoskpd; | ||
1541 | |||
1542 | /* Make sure blitter notifies FBC of writes */ | ||
1543 | gen6_gt_force_wake_get(dev_priv); | ||
1544 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | ||
1545 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | ||
1546 | GEN6_BLITTER_LOCK_SHIFT; | ||
1547 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1548 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; | ||
1549 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1550 | blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << | ||
1551 | GEN6_BLITTER_LOCK_SHIFT); | ||
1552 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1553 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | ||
1554 | gen6_gt_force_wake_put(dev_priv); | ||
1555 | } | ||
1556 | |||
1197 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1557 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1198 | { | 1558 | { |
1199 | struct drm_device *dev = crtc->dev; | 1559 | struct drm_device *dev = crtc->dev; |
1200 | struct drm_i915_private *dev_priv = dev->dev_private; | 1560 | struct drm_i915_private *dev_priv = dev->dev_private; |
1201 | struct drm_framebuffer *fb = crtc->fb; | 1561 | struct drm_framebuffer *fb = crtc->fb; |
1202 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1562 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1203 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1563 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1204 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1564 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1205 | int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA : | 1565 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1206 | DPFC_CTL_PLANEB; | ||
1207 | unsigned long stall_watermark = 200; | 1566 | unsigned long stall_watermark = 200; |
1208 | u32 dpfc_ctl; | 1567 | u32 dpfc_ctl; |
1209 | 1568 | ||
1569 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | ||
1570 | if (dpfc_ctl & DPFC_CTL_EN) { | ||
1571 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | ||
1572 | dev_priv->cfb_fence == obj->fence_reg && | ||
1573 | dev_priv->cfb_plane == intel_crtc->plane && | ||
1574 | dev_priv->cfb_offset == obj->gtt_offset && | ||
1575 | dev_priv->cfb_y == crtc->y) | ||
1576 | return; | ||
1577 | |||
1578 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); | ||
1579 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1580 | } | ||
1581 | |||
1210 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1582 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1211 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1583 | dev_priv->cfb_fence = obj->fence_reg; |
1212 | dev_priv->cfb_plane = intel_crtc->plane; | 1584 | dev_priv->cfb_plane = intel_crtc->plane; |
1585 | dev_priv->cfb_offset = obj->gtt_offset; | ||
1586 | dev_priv->cfb_y = crtc->y; | ||
1213 | 1587 | ||
1214 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | ||
1215 | dpfc_ctl &= DPFC_RESERVED; | 1588 | dpfc_ctl &= DPFC_RESERVED; |
1216 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); | 1589 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); |
1217 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1590 | if (obj->tiling_mode != I915_TILING_NONE) { |
1218 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); | 1591 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); |
1219 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); | 1592 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); |
1220 | } else { | 1593 | } else { |
1221 | I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); | 1594 | I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); |
1222 | } | 1595 | } |
1223 | 1596 | ||
1224 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | ||
1225 | I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | 1597 | I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | |
1226 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | 1598 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
1227 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | 1599 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); |
1228 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | 1600 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); |
1229 | I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); | 1601 | I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); |
1230 | /* enable it... */ | 1602 | /* enable it... */ |
1231 | I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) | | 1603 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
1232 | DPFC_CTL_EN); | 1604 | |
1605 | if (IS_GEN6(dev)) { | ||
1606 | I915_WRITE(SNB_DPFC_CTL_SA, | ||
1607 | SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); | ||
1608 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | ||
1609 | sandybridge_blit_fbc_update(dev); | ||
1610 | } | ||
1233 | 1611 | ||
1234 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); | 1612 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
1235 | } | 1613 | } |
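The reworked enable paths above share one idiom: bail out early if FBC is already on with the same cached parameters, otherwise switch it off and wait a vblank before re-programming (the i8xx path checks FBC_CONTROL and calls i8xx_disable_fbc() instead of waiting for a vblank). A sketch of that shared shape in g4x/ironlake terms; example_fbc_refresh() and its ctl_reg parameter are hypothetical, while the dev_priv->cfb_* fields are the caches used above:

/* Illustrative only: the check / tear-down / re-program shape used above. */
static void example_fbc_refresh(struct drm_device *dev, struct drm_crtc *crtc,
				struct drm_i915_gem_object *obj, u32 ctl_reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 ctl = I915_READ(ctl_reg);

	if (ctl & DPFC_CTL_EN) {
		if (dev_priv->cfb_fence == obj->fence_reg &&
		    dev_priv->cfb_plane == intel_crtc->plane &&
		    dev_priv->cfb_y == crtc->y)
			return;					/* nothing to do */

		I915_WRITE(ctl_reg, ctl & ~DPFC_CTL_EN);	/* turn it off */
		intel_wait_for_vblank(dev, intel_crtc->pipe);	/* let it settle */
	}

	/* ...cache the new pitch/fence/plane/y and program the hardware... */
}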
@@ -1241,10 +1619,12 @@ void ironlake_disable_fbc(struct drm_device *dev) | |||
1241 | 1619 | ||
1242 | /* Disable compression */ | 1620 | /* Disable compression */ |
1243 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | 1621 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
1244 | dpfc_ctl &= ~DPFC_CTL_EN; | 1622 | if (dpfc_ctl & DPFC_CTL_EN) { |
1245 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | 1623 | dpfc_ctl &= ~DPFC_CTL_EN; |
1624 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | ||
1246 | 1625 | ||
1247 | DRM_DEBUG_KMS("disabled FBC\n"); | 1626 | DRM_DEBUG_KMS("disabled FBC\n"); |
1627 | } | ||
1248 | } | 1628 | } |
1249 | 1629 | ||
1250 | static bool ironlake_fbc_enabled(struct drm_device *dev) | 1630 | static bool ironlake_fbc_enabled(struct drm_device *dev) |
@@ -1286,8 +1666,7 @@ void intel_disable_fbc(struct drm_device *dev) | |||
1286 | 1666 | ||
1287 | /** | 1667 | /** |
1288 | * intel_update_fbc - enable/disable FBC as needed | 1668 | * intel_update_fbc - enable/disable FBC as needed |
1289 | * @crtc: CRTC to point the compressor at | 1669 | * @dev: the drm_device |
1290 | * @mode: mode in use | ||
1291 | * | 1670 | * |
1292 | * Set up the framebuffer compression hardware at mode set time. We | 1671 | * Set up the framebuffer compression hardware at mode set time. We |
1293 | * enable it if possible: | 1672 | * enable it if possible: |
@@ -1304,18 +1683,14 @@ void intel_disable_fbc(struct drm_device *dev) | |||
1304 | * | 1683 | * |
1305 | * We need to enable/disable FBC on a global basis. | 1684 | * We need to enable/disable FBC on a global basis. |
1306 | */ | 1685 | */ |
1307 | static void intel_update_fbc(struct drm_crtc *crtc, | 1686 | static void intel_update_fbc(struct drm_device *dev) |
1308 | struct drm_display_mode *mode) | ||
1309 | { | 1687 | { |
1310 | struct drm_device *dev = crtc->dev; | ||
1311 | struct drm_i915_private *dev_priv = dev->dev_private; | 1688 | struct drm_i915_private *dev_priv = dev->dev_private; |
1312 | struct drm_framebuffer *fb = crtc->fb; | 1689 | struct drm_crtc *crtc = NULL, *tmp_crtc; |
1690 | struct intel_crtc *intel_crtc; | ||
1691 | struct drm_framebuffer *fb; | ||
1313 | struct intel_framebuffer *intel_fb; | 1692 | struct intel_framebuffer *intel_fb; |
1314 | struct drm_i915_gem_object *obj_priv; | 1693 | struct drm_i915_gem_object *obj; |
1315 | struct drm_crtc *tmp_crtc; | ||
1316 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1317 | int plane = intel_crtc->plane; | ||
1318 | int crtcs_enabled = 0; | ||
1319 | 1694 | ||
1320 | DRM_DEBUG_KMS("\n"); | 1695 | DRM_DEBUG_KMS("\n"); |
1321 | 1696 | ||
@@ -1325,12 +1700,6 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1325 | if (!I915_HAS_FBC(dev)) | 1700 | if (!I915_HAS_FBC(dev)) |
1326 | return; | 1701 | return; |
1327 | 1702 | ||
1328 | if (!crtc->fb) | ||
1329 | return; | ||
1330 | |||
1331 | intel_fb = to_intel_framebuffer(fb); | ||
1332 | obj_priv = to_intel_bo(intel_fb->obj); | ||
1333 | |||
1334 | /* | 1703 | /* |
1335 | * If FBC is already on, we just have to verify that we can | 1704 | * If FBC is already on, we just have to verify that we can |
1336 | * keep it that way... | 1705 | * keep it that way... |
@@ -1341,40 +1710,57 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1341 | * - going to an unsupported config (interlace, pixel multiply, etc.) | 1710 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
1342 | */ | 1711 | */ |
1343 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { | 1712 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { |
1344 | if (tmp_crtc->enabled) | 1713 | if (tmp_crtc->enabled && tmp_crtc->fb) { |
1345 | crtcs_enabled++; | 1714 | if (crtc) { |
1715 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | ||
1716 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | ||
1717 | goto out_disable; | ||
1718 | } | ||
1719 | crtc = tmp_crtc; | ||
1720 | } | ||
1346 | } | 1721 | } |
1347 | DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled); | 1722 | |
1348 | if (crtcs_enabled > 1) { | 1723 | if (!crtc || crtc->fb == NULL) { |
1349 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | 1724 | DRM_DEBUG_KMS("no output, disabling\n"); |
1350 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | 1725 | dev_priv->no_fbc_reason = FBC_NO_OUTPUT; |
1351 | goto out_disable; | 1726 | goto out_disable; |
1352 | } | 1727 | } |
1353 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1728 | |
1729 | intel_crtc = to_intel_crtc(crtc); | ||
1730 | fb = crtc->fb; | ||
1731 | intel_fb = to_intel_framebuffer(fb); | ||
1732 | obj = intel_fb->obj; | ||
1733 | |||
1734 | if (!i915_enable_fbc) { | ||
1735 | DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); | ||
1736 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; | ||
1737 | goto out_disable; | ||
1738 | } | ||
1739 | if (intel_fb->obj->base.size > dev_priv->cfb_size) { | ||
1354 | DRM_DEBUG_KMS("framebuffer too large, disabling " | 1740 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1355 | "compression\n"); | 1741 | "compression\n"); |
1356 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | 1742 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
1357 | goto out_disable; | 1743 | goto out_disable; |
1358 | } | 1744 | } |
1359 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || | 1745 | if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || |
1360 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { | 1746 | (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { |
1361 | DRM_DEBUG_KMS("mode incompatible with compression, " | 1747 | DRM_DEBUG_KMS("mode incompatible with compression, " |
1362 | "disabling\n"); | 1748 | "disabling\n"); |
1363 | dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; | 1749 | dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; |
1364 | goto out_disable; | 1750 | goto out_disable; |
1365 | } | 1751 | } |
1366 | if ((mode->hdisplay > 2048) || | 1752 | if ((crtc->mode.hdisplay > 2048) || |
1367 | (mode->vdisplay > 1536)) { | 1753 | (crtc->mode.vdisplay > 1536)) { |
1368 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); | 1754 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); |
1369 | dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; | 1755 | dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; |
1370 | goto out_disable; | 1756 | goto out_disable; |
1371 | } | 1757 | } |
1372 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { | 1758 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { |
1373 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); | 1759 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); |
1374 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; | 1760 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; |
1375 | goto out_disable; | 1761 | goto out_disable; |
1376 | } | 1762 | } |
1377 | if (obj_priv->tiling_mode != I915_TILING_X) { | 1763 | if (obj->tiling_mode != I915_TILING_X) { |
1378 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); | 1764 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); |
1379 | dev_priv->no_fbc_reason = FBC_NOT_TILED; | 1765 | dev_priv->no_fbc_reason = FBC_NOT_TILED; |
1380 | goto out_disable; | 1766 | goto out_disable; |
@@ -1384,18 +1770,7 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1384 | if (in_dbg_master()) | 1770 | if (in_dbg_master()) |
1385 | goto out_disable; | 1771 | goto out_disable; |
1386 | 1772 | ||
1387 | if (intel_fbc_enabled(dev)) { | 1773 | intel_enable_fbc(crtc, 500); |
1388 | /* We can re-enable it in this case, but need to update pitch */ | ||
1389 | if ((fb->pitch > dev_priv->cfb_pitch) || | ||
1390 | (obj_priv->fence_reg != dev_priv->cfb_fence) || | ||
1391 | (plane != dev_priv->cfb_plane)) | ||
1392 | intel_disable_fbc(dev); | ||
1393 | } | ||
1394 | |||
1395 | /* Now try to turn it back on if possible */ | ||
1396 | if (!intel_fbc_enabled(dev)) | ||
1397 | intel_enable_fbc(crtc, 500); | ||
1398 | |||
1399 | return; | 1774 | return; |
1400 | 1775 | ||
1401 | out_disable: | 1776 | out_disable: |
@@ -1407,17 +1782,19 @@ out_disable: | |||
1407 | } | 1782 | } |
1408 | 1783 | ||
1409 | int | 1784 | int |
1410 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | 1785 | intel_pin_and_fence_fb_obj(struct drm_device *dev, |
1786 | struct drm_i915_gem_object *obj, | ||
1787 | struct intel_ring_buffer *pipelined) | ||
1411 | { | 1788 | { |
1412 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1789 | struct drm_i915_private *dev_priv = dev->dev_private; |
1413 | u32 alignment; | 1790 | u32 alignment; |
1414 | int ret; | 1791 | int ret; |
1415 | 1792 | ||
1416 | switch (obj_priv->tiling_mode) { | 1793 | switch (obj->tiling_mode) { |
1417 | case I915_TILING_NONE: | 1794 | case I915_TILING_NONE: |
1418 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | 1795 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1419 | alignment = 128 * 1024; | 1796 | alignment = 128 * 1024; |
1420 | else if (IS_I965G(dev)) | 1797 | else if (INTEL_INFO(dev)->gen >= 4) |
1421 | alignment = 4 * 1024; | 1798 | alignment = 4 * 1024; |
1422 | else | 1799 | else |
1423 | alignment = 64 * 1024; | 1800 | alignment = 64 * 1024; |
@@ -1434,46 +1811,50 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | |||
1434 | BUG(); | 1811 | BUG(); |
1435 | } | 1812 | } |
1436 | 1813 | ||
1437 | ret = i915_gem_object_pin(obj, alignment); | 1814 | dev_priv->mm.interruptible = false; |
1438 | if (ret != 0) | 1815 | ret = i915_gem_object_pin(obj, alignment, true); |
1439 | return ret; | 1816 | if (ret) |
1817 | goto err_interruptible; | ||
1818 | |||
1819 | ret = i915_gem_object_set_to_display_plane(obj, pipelined); | ||
1820 | if (ret) | ||
1821 | goto err_unpin; | ||
1440 | 1822 | ||
1441 | /* Install a fence for tiled scan-out. Pre-i965 always needs a | 1823 | /* Install a fence for tiled scan-out. Pre-i965 always needs a |
1442 | * fence, whereas 965+ only requires a fence if using | 1824 | * fence, whereas 965+ only requires a fence if using |
1443 | * framebuffer compression. For simplicity, we always install | 1825 | * framebuffer compression. For simplicity, we always install |
1444 | * a fence as the cost is not that onerous. | 1826 | * a fence as the cost is not that onerous. |
1445 | */ | 1827 | */ |
1446 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && | 1828 | if (obj->tiling_mode != I915_TILING_NONE) { |
1447 | obj_priv->tiling_mode != I915_TILING_NONE) { | 1829 | ret = i915_gem_object_get_fence(obj, pipelined); |
1448 | ret = i915_gem_object_get_fence_reg(obj); | 1830 | if (ret) |
1449 | if (ret != 0) { | 1831 | goto err_unpin; |
1450 | i915_gem_object_unpin(obj); | ||
1451 | return ret; | ||
1452 | } | ||
1453 | } | 1832 | } |
1454 | 1833 | ||
1834 | dev_priv->mm.interruptible = true; | ||
1455 | return 0; | 1835 | return 0; |
1836 | |||
1837 | err_unpin: | ||
1838 | i915_gem_object_unpin(obj); | ||
1839 | err_interruptible: | ||
1840 | dev_priv->mm.interruptible = true; | ||
1841 | return ret; | ||
1456 | } | 1842 | } |
1457 | 1843 | ||
1458 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ | 1844 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ |
1459 | static int | 1845 | static int |
1460 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | 1846 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
1461 | int x, int y) | 1847 | int x, int y, enum mode_set_atomic state) |
1462 | { | 1848 | { |
1463 | struct drm_device *dev = crtc->dev; | 1849 | struct drm_device *dev = crtc->dev; |
1464 | struct drm_i915_private *dev_priv = dev->dev_private; | 1850 | struct drm_i915_private *dev_priv = dev->dev_private; |
1465 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1851 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1466 | struct intel_framebuffer *intel_fb; | 1852 | struct intel_framebuffer *intel_fb; |
1467 | struct drm_i915_gem_object *obj_priv; | 1853 | struct drm_i915_gem_object *obj; |
1468 | struct drm_gem_object *obj; | ||
1469 | int plane = intel_crtc->plane; | 1854 | int plane = intel_crtc->plane; |
1470 | unsigned long Start, Offset; | 1855 | unsigned long Start, Offset; |
1471 | int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); | ||
1472 | int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); | ||
1473 | int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; | ||
1474 | int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); | ||
1475 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | ||
1476 | u32 dspcntr; | 1856 | u32 dspcntr; |
1857 | u32 reg; | ||
1477 | 1858 | ||
1478 | switch (plane) { | 1859 | switch (plane) { |
1479 | case 0: | 1860 | case 0: |
@@ -1486,9 +1867,9 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1486 | 1867 | ||
1487 | intel_fb = to_intel_framebuffer(fb); | 1868 | intel_fb = to_intel_framebuffer(fb); |
1488 | obj = intel_fb->obj; | 1869 | obj = intel_fb->obj; |
1489 | obj_priv = to_intel_bo(obj); | ||
1490 | 1870 | ||
1491 | dspcntr = I915_READ(dspcntr_reg); | 1871 | reg = DSPCNTR(plane); |
1872 | dspcntr = I915_READ(reg); | ||
1492 | /* Mask out pixel format bits in case we change it */ | 1873 | /* Mask out pixel format bits in case we change it */ |
1493 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | 1874 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
1494 | switch (fb->bits_per_pixel) { | 1875 | switch (fb->bits_per_pixel) { |
@@ -1509,8 +1890,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1509 | DRM_ERROR("Unknown color depth\n"); | 1890 | DRM_ERROR("Unknown color depth\n"); |
1510 | return -EINVAL; | 1891 | return -EINVAL; |
1511 | } | 1892 | } |
1512 | if (IS_I965G(dev)) { | 1893 | if (INTEL_INFO(dev)->gen >= 4) { |
1513 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1894 | if (obj->tiling_mode != I915_TILING_NONE) |
1514 | dspcntr |= DISPPLANE_TILED; | 1895 | dspcntr |= DISPPLANE_TILED; |
1515 | else | 1896 | else |
1516 | dspcntr &= ~DISPPLANE_TILED; | 1897 | dspcntr &= ~DISPPLANE_TILED; |
@@ -1520,28 +1901,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1520 | /* must disable */ | 1901 | /* must disable */ |
1521 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; | 1902 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; |
1522 | 1903 | ||
1523 | I915_WRITE(dspcntr_reg, dspcntr); | 1904 | I915_WRITE(reg, dspcntr); |
1524 | 1905 | ||
1525 | Start = obj_priv->gtt_offset; | 1906 | Start = obj->gtt_offset; |
1526 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); | 1907 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); |
1527 | 1908 | ||
1528 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | 1909 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
1529 | Start, Offset, x, y, fb->pitch); | 1910 | Start, Offset, x, y, fb->pitch); |
1530 | I915_WRITE(dspstride, fb->pitch); | 1911 | I915_WRITE(DSPSTRIDE(plane), fb->pitch); |
1531 | if (IS_I965G(dev)) { | 1912 | if (INTEL_INFO(dev)->gen >= 4) { |
1532 | I915_WRITE(dspsurf, Start); | 1913 | I915_WRITE(DSPSURF(plane), Start); |
1533 | I915_WRITE(dsptileoff, (y << 16) | x); | 1914 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); |
1534 | I915_WRITE(dspbase, Offset); | 1915 | I915_WRITE(DSPADDR(plane), Offset); |
1535 | } else { | 1916 | } else |
1536 | I915_WRITE(dspbase, Start + Offset); | 1917 | I915_WRITE(DSPADDR(plane), Start + Offset); |
1537 | } | 1918 | POSTING_READ(reg); |
1538 | POSTING_READ(dspbase); | ||
1539 | |||
1540 | if (IS_I965G(dev) || plane == 0) | ||
1541 | intel_update_fbc(crtc, &crtc->mode); | ||
1542 | 1919 | ||
1543 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1920 | intel_update_fbc(dev); |
1544 | intel_increase_pllclock(crtc, true); | 1921 | intel_increase_pllclock(crtc); |
1545 | 1922 | ||
1546 | return 0; | 1923 | return 0; |
1547 | } | 1924 | } |
@@ -1553,11 +1930,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1553 | struct drm_device *dev = crtc->dev; | 1930 | struct drm_device *dev = crtc->dev; |
1554 | struct drm_i915_master_private *master_priv; | 1931 | struct drm_i915_master_private *master_priv; |
1555 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1932 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1556 | struct intel_framebuffer *intel_fb; | ||
1557 | struct drm_i915_gem_object *obj_priv; | ||
1558 | struct drm_gem_object *obj; | ||
1559 | int pipe = intel_crtc->pipe; | ||
1560 | int plane = intel_crtc->plane; | ||
1561 | int ret; | 1933 | int ret; |
1562 | 1934 | ||
1563 | /* no fb bound */ | 1935 | /* no fb bound */ |
@@ -1566,44 +1938,54 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1566 | return 0; | 1938 | return 0; |
1567 | } | 1939 | } |
1568 | 1940 | ||
1569 | switch (plane) { | 1941 | switch (intel_crtc->plane) { |
1570 | case 0: | 1942 | case 0: |
1571 | case 1: | 1943 | case 1: |
1572 | break; | 1944 | break; |
1573 | default: | 1945 | default: |
1574 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); | ||
1575 | return -EINVAL; | 1946 | return -EINVAL; |
1576 | } | 1947 | } |
1577 | 1948 | ||
1578 | intel_fb = to_intel_framebuffer(crtc->fb); | ||
1579 | obj = intel_fb->obj; | ||
1580 | obj_priv = to_intel_bo(obj); | ||
1581 | |||
1582 | mutex_lock(&dev->struct_mutex); | 1949 | mutex_lock(&dev->struct_mutex); |
1583 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 1950 | ret = intel_pin_and_fence_fb_obj(dev, |
1951 | to_intel_framebuffer(crtc->fb)->obj, | ||
1952 | NULL); | ||
1584 | if (ret != 0) { | 1953 | if (ret != 0) { |
1585 | mutex_unlock(&dev->struct_mutex); | 1954 | mutex_unlock(&dev->struct_mutex); |
1586 | return ret; | 1955 | return ret; |
1587 | } | 1956 | } |
1588 | 1957 | ||
1589 | ret = i915_gem_object_set_to_display_plane(obj); | 1958 | if (old_fb) { |
1590 | if (ret != 0) { | 1959 | struct drm_i915_private *dev_priv = dev->dev_private; |
1591 | i915_gem_object_unpin(obj); | 1960 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
1592 | mutex_unlock(&dev->struct_mutex); | 1961 | |
1593 | return ret; | 1962 | wait_event(dev_priv->pending_flip_queue, |
1963 | atomic_read(&dev_priv->mm.wedged) || | ||
1964 | atomic_read(&obj->pending_flip) == 0); | ||
1965 | |||
1966 | /* Big Hammer, we also need to ensure that any pending | ||
1967 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | ||
1968 | * current scanout is retired before unpinning the old | ||
1969 | * framebuffer. | ||
1970 | * | ||
1971 | * This should only fail upon a hung GPU, in which case we | ||
1972 | * can safely continue. | ||
1973 | */ | ||
1974 | ret = i915_gem_object_flush_gpu(obj); | ||
1975 | (void) ret; | ||
1594 | } | 1976 | } |
1595 | 1977 | ||
1596 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); | 1978 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
1979 | LEAVE_ATOMIC_MODE_SET); | ||
1597 | if (ret) { | 1980 | if (ret) { |
1598 | i915_gem_object_unpin(obj); | 1981 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
1599 | mutex_unlock(&dev->struct_mutex); | 1982 | mutex_unlock(&dev->struct_mutex); |
1600 | return ret; | 1983 | return ret; |
1601 | } | 1984 | } |
1602 | 1985 | ||
1603 | if (old_fb) { | 1986 | if (old_fb) { |
1604 | intel_fb = to_intel_framebuffer(old_fb); | 1987 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1605 | obj_priv = to_intel_bo(intel_fb->obj); | 1988 | i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); |
1606 | i915_gem_object_unpin(intel_fb->obj); | ||
1607 | } | 1989 | } |
1608 | 1990 | ||
1609 | mutex_unlock(&dev->struct_mutex); | 1991 | mutex_unlock(&dev->struct_mutex); |
@@ -1615,7 +1997,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1615 | if (!master_priv->sarea_priv) | 1997 | if (!master_priv->sarea_priv) |
1616 | return 0; | 1998 | return 0; |
1617 | 1999 | ||
1618 | if (pipe) { | 2000 | if (intel_crtc->pipe) { |
1619 | master_priv->sarea_priv->pipeB_x = x; | 2001 | master_priv->sarea_priv->pipeB_x = x; |
1620 | master_priv->sarea_priv->pipeB_y = y; | 2002 | master_priv->sarea_priv->pipeB_y = y; |
1621 | } else { | 2003 | } else { |
@@ -1626,7 +2008,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1626 | return 0; | 2008 | return 0; |
1627 | } | 2009 | } |
1628 | 2010 | ||
1629 | static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) | 2011 | static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) |
1630 | { | 2012 | { |
1631 | struct drm_device *dev = crtc->dev; | 2013 | struct drm_device *dev = crtc->dev; |
1632 | struct drm_i915_private *dev_priv = dev->dev_private; | 2014 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1659,9 +2041,51 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) | |||
1659 | } | 2041 | } |
1660 | I915_WRITE(DP_A, dpa_ctl); | 2042 | I915_WRITE(DP_A, dpa_ctl); |
1661 | 2043 | ||
2044 | POSTING_READ(DP_A); | ||
1662 | udelay(500); | 2045 | udelay(500); |
1663 | } | 2046 | } |
1664 | 2047 | ||
2048 | static void intel_fdi_normal_train(struct drm_crtc *crtc) | ||
2049 | { | ||
2050 | struct drm_device *dev = crtc->dev; | ||
2051 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2052 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2053 | int pipe = intel_crtc->pipe; | ||
2054 | u32 reg, temp; | ||
2055 | |||
2056 | /* enable normal train */ | ||
2057 | reg = FDI_TX_CTL(pipe); | ||
2058 | temp = I915_READ(reg); | ||
2059 | if (IS_IVYBRIDGE(dev)) { | ||
2060 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; | ||
2061 | temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
2062 | } else { | ||
2063 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2064 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
2065 | } | ||
2066 | I915_WRITE(reg, temp); | ||
2067 | |||
2068 | reg = FDI_RX_CTL(pipe); | ||
2069 | temp = I915_READ(reg); | ||
2070 | if (HAS_PCH_CPT(dev)) { | ||
2071 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2072 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
2073 | } else { | ||
2074 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2075 | temp |= FDI_LINK_TRAIN_NONE; | ||
2076 | } | ||
2077 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
2078 | |||
2079 | /* wait one idle pattern time */ | ||
2080 | POSTING_READ(reg); | ||
2081 | udelay(1000); | ||
2082 | |||
2083 | /* IVB wants error correction enabled */ | ||
2084 | if (IS_IVYBRIDGE(dev)) | ||
2085 | I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | | ||
2086 | FDI_FE_ERRC_ENABLE); | ||
2087 | } | ||
2088 | |||
1665 | /* The FDI link training functions for ILK/Ibexpeak. */ | 2089 | /* The FDI link training functions for ILK/Ibexpeak. */ |
1666 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) | 2090 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
1667 | { | 2091 | { |
@@ -1669,84 +2093,97 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1669 | struct drm_i915_private *dev_priv = dev->dev_private; | 2093 | struct drm_i915_private *dev_priv = dev->dev_private; |
1670 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2094 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1671 | int pipe = intel_crtc->pipe; | 2095 | int pipe = intel_crtc->pipe; |
1672 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | 2096 | int plane = intel_crtc->plane; |
1673 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | 2097 | u32 reg, temp, tries; |
1674 | int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; | 2098 | |
1675 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | 2099 | /* FDI needs bits from pipe & plane first */ |
1676 | u32 temp, tries = 0; | 2100 | assert_pipe_enabled(dev_priv, pipe); |
2101 | assert_plane_enabled(dev_priv, plane); | ||
1677 | 2102 | ||
1678 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit | 2103 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
1679 | for train result */ | 2104 | for train result */ |
1680 | temp = I915_READ(fdi_rx_imr_reg); | 2105 | reg = FDI_RX_IMR(pipe); |
2106 | temp = I915_READ(reg); | ||
1681 | temp &= ~FDI_RX_SYMBOL_LOCK; | 2107 | temp &= ~FDI_RX_SYMBOL_LOCK; |
1682 | temp &= ~FDI_RX_BIT_LOCK; | 2108 | temp &= ~FDI_RX_BIT_LOCK; |
1683 | I915_WRITE(fdi_rx_imr_reg, temp); | 2109 | I915_WRITE(reg, temp); |
1684 | I915_READ(fdi_rx_imr_reg); | 2110 | I915_READ(reg); |
1685 | udelay(150); | 2111 | udelay(150); |
1686 | 2112 | ||
1687 | /* enable CPU FDI TX and PCH FDI RX */ | 2113 | /* enable CPU FDI TX and PCH FDI RX */ |
1688 | temp = I915_READ(fdi_tx_reg); | 2114 | reg = FDI_TX_CTL(pipe); |
1689 | temp |= FDI_TX_ENABLE; | 2115 | temp = I915_READ(reg); |
1690 | temp &= ~(7 << 19); | 2116 | temp &= ~(7 << 19); |
1691 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | 2117 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
1692 | temp &= ~FDI_LINK_TRAIN_NONE; | 2118 | temp &= ~FDI_LINK_TRAIN_NONE; |
1693 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 2119 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1694 | I915_WRITE(fdi_tx_reg, temp); | 2120 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
1695 | I915_READ(fdi_tx_reg); | ||
1696 | 2121 | ||
1697 | temp = I915_READ(fdi_rx_reg); | 2122 | reg = FDI_RX_CTL(pipe); |
2123 | temp = I915_READ(reg); | ||
1698 | temp &= ~FDI_LINK_TRAIN_NONE; | 2124 | temp &= ~FDI_LINK_TRAIN_NONE; |
1699 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 2125 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1700 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); | 2126 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
1701 | I915_READ(fdi_rx_reg); | 2127 | |
2128 | POSTING_READ(reg); | ||
1702 | udelay(150); | 2129 | udelay(150); |
1703 | 2130 | ||
2131 | /* Ironlake workaround, enable clock pointer after FDI enable */ | ||
2132 | if (HAS_PCH_IBX(dev)) { | ||
2133 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); | ||
2134 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | | ||
2135 | FDI_RX_PHASE_SYNC_POINTER_EN); | ||
2136 | } | ||
2137 | |||
2138 | reg = FDI_RX_IIR(pipe); | ||
1704 | for (tries = 0; tries < 5; tries++) { | 2139 | for (tries = 0; tries < 5; tries++) { |
1705 | temp = I915_READ(fdi_rx_iir_reg); | 2140 | temp = I915_READ(reg); |
1706 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 2141 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1707 | 2142 | ||
1708 | if ((temp & FDI_RX_BIT_LOCK)) { | 2143 | if ((temp & FDI_RX_BIT_LOCK)) { |
1709 | DRM_DEBUG_KMS("FDI train 1 done.\n"); | 2144 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
1710 | I915_WRITE(fdi_rx_iir_reg, | 2145 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
1711 | temp | FDI_RX_BIT_LOCK); | ||
1712 | break; | 2146 | break; |
1713 | } | 2147 | } |
1714 | } | 2148 | } |
1715 | if (tries == 5) | 2149 | if (tries == 5) |
1716 | DRM_DEBUG_KMS("FDI train 1 fail!\n"); | 2150 | DRM_ERROR("FDI train 1 fail!\n"); |
1717 | 2151 | ||
1718 | /* Train 2 */ | 2152 | /* Train 2 */ |
1719 | temp = I915_READ(fdi_tx_reg); | 2153 | reg = FDI_TX_CTL(pipe); |
2154 | temp = I915_READ(reg); | ||
1720 | temp &= ~FDI_LINK_TRAIN_NONE; | 2155 | temp &= ~FDI_LINK_TRAIN_NONE; |
1721 | temp |= FDI_LINK_TRAIN_PATTERN_2; | 2156 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1722 | I915_WRITE(fdi_tx_reg, temp); | 2157 | I915_WRITE(reg, temp); |
1723 | 2158 | ||
1724 | temp = I915_READ(fdi_rx_reg); | 2159 | reg = FDI_RX_CTL(pipe); |
2160 | temp = I915_READ(reg); | ||
1725 | temp &= ~FDI_LINK_TRAIN_NONE; | 2161 | temp &= ~FDI_LINK_TRAIN_NONE; |
1726 | temp |= FDI_LINK_TRAIN_PATTERN_2; | 2162 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1727 | I915_WRITE(fdi_rx_reg, temp); | 2163 | I915_WRITE(reg, temp); |
1728 | udelay(150); | ||
1729 | 2164 | ||
1730 | tries = 0; | 2165 | POSTING_READ(reg); |
2166 | udelay(150); | ||
1731 | 2167 | ||
2168 | reg = FDI_RX_IIR(pipe); | ||
1732 | for (tries = 0; tries < 5; tries++) { | 2169 | for (tries = 0; tries < 5; tries++) { |
1733 | temp = I915_READ(fdi_rx_iir_reg); | 2170 | temp = I915_READ(reg); |
1734 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 2171 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1735 | 2172 | ||
1736 | if (temp & FDI_RX_SYMBOL_LOCK) { | 2173 | if (temp & FDI_RX_SYMBOL_LOCK) { |
1737 | I915_WRITE(fdi_rx_iir_reg, | 2174 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
1738 | temp | FDI_RX_SYMBOL_LOCK); | ||
1739 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | 2175 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
1740 | break; | 2176 | break; |
1741 | } | 2177 | } |
1742 | } | 2178 | } |
1743 | if (tries == 5) | 2179 | if (tries == 5) |
1744 | DRM_DEBUG_KMS("FDI train 2 fail!\n"); | 2180 | DRM_ERROR("FDI train 2 fail!\n"); |
1745 | 2181 | ||
1746 | DRM_DEBUG_KMS("FDI train done\n"); | 2182 | DRM_DEBUG_KMS("FDI train done\n"); |
2183 | |||
1747 | } | 2184 | } |
1748 | 2185 | ||
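Annotation on the hunk above: the rewritten ironlake_fdi_link_train replaces the per-pipe "? :" register selection with FDI_TX_CTL(pipe)/FDI_RX_CTL(pipe) lookups while keeping the same two-stage flow: program training pattern 1, poll FDI_RX_IIR for bit lock, then pattern 2 and poll for symbol lock. The stand-alone sketch below illustrates only that flow; the register indices, bit values, and MMIO helpers are invented for the example and are not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Tiny fake register file so the sketch runs in user space. */
static uint32_t regs[4];
static uint32_t mmio_read(int reg)              { return regs[reg]; }
static void     mmio_write(int reg, uint32_t v) { regs[reg] = v; }

/* Hypothetical register indices and bits, for illustration only. */
enum { FDI_TX = 0, FDI_RX = 1, FDI_RX_IIR = 2 };
#define TRAIN_PATTERN_MASK  (3u << 8)
#define TRAIN_PATTERN_1     (1u << 8)
#define TRAIN_PATTERN_2     (2u << 8)
#define RX_BIT_LOCK         (1u << 0)
#define RX_SYMBOL_LOCK      (1u << 1)

/* One training stage: program the pattern on both ends of the link, then
 * poll the RX interrupt register for the lock bit, acknowledging it by
 * writing the bit back, the way the driver's loop does. */
static int train_stage(uint32_t pattern, uint32_t lock_bit)
{
	uint32_t temp;
	int tries;

	temp = mmio_read(FDI_TX);
	mmio_write(FDI_TX, (temp & ~TRAIN_PATTERN_MASK) | pattern);

	temp = mmio_read(FDI_RX);
	mmio_write(FDI_RX, (temp & ~TRAIN_PATTERN_MASK) | pattern);

	for (tries = 0; tries < 5; tries++) {
		regs[FDI_RX_IIR] |= lock_bit;   /* fake hardware: lock at once */

		temp = mmio_read(FDI_RX_IIR);
		if (temp & lock_bit) {
			mmio_write(FDI_RX_IIR, temp | lock_bit);  /* ack */
			return 0;
		}
	}
	return -1;                              /* train stage failed */
}

int main(void)
{
	if (train_stage(TRAIN_PATTERN_1, RX_BIT_LOCK) == 0 &&
	    train_stage(TRAIN_PATTERN_2, RX_SYMBOL_LOCK) == 0)
		printf("FDI train done\n");
	else
		printf("FDI train failed\n");
	return 0;
}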
1749 | static int snb_b_fdi_train_param [] = { | 2186 | static const int snb_b_fdi_train_param [] = { |
1750 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, | 2187 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, |
1751 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, | 2188 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, |
1752 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, | 2189 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, |
@@ -1760,24 +2197,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1760 | struct drm_i915_private *dev_priv = dev->dev_private; | 2197 | struct drm_i915_private *dev_priv = dev->dev_private; |
1761 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2198 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1762 | int pipe = intel_crtc->pipe; | 2199 | int pipe = intel_crtc->pipe; |
1763 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | 2200 | u32 reg, temp, i; |
1764 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | ||
1765 | int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; | ||
1766 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | ||
1767 | u32 temp, i; | ||
1768 | 2201 | ||
1769 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit | 2202 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit |
1770 | for train result */ | 2203 | for train result */ |
1771 | temp = I915_READ(fdi_rx_imr_reg); | 2204 | reg = FDI_RX_IMR(pipe); |
2205 | temp = I915_READ(reg); | ||
1772 | temp &= ~FDI_RX_SYMBOL_LOCK; | 2206 | temp &= ~FDI_RX_SYMBOL_LOCK; |
1773 | temp &= ~FDI_RX_BIT_LOCK; | 2207 | temp &= ~FDI_RX_BIT_LOCK; |
1774 | I915_WRITE(fdi_rx_imr_reg, temp); | 2208 | I915_WRITE(reg, temp); |
1775 | I915_READ(fdi_rx_imr_reg); | 2209 | |
2210 | POSTING_READ(reg); | ||
1776 | udelay(150); | 2211 | udelay(150); |
1777 | 2212 | ||
1778 | /* enable CPU FDI TX and PCH FDI RX */ | 2213 | /* enable CPU FDI TX and PCH FDI RX */ |
1779 | temp = I915_READ(fdi_tx_reg); | 2214 | reg = FDI_TX_CTL(pipe); |
1780 | temp |= FDI_TX_ENABLE; | 2215 | temp = I915_READ(reg); |
1781 | temp &= ~(7 << 19); | 2216 | temp &= ~(7 << 19); |
1782 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | 2217 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
1783 | temp &= ~FDI_LINK_TRAIN_NONE; | 2218 | temp &= ~FDI_LINK_TRAIN_NONE; |
@@ -1785,10 +2220,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1785 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2220 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
1786 | /* SNB-B */ | 2221 | /* SNB-B */ |
1787 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | 2222 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
1788 | I915_WRITE(fdi_tx_reg, temp); | 2223 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
1789 | I915_READ(fdi_tx_reg); | ||
1790 | 2224 | ||
1791 | temp = I915_READ(fdi_rx_reg); | 2225 | reg = FDI_RX_CTL(pipe); |
2226 | temp = I915_READ(reg); | ||
1792 | if (HAS_PCH_CPT(dev)) { | 2227 | if (HAS_PCH_CPT(dev)) { |
1793 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | 2228 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
1794 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | 2229 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
@@ -1796,32 +2231,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1796 | temp &= ~FDI_LINK_TRAIN_NONE; | 2231 | temp &= ~FDI_LINK_TRAIN_NONE; |
1797 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 2232 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1798 | } | 2233 | } |
1799 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); | 2234 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
1800 | I915_READ(fdi_rx_reg); | 2235 | |
2236 | POSTING_READ(reg); | ||
1801 | udelay(150); | 2237 | udelay(150); |
1802 | 2238 | ||
1803 | for (i = 0; i < 4; i++ ) { | 2239 | for (i = 0; i < 4; i++ ) { |
1804 | temp = I915_READ(fdi_tx_reg); | 2240 | reg = FDI_TX_CTL(pipe); |
2241 | temp = I915_READ(reg); | ||
1805 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2242 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
1806 | temp |= snb_b_fdi_train_param[i]; | 2243 | temp |= snb_b_fdi_train_param[i]; |
1807 | I915_WRITE(fdi_tx_reg, temp); | 2244 | I915_WRITE(reg, temp); |
2245 | |||
2246 | POSTING_READ(reg); | ||
1808 | udelay(500); | 2247 | udelay(500); |
1809 | 2248 | ||
1810 | temp = I915_READ(fdi_rx_iir_reg); | 2249 | reg = FDI_RX_IIR(pipe); |
2250 | temp = I915_READ(reg); | ||
1811 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 2251 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1812 | 2252 | ||
1813 | if (temp & FDI_RX_BIT_LOCK) { | 2253 | if (temp & FDI_RX_BIT_LOCK) { |
1814 | I915_WRITE(fdi_rx_iir_reg, | 2254 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
1815 | temp | FDI_RX_BIT_LOCK); | ||
1816 | DRM_DEBUG_KMS("FDI train 1 done.\n"); | 2255 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
1817 | break; | 2256 | break; |
1818 | } | 2257 | } |
1819 | } | 2258 | } |
1820 | if (i == 4) | 2259 | if (i == 4) |
1821 | DRM_DEBUG_KMS("FDI train 1 fail!\n"); | 2260 | DRM_ERROR("FDI train 1 fail!\n"); |
1822 | 2261 | ||
1823 | /* Train 2 */ | 2262 | /* Train 2 */ |
1824 | temp = I915_READ(fdi_tx_reg); | 2263 | reg = FDI_TX_CTL(pipe); |
2264 | temp = I915_READ(reg); | ||
1825 | temp &= ~FDI_LINK_TRAIN_NONE; | 2265 | temp &= ~FDI_LINK_TRAIN_NONE; |
1826 | temp |= FDI_LINK_TRAIN_PATTERN_2; | 2266 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1827 | if (IS_GEN6(dev)) { | 2267 | if (IS_GEN6(dev)) { |
@@ -1829,9 +2269,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1829 | /* SNB-B */ | 2269 | /* SNB-B */ |
1830 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | 2270 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
1831 | } | 2271 | } |
1832 | I915_WRITE(fdi_tx_reg, temp); | 2272 | I915_WRITE(reg, temp); |
1833 | 2273 | ||
1834 | temp = I915_READ(fdi_rx_reg); | 2274 | reg = FDI_RX_CTL(pipe); |
2275 | temp = I915_READ(reg); | ||
1835 | if (HAS_PCH_CPT(dev)) { | 2276 | if (HAS_PCH_CPT(dev)) { |
1836 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | 2277 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
1837 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; | 2278 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
@@ -1839,445 +2280,544 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1839 | temp &= ~FDI_LINK_TRAIN_NONE; | 2280 | temp &= ~FDI_LINK_TRAIN_NONE; |
1840 | temp |= FDI_LINK_TRAIN_PATTERN_2; | 2281 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1841 | } | 2282 | } |
1842 | I915_WRITE(fdi_rx_reg, temp); | 2283 | I915_WRITE(reg, temp); |
2284 | |||
2285 | POSTING_READ(reg); | ||
1843 | udelay(150); | 2286 | udelay(150); |
1844 | 2287 | ||
1845 | for (i = 0; i < 4; i++ ) { | 2288 | for (i = 0; i < 4; i++ ) { |
1846 | temp = I915_READ(fdi_tx_reg); | 2289 | reg = FDI_TX_CTL(pipe); |
2290 | temp = I915_READ(reg); | ||
1847 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2291 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
1848 | temp |= snb_b_fdi_train_param[i]; | 2292 | temp |= snb_b_fdi_train_param[i]; |
1849 | I915_WRITE(fdi_tx_reg, temp); | 2293 | I915_WRITE(reg, temp); |
2294 | |||
2295 | POSTING_READ(reg); | ||
1850 | udelay(500); | 2296 | udelay(500); |
1851 | 2297 | ||
1852 | temp = I915_READ(fdi_rx_iir_reg); | 2298 | reg = FDI_RX_IIR(pipe); |
2299 | temp = I915_READ(reg); | ||
1853 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 2300 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1854 | 2301 | ||
1855 | if (temp & FDI_RX_SYMBOL_LOCK) { | 2302 | if (temp & FDI_RX_SYMBOL_LOCK) { |
1856 | I915_WRITE(fdi_rx_iir_reg, | 2303 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
1857 | temp | FDI_RX_SYMBOL_LOCK); | ||
1858 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | 2304 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
1859 | break; | 2305 | break; |
1860 | } | 2306 | } |
1861 | } | 2307 | } |
1862 | if (i == 4) | 2308 | if (i == 4) |
1863 | DRM_DEBUG_KMS("FDI train 2 fail!\n"); | 2309 | DRM_ERROR("FDI train 2 fail!\n"); |
1864 | 2310 | ||
1865 | DRM_DEBUG_KMS("FDI train done.\n"); | 2311 | DRM_DEBUG_KMS("FDI train done.\n"); |
1866 | } | 2312 | } |
1867 | 2313 | ||
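A recurring change throughout these hunks is that a plain I915_READ(reg) issued only to flush a preceding write is replaced with POSTING_READ(reg), which states the intent explicitly. A minimal user-space sketch of the idea follows; the stand-in register below is an ordinary variable, whereas in the driver it would be a memory-mapped hardware register.

#include <stdint.h>
#include <stdio.h>

/* Stand-in "register"; the driver's would be an ioremap()ed MMIO word. */
static volatile uint32_t fake_reg;

/* Reading a register back after writing it ("posting read") forces the
 * write out of any posted-write buffer before the code goes on to delay
 * or to touch dependent state.  The volatile access keeps the compiler
 * from discarding the otherwise unused read. */
static inline void posting_read(volatile uint32_t *reg)
{
	(void)*reg;
}

int main(void)
{
	fake_reg = 0x1234;              /* I915_WRITE(reg, temp)  */
	posting_read(&fake_reg);        /* POSTING_READ(reg)      */
	/* the driver would now udelay() for the documented settle time */
	printf("reg = 0x%x\n", (unsigned)fake_reg);
	return 0;
}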
1868 | static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | 2314 | /* Manual link training for Ivy Bridge A0 parts */ |
2315 | static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | ||
1869 | { | 2316 | { |
1870 | struct drm_device *dev = crtc->dev; | 2317 | struct drm_device *dev = crtc->dev; |
1871 | struct drm_i915_private *dev_priv = dev->dev_private; | 2318 | struct drm_i915_private *dev_priv = dev->dev_private; |
1872 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2319 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1873 | int pipe = intel_crtc->pipe; | 2320 | int pipe = intel_crtc->pipe; |
1874 | int plane = intel_crtc->plane; | 2321 | u32 reg, temp, i; |
1875 | int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; | ||
1876 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
1877 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | ||
1878 | int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; | ||
1879 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | ||
1880 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | ||
1881 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; | ||
1882 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | ||
1883 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | ||
1884 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | ||
1885 | int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; | ||
1886 | int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; | ||
1887 | int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; | ||
1888 | int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B; | ||
1889 | int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B; | ||
1890 | int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B; | ||
1891 | int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; | ||
1892 | int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; | ||
1893 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | ||
1894 | int trans_dpll_sel = (pipe == 0) ? 0 : 1; | ||
1895 | u32 temp; | ||
1896 | u32 pipe_bpc; | ||
1897 | |||
1898 | temp = I915_READ(pipeconf_reg); | ||
1899 | pipe_bpc = temp & PIPE_BPC_MASK; | ||
1900 | 2322 | ||
1901 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 2323 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit |
1902 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 2324 | for train result */ |
1903 | */ | 2325 | reg = FDI_RX_IMR(pipe); |
1904 | switch (mode) { | 2326 | temp = I915_READ(reg); |
1905 | case DRM_MODE_DPMS_ON: | 2327 | temp &= ~FDI_RX_SYMBOL_LOCK; |
1906 | case DRM_MODE_DPMS_STANDBY: | 2328 | temp &= ~FDI_RX_BIT_LOCK; |
1907 | case DRM_MODE_DPMS_SUSPEND: | 2329 | I915_WRITE(reg, temp); |
1908 | DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); | ||
1909 | 2330 | ||
1910 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 2331 | POSTING_READ(reg); |
1911 | temp = I915_READ(PCH_LVDS); | 2332 | udelay(150); |
1912 | if ((temp & LVDS_PORT_EN) == 0) { | ||
1913 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | ||
1914 | POSTING_READ(PCH_LVDS); | ||
1915 | } | ||
1916 | } | ||
1917 | 2333 | ||
1918 | if (!HAS_eDP) { | 2334 | /* enable CPU FDI TX and PCH FDI RX */ |
2335 | reg = FDI_TX_CTL(pipe); | ||
2336 | temp = I915_READ(reg); | ||
2337 | temp &= ~(7 << 19); | ||
2338 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | ||
2339 | temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); | ||
2340 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; | ||
2341 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | ||
2342 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | ||
2343 | I915_WRITE(reg, temp | FDI_TX_ENABLE); | ||
1919 | 2344 | ||
1920 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 2345 | reg = FDI_RX_CTL(pipe); |
1921 | temp = I915_READ(fdi_rx_reg); | 2346 | temp = I915_READ(reg); |
1922 | /* | 2347 | temp &= ~FDI_LINK_TRAIN_AUTO; |
1923 | * make the BPC in FDI Rx be consistent with that in | 2348 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
1924 | * pipeconf reg. | 2349 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
1925 | */ | 2350 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
1926 | temp &= ~(0x7 << 16); | ||
1927 | temp |= (pipe_bpc << 11); | ||
1928 | temp &= ~(7 << 19); | ||
1929 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | ||
1930 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); | ||
1931 | I915_READ(fdi_rx_reg); | ||
1932 | udelay(200); | ||
1933 | 2351 | ||
1934 | /* Switch from Rawclk to PCDclk */ | 2352 | POSTING_READ(reg); |
1935 | temp = I915_READ(fdi_rx_reg); | 2353 | udelay(150); |
1936 | I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); | ||
1937 | I915_READ(fdi_rx_reg); | ||
1938 | udelay(200); | ||
1939 | 2354 | ||
1940 | /* Enable CPU FDI TX PLL, always on for Ironlake */ | 2355 | for (i = 0; i < 4; i++ ) { |
1941 | temp = I915_READ(fdi_tx_reg); | 2356 | reg = FDI_TX_CTL(pipe); |
1942 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { | 2357 | temp = I915_READ(reg); |
1943 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | 2358 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
1944 | I915_READ(fdi_tx_reg); | 2359 | temp |= snb_b_fdi_train_param[i]; |
1945 | udelay(100); | 2360 | I915_WRITE(reg, temp); |
1946 | } | ||
1947 | } | ||
1948 | 2361 | ||
1949 | /* Enable panel fitting for LVDS */ | 2362 | POSTING_READ(reg); |
1950 | if (dev_priv->pch_pf_size && | 2363 | udelay(500); |
1951 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) | ||
1952 | || HAS_eDP || intel_pch_has_edp(crtc))) { | ||
1953 | /* Force use of hard-coded filter coefficients | ||
1954 | * as some pre-programmed values are broken, | ||
1955 | * e.g. x201. | ||
1956 | */ | ||
1957 | I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, | ||
1958 | PF_ENABLE | PF_FILTER_MED_3x3); | ||
1959 | I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, | ||
1960 | dev_priv->pch_pf_pos); | ||
1961 | I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, | ||
1962 | dev_priv->pch_pf_size); | ||
1963 | } | ||
1964 | 2364 | ||
1965 | /* Enable CPU pipe */ | 2365 | reg = FDI_RX_IIR(pipe); |
1966 | temp = I915_READ(pipeconf_reg); | 2366 | temp = I915_READ(reg); |
1967 | if ((temp & PIPEACONF_ENABLE) == 0) { | 2367 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1968 | I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); | ||
1969 | I915_READ(pipeconf_reg); | ||
1970 | udelay(100); | ||
1971 | } | ||
1972 | 2368 | ||
1973 | /* configure and enable CPU plane */ | 2369 | if (temp & FDI_RX_BIT_LOCK || |
1974 | temp = I915_READ(dspcntr_reg); | 2370 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { |
1975 | if ((temp & DISPLAY_PLANE_ENABLE) == 0) { | 2371 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
1976 | I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); | 2372 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
1977 | /* Flush the plane changes */ | 2373 | break; |
1978 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
1979 | } | 2374 | } |
2375 | } | ||
2376 | if (i == 4) | ||
2377 | DRM_ERROR("FDI train 1 fail!\n"); | ||
1980 | 2378 | ||
1981 | if (!HAS_eDP) { | 2379 | /* Train 2 */ |
1982 | /* For PCH output, training FDI link */ | 2380 | reg = FDI_TX_CTL(pipe); |
1983 | if (IS_GEN6(dev)) | 2381 | temp = I915_READ(reg); |
1984 | gen6_fdi_link_train(crtc); | 2382 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; |
1985 | else | 2383 | temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; |
1986 | ironlake_fdi_link_train(crtc); | 2384 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2385 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | ||
2386 | I915_WRITE(reg, temp); | ||
1987 | 2387 | ||
1988 | /* enable PCH DPLL */ | 2388 | reg = FDI_RX_CTL(pipe); |
1989 | temp = I915_READ(pch_dpll_reg); | 2389 | temp = I915_READ(reg); |
1990 | if ((temp & DPLL_VCO_ENABLE) == 0) { | 2390 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
1991 | I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); | 2391 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
1992 | I915_READ(pch_dpll_reg); | 2392 | I915_WRITE(reg, temp); |
1993 | } | ||
1994 | udelay(200); | ||
1995 | 2393 | ||
1996 | if (HAS_PCH_CPT(dev)) { | 2394 | POSTING_READ(reg); |
1997 | /* Be sure PCH DPLL SEL is set */ | 2395 | udelay(150); |
1998 | temp = I915_READ(PCH_DPLL_SEL); | ||
1999 | if (trans_dpll_sel == 0 && | ||
2000 | (temp & TRANSA_DPLL_ENABLE) == 0) | ||
2001 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | ||
2002 | else if (trans_dpll_sel == 1 && | ||
2003 | (temp & TRANSB_DPLL_ENABLE) == 0) | ||
2004 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | ||
2005 | I915_WRITE(PCH_DPLL_SEL, temp); | ||
2006 | I915_READ(PCH_DPLL_SEL); | ||
2007 | } | ||
2008 | 2396 | ||
2009 | /* set transcoder timing */ | 2397 | for (i = 0; i < 4; i++ ) { |
2010 | I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); | 2398 | reg = FDI_TX_CTL(pipe); |
2011 | I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); | 2399 | temp = I915_READ(reg); |
2012 | I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); | 2400 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2013 | 2401 | temp |= snb_b_fdi_train_param[i]; | |
2014 | I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); | 2402 | I915_WRITE(reg, temp); |
2015 | I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); | ||
2016 | I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); | ||
2017 | |||
2018 | /* enable normal train */ | ||
2019 | temp = I915_READ(fdi_tx_reg); | ||
2020 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2021 | I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | | ||
2022 | FDI_TX_ENHANCE_FRAME_ENABLE); | ||
2023 | I915_READ(fdi_tx_reg); | ||
2024 | |||
2025 | temp = I915_READ(fdi_rx_reg); | ||
2026 | if (HAS_PCH_CPT(dev)) { | ||
2027 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2028 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
2029 | } else { | ||
2030 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2031 | temp |= FDI_LINK_TRAIN_NONE; | ||
2032 | } | ||
2033 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
2034 | I915_READ(fdi_rx_reg); | ||
2035 | |||
2036 | /* wait one idle pattern time */ | ||
2037 | udelay(100); | ||
2038 | |||
2039 | /* For PCH DP, enable TRANS_DP_CTL */ | ||
2040 | if (HAS_PCH_CPT(dev) && | ||
2041 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | ||
2042 | int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; | ||
2043 | int reg; | ||
2044 | |||
2045 | reg = I915_READ(trans_dp_ctl); | ||
2046 | reg &= ~(TRANS_DP_PORT_SEL_MASK | | ||
2047 | TRANS_DP_SYNC_MASK); | ||
2048 | reg |= (TRANS_DP_OUTPUT_ENABLE | | ||
2049 | TRANS_DP_ENH_FRAMING); | ||
2050 | |||
2051 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | ||
2052 | reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; | ||
2053 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) | ||
2054 | reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; | ||
2055 | |||
2056 | switch (intel_trans_dp_port_sel(crtc)) { | ||
2057 | case PCH_DP_B: | ||
2058 | reg |= TRANS_DP_PORT_SEL_B; | ||
2059 | break; | ||
2060 | case PCH_DP_C: | ||
2061 | reg |= TRANS_DP_PORT_SEL_C; | ||
2062 | break; | ||
2063 | case PCH_DP_D: | ||
2064 | reg |= TRANS_DP_PORT_SEL_D; | ||
2065 | break; | ||
2066 | default: | ||
2067 | DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); | ||
2068 | reg |= TRANS_DP_PORT_SEL_B; | ||
2069 | break; | ||
2070 | } | ||
2071 | 2403 | ||
2072 | I915_WRITE(trans_dp_ctl, reg); | 2404 | POSTING_READ(reg); |
2073 | POSTING_READ(trans_dp_ctl); | 2405 | udelay(500); |
2074 | } | ||
2075 | 2406 | ||
2076 | /* enable PCH transcoder */ | 2407 | reg = FDI_RX_IIR(pipe); |
2077 | temp = I915_READ(transconf_reg); | 2408 | temp = I915_READ(reg); |
2078 | /* | 2409 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2079 | * make the BPC in transcoder be consistent with | ||
2080 | * that in pipeconf reg. | ||
2081 | */ | ||
2082 | temp &= ~PIPE_BPC_MASK; | ||
2083 | temp |= pipe_bpc; | ||
2084 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | ||
2085 | I915_READ(transconf_reg); | ||
2086 | 2410 | ||
2087 | if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1)) | 2411 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2088 | DRM_ERROR("failed to enable transcoder\n"); | 2412 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2413 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | ||
2414 | break; | ||
2089 | } | 2415 | } |
2416 | } | ||
2417 | if (i == 4) | ||
2418 | DRM_ERROR("FDI train 2 fail!\n"); | ||
2090 | 2419 | ||
2091 | intel_crtc_load_lut(crtc); | 2420 | DRM_DEBUG_KMS("FDI train done.\n"); |
2421 | } | ||
2092 | 2422 | ||
2093 | intel_update_fbc(crtc, &crtc->mode); | 2423 | static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) |
2094 | break; | 2424 | { |
2425 | struct drm_device *dev = crtc->dev; | ||
2426 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2427 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2428 | int pipe = intel_crtc->pipe; | ||
2429 | u32 reg, temp; | ||
2095 | 2430 | ||
2096 | case DRM_MODE_DPMS_OFF: | 2431 | /* Write the TU size bits so error detection works */ |
2097 | DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); | 2432 | I915_WRITE(FDI_RX_TUSIZE1(pipe), |
2433 | I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); | ||
2098 | 2434 | ||
2099 | drm_vblank_off(dev, pipe); | 2435 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
2100 | /* Disable display plane */ | 2436 | reg = FDI_RX_CTL(pipe); |
2101 | temp = I915_READ(dspcntr_reg); | 2437 | temp = I915_READ(reg); |
2102 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | 2438 | temp &= ~((0x7 << 19) | (0x7 << 16)); |
2103 | I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); | 2439 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2104 | /* Flush the plane changes */ | 2440 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
2105 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | 2441 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); |
2106 | I915_READ(dspbase_reg); | ||
2107 | } | ||
2108 | 2442 | ||
2109 | if (dev_priv->cfb_plane == plane && | 2443 | POSTING_READ(reg); |
2110 | dev_priv->display.disable_fbc) | 2444 | udelay(200); |
2111 | dev_priv->display.disable_fbc(dev); | ||
2112 | 2445 | ||
2113 | /* disable cpu pipe, disable after all planes disabled */ | 2446 | /* Switch from Rawclk to PCDclk */ |
2114 | temp = I915_READ(pipeconf_reg); | 2447 | temp = I915_READ(reg); |
2115 | if ((temp & PIPEACONF_ENABLE) != 0) { | 2448 | I915_WRITE(reg, temp | FDI_PCDCLK); |
2116 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); | ||
2117 | 2449 | ||
2118 | /* wait for cpu pipe off, pipe state */ | 2450 | POSTING_READ(reg); |
2119 | if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1)) | 2451 | udelay(200); |
2120 | DRM_ERROR("failed to turn off cpu pipe\n"); | 2452 | |
2121 | } else | 2453 | /* Enable CPU FDI TX PLL, always on for Ironlake */ |
2122 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | 2454 | reg = FDI_TX_CTL(pipe); |
2455 | temp = I915_READ(reg); | ||
2456 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { | ||
2457 | I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); | ||
2123 | 2458 | ||
2459 | POSTING_READ(reg); | ||
2124 | udelay(100); | 2460 | udelay(100); |
2461 | } | ||
2462 | } | ||
2463 | |||
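ironlake_fdi_pll_enable above packs the FDI lane count and the pipe's BPC selection into FDI_RX_CTL with a single read-modify-write before turning the RX PLL on. The stand-alone sketch below shows only that masking pattern; the field positions and the helper name are chosen for the example and do not come from the register specification.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout: a 3-bit lane-count field at bit 19 and a
 * 3-bit BPC field at bit 16.  Not the real FDI_RX_CTL layout. */
#define LANES_SHIFT  19
#define LANES_MASK   (0x7u << LANES_SHIFT)
#define BPC_SHIFT    16
#define BPC_MASK     (0x7u << BPC_SHIFT)

/* Read-modify-write: clear both fields, then OR in the new values,
 * leaving every other bit of the register value untouched. */
static uint32_t pack_fdi_rx_ctl(uint32_t old, unsigned lanes, unsigned bpc_code)
{
	uint32_t temp = old;

	temp &= ~(LANES_MASK | BPC_MASK);
	temp |= ((uint32_t)(lanes - 1) << LANES_SHIFT) & LANES_MASK;
	temp |= ((uint32_t)bpc_code << BPC_SHIFT) & BPC_MASK;
	return temp;
}

int main(void)
{
	uint32_t reg = 0x80000001u;          /* pretend other bits are already set */
	reg = pack_fdi_rx_ctl(reg, 4, 0x2);  /* 4 lanes, hypothetical BPC code */
	printf("value to write: 0x%08x\n", (unsigned)reg);
	return 0;
}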
2464 | static void ironlake_fdi_disable(struct drm_crtc *crtc) | ||
2465 | { | ||
2466 | struct drm_device *dev = crtc->dev; | ||
2467 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2468 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2469 | int pipe = intel_crtc->pipe; | ||
2470 | u32 reg, temp; | ||
2471 | |||
2472 | /* disable CPU FDI tx and PCH FDI rx */ | ||
2473 | reg = FDI_TX_CTL(pipe); | ||
2474 | temp = I915_READ(reg); | ||
2475 | I915_WRITE(reg, temp & ~FDI_TX_ENABLE); | ||
2476 | POSTING_READ(reg); | ||
2477 | |||
2478 | reg = FDI_RX_CTL(pipe); | ||
2479 | temp = I915_READ(reg); | ||
2480 | temp &= ~(0x7 << 16); | ||
2481 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2482 | I915_WRITE(reg, temp & ~FDI_RX_ENABLE); | ||
2483 | |||
2484 | POSTING_READ(reg); | ||
2485 | udelay(100); | ||
2486 | |||
2487 | /* Ironlake workaround, disable clock pointer after downing FDI */ | ||
2488 | if (HAS_PCH_IBX(dev)) { | ||
2489 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); | ||
2490 | I915_WRITE(FDI_RX_CHICKEN(pipe), | ||
2491 | I915_READ(FDI_RX_CHICKEN(pipe) & | ||
2492 | ~FDI_RX_PHASE_SYNC_POINTER_EN)); | ||
2493 | } | ||
2494 | |||
2495 | /* still set train pattern 1 */ | ||
2496 | reg = FDI_TX_CTL(pipe); | ||
2497 | temp = I915_READ(reg); | ||
2498 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2499 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2500 | I915_WRITE(reg, temp); | ||
2501 | |||
2502 | reg = FDI_RX_CTL(pipe); | ||
2503 | temp = I915_READ(reg); | ||
2504 | if (HAS_PCH_CPT(dev)) { | ||
2505 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2506 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
2507 | } else { | ||
2508 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2509 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2510 | } | ||
2511 | /* BPC in FDI rx is consistent with that in PIPECONF */ | ||
2512 | temp &= ~(0x07 << 16); | ||
2513 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2514 | I915_WRITE(reg, temp); | ||
2125 | 2515 | ||
2126 | /* Disable PF */ | 2516 | POSTING_READ(reg); |
2127 | I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); | 2517 | udelay(100); |
2128 | I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0); | 2518 | } |
2129 | 2519 | ||
2130 | /* disable CPU FDI tx and PCH FDI rx */ | 2520 | /* |
2131 | temp = I915_READ(fdi_tx_reg); | 2521 | * When we disable a pipe, we need to clear any pending scanline wait events |
2132 | I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE); | 2522 | * to avoid hanging the ring, which we assume we are waiting on. |
2133 | I915_READ(fdi_tx_reg); | 2523 | */ |
2524 | static void intel_clear_scanline_wait(struct drm_device *dev) | ||
2525 | { | ||
2526 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2527 | struct intel_ring_buffer *ring; | ||
2528 | u32 tmp; | ||
2134 | 2529 | ||
2135 | temp = I915_READ(fdi_rx_reg); | 2530 | if (IS_GEN2(dev)) |
2136 | /* BPC in FDI rx is consistent with that in pipeconf */ | 2531 | /* Can't break the hang on i8xx */ |
2137 | temp &= ~(0x07 << 16); | 2532 | return; |
2138 | temp |= (pipe_bpc << 11); | ||
2139 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); | ||
2140 | I915_READ(fdi_rx_reg); | ||
2141 | 2533 | ||
2142 | udelay(100); | 2534 | ring = LP_RING(dev_priv); |
2535 | tmp = I915_READ_CTL(ring); | ||
2536 | if (tmp & RING_WAIT) | ||
2537 | I915_WRITE_CTL(ring, tmp); | ||
2538 | } | ||
2143 | 2539 | ||
2144 | /* still set train pattern 1 */ | 2540 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
2145 | temp = I915_READ(fdi_tx_reg); | 2541 | { |
2146 | temp &= ~FDI_LINK_TRAIN_NONE; | 2542 | struct drm_i915_gem_object *obj; |
2147 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 2543 | struct drm_i915_private *dev_priv; |
2148 | I915_WRITE(fdi_tx_reg, temp); | ||
2149 | POSTING_READ(fdi_tx_reg); | ||
2150 | 2544 | ||
2151 | temp = I915_READ(fdi_rx_reg); | 2545 | if (crtc->fb == NULL) |
2152 | if (HAS_PCH_CPT(dev)) { | 2546 | return; |
2153 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2154 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
2155 | } else { | ||
2156 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2157 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2158 | } | ||
2159 | I915_WRITE(fdi_rx_reg, temp); | ||
2160 | POSTING_READ(fdi_rx_reg); | ||
2161 | 2547 | ||
2162 | udelay(100); | 2548 | obj = to_intel_framebuffer(crtc->fb)->obj; |
2549 | dev_priv = crtc->dev->dev_private; | ||
2550 | wait_event(dev_priv->pending_flip_queue, | ||
2551 | atomic_read(&obj->pending_flip) == 0); | ||
2552 | } | ||
2163 | 2553 | ||
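intel_crtc_wait_for_pending_flips above sleeps on dev_priv->pending_flip_queue until the framebuffer object's pending_flip count reaches zero. Below is a rough user-space analogue of that wait built from C11 atomics and a pthread condition variable instead of the kernel's wait_event; all names in it are invented for the sketch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* User-space analogue of wait_event(queue, atomic_read(&pending) == 0). */
static atomic_int pending_flips = 2;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flips_done = PTHREAD_COND_INITIALIZER;

/* Stand-in for the flip-completion side (the IRQ handler in the driver). */
static void *complete_flips(void *arg)
{
	(void)arg;
	while (atomic_load(&pending_flips) > 0) {
		atomic_fetch_sub(&pending_flips, 1);
		pthread_mutex_lock(&lock);
		pthread_cond_broadcast(&flips_done);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

/* Called before disabling the crtc: block until no flips are in flight. */
static void wait_for_pending_flips(void)
{
	pthread_mutex_lock(&lock);
	while (atomic_load(&pending_flips) != 0)
		pthread_cond_wait(&flips_done, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, complete_flips, NULL);
	wait_for_pending_flips();
	pthread_join(t, NULL);
	printf("all pending flips completed\n");
	return 0;
}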
2164 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 2554 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) |
2165 | temp = I915_READ(PCH_LVDS); | 2555 | { |
2166 | I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); | 2556 | struct drm_device *dev = crtc->dev; |
2167 | I915_READ(PCH_LVDS); | 2557 | struct drm_mode_config *mode_config = &dev->mode_config; |
2168 | udelay(100); | 2558 | struct intel_encoder *encoder; |
2169 | } | ||
2170 | 2559 | ||
2171 | /* disable PCH transcoder */ | 2560 | /* |
2172 | temp = I915_READ(transconf_reg); | 2561 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that |
2173 | if ((temp & TRANS_ENABLE) != 0) { | 2562 | * must be driven by its own crtc; no sharing is possible. |
2174 | I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); | 2563 | */ |
2564 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
2565 | if (encoder->base.crtc != crtc) | ||
2566 | continue; | ||
2175 | 2567 | ||
2176 | /* wait for PCH transcoder off, transcoder state */ | 2568 | switch (encoder->type) { |
2177 | if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1)) | 2569 | case INTEL_OUTPUT_EDP: |
2178 | DRM_ERROR("failed to disable transcoder\n"); | 2570 | if (!intel_encoder_is_pch_edp(&encoder->base)) |
2571 | return false; | ||
2572 | continue; | ||
2179 | } | 2573 | } |
2574 | } | ||
2180 | 2575 | ||
2181 | temp = I915_READ(transconf_reg); | 2576 | return true; |
2182 | /* BPC in transcoder is consistent with that in pipeconf */ | 2577 | } |
2183 | temp &= ~PIPE_BPC_MASK; | ||
2184 | temp |= pipe_bpc; | ||
2185 | I915_WRITE(transconf_reg, temp); | ||
2186 | I915_READ(transconf_reg); | ||
2187 | udelay(100); | ||
2188 | 2578 | ||
2189 | if (HAS_PCH_CPT(dev)) { | 2579 | /* |
2190 | /* disable TRANS_DP_CTL */ | 2580 | * Enable PCH resources required for PCH ports: |
2191 | int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; | 2581 | * - PCH PLLs |
2192 | int reg; | 2582 | * - FDI training & RX/TX |
2583 | * - update transcoder timings | ||
2584 | * - DP transcoding bits | ||
2585 | * - transcoder | ||
2586 | */ | ||
2587 | static void ironlake_pch_enable(struct drm_crtc *crtc) | ||
2588 | { | ||
2589 | struct drm_device *dev = crtc->dev; | ||
2590 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2591 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2592 | int pipe = intel_crtc->pipe; | ||
2593 | u32 reg, temp; | ||
2193 | 2594 | ||
2194 | reg = I915_READ(trans_dp_ctl); | 2595 | /* For PCH output, training FDI link */ |
2195 | reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); | 2596 | dev_priv->display.fdi_link_train(crtc); |
2196 | I915_WRITE(trans_dp_ctl, reg); | ||
2197 | POSTING_READ(trans_dp_ctl); | ||
2198 | 2597 | ||
2199 | /* disable DPLL_SEL */ | 2598 | intel_enable_pch_pll(dev_priv, pipe); |
2200 | temp = I915_READ(PCH_DPLL_SEL); | 2599 | |
2201 | if (trans_dpll_sel == 0) | 2600 | if (HAS_PCH_CPT(dev)) { |
2202 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); | 2601 | /* Be sure PCH DPLL SEL is set */ |
2203 | else | 2602 | temp = I915_READ(PCH_DPLL_SEL); |
2204 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | 2603 | if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0) |
2205 | I915_WRITE(PCH_DPLL_SEL, temp); | 2604 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); |
2206 | I915_READ(PCH_DPLL_SEL); | 2605 | else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0) |
2606 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | ||
2607 | I915_WRITE(PCH_DPLL_SEL, temp); | ||
2608 | } | ||
2207 | 2609 | ||
2610 | /* set transcoder timing, panel must allow it */ | ||
2611 | assert_panel_unlocked(dev_priv, pipe); | ||
2612 | I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); | ||
2613 | I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); | ||
2614 | I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); | ||
2615 | |||
2616 | I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); | ||
2617 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); | ||
2618 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); | ||
2619 | |||
2620 | intel_fdi_normal_train(crtc); | ||
2621 | |||
2622 | /* For PCH DP, enable TRANS_DP_CTL */ | ||
2623 | if (HAS_PCH_CPT(dev) && | ||
2624 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | ||
2625 | reg = TRANS_DP_CTL(pipe); | ||
2626 | temp = I915_READ(reg); | ||
2627 | temp &= ~(TRANS_DP_PORT_SEL_MASK | | ||
2628 | TRANS_DP_SYNC_MASK | | ||
2629 | TRANS_DP_BPC_MASK); | ||
2630 | temp |= (TRANS_DP_OUTPUT_ENABLE | | ||
2631 | TRANS_DP_ENH_FRAMING); | ||
2632 | temp |= TRANS_DP_8BPC; | ||
2633 | |||
2634 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | ||
2635 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; | ||
2636 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) | ||
2637 | temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; | ||
2638 | |||
2639 | switch (intel_trans_dp_port_sel(crtc)) { | ||
2640 | case PCH_DP_B: | ||
2641 | temp |= TRANS_DP_PORT_SEL_B; | ||
2642 | break; | ||
2643 | case PCH_DP_C: | ||
2644 | temp |= TRANS_DP_PORT_SEL_C; | ||
2645 | break; | ||
2646 | case PCH_DP_D: | ||
2647 | temp |= TRANS_DP_PORT_SEL_D; | ||
2648 | break; | ||
2649 | default: | ||
2650 | DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); | ||
2651 | temp |= TRANS_DP_PORT_SEL_B; | ||
2652 | break; | ||
2208 | } | 2653 | } |
2209 | 2654 | ||
2210 | /* disable PCH DPLL */ | 2655 | I915_WRITE(reg, temp); |
2211 | temp = I915_READ(pch_dpll_reg); | 2656 | } |
2212 | I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); | ||
2213 | I915_READ(pch_dpll_reg); | ||
2214 | |||
2215 | /* Switch from PCDclk to Rawclk */ | ||
2216 | temp = I915_READ(fdi_rx_reg); | ||
2217 | temp &= ~FDI_SEL_PCDCLK; | ||
2218 | I915_WRITE(fdi_rx_reg, temp); | ||
2219 | I915_READ(fdi_rx_reg); | ||
2220 | |||
2221 | /* Disable CPU FDI TX PLL */ | ||
2222 | temp = I915_READ(fdi_tx_reg); | ||
2223 | I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); | ||
2224 | I915_READ(fdi_tx_reg); | ||
2225 | udelay(100); | ||
2226 | 2657 | ||
2227 | temp = I915_READ(fdi_rx_reg); | 2658 | intel_enable_transcoder(dev_priv, pipe); |
2228 | temp &= ~FDI_RX_PLL_ENABLE; | 2659 | } |
2229 | I915_WRITE(fdi_rx_reg, temp); | ||
2230 | I915_READ(fdi_rx_reg); | ||
2231 | 2660 | ||
2232 | /* Wait for the clocks to turn off. */ | 2661 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2233 | udelay(100); | 2662 | { |
2234 | break; | 2663 | struct drm_device *dev = crtc->dev; |
2664 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2665 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2666 | int pipe = intel_crtc->pipe; | ||
2667 | int plane = intel_crtc->plane; | ||
2668 | u32 temp; | ||
2669 | bool is_pch_port; | ||
2670 | |||
2671 | if (intel_crtc->active) | ||
2672 | return; | ||
2673 | |||
2674 | intel_crtc->active = true; | ||
2675 | intel_update_watermarks(dev); | ||
2676 | |||
2677 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
2678 | temp = I915_READ(PCH_LVDS); | ||
2679 | if ((temp & LVDS_PORT_EN) == 0) | ||
2680 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | ||
2235 | } | 2681 | } |
2682 | |||
2683 | is_pch_port = intel_crtc_driving_pch(crtc); | ||
2684 | |||
2685 | if (is_pch_port) | ||
2686 | ironlake_fdi_pll_enable(crtc); | ||
2687 | else | ||
2688 | ironlake_fdi_disable(crtc); | ||
2689 | |||
2690 | /* Enable panel fitting for LVDS */ | ||
2691 | if (dev_priv->pch_pf_size && | ||
2692 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { | ||
2693 | /* Force use of hard-coded filter coefficients | ||
2694 | * as some pre-programmed values are broken, | ||
2695 | * e.g. x201. | ||
2696 | */ | ||
2697 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); | ||
2698 | I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); | ||
2699 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); | ||
2700 | } | ||
2701 | |||
2702 | intel_enable_pipe(dev_priv, pipe, is_pch_port); | ||
2703 | intel_enable_plane(dev_priv, plane, pipe); | ||
2704 | |||
2705 | if (is_pch_port) | ||
2706 | ironlake_pch_enable(crtc); | ||
2707 | |||
2708 | intel_crtc_load_lut(crtc); | ||
2709 | |||
2710 | mutex_lock(&dev->struct_mutex); | ||
2711 | intel_update_fbc(dev); | ||
2712 | mutex_unlock(&dev->struct_mutex); | ||
2713 | |||
2714 | intel_crtc_update_cursor(crtc, true); | ||
2236 | } | 2715 | } |
2237 | 2716 | ||
2238 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) | 2717 | static void ironlake_crtc_disable(struct drm_crtc *crtc) |
2239 | { | 2718 | { |
2240 | struct intel_overlay *overlay; | 2719 | struct drm_device *dev = crtc->dev; |
2241 | int ret; | 2720 | struct drm_i915_private *dev_priv = dev->dev_private; |
2721 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2722 | int pipe = intel_crtc->pipe; | ||
2723 | int plane = intel_crtc->plane; | ||
2724 | u32 reg, temp; | ||
2242 | 2725 | ||
2243 | if (!enable && intel_crtc->overlay) { | 2726 | if (!intel_crtc->active) |
2244 | overlay = intel_crtc->overlay; | 2727 | return; |
2245 | mutex_lock(&overlay->dev->struct_mutex); | ||
2246 | for (;;) { | ||
2247 | ret = intel_overlay_switch_off(overlay); | ||
2248 | if (ret == 0) | ||
2249 | break; | ||
2250 | 2728 | ||
2251 | ret = intel_overlay_recover_from_interrupt(overlay, 0); | 2729 | intel_crtc_wait_for_pending_flips(crtc); |
2252 | if (ret != 0) { | 2730 | drm_vblank_off(dev, pipe); |
2253 | /* overlay doesn't react anymore. Usually | 2731 | intel_crtc_update_cursor(crtc, false); |
2254 | * results in a black screen and an unkillable | 2732 | |
2255 | * X server. */ | 2733 | intel_disable_plane(dev_priv, plane, pipe); |
2256 | BUG(); | 2734 | |
2257 | overlay->hw_wedged = HW_WEDGED; | 2735 | if (dev_priv->cfb_plane == plane && |
2258 | break; | 2736 | dev_priv->display.disable_fbc) |
2259 | } | 2737 | dev_priv->display.disable_fbc(dev); |
2738 | |||
2739 | intel_disable_pipe(dev_priv, pipe); | ||
2740 | |||
2741 | /* Disable PF */ | ||
2742 | I915_WRITE(PF_CTL(pipe), 0); | ||
2743 | I915_WRITE(PF_WIN_SZ(pipe), 0); | ||
2744 | |||
2745 | ironlake_fdi_disable(crtc); | ||
2746 | |||
2747 | /* This is a horrible layering violation; we should be doing this in | ||
2748 | * the connector/encoder ->prepare instead, but we don't always have | ||
2749 | * enough information there about the config to know whether it will | ||
2750 | * actually be necessary or just cause undesired flicker. | ||
2751 | */ | ||
2752 | intel_disable_pch_ports(dev_priv, pipe); | ||
2753 | |||
2754 | intel_disable_transcoder(dev_priv, pipe); | ||
2755 | |||
2756 | if (HAS_PCH_CPT(dev)) { | ||
2757 | /* disable TRANS_DP_CTL */ | ||
2758 | reg = TRANS_DP_CTL(pipe); | ||
2759 | temp = I915_READ(reg); | ||
2760 | temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); | ||
2761 | temp |= TRANS_DP_PORT_SEL_NONE; | ||
2762 | I915_WRITE(reg, temp); | ||
2763 | |||
2764 | /* disable DPLL_SEL */ | ||
2765 | temp = I915_READ(PCH_DPLL_SEL); | ||
2766 | switch (pipe) { | ||
2767 | case 0: | ||
2768 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | ||
2769 | break; | ||
2770 | case 1: | ||
2771 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | ||
2772 | break; | ||
2773 | case 2: | ||
2774 | /* FIXME: manage transcoder PLLs? */ | ||
2775 | temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); | ||
2776 | break; | ||
2777 | default: | ||
2778 | BUG(); /* wtf */ | ||
2260 | } | 2779 | } |
2261 | mutex_unlock(&overlay->dev->struct_mutex); | 2780 | I915_WRITE(PCH_DPLL_SEL, temp); |
2262 | } | 2781 | } |
2263 | /* Let userspace switch the overlay on again. In most cases userspace | ||
2264 | * has to recompute where to put it anyway. */ | ||
2265 | 2782 | ||
2266 | return; | 2783 | /* disable PCH DPLL */ |
2784 | intel_disable_pch_pll(dev_priv, pipe); | ||
2785 | |||
2786 | /* Switch from PCDclk to Rawclk */ | ||
2787 | reg = FDI_RX_CTL(pipe); | ||
2788 | temp = I915_READ(reg); | ||
2789 | I915_WRITE(reg, temp & ~FDI_PCDCLK); | ||
2790 | |||
2791 | /* Disable CPU FDI TX PLL */ | ||
2792 | reg = FDI_TX_CTL(pipe); | ||
2793 | temp = I915_READ(reg); | ||
2794 | I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); | ||
2795 | |||
2796 | POSTING_READ(reg); | ||
2797 | udelay(100); | ||
2798 | |||
2799 | reg = FDI_RX_CTL(pipe); | ||
2800 | temp = I915_READ(reg); | ||
2801 | I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); | ||
2802 | |||
2803 | /* Wait for the clocks to turn off. */ | ||
2804 | POSTING_READ(reg); | ||
2805 | udelay(100); | ||
2806 | |||
2807 | intel_crtc->active = false; | ||
2808 | intel_update_watermarks(dev); | ||
2809 | |||
2810 | mutex_lock(&dev->struct_mutex); | ||
2811 | intel_update_fbc(dev); | ||
2812 | intel_clear_scanline_wait(dev); | ||
2813 | mutex_unlock(&dev->struct_mutex); | ||
2267 | } | 2814 | } |
2268 | 2815 | ||
2269 | static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | 2816 | static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) |
2270 | { | 2817 | { |
2271 | struct drm_device *dev = crtc->dev; | ||
2272 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2273 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2818 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2274 | int pipe = intel_crtc->pipe; | 2819 | int pipe = intel_crtc->pipe; |
2275 | int plane = intel_crtc->plane; | 2820 | int plane = intel_crtc->plane; |
2276 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | ||
2277 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | ||
2278 | int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; | ||
2279 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
2280 | u32 temp; | ||
2281 | 2821 | ||
2282 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 2822 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
2283 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 2823 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
@@ -2286,88 +2826,105 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2286 | case DRM_MODE_DPMS_ON: | 2826 | case DRM_MODE_DPMS_ON: |
2287 | case DRM_MODE_DPMS_STANDBY: | 2827 | case DRM_MODE_DPMS_STANDBY: |
2288 | case DRM_MODE_DPMS_SUSPEND: | 2828 | case DRM_MODE_DPMS_SUSPEND: |
2289 | /* Enable the DPLL */ | 2829 | DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); |
2290 | temp = I915_READ(dpll_reg); | 2830 | ironlake_crtc_enable(crtc); |
2291 | if ((temp & DPLL_VCO_ENABLE) == 0) { | 2831 | break; |
2292 | I915_WRITE(dpll_reg, temp); | ||
2293 | I915_READ(dpll_reg); | ||
2294 | /* Wait for the clocks to stabilize. */ | ||
2295 | udelay(150); | ||
2296 | I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); | ||
2297 | I915_READ(dpll_reg); | ||
2298 | /* Wait for the clocks to stabilize. */ | ||
2299 | udelay(150); | ||
2300 | I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); | ||
2301 | I915_READ(dpll_reg); | ||
2302 | /* Wait for the clocks to stabilize. */ | ||
2303 | udelay(150); | ||
2304 | } | ||
2305 | 2832 | ||
2306 | /* Enable the pipe */ | 2833 | case DRM_MODE_DPMS_OFF: |
2307 | temp = I915_READ(pipeconf_reg); | 2834 | DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); |
2308 | if ((temp & PIPEACONF_ENABLE) == 0) | 2835 | ironlake_crtc_disable(crtc); |
2309 | I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); | 2836 | break; |
2310 | 2837 | } | |
2311 | /* Enable the plane */ | 2838 | } |
2312 | temp = I915_READ(dspcntr_reg); | 2839 | |
2313 | if ((temp & DISPLAY_PLANE_ENABLE) == 0) { | 2840 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) |
2314 | I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); | 2841 | { |
2315 | /* Flush the plane changes */ | 2842 | if (!enable && intel_crtc->overlay) { |
2316 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | 2843 | struct drm_device *dev = intel_crtc->base.dev; |
2317 | } | 2844 | struct drm_i915_private *dev_priv = dev->dev_private; |
2845 | |||
2846 | mutex_lock(&dev->struct_mutex); | ||
2847 | dev_priv->mm.interruptible = false; | ||
2848 | (void) intel_overlay_switch_off(intel_crtc->overlay); | ||
2849 | dev_priv->mm.interruptible = true; | ||
2850 | mutex_unlock(&dev->struct_mutex); | ||
2851 | } | ||
2318 | 2852 | ||
2319 | intel_crtc_load_lut(crtc); | 2853 | /* Let userspace switch the overlay on again. In most cases userspace |
2854 | * has to recompute where to put it anyway. | ||
2855 | */ | ||
2856 | } | ||
2320 | 2857 | ||
2321 | if ((IS_I965G(dev) || plane == 0)) | 2858 | static void i9xx_crtc_enable(struct drm_crtc *crtc) |
2322 | intel_update_fbc(crtc, &crtc->mode); | 2859 | { |
2860 | struct drm_device *dev = crtc->dev; | ||
2861 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2862 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2863 | int pipe = intel_crtc->pipe; | ||
2864 | int plane = intel_crtc->plane; | ||
2323 | 2865 | ||
2324 | /* Give the overlay scaler a chance to enable if it's on this pipe */ | 2866 | if (intel_crtc->active) |
2325 | intel_crtc_dpms_overlay(intel_crtc, true); | 2867 | return; |
2326 | break; | ||
2327 | case DRM_MODE_DPMS_OFF: | ||
2328 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | ||
2329 | intel_crtc_dpms_overlay(intel_crtc, false); | ||
2330 | drm_vblank_off(dev, pipe); | ||
2331 | |||
2332 | if (dev_priv->cfb_plane == plane && | ||
2333 | dev_priv->display.disable_fbc) | ||
2334 | dev_priv->display.disable_fbc(dev); | ||
2335 | |||
2336 | /* Disable display plane */ | ||
2337 | temp = I915_READ(dspcntr_reg); | ||
2338 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | ||
2339 | I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); | ||
2340 | /* Flush the plane changes */ | ||
2341 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
2342 | I915_READ(dspbase_reg); | ||
2343 | } | ||
2344 | 2868 | ||
2345 | /* Don't disable pipe A or pipe A PLLs if needed */ | 2869 | intel_crtc->active = true; |
2346 | if (pipeconf_reg == PIPEACONF && | 2870 | intel_update_watermarks(dev); |
2347 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) { | ||
2348 | /* Wait for vblank for the disable to take effect */ | ||
2349 | intel_wait_for_vblank(dev, pipe); | ||
2350 | goto skip_pipe_off; | ||
2351 | } | ||
2352 | 2871 | ||
2353 | /* Next, disable display pipes */ | 2872 | intel_enable_pll(dev_priv, pipe); |
2354 | temp = I915_READ(pipeconf_reg); | 2873 | intel_enable_pipe(dev_priv, pipe, false); |
2355 | if ((temp & PIPEACONF_ENABLE) != 0) { | 2874 | intel_enable_plane(dev_priv, plane, pipe); |
2356 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); | ||
2357 | I915_READ(pipeconf_reg); | ||
2358 | } | ||
2359 | 2875 | ||
2360 | /* Wait for the pipe to turn off */ | 2876 | intel_crtc_load_lut(crtc); |
2361 | intel_wait_for_pipe_off(dev, pipe); | 2877 | intel_update_fbc(dev); |
2362 | 2878 | ||
2363 | temp = I915_READ(dpll_reg); | 2879 | /* Give the overlay scaler a chance to enable if it's on this pipe */ |
2364 | if ((temp & DPLL_VCO_ENABLE) != 0) { | 2880 | intel_crtc_dpms_overlay(intel_crtc, true); |
2365 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); | 2881 | intel_crtc_update_cursor(crtc, true); |
2366 | I915_READ(dpll_reg); | 2882 | } |
2367 | } | 2883 | |
2368 | skip_pipe_off: | 2884 | static void i9xx_crtc_disable(struct drm_crtc *crtc) |
2369 | /* Wait for the clocks to turn off. */ | 2885 | { |
2370 | udelay(150); | 2886 | struct drm_device *dev = crtc->dev; |
2887 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2888 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2889 | int pipe = intel_crtc->pipe; | ||
2890 | int plane = intel_crtc->plane; | ||
2891 | |||
2892 | if (!intel_crtc->active) | ||
2893 | return; | ||
2894 | |||
2895 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | ||
2896 | intel_crtc_wait_for_pending_flips(crtc); | ||
2897 | drm_vblank_off(dev, pipe); | ||
2898 | intel_crtc_dpms_overlay(intel_crtc, false); | ||
2899 | intel_crtc_update_cursor(crtc, false); | ||
2900 | |||
2901 | if (dev_priv->cfb_plane == plane && | ||
2902 | dev_priv->display.disable_fbc) | ||
2903 | dev_priv->display.disable_fbc(dev); | ||
2904 | |||
2905 | intel_disable_plane(dev_priv, plane, pipe); | ||
2906 | intel_disable_pipe(dev_priv, pipe); | ||
2907 | intel_disable_pll(dev_priv, pipe); | ||
2908 | |||
2909 | intel_crtc->active = false; | ||
2910 | intel_update_fbc(dev); | ||
2911 | intel_update_watermarks(dev); | ||
2912 | intel_clear_scanline_wait(dev); | ||
2913 | } | ||
2914 | |||
2915 | static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
2916 | { | ||
2917 | /* XXX: When our outputs are all unaware of DPMS modes other than off | ||
2918 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | ||
2919 | */ | ||
2920 | switch (mode) { | ||
2921 | case DRM_MODE_DPMS_ON: | ||
2922 | case DRM_MODE_DPMS_STANDBY: | ||
2923 | case DRM_MODE_DPMS_SUSPEND: | ||
2924 | i9xx_crtc_enable(crtc); | ||
2925 | break; | ||
2926 | case DRM_MODE_DPMS_OFF: | ||
2927 | i9xx_crtc_disable(crtc); | ||
2371 | break; | 2928 | break; |
2372 | } | 2929 | } |
2373 | } | 2930 | } |
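The hunk above is the heart of the refactor: the old monolithic DPMS switch is split into i9xx_crtc_enable()/i9xx_crtc_disable() (with Ironlake counterparts earlier in the diff), each guarded by intel_crtc->active so repeated calls are harmless. A compact stand-alone sketch of that shape, with the struct and messages invented for the example:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the DRM DPMS modes used in the hunk above. */
enum dpms_mode { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

struct crtc {
	bool active;   /* mirrors intel_crtc->active */
	int  pipe;
};

static void crtc_enable(struct crtc *c)
{
	if (c->active)          /* already up: nothing to do */
		return;
	c->active = true;
	printf("pipe %d: pll, pipe, plane enabled\n", c->pipe);
}

static void crtc_disable(struct crtc *c)
{
	if (!c->active)         /* already down: nothing to do */
		return;
	c->active = false;
	printf("pipe %d: plane, pipe, pll disabled\n", c->pipe);
}

/* All non-OFF modes collapse onto "enable", exactly as the dpms switch does. */
static void crtc_dpms(struct crtc *c, enum dpms_mode mode)
{
	switch (mode) {
	case DPMS_ON:
	case DPMS_STANDBY:
	case DPMS_SUSPEND:
		crtc_enable(c);
		break;
	case DPMS_OFF:
		crtc_disable(c);
		break;
	}
}

int main(void)
{
	struct crtc c = { .active = false, .pipe = 0 };
	crtc_dpms(&c, DPMS_ON);
	crtc_dpms(&c, DPMS_ON);    /* idempotent thanks to the active guard */
	crtc_dpms(&c, DPMS_OFF);
	return 0;
}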
@@ -2388,26 +2945,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2388 | return; | 2945 | return; |
2389 | 2946 | ||
2390 | intel_crtc->dpms_mode = mode; | 2947 | intel_crtc->dpms_mode = mode; |
2391 | intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; | ||
2392 | |||
2393 | /* When switching on the display, ensure that SR is disabled | ||
2394 | * with multiple pipes prior to enabling to new pipe. | ||
2395 | * | ||
2396 | * When switching off the display, make sure the cursor is | ||
2397 | * properly hidden prior to disabling the pipe. | ||
2398 | */ | ||
2399 | if (mode == DRM_MODE_DPMS_ON) | ||
2400 | intel_update_watermarks(dev); | ||
2401 | else | ||
2402 | intel_crtc_update_cursor(crtc); | ||
2403 | 2948 | ||
2404 | dev_priv->display.dpms(crtc, mode); | 2949 | dev_priv->display.dpms(crtc, mode); |
2405 | 2950 | ||
2406 | if (mode == DRM_MODE_DPMS_ON) | ||
2407 | intel_crtc_update_cursor(crtc); | ||
2408 | else | ||
2409 | intel_update_watermarks(dev); | ||
2410 | |||
2411 | if (!dev->primary->master) | 2951 | if (!dev->primary->master) |
2412 | return; | 2952 | return; |
2413 | 2953 | ||
@@ -2427,21 +2967,51 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2427 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; | 2967 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; |
2428 | break; | 2968 | break; |
2429 | default: | 2969 | default: |
2430 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | 2970 | DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); |
2431 | break; | 2971 | break; |
2432 | } | 2972 | } |
2433 | } | 2973 | } |
2434 | 2974 | ||
2435 | static void intel_crtc_prepare (struct drm_crtc *crtc) | 2975 | static void intel_crtc_disable(struct drm_crtc *crtc) |
2436 | { | 2976 | { |
2437 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | 2977 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
2978 | struct drm_device *dev = crtc->dev; | ||
2979 | |||
2438 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | 2980 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); |
2981 | |||
2982 | if (crtc->fb) { | ||
2983 | mutex_lock(&dev->struct_mutex); | ||
2984 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | ||
2985 | mutex_unlock(&dev->struct_mutex); | ||
2986 | } | ||
2987 | } | ||
2988 | |||
2989 | /* Prepare for a mode set. | ||
2990 | * | ||
2991 | * Note we could be a lot smarter here. We need to figure out which outputs | ||
2992 | * will be enabled, which disabled (in short, how the config will change) | ||
2993 | * and perform the minimum necessary steps to accomplish that, e.g. updating | ||
2994 | * watermarks, FBC configuration, making sure PLLs are programmed correctly, | ||
2995 | * panel fitting is in the proper state, etc. | ||
2996 | */ | ||
2997 | static void i9xx_crtc_prepare(struct drm_crtc *crtc) | ||
2998 | { | ||
2999 | i9xx_crtc_disable(crtc); | ||
2439 | } | 3000 | } |
2440 | 3001 | ||
2441 | static void intel_crtc_commit (struct drm_crtc *crtc) | 3002 | static void i9xx_crtc_commit(struct drm_crtc *crtc) |
2442 | { | 3003 | { |
2443 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | 3004 | i9xx_crtc_enable(crtc); |
2444 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | 3005 | } |
3006 | |||
3007 | static void ironlake_crtc_prepare(struct drm_crtc *crtc) | ||
3008 | { | ||
3009 | ironlake_crtc_disable(crtc); | ||
3010 | } | ||
3011 | |||
3012 | static void ironlake_crtc_commit(struct drm_crtc *crtc) | ||
3013 | { | ||
3014 | ironlake_crtc_enable(crtc); | ||
2445 | } | 3015 | } |
2446 | 3016 | ||
2447 | void intel_encoder_prepare (struct drm_encoder *encoder) | 3017 | void intel_encoder_prepare (struct drm_encoder *encoder) |
@@ -2460,13 +3030,7 @@ void intel_encoder_commit (struct drm_encoder *encoder) | |||
2460 | 3030 | ||
2461 | void intel_encoder_destroy(struct drm_encoder *encoder) | 3031 | void intel_encoder_destroy(struct drm_encoder *encoder) |
2462 | { | 3032 | { |
2463 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 3033 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
2464 | |||
2465 | if (intel_encoder->ddc_bus) | ||
2466 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
2467 | |||
2468 | if (intel_encoder->i2c_bus) | ||
2469 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
2470 | 3034 | ||
2471 | drm_encoder_cleanup(encoder); | 3035 | drm_encoder_cleanup(encoder); |
2472 | kfree(intel_encoder); | 3036 | kfree(intel_encoder); |
@@ -2557,33 +3121,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev) | |||
2557 | return 133000; | 3121 | return 133000; |
2558 | } | 3122 | } |
2559 | 3123 | ||
2560 | /** | ||
2561 | * Return the pipe currently connected to the panel fitter, | ||
2562 | * or -1 if the panel fitter is not present or not in use | ||
2563 | */ | ||
2564 | int intel_panel_fitter_pipe (struct drm_device *dev) | ||
2565 | { | ||
2566 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2567 | u32 pfit_control; | ||
2568 | |||
2569 | /* i830 doesn't have a panel fitter */ | ||
2570 | if (IS_I830(dev)) | ||
2571 | return -1; | ||
2572 | |||
2573 | pfit_control = I915_READ(PFIT_CONTROL); | ||
2574 | |||
2575 | /* See if the panel fitter is in use */ | ||
2576 | if ((pfit_control & PFIT_ENABLE) == 0) | ||
2577 | return -1; | ||
2578 | |||
2579 | /* 965 can place panel fitter on either pipe */ | ||
2580 | if (IS_I965G(dev)) | ||
2581 | return (pfit_control >> 29) & 0x3; | ||
2582 | |||
2583 | /* older chips can only use pipe 1 */ | ||
2584 | return 1; | ||
2585 | } | ||
2586 | |||
2587 | struct fdi_m_n { | 3124 | struct fdi_m_n { |
2588 | u32 tu; | 3125 | u32 tu; |
2589 | u32 gmch_m; | 3126 | u32 gmch_m; |
@@ -2601,27 +3138,19 @@ fdi_reduce_ratio(u32 *num, u32 *den) | |||
2601 | } | 3138 | } |
2602 | } | 3139 | } |
2603 | 3140 | ||
2604 | #define DATA_N 0x800000 | ||
2605 | #define LINK_N 0x80000 | ||
2606 | |||
2607 | static void | 3141 | static void |
2608 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, | 3142 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, |
2609 | int link_clock, struct fdi_m_n *m_n) | 3143 | int link_clock, struct fdi_m_n *m_n) |
2610 | { | 3144 | { |
2611 | u64 temp; | ||
2612 | |||
2613 | m_n->tu = 64; /* default size */ | 3145 | m_n->tu = 64; /* default size */ |
2614 | 3146 | ||
2615 | temp = (u64) DATA_N * pixel_clock; | 3147 | /* BUG_ON(pixel_clock > INT_MAX / 36); */ |
2616 | temp = div_u64(temp, link_clock); | 3148 | m_n->gmch_m = bits_per_pixel * pixel_clock; |
2617 | m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes); | 3149 | m_n->gmch_n = link_clock * nlanes * 8; |
2618 | m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */ | ||
2619 | m_n->gmch_n = DATA_N; | ||
2620 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | 3150 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
2621 | 3151 | ||
2622 | temp = (u64) LINK_N * pixel_clock; | 3152 | m_n->link_m = pixel_clock; |
2623 | m_n->link_m = div_u64(temp, link_clock); | 3153 | m_n->link_n = link_clock; |
2624 | m_n->link_n = LINK_N; | ||
2625 | fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); | 3154 | fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); |
2626 | } | 3155 | } |
2627 | 3156 | ||
@@ -2635,77 +3164,77 @@ struct intel_watermark_params { | |||
2635 | }; | 3164 | }; |
2636 | 3165 | ||
2637 | /* Pineview has different values for various configs */ | 3166 | /* Pineview has different values for various configs */ |
2638 | static struct intel_watermark_params pineview_display_wm = { | 3167 | static const struct intel_watermark_params pineview_display_wm = { |
2639 | PINEVIEW_DISPLAY_FIFO, | 3168 | PINEVIEW_DISPLAY_FIFO, |
2640 | PINEVIEW_MAX_WM, | 3169 | PINEVIEW_MAX_WM, |
2641 | PINEVIEW_DFT_WM, | 3170 | PINEVIEW_DFT_WM, |
2642 | PINEVIEW_GUARD_WM, | 3171 | PINEVIEW_GUARD_WM, |
2643 | PINEVIEW_FIFO_LINE_SIZE | 3172 | PINEVIEW_FIFO_LINE_SIZE |
2644 | }; | 3173 | }; |
2645 | static struct intel_watermark_params pineview_display_hplloff_wm = { | 3174 | static const struct intel_watermark_params pineview_display_hplloff_wm = { |
2646 | PINEVIEW_DISPLAY_FIFO, | 3175 | PINEVIEW_DISPLAY_FIFO, |
2647 | PINEVIEW_MAX_WM, | 3176 | PINEVIEW_MAX_WM, |
2648 | PINEVIEW_DFT_HPLLOFF_WM, | 3177 | PINEVIEW_DFT_HPLLOFF_WM, |
2649 | PINEVIEW_GUARD_WM, | 3178 | PINEVIEW_GUARD_WM, |
2650 | PINEVIEW_FIFO_LINE_SIZE | 3179 | PINEVIEW_FIFO_LINE_SIZE |
2651 | }; | 3180 | }; |
2652 | static struct intel_watermark_params pineview_cursor_wm = { | 3181 | static const struct intel_watermark_params pineview_cursor_wm = { |
2653 | PINEVIEW_CURSOR_FIFO, | 3182 | PINEVIEW_CURSOR_FIFO, |
2654 | PINEVIEW_CURSOR_MAX_WM, | 3183 | PINEVIEW_CURSOR_MAX_WM, |
2655 | PINEVIEW_CURSOR_DFT_WM, | 3184 | PINEVIEW_CURSOR_DFT_WM, |
2656 | PINEVIEW_CURSOR_GUARD_WM, | 3185 | PINEVIEW_CURSOR_GUARD_WM, |
2657 | PINEVIEW_FIFO_LINE_SIZE, | 3186 | PINEVIEW_FIFO_LINE_SIZE, |
2658 | }; | 3187 | }; |
2659 | static struct intel_watermark_params pineview_cursor_hplloff_wm = { | 3188 | static const struct intel_watermark_params pineview_cursor_hplloff_wm = { |
2660 | PINEVIEW_CURSOR_FIFO, | 3189 | PINEVIEW_CURSOR_FIFO, |
2661 | PINEVIEW_CURSOR_MAX_WM, | 3190 | PINEVIEW_CURSOR_MAX_WM, |
2662 | PINEVIEW_CURSOR_DFT_WM, | 3191 | PINEVIEW_CURSOR_DFT_WM, |
2663 | PINEVIEW_CURSOR_GUARD_WM, | 3192 | PINEVIEW_CURSOR_GUARD_WM, |
2664 | PINEVIEW_FIFO_LINE_SIZE | 3193 | PINEVIEW_FIFO_LINE_SIZE |
2665 | }; | 3194 | }; |
2666 | static struct intel_watermark_params g4x_wm_info = { | 3195 | static const struct intel_watermark_params g4x_wm_info = { |
2667 | G4X_FIFO_SIZE, | 3196 | G4X_FIFO_SIZE, |
2668 | G4X_MAX_WM, | 3197 | G4X_MAX_WM, |
2669 | G4X_MAX_WM, | 3198 | G4X_MAX_WM, |
2670 | 2, | 3199 | 2, |
2671 | G4X_FIFO_LINE_SIZE, | 3200 | G4X_FIFO_LINE_SIZE, |
2672 | }; | 3201 | }; |
2673 | static struct intel_watermark_params g4x_cursor_wm_info = { | 3202 | static const struct intel_watermark_params g4x_cursor_wm_info = { |
2674 | I965_CURSOR_FIFO, | 3203 | I965_CURSOR_FIFO, |
2675 | I965_CURSOR_MAX_WM, | 3204 | I965_CURSOR_MAX_WM, |
2676 | I965_CURSOR_DFT_WM, | 3205 | I965_CURSOR_DFT_WM, |
2677 | 2, | 3206 | 2, |
2678 | G4X_FIFO_LINE_SIZE, | 3207 | G4X_FIFO_LINE_SIZE, |
2679 | }; | 3208 | }; |
2680 | static struct intel_watermark_params i965_cursor_wm_info = { | 3209 | static const struct intel_watermark_params i965_cursor_wm_info = { |
2681 | I965_CURSOR_FIFO, | 3210 | I965_CURSOR_FIFO, |
2682 | I965_CURSOR_MAX_WM, | 3211 | I965_CURSOR_MAX_WM, |
2683 | I965_CURSOR_DFT_WM, | 3212 | I965_CURSOR_DFT_WM, |
2684 | 2, | 3213 | 2, |
2685 | I915_FIFO_LINE_SIZE, | 3214 | I915_FIFO_LINE_SIZE, |
2686 | }; | 3215 | }; |
2687 | static struct intel_watermark_params i945_wm_info = { | 3216 | static const struct intel_watermark_params i945_wm_info = { |
2688 | I945_FIFO_SIZE, | 3217 | I945_FIFO_SIZE, |
2689 | I915_MAX_WM, | 3218 | I915_MAX_WM, |
2690 | 1, | 3219 | 1, |
2691 | 2, | 3220 | 2, |
2692 | I915_FIFO_LINE_SIZE | 3221 | I915_FIFO_LINE_SIZE |
2693 | }; | 3222 | }; |
2694 | static struct intel_watermark_params i915_wm_info = { | 3223 | static const struct intel_watermark_params i915_wm_info = { |
2695 | I915_FIFO_SIZE, | 3224 | I915_FIFO_SIZE, |
2696 | I915_MAX_WM, | 3225 | I915_MAX_WM, |
2697 | 1, | 3226 | 1, |
2698 | 2, | 3227 | 2, |
2699 | I915_FIFO_LINE_SIZE | 3228 | I915_FIFO_LINE_SIZE |
2700 | }; | 3229 | }; |
2701 | static struct intel_watermark_params i855_wm_info = { | 3230 | static const struct intel_watermark_params i855_wm_info = { |
2702 | I855GM_FIFO_SIZE, | 3231 | I855GM_FIFO_SIZE, |
2703 | I915_MAX_WM, | 3232 | I915_MAX_WM, |
2704 | 1, | 3233 | 1, |
2705 | 2, | 3234 | 2, |
2706 | I830_FIFO_LINE_SIZE | 3235 | I830_FIFO_LINE_SIZE |
2707 | }; | 3236 | }; |
2708 | static struct intel_watermark_params i830_wm_info = { | 3237 | static const struct intel_watermark_params i830_wm_info = { |
2709 | I830_FIFO_SIZE, | 3238 | I830_FIFO_SIZE, |
2710 | I915_MAX_WM, | 3239 | I915_MAX_WM, |
2711 | 1, | 3240 | 1, |
@@ -2713,31 +3242,28 @@ static struct intel_watermark_params i830_wm_info = { | |||
2713 | I830_FIFO_LINE_SIZE | 3242 | I830_FIFO_LINE_SIZE |
2714 | }; | 3243 | }; |
2715 | 3244 | ||
2716 | static struct intel_watermark_params ironlake_display_wm_info = { | 3245 | static const struct intel_watermark_params ironlake_display_wm_info = { |
2717 | ILK_DISPLAY_FIFO, | 3246 | ILK_DISPLAY_FIFO, |
2718 | ILK_DISPLAY_MAXWM, | 3247 | ILK_DISPLAY_MAXWM, |
2719 | ILK_DISPLAY_DFTWM, | 3248 | ILK_DISPLAY_DFTWM, |
2720 | 2, | 3249 | 2, |
2721 | ILK_FIFO_LINE_SIZE | 3250 | ILK_FIFO_LINE_SIZE |
2722 | }; | 3251 | }; |
2723 | 3252 | static const struct intel_watermark_params ironlake_cursor_wm_info = { | |
2724 | static struct intel_watermark_params ironlake_cursor_wm_info = { | ||
2725 | ILK_CURSOR_FIFO, | 3253 | ILK_CURSOR_FIFO, |
2726 | ILK_CURSOR_MAXWM, | 3254 | ILK_CURSOR_MAXWM, |
2727 | ILK_CURSOR_DFTWM, | 3255 | ILK_CURSOR_DFTWM, |
2728 | 2, | 3256 | 2, |
2729 | ILK_FIFO_LINE_SIZE | 3257 | ILK_FIFO_LINE_SIZE |
2730 | }; | 3258 | }; |
2731 | 3259 | static const struct intel_watermark_params ironlake_display_srwm_info = { | |
2732 | static struct intel_watermark_params ironlake_display_srwm_info = { | ||
2733 | ILK_DISPLAY_SR_FIFO, | 3260 | ILK_DISPLAY_SR_FIFO, |
2734 | ILK_DISPLAY_MAX_SRWM, | 3261 | ILK_DISPLAY_MAX_SRWM, |
2735 | ILK_DISPLAY_DFT_SRWM, | 3262 | ILK_DISPLAY_DFT_SRWM, |
2736 | 2, | 3263 | 2, |
2737 | ILK_FIFO_LINE_SIZE | 3264 | ILK_FIFO_LINE_SIZE |
2738 | }; | 3265 | }; |
2739 | 3266 | static const struct intel_watermark_params ironlake_cursor_srwm_info = { | |
2740 | static struct intel_watermark_params ironlake_cursor_srwm_info = { | ||
2741 | ILK_CURSOR_SR_FIFO, | 3267 | ILK_CURSOR_SR_FIFO, |
2742 | ILK_CURSOR_MAX_SRWM, | 3268 | ILK_CURSOR_MAX_SRWM, |
2743 | ILK_CURSOR_DFT_SRWM, | 3269 | ILK_CURSOR_DFT_SRWM, |
@@ -2745,6 +3271,36 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = { | |||
2745 | ILK_FIFO_LINE_SIZE | 3271 | ILK_FIFO_LINE_SIZE |
2746 | }; | 3272 | }; |
2747 | 3273 | ||
3274 | static const struct intel_watermark_params sandybridge_display_wm_info = { | ||
3275 | SNB_DISPLAY_FIFO, | ||
3276 | SNB_DISPLAY_MAXWM, | ||
3277 | SNB_DISPLAY_DFTWM, | ||
3278 | 2, | ||
3279 | SNB_FIFO_LINE_SIZE | ||
3280 | }; | ||
3281 | static const struct intel_watermark_params sandybridge_cursor_wm_info = { | ||
3282 | SNB_CURSOR_FIFO, | ||
3283 | SNB_CURSOR_MAXWM, | ||
3284 | SNB_CURSOR_DFTWM, | ||
3285 | 2, | ||
3286 | SNB_FIFO_LINE_SIZE | ||
3287 | }; | ||
3288 | static const struct intel_watermark_params sandybridge_display_srwm_info = { | ||
3289 | SNB_DISPLAY_SR_FIFO, | ||
3290 | SNB_DISPLAY_MAX_SRWM, | ||
3291 | SNB_DISPLAY_DFT_SRWM, | ||
3292 | 2, | ||
3293 | SNB_FIFO_LINE_SIZE | ||
3294 | }; | ||
3295 | static const struct intel_watermark_params sandybridge_cursor_srwm_info = { | ||
3296 | SNB_CURSOR_SR_FIFO, | ||
3297 | SNB_CURSOR_MAX_SRWM, | ||
3298 | SNB_CURSOR_DFT_SRWM, | ||
3299 | 2, | ||
3300 | SNB_FIFO_LINE_SIZE | ||
3301 | }; | ||
3302 | |||
3303 | |||
2748 | /** | 3304 | /** |
2749 | * intel_calculate_wm - calculate watermark level | 3305 | * intel_calculate_wm - calculate watermark level |
2750 | * @clock_in_khz: pixel clock | 3306 | * @clock_in_khz: pixel clock |
@@ -2764,7 +3320,8 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = { | |||
2764 | * will occur, and a display engine hang could result. | 3320 | * will occur, and a display engine hang could result. |
2765 | */ | 3321 | */ |
2766 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | 3322 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, |
2767 | struct intel_watermark_params *wm, | 3323 | const struct intel_watermark_params *wm, |
3324 | int fifo_size, | ||
2768 | int pixel_size, | 3325 | int pixel_size, |
2769 | unsigned long latency_ns) | 3326 | unsigned long latency_ns) |
2770 | { | 3327 | { |
@@ -2780,11 +3337,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
2780 | 1000; | 3337 | 1000; |
2781 | entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); | 3338 | entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); |
2782 | 3339 | ||
2783 | DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); | 3340 | DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); |
2784 | 3341 | ||
2785 | wm_size = wm->fifo_size - (entries_required + wm->guard_size); | 3342 | wm_size = fifo_size - (entries_required + wm->guard_size); |
2786 | 3343 | ||
2787 | DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); | 3344 | DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); |
2788 | 3345 | ||
2789 | /* Don't promote wm_size to unsigned... */ | 3346 | /* Don't promote wm_size to unsigned... */ |
2790 | if (wm_size > (long)wm->max_wm) | 3347 | if (wm_size > (long)wm->max_wm) |
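For readers following the new intel_calculate_wm signature (the FIFO size is now passed in rather than read from the params struct), here is a worked sketch of the same arithmetic with example numbers of my own: bytes fetched during the latency window, rounded up to FIFO cachelines, then subtracted from the FIFO together with the guard entries. The clamp to max_wm is omitted.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Example numbers only: 148.5 MHz pixel clock, 4 bytes per pixel,
	 * 2000 ns of memory latency, a 96-entry FIFO, 8-entry guard,
	 * 64-byte FIFO lines. */
	long clock_in_khz = 148500;
	int pixel_size = 4;
	long latency_ns = 2000;
	int fifo_size = 96, guard_size = 8, cacheline_size = 64;

	/* Bytes fetched during the latency window... */
	long bytes = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000;
	/* ...rounded up to whole FIFO lines ("entries"). */
	long entries = DIV_ROUND_UP(bytes, cacheline_size);
	/* What remains of the FIFO after those entries and the guard is
	 * how far it may drain before a refill must begin. */
	long wm = fifo_size - (entries + guard_size);

	printf("bytes=%ld entries=%ld wm=%ld\n", bytes, entries, wm);
	return 0;
}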
@@ -2902,7 +3459,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | |||
2902 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; | 3459 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; |
2903 | 3460 | ||
2904 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 3461 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2905 | plane ? "B" : "A", size); | 3462 | plane ? "B" : "A", size); |
2906 | 3463 | ||
2907 | return size; | 3464 | return size; |
2908 | } | 3465 | } |
@@ -2919,7 +3476,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane) | |||
2919 | size >>= 1; /* Convert to cachelines */ | 3476 | size >>= 1; /* Convert to cachelines */ |
2920 | 3477 | ||
2921 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 3478 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2922 | plane ? "B" : "A", size); | 3479 | plane ? "B" : "A", size); |
2923 | 3480 | ||
2924 | return size; | 3481 | return size; |
2925 | } | 3482 | } |
@@ -2934,8 +3491,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane) | |||
2934 | size >>= 2; /* Convert to cachelines */ | 3491 | size >>= 2; /* Convert to cachelines */ |
2935 | 3492 | ||
2936 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 3493 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2937 | plane ? "B" : "A", | 3494 | plane ? "B" : "A", |
2938 | size); | 3495 | size); |
2939 | 3496 | ||
2940 | return size; | 3497 | return size; |
2941 | } | 3498 | } |
@@ -2950,20 +3507,33 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) | |||
2950 | size >>= 1; /* Convert to cachelines */ | 3507 | size >>= 1; /* Convert to cachelines */ |
2951 | 3508 | ||
2952 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 3509 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2953 | plane ? "B" : "A", size); | 3510 | plane ? "B" : "A", size); |
2954 | 3511 | ||
2955 | return size; | 3512 | return size; |
2956 | } | 3513 | } |
2957 | 3514 | ||
2958 | static void pineview_update_wm(struct drm_device *dev, int planea_clock, | 3515 | static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) |
2959 | int planeb_clock, int sr_hdisplay, int unused, | 3516 | { |
2960 | int pixel_size) | 3517 | struct drm_crtc *crtc, *enabled = NULL; |
3518 | |||
3519 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
3520 | if (crtc->enabled && crtc->fb) { | ||
3521 | if (enabled) | ||
3522 | return NULL; | ||
3523 | enabled = crtc; | ||
3524 | } | ||
3525 | } | ||
3526 | |||
3527 | return enabled; | ||
3528 | } | ||
3529 | |||
3530 | static void pineview_update_wm(struct drm_device *dev) | ||
2961 | { | 3531 | { |
2962 | struct drm_i915_private *dev_priv = dev->dev_private; | 3532 | struct drm_i915_private *dev_priv = dev->dev_private; |
3533 | struct drm_crtc *crtc; | ||
2963 | const struct cxsr_latency *latency; | 3534 | const struct cxsr_latency *latency; |
2964 | u32 reg; | 3535 | u32 reg; |
2965 | unsigned long wm; | 3536 | unsigned long wm; |
2966 | int sr_clock; | ||
2967 | 3537 | ||
2968 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, | 3538 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, |
2969 | dev_priv->fsb_freq, dev_priv->mem_freq); | 3539 | dev_priv->fsb_freq, dev_priv->mem_freq); |
@@ -2973,11 +3543,14 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
2973 | return; | 3543 | return; |
2974 | } | 3544 | } |
2975 | 3545 | ||
2976 | if (!planea_clock || !planeb_clock) { | 3546 | crtc = single_enabled_crtc(dev); |
2977 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3547 | if (crtc) { |
3548 | int clock = crtc->mode.clock; | ||
3549 | int pixel_size = crtc->fb->bits_per_pixel / 8; | ||
2978 | 3550 | ||
2979 | /* Display SR */ | 3551 | /* Display SR */ |
2980 | wm = intel_calculate_wm(sr_clock, &pineview_display_wm, | 3552 | wm = intel_calculate_wm(clock, &pineview_display_wm, |
3553 | pineview_display_wm.fifo_size, | ||
2981 | pixel_size, latency->display_sr); | 3554 | pixel_size, latency->display_sr); |
2982 | reg = I915_READ(DSPFW1); | 3555 | reg = I915_READ(DSPFW1); |
2983 | reg &= ~DSPFW_SR_MASK; | 3556 | reg &= ~DSPFW_SR_MASK; |
@@ -2986,7 +3559,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
2986 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); | 3559 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); |
2987 | 3560 | ||
2988 | /* cursor SR */ | 3561 | /* cursor SR */ |
2989 | wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm, | 3562 | wm = intel_calculate_wm(clock, &pineview_cursor_wm, |
3563 | pineview_display_wm.fifo_size, | ||
2990 | pixel_size, latency->cursor_sr); | 3564 | pixel_size, latency->cursor_sr); |
2991 | reg = I915_READ(DSPFW3); | 3565 | reg = I915_READ(DSPFW3); |
2992 | reg &= ~DSPFW_CURSOR_SR_MASK; | 3566 | reg &= ~DSPFW_CURSOR_SR_MASK; |
@@ -2994,7 +3568,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
2994 | I915_WRITE(DSPFW3, reg); | 3568 | I915_WRITE(DSPFW3, reg); |
2995 | 3569 | ||
2996 | /* Display HPLL off SR */ | 3570 | /* Display HPLL off SR */ |
2997 | wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm, | 3571 | wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, |
3572 | pineview_display_hplloff_wm.fifo_size, | ||
2998 | pixel_size, latency->display_hpll_disable); | 3573 | pixel_size, latency->display_hpll_disable); |
2999 | reg = I915_READ(DSPFW3); | 3574 | reg = I915_READ(DSPFW3); |
3000 | reg &= ~DSPFW_HPLL_SR_MASK; | 3575 | reg &= ~DSPFW_HPLL_SR_MASK; |
@@ -3002,7 +3577,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3002 | I915_WRITE(DSPFW3, reg); | 3577 | I915_WRITE(DSPFW3, reg); |
3003 | 3578 | ||
3004 | /* cursor HPLL off SR */ | 3579 | /* cursor HPLL off SR */ |
3005 | wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm, | 3580 | wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, |
3581 | pineview_display_hplloff_wm.fifo_size, | ||
3006 | pixel_size, latency->cursor_hpll_disable); | 3582 | pixel_size, latency->cursor_hpll_disable); |
3007 | reg = I915_READ(DSPFW3); | 3583 | reg = I915_READ(DSPFW3); |
3008 | reg &= ~DSPFW_HPLL_CURSOR_MASK; | 3584 | reg &= ~DSPFW_HPLL_CURSOR_MASK; |
@@ -3020,125 +3596,229 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3020 | } | 3596 | } |
3021 | } | 3597 | } |
3022 | 3598 | ||
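The DSPFW writes in pineview_update_wm all follow the same read, clear-field, OR-in, write-back pattern. A tiny standalone illustration of that pattern, using made-up mask and shift values rather than the real DSPFW bit layout:

#include <stdio.h>
#include <stdint.h>

/* Made-up field layout; not the real DSPFW bit positions. */
#define EXAMPLE_SR_SHIFT 23
#define EXAMPLE_SR_MASK  (0x1ffu << EXAMPLE_SR_SHIFT)

int main(void)
{
	uint32_t fake_dspfw1 = 0x12345678;	/* stands in for I915_READ(DSPFW1) */
	uint32_t wm = 0x40;			/* newly computed watermark        */

	fake_dspfw1 &= ~EXAMPLE_SR_MASK;	/* clear only the SR field    */
	fake_dspfw1 |= wm << EXAMPLE_SR_SHIFT;	/* insert the new value       */
						/* ...then I915_WRITE() back  */

	printf("register is now 0x%08x\n", fake_dspfw1);
	return 0;
}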
3023 | static void g4x_update_wm(struct drm_device *dev, int planea_clock, | 3599 | static bool g4x_compute_wm0(struct drm_device *dev, |
3024 | int planeb_clock, int sr_hdisplay, int sr_htotal, | 3600 | int plane, |
3025 | int pixel_size) | 3601 | const struct intel_watermark_params *display, |
3602 | int display_latency_ns, | ||
3603 | const struct intel_watermark_params *cursor, | ||
3604 | int cursor_latency_ns, | ||
3605 | int *plane_wm, | ||
3606 | int *cursor_wm) | ||
3026 | { | 3607 | { |
3027 | struct drm_i915_private *dev_priv = dev->dev_private; | 3608 | struct drm_crtc *crtc; |
3028 | int total_size, cacheline_size; | 3609 | int htotal, hdisplay, clock, pixel_size; |
3029 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr; | 3610 | int line_time_us, line_count; |
3030 | struct intel_watermark_params planea_params, planeb_params; | 3611 | int entries, tlb_miss; |
3031 | unsigned long line_time_us; | 3612 | |
3032 | int sr_clock, sr_entries = 0, entries_required; | 3613 | crtc = intel_get_crtc_for_plane(dev, plane); |
3033 | 3614 | if (crtc->fb == NULL || !crtc->enabled) { | |
3034 | /* Create copies of the base settings for each pipe */ | 3615 | *cursor_wm = cursor->guard_size; |
3035 | planea_params = planeb_params = g4x_wm_info; | 3616 | *plane_wm = display->guard_size; |
3617 | return false; | ||
3618 | } | ||
3036 | 3619 | ||
3037 | /* Grab a couple of global values before we overwrite them */ | 3620 | htotal = crtc->mode.htotal; |
3038 | total_size = planea_params.fifo_size; | 3621 | hdisplay = crtc->mode.hdisplay; |
3039 | cacheline_size = planea_params.cacheline_size; | 3622 | clock = crtc->mode.clock; |
3623 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3624 | |||
3625 | /* Use the small buffer method to calculate plane watermark */ | ||
3626 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; | ||
3627 | tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; | ||
3628 | if (tlb_miss > 0) | ||
3629 | entries += tlb_miss; | ||
3630 | entries = DIV_ROUND_UP(entries, display->cacheline_size); | ||
3631 | *plane_wm = entries + display->guard_size; | ||
3632 | if (*plane_wm > (int)display->max_wm) | ||
3633 | *plane_wm = display->max_wm; | ||
3634 | |||
3635 | /* Use the large buffer method to calculate cursor watermark */ | ||
3636 | line_time_us = ((htotal * 1000) / clock); | ||
3637 | line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; | ||
3638 | entries = line_count * 64 * pixel_size; | ||
3639 | tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; | ||
3640 | if (tlb_miss > 0) | ||
3641 | entries += tlb_miss; | ||
3642 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
3643 | *cursor_wm = entries + cursor->guard_size; | ||
3644 | if (*cursor_wm > (int)cursor->max_wm) | ||
3645 | *cursor_wm = (int)cursor->max_wm; | ||
3040 | 3646 | ||
3041 | /* | 3647 | return true; |
3042 | * Note: we need to make sure we don't overflow for various clock & | 3648 | } |
3043 | * latency values. | ||
3044 | * clocks go from a few thousand to several hundred thousand. | ||
3045 | * latency is usually a few thousand | ||
3046 | */ | ||
3047 | entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / | ||
3048 | 1000; | ||
3049 | entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); | ||
3050 | planea_wm = entries_required + planea_params.guard_size; | ||
3051 | 3649 | ||
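g4x_compute_wm0 above mixes two estimates: the "small buffer" method (bytes streamed during the latency window) for the plane and the "large buffer" method (whole scanlines covered by the latency) for the 64-pixel cursor. A runnable sketch of both with example numbers; the TLB-miss adjustment and the max_wm clamp are left out:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Example numbers only: 148.5 MHz pixel clock, 2200-pixel htotal,
	 * 4 bytes per pixel, 12000 ns latency, 64-byte cachelines,
	 * guard of 2 entries. */
	int clock = 148500, htotal = 2200;
	int pixel_size = 4, latency_ns = 12000;
	int cacheline = 64, guard = 2;

	/* Small buffer method: bytes streamed during the latency window. */
	int small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	int plane_wm = DIV_ROUND_UP(small, cacheline) + guard;

	/* Large buffer method: whole scanlines covered by the latency,
	 * applied to the 64-pixel-wide cursor as in g4x_compute_wm0(). */
	int line_time_us = (htotal * 1000) / clock;
	int line_count = (latency_ns / line_time_us + 1000) / 1000;
	int cursor_bytes = line_count * 64 * pixel_size;
	int cursor_wm = DIV_ROUND_UP(cursor_bytes, cacheline) + guard;

	printf("plane_wm=%d cursor_wm=%d\n", plane_wm, cursor_wm);
	return 0;
}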
3052 | entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / | 3650 | /* |
3053 | 1000; | 3651 | * Check the wm result. |
3054 | entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); | 3652 | * |
3055 | planeb_wm = entries_required + planeb_params.guard_size; | 3653 | * If any calculated watermark value is larger than the maximum value that |
3654 | * can be programmed into the associated watermark register, that watermark | ||
3655 | * must be disabled. | ||
3656 | */ | ||
3657 | static bool g4x_check_srwm(struct drm_device *dev, | ||
3658 | int display_wm, int cursor_wm, | ||
3659 | const struct intel_watermark_params *display, | ||
3660 | const struct intel_watermark_params *cursor) | ||
3661 | { | ||
3662 | DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", | ||
3663 | display_wm, cursor_wm); | ||
3056 | 3664 | ||
3057 | cursora_wm = cursorb_wm = 16; | 3665 | if (display_wm > display->max_wm) { |
3058 | cursor_sr = 32; | 3666 | DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", |
3667 | display_wm, display->max_wm); | ||
3668 | return false; | ||
3669 | } | ||
3059 | 3670 | ||
3060 | DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | 3671 | if (cursor_wm > cursor->max_wm) { |
3672 | DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", | ||
3673 | cursor_wm, cursor->max_wm); | ||
3674 | return false; | ||
3675 | } | ||
3061 | 3676 | ||
3062 | /* Calc sr entries for one plane configs */ | 3677 | if (!(display_wm || cursor_wm)) { |
3063 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 3678 | DRM_DEBUG_KMS("SR latency is 0, disabling\n"); |
3064 | /* self-refresh has much higher latency */ | 3679 | return false; |
3065 | static const int sr_latency_ns = 12000; | 3680 | } |
3066 | 3681 | ||
3067 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3682 | return true; |
3068 | line_time_us = ((sr_htotal * 1000) / sr_clock); | 3683 | } |
3069 | 3684 | ||
3070 | /* Use ns/us then divide to preserve precision */ | 3685 | static bool g4x_compute_srwm(struct drm_device *dev, |
3071 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3686 | int plane, |
3072 | pixel_size * sr_hdisplay; | 3687 | int latency_ns, |
3073 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); | 3688 | const struct intel_watermark_params *display, |
3074 | 3689 | const struct intel_watermark_params *cursor, | |
3075 | entries_required = (((sr_latency_ns / line_time_us) + | 3690 | int *display_wm, int *cursor_wm) |
3076 | 1000) / 1000) * pixel_size * 64; | 3691 | { |
3077 | entries_required = DIV_ROUND_UP(entries_required, | 3692 | struct drm_crtc *crtc; |
3078 | g4x_cursor_wm_info.cacheline_size); | 3693 | int hdisplay, htotal, pixel_size, clock; |
3079 | cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; | 3694 | unsigned long line_time_us; |
3080 | 3695 | int line_count, line_size; | |
3081 | if (cursor_sr > g4x_cursor_wm_info.max_wm) | 3696 | int small, large; |
3082 | cursor_sr = g4x_cursor_wm_info.max_wm; | 3697 | int entries; |
3083 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | ||
3084 | "cursor %d\n", sr_entries, cursor_sr); | ||
3085 | 3698 | ||
3086 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 3699 | if (!latency_ns) { |
3087 | } else { | 3700 | *display_wm = *cursor_wm = 0; |
3088 | /* Turn off self refresh if both pipes are enabled */ | 3701 | return false; |
3089 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
3090 | & ~FW_BLC_SELF_EN); | ||
3091 | } | 3702 | } |
3092 | 3703 | ||
3093 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", | 3704 | crtc = intel_get_crtc_for_plane(dev, plane); |
3094 | planea_wm, planeb_wm, sr_entries); | 3705 | hdisplay = crtc->mode.hdisplay; |
3706 | htotal = crtc->mode.htotal; | ||
3707 | clock = crtc->mode.clock; | ||
3708 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3709 | |||
3710 | line_time_us = (htotal * 1000) / clock; | ||
3711 | line_count = (latency_ns / line_time_us + 1000) / 1000; | ||
3712 | line_size = hdisplay * pixel_size; | ||
3095 | 3713 | ||
3096 | planea_wm &= 0x3f; | 3714 | /* Use the minimum of the small and large buffer method for primary */ |
3097 | planeb_wm &= 0x3f; | 3715 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; |
3716 | large = line_count * line_size; | ||
3717 | |||
3718 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); | ||
3719 | *display_wm = entries + display->guard_size; | ||
3720 | |||
3721 | /* calculate the self-refresh watermark for display cursor */ | ||
3722 | entries = line_count * pixel_size * 64; | ||
3723 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
3724 | *cursor_wm = entries + cursor->guard_size; | ||
3725 | |||
3726 | return g4x_check_srwm(dev, | ||
3727 | *display_wm, *cursor_wm, | ||
3728 | display, cursor); | ||
3729 | } | ||
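g4x_compute_srwm then takes the smaller of the two estimates for the primary plane before rounding to cachelines. Reusing the example numbers from the sketch above:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int small = 7128;		/* latency-window bytes from the sketch above */
	int large = 1 * 1920 * 4;	/* line_count * hdisplay * pixel_size         */

	printf("self-refresh display estimate: min = %d bytes\n",
	       MIN(small, large));
	return 0;
}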
3730 | |||
3731 | #define single_plane_enabled(mask) is_power_of_2(mask) | ||
3732 | |||
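single_plane_enabled reads a two-bit pipe mask and asks whether exactly one bit is set, which is the power-of-two test is_power_of_2 encodes. A self-contained check over all four mask values:

#include <stdio.h>
#include <stdbool.h>

/* Exactly-one-bit-set test, the same predicate is_power_of_2() uses. */
static bool one_bit_set(unsigned int mask)
{
	return mask != 0 && (mask & (mask - 1)) == 0;
}

int main(void)
{
	/* Bit 0: pipe A has a valid WM0, bit 1: pipe B does. */
	for (unsigned int mask = 0; mask <= 3; mask++)
		printf("enabled=%u -> single plane? %s\n",
		       mask, one_bit_set(mask) ? "yes" : "no");
	return 0;
}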
3733 | static void g4x_update_wm(struct drm_device *dev) | ||
3734 | { | ||
3735 | static const int sr_latency_ns = 12000; | ||
3736 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3737 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | ||
3738 | int plane_sr, cursor_sr; | ||
3739 | unsigned int enabled = 0; | ||
3740 | |||
3741 | if (g4x_compute_wm0(dev, 0, | ||
3742 | &g4x_wm_info, latency_ns, | ||
3743 | &g4x_cursor_wm_info, latency_ns, | ||
3744 | &planea_wm, &cursora_wm)) | ||
3745 | enabled |= 1; | ||
3746 | |||
3747 | if (g4x_compute_wm0(dev, 1, | ||
3748 | &g4x_wm_info, latency_ns, | ||
3749 | &g4x_cursor_wm_info, latency_ns, | ||
3750 | &planeb_wm, &cursorb_wm)) | ||
3751 | enabled |= 2; | ||
3752 | |||
3753 | plane_sr = cursor_sr = 0; | ||
3754 | if (single_plane_enabled(enabled) && | ||
3755 | g4x_compute_srwm(dev, ffs(enabled) - 1, | ||
3756 | sr_latency_ns, | ||
3757 | &g4x_wm_info, | ||
3758 | &g4x_cursor_wm_info, | ||
3759 | &plane_sr, &cursor_sr)) | ||
3760 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | ||
3761 | else | ||
3762 | I915_WRITE(FW_BLC_SELF, | ||
3763 | I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); | ||
3098 | 3764 | ||
3099 | I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) | | 3765 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", |
3766 | planea_wm, cursora_wm, | ||
3767 | planeb_wm, cursorb_wm, | ||
3768 | plane_sr, cursor_sr); | ||
3769 | |||
3770 | I915_WRITE(DSPFW1, | ||
3771 | (plane_sr << DSPFW_SR_SHIFT) | | ||
3100 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | | 3772 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | |
3101 | (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm); | 3773 | (planeb_wm << DSPFW_PLANEB_SHIFT) | |
3102 | I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | | 3774 | planea_wm); |
3775 | I915_WRITE(DSPFW2, | ||
3776 | (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | | ||
3103 | (cursora_wm << DSPFW_CURSORA_SHIFT)); | 3777 | (cursora_wm << DSPFW_CURSORA_SHIFT)); |
3104 | /* HPLL off in SR has some issues on G4x... disable it */ | 3778 | /* HPLL off in SR has some issues on G4x... disable it */ |
3105 | I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | | 3779 | I915_WRITE(DSPFW3, |
3780 | (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | | ||
3106 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | 3781 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); |
3107 | } | 3782 | } |
3108 | 3783 | ||
3109 | static void i965_update_wm(struct drm_device *dev, int planea_clock, | 3784 | static void i965_update_wm(struct drm_device *dev) |
3110 | int planeb_clock, int sr_hdisplay, int sr_htotal, | ||
3111 | int pixel_size) | ||
3112 | { | 3785 | { |
3113 | struct drm_i915_private *dev_priv = dev->dev_private; | 3786 | struct drm_i915_private *dev_priv = dev->dev_private; |
3114 | unsigned long line_time_us; | 3787 | struct drm_crtc *crtc; |
3115 | int sr_clock, sr_entries, srwm = 1; | 3788 | int srwm = 1; |
3116 | int cursor_sr = 16; | 3789 | int cursor_sr = 16; |
3117 | 3790 | ||
3118 | /* Calc sr entries for one plane configs */ | 3791 | /* Calc sr entries for one plane configs */ |
3119 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 3792 | crtc = single_enabled_crtc(dev); |
3793 | if (crtc) { | ||
3120 | /* self-refresh has much higher latency */ | 3794 | /* self-refresh has much higher latency */ |
3121 | static const int sr_latency_ns = 12000; | 3795 | static const int sr_latency_ns = 12000; |
3796 | int clock = crtc->mode.clock; | ||
3797 | int htotal = crtc->mode.htotal; | ||
3798 | int hdisplay = crtc->mode.hdisplay; | ||
3799 | int pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3800 | unsigned long line_time_us; | ||
3801 | int entries; | ||
3122 | 3802 | ||
3123 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3803 | line_time_us = ((htotal * 1000) / clock); |
3124 | line_time_us = ((sr_htotal * 1000) / sr_clock); | ||
3125 | 3804 | ||
3126 | /* Use ns/us then divide to preserve precision */ | 3805 | /* Use ns/us then divide to preserve precision */ |
3127 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3806 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3128 | pixel_size * sr_hdisplay; | 3807 | pixel_size * hdisplay; |
3129 | sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); | 3808 | entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); |
3130 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 3809 | srwm = I965_FIFO_SIZE - entries; |
3131 | srwm = I965_FIFO_SIZE - sr_entries; | ||
3132 | if (srwm < 0) | 3810 | if (srwm < 0) |
3133 | srwm = 1; | 3811 | srwm = 1; |
3134 | srwm &= 0x1ff; | 3812 | srwm &= 0x1ff; |
3813 | DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", | ||
3814 | entries, srwm); | ||
3135 | 3815 | ||
3136 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3816 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3137 | pixel_size * 64; | 3817 | pixel_size * 64; |
3138 | sr_entries = DIV_ROUND_UP(sr_entries, | 3818 | entries = DIV_ROUND_UP(entries, |
3139 | i965_cursor_wm_info.cacheline_size); | 3819 | i965_cursor_wm_info.cacheline_size); |
3140 | cursor_sr = i965_cursor_wm_info.fifo_size - | 3820 | cursor_sr = i965_cursor_wm_info.fifo_size - |
3141 | (sr_entries + i965_cursor_wm_info.guard_size); | 3821 | (entries + i965_cursor_wm_info.guard_size); |
3142 | 3822 | ||
3143 | if (cursor_sr > i965_cursor_wm_info.max_wm) | 3823 | if (cursor_sr > i965_cursor_wm_info.max_wm) |
3144 | cursor_sr = i965_cursor_wm_info.max_wm; | 3824 | cursor_sr = i965_cursor_wm_info.max_wm; |
@@ -3146,11 +3826,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
3146 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | 3826 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " |
3147 | "cursor %d\n", srwm, cursor_sr); | 3827 | "cursor %d\n", srwm, cursor_sr); |
3148 | 3828 | ||
3149 | if (IS_I965GM(dev)) | 3829 | if (IS_CRESTLINE(dev)) |
3150 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 3830 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
3151 | } else { | 3831 | } else { |
3152 | /* Turn off self refresh if both pipes are enabled */ | 3832 | /* Turn off self refresh if both pipes are enabled */ |
3153 | if (IS_I965GM(dev)) | 3833 | if (IS_CRESTLINE(dev)) |
3154 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | 3834 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) |
3155 | & ~FW_BLC_SELF_EN); | 3835 | & ~FW_BLC_SELF_EN); |
3156 | } | 3836 | } |
@@ -3159,46 +3839,56 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
3159 | srwm); | 3839 | srwm); |
3160 | 3840 | ||
3161 | /* 965 has limitations... */ | 3841 | /* 965 has limitations... */ |
3162 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | | 3842 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | |
3163 | (8 << 0)); | 3843 | (8 << 16) | (8 << 8) | (8 << 0)); |
3164 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); | 3844 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); |
3165 | /* update cursor SR watermark */ | 3845 | /* update cursor SR watermark */ |
3166 | I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | 3846 | I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); |
3167 | } | 3847 | } |
3168 | 3848 | ||
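The self-refresh paths keep the latency in nanoseconds while the line time is in microseconds ("Use ns/us then divide to preserve precision"), and the "+ 1000) / 1000" step adds one line of slack before dividing back down. A small example with made-up mode numbers showing why: a naive microsecond division would round the line count to zero.

#include <stdio.h>

int main(void)
{
	int htotal = 2200, clock = 148500;	/* pixels, kHz */
	int sr_latency_ns = 12000;

	int line_time_us = (htotal * 1000) / clock;		/* ~14 us */
	int naive_lines  = (sr_latency_ns / 1000) / line_time_us;
	int kept_lines   = ((sr_latency_ns / line_time_us) + 1000) / 1000;

	printf("line time %d us: naive %d lines, ns/us method %d lines\n",
	       line_time_us, naive_lines, kept_lines);
	return 0;
}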
3169 | static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | 3849 | static void i9xx_update_wm(struct drm_device *dev) |
3170 | int planeb_clock, int sr_hdisplay, int sr_htotal, | ||
3171 | int pixel_size) | ||
3172 | { | 3850 | { |
3173 | struct drm_i915_private *dev_priv = dev->dev_private; | 3851 | struct drm_i915_private *dev_priv = dev->dev_private; |
3852 | const struct intel_watermark_params *wm_info; | ||
3174 | uint32_t fwater_lo; | 3853 | uint32_t fwater_lo; |
3175 | uint32_t fwater_hi; | 3854 | uint32_t fwater_hi; |
3176 | int total_size, cacheline_size, cwm, srwm = 1; | 3855 | int cwm, srwm = 1; |
3856 | int fifo_size; | ||
3177 | int planea_wm, planeb_wm; | 3857 | int planea_wm, planeb_wm; |
3178 | struct intel_watermark_params planea_params, planeb_params; | 3858 | struct drm_crtc *crtc, *enabled = NULL; |
3179 | unsigned long line_time_us; | ||
3180 | int sr_clock, sr_entries = 0; | ||
3181 | 3859 | ||
3182 | /* Create copies of the base settings for each pipe */ | 3860 | if (IS_I945GM(dev)) |
3183 | if (IS_I965GM(dev) || IS_I945GM(dev)) | 3861 | wm_info = &i945_wm_info; |
3184 | planea_params = planeb_params = i945_wm_info; | 3862 | else if (!IS_GEN2(dev)) |
3185 | else if (IS_I9XX(dev)) | 3863 | wm_info = &i915_wm_info; |
3186 | planea_params = planeb_params = i915_wm_info; | ||
3187 | else | 3864 | else |
3188 | planea_params = planeb_params = i855_wm_info; | 3865 | wm_info = &i855_wm_info; |
3189 | 3866 | ||
3190 | /* Grab a couple of global values before we overwrite them */ | 3867 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
3191 | total_size = planea_params.fifo_size; | 3868 | crtc = intel_get_crtc_for_plane(dev, 0); |
3192 | cacheline_size = planea_params.cacheline_size; | 3869 | if (crtc->enabled && crtc->fb) { |
3193 | 3870 | planea_wm = intel_calculate_wm(crtc->mode.clock, | |
3194 | /* Update per-plane FIFO sizes */ | 3871 | wm_info, fifo_size, |
3195 | planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0); | 3872 | crtc->fb->bits_per_pixel / 8, |
3196 | planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1); | 3873 | latency_ns); |
3874 | enabled = crtc; | ||
3875 | } else | ||
3876 | planea_wm = fifo_size - wm_info->guard_size; | ||
3877 | |||
3878 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); | ||
3879 | crtc = intel_get_crtc_for_plane(dev, 1); | ||
3880 | if (crtc->enabled && crtc->fb) { | ||
3881 | planeb_wm = intel_calculate_wm(crtc->mode.clock, | ||
3882 | wm_info, fifo_size, | ||
3883 | crtc->fb->bits_per_pixel / 8, | ||
3884 | latency_ns); | ||
3885 | if (enabled == NULL) | ||
3886 | enabled = crtc; | ||
3887 | else | ||
3888 | enabled = NULL; | ||
3889 | } else | ||
3890 | planeb_wm = fifo_size - wm_info->guard_size; | ||
3197 | 3891 | ||
3198 | planea_wm = intel_calculate_wm(planea_clock, &planea_params, | ||
3199 | pixel_size, latency_ns); | ||
3200 | planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, | ||
3201 | pixel_size, latency_ns); | ||
3202 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | 3892 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); |
3203 | 3893 | ||
3204 | /* | 3894 | /* |
@@ -3206,43 +3896,43 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
3206 | */ | 3896 | */ |
3207 | cwm = 2; | 3897 | cwm = 2; |
3208 | 3898 | ||
3899 | /* Play safe and disable self-refresh before adjusting watermarks. */ | ||
3900 | if (IS_I945G(dev) || IS_I945GM(dev)) | ||
3901 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); | ||
3902 | else if (IS_I915GM(dev)) | ||
3903 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); | ||
3904 | |||
3209 | /* Calc sr entries for one plane configs */ | 3905 | /* Calc sr entries for one plane configs */ |
3210 | if (HAS_FW_BLC(dev) && sr_hdisplay && | 3906 | if (HAS_FW_BLC(dev) && enabled) { |
3211 | (!planea_clock || !planeb_clock)) { | ||
3212 | /* self-refresh has much higher latency */ | 3907 | /* self-refresh has much higher latency */ |
3213 | static const int sr_latency_ns = 6000; | 3908 | static const int sr_latency_ns = 6000; |
3909 | int clock = enabled->mode.clock; | ||
3910 | int htotal = enabled->mode.htotal; | ||
3911 | int hdisplay = enabled->mode.hdisplay; | ||
3912 | int pixel_size = enabled->fb->bits_per_pixel / 8; | ||
3913 | unsigned long line_time_us; | ||
3914 | int entries; | ||
3214 | 3915 | ||
3215 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3916 | line_time_us = (htotal * 1000) / clock; |
3216 | line_time_us = ((sr_htotal * 1000) / sr_clock); | ||
3217 | 3917 | ||
3218 | /* Use ns/us then divide to preserve precision */ | 3918 | /* Use ns/us then divide to preserve precision */ |
3219 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3919 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3220 | pixel_size * sr_hdisplay; | 3920 | pixel_size * hdisplay; |
3221 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); | 3921 | entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); |
3222 | DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); | 3922 | DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); |
3223 | srwm = total_size - sr_entries; | 3923 | srwm = wm_info->fifo_size - entries; |
3224 | if (srwm < 0) | 3924 | if (srwm < 0) |
3225 | srwm = 1; | 3925 | srwm = 1; |
3226 | 3926 | ||
3227 | if (IS_I945G(dev) || IS_I945GM(dev)) | 3927 | if (IS_I945G(dev) || IS_I945GM(dev)) |
3228 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); | 3928 | I915_WRITE(FW_BLC_SELF, |
3229 | else if (IS_I915GM(dev)) { | 3929 | FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); |
3230 | /* 915M has a smaller SRWM field */ | 3930 | else if (IS_I915GM(dev)) |
3231 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); | 3931 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); |
3232 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); | ||
3233 | } | ||
3234 | } else { | ||
3235 | /* Turn off self refresh if both pipes are enabled */ | ||
3236 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
3237 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
3238 | & ~FW_BLC_SELF_EN); | ||
3239 | } else if (IS_I915GM(dev)) { | ||
3240 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); | ||
3241 | } | ||
3242 | } | 3932 | } |
3243 | 3933 | ||
3244 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | 3934 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
3245 | planea_wm, planeb_wm, cwm, srwm); | 3935 | planea_wm, planeb_wm, cwm, srwm); |
3246 | 3936 | ||
3247 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); | 3937 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); |
3248 | fwater_hi = (cwm & 0x1f); | 3938 | fwater_hi = (cwm & 0x1f); |
@@ -3253,19 +3943,36 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
3253 | 3943 | ||
3254 | I915_WRITE(FW_BLC, fwater_lo); | 3944 | I915_WRITE(FW_BLC, fwater_lo); |
3255 | I915_WRITE(FW_BLC2, fwater_hi); | 3945 | I915_WRITE(FW_BLC2, fwater_hi); |
3946 | |||
3947 | if (HAS_FW_BLC(dev)) { | ||
3948 | if (enabled) { | ||
3949 | if (IS_I945G(dev) || IS_I945GM(dev)) | ||
3950 | I915_WRITE(FW_BLC_SELF, | ||
3951 | FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
3952 | else if (IS_I915GM(dev)) | ||
3953 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); | ||
3954 | DRM_DEBUG_KMS("memory self refresh enabled\n"); | ||
3955 | } else | ||
3956 | DRM_DEBUG_KMS("memory self refresh disabled\n"); | ||
3957 | } | ||
3256 | } | 3958 | } |
3257 | 3959 | ||
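Going by the 0x3f masks and the 16-bit shift above, i9xx packs the two 6-bit plane watermarks into FW_BLC and the 5-bit cursor watermark into FW_BLC2. A toy packing with example values (any other bits those registers carry are ignored here):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t planea_wm = 0x25, planeb_wm = 0x18, cwm = 2;	/* example values */

	/* 6-bit plane fields (plane B at bits 21:16, plane A at bits 5:0,
	 * per the shifts and masks above) and a 5-bit cursor field. */
	uint32_t fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	uint32_t fwater_hi = cwm & 0x1f;

	printf("FW_BLC=0x%08x FW_BLC2=0x%08x\n", fwater_lo, fwater_hi);
	return 0;
}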
3258 | static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | 3960 | static void i830_update_wm(struct drm_device *dev) |
3259 | int unused2, int unused3, int pixel_size) | ||
3260 | { | 3961 | { |
3261 | struct drm_i915_private *dev_priv = dev->dev_private; | 3962 | struct drm_i915_private *dev_priv = dev->dev_private; |
3262 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; | 3963 | struct drm_crtc *crtc; |
3964 | uint32_t fwater_lo; | ||
3263 | int planea_wm; | 3965 | int planea_wm; |
3264 | 3966 | ||
3265 | i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0); | 3967 | crtc = single_enabled_crtc(dev); |
3968 | if (crtc == NULL) | ||
3969 | return; | ||
3266 | 3970 | ||
3267 | planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, | 3971 | planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, |
3268 | pixel_size, latency_ns); | 3972 | dev_priv->display.get_fifo_size(dev, 0), |
3973 | crtc->fb->bits_per_pixel / 8, | ||
3974 | latency_ns); | ||
3975 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; | ||
3269 | fwater_lo |= (3<<8) | planea_wm; | 3976 | fwater_lo |= (3<<8) | planea_wm; |
3270 | 3977 | ||
3271 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); | 3978 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); |
@@ -3276,146 +3983,286 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | |||
3276 | #define ILK_LP0_PLANE_LATENCY 700 | 3983 | #define ILK_LP0_PLANE_LATENCY 700 |
3277 | #define ILK_LP0_CURSOR_LATENCY 1300 | 3984 | #define ILK_LP0_CURSOR_LATENCY 1300 |
3278 | 3985 | ||
3279 | static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | 3986 | /* |
3280 | int planeb_clock, int sr_hdisplay, int sr_htotal, | 3987 | * Check the wm result. |
3281 | int pixel_size) | 3988 | * |
3989 | * If any calculated watermark value is larger than the maximum value that | ||
3990 | * can be programmed into the associated watermark register, that watermark | ||
3991 | * must be disabled. | ||
3992 | */ | ||
3993 | static bool ironlake_check_srwm(struct drm_device *dev, int level, | ||
3994 | int fbc_wm, int display_wm, int cursor_wm, | ||
3995 | const struct intel_watermark_params *display, | ||
3996 | const struct intel_watermark_params *cursor) | ||
3282 | { | 3997 | { |
3283 | struct drm_i915_private *dev_priv = dev->dev_private; | 3998 | struct drm_i915_private *dev_priv = dev->dev_private; |
3284 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | 3999 | |
3285 | int sr_wm, cursor_wm; | 4000 | DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," |
3286 | unsigned long line_time_us; | 4001 | " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); |
3287 | int sr_clock, entries_required; | 4002 | |
3288 | u32 reg_value; | 4003 | if (fbc_wm > SNB_FBC_MAX_SRWM) { |
3289 | int line_count; | 4004 | DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", |
3290 | int planea_htotal = 0, planeb_htotal = 0; | 4005 | fbc_wm, SNB_FBC_MAX_SRWM, level); |
4006 | |||
4007 | /* fbc has its own way to disable FBC WM */ | ||
4008 | I915_WRITE(DISP_ARB_CTL, | ||
4009 | I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); | ||
4010 | return false; | ||
4011 | } | ||
4012 | |||
4013 | if (display_wm > display->max_wm) { | ||
4014 | DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", | ||
4015 | display_wm, SNB_DISPLAY_MAX_SRWM, level); | ||
4016 | return false; | ||
4017 | } | ||
4018 | |||
4019 | if (cursor_wm > cursor->max_wm) { | ||
4020 | DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", | ||
4021 | cursor_wm, SNB_CURSOR_MAX_SRWM, level); | ||
4022 | return false; | ||
4023 | } | ||
4024 | |||
4025 | if (!(fbc_wm || display_wm || cursor_wm)) { | ||
4026 | DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); | ||
4027 | return false; | ||
4028 | } | ||
4029 | |||
4030 | return true; | ||
4031 | } | ||
4032 | |||
4033 | /* | ||
4034 | * Compute watermark values of WM[1-3], | ||
4035 | */ | ||
4036 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, | ||
4037 | int latency_ns, | ||
4038 | const struct intel_watermark_params *display, | ||
4039 | const struct intel_watermark_params *cursor, | ||
4040 | int *fbc_wm, int *display_wm, int *cursor_wm) | ||
4041 | { | ||
3291 | struct drm_crtc *crtc; | 4042 | struct drm_crtc *crtc; |
4043 | unsigned long line_time_us; | ||
4044 | int hdisplay, htotal, pixel_size, clock; | ||
4045 | int line_count, line_size; | ||
4046 | int small, large; | ||
4047 | int entries; | ||
3292 | 4048 | ||
3293 | /* Need htotal for all active display plane */ | 4049 | if (!latency_ns) { |
3294 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 4050 | *fbc_wm = *display_wm = *cursor_wm = 0; |
3295 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4051 | return false; |
3296 | if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { | ||
3297 | if (intel_crtc->plane == 0) | ||
3298 | planea_htotal = crtc->mode.htotal; | ||
3299 | else | ||
3300 | planeb_htotal = crtc->mode.htotal; | ||
3301 | } | ||
3302 | } | 4052 | } |
3303 | 4053 | ||
3304 | /* Calculate and update the watermark for plane A */ | 4054 | crtc = intel_get_crtc_for_plane(dev, plane); |
3305 | if (planea_clock) { | 4055 | hdisplay = crtc->mode.hdisplay; |
3306 | entries_required = ((planea_clock / 1000) * pixel_size * | 4056 | htotal = crtc->mode.htotal; |
3307 | ILK_LP0_PLANE_LATENCY) / 1000; | 4057 | clock = crtc->mode.clock; |
3308 | entries_required = DIV_ROUND_UP(entries_required, | 4058 | pixel_size = crtc->fb->bits_per_pixel / 8; |
3309 | ironlake_display_wm_info.cacheline_size); | ||
3310 | planea_wm = entries_required + | ||
3311 | ironlake_display_wm_info.guard_size; | ||
3312 | 4059 | ||
3313 | if (planea_wm > (int)ironlake_display_wm_info.max_wm) | 4060 | line_time_us = (htotal * 1000) / clock; |
3314 | planea_wm = ironlake_display_wm_info.max_wm; | 4061 | line_count = (latency_ns / line_time_us + 1000) / 1000; |
4062 | line_size = hdisplay * pixel_size; | ||
3315 | 4063 | ||
3316 | /* Use the large buffer method to calculate cursor watermark */ | 4064 | /* Use the minimum of the small and large buffer method for primary */ |
3317 | line_time_us = (planea_htotal * 1000) / planea_clock; | 4065 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; |
4066 | large = line_count * line_size; | ||
3318 | 4067 | ||
3319 | /* Use ns/us then divide to preserve precision */ | 4068 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); |
3320 | line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; | 4069 | *display_wm = entries + display->guard_size; |
3321 | |||
3322 | /* calculate the cursor watermark for cursor A */ | ||
3323 | entries_required = line_count * 64 * pixel_size; | ||
3324 | entries_required = DIV_ROUND_UP(entries_required, | ||
3325 | ironlake_cursor_wm_info.cacheline_size); | ||
3326 | cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size; | ||
3327 | if (cursora_wm > ironlake_cursor_wm_info.max_wm) | ||
3328 | cursora_wm = ironlake_cursor_wm_info.max_wm; | ||
3329 | |||
3330 | reg_value = I915_READ(WM0_PIPEA_ILK); | ||
3331 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | ||
3332 | reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | | ||
3333 | (cursora_wm & WM0_PIPE_CURSOR_MASK); | ||
3334 | I915_WRITE(WM0_PIPEA_ILK, reg_value); | ||
3335 | DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, " | ||
3336 | "cursor: %d\n", planea_wm, cursora_wm); | ||
3337 | } | ||
3338 | /* Calculate and update the watermark for plane B */ | ||
3339 | if (planeb_clock) { | ||
3340 | entries_required = ((planeb_clock / 1000) * pixel_size * | ||
3341 | ILK_LP0_PLANE_LATENCY) / 1000; | ||
3342 | entries_required = DIV_ROUND_UP(entries_required, | ||
3343 | ironlake_display_wm_info.cacheline_size); | ||
3344 | planeb_wm = entries_required + | ||
3345 | ironlake_display_wm_info.guard_size; | ||
3346 | |||
3347 | if (planeb_wm > (int)ironlake_display_wm_info.max_wm) | ||
3348 | planeb_wm = ironlake_display_wm_info.max_wm; | ||
3349 | |||
3350 | /* Use the large buffer method to calculate cursor watermark */ | ||
3351 | line_time_us = (planeb_htotal * 1000) / planeb_clock; | ||
3352 | 4070 | ||
3353 | /* Use ns/us then divide to preserve precision */ | 4071 | /* |
3354 | line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; | 4072 | * Spec says: |
4073 | * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 | ||
4074 | */ | ||
4075 | *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; | ||
4076 | |||
4077 | /* calculate the self-refresh watermark for display cursor */ | ||
4078 | entries = line_count * pixel_size * 64; | ||
4079 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
4080 | *cursor_wm = entries + cursor->guard_size; | ||
3355 | 4081 | ||
3356 | /* calculate the cursor watermark for cursor B */ | 4082 | return ironlake_check_srwm(dev, level, |
3357 | entries_required = line_count * 64 * pixel_size; | 4083 | *fbc_wm, *display_wm, *cursor_wm, |
3358 | entries_required = DIV_ROUND_UP(entries_required, | 4084 | display, cursor); |
3359 | ironlake_cursor_wm_info.cacheline_size); | 4085 | } |
3360 | cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size; | ||
3361 | if (cursorb_wm > ironlake_cursor_wm_info.max_wm) | ||
3362 | cursorb_wm = ironlake_cursor_wm_info.max_wm; | ||
3363 | 4086 | ||
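The FBC watermark follows the quoted spec formula, FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2, with the division rounded up as in the code. A worked instance with example numbers of my own:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int display_wm = 120;		/* example "final primary" watermark */
	int line_size = 1920 * 4;	/* hdisplay * bytes per pixel        */

	int fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;

	printf("fbc_wm = %d (the level is disabled if this exceeds the FBC max)\n",
	       fbc_wm);
	return 0;
}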
3364 | reg_value = I915_READ(WM0_PIPEB_ILK); | 4087 | static void ironlake_update_wm(struct drm_device *dev) |
3365 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | 4088 | { |
3366 | reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | | 4089 | struct drm_i915_private *dev_priv = dev->dev_private; |
3367 | (cursorb_wm & WM0_PIPE_CURSOR_MASK); | 4090 | int fbc_wm, plane_wm, cursor_wm; |
3368 | I915_WRITE(WM0_PIPEB_ILK, reg_value); | 4091 | unsigned int enabled; |
3369 | DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, " | 4092 | |
3370 | "cursor: %d\n", planeb_wm, cursorb_wm); | 4093 | enabled = 0; |
4094 | if (g4x_compute_wm0(dev, 0, | ||
4095 | &ironlake_display_wm_info, | ||
4096 | ILK_LP0_PLANE_LATENCY, | ||
4097 | &ironlake_cursor_wm_info, | ||
4098 | ILK_LP0_CURSOR_LATENCY, | ||
4099 | &plane_wm, &cursor_wm)) { | ||
4100 | I915_WRITE(WM0_PIPEA_ILK, | ||
4101 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
4102 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | ||
4103 | " plane %d, " "cursor: %d\n", | ||
4104 | plane_wm, cursor_wm); | ||
4105 | enabled |= 1; | ||
4106 | } | ||
4107 | |||
4108 | if (g4x_compute_wm0(dev, 1, | ||
4109 | &ironlake_display_wm_info, | ||
4110 | ILK_LP0_PLANE_LATENCY, | ||
4111 | &ironlake_cursor_wm_info, | ||
4112 | ILK_LP0_CURSOR_LATENCY, | ||
4113 | &plane_wm, &cursor_wm)) { | ||
4114 | I915_WRITE(WM0_PIPEB_ILK, | ||
4115 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
4116 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | ||
4117 | " plane %d, cursor: %d\n", | ||
4118 | plane_wm, cursor_wm); | ||
4119 | enabled |= 2; | ||
3371 | } | 4120 | } |
3372 | 4121 | ||
3373 | /* | 4122 | /* |
3374 | * Calculate and update the self-refresh watermark only when one | 4123 | * Calculate and update the self-refresh watermark only when one |
3375 | * display plane is used. | 4124 | * display plane is used. |
3376 | */ | 4125 | */ |
3377 | if (!planea_clock || !planeb_clock) { | 4126 | I915_WRITE(WM3_LP_ILK, 0); |
4127 | I915_WRITE(WM2_LP_ILK, 0); | ||
4128 | I915_WRITE(WM1_LP_ILK, 0); | ||
3378 | 4129 | ||
3379 | /* Read the self-refresh latency. The unit is 0.5us */ | 4130 | if (!single_plane_enabled(enabled)) |
3380 | int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; | 4131 | return; |
4132 | enabled = ffs(enabled) - 1; | ||
4133 | |||
4134 | /* WM1 */ | ||
4135 | if (!ironlake_compute_srwm(dev, 1, enabled, | ||
4136 | ILK_READ_WM1_LATENCY() * 500, | ||
4137 | &ironlake_display_srwm_info, | ||
4138 | &ironlake_cursor_srwm_info, | ||
4139 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
4140 | return; | ||
3381 | 4141 | ||
3382 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 4142 | I915_WRITE(WM1_LP_ILK, |
3383 | line_time_us = ((sr_htotal * 1000) / sr_clock); | 4143 | WM1_LP_SR_EN | |
4144 | (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
4145 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
4146 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
4147 | cursor_wm); | ||
4148 | |||
4149 | /* WM2 */ | ||
4150 | if (!ironlake_compute_srwm(dev, 2, enabled, | ||
4151 | ILK_READ_WM2_LATENCY() * 500, | ||
4152 | &ironlake_display_srwm_info, | ||
4153 | &ironlake_cursor_srwm_info, | ||
4154 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
4155 | return; | ||
3384 | 4156 | ||
3385 | /* Use ns/us then divide to preserve precision */ | 4157 | I915_WRITE(WM2_LP_ILK, |
3386 | line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) | 4158 | WM2_LP_EN | |
3387 | / 1000; | 4159 | (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
3388 | 4160 | (fbc_wm << WM1_LP_FBC_SHIFT) | | |
3389 | /* calculate the self-refresh watermark for display plane */ | 4161 | (plane_wm << WM1_LP_SR_SHIFT) | |
3390 | entries_required = line_count * sr_hdisplay * pixel_size; | 4162 | cursor_wm); |
3391 | entries_required = DIV_ROUND_UP(entries_required, | ||
3392 | ironlake_display_srwm_info.cacheline_size); | ||
3393 | sr_wm = entries_required + | ||
3394 | ironlake_display_srwm_info.guard_size; | ||
3395 | |||
3396 | /* calculate the self-refresh watermark for display cursor */ | ||
3397 | entries_required = line_count * pixel_size * 64; | ||
3398 | entries_required = DIV_ROUND_UP(entries_required, | ||
3399 | ironlake_cursor_srwm_info.cacheline_size); | ||
3400 | cursor_wm = entries_required + | ||
3401 | ironlake_cursor_srwm_info.guard_size; | ||
3402 | |||
3403 | /* configure watermark and enable self-refresh */ | ||
3404 | reg_value = I915_READ(WM1_LP_ILK); | ||
3405 | reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | | ||
3406 | WM1_LP_CURSOR_MASK); | ||
3407 | reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | | ||
3408 | (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; | ||
3409 | |||
3410 | I915_WRITE(WM1_LP_ILK, reg_value); | ||
3411 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | ||
3412 | "cursor %d\n", sr_wm, cursor_wm); | ||
3413 | 4163 | ||
3414 | } else { | 4164 | /* |
3415 | /* Turn off self refresh if both pipes are enabled */ | 4165 | * WM3 is unsupported on ILK, probably because we don't have latency |
3416 | I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); | 4166 | * data for that power state |
4167 | */ | ||
4168 | } | ||
4169 | |||
4170 | static void sandybridge_update_wm(struct drm_device *dev) | ||
4171 | { | ||
4172 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4173 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ | ||
4174 | int fbc_wm, plane_wm, cursor_wm; | ||
4175 | unsigned int enabled; | ||
4176 | |||
4177 | enabled = 0; | ||
4178 | if (g4x_compute_wm0(dev, 0, | ||
4179 | &sandybridge_display_wm_info, latency, | ||
4180 | &sandybridge_cursor_wm_info, latency, | ||
4181 | &plane_wm, &cursor_wm)) { | ||
4182 | I915_WRITE(WM0_PIPEA_ILK, | ||
4183 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
4184 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | ||
4185 | " plane %d, " "cursor: %d\n", | ||
4186 | plane_wm, cursor_wm); | ||
4187 | enabled |= 1; | ||
4188 | } | ||
4189 | |||
4190 | if (g4x_compute_wm0(dev, 1, | ||
4191 | &sandybridge_display_wm_info, latency, | ||
4192 | &sandybridge_cursor_wm_info, latency, | ||
4193 | &plane_wm, &cursor_wm)) { | ||
4194 | I915_WRITE(WM0_PIPEB_ILK, | ||
4195 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
4196 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | ||
4197 | " plane %d, cursor: %d\n", | ||
4198 | plane_wm, cursor_wm); | ||
4199 | enabled |= 2; | ||
3417 | } | 4200 | } |
4201 | |||
4202 | /* | ||
4203 | * Calculate and update the self-refresh watermark only when one | ||
4204 | * display plane is used. | ||
4205 | * | ||
4206 | * SNB support 3 levels of watermark. | ||
4207 | * | ||
4208 | * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, | ||
4209 | * and disabled in the descending order | ||
4210 | * | ||
4211 | */ | ||
4212 | I915_WRITE(WM3_LP_ILK, 0); | ||
4213 | I915_WRITE(WM2_LP_ILK, 0); | ||
4214 | I915_WRITE(WM1_LP_ILK, 0); | ||
4215 | |||
4216 | if (!single_plane_enabled(enabled)) | ||
4217 | return; | ||
4218 | enabled = ffs(enabled) - 1; | ||
4219 | |||
4220 | /* WM1 */ | ||
4221 | if (!ironlake_compute_srwm(dev, 1, enabled, | ||
4222 | SNB_READ_WM1_LATENCY() * 500, | ||
4223 | &sandybridge_display_srwm_info, | ||
4224 | &sandybridge_cursor_srwm_info, | ||
4225 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
4226 | return; | ||
4227 | |||
4228 | I915_WRITE(WM1_LP_ILK, | ||
4229 | WM1_LP_SR_EN | | ||
4230 | (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
4231 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
4232 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
4233 | cursor_wm); | ||
4234 | |||
4235 | /* WM2 */ | ||
4236 | if (!ironlake_compute_srwm(dev, 2, enabled, | ||
4237 | SNB_READ_WM2_LATENCY() * 500, | ||
4238 | &sandybridge_display_srwm_info, | ||
4239 | &sandybridge_cursor_srwm_info, | ||
4240 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
4241 | return; | ||
4242 | |||
4243 | I915_WRITE(WM2_LP_ILK, | ||
4244 | WM2_LP_EN | | ||
4245 | (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
4246 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
4247 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
4248 | cursor_wm); | ||
4249 | |||
4250 | /* WM3 */ | ||
4251 | if (!ironlake_compute_srwm(dev, 3, enabled, | ||
4252 | SNB_READ_WM3_LATENCY() * 500, | ||
4253 | &sandybridge_display_srwm_info, | ||
4254 | &sandybridge_cursor_srwm_info, | ||
4255 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
4256 | return; | ||
4257 | |||
4258 | I915_WRITE(WM3_LP_ILK, | ||
4259 | WM3_LP_EN | | ||
4260 | (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
4261 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
4262 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
4263 | cursor_wm); | ||
3418 | } | 4264 | } |
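The pipe bookkeeping in sandybridge_update_wm() above reduces to a small bitmask (bit 0 for pipe A, bit 1 for pipe B), and the LP/self-refresh levels are only programmed when exactly one bit is set. A small sketch of that gate; single_plane_enabled() here is written out as an exactly-one-bit test, which is assumed to match the driver's helper.

/* Sketch of the "exactly one pipe enabled" gate used above before the
 * WM1/WM2/WM3 levels are programmed. single_plane_enabled() is modelled
 * as an exactly-one-bit-set test.
 */
#include <stdio.h>
#include <strings.h>            /* ffs() */

static int single_plane_enabled(unsigned int mask)
{
        return mask && (mask & (mask - 1)) == 0;
}

int main(void)
{
        unsigned int enabled = 0;

        enabled |= 1;           /* pipe A produced a valid WM0 */
        /* enabled |= 2; */     /* pipe B too -> LP watermarks skipped */

        if (!single_plane_enabled(enabled)) {
                printf("multiple pipes active: leave WM1-3 disabled\n");
                return 0;
        }
        printf("program LP watermarks for pipe %d\n", ffs(enabled) - 1);
        return 0;
}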
4265 | |||
3419 | /** | 4266 | /** |
3420 | * intel_update_watermarks - update FIFO watermark values based on current modes | 4267 | * intel_update_watermarks - update FIFO watermark values based on current modes |
3421 | * | 4268 | * |
@@ -3447,117 +4294,56 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3447 | * | 4294 | * |
3448 | * We don't use the sprite, so we can ignore that. And on Crestline we have | 4295 | * We don't use the sprite, so we can ignore that. And on Crestline we have |
3449 | * to set the non-SR watermarks to 8. | 4296 | * to set the non-SR watermarks to 8. |
3450 | */ | 4297 | */ |
3451 | static void intel_update_watermarks(struct drm_device *dev) | 4298 | static void intel_update_watermarks(struct drm_device *dev) |
3452 | { | 4299 | { |
3453 | struct drm_i915_private *dev_priv = dev->dev_private; | 4300 | struct drm_i915_private *dev_priv = dev->dev_private; |
3454 | struct drm_crtc *crtc; | ||
3455 | int sr_hdisplay = 0; | ||
3456 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; | ||
3457 | int enabled = 0, pixel_size = 0; | ||
3458 | int sr_htotal = 0; | ||
3459 | 4301 | ||
3460 | if (!dev_priv->display.update_wm) | 4302 | if (dev_priv->display.update_wm) |
3461 | return; | 4303 | dev_priv->display.update_wm(dev); |
3462 | 4304 | } | |
3463 | /* Get the clock config from both planes */ | ||
3464 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
3465 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3466 | if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { | ||
3467 | enabled++; | ||
3468 | if (intel_crtc->plane == 0) { | ||
3469 | DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", | ||
3470 | intel_crtc->pipe, crtc->mode.clock); | ||
3471 | planea_clock = crtc->mode.clock; | ||
3472 | } else { | ||
3473 | DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n", | ||
3474 | intel_crtc->pipe, crtc->mode.clock); | ||
3475 | planeb_clock = crtc->mode.clock; | ||
3476 | } | ||
3477 | sr_hdisplay = crtc->mode.hdisplay; | ||
3478 | sr_clock = crtc->mode.clock; | ||
3479 | sr_htotal = crtc->mode.htotal; | ||
3480 | if (crtc->fb) | ||
3481 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3482 | else | ||
3483 | pixel_size = 4; /* by default */ | ||
3484 | } | ||
3485 | } | ||
3486 | |||
3487 | if (enabled <= 0) | ||
3488 | return; | ||
3489 | 4305 | ||
3490 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, | 4306 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
3491 | sr_hdisplay, sr_htotal, pixel_size); | 4307 | { |
4308 | return dev_priv->lvds_use_ssc && i915_panel_use_ssc | ||
4309 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); | ||
3492 | } | 4310 | } |
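The rewritten intel_update_watermarks() above no longer walks the CRTC list itself; it simply dispatches to whichever per-platform update_wm hook was installed at init time. A reduced sketch of that dispatch pattern; the struct and function names below are illustrative, not the driver's actual layout.

/* Reduced sketch of the per-platform watermark hook dispatch used by the
 * simplified intel_update_watermarks() above. Names are illustrative only.
 */
#include <stdio.h>

struct display_funcs {
        void (*update_wm)(void *dev);   /* NULL when the platform has none */
};

static void snb_update_wm(void *dev)
{
        (void)dev;
        printf("programming Sandy Bridge watermarks\n");
}

static void update_watermarks(const struct display_funcs *funcs, void *dev)
{
        if (funcs->update_wm)
                funcs->update_wm(dev);
}

int main(void)
{
        struct display_funcs funcs = { .update_wm = snb_update_wm };

        update_watermarks(&funcs, NULL);
        return 0;
}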
3493 | 4311 | ||
3494 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | 4312 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
3495 | struct drm_display_mode *mode, | 4313 | struct drm_display_mode *mode, |
3496 | struct drm_display_mode *adjusted_mode, | 4314 | struct drm_display_mode *adjusted_mode, |
3497 | int x, int y, | 4315 | int x, int y, |
3498 | struct drm_framebuffer *old_fb) | 4316 | struct drm_framebuffer *old_fb) |
3499 | { | 4317 | { |
3500 | struct drm_device *dev = crtc->dev; | 4318 | struct drm_device *dev = crtc->dev; |
3501 | struct drm_i915_private *dev_priv = dev->dev_private; | 4319 | struct drm_i915_private *dev_priv = dev->dev_private; |
3502 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4320 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3503 | int pipe = intel_crtc->pipe; | 4321 | int pipe = intel_crtc->pipe; |
3504 | int plane = intel_crtc->plane; | 4322 | int plane = intel_crtc->plane; |
3505 | int fp_reg = (pipe == 0) ? FPA0 : FPB0; | ||
3506 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | ||
3507 | int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; | ||
3508 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | ||
3509 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
3510 | int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | ||
3511 | int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | ||
3512 | int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | ||
3513 | int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; | ||
3514 | int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; | ||
3515 | int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; | ||
3516 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; | ||
3517 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; | ||
3518 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | ||
3519 | int refclk, num_connectors = 0; | 4323 | int refclk, num_connectors = 0; |
3520 | intel_clock_t clock, reduced_clock; | 4324 | intel_clock_t clock, reduced_clock; |
3521 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; | 4325 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; |
3522 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; | 4326 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
3523 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | 4327 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
3524 | struct intel_encoder *has_edp_encoder = NULL; | ||
3525 | struct drm_mode_config *mode_config = &dev->mode_config; | 4328 | struct drm_mode_config *mode_config = &dev->mode_config; |
3526 | struct drm_encoder *encoder; | 4329 | struct intel_encoder *encoder; |
3527 | const intel_limit_t *limit; | 4330 | const intel_limit_t *limit; |
3528 | int ret; | 4331 | int ret; |
3529 | struct fdi_m_n m_n = {0}; | ||
3530 | int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1; | ||
3531 | int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1; | ||
3532 | int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1; | ||
3533 | int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1; | ||
3534 | int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; | ||
3535 | int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; | ||
3536 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | ||
3537 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | ||
3538 | int trans_dpll_sel = (pipe == 0) ? 0 : 1; | ||
3539 | int lvds_reg = LVDS; | ||
3540 | u32 temp; | 4332 | u32 temp; |
3541 | int sdvo_pixel_multiply; | 4333 | u32 lvds_sync = 0; |
3542 | int target_clock; | ||
3543 | |||
3544 | drm_vblank_pre_modeset(dev, pipe); | ||
3545 | |||
3546 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | ||
3547 | struct intel_encoder *intel_encoder; | ||
3548 | 4334 | ||
3549 | if (encoder->crtc != crtc) | 4335 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
4336 | if (encoder->base.crtc != crtc) | ||
3550 | continue; | 4337 | continue; |
3551 | 4338 | ||
3552 | intel_encoder = enc_to_intel_encoder(encoder); | 4339 | switch (encoder->type) { |
3553 | switch (intel_encoder->type) { | ||
3554 | case INTEL_OUTPUT_LVDS: | 4340 | case INTEL_OUTPUT_LVDS: |
3555 | is_lvds = true; | 4341 | is_lvds = true; |
3556 | break; | 4342 | break; |
3557 | case INTEL_OUTPUT_SDVO: | 4343 | case INTEL_OUTPUT_SDVO: |
3558 | case INTEL_OUTPUT_HDMI: | 4344 | case INTEL_OUTPUT_HDMI: |
3559 | is_sdvo = true; | 4345 | is_sdvo = true; |
3560 | if (intel_encoder->needs_tv_clock) | 4346 | if (encoder->needs_tv_clock) |
3561 | is_tv = true; | 4347 | is_tv = true; |
3562 | break; | 4348 | break; |
3563 | case INTEL_OUTPUT_DVO: | 4349 | case INTEL_OUTPUT_DVO: |
@@ -3572,48 +4358,41 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3572 | case INTEL_OUTPUT_DISPLAYPORT: | 4358 | case INTEL_OUTPUT_DISPLAYPORT: |
3573 | is_dp = true; | 4359 | is_dp = true; |
3574 | break; | 4360 | break; |
3575 | case INTEL_OUTPUT_EDP: | ||
3576 | has_edp_encoder = intel_encoder; | ||
3577 | break; | ||
3578 | } | 4361 | } |
3579 | 4362 | ||
3580 | num_connectors++; | 4363 | num_connectors++; |
3581 | } | 4364 | } |
3582 | 4365 | ||
3583 | if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { | 4366 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
3584 | refclk = dev_priv->lvds_ssc_freq * 1000; | 4367 | refclk = dev_priv->lvds_ssc_freq * 1000; |
3585 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | 4368 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
3586 | refclk / 1000); | 4369 | refclk / 1000); |
3587 | } else if (IS_I9XX(dev)) { | 4370 | } else if (!IS_GEN2(dev)) { |
3588 | refclk = 96000; | 4371 | refclk = 96000; |
3589 | if (HAS_PCH_SPLIT(dev)) | ||
3590 | refclk = 120000; /* 120Mhz refclk */ | ||
3591 | } else { | 4372 | } else { |
3592 | refclk = 48000; | 4373 | refclk = 48000; |
3593 | } | 4374 | } |
3594 | |||
3595 | 4375 | ||
3596 | /* | 4376 | /* |
3597 | * Returns a set of divisors for the desired target clock with the given | 4377 | * Returns a set of divisors for the desired target clock with the given |
3598 | * refclk, or FALSE. The returned values represent the clock equation: | 4378 | * refclk, or FALSE. The returned values represent the clock equation: |
3599 | * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | 4379 | * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
3600 | */ | 4380 | */ |
3601 | limit = intel_limit(crtc); | 4381 | limit = intel_limit(crtc, refclk); |
3602 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | 4382 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); |
3603 | if (!ok) { | 4383 | if (!ok) { |
3604 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 4384 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
3605 | drm_vblank_post_modeset(dev, pipe); | ||
3606 | return -EINVAL; | 4385 | return -EINVAL; |
3607 | } | 4386 | } |
3608 | 4387 | ||
3609 | /* Ensure that the cursor is valid for the new mode before changing... */ | 4388 | /* Ensure that the cursor is valid for the new mode before changing... */ |
3610 | intel_crtc_update_cursor(crtc); | 4389 | intel_crtc_update_cursor(crtc, true); |
3611 | 4390 | ||
3612 | if (is_lvds && dev_priv->lvds_downclock_avail) { | 4391 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
3613 | has_reduced_clock = limit->find_pll(limit, crtc, | 4392 | has_reduced_clock = limit->find_pll(limit, crtc, |
3614 | dev_priv->lvds_downclock, | 4393 | dev_priv->lvds_downclock, |
3615 | refclk, | 4394 | refclk, |
3616 | &reduced_clock); | 4395 | &reduced_clock); |
3617 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { | 4396 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { |
3618 | /* | 4397 | /* |
3619 | * If a different P is found, it means that we can't | 4398 | * If a different P is found, it means that we can't |
@@ -3622,7 +4401,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3622 | * feature. | 4401 | * feature. |
3623 | */ | 4402 | */ |
3624 | DRM_DEBUG_KMS("Different P is found for " | 4403 | DRM_DEBUG_KMS("Different P is found for " |
3625 | "LVDS clock/downclock\n"); | 4404 | "LVDS clock/downclock\n"); |
3626 | has_reduced_clock = 0; | 4405 | has_reduced_clock = 0; |
3627 | } | 4406 | } |
3628 | } | 4407 | } |
@@ -3630,14 +4409,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3630 | this mirrors the VBIOS setting. */ | 4409 | this mirrors the VBIOS setting. */ |
3631 | if (is_sdvo && is_tv) { | 4410 | if (is_sdvo && is_tv) { |
3632 | if (adjusted_mode->clock >= 100000 | 4411 | if (adjusted_mode->clock >= 100000 |
3633 | && adjusted_mode->clock < 140500) { | 4412 | && adjusted_mode->clock < 140500) { |
3634 | clock.p1 = 2; | 4413 | clock.p1 = 2; |
3635 | clock.p2 = 10; | 4414 | clock.p2 = 10; |
3636 | clock.n = 3; | 4415 | clock.n = 3; |
3637 | clock.m1 = 16; | 4416 | clock.m1 = 16; |
3638 | clock.m2 = 8; | 4417 | clock.m2 = 8; |
3639 | } else if (adjusted_mode->clock >= 140500 | 4418 | } else if (adjusted_mode->clock >= 140500 |
3640 | && adjusted_mode->clock <= 200000) { | 4419 | && adjusted_mode->clock <= 200000) { |
3641 | clock.p1 = 1; | 4420 | clock.p1 = 1; |
3642 | clock.p2 = 10; | 4421 | clock.p2 = 10; |
3643 | clock.n = 6; | 4422 | clock.n = 6; |
@@ -3646,128 +4425,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3646 | } | 4425 | } |
3647 | } | 4426 | } |
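The fixed divisors chosen above plug into the clock equation quoted earlier in this function, refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. A short sketch that evaluates that expression; the divisor values below are arbitrary examples, not taken from any limit table, and no claim is made about which hardware generations use exactly this formula variant.

/* Evaluate the dot-clock equation quoted in the comment above:
 *   dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2
 * All clocks are in kHz, matching the driver's convention. The divisors
 * below are arbitrary example values.
 */
#include <stdio.h>

static long dot_clock(long refclk, int n, int m1, int m2, int p1, int p2)
{
        long m = 5 * (m1 + 2) + (m2 + 2);

        return refclk * m / (n + 2) / p1 / p2;
}

int main(void)
{
        printf("%ld kHz\n", dot_clock(96000, 4, 20, 10, 2, 10));
        return 0;
}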
3648 | 4427 | ||
3649 | /* FDI link */ | ||
3650 | if (HAS_PCH_SPLIT(dev)) { | ||
3651 | int lane = 0, link_bw, bpp; | ||
3652 | /* eDP doesn't require FDI link, so just set DP M/N | ||
3653 | according to current link config */ | ||
3654 | if (has_edp_encoder) { | ||
3655 | target_clock = mode->clock; | ||
3656 | intel_edp_link_config(has_edp_encoder, | ||
3657 | &lane, &link_bw); | ||
3658 | } else { | ||
3659 | /* DP over FDI requires target mode clock | ||
3660 | instead of link clock */ | ||
3661 | if (is_dp) | ||
3662 | target_clock = mode->clock; | ||
3663 | else | ||
3664 | target_clock = adjusted_mode->clock; | ||
3665 | link_bw = 270000; | ||
3666 | } | ||
3667 | |||
3668 | /* determine panel color depth */ | ||
3669 | temp = I915_READ(pipeconf_reg); | ||
3670 | temp &= ~PIPE_BPC_MASK; | ||
3671 | if (is_lvds) { | ||
3672 | int lvds_reg = I915_READ(PCH_LVDS); | ||
3673 | /* the BPC will be 6 if it is 18-bit LVDS panel */ | ||
3674 | if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | ||
3675 | temp |= PIPE_8BPC; | ||
3676 | else | ||
3677 | temp |= PIPE_6BPC; | ||
3678 | } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { | ||
3679 | switch (dev_priv->edp_bpp/3) { | ||
3680 | case 8: | ||
3681 | temp |= PIPE_8BPC; | ||
3682 | break; | ||
3683 | case 10: | ||
3684 | temp |= PIPE_10BPC; | ||
3685 | break; | ||
3686 | case 6: | ||
3687 | temp |= PIPE_6BPC; | ||
3688 | break; | ||
3689 | case 12: | ||
3690 | temp |= PIPE_12BPC; | ||
3691 | break; | ||
3692 | } | ||
3693 | } else | ||
3694 | temp |= PIPE_8BPC; | ||
3695 | I915_WRITE(pipeconf_reg, temp); | ||
3696 | I915_READ(pipeconf_reg); | ||
3697 | |||
3698 | switch (temp & PIPE_BPC_MASK) { | ||
3699 | case PIPE_8BPC: | ||
3700 | bpp = 24; | ||
3701 | break; | ||
3702 | case PIPE_10BPC: | ||
3703 | bpp = 30; | ||
3704 | break; | ||
3705 | case PIPE_6BPC: | ||
3706 | bpp = 18; | ||
3707 | break; | ||
3708 | case PIPE_12BPC: | ||
3709 | bpp = 36; | ||
3710 | break; | ||
3711 | default: | ||
3712 | DRM_ERROR("unknown pipe bpc value\n"); | ||
3713 | bpp = 24; | ||
3714 | } | ||
3715 | |||
3716 | if (!lane) { | ||
3717 | /* | ||
3718 | * Account for spread spectrum to avoid | ||
3719 | * oversubscribing the link. Max center spread | ||
3720 | * is 2.5%; use 5% for safety's sake. | ||
3721 | */ | ||
3722 | u32 bps = target_clock * bpp * 21 / 20; | ||
3723 | lane = bps / (link_bw * 8) + 1; | ||
3724 | } | ||
3725 | |||
3726 | intel_crtc->fdi_lanes = lane; | ||
3727 | |||
3728 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); | ||
3729 | } | ||
3730 | |||
3731 | /* Ironlake: try to setup display ref clock before DPLL | ||
3732 | * enabling. This is only under driver's control after | ||
3733 | * PCH B stepping, previous chipset stepping should be | ||
3734 | * ignoring this setting. | ||
3735 | */ | ||
3736 | if (HAS_PCH_SPLIT(dev)) { | ||
3737 | temp = I915_READ(PCH_DREF_CONTROL); | ||
3738 | /* Always enable nonspread source */ | ||
3739 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
3740 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
3741 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
3742 | POSTING_READ(PCH_DREF_CONTROL); | ||
3743 | |||
3744 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
3745 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
3746 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
3747 | POSTING_READ(PCH_DREF_CONTROL); | ||
3748 | |||
3749 | udelay(200); | ||
3750 | |||
3751 | if (has_edp_encoder) { | ||
3752 | if (dev_priv->lvds_use_ssc) { | ||
3753 | temp |= DREF_SSC1_ENABLE; | ||
3754 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
3755 | POSTING_READ(PCH_DREF_CONTROL); | ||
3756 | |||
3757 | udelay(200); | ||
3758 | |||
3759 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
3760 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
3761 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
3762 | POSTING_READ(PCH_DREF_CONTROL); | ||
3763 | } else { | ||
3764 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
3765 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
3766 | POSTING_READ(PCH_DREF_CONTROL); | ||
3767 | } | ||
3768 | } | ||
3769 | } | ||
3770 | |||
3771 | if (IS_PINEVIEW(dev)) { | 4428 | if (IS_PINEVIEW(dev)) { |
3772 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; | 4429 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; |
3773 | if (has_reduced_clock) | 4430 | if (has_reduced_clock) |
@@ -3780,21 +4437,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3780 | reduced_clock.m2; | 4437 | reduced_clock.m2; |
3781 | } | 4438 | } |
3782 | 4439 | ||
3783 | if (!HAS_PCH_SPLIT(dev)) | 4440 | dpll = DPLL_VGA_MODE_DIS; |
3784 | dpll = DPLL_VGA_MODE_DIS; | ||
3785 | 4441 | ||
3786 | if (IS_I9XX(dev)) { | 4442 | if (!IS_GEN2(dev)) { |
3787 | if (is_lvds) | 4443 | if (is_lvds) |
3788 | dpll |= DPLLB_MODE_LVDS; | 4444 | dpll |= DPLLB_MODE_LVDS; |
3789 | else | 4445 | else |
3790 | dpll |= DPLLB_MODE_DAC_SERIAL; | 4446 | dpll |= DPLLB_MODE_DAC_SERIAL; |
3791 | if (is_sdvo) { | 4447 | if (is_sdvo) { |
4448 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
4449 | if (pixel_multiplier > 1) { | ||
4450 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
4451 | dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; | ||
4452 | } | ||
3792 | dpll |= DPLL_DVO_HIGH_SPEED; | 4453 | dpll |= DPLL_DVO_HIGH_SPEED; |
3793 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | ||
3794 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
3795 | dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; | ||
3796 | else if (HAS_PCH_SPLIT(dev)) | ||
3797 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | ||
3798 | } | 4454 | } |
3799 | if (is_dp) | 4455 | if (is_dp) |
3800 | dpll |= DPLL_DVO_HIGH_SPEED; | 4456 | dpll |= DPLL_DVO_HIGH_SPEED; |
@@ -3804,9 +4460,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3804 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; | 4460 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; |
3805 | else { | 4461 | else { |
3806 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | 4462 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
3807 | /* also FPA1 */ | ||
3808 | if (HAS_PCH_SPLIT(dev)) | ||
3809 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | ||
3810 | if (IS_G4X(dev) && has_reduced_clock) | 4463 | if (IS_G4X(dev) && has_reduced_clock) |
3811 | dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | 4464 | dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
3812 | } | 4465 | } |
@@ -3824,7 +4477,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3824 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | 4477 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; |
3825 | break; | 4478 | break; |
3826 | } | 4479 | } |
3827 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) | 4480 | if (INTEL_INFO(dev)->gen >= 4) |
3828 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); | 4481 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); |
3829 | } else { | 4482 | } else { |
3830 | if (is_lvds) { | 4483 | if (is_lvds) { |
@@ -3845,27 +4498,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3845 | /* XXX: just matching BIOS for now */ | 4498 | /* XXX: just matching BIOS for now */ |
3846 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | 4499 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
3847 | dpll |= 3; | 4500 | dpll |= 3; |
3848 | else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) | 4501 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
3849 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | 4502 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
3850 | else | 4503 | else |
3851 | dpll |= PLL_REF_INPUT_DREFCLK; | 4504 | dpll |= PLL_REF_INPUT_DREFCLK; |
3852 | 4505 | ||
3853 | /* setup pipeconf */ | 4506 | /* setup pipeconf */ |
3854 | pipeconf = I915_READ(pipeconf_reg); | 4507 | pipeconf = I915_READ(PIPECONF(pipe)); |
3855 | 4508 | ||
3856 | /* Set up the display plane register */ | 4509 | /* Set up the display plane register */ |
3857 | dspcntr = DISPPLANE_GAMMA_ENABLE; | 4510 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
3858 | 4511 | ||
3859 | /* Ironlake's plane is forced to pipe, bit 24 is to | 4512 | /* Ironlake's plane is forced to pipe, bit 24 is to |
3860 | enable color space conversion */ | 4513 | enable color space conversion */ |
3861 | if (!HAS_PCH_SPLIT(dev)) { | 4514 | if (pipe == 0) |
3862 | if (pipe == 0) | 4515 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
3863 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; | 4516 | else |
3864 | else | 4517 | dspcntr |= DISPPLANE_SEL_PIPE_B; |
3865 | dspcntr |= DISPPLANE_SEL_PIPE_B; | ||
3866 | } | ||
3867 | 4518 | ||
3868 | if (pipe == 0 && !IS_I965G(dev)) { | 4519 | if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { |
3869 | /* Enable pixel doubling when the dot clock is > 90% of the (display) | 4520 | /* Enable pixel doubling when the dot clock is > 90% of the (display) |
3870 | * core speed. | 4521 | * core speed. |
3871 | * | 4522 | * |
@@ -3874,51 +4525,536 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3874 | */ | 4525 | */ |
3875 | if (mode->clock > | 4526 | if (mode->clock > |
3876 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) | 4527 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) |
3877 | pipeconf |= PIPEACONF_DOUBLE_WIDE; | 4528 | pipeconf |= PIPECONF_DOUBLE_WIDE; |
3878 | else | 4529 | else |
3879 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; | 4530 | pipeconf &= ~PIPECONF_DOUBLE_WIDE; |
3880 | } | 4531 | } |
3881 | 4532 | ||
3882 | dspcntr |= DISPLAY_PLANE_ENABLE; | ||
3883 | pipeconf |= PIPEACONF_ENABLE; | ||
3884 | dpll |= DPLL_VCO_ENABLE; | 4533 | dpll |= DPLL_VCO_ENABLE; |
3885 | 4534 | ||
4535 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | ||
4536 | drm_mode_debug_printmodeline(mode); | ||
3886 | 4537 | ||
3887 | /* Disable the panel fitter if it was on our pipe */ | 4538 | I915_WRITE(FP0(pipe), fp); |
3888 | if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) | 4539 | I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
3889 | I915_WRITE(PFIT_CONTROL, 0); | 4540 | |
4541 | POSTING_READ(DPLL(pipe)); | ||
4542 | udelay(150); | ||
4543 | |||
4544 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | ||
4545 | * This is an exception to the general rule that mode_set doesn't turn | ||
4546 | * things on. | ||
4547 | */ | ||
4548 | if (is_lvds) { | ||
4549 | temp = I915_READ(LVDS); | ||
4550 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; | ||
4551 | if (pipe == 1) { | ||
4552 | temp |= LVDS_PIPEB_SELECT; | ||
4553 | } else { | ||
4554 | temp &= ~LVDS_PIPEB_SELECT; | ||
4555 | } | ||
4556 | /* set the corresponding LVDS_BORDER bit */ | ||
4557 | temp |= dev_priv->lvds_border_bits; | ||
4558 | /* Set the B0-B3 data pairs corresponding to whether we're going to | ||
4559 | * set the DPLLs for dual-channel mode or not. | ||
4560 | */ | ||
4561 | if (clock.p2 == 7) | ||
4562 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; | ||
4563 | else | ||
4564 | temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); | ||
4565 | |||
4566 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) | ||
4567 | * appropriately here, but we need to look more thoroughly into how | ||
4568 | * panels behave in the two modes. | ||
4569 | */ | ||
4570 | /* set the dithering flag on LVDS as needed */ | ||
4571 | if (INTEL_INFO(dev)->gen >= 4) { | ||
4572 | if (dev_priv->lvds_dither) | ||
4573 | temp |= LVDS_ENABLE_DITHER; | ||
4574 | else | ||
4575 | temp &= ~LVDS_ENABLE_DITHER; | ||
4576 | } | ||
4577 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
4578 | lvds_sync |= LVDS_HSYNC_POLARITY; | ||
4579 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
4580 | lvds_sync |= LVDS_VSYNC_POLARITY; | ||
4581 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) | ||
4582 | != lvds_sync) { | ||
4583 | char flags[2] = "-+"; | ||
4584 | DRM_INFO("Changing LVDS panel from " | ||
4585 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", | ||
4586 | flags[!(temp & LVDS_HSYNC_POLARITY)], | ||
4587 | flags[!(temp & LVDS_VSYNC_POLARITY)], | ||
4588 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], | ||
4589 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); | ||
4590 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); | ||
4591 | temp |= lvds_sync; | ||
4592 | } | ||
4593 | I915_WRITE(LVDS, temp); | ||
4594 | } | ||
4595 | |||
4596 | if (is_dp) { | ||
4597 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | ||
4598 | } | ||
4599 | |||
4600 | I915_WRITE(DPLL(pipe), dpll); | ||
4601 | |||
4602 | /* Wait for the clocks to stabilize. */ | ||
4603 | POSTING_READ(DPLL(pipe)); | ||
4604 | udelay(150); | ||
4605 | |||
4606 | if (INTEL_INFO(dev)->gen >= 4) { | ||
4607 | temp = 0; | ||
4608 | if (is_sdvo) { | ||
4609 | temp = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
4610 | if (temp > 1) | ||
4611 | temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; | ||
4612 | else | ||
4613 | temp = 0; | ||
4614 | } | ||
4615 | I915_WRITE(DPLL_MD(pipe), temp); | ||
4616 | } else { | ||
4617 | /* The pixel multiplier can only be updated once the | ||
4618 | * DPLL is enabled and the clocks are stable. | ||
4619 | * | ||
4620 | * So write it again. | ||
4621 | */ | ||
4622 | I915_WRITE(DPLL(pipe), dpll); | ||
4623 | } | ||
4624 | |||
4625 | intel_crtc->lowfreq_avail = false; | ||
4626 | if (is_lvds && has_reduced_clock && i915_powersave) { | ||
4627 | I915_WRITE(FP1(pipe), fp2); | ||
4628 | intel_crtc->lowfreq_avail = true; | ||
4629 | if (HAS_PIPE_CXSR(dev)) { | ||
4630 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | ||
4631 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | ||
4632 | } | ||
4633 | } else { | ||
4634 | I915_WRITE(FP1(pipe), fp); | ||
4635 | if (HAS_PIPE_CXSR(dev)) { | ||
4636 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | ||
4637 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | ||
4638 | } | ||
4639 | } | ||
4640 | |||
4641 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | ||
4642 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; | ||
4643 | /* the chip adds 2 halflines automatically */ | ||
4644 | adjusted_mode->crtc_vdisplay -= 1; | ||
4645 | adjusted_mode->crtc_vtotal -= 1; | ||
4646 | adjusted_mode->crtc_vblank_start -= 1; | ||
4647 | adjusted_mode->crtc_vblank_end -= 1; | ||
4648 | adjusted_mode->crtc_vsync_end -= 1; | ||
4649 | adjusted_mode->crtc_vsync_start -= 1; | ||
4650 | } else | ||
4651 | pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ | ||
4652 | |||
4653 | I915_WRITE(HTOTAL(pipe), | ||
4654 | (adjusted_mode->crtc_hdisplay - 1) | | ||
4655 | ((adjusted_mode->crtc_htotal - 1) << 16)); | ||
4656 | I915_WRITE(HBLANK(pipe), | ||
4657 | (adjusted_mode->crtc_hblank_start - 1) | | ||
4658 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | ||
4659 | I915_WRITE(HSYNC(pipe), | ||
4660 | (adjusted_mode->crtc_hsync_start - 1) | | ||
4661 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | ||
4662 | |||
4663 | I915_WRITE(VTOTAL(pipe), | ||
4664 | (adjusted_mode->crtc_vdisplay - 1) | | ||
4665 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | ||
4666 | I915_WRITE(VBLANK(pipe), | ||
4667 | (adjusted_mode->crtc_vblank_start - 1) | | ||
4668 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | ||
4669 | I915_WRITE(VSYNC(pipe), | ||
4670 | (adjusted_mode->crtc_vsync_start - 1) | | ||
4671 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | ||
4672 | |||
4673 | /* pipesrc and dspsize control the size that is scaled from, | ||
4674 | * which should always be the user's requested size. | ||
4675 | */ | ||
4676 | I915_WRITE(DSPSIZE(plane), | ||
4677 | ((mode->vdisplay - 1) << 16) | | ||
4678 | (mode->hdisplay - 1)); | ||
4679 | I915_WRITE(DSPPOS(plane), 0); | ||
4680 | I915_WRITE(PIPESRC(pipe), | ||
4681 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | ||
4682 | |||
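Each timing register written above packs two related values into one word: the low 16 bits carry (first value - 1) and the high 16 bits carry (second value - 1), while PIPESRC packs the width into the high half instead. A short sketch of that packing with an arbitrary 1024x768 example mode:

/* Pack CRTC timings the way the HTOTAL/VTOTAL-style writes above do:
 * low 16 bits = (first value - 1), high 16 bits = (second value - 1).
 * The 1024x768 numbers are an arbitrary example mode.
 */
#include <stdio.h>

static unsigned int pack_timing(unsigned int lo, unsigned int hi)
{
        return (lo - 1) | ((hi - 1) << 16);
}

int main(void)
{
        unsigned int hdisplay = 1024, htotal = 1344;
        unsigned int vdisplay = 768, vtotal = 806;

        printf("HTOTAL  = 0x%08x\n", pack_timing(hdisplay, htotal));
        printf("VTOTAL  = 0x%08x\n", pack_timing(vdisplay, vtotal));
        /* PIPESRC is packed the other way round: width in the high half */
        printf("PIPESRC = 0x%08x\n", ((hdisplay - 1) << 16) | (vdisplay - 1));
        return 0;
}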
4683 | I915_WRITE(PIPECONF(pipe), pipeconf); | ||
4684 | POSTING_READ(PIPECONF(pipe)); | ||
4685 | intel_enable_pipe(dev_priv, pipe, false); | ||
4686 | |||
4687 | intel_wait_for_vblank(dev, pipe); | ||
4688 | |||
4689 | I915_WRITE(DSPCNTR(plane), dspcntr); | ||
4690 | POSTING_READ(DSPCNTR(plane)); | ||
4691 | intel_enable_plane(dev_priv, plane, pipe); | ||
4692 | |||
4693 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | ||
4694 | |||
4695 | intel_update_watermarks(dev); | ||
4696 | |||
4697 | return ret; | ||
4698 | } | ||
4699 | |||
4700 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | ||
4701 | struct drm_display_mode *mode, | ||
4702 | struct drm_display_mode *adjusted_mode, | ||
4703 | int x, int y, | ||
4704 | struct drm_framebuffer *old_fb) | ||
4705 | { | ||
4706 | struct drm_device *dev = crtc->dev; | ||
4707 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4708 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
4709 | int pipe = intel_crtc->pipe; | ||
4710 | int plane = intel_crtc->plane; | ||
4711 | int refclk, num_connectors = 0; | ||
4712 | intel_clock_t clock, reduced_clock; | ||
4713 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; | ||
4714 | bool ok, has_reduced_clock = false, is_sdvo = false; | ||
4715 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | ||
4716 | struct intel_encoder *has_edp_encoder = NULL; | ||
4717 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
4718 | struct intel_encoder *encoder; | ||
4719 | const intel_limit_t *limit; | ||
4720 | int ret; | ||
4721 | struct fdi_m_n m_n = {0}; | ||
4722 | u32 temp; | ||
4723 | u32 lvds_sync = 0; | ||
4724 | int target_clock, pixel_multiplier, lane, link_bw, bpp, factor; | ||
4725 | |||
4726 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
4727 | if (encoder->base.crtc != crtc) | ||
4728 | continue; | ||
4729 | |||
4730 | switch (encoder->type) { | ||
4731 | case INTEL_OUTPUT_LVDS: | ||
4732 | is_lvds = true; | ||
4733 | break; | ||
4734 | case INTEL_OUTPUT_SDVO: | ||
4735 | case INTEL_OUTPUT_HDMI: | ||
4736 | is_sdvo = true; | ||
4737 | if (encoder->needs_tv_clock) | ||
4738 | is_tv = true; | ||
4739 | break; | ||
4740 | case INTEL_OUTPUT_TVOUT: | ||
4741 | is_tv = true; | ||
4742 | break; | ||
4743 | case INTEL_OUTPUT_ANALOG: | ||
4744 | is_crt = true; | ||
4745 | break; | ||
4746 | case INTEL_OUTPUT_DISPLAYPORT: | ||
4747 | is_dp = true; | ||
4748 | break; | ||
4749 | case INTEL_OUTPUT_EDP: | ||
4750 | has_edp_encoder = encoder; | ||
4751 | break; | ||
4752 | } | ||
4753 | |||
4754 | num_connectors++; | ||
4755 | } | ||
4756 | |||
4757 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | ||
4758 | refclk = dev_priv->lvds_ssc_freq * 1000; | ||
4759 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | ||
4760 | refclk / 1000); | ||
4761 | } else { | ||
4762 | refclk = 96000; | ||
4763 | if (!has_edp_encoder || | ||
4764 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) | ||
4765 | refclk = 120000; /* 120MHz refclk */ | ||
4766 | } | ||
4767 | |||
4768 | /* | ||
4769 | * Returns a set of divisors for the desired target clock with the given | ||
4770 | * refclk, or FALSE. The returned values represent the clock equation: | ||
4771 | * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | ||
4772 | */ | ||
4773 | limit = intel_limit(crtc, refclk); | ||
4774 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | ||
4775 | if (!ok) { | ||
4776 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | ||
4777 | return -EINVAL; | ||
4778 | } | ||
4779 | |||
4780 | /* Ensure that the cursor is valid for the new mode before changing... */ | ||
4781 | intel_crtc_update_cursor(crtc, true); | ||
4782 | |||
4783 | if (is_lvds && dev_priv->lvds_downclock_avail) { | ||
4784 | has_reduced_clock = limit->find_pll(limit, crtc, | ||
4785 | dev_priv->lvds_downclock, | ||
4786 | refclk, | ||
4787 | &reduced_clock); | ||
4788 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { | ||
4789 | /* | ||
4790 | * If the different P is found, it means that we can't | ||
4791 | * switch the display clock by using the FP0/FP1. | ||
4792 | * In such case we will disable the LVDS downclock | ||
4793 | * feature. | ||
4794 | */ | ||
4795 | DRM_DEBUG_KMS("Different P is found for " | ||
4796 | "LVDS clock/downclock\n"); | ||
4797 | has_reduced_clock = 0; | ||
4798 | } | ||
4799 | } | ||
4800 | /* SDVO TV has fixed PLL values that depend on its clock range; | ||
4801 | this mirrors the VBIOS setting. */ | ||
4802 | if (is_sdvo && is_tv) { | ||
4803 | if (adjusted_mode->clock >= 100000 | ||
4804 | && adjusted_mode->clock < 140500) { | ||
4805 | clock.p1 = 2; | ||
4806 | clock.p2 = 10; | ||
4807 | clock.n = 3; | ||
4808 | clock.m1 = 16; | ||
4809 | clock.m2 = 8; | ||
4810 | } else if (adjusted_mode->clock >= 140500 | ||
4811 | && adjusted_mode->clock <= 200000) { | ||
4812 | clock.p1 = 1; | ||
4813 | clock.p2 = 10; | ||
4814 | clock.n = 6; | ||
4815 | clock.m1 = 12; | ||
4816 | clock.m2 = 8; | ||
4817 | } | ||
4818 | } | ||
4819 | |||
4820 | /* FDI link */ | ||
4821 | pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
4822 | lane = 0; | ||
4823 | /* CPU eDP doesn't require FDI link, so just set DP M/N | ||
4824 | according to current link config */ | ||
4825 | if (has_edp_encoder && | ||
4826 | !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
4827 | target_clock = mode->clock; | ||
4828 | intel_edp_link_config(has_edp_encoder, | ||
4829 | &lane, &link_bw); | ||
4830 | } else { | ||
4831 | /* [e]DP over FDI requires target mode clock | ||
4832 | instead of link clock */ | ||
4833 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) | ||
4834 | target_clock = mode->clock; | ||
4835 | else | ||
4836 | target_clock = adjusted_mode->clock; | ||
4837 | |||
4838 | /* FDI is a binary signal running at ~2.7GHz, encoding | ||
4839 | * each output octet as 10 bits. The actual frequency | ||
4840 | * is stored as a divider into a 100MHz clock, and the | ||
4841 | * mode pixel clock is stored in units of 1KHz. | ||
4842 | * Hence the bw of each lane in terms of the mode signal | ||
4843 | * is: | ||
4844 | */ | ||
4845 | link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; | ||
4846 | } | ||
4847 | |||
4848 | /* determine panel color depth */ | ||
4849 | temp = I915_READ(PIPECONF(pipe)); | ||
4850 | temp &= ~PIPE_BPC_MASK; | ||
4851 | if (is_lvds) { | ||
4852 | /* the BPC will be 6 if it is 18-bit LVDS panel */ | ||
4853 | if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | ||
4854 | temp |= PIPE_8BPC; | ||
4855 | else | ||
4856 | temp |= PIPE_6BPC; | ||
4857 | } else if (has_edp_encoder) { | ||
4858 | switch (dev_priv->edp.bpp/3) { | ||
4859 | case 8: | ||
4860 | temp |= PIPE_8BPC; | ||
4861 | break; | ||
4862 | case 10: | ||
4863 | temp |= PIPE_10BPC; | ||
4864 | break; | ||
4865 | case 6: | ||
4866 | temp |= PIPE_6BPC; | ||
4867 | break; | ||
4868 | case 12: | ||
4869 | temp |= PIPE_12BPC; | ||
4870 | break; | ||
4871 | } | ||
4872 | } else | ||
4873 | temp |= PIPE_8BPC; | ||
4874 | I915_WRITE(PIPECONF(pipe), temp); | ||
4875 | |||
4876 | switch (temp & PIPE_BPC_MASK) { | ||
4877 | case PIPE_8BPC: | ||
4878 | bpp = 24; | ||
4879 | break; | ||
4880 | case PIPE_10BPC: | ||
4881 | bpp = 30; | ||
4882 | break; | ||
4883 | case PIPE_6BPC: | ||
4884 | bpp = 18; | ||
4885 | break; | ||
4886 | case PIPE_12BPC: | ||
4887 | bpp = 36; | ||
4888 | break; | ||
4889 | default: | ||
4890 | DRM_ERROR("unknown pipe bpc value\n"); | ||
4891 | bpp = 24; | ||
4892 | } | ||
4893 | |||
4894 | if (!lane) { | ||
4895 | /* | ||
4896 | * Account for spread spectrum to avoid | ||
4897 | * oversubscribing the link. Max center spread | ||
4898 | * is 2.5%; use 5% for safety's sake. | ||
4899 | */ | ||
4900 | u32 bps = target_clock * bpp * 21 / 20; | ||
4901 | lane = bps / (link_bw * 8) + 1; | ||
4902 | } | ||
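The lane count above is simply the payload rate, padded by 5% for spread spectrum, divided by what one FDI lane carries (link_bw * 8, with both clocks in the driver's kHz units). A standalone sketch of that arithmetic; the 1080p-class figures are only an example.

/* FDI lane estimate as computed above: pad the payload by 5% for spread
 * spectrum, then divide by the per-lane bandwidth (link_bw * 8, both in
 * the driver's kHz units). The example numbers are a 1080p-class mode.
 */
#include <stdio.h>

static unsigned int fdi_lanes(unsigned int target_clock_khz,
                              unsigned int bpp,
                              unsigned int link_bw_khz)
{
        unsigned int bps = target_clock_khz * bpp * 21 / 20;

        return bps / (link_bw_khz * 8) + 1;
}

int main(void)
{
        /* 148.5 MHz pixel clock, 24 bpp, 2.7 GHz link stored as 270000 */
        printf("lanes: %u\n", fdi_lanes(148500, 24, 270000));
        return 0;
}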
4903 | |||
4904 | intel_crtc->fdi_lanes = lane; | ||
4905 | |||
4906 | if (pixel_multiplier > 1) | ||
4907 | link_bw *= pixel_multiplier; | ||
4908 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); | ||
4909 | |||
4910 | /* Ironlake: try to set up the display reference clock before DPLL | ||
4911 | * enabling. This is only under the driver's control after | ||
4912 | * PCH B stepping; previous chipset steppings ignore | ||
4913 | * this setting. | ||
4914 | */ | ||
4915 | temp = I915_READ(PCH_DREF_CONTROL); | ||
4916 | /* Always enable nonspread source */ | ||
4917 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
4918 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
4919 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
4920 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
4921 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
4922 | |||
4923 | POSTING_READ(PCH_DREF_CONTROL); | ||
4924 | udelay(200); | ||
4925 | |||
4926 | if (has_edp_encoder) { | ||
4927 | if (intel_panel_use_ssc(dev_priv)) { | ||
4928 | temp |= DREF_SSC1_ENABLE; | ||
4929 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
4930 | |||
4931 | POSTING_READ(PCH_DREF_CONTROL); | ||
4932 | udelay(200); | ||
4933 | } | ||
4934 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
4935 | |||
4936 | /* Enable CPU source on CPU attached eDP */ | ||
4937 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
4938 | if (intel_panel_use_ssc(dev_priv)) | ||
4939 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
4940 | else | ||
4941 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
4942 | } else { | ||
4943 | /* Enable SSC on PCH eDP if needed */ | ||
4944 | if (intel_panel_use_ssc(dev_priv)) { | ||
4945 | DRM_ERROR("enabling SSC on PCH\n"); | ||
4946 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
4947 | } | ||
4948 | } | ||
4949 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
4950 | POSTING_READ(PCH_DREF_CONTROL); | ||
4951 | udelay(200); | ||
4952 | } | ||
4953 | |||
4954 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | ||
4955 | if (has_reduced_clock) | ||
4956 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | ||
4957 | reduced_clock.m2; | ||
4958 | |||
4959 | /* Enable autotuning of the PLL clock (if permissible) */ | ||
4960 | factor = 21; | ||
4961 | if (is_lvds) { | ||
4962 | if ((intel_panel_use_ssc(dev_priv) && | ||
4963 | dev_priv->lvds_ssc_freq == 100) || | ||
4964 | (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) | ||
4965 | factor = 25; | ||
4966 | } else if (is_sdvo && is_tv) | ||
4967 | factor = 20; | ||
4968 | |||
4969 | if (clock.m1 < factor * clock.n) | ||
4970 | fp |= FP_CB_TUNE; | ||
4971 | |||
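The autotuning decision above comes down to comparing m1 against a per-output factor times n: 21 by default, 25 for LVDS with 100 MHz SSC or a dual-channel panel, 20 for SDVO TV. A tiny sketch of that predicate; FP_CB_TUNE itself is a register bit, represented here by a plain boolean result.

/* Sketch of the PLL autotuning decision above: pick a factor from the
 * output type, then request FP_CB_TUNE when m1 < factor * n. The register
 * bit is modelled as a boolean result.
 */
#include <stdbool.h>
#include <stdio.h>

static bool wants_cb_tune(int m1, int n, bool is_lvds, bool lvds_100mhz_ssc,
                          bool lvds_dual_channel, bool is_sdvo_tv)
{
        int factor = 21;

        if (is_lvds) {
                if (lvds_100mhz_ssc || lvds_dual_channel)
                        factor = 25;
        } else if (is_sdvo_tv) {
                factor = 20;
        }

        return m1 < factor * n;
}

int main(void)
{
        /* e.g. m1 = 14, n = 1 on a plain DAC output */
        printf("FP_CB_TUNE: %d\n",
               wants_cb_tune(14, 1, false, false, false, false));
        return 0;
}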
4972 | dpll = 0; | ||
4973 | |||
4974 | if (is_lvds) | ||
4975 | dpll |= DPLLB_MODE_LVDS; | ||
4976 | else | ||
4977 | dpll |= DPLLB_MODE_DAC_SERIAL; | ||
4978 | if (is_sdvo) { | ||
4979 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
4980 | if (pixel_multiplier > 1) { | ||
4981 | dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | ||
4982 | } | ||
4983 | dpll |= DPLL_DVO_HIGH_SPEED; | ||
4984 | } | ||
4985 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) | ||
4986 | dpll |= DPLL_DVO_HIGH_SPEED; | ||
4987 | |||
4988 | /* compute bitmask from p1 value */ | ||
4989 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | ||
4990 | /* also FPA1 */ | ||
4991 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | ||
4992 | |||
4993 | switch (clock.p2) { | ||
4994 | case 5: | ||
4995 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | ||
4996 | break; | ||
4997 | case 7: | ||
4998 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; | ||
4999 | break; | ||
5000 | case 10: | ||
5001 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; | ||
5002 | break; | ||
5003 | case 14: | ||
5004 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | ||
5005 | break; | ||
5006 | } | ||
5007 | |||
5008 | if (is_sdvo && is_tv) | ||
5009 | dpll |= PLL_REF_INPUT_TVCLKINBC; | ||
5010 | else if (is_tv) | ||
5011 | /* XXX: just matching BIOS for now */ | ||
5012 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | ||
5013 | dpll |= 3; | ||
5014 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) | ||
5015 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | ||
5016 | else | ||
5017 | dpll |= PLL_REF_INPUT_DREFCLK; | ||
5018 | |||
5019 | /* setup pipeconf */ | ||
5020 | pipeconf = I915_READ(PIPECONF(pipe)); | ||
5021 | |||
5022 | /* Set up the display plane register */ | ||
5023 | dspcntr = DISPPLANE_GAMMA_ENABLE; | ||
3890 | 5024 | ||
3891 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | 5025 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
3892 | drm_mode_debug_printmodeline(mode); | 5026 | drm_mode_debug_printmodeline(mode); |
3893 | 5027 | ||
3894 | /* assign to Ironlake registers */ | 5028 | /* PCH eDP needs FDI, but CPU eDP does not */ |
3895 | if (HAS_PCH_SPLIT(dev)) { | 5029 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3896 | fp_reg = pch_fp_reg; | 5030 | I915_WRITE(PCH_FP0(pipe), fp); |
3897 | dpll_reg = pch_dpll_reg; | 5031 | I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
3898 | } | ||
3899 | 5032 | ||
3900 | if (!has_edp_encoder) { | 5033 | POSTING_READ(PCH_DPLL(pipe)); |
3901 | I915_WRITE(fp_reg, fp); | ||
3902 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | ||
3903 | I915_READ(dpll_reg); | ||
3904 | udelay(150); | 5034 | udelay(150); |
3905 | } | 5035 | } |
3906 | 5036 | ||
3907 | /* enable transcoder DPLL */ | 5037 | /* enable transcoder DPLL */ |
3908 | if (HAS_PCH_CPT(dev)) { | 5038 | if (HAS_PCH_CPT(dev)) { |
3909 | temp = I915_READ(PCH_DPLL_SEL); | 5039 | temp = I915_READ(PCH_DPLL_SEL); |
3910 | if (trans_dpll_sel == 0) | 5040 | switch (pipe) { |
3911 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | 5041 | case 0: |
3912 | else | 5042 | temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL; |
3913 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | 5043 | break; |
5044 | case 1: | ||
5045 | temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL; | ||
5046 | break; | ||
5047 | case 2: | ||
5048 | /* FIXME: manage transcoder PLLs? */ | ||
5049 | temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL; | ||
5050 | break; | ||
5051 | default: | ||
5052 | BUG(); | ||
5053 | } | ||
3914 | I915_WRITE(PCH_DPLL_SEL, temp); | 5054 | I915_WRITE(PCH_DPLL_SEL, temp); |
3915 | I915_READ(PCH_DPLL_SEL); | ||
3916 | udelay(150); | ||
3917 | } | ||
3918 | 5055 | ||
3919 | if (HAS_PCH_SPLIT(dev)) { | 5056 | POSTING_READ(PCH_DPLL_SEL); |
3920 | pipeconf &= ~PIPE_ENABLE_DITHER; | 5057 | udelay(150); |
3921 | pipeconf &= ~PIPE_DITHER_TYPE_MASK; | ||
3922 | } | 5058 | } |
3923 | 5059 | ||
3924 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 5060 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
@@ -3926,105 +5062,96 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3926 | * things on. | 5062 | * things on. |
3927 | */ | 5063 | */ |
3928 | if (is_lvds) { | 5064 | if (is_lvds) { |
3929 | u32 lvds; | 5065 | temp = I915_READ(PCH_LVDS); |
3930 | 5066 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; | |
3931 | if (HAS_PCH_SPLIT(dev)) | ||
3932 | lvds_reg = PCH_LVDS; | ||
3933 | |||
3934 | lvds = I915_READ(lvds_reg); | ||
3935 | lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; | ||
3936 | if (pipe == 1) { | 5067 | if (pipe == 1) { |
3937 | if (HAS_PCH_CPT(dev)) | 5068 | if (HAS_PCH_CPT(dev)) |
3938 | lvds |= PORT_TRANS_B_SEL_CPT; | 5069 | temp |= PORT_TRANS_B_SEL_CPT; |
3939 | else | 5070 | else |
3940 | lvds |= LVDS_PIPEB_SELECT; | 5071 | temp |= LVDS_PIPEB_SELECT; |
3941 | } else { | 5072 | } else { |
3942 | if (HAS_PCH_CPT(dev)) | 5073 | if (HAS_PCH_CPT(dev)) |
3943 | lvds &= ~PORT_TRANS_SEL_MASK; | 5074 | temp &= ~PORT_TRANS_SEL_MASK; |
3944 | else | 5075 | else |
3945 | lvds &= ~LVDS_PIPEB_SELECT; | 5076 | temp &= ~LVDS_PIPEB_SELECT; |
3946 | } | 5077 | } |
3947 | /* set the corresponding LVDS_BORDER bit */ | 5078 | /* set the corresponding LVDS_BORDER bit */ |
3948 | lvds |= dev_priv->lvds_border_bits; | 5079 | temp |= dev_priv->lvds_border_bits; |
3949 | /* Set the B0-B3 data pairs corresponding to whether we're going to | 5080 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
3950 | * set the DPLLs for dual-channel mode or not. | 5081 | * set the DPLLs for dual-channel mode or not. |
3951 | */ | 5082 | */ |
3952 | if (clock.p2 == 7) | 5083 | if (clock.p2 == 7) |
3953 | lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; | 5084 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
3954 | else | 5085 | else |
3955 | lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); | 5086 | temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
3956 | 5087 | ||
3957 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) | 5088 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) |
3958 | * appropriately here, but we need to look more thoroughly into how | 5089 | * appropriately here, but we need to look more thoroughly into how |
3959 | * panels behave in the two modes. | 5090 | * panels behave in the two modes. |
3960 | */ | 5091 | */ |
3961 | /* set the dithering flag */ | 5092 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
3962 | if (IS_I965G(dev)) { | 5093 | lvds_sync |= LVDS_HSYNC_POLARITY; |
3963 | if (dev_priv->lvds_dither) { | 5094 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
3964 | if (HAS_PCH_SPLIT(dev)) { | 5095 | lvds_sync |= LVDS_VSYNC_POLARITY; |
3965 | pipeconf |= PIPE_ENABLE_DITHER; | 5096 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) |
3966 | pipeconf |= PIPE_DITHER_TYPE_ST01; | 5097 | != lvds_sync) { |
3967 | } else | 5098 | char flags[2] = "-+"; |
3968 | lvds |= LVDS_ENABLE_DITHER; | 5099 | DRM_INFO("Changing LVDS panel from " |
3969 | } else { | 5100 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", |
3970 | if (!HAS_PCH_SPLIT(dev)) { | 5101 | flags[!(temp & LVDS_HSYNC_POLARITY)], |
3971 | lvds &= ~LVDS_ENABLE_DITHER; | 5102 | flags[!(temp & LVDS_VSYNC_POLARITY)], |
3972 | } | 5103 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], |
3973 | } | 5104 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); |
5105 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); | ||
5106 | temp |= lvds_sync; | ||
3974 | } | 5107 | } |
3975 | I915_WRITE(lvds_reg, lvds); | 5108 | I915_WRITE(PCH_LVDS, temp); |
3976 | I915_READ(lvds_reg); | ||
3977 | } | 5109 | } |
3978 | if (is_dp) | 5110 | |
5111 | /* set the dithering flag and clear for anything other than a panel. */ | ||
5112 | pipeconf &= ~PIPECONF_DITHER_EN; | ||
5113 | pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; | ||
5114 | if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { | ||
5115 | pipeconf |= PIPECONF_DITHER_EN; | ||
5116 | pipeconf |= PIPECONF_DITHER_TYPE_ST1; | ||
5117 | } | ||
5118 | |||
5119 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
3979 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | 5120 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
3980 | else if (HAS_PCH_SPLIT(dev)) { | 5121 | } else { |
3981 | /* For non-DP output, clear any trans DP clock recovery setting.*/ | 5122 | /* For non-DP output, clear any trans DP clock recovery setting.*/ |
3982 | if (pipe == 0) { | 5123 | I915_WRITE(TRANSDATA_M1(pipe), 0); |
3983 | I915_WRITE(TRANSA_DATA_M1, 0); | 5124 | I915_WRITE(TRANSDATA_N1(pipe), 0); |
3984 | I915_WRITE(TRANSA_DATA_N1, 0); | 5125 | I915_WRITE(TRANSDPLINK_M1(pipe), 0); |
3985 | I915_WRITE(TRANSA_DP_LINK_M1, 0); | 5126 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); |
3986 | I915_WRITE(TRANSA_DP_LINK_N1, 0); | ||
3987 | } else { | ||
3988 | I915_WRITE(TRANSB_DATA_M1, 0); | ||
3989 | I915_WRITE(TRANSB_DATA_N1, 0); | ||
3990 | I915_WRITE(TRANSB_DP_LINK_M1, 0); | ||
3991 | I915_WRITE(TRANSB_DP_LINK_N1, 0); | ||
3992 | } | ||
3993 | } | 5127 | } |
3994 | 5128 | ||
3995 | if (!has_edp_encoder) { | 5129 | if (!has_edp_encoder || |
3996 | I915_WRITE(fp_reg, fp); | 5130 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3997 | I915_WRITE(dpll_reg, dpll); | 5131 | I915_WRITE(PCH_DPLL(pipe), dpll); |
3998 | I915_READ(dpll_reg); | ||
3999 | /* Wait for the clocks to stabilize. */ | ||
4000 | udelay(150); | ||
4001 | 5132 | ||
4002 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { | ||
4003 | if (is_sdvo) { | ||
4004 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | ||
4005 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | ||
4006 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); | ||
4007 | } else | ||
4008 | I915_WRITE(dpll_md_reg, 0); | ||
4009 | } else { | ||
4010 | /* write it again -- the BIOS does, after all */ | ||
4011 | I915_WRITE(dpll_reg, dpll); | ||
4012 | } | ||
4013 | I915_READ(dpll_reg); | ||
4014 | /* Wait for the clocks to stabilize. */ | 5133 | /* Wait for the clocks to stabilize. */ |
5134 | POSTING_READ(PCH_DPLL(pipe)); | ||
4015 | udelay(150); | 5135 | udelay(150); |
5136 | |||
5137 | /* The pixel multiplier can only be updated once the | ||
5138 | * DPLL is enabled and the clocks are stable. | ||
5139 | * | ||
5140 | * So write it again. | ||
5141 | */ | ||
5142 | I915_WRITE(PCH_DPLL(pipe), dpll); | ||
4016 | } | 5143 | } |
4017 | 5144 | ||
5145 | intel_crtc->lowfreq_avail = false; | ||
4018 | if (is_lvds && has_reduced_clock && i915_powersave) { | 5146 | if (is_lvds && has_reduced_clock && i915_powersave) { |
4019 | I915_WRITE(fp_reg + 4, fp2); | 5147 | I915_WRITE(PCH_FP1(pipe), fp2); |
4020 | intel_crtc->lowfreq_avail = true; | 5148 | intel_crtc->lowfreq_avail = true; |
4021 | if (HAS_PIPE_CXSR(dev)) { | 5149 | if (HAS_PIPE_CXSR(dev)) { |
4022 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | 5150 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
4023 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | 5151 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
4024 | } | 5152 | } |
4025 | } else { | 5153 | } else { |
4026 | I915_WRITE(fp_reg + 4, fp); | 5154 | I915_WRITE(PCH_FP1(pipe), fp); |
4027 | intel_crtc->lowfreq_avail = false; | ||
4028 | if (HAS_PIPE_CXSR(dev)) { | 5155 | if (HAS_PIPE_CXSR(dev)) { |
4029 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | 5156 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
4030 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | 5157 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
@@ -4043,74 +5170,80 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4043 | } else | 5170 | } else |
4044 | pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ | 5171 | pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ |
4045 | 5172 | ||
4046 | I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | | 5173 | I915_WRITE(HTOTAL(pipe), |
5174 | (adjusted_mode->crtc_hdisplay - 1) | | ||
4047 | ((adjusted_mode->crtc_htotal - 1) << 16)); | 5175 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
4048 | I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | | 5176 | I915_WRITE(HBLANK(pipe), |
5177 | (adjusted_mode->crtc_hblank_start - 1) | | ||
4049 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | 5178 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); |
4050 | I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | | 5179 | I915_WRITE(HSYNC(pipe), |
5180 | (adjusted_mode->crtc_hsync_start - 1) | | ||
4051 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | 5181 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); |
4052 | I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | | 5182 | |
5183 | I915_WRITE(VTOTAL(pipe), | ||
5184 | (adjusted_mode->crtc_vdisplay - 1) | | ||
4053 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | 5185 | ((adjusted_mode->crtc_vtotal - 1) << 16)); |
4054 | I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | | 5186 | I915_WRITE(VBLANK(pipe), |
5187 | (adjusted_mode->crtc_vblank_start - 1) | | ||
4055 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | 5188 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); |
4056 | I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | | 5189 | I915_WRITE(VSYNC(pipe), |
5190 | (adjusted_mode->crtc_vsync_start - 1) | | ||
4057 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | 5191 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); |
4058 | /* pipesrc and dspsize control the size that is scaled from, which should | 5192 | |
5193 | /* pipesrc controls the size that is scaled from, which should | ||
4059 | * always be the user's requested size. | 5194 | * always be the user's requested size. |
4060 | */ | 5195 | */ |
4061 | if (!HAS_PCH_SPLIT(dev)) { | 5196 | I915_WRITE(PIPESRC(pipe), |
4062 | I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | | 5197 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); |
4063 | (mode->hdisplay - 1)); | ||
4064 | I915_WRITE(dsppos_reg, 0); | ||
4065 | } | ||
4066 | I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | ||
4067 | |||
4068 | if (HAS_PCH_SPLIT(dev)) { | ||
4069 | I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); | ||
4070 | I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); | ||
4071 | I915_WRITE(link_m1_reg, m_n.link_m); | ||
4072 | I915_WRITE(link_n1_reg, m_n.link_n); | ||
4073 | |||
4074 | if (has_edp_encoder) { | ||
4075 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); | ||
4076 | } else { | ||
4077 | /* enable FDI RX PLL too */ | ||
4078 | temp = I915_READ(fdi_rx_reg); | ||
4079 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); | ||
4080 | I915_READ(fdi_rx_reg); | ||
4081 | udelay(200); | ||
4082 | 5198 | ||
4083 | /* enable FDI TX PLL too */ | 5199 | I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
4084 | temp = I915_READ(fdi_tx_reg); | 5200 | I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); |
4085 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | 5201 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); |
4086 | I915_READ(fdi_tx_reg); | 5202 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); |
4087 | 5203 | ||
4088 | /* enable FDI RX PCDCLK */ | 5204 | if (has_edp_encoder && |
4089 | temp = I915_READ(fdi_rx_reg); | 5205 | !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
4090 | I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); | 5206 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
4091 | I915_READ(fdi_rx_reg); | ||
4092 | udelay(200); | ||
4093 | } | ||
4094 | } | 5207 | } |
4095 | 5208 | ||
4096 | I915_WRITE(pipeconf_reg, pipeconf); | 5209 | I915_WRITE(PIPECONF(pipe), pipeconf); |
4097 | I915_READ(pipeconf_reg); | 5210 | POSTING_READ(PIPECONF(pipe)); |
4098 | 5211 | ||
4099 | intel_wait_for_vblank(dev, pipe); | 5212 | intel_wait_for_vblank(dev, pipe); |
4100 | 5213 | ||
4101 | if (IS_IRONLAKE(dev)) { | 5214 | if (IS_GEN5(dev)) { |
4102 | /* enable address swizzle for tiling buffer */ | 5215 | /* enable address swizzle for tiling buffer */ |
4103 | temp = I915_READ(DISP_ARB_CTL); | 5216 | temp = I915_READ(DISP_ARB_CTL); |
4104 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); | 5217 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); |
4105 | } | 5218 | } |
4106 | 5219 | ||
4107 | I915_WRITE(dspcntr_reg, dspcntr); | 5220 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5221 | POSTING_READ(DSPCNTR(plane)); | ||
4108 | 5222 | ||
4109 | /* Flush the plane changes */ | ||
4110 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 5223 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
4111 | 5224 | ||
4112 | intel_update_watermarks(dev); | 5225 | intel_update_watermarks(dev); |
4113 | 5226 | ||
5227 | return ret; | ||
5228 | } | ||
5229 | |||
5230 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | ||
5231 | struct drm_display_mode *mode, | ||
5232 | struct drm_display_mode *adjusted_mode, | ||
5233 | int x, int y, | ||
5234 | struct drm_framebuffer *old_fb) | ||
5235 | { | ||
5236 | struct drm_device *dev = crtc->dev; | ||
5237 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5238 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5239 | int pipe = intel_crtc->pipe; | ||
5240 | int ret; | ||
5241 | |||
5242 | drm_vblank_pre_modeset(dev, pipe); | ||
5243 | |||
5244 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, | ||
5245 | x, y, old_fb); | ||
5246 | |||
4114 | drm_vblank_post_modeset(dev, pipe); | 5247 | drm_vblank_post_modeset(dev, pipe); |
4115 | 5248 | ||
4116 | return ret; | 5249 | return ret; |
@@ -4122,7 +5255,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
4122 | struct drm_device *dev = crtc->dev; | 5255 | struct drm_device *dev = crtc->dev; |
4123 | struct drm_i915_private *dev_priv = dev->dev_private; | 5256 | struct drm_i915_private *dev_priv = dev->dev_private; |
4124 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5257 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4125 | int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B; | 5258 | int palreg = PALETTE(intel_crtc->pipe); |
4126 | int i; | 5259 | int i; |
4127 | 5260 | ||
4128 | /* The clocks have to be on to load the palette. */ | 5261 | /* The clocks have to be on to load the palette. */ |
@@ -4131,8 +5264,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
4131 | 5264 | ||
4132 | /* use legacy palette for Ironlake */ | 5265 | /* use legacy palette for Ironlake */ |
4133 | if (HAS_PCH_SPLIT(dev)) | 5266 | if (HAS_PCH_SPLIT(dev)) |
4134 | palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : | 5267 | palreg = LGC_PALETTE(intel_crtc->pipe); |
4135 | LGC_PALETTE_B; | ||
4136 | 5268 | ||
4137 | for (i = 0; i < 256; i++) { | 5269 | for (i = 0; i < 256; i++) { |
4138 | I915_WRITE(palreg + 4 * i, | 5270 | I915_WRITE(palreg + 4 * i, |
@@ -4153,12 +5285,12 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4153 | if (intel_crtc->cursor_visible == visible) | 5285 | if (intel_crtc->cursor_visible == visible) |
4154 | return; | 5286 | return; |
4155 | 5287 | ||
4156 | cntl = I915_READ(CURACNTR); | 5288 | cntl = I915_READ(_CURACNTR); |
4157 | if (visible) { | 5289 | if (visible) { |
4158 | /* On these chipsets we can only modify the base whilst | 5290 | /* On these chipsets we can only modify the base whilst |
4159 | * the cursor is disabled. | 5291 | * the cursor is disabled. |
4160 | */ | 5292 | */ |
4161 | I915_WRITE(CURABASE, base); | 5293 | I915_WRITE(_CURABASE, base); |
4162 | 5294 | ||
4163 | cntl &= ~(CURSOR_FORMAT_MASK); | 5295 | cntl &= ~(CURSOR_FORMAT_MASK); |
4164 | /* XXX width must be 64, stride 256 => 0x00 << 28 */ | 5296 | /* XXX width must be 64, stride 256 => 0x00 << 28 */ |
@@ -4167,7 +5299,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4167 | CURSOR_FORMAT_ARGB; | 5299 | CURSOR_FORMAT_ARGB; |
4168 | } else | 5300 | } else |
4169 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | 5301 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); |
4170 | I915_WRITE(CURACNTR, cntl); | 5302 | I915_WRITE(_CURACNTR, cntl); |
4171 | 5303 | ||
4172 | intel_crtc->cursor_visible = visible; | 5304 | intel_crtc->cursor_visible = visible; |
4173 | } | 5305 | } |
@@ -4181,7 +5313,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4181 | bool visible = base != 0; | 5313 | bool visible = base != 0; |
4182 | 5314 | ||
4183 | if (intel_crtc->cursor_visible != visible) { | 5315 | if (intel_crtc->cursor_visible != visible) { |
4184 | uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); | 5316 | uint32_t cntl = I915_READ(CURCNTR(pipe)); |
4185 | if (base) { | 5317 | if (base) { |
4186 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | 5318 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); |
4187 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | 5319 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; |
@@ -4190,16 +5322,17 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4190 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | 5322 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); |
4191 | cntl |= CURSOR_MODE_DISABLE; | 5323 | cntl |= CURSOR_MODE_DISABLE; |
4192 | } | 5324 | } |
4193 | I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); | 5325 | I915_WRITE(CURCNTR(pipe), cntl); |
4194 | 5326 | ||
4195 | intel_crtc->cursor_visible = visible; | 5327 | intel_crtc->cursor_visible = visible; |
4196 | } | 5328 | } |
4197 | /* and commit changes on next vblank */ | 5329 | /* and commit changes on next vblank */ |
4198 | I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); | 5330 | I915_WRITE(CURBASE(pipe), base); |
4199 | } | 5331 | } |
4200 | 5332 | ||
4201 | /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ | 5333 | /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ |
4202 | static void intel_crtc_update_cursor(struct drm_crtc *crtc) | 5334 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, |
5335 | bool on) | ||
4203 | { | 5336 | { |
4204 | struct drm_device *dev = crtc->dev; | 5337 | struct drm_device *dev = crtc->dev; |
4205 | struct drm_i915_private *dev_priv = dev->dev_private; | 5338 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -4212,7 +5345,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) | |||
4212 | 5345 | ||
4213 | pos = 0; | 5346 | pos = 0; |
4214 | 5347 | ||
4215 | if (intel_crtc->cursor_on && crtc->fb) { | 5348 | if (on && crtc->enabled && crtc->fb) { |
4216 | base = intel_crtc->cursor_addr; | 5349 | base = intel_crtc->cursor_addr; |
4217 | if (x > (int) crtc->fb->width) | 5350 | if (x > (int) crtc->fb->width) |
4218 | base = 0; | 5351 | base = 0; |
@@ -4244,7 +5377,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) | |||
4244 | if (!visible && !intel_crtc->cursor_visible) | 5377 | if (!visible && !intel_crtc->cursor_visible) |
4245 | return; | 5378 | return; |
4246 | 5379 | ||
4247 | I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); | 5380 | I915_WRITE(CURPOS(pipe), pos); |
4248 | if (IS_845G(dev) || IS_I865G(dev)) | 5381 | if (IS_845G(dev) || IS_I865G(dev)) |
4249 | i845_update_cursor(crtc, base); | 5382 | i845_update_cursor(crtc, base); |
4250 | else | 5383 | else |
@@ -4255,15 +5388,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) | |||
4255 | } | 5388 | } |
4256 | 5389 | ||
4257 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | 5390 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
4258 | struct drm_file *file_priv, | 5391 | struct drm_file *file, |
4259 | uint32_t handle, | 5392 | uint32_t handle, |
4260 | uint32_t width, uint32_t height) | 5393 | uint32_t width, uint32_t height) |
4261 | { | 5394 | { |
4262 | struct drm_device *dev = crtc->dev; | 5395 | struct drm_device *dev = crtc->dev; |
4263 | struct drm_i915_private *dev_priv = dev->dev_private; | 5396 | struct drm_i915_private *dev_priv = dev->dev_private; |
4264 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5397 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4265 | struct drm_gem_object *bo; | 5398 | struct drm_i915_gem_object *obj; |
4266 | struct drm_i915_gem_object *obj_priv; | ||
4267 | uint32_t addr; | 5399 | uint32_t addr; |
4268 | int ret; | 5400 | int ret; |
4269 | 5401 | ||
@@ -4273,7 +5405,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4273 | if (!handle) { | 5405 | if (!handle) { |
4274 | DRM_DEBUG_KMS("cursor off\n"); | 5406 | DRM_DEBUG_KMS("cursor off\n"); |
4275 | addr = 0; | 5407 | addr = 0; |
4276 | bo = NULL; | 5408 | obj = NULL; |
4277 | mutex_lock(&dev->struct_mutex); | 5409 | mutex_lock(&dev->struct_mutex); |
4278 | goto finish; | 5410 | goto finish; |
4279 | } | 5411 | } |
@@ -4284,13 +5416,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4284 | return -EINVAL; | 5416 | return -EINVAL; |
4285 | } | 5417 | } |
4286 | 5418 | ||
4287 | bo = drm_gem_object_lookup(dev, file_priv, handle); | 5419 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
4288 | if (!bo) | 5420 | if (&obj->base == NULL) |
4289 | return -ENOENT; | 5421 | return -ENOENT; |
4290 | 5422 | ||
4291 | obj_priv = to_intel_bo(bo); | 5423 | if (obj->base.size < width * height * 4) { |
4292 | |||
4293 | if (bo->size < width * height * 4) { | ||
4294 | DRM_ERROR("buffer is too small\n"); | 5424 | DRM_ERROR("buffer is too small\n"); |
4295 | ret = -ENOMEM; | 5425 | ret = -ENOMEM; |
4296 | goto fail; | 5426 | goto fail; |
@@ -4299,60 +5429,72 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4299 | /* we only need to pin inside GTT if cursor is non-phy */ | 5429 | /* we only need to pin inside GTT if cursor is non-phy */ |
4300 | mutex_lock(&dev->struct_mutex); | 5430 | mutex_lock(&dev->struct_mutex); |
4301 | if (!dev_priv->info->cursor_needs_physical) { | 5431 | if (!dev_priv->info->cursor_needs_physical) { |
4302 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | 5432 | if (obj->tiling_mode) { |
5433 | DRM_ERROR("cursor cannot be tiled\n"); | ||
5434 | ret = -EINVAL; | ||
5435 | goto fail_locked; | ||
5436 | } | ||
5437 | |||
5438 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); | ||
4303 | if (ret) { | 5439 | if (ret) { |
4304 | DRM_ERROR("failed to pin cursor bo\n"); | 5440 | DRM_ERROR("failed to pin cursor bo\n"); |
4305 | goto fail_locked; | 5441 | goto fail_locked; |
4306 | } | 5442 | } |
4307 | 5443 | ||
4308 | ret = i915_gem_object_set_to_gtt_domain(bo, 0); | 5444 | ret = i915_gem_object_set_to_gtt_domain(obj, 0); |
5445 | if (ret) { | ||
5446 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | ||
5447 | goto fail_unpin; | ||
5448 | } | ||
5449 | |||
5450 | ret = i915_gem_object_put_fence(obj); | ||
4309 | if (ret) { | 5451 | if (ret) { |
4310 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | 5452 | DRM_ERROR("failed to move cursor bo into the GTT\n"); |
4311 | goto fail_unpin; | 5453 | goto fail_unpin; |
4312 | } | 5454 | } |
4313 | 5455 | ||
4314 | addr = obj_priv->gtt_offset; | 5456 | addr = obj->gtt_offset; |
4315 | } else { | 5457 | } else { |
4316 | int align = IS_I830(dev) ? 16 * 1024 : 256; | 5458 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
4317 | ret = i915_gem_attach_phys_object(dev, bo, | 5459 | ret = i915_gem_attach_phys_object(dev, obj, |
4318 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, | 5460 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, |
4319 | align); | 5461 | align); |
4320 | if (ret) { | 5462 | if (ret) { |
4321 | DRM_ERROR("failed to attach phys object\n"); | 5463 | DRM_ERROR("failed to attach phys object\n"); |
4322 | goto fail_locked; | 5464 | goto fail_locked; |
4323 | } | 5465 | } |
4324 | addr = obj_priv->phys_obj->handle->busaddr; | 5466 | addr = obj->phys_obj->handle->busaddr; |
4325 | } | 5467 | } |
4326 | 5468 | ||
4327 | if (!IS_I9XX(dev)) | 5469 | if (IS_GEN2(dev)) |
4328 | I915_WRITE(CURSIZE, (height << 12) | width); | 5470 | I915_WRITE(CURSIZE, (height << 12) | width); |
4329 | 5471 | ||
4330 | finish: | 5472 | finish: |
4331 | if (intel_crtc->cursor_bo) { | 5473 | if (intel_crtc->cursor_bo) { |
4332 | if (dev_priv->info->cursor_needs_physical) { | 5474 | if (dev_priv->info->cursor_needs_physical) { |
4333 | if (intel_crtc->cursor_bo != bo) | 5475 | if (intel_crtc->cursor_bo != obj) |
4334 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 5476 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
4335 | } else | 5477 | } else |
4336 | i915_gem_object_unpin(intel_crtc->cursor_bo); | 5478 | i915_gem_object_unpin(intel_crtc->cursor_bo); |
4337 | drm_gem_object_unreference(intel_crtc->cursor_bo); | 5479 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); |
4338 | } | 5480 | } |
4339 | 5481 | ||
4340 | mutex_unlock(&dev->struct_mutex); | 5482 | mutex_unlock(&dev->struct_mutex); |
4341 | 5483 | ||
4342 | intel_crtc->cursor_addr = addr; | 5484 | intel_crtc->cursor_addr = addr; |
4343 | intel_crtc->cursor_bo = bo; | 5485 | intel_crtc->cursor_bo = obj; |
4344 | intel_crtc->cursor_width = width; | 5486 | intel_crtc->cursor_width = width; |
4345 | intel_crtc->cursor_height = height; | 5487 | intel_crtc->cursor_height = height; |
4346 | 5488 | ||
4347 | intel_crtc_update_cursor(crtc); | 5489 | intel_crtc_update_cursor(crtc, true); |
4348 | 5490 | ||
4349 | return 0; | 5491 | return 0; |
4350 | fail_unpin: | 5492 | fail_unpin: |
4351 | i915_gem_object_unpin(bo); | 5493 | i915_gem_object_unpin(obj); |
4352 | fail_locked: | 5494 | fail_locked: |
4353 | mutex_unlock(&dev->struct_mutex); | 5495 | mutex_unlock(&dev->struct_mutex); |
4354 | fail: | 5496 | fail: |
4355 | drm_gem_object_unreference_unlocked(bo); | 5497 | drm_gem_object_unreference_unlocked(&obj->base); |
4356 | return ret; | 5498 | return ret; |
4357 | } | 5499 | } |
4358 | 5500 | ||
@@ -4363,7 +5505,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
4363 | intel_crtc->cursor_x = x; | 5505 | intel_crtc->cursor_x = x; |
4364 | intel_crtc->cursor_y = y; | 5506 | intel_crtc->cursor_y = y; |
4365 | 5507 | ||
4366 | intel_crtc_update_cursor(crtc); | 5508 | intel_crtc_update_cursor(crtc, true); |
4367 | 5509 | ||
4368 | return 0; | 5510 | return 0; |
4369 | } | 5511 | } |
@@ -4424,43 +5566,140 @@ static struct drm_display_mode load_detect_mode = { | |||
4424 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 5566 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
4425 | }; | 5567 | }; |
4426 | 5568 | ||
4427 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 5569 | static struct drm_framebuffer * |
4428 | struct drm_connector *connector, | 5570 | intel_framebuffer_create(struct drm_device *dev, |
4429 | struct drm_display_mode *mode, | 5571 | struct drm_mode_fb_cmd *mode_cmd, |
4430 | int *dpms_mode) | 5572 | struct drm_i915_gem_object *obj) |
5573 | { | ||
5574 | struct intel_framebuffer *intel_fb; | ||
5575 | int ret; | ||
5576 | |||
5577 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | ||
5578 | if (!intel_fb) { | ||
5579 | drm_gem_object_unreference_unlocked(&obj->base); | ||
5580 | return ERR_PTR(-ENOMEM); | ||
5581 | } | ||
5582 | |||
5583 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); | ||
5584 | if (ret) { | ||
5585 | drm_gem_object_unreference_unlocked(&obj->base); | ||
5586 | kfree(intel_fb); | ||
5587 | return ERR_PTR(ret); | ||
5588 | } | ||
5589 | |||
5590 | return &intel_fb->base; | ||
5591 | } | ||
5592 | |||
5593 | static u32 | ||
5594 | intel_framebuffer_pitch_for_width(int width, int bpp) | ||
5595 | { | ||
5596 | u32 pitch = DIV_ROUND_UP(width * bpp, 8); | ||
5597 | return ALIGN(pitch, 64); | ||
5598 | } | ||
5599 | |||
5600 | static u32 | ||
5601 | intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) | ||
5602 | { | ||
5603 | u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); | ||
5604 | return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); | ||
5605 | } | ||
5606 | |||
5607 | static struct drm_framebuffer * | ||
5608 | intel_framebuffer_create_for_mode(struct drm_device *dev, | ||
5609 | struct drm_display_mode *mode, | ||
5610 | int depth, int bpp) | ||
5611 | { | ||
5612 | struct drm_i915_gem_object *obj; | ||
5613 | struct drm_mode_fb_cmd mode_cmd; | ||
5614 | |||
5615 | obj = i915_gem_alloc_object(dev, | ||
5616 | intel_framebuffer_size_for_mode(mode, bpp)); | ||
5617 | if (obj == NULL) | ||
5618 | return ERR_PTR(-ENOMEM); | ||
5619 | |||
5620 | mode_cmd.width = mode->hdisplay; | ||
5621 | mode_cmd.height = mode->vdisplay; | ||
5622 | mode_cmd.depth = depth; | ||
5623 | mode_cmd.bpp = bpp; | ||
5624 | mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp); | ||
5625 | |||
5626 | return intel_framebuffer_create(dev, &mode_cmd, obj); | ||
5627 | } | ||
5628 | |||
5629 | static struct drm_framebuffer * | ||
5630 | mode_fits_in_fbdev(struct drm_device *dev, | ||
5631 | struct drm_display_mode *mode) | ||
5632 | { | ||
5633 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5634 | struct drm_i915_gem_object *obj; | ||
5635 | struct drm_framebuffer *fb; | ||
5636 | |||
5637 | if (dev_priv->fbdev == NULL) | ||
5638 | return NULL; | ||
5639 | |||
5640 | obj = dev_priv->fbdev->ifb.obj; | ||
5641 | if (obj == NULL) | ||
5642 | return NULL; | ||
5643 | |||
5644 | fb = &dev_priv->fbdev->ifb.base; | ||
5645 | if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay, | ||
5646 | fb->bits_per_pixel)) | ||
5647 | return NULL; | ||
5648 | |||
5649 | if (obj->base.size < mode->vdisplay * fb->pitch) | ||
5650 | return NULL; | ||
5651 | |||
5652 | return fb; | ||
5653 | } | ||
5654 | |||
5655 | bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | ||
5656 | struct drm_connector *connector, | ||
5657 | struct drm_display_mode *mode, | ||
5658 | struct intel_load_detect_pipe *old) | ||
4431 | { | 5659 | { |
4432 | struct intel_crtc *intel_crtc; | 5660 | struct intel_crtc *intel_crtc; |
4433 | struct drm_crtc *possible_crtc; | 5661 | struct drm_crtc *possible_crtc; |
4434 | struct drm_crtc *supported_crtc =NULL; | 5662 | struct drm_encoder *encoder = &intel_encoder->base; |
4435 | struct drm_encoder *encoder = &intel_encoder->enc; | ||
4436 | struct drm_crtc *crtc = NULL; | 5663 | struct drm_crtc *crtc = NULL; |
4437 | struct drm_device *dev = encoder->dev; | 5664 | struct drm_device *dev = encoder->dev; |
4438 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 5665 | struct drm_framebuffer *old_fb; |
4439 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
4440 | int i = -1; | 5666 | int i = -1; |
4441 | 5667 | ||
5668 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", | ||
5669 | connector->base.id, drm_get_connector_name(connector), | ||
5670 | encoder->base.id, drm_get_encoder_name(encoder)); | ||
5671 | |||
4442 | /* | 5672 | /* |
4443 | * Algorithm gets a little messy: | 5673 | * Algorithm gets a little messy: |
5674 | * | ||
4444 | * - if the connector already has an assigned crtc, use it (but make | 5675 | * - if the connector already has an assigned crtc, use it (but make |
4445 | * sure it's on first) | 5676 | * sure it's on first) |
5677 | * | ||
4446 | * - try to find the first unused crtc that can drive this connector, | 5678 | * - try to find the first unused crtc that can drive this connector, |
4447 | * and use that if we find one | 5679 | * and use that if we find one |
4448 | * - if there are no unused crtcs available, try to use the first | ||
4449 | * one we found that supports the connector | ||
4450 | */ | 5680 | */ |
4451 | 5681 | ||
4452 | /* See if we already have a CRTC for this connector */ | 5682 | /* See if we already have a CRTC for this connector */ |
4453 | if (encoder->crtc) { | 5683 | if (encoder->crtc) { |
4454 | crtc = encoder->crtc; | 5684 | crtc = encoder->crtc; |
4455 | /* Make sure the crtc and connector are running */ | 5685 | |
4456 | intel_crtc = to_intel_crtc(crtc); | 5686 | intel_crtc = to_intel_crtc(crtc); |
4457 | *dpms_mode = intel_crtc->dpms_mode; | 5687 | old->dpms_mode = intel_crtc->dpms_mode; |
5688 | old->load_detect_temp = false; | ||
5689 | |||
5690 | /* Make sure the crtc and connector are running */ | ||
4458 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { | 5691 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { |
5692 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
5693 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
5694 | |||
4459 | crtc_funcs = crtc->helper_private; | 5695 | crtc_funcs = crtc->helper_private; |
4460 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | 5696 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); |
5697 | |||
5698 | encoder_funcs = encoder->helper_private; | ||
4461 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | 5699 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); |
4462 | } | 5700 | } |
4463 | return crtc; | 5701 | |
5702 | return true; | ||
4464 | } | 5703 | } |
4465 | 5704 | ||
4466 | /* Find an unused one (if possible) */ | 5705 | /* Find an unused one (if possible) */ |
@@ -4472,66 +5711,91 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
4472 | crtc = possible_crtc; | 5711 | crtc = possible_crtc; |
4473 | break; | 5712 | break; |
4474 | } | 5713 | } |
4475 | if (!supported_crtc) | ||
4476 | supported_crtc = possible_crtc; | ||
4477 | } | 5714 | } |
4478 | 5715 | ||
4479 | /* | 5716 | /* |
4480 | * If we didn't find an unused CRTC, don't use any. | 5717 | * If we didn't find an unused CRTC, don't use any. |
4481 | */ | 5718 | */ |
4482 | if (!crtc) { | 5719 | if (!crtc) { |
4483 | return NULL; | 5720 | DRM_DEBUG_KMS("no pipe available for load-detect\n"); |
5721 | return false; | ||
4484 | } | 5722 | } |
4485 | 5723 | ||
4486 | encoder->crtc = crtc; | 5724 | encoder->crtc = crtc; |
4487 | connector->encoder = encoder; | 5725 | connector->encoder = encoder; |
4488 | intel_encoder->load_detect_temp = true; | ||
4489 | 5726 | ||
4490 | intel_crtc = to_intel_crtc(crtc); | 5727 | intel_crtc = to_intel_crtc(crtc); |
4491 | *dpms_mode = intel_crtc->dpms_mode; | 5728 | old->dpms_mode = intel_crtc->dpms_mode; |
5729 | old->load_detect_temp = true; | ||
5730 | old->release_fb = NULL; | ||
4492 | 5731 | ||
4493 | if (!crtc->enabled) { | 5732 | if (!mode) |
4494 | if (!mode) | 5733 | mode = &load_detect_mode; |
4495 | mode = &load_detect_mode; | 5734 | |
4496 | drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb); | 5735 | old_fb = crtc->fb; |
4497 | } else { | ||
4498 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { | ||
4499 | crtc_funcs = crtc->helper_private; | ||
4500 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | ||
4501 | } | ||
4502 | 5736 | ||
4503 | /* Add this connector to the crtc */ | 5737 | /* We need a framebuffer large enough to accommodate all accesses |
4504 | encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode); | 5738 | * that the plane may generate whilst we perform load detection. |
4505 | encoder_funcs->commit(encoder); | 5739 | * We can not rely on the fbcon either being present (we get called |
5740 | * during its initialisation to detect all boot displays, or it may | ||
5741 | * not even exist) or that it is large enough to satisfy the | ||
5742 | * requested mode. | ||
5743 | */ | ||
5744 | crtc->fb = mode_fits_in_fbdev(dev, mode); | ||
5745 | if (crtc->fb == NULL) { | ||
5746 | DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); | ||
5747 | crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); | ||
5748 | old->release_fb = crtc->fb; | ||
5749 | } else | ||
5750 | DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); | ||
5751 | if (IS_ERR(crtc->fb)) { | ||
5752 | DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); | ||
5753 | crtc->fb = old_fb; | ||
5754 | return false; | ||
4506 | } | 5755 | } |
5756 | |||
5757 | if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { | ||
5758 | DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); | ||
5759 | if (old->release_fb) | ||
5760 | old->release_fb->funcs->destroy(old->release_fb); | ||
5761 | crtc->fb = old_fb; | ||
5762 | return false; | ||
5763 | } | ||
5764 | |||
4507 | /* let the connector get through one full cycle before testing */ | 5765 | /* let the connector get through one full cycle before testing */ |
4508 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 5766 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
4509 | 5767 | ||
4510 | return crtc; | 5768 | return true; |
4511 | } | 5769 | } |
4512 | 5770 | ||
4513 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | 5771 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
4514 | struct drm_connector *connector, int dpms_mode) | 5772 | struct drm_connector *connector, |
5773 | struct intel_load_detect_pipe *old) | ||
4515 | { | 5774 | { |
4516 | struct drm_encoder *encoder = &intel_encoder->enc; | 5775 | struct drm_encoder *encoder = &intel_encoder->base; |
4517 | struct drm_device *dev = encoder->dev; | 5776 | struct drm_device *dev = encoder->dev; |
4518 | struct drm_crtc *crtc = encoder->crtc; | 5777 | struct drm_crtc *crtc = encoder->crtc; |
4519 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 5778 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
4520 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | 5779 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
4521 | 5780 | ||
4522 | if (intel_encoder->load_detect_temp) { | 5781 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", |
4523 | encoder->crtc = NULL; | 5782 | connector->base.id, drm_get_connector_name(connector), |
5783 | encoder->base.id, drm_get_encoder_name(encoder)); | ||
5784 | |||
5785 | if (old->load_detect_temp) { | ||
4524 | connector->encoder = NULL; | 5786 | connector->encoder = NULL; |
4525 | intel_encoder->load_detect_temp = false; | ||
4526 | crtc->enabled = drm_helper_crtc_in_use(crtc); | ||
4527 | drm_helper_disable_unused_functions(dev); | 5787 | drm_helper_disable_unused_functions(dev); |
5788 | |||
5789 | if (old->release_fb) | ||
5790 | old->release_fb->funcs->destroy(old->release_fb); | ||
5791 | |||
5792 | return; | ||
4528 | } | 5793 | } |
4529 | 5794 | ||
4530 | /* Switch crtc and encoder back off if necessary */ | 5795 | /* Switch crtc and encoder back off if necessary */ |
4531 | if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { | 5796 | if (old->dpms_mode != DRM_MODE_DPMS_ON) { |
4532 | if (encoder->crtc == crtc) | 5797 | encoder_funcs->dpms(encoder, old->dpms_mode); |
4533 | encoder_funcs->dpms(encoder, dpms_mode); | 5798 | crtc_funcs->dpms(crtc, old->dpms_mode); |
4534 | crtc_funcs->dpms(crtc, dpms_mode); | ||
4535 | } | 5799 | } |
4536 | } | 5800 | } |
4537 | 5801 | ||
@@ -4541,14 +5805,14 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
4541 | struct drm_i915_private *dev_priv = dev->dev_private; | 5805 | struct drm_i915_private *dev_priv = dev->dev_private; |
4542 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5806 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4543 | int pipe = intel_crtc->pipe; | 5807 | int pipe = intel_crtc->pipe; |
4544 | u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B); | 5808 | u32 dpll = I915_READ(DPLL(pipe)); |
4545 | u32 fp; | 5809 | u32 fp; |
4546 | intel_clock_t clock; | 5810 | intel_clock_t clock; |
4547 | 5811 | ||
4548 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) | 5812 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) |
4549 | fp = I915_READ((pipe == 0) ? FPA0 : FPB0); | 5813 | fp = I915_READ(FP0(pipe)); |
4550 | else | 5814 | else |
4551 | fp = I915_READ((pipe == 0) ? FPA1 : FPB1); | 5815 | fp = I915_READ(FP1(pipe)); |
4552 | 5816 | ||
4553 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; | 5817 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; |
4554 | if (IS_PINEVIEW(dev)) { | 5818 | if (IS_PINEVIEW(dev)) { |
@@ -4559,7 +5823,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
4559 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; | 5823 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; |
4560 | } | 5824 | } |
4561 | 5825 | ||
4562 | if (IS_I9XX(dev)) { | 5826 | if (!IS_GEN2(dev)) { |
4563 | if (IS_PINEVIEW(dev)) | 5827 | if (IS_PINEVIEW(dev)) |
4564 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> | 5828 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> |
4565 | DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); | 5829 | DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); |
@@ -4630,10 +5894,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
4630 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5894 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4631 | int pipe = intel_crtc->pipe; | 5895 | int pipe = intel_crtc->pipe; |
4632 | struct drm_display_mode *mode; | 5896 | struct drm_display_mode *mode; |
4633 | int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); | 5897 | int htot = I915_READ(HTOTAL(pipe)); |
4634 | int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B); | 5898 | int hsync = I915_READ(HSYNC(pipe)); |
4635 | int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); | 5899 | int vtot = I915_READ(VTOTAL(pipe)); |
4636 | int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B); | 5900 | int vsync = I915_READ(VSYNC(pipe)); |
4637 | 5901 | ||
4638 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 5902 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
4639 | if (!mode) | 5903 | if (!mode) |
@@ -4663,10 +5927,14 @@ static void intel_gpu_idle_timer(unsigned long arg) | |||
4663 | struct drm_device *dev = (struct drm_device *)arg; | 5927 | struct drm_device *dev = (struct drm_device *)arg; |
4664 | drm_i915_private_t *dev_priv = dev->dev_private; | 5928 | drm_i915_private_t *dev_priv = dev->dev_private; |
4665 | 5929 | ||
4666 | DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); | 5930 | if (!list_empty(&dev_priv->mm.active_list)) { |
5931 | /* Still processing requests, so just re-arm the timer. */ | ||
5932 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
5933 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
5934 | return; | ||
5935 | } | ||
4667 | 5936 | ||
4668 | dev_priv->busy = false; | 5937 | dev_priv->busy = false; |
4669 | |||
4670 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 5938 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
4671 | } | 5939 | } |
4672 | 5940 | ||
@@ -4677,22 +5945,28 @@ static void intel_crtc_idle_timer(unsigned long arg) | |||
4677 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; | 5945 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; |
4678 | struct drm_crtc *crtc = &intel_crtc->base; | 5946 | struct drm_crtc *crtc = &intel_crtc->base; |
4679 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; | 5947 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; |
5948 | struct intel_framebuffer *intel_fb; | ||
4680 | 5949 | ||
4681 | DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); | 5950 | intel_fb = to_intel_framebuffer(crtc->fb); |
5951 | if (intel_fb && intel_fb->obj->active) { | ||
5952 | /* The framebuffer is still being accessed by the GPU. */ | ||
5953 | mod_timer(&intel_crtc->idle_timer, jiffies + | ||
5954 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | ||
5955 | return; | ||
5956 | } | ||
4682 | 5957 | ||
4683 | intel_crtc->busy = false; | 5958 | intel_crtc->busy = false; |
4684 | |||
4685 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 5959 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
4686 | } | 5960 | } |
4687 | 5961 | ||
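
Both idle timers above follow the same pattern: if the GPU (or the scanout buffer) is still active when the timer fires, the deadline is simply pushed out by another timeout; only a genuinely quiet interval marks the device idle and queues the work item that downclocks. A minimal sketch of that pattern, with illustrative names rather than the driver's types:

/* Sketch of the re-arming idle timer used above: activity pushes the
 * deadline out; only a full quiet timeout declares the device idle. */
struct sketch_idle_state {
	int busy;
	unsigned long deadline;	/* in jiffies-like ticks */
};

static void sketch_idle_timer_fired(struct sketch_idle_state *s,
				    int still_active,
				    unsigned long now, unsigned long timeout)
{
	if (still_active) {
		s->deadline = now + timeout;	/* re-arm, as mod_timer() does above */
		return;
	}
	s->busy = 0;	/* idle: the deferred work may now downclock the PLL */
}
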
4688 | static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | 5962 | static void intel_increase_pllclock(struct drm_crtc *crtc) |
4689 | { | 5963 | { |
4690 | struct drm_device *dev = crtc->dev; | 5964 | struct drm_device *dev = crtc->dev; |
4691 | drm_i915_private_t *dev_priv = dev->dev_private; | 5965 | drm_i915_private_t *dev_priv = dev->dev_private; |
4692 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5966 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4693 | int pipe = intel_crtc->pipe; | 5967 | int pipe = intel_crtc->pipe; |
4694 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 5968 | int dpll_reg = DPLL(pipe); |
4695 | int dpll = I915_READ(dpll_reg); | 5969 | int dpll; |
4696 | 5970 | ||
4697 | if (HAS_PCH_SPLIT(dev)) | 5971 | if (HAS_PCH_SPLIT(dev)) |
4698 | return; | 5972 | return; |
@@ -4700,17 +5974,18 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
4700 | if (!dev_priv->lvds_downclock_avail) | 5974 | if (!dev_priv->lvds_downclock_avail) |
4701 | return; | 5975 | return; |
4702 | 5976 | ||
5977 | dpll = I915_READ(dpll_reg); | ||
4703 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { | 5978 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
4704 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | 5979 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
4705 | 5980 | ||
4706 | /* Unlock panel regs */ | 5981 | /* Unlock panel regs */ |
4707 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | | 5982 | I915_WRITE(PP_CONTROL, |
4708 | PANEL_UNLOCK_REGS); | 5983 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); |
4709 | 5984 | ||
4710 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 5985 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
4711 | I915_WRITE(dpll_reg, dpll); | 5986 | I915_WRITE(dpll_reg, dpll); |
4712 | dpll = I915_READ(dpll_reg); | ||
4713 | intel_wait_for_vblank(dev, pipe); | 5987 | intel_wait_for_vblank(dev, pipe); |
5988 | |||
4714 | dpll = I915_READ(dpll_reg); | 5989 | dpll = I915_READ(dpll_reg); |
4715 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | 5990 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
4716 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); | 5991 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
@@ -4720,9 +5995,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
4720 | } | 5995 | } |
4721 | 5996 | ||
4722 | /* Schedule downclock */ | 5997 | /* Schedule downclock */ |
4723 | if (schedule) | 5998 | mod_timer(&intel_crtc->idle_timer, jiffies + |
4724 | mod_timer(&intel_crtc->idle_timer, jiffies + | 5999 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); |
4725 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | ||
4726 | } | 6000 | } |
4727 | 6001 | ||
4728 | static void intel_decrease_pllclock(struct drm_crtc *crtc) | 6002 | static void intel_decrease_pllclock(struct drm_crtc *crtc) |
@@ -4731,7 +6005,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
4731 | drm_i915_private_t *dev_priv = dev->dev_private; | 6005 | drm_i915_private_t *dev_priv = dev->dev_private; |
4732 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6006 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4733 | int pipe = intel_crtc->pipe; | 6007 | int pipe = intel_crtc->pipe; |
4734 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 6008 | int dpll_reg = DPLL(pipe); |
4735 | int dpll = I915_READ(dpll_reg); | 6009 | int dpll = I915_READ(dpll_reg); |
4736 | 6010 | ||
4737 | if (HAS_PCH_SPLIT(dev)) | 6011 | if (HAS_PCH_SPLIT(dev)) |
@@ -4753,7 +6027,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
4753 | 6027 | ||
4754 | dpll |= DISPLAY_RATE_SELECT_FPA1; | 6028 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
4755 | I915_WRITE(dpll_reg, dpll); | 6029 | I915_WRITE(dpll_reg, dpll); |
4756 | dpll = I915_READ(dpll_reg); | ||
4757 | intel_wait_for_vblank(dev, pipe); | 6030 | intel_wait_for_vblank(dev, pipe); |
4758 | dpll = I915_READ(dpll_reg); | 6031 | dpll = I915_READ(dpll_reg); |
4759 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) | 6032 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) |
@@ -4779,7 +6052,6 @@ static void intel_idle_update(struct work_struct *work) | |||
4779 | struct drm_device *dev = dev_priv->dev; | 6052 | struct drm_device *dev = dev_priv->dev; |
4780 | struct drm_crtc *crtc; | 6053 | struct drm_crtc *crtc; |
4781 | struct intel_crtc *intel_crtc; | 6054 | struct intel_crtc *intel_crtc; |
4782 | int enabled = 0; | ||
4783 | 6055 | ||
4784 | if (!i915_powersave) | 6056 | if (!i915_powersave) |
4785 | return; | 6057 | return; |
@@ -4793,16 +6065,11 @@ static void intel_idle_update(struct work_struct *work) | |||
4793 | if (!crtc->fb) | 6065 | if (!crtc->fb) |
4794 | continue; | 6066 | continue; |
4795 | 6067 | ||
4796 | enabled++; | ||
4797 | intel_crtc = to_intel_crtc(crtc); | 6068 | intel_crtc = to_intel_crtc(crtc); |
4798 | if (!intel_crtc->busy) | 6069 | if (!intel_crtc->busy) |
4799 | intel_decrease_pllclock(crtc); | 6070 | intel_decrease_pllclock(crtc); |
4800 | } | 6071 | } |
4801 | 6072 | ||
4802 | if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) { | ||
4803 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); | ||
4804 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
4805 | } | ||
4806 | 6073 | ||
4807 | mutex_unlock(&dev->struct_mutex); | 6074 | mutex_unlock(&dev->struct_mutex); |
4808 | } | 6075 | } |
@@ -4817,7 +6084,7 @@ static void intel_idle_update(struct work_struct *work) | |||
4817 | * buffer), we'll also mark the display as busy, so we know to increase its | 6084 | * buffer), we'll also mark the display as busy, so we know to increase its |
4818 | * clock frequency. | 6085 | * clock frequency. |
4819 | */ | 6086 | */ |
4820 | void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | 6087 | void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) |
4821 | { | 6088 | { |
4822 | drm_i915_private_t *dev_priv = dev->dev_private; | 6089 | drm_i915_private_t *dev_priv = dev->dev_private; |
4823 | struct drm_crtc *crtc = NULL; | 6090 | struct drm_crtc *crtc = NULL; |
@@ -4827,17 +6094,9 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
4827 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 6094 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4828 | return; | 6095 | return; |
4829 | 6096 | ||
4830 | if (!dev_priv->busy) { | 6097 | if (!dev_priv->busy) |
4831 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
4832 | u32 fw_blc_self; | ||
4833 | |||
4834 | DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); | ||
4835 | fw_blc_self = I915_READ(FW_BLC_SELF); | ||
4836 | fw_blc_self &= ~FW_BLC_SELF_EN; | ||
4837 | I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); | ||
4838 | } | ||
4839 | dev_priv->busy = true; | 6098 | dev_priv->busy = true; |
4840 | } else | 6099 | else |
4841 | mod_timer(&dev_priv->idle_timer, jiffies + | 6100 | mod_timer(&dev_priv->idle_timer, jiffies + |
4842 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | 6101 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); |
4843 | 6102 | ||
@@ -4849,16 +6108,8 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
4849 | intel_fb = to_intel_framebuffer(crtc->fb); | 6108 | intel_fb = to_intel_framebuffer(crtc->fb); |
4850 | if (intel_fb->obj == obj) { | 6109 | if (intel_fb->obj == obj) { |
4851 | if (!intel_crtc->busy) { | 6110 | if (!intel_crtc->busy) { |
4852 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
4853 | u32 fw_blc_self; | ||
4854 | |||
4855 | DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); | ||
4856 | fw_blc_self = I915_READ(FW_BLC_SELF); | ||
4857 | fw_blc_self &= ~FW_BLC_SELF_EN; | ||
4858 | I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); | ||
4859 | } | ||
4860 | /* Non-busy -> busy, upclock */ | 6111 | /* Non-busy -> busy, upclock */ |
4861 | intel_increase_pllclock(crtc, true); | 6112 | intel_increase_pllclock(crtc); |
4862 | intel_crtc->busy = true; | 6113 | intel_crtc->busy = true; |
4863 | } else { | 6114 | } else { |
4864 | /* Busy -> busy, put off timer */ | 6115 | /* Busy -> busy, put off timer */ |
@@ -4872,8 +6123,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
4872 | static void intel_crtc_destroy(struct drm_crtc *crtc) | 6123 | static void intel_crtc_destroy(struct drm_crtc *crtc) |
4873 | { | 6124 | { |
4874 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6125 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6126 | struct drm_device *dev = crtc->dev; | ||
6127 | struct intel_unpin_work *work; | ||
6128 | unsigned long flags; | ||
6129 | |||
6130 | spin_lock_irqsave(&dev->event_lock, flags); | ||
6131 | work = intel_crtc->unpin_work; | ||
6132 | intel_crtc->unpin_work = NULL; | ||
6133 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
6134 | |||
6135 | if (work) { | ||
6136 | cancel_work_sync(&work->work); | ||
6137 | kfree(work); | ||
6138 | } | ||
4875 | 6139 | ||
4876 | drm_crtc_cleanup(crtc); | 6140 | drm_crtc_cleanup(crtc); |
6141 | |||
4877 | kfree(intel_crtc); | 6142 | kfree(intel_crtc); |
4878 | } | 6143 | } |
4879 | 6144 | ||
@@ -4884,8 +6149,9 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
4884 | 6149 | ||
4885 | mutex_lock(&work->dev->struct_mutex); | 6150 | mutex_lock(&work->dev->struct_mutex); |
4886 | i915_gem_object_unpin(work->old_fb_obj); | 6151 | i915_gem_object_unpin(work->old_fb_obj); |
4887 | drm_gem_object_unreference(work->pending_flip_obj); | 6152 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
4888 | drm_gem_object_unreference(work->old_fb_obj); | 6153 | drm_gem_object_unreference(&work->old_fb_obj->base); |
6154 | |||
4889 | mutex_unlock(&work->dev->struct_mutex); | 6155 | mutex_unlock(&work->dev->struct_mutex); |
4890 | kfree(work); | 6156 | kfree(work); |
4891 | } | 6157 | } |
@@ -4896,15 +6162,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
4896 | drm_i915_private_t *dev_priv = dev->dev_private; | 6162 | drm_i915_private_t *dev_priv = dev->dev_private; |
4897 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6163 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4898 | struct intel_unpin_work *work; | 6164 | struct intel_unpin_work *work; |
4899 | struct drm_i915_gem_object *obj_priv; | 6165 | struct drm_i915_gem_object *obj; |
4900 | struct drm_pending_vblank_event *e; | 6166 | struct drm_pending_vblank_event *e; |
4901 | struct timeval now; | 6167 | struct timeval tnow, tvbl; |
4902 | unsigned long flags; | 6168 | unsigned long flags; |
4903 | 6169 | ||
4904 | /* Ignore early vblank irqs */ | 6170 | /* Ignore early vblank irqs */ |
4905 | if (intel_crtc == NULL) | 6171 | if (intel_crtc == NULL) |
4906 | return; | 6172 | return; |
4907 | 6173 | ||
6174 | do_gettimeofday(&tnow); | ||
6175 | |||
4908 | spin_lock_irqsave(&dev->event_lock, flags); | 6176 | spin_lock_irqsave(&dev->event_lock, flags); |
4909 | work = intel_crtc->unpin_work; | 6177 | work = intel_crtc->unpin_work; |
4910 | if (work == NULL || !work->pending) { | 6178 | if (work == NULL || !work->pending) { |
@@ -4913,27 +6181,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
4913 | } | 6181 | } |
4914 | 6182 | ||
4915 | intel_crtc->unpin_work = NULL; | 6183 | intel_crtc->unpin_work = NULL; |
4916 | drm_vblank_put(dev, intel_crtc->pipe); | ||
4917 | 6184 | ||
4918 | if (work->event) { | 6185 | if (work->event) { |
4919 | e = work->event; | 6186 | e = work->event; |
4920 | do_gettimeofday(&now); | 6187 | e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); |
4921 | e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe); | 6188 | |
4922 | e->event.tv_sec = now.tv_sec; | 6189 | /* Called before vblank count and timestamps have |
4923 | e->event.tv_usec = now.tv_usec; | 6190 | * been updated for the vblank interval of flip |
6191 | * completion? Need to increment vblank count and | ||
6192 | * add one videorefresh duration to returned timestamp | ||
6193 | * to account for this. We assume this happened if we | ||
6194 | * get called over 0.9 frame durations after the last | ||
6195 | * timestamped vblank. | ||
6196 | * | ||
6197 | * This calculation cannot be used with vrefresh rates | ||
6198 | * below 5Hz (10Hz to be on the safe side) without | ||
6199 | * promoting to 64-bit integers. | ||
6200 | */ | ||
6201 | if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > | ||
6202 | 9 * crtc->framedur_ns) { | ||
6203 | e->event.sequence++; | ||
6204 | tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + | ||
6205 | crtc->framedur_ns); | ||
6206 | } | ||
6207 | |||
6208 | e->event.tv_sec = tvbl.tv_sec; | ||
6209 | e->event.tv_usec = tvbl.tv_usec; | ||
6210 | |||
4924 | list_add_tail(&e->base.link, | 6211 | list_add_tail(&e->base.link, |
4925 | &e->base.file_priv->event_list); | 6212 | &e->base.file_priv->event_list); |
4926 | wake_up_interruptible(&e->base.file_priv->event_wait); | 6213 | wake_up_interruptible(&e->base.file_priv->event_wait); |
4927 | } | 6214 | } |
4928 | 6215 | ||
6216 | drm_vblank_put(dev, intel_crtc->pipe); | ||
6217 | |||
4929 | spin_unlock_irqrestore(&dev->event_lock, flags); | 6218 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4930 | 6219 | ||
4931 | obj_priv = to_intel_bo(work->pending_flip_obj); | 6220 | obj = work->old_fb_obj; |
6221 | |||
6222 | atomic_clear_mask(1 << intel_crtc->plane, | ||
6223 | &obj->pending_flip.counter); | ||
6224 | if (atomic_read(&obj->pending_flip) == 0) | ||
6225 | wake_up(&dev_priv->pending_flip_queue); | ||
4932 | 6226 | ||
4933 | /* Initial scanout buffer will have a 0 pending flip count */ | ||
4934 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | ||
4935 | atomic_dec_and_test(&obj_priv->pending_flip)) | ||
4936 | DRM_WAKEUP(&dev_priv->pending_flip_queue); | ||
4937 | schedule_work(&work->work); | 6227 | schedule_work(&work->work); |
4938 | 6228 | ||
4939 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); | 6229 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); |
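
The comment block above encodes a 0.9-frame threshold without floating point: 10 * (tnow - tvbl) > 9 * framedur_ns is the same test as (tnow - tvbl) > 0.9 * framedur_ns. As a standalone illustration with hypothetical names (not driver code), at a 60 Hz refresh framedur_ns is roughly 16,666,667 ns, so the sequence/timestamp correction only applies when the flip-complete interrupt arrives more than about 15 ms after the last recorded vblank:

/* Sketch of the late-completion check above; now_ns and vbl_ns are
 * nanosecond timestamps, framedur_ns the frame duration (~16666667 ns at
 * 60 Hz). Returns nonzero when more than 0.9 frame durations have elapsed,
 * i.e. the vblank count and timestamp were not yet advanced for this frame. */
static int sketch_vblank_not_yet_updated(long long now_ns, long long vbl_ns,
					 int framedur_ns)
{
	return 10 * (now_ns - vbl_ns) > 9 * (long long)framedur_ns;
}
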
@@ -4972,6 +6262,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) | |||
4972 | spin_unlock_irqrestore(&dev->event_lock, flags); | 6262 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4973 | } | 6263 | } |
4974 | 6264 | ||
6265 | static int intel_gen2_queue_flip(struct drm_device *dev, | ||
6266 | struct drm_crtc *crtc, | ||
6267 | struct drm_framebuffer *fb, | ||
6268 | struct drm_i915_gem_object *obj) | ||
6269 | { | ||
6270 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6271 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6272 | unsigned long offset; | ||
6273 | u32 flip_mask; | ||
6274 | int ret; | ||
6275 | |||
6276 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | ||
6277 | if (ret) | ||
6278 | goto out; | ||
6279 | |||
6280 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | ||
6281 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; | ||
6282 | |||
6283 | ret = BEGIN_LP_RING(6); | ||
6284 | if (ret) | ||
6285 | goto out; | ||
6286 | |||
6287 | /* Can't queue multiple flips, so wait for the previous | ||
6288 | * one to finish before executing the next. | ||
6289 | */ | ||
6290 | if (intel_crtc->plane) | ||
6291 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
6292 | else | ||
6293 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
6294 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); | ||
6295 | OUT_RING(MI_NOOP); | ||
6296 | OUT_RING(MI_DISPLAY_FLIP | | ||
6297 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
6298 | OUT_RING(fb->pitch); | ||
6299 | OUT_RING(obj->gtt_offset + offset); | ||
6300 | OUT_RING(MI_NOOP); | ||
6301 | ADVANCE_LP_RING(); | ||
6302 | out: | ||
6303 | return ret; | ||
6304 | } | ||
6305 | |||
6306 | static int intel_gen3_queue_flip(struct drm_device *dev, | ||
6307 | struct drm_crtc *crtc, | ||
6308 | struct drm_framebuffer *fb, | ||
6309 | struct drm_i915_gem_object *obj) | ||
6310 | { | ||
6311 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6312 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6313 | unsigned long offset; | ||
6314 | u32 flip_mask; | ||
6315 | int ret; | ||
6316 | |||
6317 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | ||
6318 | if (ret) | ||
6319 | goto out; | ||
6320 | |||
6321 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | ||
6322 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; | ||
6323 | |||
6324 | ret = BEGIN_LP_RING(6); | ||
6325 | if (ret) | ||
6326 | goto out; | ||
6327 | |||
6328 | if (intel_crtc->plane) | ||
6329 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
6330 | else | ||
6331 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
6332 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); | ||
6333 | OUT_RING(MI_NOOP); | ||
6334 | OUT_RING(MI_DISPLAY_FLIP_I915 | | ||
6335 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
6336 | OUT_RING(fb->pitch); | ||
6337 | OUT_RING(obj->gtt_offset + offset); | ||
6338 | OUT_RING(MI_NOOP); | ||
6339 | |||
6340 | ADVANCE_LP_RING(); | ||
6341 | out: | ||
6342 | return ret; | ||
6343 | } | ||
6344 | |||
6345 | static int intel_gen4_queue_flip(struct drm_device *dev, | ||
6346 | struct drm_crtc *crtc, | ||
6347 | struct drm_framebuffer *fb, | ||
6348 | struct drm_i915_gem_object *obj) | ||
6349 | { | ||
6350 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6351 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6352 | uint32_t pf, pipesrc; | ||
6353 | int ret; | ||
6354 | |||
6355 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | ||
6356 | if (ret) | ||
6357 | goto out; | ||
6358 | |||
6359 | ret = BEGIN_LP_RING(4); | ||
6360 | if (ret) | ||
6361 | goto out; | ||
6362 | |||
6363 | /* i965+ uses the linear or tiled offsets from the | ||
6364 | * Display Registers (which do not change across a page-flip) | ||
6365 | * so we need only reprogram the base address. | ||
6366 | */ | ||
6367 | OUT_RING(MI_DISPLAY_FLIP | | ||
6368 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
6369 | OUT_RING(fb->pitch); | ||
6370 | OUT_RING(obj->gtt_offset | obj->tiling_mode); | ||
6371 | |||
6372 | /* XXX Enabling the panel-fitter across page-flip is so far | ||
6373 | * untested on non-native modes, so ignore it for now. | ||
6374 | * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | ||
6375 | */ | ||
6376 | pf = 0; | ||
6377 | pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; | ||
6378 | OUT_RING(pf | pipesrc); | ||
6379 | ADVANCE_LP_RING(); | ||
6380 | out: | ||
6381 | return ret; | ||
6382 | } | ||
6383 | |||
6384 | static int intel_gen6_queue_flip(struct drm_device *dev, | ||
6385 | struct drm_crtc *crtc, | ||
6386 | struct drm_framebuffer *fb, | ||
6387 | struct drm_i915_gem_object *obj) | ||
6388 | { | ||
6389 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6390 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6391 | uint32_t pf, pipesrc; | ||
6392 | int ret; | ||
6393 | |||
6394 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | ||
6395 | if (ret) | ||
6396 | goto out; | ||
6397 | |||
6398 | ret = BEGIN_LP_RING(4); | ||
6399 | if (ret) | ||
6400 | goto out; | ||
6401 | |||
6402 | OUT_RING(MI_DISPLAY_FLIP | | ||
6403 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
6404 | OUT_RING(fb->pitch | obj->tiling_mode); | ||
6405 | OUT_RING(obj->gtt_offset); | ||
6406 | |||
6407 | pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; | ||
6408 | pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; | ||
6409 | OUT_RING(pf | pipesrc); | ||
6410 | ADVANCE_LP_RING(); | ||
6411 | out: | ||
6412 | return ret; | ||
6413 | } | ||
6414 | |||
6415 | /* | ||
6416 | * On gen7 we currently use the blit ring because (in early silicon at least) | ||
6417 | * the render ring doesn't give us interrupts for page flip completion, which | ||
6418 | * means clients will hang after the first flip is queued. Fortunately the | ||
6419 | * blit ring generates interrupts properly, so use it instead. | ||
6420 | */ | ||
6421 | static int intel_gen7_queue_flip(struct drm_device *dev, | ||
6422 | struct drm_crtc *crtc, | ||
6423 | struct drm_framebuffer *fb, | ||
6424 | struct drm_i915_gem_object *obj) | ||
6425 | { | ||
6426 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6427 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6428 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; | ||
6429 | int ret; | ||
6430 | |||
6431 | ret = intel_pin_and_fence_fb_obj(dev, obj, ring); | ||
6432 | if (ret) | ||
6433 | goto out; | ||
6434 | |||
6435 | ret = intel_ring_begin(ring, 4); | ||
6436 | if (ret) | ||
6437 | goto out; | ||
6438 | |||
6439 | intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); | ||
6440 | intel_ring_emit(ring, (fb->pitch | obj->tiling_mode)); | ||
6441 | intel_ring_emit(ring, (obj->gtt_offset)); | ||
6442 | intel_ring_emit(ring, (MI_NOOP)); | ||
6443 | intel_ring_advance(ring); | ||
6444 | out: | ||
6445 | return ret; | ||
6446 | } | ||
6447 | |||
6448 | static int intel_default_queue_flip(struct drm_device *dev, | ||
6449 | struct drm_crtc *crtc, | ||
6450 | struct drm_framebuffer *fb, | ||
6451 | struct drm_i915_gem_object *obj) | ||
6452 | { | ||
6453 | return -ENODEV; | ||
6454 | } | ||
6455 | |||
4975 | static int intel_crtc_page_flip(struct drm_crtc *crtc, | 6456 | static int intel_crtc_page_flip(struct drm_crtc *crtc, |
4976 | struct drm_framebuffer *fb, | 6457 | struct drm_framebuffer *fb, |
4977 | struct drm_pending_vblank_event *event) | 6458 | struct drm_pending_vblank_event *event) |
@@ -4979,13 +6460,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4979 | struct drm_device *dev = crtc->dev; | 6460 | struct drm_device *dev = crtc->dev; |
4980 | struct drm_i915_private *dev_priv = dev->dev_private; | 6461 | struct drm_i915_private *dev_priv = dev->dev_private; |
4981 | struct intel_framebuffer *intel_fb; | 6462 | struct intel_framebuffer *intel_fb; |
4982 | struct drm_i915_gem_object *obj_priv; | 6463 | struct drm_i915_gem_object *obj; |
4983 | struct drm_gem_object *obj; | ||
4984 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6464 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4985 | struct intel_unpin_work *work; | 6465 | struct intel_unpin_work *work; |
4986 | unsigned long flags, offset; | 6466 | unsigned long flags; |
4987 | int pipe = intel_crtc->pipe; | ||
4988 | u32 pf, pipesrc; | ||
4989 | int ret; | 6467 | int ret; |
4990 | 6468 | ||
4991 | work = kzalloc(sizeof *work, GFP_KERNEL); | 6469 | work = kzalloc(sizeof *work, GFP_KERNEL); |
@@ -5014,96 +6492,29 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5014 | obj = intel_fb->obj; | 6492 | obj = intel_fb->obj; |
5015 | 6493 | ||
5016 | mutex_lock(&dev->struct_mutex); | 6494 | mutex_lock(&dev->struct_mutex); |
5017 | ret = intel_pin_and_fence_fb_obj(dev, obj); | ||
5018 | if (ret) | ||
5019 | goto cleanup_work; | ||
5020 | 6495 | ||
5021 | /* Reference the objects for the scheduled work. */ | 6496 | /* Reference the objects for the scheduled work. */ |
5022 | drm_gem_object_reference(work->old_fb_obj); | 6497 | drm_gem_object_reference(&work->old_fb_obj->base); |
5023 | drm_gem_object_reference(obj); | 6498 | drm_gem_object_reference(&obj->base); |
5024 | 6499 | ||
5025 | crtc->fb = fb; | 6500 | crtc->fb = fb; |
5026 | ret = i915_gem_object_flush_write_domain(obj); | ||
5027 | if (ret) | ||
5028 | goto cleanup_objs; | ||
5029 | 6501 | ||
5030 | ret = drm_vblank_get(dev, intel_crtc->pipe); | 6502 | ret = drm_vblank_get(dev, intel_crtc->pipe); |
5031 | if (ret) | 6503 | if (ret) |
5032 | goto cleanup_objs; | 6504 | goto cleanup_objs; |
5033 | 6505 | ||
5034 | obj_priv = to_intel_bo(obj); | ||
5035 | atomic_inc(&obj_priv->pending_flip); | ||
5036 | work->pending_flip_obj = obj; | 6506 | work->pending_flip_obj = obj; |
5037 | 6507 | ||
5038 | if (IS_GEN3(dev) || IS_GEN2(dev)) { | ||
5039 | u32 flip_mask; | ||
5040 | |||
5041 | if (intel_crtc->plane) | ||
5042 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
5043 | else | ||
5044 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
5045 | |||
5046 | BEGIN_LP_RING(2); | ||
5047 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); | ||
5048 | OUT_RING(0); | ||
5049 | ADVANCE_LP_RING(); | ||
5050 | } | ||
5051 | |||
5052 | work->enable_stall_check = true; | 6508 | work->enable_stall_check = true; |
5053 | 6509 | ||
5054 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | 6510 | /* Block clients from rendering to the new back buffer until |
5055 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; | 6511 | * the flip occurs and the object is no longer visible. |
5056 | 6512 | */ | |
5057 | BEGIN_LP_RING(4); | 6513 | atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); |
5058 | switch(INTEL_INFO(dev)->gen) { | ||
5059 | case 2: | ||
5060 | OUT_RING(MI_DISPLAY_FLIP | | ||
5061 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
5062 | OUT_RING(fb->pitch); | ||
5063 | OUT_RING(obj_priv->gtt_offset + offset); | ||
5064 | OUT_RING(MI_NOOP); | ||
5065 | break; | ||
5066 | |||
5067 | case 3: | ||
5068 | OUT_RING(MI_DISPLAY_FLIP_I915 | | ||
5069 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
5070 | OUT_RING(fb->pitch); | ||
5071 | OUT_RING(obj_priv->gtt_offset + offset); | ||
5072 | OUT_RING(MI_NOOP); | ||
5073 | break; | ||
5074 | |||
5075 | case 4: | ||
5076 | case 5: | ||
5077 | /* i965+ uses the linear or tiled offsets from the | ||
5078 | * Display Registers (which do not change across a page-flip) | ||
5079 | * so we need only reprogram the base address. | ||
5080 | */ | ||
5081 | OUT_RING(MI_DISPLAY_FLIP | | ||
5082 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
5083 | OUT_RING(fb->pitch); | ||
5084 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | ||
5085 | |||
5086 | /* XXX Enabling the panel-fitter across page-flip is so far | ||
5087 | * untested on non-native modes, so ignore it for now. | ||
5088 | * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | ||
5089 | */ | ||
5090 | pf = 0; | ||
5091 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | ||
5092 | OUT_RING(pf | pipesrc); | ||
5093 | break; | ||
5094 | 6514 | ||
5095 | case 6: | 6515 | ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); |
5096 | OUT_RING(MI_DISPLAY_FLIP | | 6516 | if (ret) |
5097 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 6517 | goto cleanup_pending; |
5098 | OUT_RING(fb->pitch | obj_priv->tiling_mode); | ||
5099 | OUT_RING(obj_priv->gtt_offset); | ||
5100 | |||
5101 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | ||
5102 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | ||
5103 | OUT_RING(pf | pipesrc); | ||
5104 | break; | ||
5105 | } | ||
5106 | ADVANCE_LP_RING(); | ||
5107 | 6518 | ||
5108 | mutex_unlock(&dev->struct_mutex); | 6519 | mutex_unlock(&dev->struct_mutex); |
5109 | 6520 | ||
@@ -5111,10 +6522,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5111 | 6522 | ||
5112 | return 0; | 6523 | return 0; |
5113 | 6524 | ||
6525 | cleanup_pending: | ||
6526 | atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); | ||
5114 | cleanup_objs: | 6527 | cleanup_objs: |
5115 | drm_gem_object_unreference(work->old_fb_obj); | 6528 | drm_gem_object_unreference(&work->old_fb_obj->base); |
5116 | drm_gem_object_unreference(obj); | 6529 | drm_gem_object_unreference(&obj->base); |
5117 | cleanup_work: | ||
5118 | mutex_unlock(&dev->struct_mutex); | 6530 | mutex_unlock(&dev->struct_mutex); |
5119 | 6531 | ||
5120 | spin_lock_irqsave(&dev->event_lock, flags); | 6532 | spin_lock_irqsave(&dev->event_lock, flags); |
@@ -5126,18 +6538,70 @@ cleanup_work: | |||
5126 | return ret; | 6538 | return ret; |
5127 | } | 6539 | } |
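The rewritten page-flip path above no longer emits MI_DISPLAY_FLIP commands inline; it marks the old framebuffer object busy by setting a per-plane bit in its pending_flip counter and hands the ring programming to a per-generation queue_flip hook. A minimal userspace sketch of that bookkeeping idea, using C11 atomics and hypothetical plane indices rather than the driver's real types:

#include <stdatomic.h>
#include <stdio.h>

/* One bit per display plane; a non-zero value means at least one
 * flip is still outstanding against this buffer. */
static atomic_uint pending_flip;

static void queue_flip_on_plane(int plane)
{
    atomic_fetch_add(&pending_flip, 1u << plane);  /* mirrors atomic_add(1 << plane, ...) */
}

static void complete_flip_on_plane(int plane)
{
    atomic_fetch_sub(&pending_flip, 1u << plane);  /* mirrors atomic_sub on the error path */
}

int main(void)
{
    queue_flip_on_plane(0);
    queue_flip_on_plane(1);
    complete_flip_on_plane(0);
    printf("pending_flip mask: 0x%x\n", atomic_load(&pending_flip)); /* prints 0x2 */
    return 0;
}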
5128 | 6540 | ||
5129 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { | 6541 | static void intel_sanitize_modesetting(struct drm_device *dev, |
6542 | int pipe, int plane) | ||
6543 | { | ||
6544 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6545 | u32 reg, val; | ||
6546 | |||
6547 | if (HAS_PCH_SPLIT(dev)) | ||
6548 | return; | ||
6549 | |||
6550 | /* Who knows what state these registers were left in by the BIOS or | ||
6551 | * grub? | ||
6552 | * | ||
6553 | * If we leave the registers in a conflicting state (e.g. with the | ||
6554 | * display plane reading from the other pipe than the one we intend | ||
6555 | * to use) then when we attempt to teardown the active mode, we will | ||
6556 | * not disable the pipes and planes in the correct order -- leaving | ||
6557 | * a plane reading from a disabled pipe and possibly leading to | ||
6558 | * undefined behaviour. | ||
6559 | */ | ||
6560 | |||
6561 | reg = DSPCNTR(plane); | ||
6562 | val = I915_READ(reg); | ||
6563 | |||
6564 | if ((val & DISPLAY_PLANE_ENABLE) == 0) | ||
6565 | return; | ||
6566 | if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) | ||
6567 | return; | ||
6568 | |||
6569 | /* This display plane is active and attached to the other CPU pipe. */ | ||
6570 | pipe = !pipe; | ||
6571 | |||
6572 | /* Disable the plane and wait for it to stop reading from the pipe. */ | ||
6573 | intel_disable_plane(dev_priv, plane, pipe); | ||
6574 | intel_disable_pipe(dev_priv, pipe); | ||
6575 | } | ||
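intel_sanitize_modesetting reads the plane control register and, when the plane is enabled but sourcing from the other pipe, disables that plane and then the pipe it is actually attached to. A standalone sketch of just the decision logic, with made-up bit positions (the real DSPCNTR layout lives in i915_reg.h):

#include <stdio.h>

#define PLANE_ENABLE     (1u << 31)  /* assumed bit position, illustration only */
#define PLANE_SEL_PIPE_B (1u << 24)  /* assumed: set = plane reads from pipe B  */

/* Returns the pipe that must be shut down, or -1 if the state is sane. */
static int conflicting_pipe(unsigned int plane_ctl, int expected_pipe)
{
    if (!(plane_ctl & PLANE_ENABLE))
        return -1;                      /* plane off: nothing to fix            */
    int selected = (plane_ctl & PLANE_SEL_PIPE_B) ? 1 : 0;
    if (selected == expected_pipe)
        return -1;                      /* already attached as expected         */
    return selected;                    /* disable the plane, then this pipe    */
}

int main(void)
{
    unsigned int bios_left_this = PLANE_ENABLE | PLANE_SEL_PIPE_B;
    printf("pipe to disable: %d\n", conflicting_pipe(bios_left_this, 0)); /* 1 */
    return 0;
}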
6576 | |||
6577 | static void intel_crtc_reset(struct drm_crtc *crtc) | ||
6578 | { | ||
6579 | struct drm_device *dev = crtc->dev; | ||
6580 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6581 | |||
6582 | /* Reset flags back to the 'unknown' status so that they | ||
6583 | * will be correctly set on the initial modeset. | ||
6584 | */ | ||
6585 | intel_crtc->dpms_mode = -1; | ||
6586 | |||
6587 | /* We need to fix up any BIOS configuration that conflicts with | ||
6588 | * our expectations. | ||
6589 | */ | ||
6590 | intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); | ||
6591 | } | ||
6592 | |||
6593 | static struct drm_crtc_helper_funcs intel_helper_funcs = { | ||
5130 | .dpms = intel_crtc_dpms, | 6594 | .dpms = intel_crtc_dpms, |
5131 | .mode_fixup = intel_crtc_mode_fixup, | 6595 | .mode_fixup = intel_crtc_mode_fixup, |
5132 | .mode_set = intel_crtc_mode_set, | 6596 | .mode_set = intel_crtc_mode_set, |
5133 | .mode_set_base = intel_pipe_set_base, | 6597 | .mode_set_base = intel_pipe_set_base, |
5134 | .mode_set_base_atomic = intel_pipe_set_base_atomic, | 6598 | .mode_set_base_atomic = intel_pipe_set_base_atomic, |
5135 | .prepare = intel_crtc_prepare, | ||
5136 | .commit = intel_crtc_commit, | ||
5137 | .load_lut = intel_crtc_load_lut, | 6599 | .load_lut = intel_crtc_load_lut, |
6600 | .disable = intel_crtc_disable, | ||
5138 | }; | 6601 | }; |
5139 | 6602 | ||
5140 | static const struct drm_crtc_funcs intel_crtc_funcs = { | 6603 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
6604 | .reset = intel_crtc_reset, | ||
5141 | .cursor_set = intel_crtc_cursor_set, | 6605 | .cursor_set = intel_crtc_cursor_set, |
5142 | .cursor_move = intel_crtc_cursor_move, | 6606 | .cursor_move = intel_crtc_cursor_move, |
5143 | .gamma_set = intel_crtc_gamma_set, | 6607 | .gamma_set = intel_crtc_gamma_set, |
@@ -5146,7 +6610,6 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { | |||
5146 | .page_flip = intel_crtc_page_flip, | 6610 | .page_flip = intel_crtc_page_flip, |
5147 | }; | 6611 | }; |
5148 | 6612 | ||
5149 | |||
5150 | static void intel_crtc_init(struct drm_device *dev, int pipe) | 6613 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
5151 | { | 6614 | { |
5152 | drm_i915_private_t *dev_priv = dev->dev_private; | 6615 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -5160,8 +6623,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5160 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); | 6623 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); |
5161 | 6624 | ||
5162 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); | 6625 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); |
5163 | intel_crtc->pipe = pipe; | ||
5164 | intel_crtc->plane = pipe; | ||
5165 | for (i = 0; i < 256; i++) { | 6626 | for (i = 0; i < 256; i++) { |
5166 | intel_crtc->lut_r[i] = i; | 6627 | intel_crtc->lut_r[i] = i; |
5167 | intel_crtc->lut_g[i] = i; | 6628 | intel_crtc->lut_g[i] = i; |
@@ -5171,9 +6632,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5171 | /* Swap pipes & planes for FBC on pre-965 */ | 6632 | /* Swap pipes & planes for FBC on pre-965 */ |
5172 | intel_crtc->pipe = pipe; | 6633 | intel_crtc->pipe = pipe; |
5173 | intel_crtc->plane = pipe; | 6634 | intel_crtc->plane = pipe; |
5174 | if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { | 6635 | if (IS_MOBILE(dev) && IS_GEN3(dev)) { |
5175 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); | 6636 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); |
5176 | intel_crtc->plane = ((pipe == 0) ? 1 : 0); | 6637 | intel_crtc->plane = !pipe; |
5177 | } | 6638 | } |
5178 | 6639 | ||
5179 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || | 6640 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || |
@@ -5181,8 +6642,17 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5181 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; | 6642 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; |
5182 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; | 6643 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; |
5183 | 6644 | ||
5184 | intel_crtc->cursor_addr = 0; | 6645 | intel_crtc_reset(&intel_crtc->base); |
5185 | intel_crtc->dpms_mode = -1; | 6646 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ |
6647 | |||
6648 | if (HAS_PCH_SPLIT(dev)) { | ||
6649 | intel_helper_funcs.prepare = ironlake_crtc_prepare; | ||
6650 | intel_helper_funcs.commit = ironlake_crtc_commit; | ||
6651 | } else { | ||
6652 | intel_helper_funcs.prepare = i9xx_crtc_prepare; | ||
6653 | intel_helper_funcs.commit = i9xx_crtc_commit; | ||
6654 | } | ||
6655 | |||
5186 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | 6656 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
5187 | 6657 | ||
5188 | intel_crtc->busy = false; | 6658 | intel_crtc->busy = false; |
@@ -5192,7 +6662,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5192 | } | 6662 | } |
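On mobile gen3 parts the CRTC init above deliberately attaches the plane to the other pipe so FBC can be used, and both the plane-to-crtc and pipe-to-crtc lookup tables are filled in either way. A small sketch of that bookkeeping with a hypothetical crtc type:

#include <stdbool.h>
#include <stdio.h>

struct crtc { int pipe; int plane; };

static struct crtc *plane_to_crtc[2];
static struct crtc *pipe_to_crtc[2];

static void crtc_init(struct crtc *c, int pipe, bool swap_for_fbc)
{
    c->pipe  = pipe;
    c->plane = swap_for_fbc ? !pipe : pipe;  /* mirrors intel_crtc->plane = !pipe */
    plane_to_crtc[c->plane] = c;
    pipe_to_crtc[c->pipe]   = c;
}

int main(void)
{
    struct crtc a, b;
    crtc_init(&a, 0, true);
    crtc_init(&b, 1, true);
    printf("pipe 0 drives plane %d\n", pipe_to_crtc[0]->plane);  /* prints 1 */
    return 0;
}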
5193 | 6663 | ||
5194 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 6664 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
5195 | struct drm_file *file_priv) | 6665 | struct drm_file *file) |
5196 | { | 6666 | { |
5197 | drm_i915_private_t *dev_priv = dev->dev_private; | 6667 | drm_i915_private_t *dev_priv = dev->dev_private; |
5198 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; | 6668 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; |
@@ -5218,47 +6688,56 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | |||
5218 | return 0; | 6688 | return 0; |
5219 | } | 6689 | } |
5220 | 6690 | ||
5221 | struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) | ||
5222 | { | ||
5223 | struct drm_crtc *crtc = NULL; | ||
5224 | |||
5225 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
5226 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5227 | if (intel_crtc->pipe == pipe) | ||
5228 | break; | ||
5229 | } | ||
5230 | return crtc; | ||
5231 | } | ||
5232 | |||
5233 | static int intel_encoder_clones(struct drm_device *dev, int type_mask) | 6691 | static int intel_encoder_clones(struct drm_device *dev, int type_mask) |
5234 | { | 6692 | { |
6693 | struct intel_encoder *encoder; | ||
5235 | int index_mask = 0; | 6694 | int index_mask = 0; |
5236 | struct drm_encoder *encoder; | ||
5237 | int entry = 0; | 6695 | int entry = 0; |
5238 | 6696 | ||
5239 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 6697 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
5240 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 6698 | if (type_mask & encoder->clone_mask) |
5241 | if (type_mask & intel_encoder->clone_mask) | ||
5242 | index_mask |= (1 << entry); | 6699 | index_mask |= (1 << entry); |
5243 | entry++; | 6700 | entry++; |
5244 | } | 6701 | } |
6702 | |||
5245 | return index_mask; | 6703 | return index_mask; |
5246 | } | 6704 | } |
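intel_encoder_clones walks the encoder list and builds a bitmask of the entries whose clone_mask intersects the requested type mask; the result later becomes possible_clones on each encoder. A hedged standalone version of the same loop over a plain array:

#include <stdio.h>

struct encoder { int clone_mask; };

static int encoder_clones(const struct encoder *enc, int n, int type_mask)
{
    int index_mask = 0;
    for (int i = 0; i < n; i++)
        if (type_mask & enc[i].clone_mask)
            index_mask |= 1 << i;       /* i plays the role of 'entry' */
    return index_mask;
}

int main(void)
{
    struct encoder e[] = {
        { .clone_mask = (1 << 0) | (1 << 2) },
        { .clone_mask = 1 << 1 },
        { .clone_mask = (1 << 0) | (1 << 2) },
    };
    printf("clones of type 0: 0x%x\n", encoder_clones(e, 3, 1 << 0)); /* 0x5 */
    return 0;
}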
5247 | 6705 | ||
6706 | static bool has_edp_a(struct drm_device *dev) | ||
6707 | { | ||
6708 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6709 | |||
6710 | if (!IS_MOBILE(dev)) | ||
6711 | return false; | ||
6712 | |||
6713 | if ((I915_READ(DP_A) & DP_DETECTED) == 0) | ||
6714 | return false; | ||
6715 | |||
6716 | if (IS_GEN5(dev) && | ||
6717 | (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) | ||
6718 | return false; | ||
6719 | |||
6720 | return true; | ||
6721 | } | ||
5248 | 6722 | ||
5249 | static void intel_setup_outputs(struct drm_device *dev) | 6723 | static void intel_setup_outputs(struct drm_device *dev) |
5250 | { | 6724 | { |
5251 | struct drm_i915_private *dev_priv = dev->dev_private; | 6725 | struct drm_i915_private *dev_priv = dev->dev_private; |
5252 | struct drm_encoder *encoder; | 6726 | struct intel_encoder *encoder; |
5253 | bool dpd_is_edp = false; | 6727 | bool dpd_is_edp = false; |
6728 | bool has_lvds = false; | ||
5254 | 6729 | ||
5255 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 6730 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
5256 | intel_lvds_init(dev); | 6731 | has_lvds = intel_lvds_init(dev); |
6732 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { | ||
6733 | /* disable the panel fitter on everything but LVDS */ | ||
6734 | I915_WRITE(PFIT_CONTROL, 0); | ||
6735 | } | ||
5257 | 6736 | ||
5258 | if (HAS_PCH_SPLIT(dev)) { | 6737 | if (HAS_PCH_SPLIT(dev)) { |
5259 | dpd_is_edp = intel_dpd_is_edp(dev); | 6738 | dpd_is_edp = intel_dpd_is_edp(dev); |
5260 | 6739 | ||
5261 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) | 6740 | if (has_edp_a(dev)) |
5262 | intel_dp_init(dev, DP_A); | 6741 | intel_dp_init(dev, DP_A); |
5263 | 6742 | ||
5264 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) | 6743 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
@@ -5338,13 +6817,16 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
5338 | if (SUPPORTS_TV(dev)) | 6817 | if (SUPPORTS_TV(dev)) |
5339 | intel_tv_init(dev); | 6818 | intel_tv_init(dev); |
5340 | 6819 | ||
5341 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 6820 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
5342 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 6821 | encoder->base.possible_crtcs = encoder->crtc_mask; |
5343 | 6822 | encoder->base.possible_clones = | |
5344 | encoder->possible_crtcs = intel_encoder->crtc_mask; | 6823 | intel_encoder_clones(dev, encoder->clone_mask); |
5345 | encoder->possible_clones = intel_encoder_clones(dev, | ||
5346 | intel_encoder->clone_mask); | ||
5347 | } | 6824 | } |
6825 | |||
6826 | intel_panel_setup_backlight(dev); | ||
6827 | |||
6828 | /* disable all the possible outputs/crtcs before entering KMS mode */ | ||
6829 | drm_helper_disable_unused_functions(dev); | ||
5348 | } | 6830 | } |
5349 | 6831 | ||
5350 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 6832 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -5352,19 +6834,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
5352 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 6834 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
5353 | 6835 | ||
5354 | drm_framebuffer_cleanup(fb); | 6836 | drm_framebuffer_cleanup(fb); |
5355 | drm_gem_object_unreference_unlocked(intel_fb->obj); | 6837 | drm_gem_object_unreference_unlocked(&intel_fb->obj->base); |
5356 | 6838 | ||
5357 | kfree(intel_fb); | 6839 | kfree(intel_fb); |
5358 | } | 6840 | } |
5359 | 6841 | ||
5360 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, | 6842 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, |
5361 | struct drm_file *file_priv, | 6843 | struct drm_file *file, |
5362 | unsigned int *handle) | 6844 | unsigned int *handle) |
5363 | { | 6845 | { |
5364 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 6846 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
5365 | struct drm_gem_object *object = intel_fb->obj; | 6847 | struct drm_i915_gem_object *obj = intel_fb->obj; |
5366 | 6848 | ||
5367 | return drm_gem_handle_create(file_priv, object, handle); | 6849 | return drm_gem_handle_create(file, &obj->base, handle); |
5368 | } | 6850 | } |
5369 | 6851 | ||
5370 | static const struct drm_framebuffer_funcs intel_fb_funcs = { | 6852 | static const struct drm_framebuffer_funcs intel_fb_funcs = { |
@@ -5375,10 +6857,26 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { | |||
5375 | int intel_framebuffer_init(struct drm_device *dev, | 6857 | int intel_framebuffer_init(struct drm_device *dev, |
5376 | struct intel_framebuffer *intel_fb, | 6858 | struct intel_framebuffer *intel_fb, |
5377 | struct drm_mode_fb_cmd *mode_cmd, | 6859 | struct drm_mode_fb_cmd *mode_cmd, |
5378 | struct drm_gem_object *obj) | 6860 | struct drm_i915_gem_object *obj) |
5379 | { | 6861 | { |
5380 | int ret; | 6862 | int ret; |
5381 | 6863 | ||
6864 | if (obj->tiling_mode == I915_TILING_Y) | ||
6865 | return -EINVAL; | ||
6866 | |||
6867 | if (mode_cmd->pitch & 63) | ||
6868 | return -EINVAL; | ||
6869 | |||
6870 | switch (mode_cmd->bpp) { | ||
6871 | case 8: | ||
6872 | case 16: | ||
6873 | case 24: | ||
6874 | case 32: | ||
6875 | break; | ||
6876 | default: | ||
6877 | return -EINVAL; | ||
6878 | } | ||
6879 | |||
5382 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); | 6880 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
5383 | if (ret) { | 6881 | if (ret) { |
5384 | DRM_ERROR("framebuffer init failed %d\n", ret); | 6882 | DRM_ERROR("framebuffer init failed %d\n", ret); |
@@ -5395,27 +6893,13 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
5395 | struct drm_file *filp, | 6893 | struct drm_file *filp, |
5396 | struct drm_mode_fb_cmd *mode_cmd) | 6894 | struct drm_mode_fb_cmd *mode_cmd) |
5397 | { | 6895 | { |
5398 | struct drm_gem_object *obj; | 6896 | struct drm_i915_gem_object *obj; |
5399 | struct intel_framebuffer *intel_fb; | ||
5400 | int ret; | ||
5401 | 6897 | ||
5402 | obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); | 6898 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); |
5403 | if (!obj) | 6899 | if (&obj->base == NULL) |
5404 | return ERR_PTR(-ENOENT); | 6900 | return ERR_PTR(-ENOENT); |
5405 | 6901 | ||
5406 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 6902 | return intel_framebuffer_create(dev, mode_cmd, obj); |
5407 | if (!intel_fb) | ||
5408 | return ERR_PTR(-ENOMEM); | ||
5409 | |||
5410 | ret = intel_framebuffer_init(dev, intel_fb, | ||
5411 | mode_cmd, obj); | ||
5412 | if (ret) { | ||
5413 | drm_gem_object_unreference_unlocked(obj); | ||
5414 | kfree(intel_fb); | ||
5415 | return ERR_PTR(ret); | ||
5416 | } | ||
5417 | |||
5418 | return &intel_fb->base; | ||
5419 | } | 6903 | } |
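Much of this hunk is mechanical fallout from drm_i915_gem_object now embedding its drm_gem_object as a 'base' member, so call sites pass &obj->base and lookups go back through to_intel_bo(). The usual kernel idiom for that conversion is a container_of-style cast; a simplified userspace sketch with illustrative field names:

#include <stddef.h>
#include <stdio.h>

struct gem_object { unsigned long size; };

struct i915_gem_object {
    struct gem_object base;     /* embedded, not pointed to */
    int tiling_mode;
};

/* Recover the containing object from a pointer to its embedded base. */
#define to_i915_bo(p) \
    ((struct i915_gem_object *)((char *)(p) - offsetof(struct i915_gem_object, base)))

int main(void)
{
    struct i915_gem_object obj = { .base = { .size = 4096 }, .tiling_mode = 1 };
    struct gem_object *base = &obj.base;                   /* what generic code sees */
    printf("tiling: %d\n", to_i915_bo(base)->tiling_mode); /* back to the wrapper    */
    return 0;
}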
5420 | 6904 | ||
5421 | static const struct drm_mode_config_funcs intel_mode_funcs = { | 6905 | static const struct drm_mode_config_funcs intel_mode_funcs = { |
@@ -5423,20 +6907,21 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
5423 | .output_poll_changed = intel_fb_output_poll_changed, | 6907 | .output_poll_changed = intel_fb_output_poll_changed, |
5424 | }; | 6908 | }; |
5425 | 6909 | ||
5426 | static struct drm_gem_object * | 6910 | static struct drm_i915_gem_object * |
5427 | intel_alloc_context_page(struct drm_device *dev) | 6911 | intel_alloc_context_page(struct drm_device *dev) |
5428 | { | 6912 | { |
5429 | struct drm_gem_object *ctx; | 6913 | struct drm_i915_gem_object *ctx; |
5430 | int ret; | 6914 | int ret; |
5431 | 6915 | ||
6916 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
6917 | |||
5432 | ctx = i915_gem_alloc_object(dev, 4096); | 6918 | ctx = i915_gem_alloc_object(dev, 4096); |
5433 | if (!ctx) { | 6919 | if (!ctx) { |
5434 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); | 6920 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); |
5435 | return NULL; | 6921 | return NULL; |
5436 | } | 6922 | } |
5437 | 6923 | ||
5438 | mutex_lock(&dev->struct_mutex); | 6924 | ret = i915_gem_object_pin(ctx, 4096, true); |
5439 | ret = i915_gem_object_pin(ctx, 4096); | ||
5440 | if (ret) { | 6925 | if (ret) { |
5441 | DRM_ERROR("failed to pin power context: %d\n", ret); | 6926 | DRM_ERROR("failed to pin power context: %d\n", ret); |
5442 | goto err_unref; | 6927 | goto err_unref; |
@@ -5447,14 +6932,13 @@ intel_alloc_context_page(struct drm_device *dev) | |||
5447 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); | 6932 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); |
5448 | goto err_unpin; | 6933 | goto err_unpin; |
5449 | } | 6934 | } |
5450 | mutex_unlock(&dev->struct_mutex); | ||
5451 | 6935 | ||
5452 | return ctx; | 6936 | return ctx; |
5453 | 6937 | ||
5454 | err_unpin: | 6938 | err_unpin: |
5455 | i915_gem_object_unpin(ctx); | 6939 | i915_gem_object_unpin(ctx); |
5456 | err_unref: | 6940 | err_unref: |
5457 | drm_gem_object_unreference(ctx); | 6941 | drm_gem_object_unreference(&ctx->base); |
5458 | mutex_unlock(&dev->struct_mutex); | 6942 | mutex_unlock(&dev->struct_mutex); |
5459 | return NULL; | 6943 | return NULL; |
5460 | } | 6944 | } |
@@ -5487,6 +6971,10 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
5487 | u32 rgvmodectl = I915_READ(MEMMODECTL); | 6971 | u32 rgvmodectl = I915_READ(MEMMODECTL); |
5488 | u8 fmax, fmin, fstart, vstart; | 6972 | u8 fmax, fmin, fstart, vstart; |
5489 | 6973 | ||
6974 | /* Enable temp reporting */ | ||
6975 | I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); | ||
6976 | I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); | ||
6977 | |||
5490 | /* 100ms RC evaluation intervals */ | 6978 | /* 100ms RC evaluation intervals */ |
5491 | I915_WRITE(RCUPEI, 100000); | 6979 | I915_WRITE(RCUPEI, 100000); |
5492 | I915_WRITE(RCDNEI, 100000); | 6980 | I915_WRITE(RCDNEI, 100000); |
@@ -5502,20 +6990,19 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
5502 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); | 6990 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); |
5503 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> | 6991 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> |
5504 | MEMMODE_FSTART_SHIFT; | 6992 | MEMMODE_FSTART_SHIFT; |
5505 | fstart = fmax; | ||
5506 | 6993 | ||
5507 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> | 6994 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> |
5508 | PXVFREQ_PX_SHIFT; | 6995 | PXVFREQ_PX_SHIFT; |
5509 | 6996 | ||
5510 | dev_priv->fmax = fstart; /* IPS callback will increase this */ | 6997 | dev_priv->fmax = fmax; /* IPS callback will increase this */ |
5511 | dev_priv->fstart = fstart; | 6998 | dev_priv->fstart = fstart; |
5512 | 6999 | ||
5513 | dev_priv->max_delay = fmax; | 7000 | dev_priv->max_delay = fstart; |
5514 | dev_priv->min_delay = fmin; | 7001 | dev_priv->min_delay = fmin; |
5515 | dev_priv->cur_delay = fstart; | 7002 | dev_priv->cur_delay = fstart; |
5516 | 7003 | ||
5517 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, | 7004 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", |
5518 | fstart); | 7005 | fmax, fmin, fstart); |
5519 | 7006 | ||
5520 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); | 7007 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); |
5521 | 7008 | ||
@@ -5529,7 +7016,7 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
5529 | rgvmodectl |= MEMMODE_SWMODE_EN; | 7016 | rgvmodectl |= MEMMODE_SWMODE_EN; |
5530 | I915_WRITE(MEMMODECTL, rgvmodectl); | 7017 | I915_WRITE(MEMMODECTL, rgvmodectl); |
5531 | 7018 | ||
5532 | if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0)) | 7019 | if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) |
5533 | DRM_ERROR("stuck trying to change perf mode\n"); | 7020 | DRM_ERROR("stuck trying to change perf mode\n"); |
5534 | msleep(1); | 7021 | msleep(1); |
5535 | 7022 | ||
@@ -5563,6 +7050,30 @@ void ironlake_disable_drps(struct drm_device *dev) | |||
5563 | 7050 | ||
5564 | } | 7051 | } |
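ironlake_enable_drps derives fmax, fmin and fstart from fields of MEMMODECTL; the fix in this hunk stops overwriting fstart with fmax, records the true fmax as the ceiling the IPS callback may raise towards, and starts max_delay and cur_delay at fstart. The extraction itself is plain mask-and-shift work; a sketch with stand-in mask and shift values (the real layout is defined in i915_reg.h):

#include <stdio.h>

/* Stand-in field positions, chosen only to make the example run. */
#define FMAX_MASK    0x000000f0u
#define FMAX_SHIFT   4
#define FMIN_MASK    0x0000000fu
#define FSTART_MASK  0x00000f00u
#define FSTART_SHIFT 8

int main(void)
{
    unsigned int rgvmodectl = 0x00000372u;   /* pretend hardware value */
    unsigned int fmax   = (rgvmodectl & FMAX_MASK) >> FMAX_SHIFT;
    unsigned int fmin   =  rgvmodectl & FMIN_MASK;
    unsigned int fstart = (rgvmodectl & FSTART_MASK) >> FSTART_SHIFT;

    /* fmax is only the ceiling; fstart is both the starting and the
     * current delay handed to the rest of the driver. */
    printf("fmax=%u fmin=%u fstart=%u\n", fmax, fmin, fstart); /* 7 2 3 */
    return 0;
}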
5565 | 7052 | ||
7053 | void gen6_set_rps(struct drm_device *dev, u8 val) | ||
7054 | { | ||
7055 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7056 | u32 swreq; | ||
7057 | |||
7058 | swreq = (val & 0x3ff) << 25; | ||
7059 | I915_WRITE(GEN6_RPNSWREQ, swreq); | ||
7060 | } | ||
7061 | |||
7062 | void gen6_disable_rps(struct drm_device *dev) | ||
7063 | { | ||
7064 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7065 | |||
7066 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | ||
7067 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | ||
7068 | I915_WRITE(GEN6_PMIER, 0); | ||
7069 | |||
7070 | spin_lock_irq(&dev_priv->rps_lock); | ||
7071 | dev_priv->pm_iir = 0; | ||
7072 | spin_unlock_irq(&dev_priv->rps_lock); | ||
7073 | |||
7074 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); | ||
7075 | } | ||
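gen6_set_rps packs the requested frequency code into the top of GEN6_RPNSWREQ: the value is limited to 10 bits and shifted up to bit 25, while gen6_disable_rps instead writes bit 31 and masks all RPS interrupts. A sketch of just the bit packing (no other register semantics are modeled, and the example codes are arbitrary):

#include <stdio.h>

static unsigned int pack_rps_request(unsigned int val)
{
    return (val & 0x3ffu) << 25;   /* same encoding as swreq in gen6_set_rps */
}

int main(void)
{
    printf("swreq for code 18: 0x%08x\n", pack_rps_request(18));
    printf("swreq for code  6: 0x%08x\n", pack_rps_request(6));
    return 0;
}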
7076 | |||
5566 | static unsigned long intel_pxfreq(u32 vidfreq) | 7077 | static unsigned long intel_pxfreq(u32 vidfreq) |
5567 | { | 7078 | { |
5568 | unsigned long freq; | 7079 | unsigned long freq; |
@@ -5649,158 +7160,475 @@ void intel_init_emon(struct drm_device *dev) | |||
5649 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); | 7160 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); |
5650 | } | 7161 | } |
5651 | 7162 | ||
5652 | void intel_init_clock_gating(struct drm_device *dev) | 7163 | void gen6_enable_rps(struct drm_i915_private *dev_priv) |
7164 | { | ||
7165 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | ||
7166 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | ||
7167 | u32 pcu_mbox, rc6_mask = 0; | ||
7168 | int cur_freq, min_freq, max_freq; | ||
7169 | int i; | ||
7170 | |||
7171 | /* Here begins a magic sequence of register writes to enable | ||
7172 | * auto-downclocking. | ||
7173 | * | ||
7174 | * Perhaps there might be some value in exposing these to | ||
7175 | * userspace... | ||
7176 | */ | ||
7177 | I915_WRITE(GEN6_RC_STATE, 0); | ||
7178 | mutex_lock(&dev_priv->dev->struct_mutex); | ||
7179 | gen6_gt_force_wake_get(dev_priv); | ||
7180 | |||
7181 | /* disable the counters and set deterministic thresholds */ | ||
7182 | I915_WRITE(GEN6_RC_CONTROL, 0); | ||
7183 | |||
7184 | I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); | ||
7185 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); | ||
7186 | I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); | ||
7187 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); | ||
7188 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); | ||
7189 | |||
7190 | for (i = 0; i < I915_NUM_RINGS; i++) | ||
7191 | I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); | ||
7192 | |||
7193 | I915_WRITE(GEN6_RC_SLEEP, 0); | ||
7194 | I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); | ||
7195 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); | ||
7196 | I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); | ||
7197 | I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ | ||
7198 | |||
7199 | if (i915_enable_rc6) | ||
7200 | rc6_mask = GEN6_RC_CTL_RC6p_ENABLE | | ||
7201 | GEN6_RC_CTL_RC6_ENABLE; | ||
7202 | |||
7203 | I915_WRITE(GEN6_RC_CONTROL, | ||
7204 | rc6_mask | | ||
7205 | GEN6_RC_CTL_EI_MODE(1) | | ||
7206 | GEN6_RC_CTL_HW_ENABLE); | ||
7207 | |||
7208 | I915_WRITE(GEN6_RPNSWREQ, | ||
7209 | GEN6_FREQUENCY(10) | | ||
7210 | GEN6_OFFSET(0) | | ||
7211 | GEN6_AGGRESSIVE_TURBO); | ||
7212 | I915_WRITE(GEN6_RC_VIDEO_FREQ, | ||
7213 | GEN6_FREQUENCY(12)); | ||
7214 | |||
7215 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); | ||
7216 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||
7217 | 18 << 24 | | ||
7218 | 6 << 16); | ||
7219 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); | ||
7220 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); | ||
7221 | I915_WRITE(GEN6_RP_UP_EI, 100000); | ||
7222 | I915_WRITE(GEN6_RP_DOWN_EI, 5000000); | ||
7223 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | ||
7224 | I915_WRITE(GEN6_RP_CONTROL, | ||
7225 | GEN6_RP_MEDIA_TURBO | | ||
7226 | GEN6_RP_USE_NORMAL_FREQ | | ||
7227 | GEN6_RP_MEDIA_IS_GFX | | ||
7228 | GEN6_RP_ENABLE | | ||
7229 | GEN6_RP_UP_BUSY_AVG | | ||
7230 | GEN6_RP_DOWN_IDLE_CONT); | ||
7231 | |||
7232 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
7233 | 500)) | ||
7234 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
7235 | |||
7236 | I915_WRITE(GEN6_PCODE_DATA, 0); | ||
7237 | I915_WRITE(GEN6_PCODE_MAILBOX, | ||
7238 | GEN6_PCODE_READY | | ||
7239 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | ||
7240 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
7241 | 500)) | ||
7242 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
7243 | |||
7244 | min_freq = (rp_state_cap & 0xff0000) >> 16; | ||
7245 | max_freq = rp_state_cap & 0xff; | ||
7246 | cur_freq = (gt_perf_status & 0xff00) >> 8; | ||
7247 | |||
7248 | /* Check for overclock support */ | ||
7249 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
7250 | 500)) | ||
7251 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
7252 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); | ||
7253 | pcu_mbox = I915_READ(GEN6_PCODE_DATA); | ||
7254 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
7255 | 500)) | ||
7256 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
7257 | if (pcu_mbox & (1<<31)) { /* OC supported */ | ||
7258 | max_freq = pcu_mbox & 0xff; | ||
7259 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); | ||
7260 | } | ||
7261 | |||
7262 | /* In units of 100MHz */ | ||
7263 | dev_priv->max_delay = max_freq; | ||
7264 | dev_priv->min_delay = min_freq; | ||
7265 | dev_priv->cur_delay = cur_freq; | ||
7266 | |||
7267 | /* requires MSI enabled */ | ||
7268 | I915_WRITE(GEN6_PMIER, | ||
7269 | GEN6_PM_MBOX_EVENT | | ||
7270 | GEN6_PM_THERMAL_EVENT | | ||
7271 | GEN6_PM_RP_DOWN_TIMEOUT | | ||
7272 | GEN6_PM_RP_UP_THRESHOLD | | ||
7273 | GEN6_PM_RP_DOWN_THRESHOLD | | ||
7274 | GEN6_PM_RP_UP_EI_EXPIRED | | ||
7275 | GEN6_PM_RP_DOWN_EI_EXPIRED); | ||
7276 | spin_lock_irq(&dev_priv->rps_lock); | ||
7277 | WARN_ON(dev_priv->pm_iir != 0); | ||
7278 | I915_WRITE(GEN6_PMIMR, 0); | ||
7279 | spin_unlock_irq(&dev_priv->rps_lock); | ||
7280 | /* enable all PM interrupts */ | ||
7281 | I915_WRITE(GEN6_PMINTRMSK, 0); | ||
7282 | |||
7283 | gen6_gt_force_wake_put(dev_priv); | ||
7284 | mutex_unlock(&dev_priv->dev->struct_mutex); | ||
7285 | } | ||
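gen6_enable_rps talks to the PCU through the GEN6_PCODE mailbox: wait for the READY bit to clear, place a value in PCODE_DATA, write READY plus a command into the mailbox, then wait for READY to clear again before trusting PCODE_DATA. The same poll pattern, sketched against a simulated register pair (the simulation and the command code are purely illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PCODE_READY (1u << 31)
#define READ_OC_PARAMS_CMD 0x8u   /* hypothetical command code, for illustration */

/* Simulated registers; real code uses MMIO reads and writes. */
static unsigned int mailbox, data;
static void hw_tick(void) { mailbox &= ~PCODE_READY; data = 0x80000012u; } /* PCU "responds" */

static bool wait_for_mailbox_idle(int tries)
{
    while (tries--) {
        if (!(mailbox & PCODE_READY))
            return true;
        hw_tick();                 /* stand-in for a delay and re-read */
    }
    return false;
}

static bool pcode_read(unsigned int cmd, unsigned int *out)
{
    if (!wait_for_mailbox_idle(500))
        return false;              /* "timeout waiting for pcode mailbox" */
    mailbox = PCODE_READY | cmd;
    if (!wait_for_mailbox_idle(500))
        return false;
    *out = data;
    return true;
}

int main(void)
{
    unsigned int mbox;
    if (pcode_read(READ_OC_PARAMS_CMD, &mbox) && (mbox & (1u << 31)))
        printf("overclock supported, max code 0x%02x\n", mbox & 0xffu);
    return 0;
}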
7286 | |||
7287 | static void ironlake_init_clock_gating(struct drm_device *dev) | ||
5653 | { | 7288 | { |
5654 | struct drm_i915_private *dev_priv = dev->dev_private; | 7289 | struct drm_i915_private *dev_priv = dev->dev_private; |
7290 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
7291 | |||
7292 | /* Required for FBC */ | ||
7293 | dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | | ||
7294 | DPFCRUNIT_CLOCK_GATE_DISABLE | | ||
7295 | DPFDUNIT_CLOCK_GATE_DISABLE; | ||
7296 | /* Required for CxSR */ | ||
7297 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; | ||
7298 | |||
7299 | I915_WRITE(PCH_3DCGDIS0, | ||
7300 | MARIUNIT_CLOCK_GATE_DISABLE | | ||
7301 | SVSMUNIT_CLOCK_GATE_DISABLE); | ||
7302 | I915_WRITE(PCH_3DCGDIS1, | ||
7303 | VFMUNIT_CLOCK_GATE_DISABLE); | ||
7304 | |||
7305 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
5655 | 7306 | ||
5656 | /* | 7307 | /* |
5657 | * Disable clock gating reported to work incorrectly according to the | 7308 | * According to the spec the following bits should be set in |
5658 | * specs, but enable as much else as we can. | 7309 | * order to enable memory self-refresh |
7310 | * The bit 22/21 of 0x42004 | ||
7311 | * The bit 5 of 0x42020 | ||
7312 | * The bit 15 of 0x45000 | ||
5659 | */ | 7313 | */ |
5660 | if (HAS_PCH_SPLIT(dev)) { | 7314 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
5661 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | 7315 | (I915_READ(ILK_DISPLAY_CHICKEN2) | |
7316 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | ||
7317 | I915_WRITE(ILK_DSPCLK_GATE, | ||
7318 | (I915_READ(ILK_DSPCLK_GATE) | | ||
7319 | ILK_DPARB_CLK_GATE)); | ||
7320 | I915_WRITE(DISP_ARB_CTL, | ||
7321 | (I915_READ(DISP_ARB_CTL) | | ||
7322 | DISP_FBC_WM_DIS)); | ||
7323 | I915_WRITE(WM3_LP_ILK, 0); | ||
7324 | I915_WRITE(WM2_LP_ILK, 0); | ||
7325 | I915_WRITE(WM1_LP_ILK, 0); | ||
5662 | 7326 | ||
5663 | if (IS_IRONLAKE(dev)) { | 7327 | /* |
5664 | /* Required for FBC */ | 7328 | * Based on the document from hardware guys the following bits |
5665 | dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; | 7329 | * should be set unconditionally in order to enable FBC. |
5666 | /* Required for CxSR */ | 7330 | * The bit 22 of 0x42000 |
5667 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; | 7331 | * The bit 22 of 0x42004 |
7332 | * The bit 7,8,9 of 0x42020. | ||
7333 | */ | ||
7334 | if (IS_IRONLAKE_M(dev)) { | ||
7335 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | ||
7336 | I915_READ(ILK_DISPLAY_CHICKEN1) | | ||
7337 | ILK_FBCQ_DIS); | ||
7338 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
7339 | I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
7340 | ILK_DPARB_GATE); | ||
7341 | I915_WRITE(ILK_DSPCLK_GATE, | ||
7342 | I915_READ(ILK_DSPCLK_GATE) | | ||
7343 | ILK_DPFC_DIS1 | | ||
7344 | ILK_DPFC_DIS2 | | ||
7345 | ILK_CLK_FBC); | ||
7346 | } | ||
7347 | |||
7348 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
7349 | I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
7350 | ILK_ELPIN_409_SELECT); | ||
7351 | I915_WRITE(_3D_CHICKEN2, | ||
7352 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | | ||
7353 | _3D_CHICKEN2_WM_READ_PIPELINED); | ||
7354 | } | ||
5668 | 7355 | ||
5669 | I915_WRITE(PCH_3DCGDIS0, | 7356 | static void gen6_init_clock_gating(struct drm_device *dev) |
5670 | MARIUNIT_CLOCK_GATE_DISABLE | | 7357 | { |
5671 | SVSMUNIT_CLOCK_GATE_DISABLE); | 7358 | struct drm_i915_private *dev_priv = dev->dev_private; |
5672 | } | 7359 | int pipe; |
7360 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
5673 | 7361 | ||
5674 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | 7362 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
5675 | 7363 | ||
5676 | /* | 7364 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
5677 | * According to the spec the following bits should be set in | 7365 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
5678 | * order to enable memory self-refresh | 7366 | ILK_ELPIN_409_SELECT); |
5679 | * The bit 22/21 of 0x42004 | 7367 | |
5680 | * The bit 5 of 0x42020 | 7368 | I915_WRITE(WM3_LP_ILK, 0); |
5681 | * The bit 15 of 0x45000 | 7369 | I915_WRITE(WM2_LP_ILK, 0); |
5682 | */ | 7370 | I915_WRITE(WM1_LP_ILK, 0); |
5683 | if (IS_IRONLAKE(dev)) { | 7371 | |
5684 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 7372 | /* |
5685 | (I915_READ(ILK_DISPLAY_CHICKEN2) | | 7373 | * According to the spec the following bits should be |
5686 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | 7374 | * set in order to enable memory self-refresh and fbc: |
5687 | I915_WRITE(ILK_DSPCLK_GATE, | 7375 | * The bit21 and bit22 of 0x42000 |
5688 | (I915_READ(ILK_DSPCLK_GATE) | | 7376 | * The bit21 and bit22 of 0x42004 |
5689 | ILK_DPARB_CLK_GATE)); | 7377 | * The bit5 and bit7 of 0x42020 |
5690 | I915_WRITE(DISP_ARB_CTL, | 7378 | * The bit14 of 0x70180 |
5691 | (I915_READ(DISP_ARB_CTL) | | 7379 | * The bit14 of 0x71180 |
5692 | DISP_FBC_WM_DIS)); | 7380 | */ |
5693 | I915_WRITE(WM3_LP_ILK, 0); | 7381 | I915_WRITE(ILK_DISPLAY_CHICKEN1, |
5694 | I915_WRITE(WM2_LP_ILK, 0); | 7382 | I915_READ(ILK_DISPLAY_CHICKEN1) | |
5695 | I915_WRITE(WM1_LP_ILK, 0); | 7383 | ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); |
5696 | } | 7384 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
5697 | /* | 7385 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
5698 | * Based on the document from hardware guys the following bits | 7386 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); |
5699 | * should be set unconditionally in order to enable FBC. | 7387 | I915_WRITE(ILK_DSPCLK_GATE, |
5700 | * The bit 22 of 0x42000 | 7388 | I915_READ(ILK_DSPCLK_GATE) | |
5701 | * The bit 22 of 0x42004 | 7389 | ILK_DPARB_CLK_GATE | |
5702 | * The bit 7,8,9 of 0x42020. | 7390 | ILK_DPFD_CLK_GATE); |
5703 | */ | 7391 | |
5704 | if (IS_IRONLAKE_M(dev)) { | 7392 | for_each_pipe(pipe) |
5705 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | 7393 | I915_WRITE(DSPCNTR(pipe), |
5706 | I915_READ(ILK_DISPLAY_CHICKEN1) | | 7394 | I915_READ(DSPCNTR(pipe)) | |
5707 | ILK_FBCQ_DIS); | 7395 | DISPPLANE_TRICKLE_FEED_DISABLE); |
5708 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 7396 | } |
5709 | I915_READ(ILK_DISPLAY_CHICKEN2) | | 7397 | |
5710 | ILK_DPARB_GATE); | 7398 | static void ivybridge_init_clock_gating(struct drm_device *dev) |
5711 | I915_WRITE(ILK_DSPCLK_GATE, | 7399 | { |
5712 | I915_READ(ILK_DSPCLK_GATE) | | 7400 | struct drm_i915_private *dev_priv = dev->dev_private; |
5713 | ILK_DPFC_DIS1 | | 7401 | int pipe; |
5714 | ILK_DPFC_DIS2 | | 7402 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
5715 | ILK_CLK_FBC); | 7403 | |
5716 | } | 7404 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
7405 | |||
7406 | I915_WRITE(WM3_LP_ILK, 0); | ||
7407 | I915_WRITE(WM2_LP_ILK, 0); | ||
7408 | I915_WRITE(WM1_LP_ILK, 0); | ||
7409 | |||
7410 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | ||
7411 | |||
7412 | for_each_pipe(pipe) | ||
7413 | I915_WRITE(DSPCNTR(pipe), | ||
7414 | I915_READ(DSPCNTR(pipe)) | | ||
7415 | DISPPLANE_TRICKLE_FEED_DISABLE); | ||
7416 | } | ||
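The per-platform clock-gating routines above are almost entirely read-modify-write sequences: read a chicken or gate register, OR in the workaround bits the spec asks for, and write the value back (plus the trickle-feed disable applied per pipe on gen6/Ivy Bridge). A generic sketch of that pattern over a fake register file, with illustrative bit values:

#include <stdio.h>

static unsigned int regs[4];                          /* stand-in MMIO space */

static unsigned int reg_read(int r)                  { return regs[r]; }
static void         reg_write(int r, unsigned int v) { regs[r] = v;    }

/* OR extra bits into a register without disturbing what is already set. */
static void reg_set_bits(int r, unsigned int bits)
{
    reg_write(r, reg_read(r) | bits);
}

int main(void)
{
    enum { CHICKEN2 = 0, DSPCNTR_A = 1, DSPCNTR_B = 2 };
    const unsigned int ELPIN_409_SELECT     = 1u << 4;   /* illustrative values */
    const unsigned int TRICKLE_FEED_DISABLE = 1u << 14;

    regs[CHICKEN2] = 0x00000003u;                        /* pretend BIOS-set bits */
    reg_set_bits(CHICKEN2, ELPIN_409_SELECT);

    for (int pipe = DSPCNTR_A; pipe <= DSPCNTR_B; pipe++)   /* like for_each_pipe */
        reg_set_bits(pipe, TRICKLE_FEED_DISABLE);

    printf("CHICKEN2=0x%08x DSPCNTR_A=0x%08x\n", regs[CHICKEN2], regs[DSPCNTR_A]);
    return 0;
}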
7417 | |||
7418 | static void g4x_init_clock_gating(struct drm_device *dev) | ||
7419 | { | ||
7420 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7421 | uint32_t dspclk_gate; | ||
7422 | |||
7423 | I915_WRITE(RENCLK_GATE_D1, 0); | ||
7424 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | | ||
7425 | GS_UNIT_CLOCK_GATE_DISABLE | | ||
7426 | CL_UNIT_CLOCK_GATE_DISABLE); | ||
7427 | I915_WRITE(RAMCLK_GATE_D, 0); | ||
7428 | dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | | ||
7429 | OVRUNIT_CLOCK_GATE_DISABLE | | ||
7430 | OVCUNIT_CLOCK_GATE_DISABLE; | ||
7431 | if (IS_GM45(dev)) | ||
7432 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; | ||
7433 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); | ||
7434 | } | ||
7435 | |||
7436 | static void crestline_init_clock_gating(struct drm_device *dev) | ||
7437 | { | ||
7438 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7439 | |||
7440 | I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); | ||
7441 | I915_WRITE(RENCLK_GATE_D2, 0); | ||
7442 | I915_WRITE(DSPCLK_GATE_D, 0); | ||
7443 | I915_WRITE(RAMCLK_GATE_D, 0); | ||
7444 | I915_WRITE16(DEUC, 0); | ||
7445 | } | ||
7446 | |||
7447 | static void broadwater_init_clock_gating(struct drm_device *dev) | ||
7448 | { | ||
7449 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7450 | |||
7451 | I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | | ||
7452 | I965_RCC_CLOCK_GATE_DISABLE | | ||
7453 | I965_RCPB_CLOCK_GATE_DISABLE | | ||
7454 | I965_ISC_CLOCK_GATE_DISABLE | | ||
7455 | I965_FBC_CLOCK_GATE_DISABLE); | ||
7456 | I915_WRITE(RENCLK_GATE_D2, 0); | ||
7457 | } | ||
7458 | |||
7459 | static void gen3_init_clock_gating(struct drm_device *dev) | ||
7460 | { | ||
7461 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7462 | u32 dstate = I915_READ(D_STATE); | ||
7463 | |||
7464 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | | ||
7465 | DSTATE_DOT_CLOCK_GATING; | ||
7466 | I915_WRITE(D_STATE, dstate); | ||
7467 | } | ||
7468 | |||
7469 | static void i85x_init_clock_gating(struct drm_device *dev) | ||
7470 | { | ||
7471 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7472 | |||
7473 | I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); | ||
7474 | } | ||
7475 | |||
7476 | static void i830_init_clock_gating(struct drm_device *dev) | ||
7477 | { | ||
7478 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7479 | |||
7480 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | ||
7481 | } | ||
7482 | |||
7483 | static void ibx_init_clock_gating(struct drm_device *dev) | ||
7484 | { | ||
7485 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7486 | |||
7487 | /* | ||
7488 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
7489 | * gating for the panel power sequencer or it will fail to | ||
7490 | * start up when no ports are active. | ||
7491 | */ | ||
7492 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
7493 | } | ||
7494 | |||
7495 | static void cpt_init_clock_gating(struct drm_device *dev) | ||
7496 | { | ||
7497 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7498 | |||
7499 | /* | ||
7500 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
7501 | * gating for the panel power sequencer or it will fail to | ||
7502 | * start up when no ports are active. | ||
7503 | */ | ||
7504 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
7505 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | ||
7506 | DPLS_EDP_PPS_FIX_DIS); | ||
7507 | } | ||
7508 | |||
7509 | static void ironlake_teardown_rc6(struct drm_device *dev) | ||
7510 | { | ||
7511 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7512 | |||
7513 | if (dev_priv->renderctx) { | ||
7514 | i915_gem_object_unpin(dev_priv->renderctx); | ||
7515 | drm_gem_object_unreference(&dev_priv->renderctx->base); | ||
7516 | dev_priv->renderctx = NULL; | ||
7517 | } | ||
7518 | |||
7519 | if (dev_priv->pwrctx) { | ||
7520 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
7521 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | ||
7522 | dev_priv->pwrctx = NULL; | ||
7523 | } | ||
7524 | } | ||
7525 | |||
7526 | static void ironlake_disable_rc6(struct drm_device *dev) | ||
7527 | { | ||
7528 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7529 | |||
7530 | if (I915_READ(PWRCTXA)) { | ||
7531 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ | ||
7532 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | ||
7533 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | ||
7534 | 50); | ||
7535 | |||
7536 | I915_WRITE(PWRCTXA, 0); | ||
7537 | POSTING_READ(PWRCTXA); | ||
7538 | |||
7539 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||
7540 | POSTING_READ(RSTDBYCTL); | ||
7541 | } | ||
7542 | |||
7543 | ironlake_teardown_rc6(dev); | ||
7544 | } | ||
7545 | |||
7546 | static int ironlake_setup_rc6(struct drm_device *dev) | ||
7547 | { | ||
7548 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7549 | |||
7550 | if (dev_priv->renderctx == NULL) | ||
7551 | dev_priv->renderctx = intel_alloc_context_page(dev); | ||
7552 | if (!dev_priv->renderctx) | ||
7553 | return -ENOMEM; | ||
7554 | |||
7555 | if (dev_priv->pwrctx == NULL) | ||
7556 | dev_priv->pwrctx = intel_alloc_context_page(dev); | ||
7557 | if (!dev_priv->pwrctx) { | ||
7558 | ironlake_teardown_rc6(dev); | ||
7559 | return -ENOMEM; | ||
7560 | } | ||
7561 | |||
7562 | return 0; | ||
7563 | } | ||
7564 | |||
7565 | void ironlake_enable_rc6(struct drm_device *dev) | ||
7566 | { | ||
7567 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7568 | int ret; | ||
7569 | |||
7570 | /* rc6 disabled by default due to repeated reports of hanging during | ||
7571 | * boot and resume. | ||
7572 | */ | ||
7573 | if (!i915_enable_rc6) | ||
7574 | return; | ||
7575 | |||
7576 | mutex_lock(&dev->struct_mutex); | ||
7577 | ret = ironlake_setup_rc6(dev); | ||
7578 | if (ret) { | ||
7579 | mutex_unlock(&dev->struct_mutex); | ||
5717 | return; | 7580 | return; |
5718 | } else if (IS_G4X(dev)) { | ||
5719 | uint32_t dspclk_gate; | ||
5720 | I915_WRITE(RENCLK_GATE_D1, 0); | ||
5721 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | | ||
5722 | GS_UNIT_CLOCK_GATE_DISABLE | | ||
5723 | CL_UNIT_CLOCK_GATE_DISABLE); | ||
5724 | I915_WRITE(RAMCLK_GATE_D, 0); | ||
5725 | dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | | ||
5726 | OVRUNIT_CLOCK_GATE_DISABLE | | ||
5727 | OVCUNIT_CLOCK_GATE_DISABLE; | ||
5728 | if (IS_GM45(dev)) | ||
5729 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; | ||
5730 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); | ||
5731 | } else if (IS_I965GM(dev)) { | ||
5732 | I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); | ||
5733 | I915_WRITE(RENCLK_GATE_D2, 0); | ||
5734 | I915_WRITE(DSPCLK_GATE_D, 0); | ||
5735 | I915_WRITE(RAMCLK_GATE_D, 0); | ||
5736 | I915_WRITE16(DEUC, 0); | ||
5737 | } else if (IS_I965G(dev)) { | ||
5738 | I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | | ||
5739 | I965_RCC_CLOCK_GATE_DISABLE | | ||
5740 | I965_RCPB_CLOCK_GATE_DISABLE | | ||
5741 | I965_ISC_CLOCK_GATE_DISABLE | | ||
5742 | I965_FBC_CLOCK_GATE_DISABLE); | ||
5743 | I915_WRITE(RENCLK_GATE_D2, 0); | ||
5744 | } else if (IS_I9XX(dev)) { | ||
5745 | u32 dstate = I915_READ(D_STATE); | ||
5746 | |||
5747 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | | ||
5748 | DSTATE_DOT_CLOCK_GATING; | ||
5749 | I915_WRITE(D_STATE, dstate); | ||
5750 | } else if (IS_I85X(dev) || IS_I865G(dev)) { | ||
5751 | I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); | ||
5752 | } else if (IS_I830(dev)) { | ||
5753 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | ||
5754 | } | 7581 | } |
5755 | 7582 | ||
5756 | /* | 7583 | /* |
5757 | * GPU can automatically power down the render unit if given a page | 7584 | * GPU can automatically power down the render unit if given a page |
5758 | * to save state. | 7585 | * to save state. |
5759 | */ | 7586 | */ |
5760 | if (IS_IRONLAKE_M(dev)) { | 7587 | ret = BEGIN_LP_RING(6); |
5761 | if (dev_priv->renderctx == NULL) | 7588 | if (ret) { |
5762 | dev_priv->renderctx = intel_alloc_context_page(dev); | 7589 | ironlake_teardown_rc6(dev); |
5763 | if (dev_priv->renderctx) { | 7590 | mutex_unlock(&dev->struct_mutex); |
5764 | struct drm_i915_gem_object *obj_priv; | 7591 | return; |
5765 | obj_priv = to_intel_bo(dev_priv->renderctx); | ||
5766 | if (obj_priv) { | ||
5767 | BEGIN_LP_RING(4); | ||
5768 | OUT_RING(MI_SET_CONTEXT); | ||
5769 | OUT_RING(obj_priv->gtt_offset | | ||
5770 | MI_MM_SPACE_GTT | | ||
5771 | MI_SAVE_EXT_STATE_EN | | ||
5772 | MI_RESTORE_EXT_STATE_EN | | ||
5773 | MI_RESTORE_INHIBIT); | ||
5774 | OUT_RING(MI_NOOP); | ||
5775 | OUT_RING(MI_FLUSH); | ||
5776 | ADVANCE_LP_RING(); | ||
5777 | } | ||
5778 | } else | ||
5779 | DRM_DEBUG_KMS("Failed to allocate render context." | ||
5780 | "Disable RC6\n"); | ||
5781 | } | 7592 | } |
5782 | 7593 | ||
5783 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { | 7594 | OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); |
5784 | struct drm_i915_gem_object *obj_priv = NULL; | 7595 | OUT_RING(MI_SET_CONTEXT); |
7596 | OUT_RING(dev_priv->renderctx->gtt_offset | | ||
7597 | MI_MM_SPACE_GTT | | ||
7598 | MI_SAVE_EXT_STATE_EN | | ||
7599 | MI_RESTORE_EXT_STATE_EN | | ||
7600 | MI_RESTORE_INHIBIT); | ||
7601 | OUT_RING(MI_SUSPEND_FLUSH); | ||
7602 | OUT_RING(MI_NOOP); | ||
7603 | OUT_RING(MI_FLUSH); | ||
7604 | ADVANCE_LP_RING(); | ||
5785 | 7605 | ||
5786 | if (dev_priv->pwrctx) { | 7606 | /* |
5787 | obj_priv = to_intel_bo(dev_priv->pwrctx); | 7607 | * Wait for the command parser to advance past MI_SET_CONTEXT. The HW |
5788 | } else { | 7608 | * does an implicit flush, combined with MI_FLUSH above, it should be |
5789 | struct drm_gem_object *pwrctx; | 7609 | * safe to assume that renderctx is valid |
7610 | */ | ||
7611 | ret = intel_wait_ring_idle(LP_RING(dev_priv)); | ||
7612 | if (ret) { | ||
7613 | DRM_ERROR("failed to enable ironlake power power savings\n"); | ||
7614 | ironlake_teardown_rc6(dev); | ||
7615 | mutex_unlock(&dev->struct_mutex); | ||
7616 | return; | ||
7617 | } | ||
5790 | 7618 | ||
5791 | pwrctx = intel_alloc_context_page(dev); | 7619 | I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); |
5792 | if (pwrctx) { | 7620 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
5793 | dev_priv->pwrctx = pwrctx; | 7621 | mutex_unlock(&dev->struct_mutex); |
5794 | obj_priv = to_intel_bo(pwrctx); | 7622 | } |
5795 | } | ||
5796 | } | ||
5797 | 7623 | ||
5798 | if (obj_priv) { | 7624 | void intel_init_clock_gating(struct drm_device *dev) |
5799 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); | 7625 | { |
5800 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 7626 | struct drm_i915_private *dev_priv = dev->dev_private; |
5801 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | 7627 | |
5802 | } | 7628 | dev_priv->display.init_clock_gating(dev); |
5803 | } | 7629 | |
7630 | if (dev_priv->display.init_pch_clock_gating) | ||
7631 | dev_priv->display.init_pch_clock_gating(dev); | ||
5804 | } | 7632 | } |
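intel_init_clock_gating itself shrinks to two indirect calls: the mandatory per-GPU init_clock_gating hook and an optional init_pch_clock_gating hook that is only installed on PCH-split platforms. A sketch of that shape, with hypothetical function and struct names:

#include <stdio.h>
#include <stddef.h>

struct display_ops {
    void (*init_clock_gating)(void);      /* always set by intel_init_display */
    void (*init_pch_clock_gating)(void);  /* only on PCH-split platforms      */
};

static void gen6_cg(void)    { puts("gen6 clock gating");             }
static void cpt_pch_cg(void) { puts("CougarPoint PCH clock gating");  }
static void g4x_cg(void)     { puts("g4x clock gating");              }

static void init_clock_gating(const struct display_ops *ops)
{
    ops->init_clock_gating();
    if (ops->init_pch_clock_gating)       /* NULL means there is no PCH hook */
        ops->init_pch_clock_gating();
}

int main(void)
{
    struct display_ops snb = { gen6_cg, cpt_pch_cg };
    struct display_ops g4x = { g4x_cg, NULL };
    init_clock_gating(&snb);
    init_clock_gating(&g4x);
    return 0;
}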
5805 | 7633 | ||
5806 | /* Set up chip specific display functions */ | 7634 | /* Set up chip specific display functions */ |
@@ -5809,13 +7637,16 @@ static void intel_init_display(struct drm_device *dev) | |||
5809 | struct drm_i915_private *dev_priv = dev->dev_private; | 7637 | struct drm_i915_private *dev_priv = dev->dev_private; |
5810 | 7638 | ||
5811 | /* We always want a DPMS function */ | 7639 | /* We always want a DPMS function */ |
5812 | if (HAS_PCH_SPLIT(dev)) | 7640 | if (HAS_PCH_SPLIT(dev)) { |
5813 | dev_priv->display.dpms = ironlake_crtc_dpms; | 7641 | dev_priv->display.dpms = ironlake_crtc_dpms; |
5814 | else | 7642 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
7643 | } else { | ||
5815 | dev_priv->display.dpms = i9xx_crtc_dpms; | 7644 | dev_priv->display.dpms = i9xx_crtc_dpms; |
7645 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; | ||
7646 | } | ||
5816 | 7647 | ||
5817 | if (I915_HAS_FBC(dev)) { | 7648 | if (I915_HAS_FBC(dev)) { |
5818 | if (IS_IRONLAKE_M(dev)) { | 7649 | if (HAS_PCH_SPLIT(dev)) { |
5819 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; | 7650 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
5820 | dev_priv->display.enable_fbc = ironlake_enable_fbc; | 7651 | dev_priv->display.enable_fbc = ironlake_enable_fbc; |
5821 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | 7652 | dev_priv->display.disable_fbc = ironlake_disable_fbc; |
@@ -5823,7 +7654,7 @@ static void intel_init_display(struct drm_device *dev) | |||
5823 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | 7654 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
5824 | dev_priv->display.enable_fbc = g4x_enable_fbc; | 7655 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
5825 | dev_priv->display.disable_fbc = g4x_disable_fbc; | 7656 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
5826 | } else if (IS_I965GM(dev)) { | 7657 | } else if (IS_CRESTLINE(dev)) { |
5827 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | 7658 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; |
5828 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | 7659 | dev_priv->display.enable_fbc = i8xx_enable_fbc; |
5829 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | 7660 | dev_priv->display.disable_fbc = i8xx_disable_fbc; |
@@ -5856,7 +7687,12 @@ static void intel_init_display(struct drm_device *dev) | |||
5856 | 7687 | ||
5857 | /* For FIFO watermark updates */ | 7688 | /* For FIFO watermark updates */ |
5858 | if (HAS_PCH_SPLIT(dev)) { | 7689 | if (HAS_PCH_SPLIT(dev)) { |
5859 | if (IS_IRONLAKE(dev)) { | 7690 | if (HAS_PCH_IBX(dev)) |
7691 | dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; | ||
7692 | else if (HAS_PCH_CPT(dev)) | ||
7693 | dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; | ||
7694 | |||
7695 | if (IS_GEN5(dev)) { | ||
5860 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) | 7696 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) |
5861 | dev_priv->display.update_wm = ironlake_update_wm; | 7697 | dev_priv->display.update_wm = ironlake_update_wm; |
5862 | else { | 7698 | else { |
@@ -5864,6 +7700,30 @@ static void intel_init_display(struct drm_device *dev) | |||
5864 | "Disable CxSR\n"); | 7700 | "Disable CxSR\n"); |
5865 | dev_priv->display.update_wm = NULL; | 7701 | dev_priv->display.update_wm = NULL; |
5866 | } | 7702 | } |
7703 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; | ||
7704 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; | ||
7705 | } else if (IS_GEN6(dev)) { | ||
7706 | if (SNB_READ_WM0_LATENCY()) { | ||
7707 | dev_priv->display.update_wm = sandybridge_update_wm; | ||
7708 | } else { | ||
7709 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
7710 | "Disable CxSR\n"); | ||
7711 | dev_priv->display.update_wm = NULL; | ||
7712 | } | ||
7713 | dev_priv->display.fdi_link_train = gen6_fdi_link_train; | ||
7714 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; | ||
7715 | } else if (IS_IVYBRIDGE(dev)) { | ||
7716 | /* FIXME: detect B0+ stepping and use auto training */ | ||
7717 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; | ||
7718 | if (SNB_READ_WM0_LATENCY()) { | ||
7719 | dev_priv->display.update_wm = sandybridge_update_wm; | ||
7720 | } else { | ||
7721 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
7722 | "Disable CxSR\n"); | ||
7723 | dev_priv->display.update_wm = NULL; | ||
7724 | } | ||
7725 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; | ||
7726 | |||
5867 | } else | 7727 | } else |
5868 | dev_priv->display.update_wm = NULL; | 7728 | dev_priv->display.update_wm = NULL; |
5869 | } else if (IS_PINEVIEW(dev)) { | 7729 | } else if (IS_PINEVIEW(dev)) { |
@@ -5881,23 +7741,61 @@ static void intel_init_display(struct drm_device *dev) | |||
5881 | dev_priv->display.update_wm = NULL; | 7741 | dev_priv->display.update_wm = NULL; |
5882 | } else | 7742 | } else |
5883 | dev_priv->display.update_wm = pineview_update_wm; | 7743 | dev_priv->display.update_wm = pineview_update_wm; |
5884 | } else if (IS_G4X(dev)) | 7744 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
7745 | } else if (IS_G4X(dev)) { | ||
5885 | dev_priv->display.update_wm = g4x_update_wm; | 7746 | dev_priv->display.update_wm = g4x_update_wm; |
5886 | else if (IS_I965G(dev)) | 7747 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; |
7748 | } else if (IS_GEN4(dev)) { | ||
5887 | dev_priv->display.update_wm = i965_update_wm; | 7749 | dev_priv->display.update_wm = i965_update_wm; |
5888 | else if (IS_I9XX(dev)) { | 7750 | if (IS_CRESTLINE(dev)) |
7751 | dev_priv->display.init_clock_gating = crestline_init_clock_gating; | ||
7752 | else if (IS_BROADWATER(dev)) | ||
7753 | dev_priv->display.init_clock_gating = broadwater_init_clock_gating; | ||
7754 | } else if (IS_GEN3(dev)) { | ||
5889 | dev_priv->display.update_wm = i9xx_update_wm; | 7755 | dev_priv->display.update_wm = i9xx_update_wm; |
5890 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | 7756 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; |
7757 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; | ||
7758 | } else if (IS_I865G(dev)) { | ||
7759 | dev_priv->display.update_wm = i830_update_wm; | ||
7760 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | ||
7761 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | ||
5891 | } else if (IS_I85X(dev)) { | 7762 | } else if (IS_I85X(dev)) { |
5892 | dev_priv->display.update_wm = i9xx_update_wm; | 7763 | dev_priv->display.update_wm = i9xx_update_wm; |
5893 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | 7764 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; |
7765 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | ||
5894 | } else { | 7766 | } else { |
5895 | dev_priv->display.update_wm = i830_update_wm; | 7767 | dev_priv->display.update_wm = i830_update_wm; |
7768 | dev_priv->display.init_clock_gating = i830_init_clock_gating; | ||
5896 | if (IS_845G(dev)) | 7769 | if (IS_845G(dev)) |
5897 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | 7770 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
5898 | else | 7771 | else |
5899 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | 7772 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
5900 | } | 7773 | } |
7774 | |||
7775 | /* Default just returns -ENODEV to indicate unsupported */ | ||
7776 | dev_priv->display.queue_flip = intel_default_queue_flip; | ||
7777 | |||
7778 | switch (INTEL_INFO(dev)->gen) { | ||
7779 | case 2: | ||
7780 | dev_priv->display.queue_flip = intel_gen2_queue_flip; | ||
7781 | break; | ||
7782 | |||
7783 | case 3: | ||
7784 | dev_priv->display.queue_flip = intel_gen3_queue_flip; | ||
7785 | break; | ||
7786 | |||
7787 | case 4: | ||
7788 | case 5: | ||
7789 | dev_priv->display.queue_flip = intel_gen4_queue_flip; | ||
7790 | break; | ||
7791 | |||
7792 | case 6: | ||
7793 | dev_priv->display.queue_flip = intel_gen6_queue_flip; | ||
7794 | break; | ||
7795 | case 7: | ||
7796 | dev_priv->display.queue_flip = intel_gen7_queue_flip; | ||
7797 | break; | ||
7798 | } | ||
5901 | } | 7799 | } |
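The tail of intel_init_display picks a page-flip implementation per hardware generation, after first installing a default that simply reports the operation as unsupported. A minimal model of that selection, with stub functions standing in for the real intel_genN_queue_flip helpers and only a few generations shown:

#include <errno.h>
#include <stdio.h>

typedef int (*queue_flip_fn)(void);

static int default_queue_flip(void) { return -ENODEV; }   /* "unsupported" */
static int gen2_queue_flip(void)    { return 0; }
static int gen4_queue_flip(void)    { return 0; }
static int gen6_queue_flip(void)    { return 0; }

static queue_flip_fn pick_queue_flip(int gen)
{
    queue_flip_fn fn = default_queue_flip;   /* set the default first, then override */
    switch (gen) {
    case 2:         fn = gen2_queue_flip; break;
    case 4: case 5: fn = gen4_queue_flip; break;   /* gen 4 and 5 share a path */
    case 6:         fn = gen6_queue_flip; break;
    }
    return fn;
}

int main(void)
{
    printf("gen 5 -> %d, gen 8 -> %d\n",
           pick_queue_flip(5)(), pick_queue_flip(8)());   /* 0 and -ENODEV */
    return 0;
}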
5902 | 7800 | ||
5903 | /* | 7801 | /* |
@@ -5913,6 +7811,15 @@ static void quirk_pipea_force (struct drm_device *dev) | |||
5913 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); | 7811 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); |
5914 | } | 7812 | } |
5915 | 7813 | ||
7814 | /* | ||
7815 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason | ||
7816 | */ | ||
7817 | static void quirk_ssc_force_disable(struct drm_device *dev) | ||
7818 | { | ||
7819 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7820 | dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; | ||
7821 | } | ||
7822 | |||
5916 | struct intel_quirk { | 7823 | struct intel_quirk { |
5917 | int device; | 7824 | int device; |
5918 | int subsystem_vendor; | 7825 | int subsystem_vendor; |
@@ -5941,6 +7848,9 @@ struct intel_quirk intel_quirks[] = { | |||
5941 | /* 855 & before need to leave pipe A & dpll A up */ | 7848 | /* 855 & before need to leave pipe A & dpll A up */ |
5942 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | 7849 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
5943 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | 7850 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
7851 | |||
7852 | /* Lenovo U160 cannot use SSC on LVDS */ | ||
7853 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, | ||
5944 | }; | 7854 | }; |
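The new quirk entry is keyed by PCI device id 0x0046, subsystem vendor 0x17aa (Lenovo) and subsystem device 0x3920; its hook, quirk_ssc_force_disable(), sets QUIRK_LVDS_SSC_DISABLE in dev_priv->quirks. The body of intel_init_quirks() is not part of this hunk, so the matching loop below is only a hedged sketch of how a table like this is typically applied; ANY_ID and the fake_pci_dev struct are invented stand-ins (for PCI_ANY_ID and the real pci_dev), not the kernel's code.

#include <stdio.h>
#include <stddef.h>

#define ANY_ID (~0u)	/* stand-in for PCI_ANY_ID */

struct fake_pci_dev {
	unsigned int device;
	unsigned int subsystem_vendor;
	unsigned int subsystem_device;
};

struct quirk {
	unsigned int device;
	unsigned int subsystem_vendor;
	unsigned int subsystem_device;
	void (*hook)(struct fake_pci_dev *pdev);
};

static void ssc_force_disable(struct fake_pci_dev *pdev)
{
	(void)pdev;
	printf("LVDS SSC disabled by quirk\n");
}

static const struct quirk quirks[] = {
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, ssc_force_disable },
};

static void apply_quirks(struct fake_pci_dev *pdev)
{
	size_t i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		/* A field matches if it is equal or wildcarded. */
		if (q->device == pdev->device &&
		    (q->subsystem_vendor == pdev->subsystem_vendor ||
		     q->subsystem_vendor == ANY_ID) &&
		    (q->subsystem_device == pdev->subsystem_device ||
		     q->subsystem_device == ANY_ID))
			q->hook(pdev);
	}
}

int main(void)
{
	struct fake_pci_dev u160 = { 0x0046, 0x17aa, 0x3920 };

	apply_quirks(&u160);
	return 0;
}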
5945 | 7855 | ||
5946 | static void intel_init_quirks(struct drm_device *dev) | 7856 | static void intel_init_quirks(struct drm_device *dev) |
@@ -5999,27 +7909,18 @@ void intel_modeset_init(struct drm_device *dev) | |||
5999 | 7909 | ||
6000 | intel_init_display(dev); | 7910 | intel_init_display(dev); |
6001 | 7911 | ||
6002 | if (IS_I965G(dev)) { | 7912 | if (IS_GEN2(dev)) { |
6003 | dev->mode_config.max_width = 8192; | 7913 | dev->mode_config.max_width = 2048; |
6004 | dev->mode_config.max_height = 8192; | 7914 | dev->mode_config.max_height = 2048; |
6005 | } else if (IS_I9XX(dev)) { | 7915 | } else if (IS_GEN3(dev)) { |
6006 | dev->mode_config.max_width = 4096; | 7916 | dev->mode_config.max_width = 4096; |
6007 | dev->mode_config.max_height = 4096; | 7917 | dev->mode_config.max_height = 4096; |
6008 | } else { | 7918 | } else { |
6009 | dev->mode_config.max_width = 2048; | 7919 | dev->mode_config.max_width = 8192; |
6010 | dev->mode_config.max_height = 2048; | 7920 | dev->mode_config.max_height = 8192; |
6011 | } | 7921 | } |
7922 | dev->mode_config.fb_base = dev->agp->base; | ||
6012 | 7923 | ||
6013 | /* set memory base */ | ||
6014 | if (IS_I9XX(dev)) | ||
6015 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); | ||
6016 | else | ||
6017 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); | ||
6018 | |||
6019 | if (IS_MOBILE(dev) || IS_I9XX(dev)) | ||
6020 | dev_priv->num_pipe = 2; | ||
6021 | else | ||
6022 | dev_priv->num_pipe = 1; | ||
6023 | DRM_DEBUG_KMS("%d display pipe%s available.\n", | 7924 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
6024 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); | 7925 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
6025 | 7926 | ||
@@ -6027,21 +7928,29 @@ void intel_modeset_init(struct drm_device *dev) | |||
6027 | intel_crtc_init(dev, i); | 7928 | intel_crtc_init(dev, i); |
6028 | } | 7929 | } |
6029 | 7930 | ||
7931 | /* Just disable it once at startup */ | ||
7932 | i915_disable_vga(dev); | ||
6030 | intel_setup_outputs(dev); | 7933 | intel_setup_outputs(dev); |
6031 | 7934 | ||
6032 | intel_init_clock_gating(dev); | 7935 | intel_init_clock_gating(dev); |
6033 | 7936 | ||
6034 | /* Just disable it once at startup */ | ||
6035 | i915_disable_vga(dev); | ||
6036 | |||
6037 | if (IS_IRONLAKE_M(dev)) { | 7937 | if (IS_IRONLAKE_M(dev)) { |
6038 | ironlake_enable_drps(dev); | 7938 | ironlake_enable_drps(dev); |
6039 | intel_init_emon(dev); | 7939 | intel_init_emon(dev); |
6040 | } | 7940 | } |
6041 | 7941 | ||
7942 | if (IS_GEN6(dev)) | ||
7943 | gen6_enable_rps(dev_priv); | ||
7944 | |||
6042 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 7945 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
6043 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 7946 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
6044 | (unsigned long)dev); | 7947 | (unsigned long)dev); |
7948 | } | ||
7949 | |||
7950 | void intel_modeset_gem_init(struct drm_device *dev) | ||
7951 | { | ||
7952 | if (IS_IRONLAKE_M(dev)) | ||
7953 | ironlake_enable_rc6(dev); | ||
6045 | 7954 | ||
6046 | intel_setup_overlay(dev); | 7955 | intel_setup_overlay(dev); |
6047 | } | 7956 | } |
@@ -6052,10 +7961,11 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6052 | struct drm_crtc *crtc; | 7961 | struct drm_crtc *crtc; |
6053 | struct intel_crtc *intel_crtc; | 7962 | struct intel_crtc *intel_crtc; |
6054 | 7963 | ||
7964 | drm_kms_helper_poll_fini(dev); | ||
6055 | mutex_lock(&dev->struct_mutex); | 7965 | mutex_lock(&dev->struct_mutex); |
6056 | 7966 | ||
6057 | drm_kms_helper_poll_fini(dev); | 7967 | intel_unregister_dsm_handler(); |
6058 | intel_fbdev_fini(dev); | 7968 | |
6059 | 7969 | ||
6060 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 7970 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
6061 | /* Skip inactive CRTCs */ | 7971 | /* Skip inactive CRTCs */ |
@@ -6063,67 +7973,52 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6063 | continue; | 7973 | continue; |
6064 | 7974 | ||
6065 | intel_crtc = to_intel_crtc(crtc); | 7975 | intel_crtc = to_intel_crtc(crtc); |
6066 | intel_increase_pllclock(crtc, false); | 7976 | intel_increase_pllclock(crtc); |
6067 | del_timer_sync(&intel_crtc->idle_timer); | ||
6068 | } | 7977 | } |
6069 | 7978 | ||
6070 | del_timer_sync(&dev_priv->idle_timer); | ||
6071 | |||
6072 | if (dev_priv->display.disable_fbc) | 7979 | if (dev_priv->display.disable_fbc) |
6073 | dev_priv->display.disable_fbc(dev); | 7980 | dev_priv->display.disable_fbc(dev); |
6074 | 7981 | ||
6075 | if (dev_priv->renderctx) { | ||
6076 | struct drm_i915_gem_object *obj_priv; | ||
6077 | |||
6078 | obj_priv = to_intel_bo(dev_priv->renderctx); | ||
6079 | I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); | ||
6080 | I915_READ(CCID); | ||
6081 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6082 | drm_gem_object_unreference(dev_priv->renderctx); | ||
6083 | } | ||
6084 | |||
6085 | if (dev_priv->pwrctx) { | ||
6086 | struct drm_i915_gem_object *obj_priv; | ||
6087 | |||
6088 | obj_priv = to_intel_bo(dev_priv->pwrctx); | ||
6089 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); | ||
6090 | I915_READ(PWRCTXA); | ||
6091 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
6092 | drm_gem_object_unreference(dev_priv->pwrctx); | ||
6093 | } | ||
6094 | |||
6095 | if (IS_IRONLAKE_M(dev)) | 7982 | if (IS_IRONLAKE_M(dev)) |
6096 | ironlake_disable_drps(dev); | 7983 | ironlake_disable_drps(dev); |
7984 | if (IS_GEN6(dev)) | ||
7985 | gen6_disable_rps(dev); | ||
7986 | |||
7987 | if (IS_IRONLAKE_M(dev)) | ||
7988 | ironlake_disable_rc6(dev); | ||
6097 | 7989 | ||
6098 | mutex_unlock(&dev->struct_mutex); | 7990 | mutex_unlock(&dev->struct_mutex); |
6099 | 7991 | ||
7992 | /* Disable the irq before mode object teardown, for the irq might | ||
7993 | * enqueue unpin/hotplug work. */ | ||
7994 | drm_irq_uninstall(dev); | ||
7995 | cancel_work_sync(&dev_priv->hotplug_work); | ||
7996 | |||
7997 | /* Shut off idle work before the crtcs get freed. */ | ||
7998 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
7999 | intel_crtc = to_intel_crtc(crtc); | ||
8000 | del_timer_sync(&intel_crtc->idle_timer); | ||
8001 | } | ||
8002 | del_timer_sync(&dev_priv->idle_timer); | ||
8003 | cancel_work_sync(&dev_priv->idle_work); | ||
8004 | |||
6100 | drm_mode_config_cleanup(dev); | 8005 | drm_mode_config_cleanup(dev); |
6101 | } | 8006 | } |
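The reordered cleanup path encodes a strict shutdown order: stop output polling and take struct_mutex, drop FBC and the power-management features (DRPS, RPS, RC6) under the lock, release it, then uninstall the irq before any mode-object teardown (the irq handler can still queue unpin/hotplug work), cancel that work, kill the per-crtc and global idle timers and idle work, and only then call drm_mode_config_cleanup(). The ordering is the point, so the stub program below only illustrates the sequence; the function names are placeholders, not kernel code.

#include <stdio.h>

/* Stubs standing in for the real teardown helpers; only the ordering
 * implied by the hunk above is being demonstrated. */
static void poll_fini(void)           { puts("1. stop output polling"); }
static void release_power_state(void) { puts("2. disable FBC/DRPS/RPS/RC6 under the lock"); }
static void irq_uninstall(void)       { puts("3. uninstall irq before mode teardown"); }
static void cancel_hotplug_work(void) { puts("4. flush hotplug work the irq may have queued"); }
static void stop_idle_timers(void)    { puts("5. kill per-crtc and global idle timers/work"); }
static void mode_config_cleanup(void) { puts("6. finally free the mode objects"); }

int main(void)
{
	/* Asynchronous producers (irq, timers, workqueues) are silenced
	 * before the objects they reference are freed. */
	poll_fini();
	release_power_state();
	irq_uninstall();
	cancel_hotplug_work();
	stop_idle_timers();
	mode_config_cleanup();
	return 0;
}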
6102 | 8007 | ||
6103 | |||
6104 | /* | 8008 | /* |
6105 | * Return which encoder is currently attached to the connector. | 8009 | * Return which encoder is currently attached to the connector. |
6106 | */ | 8010 | */ |
6107 | struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) | 8011 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) |
6108 | { | 8012 | { |
6109 | struct drm_mode_object *obj; | 8013 | return &intel_attached_encoder(connector)->base; |
6110 | struct drm_encoder *encoder; | 8014 | } |
6111 | int i; | ||
6112 | |||
6113 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
6114 | if (connector->encoder_ids[i] == 0) | ||
6115 | break; | ||
6116 | |||
6117 | obj = drm_mode_object_find(connector->dev, | ||
6118 | connector->encoder_ids[i], | ||
6119 | DRM_MODE_OBJECT_ENCODER); | ||
6120 | if (!obj) | ||
6121 | continue; | ||
6122 | 8015 | ||
6123 | encoder = obj_to_encoder(obj); | 8016 | void intel_connector_attach_encoder(struct intel_connector *connector, |
6124 | return encoder; | 8017 | struct intel_encoder *encoder) |
6125 | } | 8018 | { |
6126 | return NULL; | 8019 | connector->encoder = encoder; |
8020 | drm_mode_connector_attach_encoder(&connector->base, | ||
8021 | &encoder->base); | ||
6127 | } | 8022 | } |
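The removed intel_attached_encoder() walked connector->encoder_ids[] and resolved each id through drm_mode_object_find() on every call; the new helpers instead cache the encoder pointer once, at attach time (intel_connector_attach_encoder), so intel_best_encoder() reduces to returning the cached pointer's base encoder. A toy sketch of that caching pattern follows, with invented struct names rather than the real drm/intel types.

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the drm/intel structures; names are illustrative only. */
struct encoder  { const char *name; };
struct connector {
	struct encoder *encoder;	/* cached at attach time */
};

/* Mirrors the idea behind intel_connector_attach_encoder(): remember the
 * encoder once instead of re-resolving object ids on every lookup. */
static void connector_attach_encoder(struct connector *c, struct encoder *e)
{
	c->encoder = e;
}

/* Mirrors intel_best_encoder(): now a plain pointer fetch. */
static struct encoder *best_encoder(struct connector *c)
{
	return c->encoder;
}

int main(void)
{
	struct encoder lvds = { "LVDS" };
	struct connector panel = { NULL };

	connector_attach_encoder(&panel, &lvds);
	printf("best encoder: %s\n", best_encoder(&panel)->name);
	return 0;
}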
6128 | 8023 | ||
6129 | /* | 8024 | /* |
@@ -6142,3 +8037,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) | |||
6142 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); | 8037 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); |
6143 | return 0; | 8038 | return 0; |
6144 | } | 8039 | } |
8040 | |||
8041 | #ifdef CONFIG_DEBUG_FS | ||
8042 | #include <linux/seq_file.h> | ||
8043 | |||
8044 | struct intel_display_error_state { | ||
8045 | struct intel_cursor_error_state { | ||
8046 | u32 control; | ||
8047 | u32 position; | ||
8048 | u32 base; | ||
8049 | u32 size; | ||
8050 | } cursor[2]; | ||
8051 | |||
8052 | struct intel_pipe_error_state { | ||
8053 | u32 conf; | ||
8054 | u32 source; | ||
8055 | |||
8056 | u32 htotal; | ||
8057 | u32 hblank; | ||
8058 | u32 hsync; | ||
8059 | u32 vtotal; | ||
8060 | u32 vblank; | ||
8061 | u32 vsync; | ||
8062 | } pipe[2]; | ||
8063 | |||
8064 | struct intel_plane_error_state { | ||
8065 | u32 control; | ||
8066 | u32 stride; | ||
8067 | u32 size; | ||
8068 | u32 pos; | ||
8069 | u32 addr; | ||
8070 | u32 surface; | ||
8071 | u32 tile_offset; | ||
8072 | } plane[2]; | ||
8073 | }; | ||
8074 | |||
8075 | struct intel_display_error_state * | ||
8076 | intel_display_capture_error_state(struct drm_device *dev) | ||
8077 | { | ||
8078 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
8079 | struct intel_display_error_state *error; | ||
8080 | int i; | ||
8081 | |||
8082 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | ||
8083 | if (error == NULL) | ||
8084 | return NULL; | ||
8085 | |||
8086 | for (i = 0; i < 2; i++) { | ||
8087 | error->cursor[i].control = I915_READ(CURCNTR(i)); | ||
8088 | error->cursor[i].position = I915_READ(CURPOS(i)); | ||
8089 | error->cursor[i].base = I915_READ(CURBASE(i)); | ||
8090 | |||
8091 | error->plane[i].control = I915_READ(DSPCNTR(i)); | ||
8092 | error->plane[i].stride = I915_READ(DSPSTRIDE(i)); | ||
8093 | error->plane[i].size = I915_READ(DSPSIZE(i)); | ||
8094 | error->plane[i].pos = I915_READ(DSPPOS(i)); | ||
8095 | error->plane[i].addr = I915_READ(DSPADDR(i)); | ||
8096 | if (INTEL_INFO(dev)->gen >= 4) { | ||
8097 | error->plane[i].surface = I915_READ(DSPSURF(i)); | ||
8098 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); | ||
8099 | } | ||
8100 | |||
8101 | error->pipe[i].conf = I915_READ(PIPECONF(i)); | ||
8102 | error->pipe[i].source = I915_READ(PIPESRC(i)); | ||
8103 | error->pipe[i].htotal = I915_READ(HTOTAL(i)); | ||
8104 | error->pipe[i].hblank = I915_READ(HBLANK(i)); | ||
8105 | error->pipe[i].hsync = I915_READ(HSYNC(i)); | ||
8106 | error->pipe[i].vtotal = I915_READ(VTOTAL(i)); | ||
8107 | error->pipe[i].vblank = I915_READ(VBLANK(i)); | ||
8108 | error->pipe[i].vsync = I915_READ(VSYNC(i)); | ||
8109 | } | ||
8110 | |||
8111 | return error; | ||
8112 | } | ||
8113 | |||
8114 | void | ||
8115 | intel_display_print_error_state(struct seq_file *m, | ||
8116 | struct drm_device *dev, | ||
8117 | struct intel_display_error_state *error) | ||
8118 | { | ||
8119 | int i; | ||
8120 | |||
8121 | for (i = 0; i < 2; i++) { | ||
8122 | seq_printf(m, "Pipe [%d]:\n", i); | ||
8123 | seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); | ||
8124 | seq_printf(m, " SRC: %08x\n", error->pipe[i].source); | ||
8125 | seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); | ||
8126 | seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); | ||
8127 | seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); | ||
8128 | seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); | ||
8129 | seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); | ||
8130 | seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); | ||
8131 | |||
8132 | seq_printf(m, "Plane [%d]:\n", i); | ||
8133 | seq_printf(m, " CNTR: %08x\n", error->plane[i].control); | ||
8134 | seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); | ||
8135 | seq_printf(m, " SIZE: %08x\n", error->plane[i].size); | ||
8136 | seq_printf(m, " POS: %08x\n", error->plane[i].pos); | ||
8137 | seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); | ||
8138 | if (INTEL_INFO(dev)->gen >= 4) { | ||
8139 | seq_printf(m, " SURF: %08x\n", error->plane[i].surface); | ||
8140 | seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); | ||
8141 | } | ||
8142 | |||
8143 | seq_printf(m, "Cursor [%d]:\n", i); | ||
8144 | seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); | ||
8145 | seq_printf(m, " POS: %08x\n", error->cursor[i].position); | ||
8146 | seq_printf(m, " BASE: %08x\n", error->cursor[i].base); | ||
8147 | } | ||
8148 | } | ||
8149 | #endif | ||
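The debugfs error-state code added above splits the work in two: intel_display_capture_error_state() snapshots the cursor, pipe and plane registers using a GFP_ATOMIC allocation and raw register reads so it can run from an error path, while intel_display_print_error_state() formats the saved snapshot later through seq_printf. Below is a standalone sketch of the same capture-then-print split; the fake register reads and the FILE* (in place of a seq_file) are assumptions made for the example.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: fake "registers" replace I915_READ(), and a FILE*
 * stands in for the seq_file used by the debugfs printer. */
struct pipe_error_state { unsigned int conf, source; };
struct display_error_state { struct pipe_error_state pipe[2]; };

static unsigned int fake_read_reg(int pipe, int reg)	/* pretend MMIO read */
{
	return 0x1000u * (unsigned int)pipe + (unsigned int)reg;
}

/* Step 1: snapshot hardware state; no formatting, cheap enough for an
 * error path (the real code uses GFP_ATOMIC for the same reason). */
static struct display_error_state *capture_error_state(void)
{
	struct display_error_state *error = malloc(sizeof(*error));
	int i;

	if (error == NULL)
		return NULL;
	for (i = 0; i < 2; i++) {
		error->pipe[i].conf = fake_read_reg(i, 0);
		error->pipe[i].source = fake_read_reg(i, 4);
	}
	return error;
}

/* Step 2: pretty-print the snapshot later, from a normal context. */
static void print_error_state(FILE *m, const struct display_error_state *error)
{
	int i;

	for (i = 0; i < 2; i++) {
		fprintf(m, "Pipe [%d]:\n", i);
		fprintf(m, "  CONF: %08x\n", error->pipe[i].conf);
		fprintf(m, "  SRC: %08x\n", error->pipe[i].source);
	}
}

int main(void)
{
	struct display_error_state *error = capture_error_state();

	if (error) {
		print_error_state(stdout, error);
		free(error);
	}
	return 0;
}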