diff options
Diffstat (limited to 'drivers/gpu/drm/i915')
29 files changed, 1762 insertions, 1306 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index b0bacdba6d7e..0f2c5493242b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ | |||
40 | dvo_ivch.o \ | 40 | dvo_ivch.o \ |
41 | dvo_tfp410.o \ | 41 | dvo_tfp410.o \ |
42 | dvo_sil164.o \ | 42 | dvo_sil164.o \ |
43 | dvo_ns2501.o \ | ||
43 | i915_gem_dmabuf.o | 44 | i915_gem_dmabuf.o |
44 | 45 | ||
45 | i915-$(CONFIG_COMPAT) += i915_ioc32.o | 46 | i915-$(CONFIG_COMPAT) += i915_ioc32.o |
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 58914691a77b..0c8ac4d92deb 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h | |||
@@ -140,5 +140,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops; | |||
140 | extern struct intel_dvo_dev_ops ivch_ops; | 140 | extern struct intel_dvo_dev_ops ivch_ops; |
141 | extern struct intel_dvo_dev_ops tfp410_ops; | 141 | extern struct intel_dvo_dev_ops tfp410_ops; |
142 | extern struct intel_dvo_dev_ops ch7017_ops; | 142 | extern struct intel_dvo_dev_ops ch7017_ops; |
143 | extern struct intel_dvo_dev_ops ns2501_ops; | ||
143 | 144 | ||
144 | #endif /* _INTEL_DVO_H */ | 145 | #endif /* _INTEL_DVO_H */ |
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c new file mode 100644 index 000000000000..1a0bad9a5fab --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_ns2501.c | |||
@@ -0,0 +1,582 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter | ||
4 | * | ||
5 | * All Rights Reserved. | ||
6 | * | ||
7 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
8 | * copy of this software and associated documentation files (the | ||
9 | * "Software"), to deal in the Software without restriction, including | ||
10 | * without limitation the rights to use, copy, modify, merge, publish, | ||
11 | * distribute, sub license, and/or sell copies of the Software, and to | ||
12 | * permit persons to whom the Software is furnished to do so, subject to | ||
13 | * the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the | ||
16 | * next paragraph) shall be included in all copies or substantial portions | ||
17 | * of the Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | ||
22 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | ||
24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | ||
25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include "dvo.h" | ||
30 | #include "i915_reg.h" | ||
31 | #include "i915_drv.h" | ||
32 | |||
33 | #define NS2501_VID 0x1305 | ||
34 | #define NS2501_DID 0x6726 | ||
35 | |||
36 | #define NS2501_VID_LO 0x00 | ||
37 | #define NS2501_VID_HI 0x01 | ||
38 | #define NS2501_DID_LO 0x02 | ||
39 | #define NS2501_DID_HI 0x03 | ||
40 | #define NS2501_REV 0x04 | ||
41 | #define NS2501_RSVD 0x05 | ||
42 | #define NS2501_FREQ_LO 0x06 | ||
43 | #define NS2501_FREQ_HI 0x07 | ||
44 | |||
45 | #define NS2501_REG8 0x08 | ||
46 | #define NS2501_8_VEN (1<<5) | ||
47 | #define NS2501_8_HEN (1<<4) | ||
48 | #define NS2501_8_DSEL (1<<3) | ||
49 | #define NS2501_8_BPAS (1<<2) | ||
50 | #define NS2501_8_RSVD (1<<1) | ||
51 | #define NS2501_8_PD (1<<0) | ||
52 | |||
53 | #define NS2501_REG9 0x09 | ||
54 | #define NS2501_9_VLOW (1<<7) | ||
55 | #define NS2501_9_MSEL_MASK (0x7<<4) | ||
56 | #define NS2501_9_TSEL (1<<3) | ||
57 | #define NS2501_9_RSEN (1<<2) | ||
58 | #define NS2501_9_RSVD (1<<1) | ||
59 | #define NS2501_9_MDI (1<<0) | ||
60 | |||
61 | #define NS2501_REGC 0x0c | ||
62 | |||
63 | struct ns2501_priv { | ||
64 | //I2CDevRec d; | ||
65 | bool quiet; | ||
66 | int reg_8_shadow; | ||
67 | int reg_8_set; | ||
68 | // Shadow registers for i915 | ||
69 | int dvoc; | ||
70 | int pll_a; | ||
71 | int srcdim; | ||
72 | int fw_blc; | ||
73 | }; | ||
74 | |||
75 | #define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) | ||
76 | |||
77 | /* | ||
78 | * Include the PLL launcher prototype | ||
79 | */ | ||
80 | extern void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe); | ||
81 | |||
82 | /* | ||
83 | * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens | ||
84 | * laptops does not react on the i2c bus unless | ||
85 | * both the PLL is running and the display is configured in its native | ||
86 | * resolution. | ||
87 | * This function forces the DVO on, and stores the registers it touches. | ||
88 | * Afterwards, registers are restored to regular values. | ||
89 | * | ||
90 | * This is pretty much a hack, though it works. | ||
91 | * Without that, ns2501_readb and ns2501_writeb fail | ||
92 | * when switching the resolution. | ||
93 | */ | ||
94 | |||
95 | static void enable_dvo(struct intel_dvo_device *dvo) | ||
96 | { | ||
97 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); | ||
98 | struct i2c_adapter *adapter = dvo->i2c_bus; | ||
99 | struct intel_gmbus *bus = container_of(adapter, | ||
100 | struct intel_gmbus, | ||
101 | adapter); | ||
102 | struct drm_i915_private *dev_priv = bus->dev_priv; | ||
103 | |||
104 | DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__); | ||
105 | |||
106 | ns->dvoc = I915_READ(DVO_C); | ||
107 | ns->pll_a = I915_READ(_DPLL_A); | ||
108 | ns->srcdim = I915_READ(DVOC_SRCDIM); | ||
109 | ns->fw_blc = I915_READ(FW_BLC); | ||
110 | |||
111 | I915_WRITE(DVOC, 0x10004084); | ||
112 | I915_WRITE(_DPLL_A, 0xd0820000); | ||
113 | I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768 | ||
114 | I915_WRITE(FW_BLC, 0x1080304); | ||
115 | |||
116 | intel_enable_pll(dev_priv, 0); | ||
117 | |||
118 | I915_WRITE(DVOC, 0x90004084); | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * Restore the I915 registers modified by the above | ||
123 | * trigger function. | ||
124 | */ | ||
125 | static void restore_dvo(struct intel_dvo_device *dvo) | ||
126 | { | ||
127 | struct i2c_adapter *adapter = dvo->i2c_bus; | ||
128 | struct intel_gmbus *bus = container_of(adapter, | ||
129 | struct intel_gmbus, | ||
130 | adapter); | ||
131 | struct drm_i915_private *dev_priv = bus->dev_priv; | ||
132 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); | ||
133 | |||
134 | I915_WRITE(DVOC, ns->dvoc); | ||
135 | I915_WRITE(_DPLL_A, ns->pll_a); | ||
136 | I915_WRITE(DVOC_SRCDIM, ns->srcdim); | ||
137 | I915_WRITE(FW_BLC, ns->fw_blc); | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | ** Read a register from the ns2501. | ||
142 | ** Returns true if successful, false otherwise. | ||
143 | ** If it returns false, it might be wise to enable the | ||
144 | ** DVO with the above function. | ||
145 | */ | ||
146 | static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch) | ||
147 | { | ||
148 | struct ns2501_priv *ns = dvo->dev_priv; | ||
149 | struct i2c_adapter *adapter = dvo->i2c_bus; | ||
150 | u8 out_buf[2]; | ||
151 | u8 in_buf[2]; | ||
152 | |||
153 | struct i2c_msg msgs[] = { | ||
154 | { | ||
155 | .addr = dvo->slave_addr, | ||
156 | .flags = 0, | ||
157 | .len = 1, | ||
158 | .buf = out_buf, | ||
159 | }, | ||
160 | { | ||
161 | .addr = dvo->slave_addr, | ||
162 | .flags = I2C_M_RD, | ||
163 | .len = 1, | ||
164 | .buf = in_buf, | ||
165 | } | ||
166 | }; | ||
167 | |||
168 | out_buf[0] = addr; | ||
169 | out_buf[1] = 0; | ||
170 | |||
171 | if (i2c_transfer(adapter, msgs, 2) == 2) { | ||
172 | *ch = in_buf[0]; | ||
173 | return true; | ||
174 | }; | ||
175 | |||
176 | if (!ns->quiet) { | ||
177 | DRM_DEBUG_KMS | ||
178 | ("Unable to read register 0x%02x from %s:0x%02x.\n", addr, | ||
179 | adapter->name, dvo->slave_addr); | ||
180 | } | ||
181 | |||
182 | return false; | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | ** Write a register to the ns2501. | ||
187 | ** Returns true if successful, false otherwise. | ||
188 | ** If it returns false, it might be wise to enable the | ||
189 | ** DVO with the above function. | ||
190 | */ | ||
191 | static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) | ||
192 | { | ||
193 | struct ns2501_priv *ns = dvo->dev_priv; | ||
194 | struct i2c_adapter *adapter = dvo->i2c_bus; | ||
195 | uint8_t out_buf[2]; | ||
196 | |||
197 | struct i2c_msg msg = { | ||
198 | .addr = dvo->slave_addr, | ||
199 | .flags = 0, | ||
200 | .len = 2, | ||
201 | .buf = out_buf, | ||
202 | }; | ||
203 | |||
204 | out_buf[0] = addr; | ||
205 | out_buf[1] = ch; | ||
206 | |||
207 | if (i2c_transfer(adapter, &msg, 1) == 1) { | ||
208 | return true; | ||
209 | } | ||
210 | |||
211 | if (!ns->quiet) { | ||
212 | DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n", | ||
213 | addr, adapter->name, dvo->slave_addr); | ||
214 | } | ||
215 | |||
216 | return false; | ||
217 | } | ||
218 | |||
219 | /* National Semiconductor 2501 driver for chip on i2c bus | ||
220 | * scan for the chip on the bus. | ||
221 | * Hope the VBIOS initialized the PLL correctly so we can | ||
222 | * talk to it. If not, it will not be seen and not detected. | ||
223 | * Bummer! | ||
224 | */ | ||
225 | static bool ns2501_init(struct intel_dvo_device *dvo, | ||
226 | struct i2c_adapter *adapter) | ||
227 | { | ||
228 | /* this will detect the NS2501 chip on the specified i2c bus */ | ||
229 | struct ns2501_priv *ns; | ||
230 | unsigned char ch; | ||
231 | |||
232 | ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL); | ||
233 | if (ns == NULL) | ||
234 | return false; | ||
235 | |||
236 | dvo->i2c_bus = adapter; | ||
237 | dvo->dev_priv = ns; | ||
238 | ns->quiet = true; | ||
239 | |||
240 | if (!ns2501_readb(dvo, NS2501_VID_LO, &ch)) | ||
241 | goto out; | ||
242 | |||
243 | if (ch != (NS2501_VID & 0xff)) { | ||
244 | DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", | ||
245 | ch, adapter->name, dvo->slave_addr); | ||
246 | goto out; | ||
247 | } | ||
248 | |||
249 | if (!ns2501_readb(dvo, NS2501_DID_LO, &ch)) | ||
250 | goto out; | ||
251 | |||
252 | if (ch != (NS2501_DID & 0xff)) { | ||
253 | DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", | ||
254 | ch, adapter->name, dvo->slave_addr); | ||
255 | goto out; | ||
256 | } | ||
257 | ns->quiet = false; | ||
258 | ns->reg_8_set = 0; | ||
259 | ns->reg_8_shadow = | ||
260 | NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN; | ||
261 | |||
262 | DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n"); | ||
263 | return true; | ||
264 | |||
265 | out: | ||
266 | kfree(ns); | ||
267 | return false; | ||
268 | } | ||
269 | |||
270 | static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo) | ||
271 | { | ||
272 | /* | ||
273 | * This is a Laptop display, it doesn't have hotplugging. | ||
274 | * Even if not, the detection bit of the 2501 is unreliable as | ||
275 | * it only works for some display types. | ||
276 | * It is even more unreliable as the PLL must be active for | ||
277 | * allowing reading from the chiop. | ||
278 | */ | ||
279 | return connector_status_connected; | ||
280 | } | ||
281 | |||
282 | static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo, | ||
283 | struct drm_display_mode *mode) | ||
284 | { | ||
285 | DRM_DEBUG_KMS | ||
286 | ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n", | ||
287 | __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay, | ||
288 | mode->vtotal); | ||
289 | |||
290 | /* | ||
291 | * Currently, these are all the modes I have data from. | ||
292 | * More might exist. Unclear how to find the native resolution | ||
293 | * of the panel in here so we could always accept it | ||
294 | * by disabling the scaler. | ||
295 | */ | ||
296 | if ((mode->hdisplay == 800 && mode->vdisplay == 600) || | ||
297 | (mode->hdisplay == 640 && mode->vdisplay == 480) || | ||
298 | (mode->hdisplay == 1024 && mode->vdisplay == 768)) { | ||
299 | return MODE_OK; | ||
300 | } else { | ||
301 | return MODE_ONE_SIZE; /* Is this a reasonable error? */ | ||
302 | } | ||
303 | } | ||
304 | |||
305 | static void ns2501_mode_set(struct intel_dvo_device *dvo, | ||
306 | struct drm_display_mode *mode, | ||
307 | struct drm_display_mode *adjusted_mode) | ||
308 | { | ||
309 | bool ok; | ||
310 | bool restore = false; | ||
311 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); | ||
312 | |||
313 | DRM_DEBUG_KMS | ||
314 | ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", | ||
315 | __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay, | ||
316 | mode->vtotal); | ||
317 | |||
318 | /* | ||
319 | * Where do I find the native resolution for which scaling is not required??? | ||
320 | * | ||
321 | * First trigger the DVO on as otherwise the chip does not appear on the i2c | ||
322 | * bus. | ||
323 | */ | ||
324 | do { | ||
325 | ok = true; | ||
326 | |||
327 | if (mode->hdisplay == 800 && mode->vdisplay == 600) { | ||
328 | /* mode 277 */ | ||
329 | ns->reg_8_shadow &= ~NS2501_8_BPAS; | ||
330 | DRM_DEBUG_KMS("%s: switching to 800x600\n", | ||
331 | __FUNCTION__); | ||
332 | |||
333 | /* | ||
334 | * No, I do not know where this data comes from. | ||
335 | * It is just what the video bios left in the DVO, so | ||
336 | * I'm just copying it here over. | ||
337 | * This also means that I cannot support any other modes | ||
338 | * except the ones supported by the bios. | ||
339 | */ | ||
340 | ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works. | ||
341 | ok &= ns2501_writeb(dvo, 0x1b, 0x19); | ||
342 | ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer | ||
343 | ok &= ns2501_writeb(dvo, 0x1d, 0x02); | ||
344 | |||
345 | ok &= ns2501_writeb(dvo, 0x34, 0x03); | ||
346 | ok &= ns2501_writeb(dvo, 0x35, 0xff); | ||
347 | |||
348 | ok &= ns2501_writeb(dvo, 0x80, 0x27); | ||
349 | ok &= ns2501_writeb(dvo, 0x81, 0x03); | ||
350 | ok &= ns2501_writeb(dvo, 0x82, 0x41); | ||
351 | ok &= ns2501_writeb(dvo, 0x83, 0x05); | ||
352 | |||
353 | ok &= ns2501_writeb(dvo, 0x8d, 0x02); | ||
354 | ok &= ns2501_writeb(dvo, 0x8e, 0x04); | ||
355 | ok &= ns2501_writeb(dvo, 0x8f, 0x00); | ||
356 | |||
357 | ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */ | ||
358 | ok &= ns2501_writeb(dvo, 0x91, 0x07); | ||
359 | ok &= ns2501_writeb(dvo, 0x94, 0x00); | ||
360 | ok &= ns2501_writeb(dvo, 0x95, 0x00); | ||
361 | |||
362 | ok &= ns2501_writeb(dvo, 0x96, 0x00); | ||
363 | |||
364 | ok &= ns2501_writeb(dvo, 0x99, 0x00); | ||
365 | ok &= ns2501_writeb(dvo, 0x9a, 0x88); | ||
366 | |||
367 | ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */ | ||
368 | ok &= ns2501_writeb(dvo, 0x9d, 0x00); | ||
369 | ok &= ns2501_writeb(dvo, 0x9e, 0x25); | ||
370 | ok &= ns2501_writeb(dvo, 0x9f, 0x03); | ||
371 | |||
372 | ok &= ns2501_writeb(dvo, 0xa4, 0x80); | ||
373 | |||
374 | ok &= ns2501_writeb(dvo, 0xb6, 0x00); | ||
375 | |||
376 | ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */ | ||
377 | ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */ | ||
378 | |||
379 | ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */ | ||
380 | ok &= ns2501_writeb(dvo, 0xc1, 0xd7); | ||
381 | |||
382 | ok &= ns2501_writeb(dvo, 0xc2, 0x00); | ||
383 | ok &= ns2501_writeb(dvo, 0xc3, 0xf8); | ||
384 | |||
385 | ok &= ns2501_writeb(dvo, 0xc4, 0x03); | ||
386 | ok &= ns2501_writeb(dvo, 0xc5, 0x1a); | ||
387 | |||
388 | ok &= ns2501_writeb(dvo, 0xc6, 0x00); | ||
389 | ok &= ns2501_writeb(dvo, 0xc7, 0x73); | ||
390 | ok &= ns2501_writeb(dvo, 0xc8, 0x02); | ||
391 | |||
392 | } else if (mode->hdisplay == 640 && mode->vdisplay == 480) { | ||
393 | /* mode 274 */ | ||
394 | DRM_DEBUG_KMS("%s: switching to 640x480\n", | ||
395 | __FUNCTION__); | ||
396 | /* | ||
397 | * No, I do not know where this data comes from. | ||
398 | * It is just what the video bios left in the DVO, so | ||
399 | * I'm just copying it here over. | ||
400 | * This also means that I cannot support any other modes | ||
401 | * except the ones supported by the bios. | ||
402 | */ | ||
403 | ns->reg_8_shadow &= ~NS2501_8_BPAS; | ||
404 | |||
405 | ok &= ns2501_writeb(dvo, 0x11, 0xa0); | ||
406 | ok &= ns2501_writeb(dvo, 0x1b, 0x11); | ||
407 | ok &= ns2501_writeb(dvo, 0x1c, 0x54); | ||
408 | ok &= ns2501_writeb(dvo, 0x1d, 0x03); | ||
409 | |||
410 | ok &= ns2501_writeb(dvo, 0x34, 0x03); | ||
411 | ok &= ns2501_writeb(dvo, 0x35, 0xff); | ||
412 | |||
413 | ok &= ns2501_writeb(dvo, 0x80, 0xff); | ||
414 | ok &= ns2501_writeb(dvo, 0x81, 0x07); | ||
415 | ok &= ns2501_writeb(dvo, 0x82, 0x3d); | ||
416 | ok &= ns2501_writeb(dvo, 0x83, 0x05); | ||
417 | |||
418 | ok &= ns2501_writeb(dvo, 0x8d, 0x02); | ||
419 | ok &= ns2501_writeb(dvo, 0x8e, 0x10); | ||
420 | ok &= ns2501_writeb(dvo, 0x8f, 0x00); | ||
421 | |||
422 | ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */ | ||
423 | ok &= ns2501_writeb(dvo, 0x91, 0x07); | ||
424 | ok &= ns2501_writeb(dvo, 0x94, 0x00); | ||
425 | ok &= ns2501_writeb(dvo, 0x95, 0x00); | ||
426 | |||
427 | ok &= ns2501_writeb(dvo, 0x96, 0x05); | ||
428 | |||
429 | ok &= ns2501_writeb(dvo, 0x99, 0x00); | ||
430 | ok &= ns2501_writeb(dvo, 0x9a, 0x88); | ||
431 | |||
432 | ok &= ns2501_writeb(dvo, 0x9c, 0x24); | ||
433 | ok &= ns2501_writeb(dvo, 0x9d, 0x00); | ||
434 | ok &= ns2501_writeb(dvo, 0x9e, 0x25); | ||
435 | ok &= ns2501_writeb(dvo, 0x9f, 0x03); | ||
436 | |||
437 | ok &= ns2501_writeb(dvo, 0xa4, 0x84); | ||
438 | |||
439 | ok &= ns2501_writeb(dvo, 0xb6, 0x09); | ||
440 | |||
441 | ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */ | ||
442 | ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */ | ||
443 | |||
444 | ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */ | ||
445 | ok &= ns2501_writeb(dvo, 0xc1, 0x90); | ||
446 | |||
447 | ok &= ns2501_writeb(dvo, 0xc2, 0x00); | ||
448 | ok &= ns2501_writeb(dvo, 0xc3, 0x0f); | ||
449 | |||
450 | ok &= ns2501_writeb(dvo, 0xc4, 0x03); | ||
451 | ok &= ns2501_writeb(dvo, 0xc5, 0x16); | ||
452 | |||
453 | ok &= ns2501_writeb(dvo, 0xc6, 0x00); | ||
454 | ok &= ns2501_writeb(dvo, 0xc7, 0x02); | ||
455 | ok &= ns2501_writeb(dvo, 0xc8, 0x02); | ||
456 | |||
457 | } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) { | ||
458 | /* mode 280 */ | ||
459 | DRM_DEBUG_KMS("%s: switching to 1024x768\n", | ||
460 | __FUNCTION__); | ||
461 | /* | ||
462 | * This might or might not work, actually. I'm silently | ||
463 | * assuming here that the native panel resolution is | ||
464 | * 1024x768. If not, then this leaves the scaler disabled | ||
465 | * generating a picture that is likely not the expected. | ||
466 | * | ||
467 | * Problem is that I do not know where to take the panel | ||
468 | * dimensions from. | ||
469 | * | ||
470 | * Enable the bypass, scaling not required. | ||
471 | * | ||
472 | * The scaler registers are irrelevant here.... | ||
473 | * | ||
474 | */ | ||
475 | ns->reg_8_shadow |= NS2501_8_BPAS; | ||
476 | ok &= ns2501_writeb(dvo, 0x37, 0x44); | ||
477 | } else { | ||
478 | /* | ||
479 | * Data not known. Bummer! | ||
480 | * Hopefully, the code should not go here | ||
481 | * as mode_OK delivered no other modes. | ||
482 | */ | ||
483 | ns->reg_8_shadow |= NS2501_8_BPAS; | ||
484 | } | ||
485 | ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow); | ||
486 | |||
487 | if (!ok) { | ||
488 | if (restore) | ||
489 | restore_dvo(dvo); | ||
490 | enable_dvo(dvo); | ||
491 | restore = true; | ||
492 | } | ||
493 | } while (!ok); | ||
494 | /* | ||
495 | * Restore the old i915 registers before | ||
496 | * forcing the ns2501 on. | ||
497 | */ | ||
498 | if (restore) | ||
499 | restore_dvo(dvo); | ||
500 | } | ||
501 | |||
502 | /* set the NS2501 power state */ | ||
503 | static void ns2501_dpms(struct intel_dvo_device *dvo, int mode) | ||
504 | { | ||
505 | bool ok; | ||
506 | bool restore = false; | ||
507 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); | ||
508 | unsigned char ch; | ||
509 | |||
510 | DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %d\n", | ||
511 | __FUNCTION__, mode); | ||
512 | |||
513 | ch = ns->reg_8_shadow; | ||
514 | |||
515 | if (mode == DRM_MODE_DPMS_ON) | ||
516 | ch |= NS2501_8_PD; | ||
517 | else | ||
518 | ch &= ~NS2501_8_PD; | ||
519 | |||
520 | if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) { | ||
521 | ns->reg_8_set = 1; | ||
522 | ns->reg_8_shadow = ch; | ||
523 | |||
524 | do { | ||
525 | ok = true; | ||
526 | ok &= ns2501_writeb(dvo, NS2501_REG8, ch); | ||
527 | ok &= | ||
528 | ns2501_writeb(dvo, 0x34, | ||
529 | (mode == | ||
530 | DRM_MODE_DPMS_ON) ? (0x03) : (0x00)); | ||
531 | ok &= | ||
532 | ns2501_writeb(dvo, 0x35, | ||
533 | (mode == | ||
534 | DRM_MODE_DPMS_ON) ? (0xff) : (0x00)); | ||
535 | if (!ok) { | ||
536 | if (restore) | ||
537 | restore_dvo(dvo); | ||
538 | enable_dvo(dvo); | ||
539 | restore = true; | ||
540 | } | ||
541 | } while (!ok); | ||
542 | |||
543 | if (restore) | ||
544 | restore_dvo(dvo); | ||
545 | } | ||
546 | } | ||
547 | |||
548 | static void ns2501_dump_regs(struct intel_dvo_device *dvo) | ||
549 | { | ||
550 | uint8_t val; | ||
551 | |||
552 | ns2501_readb(dvo, NS2501_FREQ_LO, &val); | ||
553 | DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val); | ||
554 | ns2501_readb(dvo, NS2501_FREQ_HI, &val); | ||
555 | DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val); | ||
556 | ns2501_readb(dvo, NS2501_REG8, &val); | ||
557 | DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val); | ||
558 | ns2501_readb(dvo, NS2501_REG9, &val); | ||
559 | DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val); | ||
560 | ns2501_readb(dvo, NS2501_REGC, &val); | ||
561 | DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val); | ||
562 | } | ||
563 | |||
564 | static void ns2501_destroy(struct intel_dvo_device *dvo) | ||
565 | { | ||
566 | struct ns2501_priv *ns = dvo->dev_priv; | ||
567 | |||
568 | if (ns) { | ||
569 | kfree(ns); | ||
570 | dvo->dev_priv = NULL; | ||
571 | } | ||
572 | } | ||
573 | |||
574 | struct intel_dvo_dev_ops ns2501_ops = { | ||
575 | .init = ns2501_init, | ||
576 | .detect = ns2501_detect, | ||
577 | .mode_valid = ns2501_mode_valid, | ||
578 | .mode_set = ns2501_mode_set, | ||
579 | .dpms = ns2501_dpms, | ||
580 | .dump_regs = ns2501_dump_regs, | ||
581 | .destroy = ns2501_destroy, | ||
582 | }; | ||
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 359f6e8b9b00..a18e93687b8b 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -44,7 +44,6 @@ | |||
44 | 44 | ||
45 | enum { | 45 | enum { |
46 | ACTIVE_LIST, | 46 | ACTIVE_LIST, |
47 | FLUSHING_LIST, | ||
48 | INACTIVE_LIST, | 47 | INACTIVE_LIST, |
49 | PINNED_LIST, | 48 | PINNED_LIST, |
50 | }; | 49 | }; |
@@ -62,28 +61,11 @@ static int i915_capabilities(struct seq_file *m, void *data) | |||
62 | 61 | ||
63 | seq_printf(m, "gen: %d\n", info->gen); | 62 | seq_printf(m, "gen: %d\n", info->gen); |
64 | seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); | 63 | seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); |
65 | #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) | 64 | #define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) |
66 | B(is_mobile); | 65 | #define DEV_INFO_SEP ; |
67 | B(is_i85x); | 66 | DEV_INFO_FLAGS; |
68 | B(is_i915g); | 67 | #undef DEV_INFO_FLAG |
69 | B(is_i945gm); | 68 | #undef DEV_INFO_SEP |
70 | B(is_g33); | ||
71 | B(need_gfx_hws); | ||
72 | B(is_g4x); | ||
73 | B(is_pineview); | ||
74 | B(is_broadwater); | ||
75 | B(is_crestline); | ||
76 | B(has_fbc); | ||
77 | B(has_pipe_cxsr); | ||
78 | B(has_hotplug); | ||
79 | B(cursor_needs_physical); | ||
80 | B(has_overlay); | ||
81 | B(overlay_needs_physical); | ||
82 | B(supports_tv); | ||
83 | B(has_bsd_ring); | ||
84 | B(has_blt_ring); | ||
85 | B(has_llc); | ||
86 | #undef B | ||
87 | 69 | ||
88 | return 0; | 70 | return 0; |
89 | } | 71 | } |
@@ -121,14 +103,15 @@ static const char *cache_level_str(int type) | |||
121 | static void | 103 | static void |
122 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | 104 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) |
123 | { | 105 | { |
124 | seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s", | 106 | seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", |
125 | &obj->base, | 107 | &obj->base, |
126 | get_pin_flag(obj), | 108 | get_pin_flag(obj), |
127 | get_tiling_flag(obj), | 109 | get_tiling_flag(obj), |
128 | obj->base.size / 1024, | 110 | obj->base.size / 1024, |
129 | obj->base.read_domains, | 111 | obj->base.read_domains, |
130 | obj->base.write_domain, | 112 | obj->base.write_domain, |
131 | obj->last_rendering_seqno, | 113 | obj->last_read_seqno, |
114 | obj->last_write_seqno, | ||
132 | obj->last_fenced_seqno, | 115 | obj->last_fenced_seqno, |
133 | cache_level_str(obj->cache_level), | 116 | cache_level_str(obj->cache_level), |
134 | obj->dirty ? " dirty" : "", | 117 | obj->dirty ? " dirty" : "", |
@@ -177,10 +160,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
177 | seq_printf(m, "Inactive:\n"); | 160 | seq_printf(m, "Inactive:\n"); |
178 | head = &dev_priv->mm.inactive_list; | 161 | head = &dev_priv->mm.inactive_list; |
179 | break; | 162 | break; |
180 | case FLUSHING_LIST: | ||
181 | seq_printf(m, "Flushing:\n"); | ||
182 | head = &dev_priv->mm.flushing_list; | ||
183 | break; | ||
184 | default: | 163 | default: |
185 | mutex_unlock(&dev->struct_mutex); | 164 | mutex_unlock(&dev->struct_mutex); |
186 | return -EINVAL; | 165 | return -EINVAL; |
@@ -238,7 +217,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
238 | 217 | ||
239 | size = count = mappable_size = mappable_count = 0; | 218 | size = count = mappable_size = mappable_count = 0; |
240 | count_objects(&dev_priv->mm.active_list, mm_list); | 219 | count_objects(&dev_priv->mm.active_list, mm_list); |
241 | count_objects(&dev_priv->mm.flushing_list, mm_list); | ||
242 | seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", | 220 | seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", |
243 | count, mappable_count, size, mappable_size); | 221 | count, mappable_count, size, mappable_size); |
244 | 222 | ||
@@ -413,7 +391,7 @@ static void i915_ring_seqno_info(struct seq_file *m, | |||
413 | { | 391 | { |
414 | if (ring->get_seqno) { | 392 | if (ring->get_seqno) { |
415 | seq_printf(m, "Current sequence (%s): %d\n", | 393 | seq_printf(m, "Current sequence (%s): %d\n", |
416 | ring->name, ring->get_seqno(ring)); | 394 | ring->name, ring->get_seqno(ring, false)); |
417 | } | 395 | } |
418 | } | 396 | } |
419 | 397 | ||
@@ -630,12 +608,12 @@ static void print_error_buffers(struct seq_file *m, | |||
630 | seq_printf(m, "%s [%d]:\n", name, count); | 608 | seq_printf(m, "%s [%d]:\n", name, count); |
631 | 609 | ||
632 | while (count--) { | 610 | while (count--) { |
633 | seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s", | 611 | seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s", |
634 | err->gtt_offset, | 612 | err->gtt_offset, |
635 | err->size, | 613 | err->size, |
636 | err->read_domains, | 614 | err->read_domains, |
637 | err->write_domain, | 615 | err->write_domain, |
638 | err->seqno, | 616 | err->rseqno, err->wseqno, |
639 | pin_flag(err->pinned), | 617 | pin_flag(err->pinned), |
640 | tiling_flag(err->tiling), | 618 | tiling_flag(err->tiling), |
641 | dirty_flag(err->dirty), | 619 | dirty_flag(err->dirty), |
@@ -799,10 +777,14 @@ i915_error_state_write(struct file *filp, | |||
799 | struct seq_file *m = filp->private_data; | 777 | struct seq_file *m = filp->private_data; |
800 | struct i915_error_state_file_priv *error_priv = m->private; | 778 | struct i915_error_state_file_priv *error_priv = m->private; |
801 | struct drm_device *dev = error_priv->dev; | 779 | struct drm_device *dev = error_priv->dev; |
780 | int ret; | ||
802 | 781 | ||
803 | DRM_DEBUG_DRIVER("Resetting error state\n"); | 782 | DRM_DEBUG_DRIVER("Resetting error state\n"); |
804 | 783 | ||
805 | mutex_lock(&dev->struct_mutex); | 784 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
785 | if (ret) | ||
786 | return ret; | ||
787 | |||
806 | i915_destroy_error_state(dev); | 788 | i915_destroy_error_state(dev); |
807 | mutex_unlock(&dev->struct_mutex); | 789 | mutex_unlock(&dev->struct_mutex); |
808 | 790 | ||
@@ -1292,7 +1274,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
1292 | 1274 | ||
1293 | seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); | 1275 | seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); |
1294 | 1276 | ||
1295 | for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; | 1277 | for (gpu_freq = dev_priv->rps.min_delay; |
1278 | gpu_freq <= dev_priv->rps.max_delay; | ||
1296 | gpu_freq++) { | 1279 | gpu_freq++) { |
1297 | I915_WRITE(GEN6_PCODE_DATA, gpu_freq); | 1280 | I915_WRITE(GEN6_PCODE_DATA, gpu_freq); |
1298 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | | 1281 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | |
@@ -1472,8 +1455,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data) | |||
1472 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 1455 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
1473 | struct drm_device *dev = node->minor->dev; | 1456 | struct drm_device *dev = node->minor->dev; |
1474 | struct drm_i915_private *dev_priv = dev->dev_private; | 1457 | struct drm_i915_private *dev_priv = dev->dev_private; |
1458 | int ret; | ||
1459 | |||
1460 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1461 | if (ret) | ||
1462 | return ret; | ||
1475 | 1463 | ||
1476 | mutex_lock(&dev->struct_mutex); | ||
1477 | seq_printf(m, "bit6 swizzle for X-tiling = %s\n", | 1464 | seq_printf(m, "bit6 swizzle for X-tiling = %s\n", |
1478 | swizzle_string(dev_priv->mm.bit_6_swizzle_x)); | 1465 | swizzle_string(dev_priv->mm.bit_6_swizzle_x)); |
1479 | seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", | 1466 | seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", |
@@ -1674,7 +1661,7 @@ i915_ring_stop_write(struct file *filp, | |||
1674 | struct drm_device *dev = filp->private_data; | 1661 | struct drm_device *dev = filp->private_data; |
1675 | struct drm_i915_private *dev_priv = dev->dev_private; | 1662 | struct drm_i915_private *dev_priv = dev->dev_private; |
1676 | char buf[20]; | 1663 | char buf[20]; |
1677 | int val = 0; | 1664 | int val = 0, ret; |
1678 | 1665 | ||
1679 | if (cnt > 0) { | 1666 | if (cnt > 0) { |
1680 | if (cnt > sizeof(buf) - 1) | 1667 | if (cnt > sizeof(buf) - 1) |
@@ -1689,7 +1676,10 @@ i915_ring_stop_write(struct file *filp, | |||
1689 | 1676 | ||
1690 | DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val); | 1677 | DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val); |
1691 | 1678 | ||
1692 | mutex_lock(&dev->struct_mutex); | 1679 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
1680 | if (ret) | ||
1681 | return ret; | ||
1682 | |||
1693 | dev_priv->stop_rings = val; | 1683 | dev_priv->stop_rings = val; |
1694 | mutex_unlock(&dev->struct_mutex); | 1684 | mutex_unlock(&dev->struct_mutex); |
1695 | 1685 | ||
@@ -1713,10 +1703,18 @@ i915_max_freq_read(struct file *filp, | |||
1713 | struct drm_device *dev = filp->private_data; | 1703 | struct drm_device *dev = filp->private_data; |
1714 | drm_i915_private_t *dev_priv = dev->dev_private; | 1704 | drm_i915_private_t *dev_priv = dev->dev_private; |
1715 | char buf[80]; | 1705 | char buf[80]; |
1716 | int len; | 1706 | int len, ret; |
1707 | |||
1708 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | ||
1709 | return -ENODEV; | ||
1710 | |||
1711 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1712 | if (ret) | ||
1713 | return ret; | ||
1717 | 1714 | ||
1718 | len = snprintf(buf, sizeof(buf), | 1715 | len = snprintf(buf, sizeof(buf), |
1719 | "max freq: %d\n", dev_priv->max_delay * 50); | 1716 | "max freq: %d\n", dev_priv->rps.max_delay * 50); |
1717 | mutex_unlock(&dev->struct_mutex); | ||
1720 | 1718 | ||
1721 | if (len > sizeof(buf)) | 1719 | if (len > sizeof(buf)) |
1722 | len = sizeof(buf); | 1720 | len = sizeof(buf); |
@@ -1733,7 +1731,10 @@ i915_max_freq_write(struct file *filp, | |||
1733 | struct drm_device *dev = filp->private_data; | 1731 | struct drm_device *dev = filp->private_data; |
1734 | struct drm_i915_private *dev_priv = dev->dev_private; | 1732 | struct drm_i915_private *dev_priv = dev->dev_private; |
1735 | char buf[20]; | 1733 | char buf[20]; |
1736 | int val = 1; | 1734 | int val = 1, ret; |
1735 | |||
1736 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | ||
1737 | return -ENODEV; | ||
1737 | 1738 | ||
1738 | if (cnt > 0) { | 1739 | if (cnt > 0) { |
1739 | if (cnt > sizeof(buf) - 1) | 1740 | if (cnt > sizeof(buf) - 1) |
@@ -1748,12 +1749,17 @@ i915_max_freq_write(struct file *filp, | |||
1748 | 1749 | ||
1749 | DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); | 1750 | DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); |
1750 | 1751 | ||
1752 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1753 | if (ret) | ||
1754 | return ret; | ||
1755 | |||
1751 | /* | 1756 | /* |
1752 | * Turbo will still be enabled, but won't go above the set value. | 1757 | * Turbo will still be enabled, but won't go above the set value. |
1753 | */ | 1758 | */ |
1754 | dev_priv->max_delay = val / 50; | 1759 | dev_priv->rps.max_delay = val / 50; |
1755 | 1760 | ||
1756 | gen6_set_rps(dev, val / 50); | 1761 | gen6_set_rps(dev, val / 50); |
1762 | mutex_unlock(&dev->struct_mutex); | ||
1757 | 1763 | ||
1758 | return cnt; | 1764 | return cnt; |
1759 | } | 1765 | } |
@@ -1773,10 +1779,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, | |||
1773 | struct drm_device *dev = filp->private_data; | 1779 | struct drm_device *dev = filp->private_data; |
1774 | drm_i915_private_t *dev_priv = dev->dev_private; | 1780 | drm_i915_private_t *dev_priv = dev->dev_private; |
1775 | char buf[80]; | 1781 | char buf[80]; |
1776 | int len; | 1782 | int len, ret; |
1783 | |||
1784 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | ||
1785 | return -ENODEV; | ||
1786 | |||
1787 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1788 | if (ret) | ||
1789 | return ret; | ||
1777 | 1790 | ||
1778 | len = snprintf(buf, sizeof(buf), | 1791 | len = snprintf(buf, sizeof(buf), |
1779 | "min freq: %d\n", dev_priv->min_delay * 50); | 1792 | "min freq: %d\n", dev_priv->rps.min_delay * 50); |
1793 | mutex_unlock(&dev->struct_mutex); | ||
1780 | 1794 | ||
1781 | if (len > sizeof(buf)) | 1795 | if (len > sizeof(buf)) |
1782 | len = sizeof(buf); | 1796 | len = sizeof(buf); |
@@ -1791,7 +1805,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
1791 | struct drm_device *dev = filp->private_data; | 1805 | struct drm_device *dev = filp->private_data; |
1792 | struct drm_i915_private *dev_priv = dev->dev_private; | 1806 | struct drm_i915_private *dev_priv = dev->dev_private; |
1793 | char buf[20]; | 1807 | char buf[20]; |
1794 | int val = 1; | 1808 | int val = 1, ret; |
1809 | |||
1810 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | ||
1811 | return -ENODEV; | ||
1795 | 1812 | ||
1796 | if (cnt > 0) { | 1813 | if (cnt > 0) { |
1797 | if (cnt > sizeof(buf) - 1) | 1814 | if (cnt > sizeof(buf) - 1) |
@@ -1806,12 +1823,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
1806 | 1823 | ||
1807 | DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); | 1824 | DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); |
1808 | 1825 | ||
1826 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1827 | if (ret) | ||
1828 | return ret; | ||
1829 | |||
1809 | /* | 1830 | /* |
1810 | * Turbo will still be enabled, but won't go below the set value. | 1831 | * Turbo will still be enabled, but won't go below the set value. |
1811 | */ | 1832 | */ |
1812 | dev_priv->min_delay = val / 50; | 1833 | dev_priv->rps.min_delay = val / 50; |
1813 | 1834 | ||
1814 | gen6_set_rps(dev, val / 50); | 1835 | gen6_set_rps(dev, val / 50); |
1836 | mutex_unlock(&dev->struct_mutex); | ||
1815 | 1837 | ||
1816 | return cnt; | 1838 | return cnt; |
1817 | } | 1839 | } |
@@ -1834,9 +1856,15 @@ i915_cache_sharing_read(struct file *filp, | |||
1834 | drm_i915_private_t *dev_priv = dev->dev_private; | 1856 | drm_i915_private_t *dev_priv = dev->dev_private; |
1835 | char buf[80]; | 1857 | char buf[80]; |
1836 | u32 snpcr; | 1858 | u32 snpcr; |
1837 | int len; | 1859 | int len, ret; |
1860 | |||
1861 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | ||
1862 | return -ENODEV; | ||
1863 | |||
1864 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1865 | if (ret) | ||
1866 | return ret; | ||
1838 | 1867 | ||
1839 | mutex_lock(&dev_priv->dev->struct_mutex); | ||
1840 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); | 1868 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); |
1841 | mutex_unlock(&dev_priv->dev->struct_mutex); | 1869 | mutex_unlock(&dev_priv->dev->struct_mutex); |
1842 | 1870 | ||
@@ -1862,6 +1890,9 @@ i915_cache_sharing_write(struct file *filp, | |||
1862 | u32 snpcr; | 1890 | u32 snpcr; |
1863 | int val = 1; | 1891 | int val = 1; |
1864 | 1892 | ||
1893 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | ||
1894 | return -ENODEV; | ||
1895 | |||
1865 | if (cnt > 0) { | 1896 | if (cnt > 0) { |
1866 | if (cnt > sizeof(buf) - 1) | 1897 | if (cnt > sizeof(buf) - 1) |
1867 | return -EINVAL; | 1898 | return -EINVAL; |
@@ -1925,16 +1956,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) | |||
1925 | { | 1956 | { |
1926 | struct drm_device *dev = inode->i_private; | 1957 | struct drm_device *dev = inode->i_private; |
1927 | struct drm_i915_private *dev_priv = dev->dev_private; | 1958 | struct drm_i915_private *dev_priv = dev->dev_private; |
1928 | int ret; | ||
1929 | 1959 | ||
1930 | if (INTEL_INFO(dev)->gen < 6) | 1960 | if (INTEL_INFO(dev)->gen < 6) |
1931 | return 0; | 1961 | return 0; |
1932 | 1962 | ||
1933 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1934 | if (ret) | ||
1935 | return ret; | ||
1936 | gen6_gt_force_wake_get(dev_priv); | 1963 | gen6_gt_force_wake_get(dev_priv); |
1937 | mutex_unlock(&dev->struct_mutex); | ||
1938 | 1964 | ||
1939 | return 0; | 1965 | return 0; |
1940 | } | 1966 | } |
@@ -1947,16 +1973,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) | |||
1947 | if (INTEL_INFO(dev)->gen < 6) | 1973 | if (INTEL_INFO(dev)->gen < 6) |
1948 | return 0; | 1974 | return 0; |
1949 | 1975 | ||
1950 | /* | ||
1951 | * It's bad that we can potentially hang userspace if struct_mutex gets | ||
1952 | * forever stuck. However, if we cannot acquire this lock it means that | ||
1953 | * almost certainly the driver has hung, is not unload-able. Therefore | ||
1954 | * hanging here is probably a minor inconvenience not to be seen my | ||
1955 | * almost every user. | ||
1956 | */ | ||
1957 | mutex_lock(&dev->struct_mutex); | ||
1958 | gen6_gt_force_wake_put(dev_priv); | 1976 | gen6_gt_force_wake_put(dev_priv); |
1959 | mutex_unlock(&dev->struct_mutex); | ||
1960 | 1977 | ||
1961 | return 0; | 1978 | return 0; |
1962 | } | 1979 | } |
@@ -2006,7 +2023,6 @@ static struct drm_info_list i915_debugfs_list[] = { | |||
2006 | {"i915_gem_gtt", i915_gem_gtt_info, 0}, | 2023 | {"i915_gem_gtt", i915_gem_gtt_info, 0}, |
2007 | {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, | 2024 | {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, |
2008 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | 2025 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, |
2009 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, | ||
2010 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, | 2026 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, |
2011 | {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, | 2027 | {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, |
2012 | {"i915_gem_request", i915_gem_request_info, 0}, | 2028 | {"i915_gem_request", i915_gem_request_info, 0}, |
@@ -2067,6 +2083,7 @@ int i915_debugfs_init(struct drm_minor *minor) | |||
2067 | &i915_cache_sharing_fops); | 2083 | &i915_cache_sharing_fops); |
2068 | if (ret) | 2084 | if (ret) |
2069 | return ret; | 2085 | return ret; |
2086 | |||
2070 | ret = i915_debugfs_create(minor->debugfs_root, minor, | 2087 | ret = i915_debugfs_create(minor->debugfs_root, minor, |
2071 | "i915_ring_stop", | 2088 | "i915_ring_stop", |
2072 | &i915_ring_stop_fops); | 2089 | &i915_ring_stop_fops); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9cf7dfe022b9..0a1b64f8d442 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1009,6 +1009,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
1009 | case I915_PARAM_HAS_WAIT_TIMEOUT: | 1009 | case I915_PARAM_HAS_WAIT_TIMEOUT: |
1010 | value = 1; | 1010 | value = 1; |
1011 | break; | 1011 | break; |
1012 | case I915_PARAM_HAS_SEMAPHORES: | ||
1013 | value = i915_semaphore_is_enabled(dev); | ||
1014 | break; | ||
1012 | default: | 1015 | default: |
1013 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 1016 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
1014 | param->param); | 1017 | param->param); |
@@ -1425,6 +1428,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | |||
1425 | kfree(ap); | 1428 | kfree(ap); |
1426 | } | 1429 | } |
1427 | 1430 | ||
1431 | static void i915_dump_device_info(struct drm_i915_private *dev_priv) | ||
1432 | { | ||
1433 | const struct intel_device_info *info = dev_priv->info; | ||
1434 | |||
1435 | #define DEV_INFO_FLAG(name) info->name ? #name "," : "" | ||
1436 | #define DEV_INFO_SEP , | ||
1437 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" | ||
1438 | "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", | ||
1439 | info->gen, | ||
1440 | dev_priv->dev->pdev->device, | ||
1441 | DEV_INFO_FLAGS); | ||
1442 | #undef DEV_INFO_FLAG | ||
1443 | #undef DEV_INFO_SEP | ||
1444 | } | ||
1445 | |||
1428 | /** | 1446 | /** |
1429 | * i915_driver_load - setup chip and create an initial config | 1447 | * i915_driver_load - setup chip and create an initial config |
1430 | * @dev: DRM device | 1448 | * @dev: DRM device |
@@ -1449,7 +1467,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1449 | if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) | 1467 | if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) |
1450 | return -ENODEV; | 1468 | return -ENODEV; |
1451 | 1469 | ||
1452 | |||
1453 | /* i915 has 4 more counters */ | 1470 | /* i915 has 4 more counters */ |
1454 | dev->counters += 4; | 1471 | dev->counters += 4; |
1455 | dev->types[6] = _DRM_STAT_IRQ; | 1472 | dev->types[6] = _DRM_STAT_IRQ; |
@@ -1465,6 +1482,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1465 | dev_priv->dev = dev; | 1482 | dev_priv->dev = dev; |
1466 | dev_priv->info = info; | 1483 | dev_priv->info = info; |
1467 | 1484 | ||
1485 | i915_dump_device_info(dev_priv); | ||
1486 | |||
1468 | if (i915_get_bridge_dev(dev)) { | 1487 | if (i915_get_bridge_dev(dev)) { |
1469 | ret = -EIO; | 1488 | ret = -EIO; |
1470 | goto free_priv; | 1489 | goto free_priv; |
@@ -1586,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1586 | 1605 | ||
1587 | spin_lock_init(&dev_priv->irq_lock); | 1606 | spin_lock_init(&dev_priv->irq_lock); |
1588 | spin_lock_init(&dev_priv->error_lock); | 1607 | spin_lock_init(&dev_priv->error_lock); |
1589 | spin_lock_init(&dev_priv->rps_lock); | 1608 | spin_lock_init(&dev_priv->rps.lock); |
1590 | 1609 | ||
1591 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 1610 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
1592 | dev_priv->num_pipe = 3; | 1611 | dev_priv->num_pipe = 3; |
@@ -1835,6 +1854,8 @@ struct drm_ioctl_desc i915_ioctls[] = { | |||
1835 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1854 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1836 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1855 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1837 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), | 1856 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), |
1857 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED), | ||
1858 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED), | ||
1838 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), | 1859 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), |
1839 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1860 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1840 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1861 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
@@ -1857,6 +1878,7 @@ struct drm_ioctl_desc i915_ioctls[] = { | |||
1857 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), | 1878 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), |
1858 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), | 1879 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), |
1859 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), | 1880 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), |
1881 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED), | ||
1860 | }; | 1882 | }; |
1861 | 1883 | ||
1862 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | 1884 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a24ffbe97c01..7ebb13b65133 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -1060,7 +1060,7 @@ static bool IS_DISPLAYREG(u32 reg) | |||
1060 | * This should make it easier to transition modules over to the | 1060 | * This should make it easier to transition modules over to the |
1061 | * new register block scheme, since we can do it incrementally. | 1061 | * new register block scheme, since we can do it incrementally. |
1062 | */ | 1062 | */ |
1063 | if (reg >= 0x180000) | 1063 | if (reg >= VLV_DISPLAY_BASE) |
1064 | return false; | 1064 | return false; |
1065 | 1065 | ||
1066 | if (reg >= RENDER_RING_BASE && | 1066 | if (reg >= RENDER_RING_BASE && |
@@ -1180,3 +1180,49 @@ __i915_write(16, w) | |||
1180 | __i915_write(32, l) | 1180 | __i915_write(32, l) |
1181 | __i915_write(64, q) | 1181 | __i915_write(64, q) |
1182 | #undef __i915_write | 1182 | #undef __i915_write |
1183 | |||
1184 | static const struct register_whitelist { | ||
1185 | uint64_t offset; | ||
1186 | uint32_t size; | ||
1187 | uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ | ||
1188 | } whitelist[] = { | ||
1189 | { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, | ||
1190 | }; | ||
1191 | |||
1192 | int i915_reg_read_ioctl(struct drm_device *dev, | ||
1193 | void *data, struct drm_file *file) | ||
1194 | { | ||
1195 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1196 | struct drm_i915_reg_read *reg = data; | ||
1197 | struct register_whitelist const *entry = whitelist; | ||
1198 | int i; | ||
1199 | |||
1200 | for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { | ||
1201 | if (entry->offset == reg->offset && | ||
1202 | (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) | ||
1203 | break; | ||
1204 | } | ||
1205 | |||
1206 | if (i == ARRAY_SIZE(whitelist)) | ||
1207 | return -EINVAL; | ||
1208 | |||
1209 | switch (entry->size) { | ||
1210 | case 8: | ||
1211 | reg->val = I915_READ64(reg->offset); | ||
1212 | break; | ||
1213 | case 4: | ||
1214 | reg->val = I915_READ(reg->offset); | ||
1215 | break; | ||
1216 | case 2: | ||
1217 | reg->val = I915_READ16(reg->offset); | ||
1218 | break; | ||
1219 | case 1: | ||
1220 | reg->val = I915_READ8(reg->offset); | ||
1221 | break; | ||
1222 | default: | ||
1223 | WARN_ON(1); | ||
1224 | return -EINVAL; | ||
1225 | } | ||
1226 | |||
1227 | return 0; | ||
1228 | } | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 627fe35781b4..261fe2175afb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -109,6 +109,7 @@ struct intel_pch_pll { | |||
109 | 109 | ||
110 | #define WATCH_COHERENCY 0 | 110 | #define WATCH_COHERENCY 0 |
111 | #define WATCH_LISTS 0 | 111 | #define WATCH_LISTS 0 |
112 | #define WATCH_GTT 0 | ||
112 | 113 | ||
113 | #define I915_GEM_PHYS_CURSOR_0 1 | 114 | #define I915_GEM_PHYS_CURSOR_0 1 |
114 | #define I915_GEM_PHYS_CURSOR_1 2 | 115 | #define I915_GEM_PHYS_CURSOR_1 2 |
@@ -221,7 +222,7 @@ struct drm_i915_error_state { | |||
221 | struct drm_i915_error_buffer { | 222 | struct drm_i915_error_buffer { |
222 | u32 size; | 223 | u32 size; |
223 | u32 name; | 224 | u32 name; |
224 | u32 seqno; | 225 | u32 rseqno, wseqno; |
225 | u32 gtt_offset; | 226 | u32 gtt_offset; |
226 | u32 read_domains; | 227 | u32 read_domains; |
227 | u32 write_domain; | 228 | u32 write_domain; |
@@ -248,7 +249,6 @@ struct drm_i915_display_funcs { | |||
248 | void (*update_wm)(struct drm_device *dev); | 249 | void (*update_wm)(struct drm_device *dev); |
249 | void (*update_sprite_wm)(struct drm_device *dev, int pipe, | 250 | void (*update_sprite_wm)(struct drm_device *dev, int pipe, |
250 | uint32_t sprite_width, int pixel_size); | 251 | uint32_t sprite_width, int pixel_size); |
251 | void (*sanitize_pm)(struct drm_device *dev); | ||
252 | void (*update_linetime_wm)(struct drm_device *dev, int pipe, | 252 | void (*update_linetime_wm)(struct drm_device *dev, int pipe, |
253 | struct drm_display_mode *mode); | 253 | struct drm_display_mode *mode); |
254 | int (*crtc_mode_set)(struct drm_crtc *crtc, | 254 | int (*crtc_mode_set)(struct drm_crtc *crtc, |
@@ -279,6 +279,32 @@ struct drm_i915_gt_funcs { | |||
279 | void (*force_wake_put)(struct drm_i915_private *dev_priv); | 279 | void (*force_wake_put)(struct drm_i915_private *dev_priv); |
280 | }; | 280 | }; |
281 | 281 | ||
282 | #define DEV_INFO_FLAGS \ | ||
283 | DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ | ||
284 | DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \ | ||
285 | DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \ | ||
286 | DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \ | ||
287 | DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \ | ||
288 | DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \ | ||
289 | DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \ | ||
290 | DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \ | ||
291 | DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \ | ||
292 | DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \ | ||
293 | DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \ | ||
294 | DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \ | ||
295 | DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \ | ||
296 | DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \ | ||
297 | DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \ | ||
298 | DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \ | ||
299 | DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \ | ||
300 | DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \ | ||
301 | DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \ | ||
302 | DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \ | ||
303 | DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \ | ||
304 | DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \ | ||
305 | DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \ | ||
306 | DEV_INFO_FLAG(has_llc) | ||
307 | |||
282 | struct intel_device_info { | 308 | struct intel_device_info { |
283 | u8 gen; | 309 | u8 gen; |
284 | u8 is_mobile:1; | 310 | u8 is_mobile:1; |
@@ -696,17 +722,6 @@ typedef struct drm_i915_private { | |||
696 | struct list_head active_list; | 722 | struct list_head active_list; |
697 | 723 | ||
698 | /** | 724 | /** |
699 | * List of objects which are not in the ringbuffer but which | ||
700 | * still have a write_domain which needs to be flushed before | ||
701 | * unbinding. | ||
702 | * | ||
703 | * last_rendering_seqno is 0 while an object is in this list. | ||
704 | * | ||
705 | * A reference is held on the buffer while on this list. | ||
706 | */ | ||
707 | struct list_head flushing_list; | ||
708 | |||
709 | /** | ||
710 | * LRU list of objects which are not in the ringbuffer and | 725 | * LRU list of objects which are not in the ringbuffer and |
711 | * are ready to unbind, but are still in the GTT. | 726 | * are ready to unbind, but are still in the GTT. |
712 | * | 727 | * |
@@ -796,9 +811,6 @@ typedef struct drm_i915_private { | |||
796 | bool lvds_downclock_avail; | 811 | bool lvds_downclock_avail; |
797 | /* indicates the reduced downclock for LVDS*/ | 812 | /* indicates the reduced downclock for LVDS*/ |
798 | int lvds_downclock; | 813 | int lvds_downclock; |
799 | struct work_struct idle_work; | ||
800 | struct timer_list idle_timer; | ||
801 | bool busy; | ||
802 | u16 orig_clock; | 814 | u16 orig_clock; |
803 | int child_dev_num; | 815 | int child_dev_num; |
804 | struct child_device_config *child_dev; | 816 | struct child_device_config *child_dev; |
@@ -807,9 +819,21 @@ typedef struct drm_i915_private { | |||
807 | 819 | ||
808 | bool mchbar_need_disable; | 820 | bool mchbar_need_disable; |
809 | 821 | ||
810 | struct work_struct rps_work; | 822 | /* gen6+ rps state */ |
811 | spinlock_t rps_lock; | 823 | struct { |
812 | u32 pm_iir; | 824 | struct work_struct work; |
825 | u32 pm_iir; | ||
826 | /* lock - irqsave spinlock that protectects the work_struct and | ||
827 | * pm_iir. */ | ||
828 | spinlock_t lock; | ||
829 | |||
830 | /* The below variables an all the rps hw state are protected by | ||
831 | * dev->struct mutext. */ | ||
832 | u8 cur_delay; | ||
833 | u8 min_delay; | ||
834 | u8 max_delay; | ||
835 | } rps; | ||
836 | |||
813 | 837 | ||
814 | u8 cur_delay; | 838 | u8 cur_delay; |
815 | u8 min_delay; | 839 | u8 min_delay; |
@@ -826,7 +850,6 @@ typedef struct drm_i915_private { | |||
826 | int c_m; | 850 | int c_m; |
827 | int r_t; | 851 | int r_t; |
828 | u8 corr; | 852 | u8 corr; |
829 | spinlock_t *mchdev_lock; | ||
830 | 853 | ||
831 | enum no_fbc_reason no_fbc_reason; | 854 | enum no_fbc_reason no_fbc_reason; |
832 | 855 | ||
@@ -861,9 +884,9 @@ enum hdmi_force_audio { | |||
861 | }; | 884 | }; |
862 | 885 | ||
863 | enum i915_cache_level { | 886 | enum i915_cache_level { |
864 | I915_CACHE_NONE, | 887 | I915_CACHE_NONE = 0, |
865 | I915_CACHE_LLC, | 888 | I915_CACHE_LLC, |
866 | I915_CACHE_LLC_MLC, /* gen6+ */ | 889 | I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ |
867 | }; | 890 | }; |
868 | 891 | ||
869 | struct drm_i915_gem_object { | 892 | struct drm_i915_gem_object { |
@@ -873,18 +896,16 @@ struct drm_i915_gem_object { | |||
873 | struct drm_mm_node *gtt_space; | 896 | struct drm_mm_node *gtt_space; |
874 | struct list_head gtt_list; | 897 | struct list_head gtt_list; |
875 | 898 | ||
876 | /** This object's place on the active/flushing/inactive lists */ | 899 | /** This object's place on the active/inactive lists */ |
877 | struct list_head ring_list; | 900 | struct list_head ring_list; |
878 | struct list_head mm_list; | 901 | struct list_head mm_list; |
879 | /** This object's place on GPU write list */ | ||
880 | struct list_head gpu_write_list; | ||
881 | /** This object's place in the batchbuffer or on the eviction list */ | 902 | /** This object's place in the batchbuffer or on the eviction list */ |
882 | struct list_head exec_list; | 903 | struct list_head exec_list; |
883 | 904 | ||
884 | /** | 905 | /** |
885 | * This is set if the object is on the active or flushing lists | 906 | * This is set if the object is on the active lists (has pending |
886 | * (has pending rendering), and is not set if it's on inactive (ready | 907 | * rendering and so a non-zero seqno), and is not set if it i s on |
887 | * to be unbound). | 908 | * inactive (ready to be unbound) list. |
888 | */ | 909 | */ |
889 | unsigned int active:1; | 910 | unsigned int active:1; |
890 | 911 | ||
@@ -895,12 +916,6 @@ struct drm_i915_gem_object { | |||
895 | unsigned int dirty:1; | 916 | unsigned int dirty:1; |
896 | 917 | ||
897 | /** | 918 | /** |
898 | * This is set if the object has been written to since the last | ||
899 | * GPU flush. | ||
900 | */ | ||
901 | unsigned int pending_gpu_write:1; | ||
902 | |||
903 | /** | ||
904 | * Fence register bits (if any) for this object. Will be set | 919 | * Fence register bits (if any) for this object. Will be set |
905 | * as needed when mapped into the GTT. | 920 | * as needed when mapped into the GTT. |
906 | * Protected by dev->struct_mutex. | 921 | * Protected by dev->struct_mutex. |
@@ -992,7 +1007,8 @@ struct drm_i915_gem_object { | |||
992 | struct intel_ring_buffer *ring; | 1007 | struct intel_ring_buffer *ring; |
993 | 1008 | ||
994 | /** Breadcrumb of last rendering to the buffer. */ | 1009 | /** Breadcrumb of last rendering to the buffer. */ |
995 | uint32_t last_rendering_seqno; | 1010 | uint32_t last_read_seqno; |
1011 | uint32_t last_write_seqno; | ||
996 | /** Breadcrumb of last fenced GPU access to the buffer. */ | 1012 | /** Breadcrumb of last fenced GPU access to the buffer. */ |
997 | uint32_t last_fenced_seqno; | 1013 | uint32_t last_fenced_seqno; |
998 | 1014 | ||
@@ -1135,6 +1151,8 @@ struct drm_i915_file_private { | |||
1135 | 1151 | ||
1136 | #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) | 1152 | #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) |
1137 | 1153 | ||
1154 | #define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | ||
1155 | |||
1138 | #include "i915_trace.h" | 1156 | #include "i915_trace.h" |
1139 | 1157 | ||
1140 | /** | 1158 | /** |
@@ -1256,6 +1274,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
1256 | struct drm_file *file_priv); | 1274 | struct drm_file *file_priv); |
1257 | int i915_gem_busy_ioctl(struct drm_device *dev, void *data, | 1275 | int i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
1258 | struct drm_file *file_priv); | 1276 | struct drm_file *file_priv); |
1277 | int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data, | ||
1278 | struct drm_file *file); | ||
1279 | int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data, | ||
1280 | struct drm_file *file); | ||
1259 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, | 1281 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
1260 | struct drm_file *file_priv); | 1282 | struct drm_file *file_priv); |
1261 | int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | 1283 | int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
@@ -1274,9 +1296,6 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, | |||
1274 | struct drm_file *file_priv); | 1296 | struct drm_file *file_priv); |
1275 | void i915_gem_load(struct drm_device *dev); | 1297 | void i915_gem_load(struct drm_device *dev); |
1276 | int i915_gem_init_object(struct drm_gem_object *obj); | 1298 | int i915_gem_init_object(struct drm_gem_object *obj); |
1277 | int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, | ||
1278 | uint32_t invalidate_domains, | ||
1279 | uint32_t flush_domains); | ||
1280 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | 1299 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
1281 | size_t size); | 1300 | size_t size); |
1282 | void i915_gem_free_object(struct drm_gem_object *obj); | 1301 | void i915_gem_free_object(struct drm_gem_object *obj); |
@@ -1291,7 +1310,6 @@ void i915_gem_lastclose(struct drm_device *dev); | |||
1291 | int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, | 1310 | int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, |
1292 | gfp_t gfpmask); | 1311 | gfp_t gfpmask); |
1293 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); | 1312 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
1294 | int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); | ||
1295 | int i915_gem_object_sync(struct drm_i915_gem_object *obj, | 1313 | int i915_gem_object_sync(struct drm_i915_gem_object *obj, |
1296 | struct intel_ring_buffer *to); | 1314 | struct intel_ring_buffer *to); |
1297 | void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | 1315 | void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
@@ -1358,9 +1376,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev); | |||
1358 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 1376 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
1359 | int __must_check i915_gpu_idle(struct drm_device *dev); | 1377 | int __must_check i915_gpu_idle(struct drm_device *dev); |
1360 | int __must_check i915_gem_idle(struct drm_device *dev); | 1378 | int __must_check i915_gem_idle(struct drm_device *dev); |
1361 | int __must_check i915_add_request(struct intel_ring_buffer *ring, | 1379 | int i915_add_request(struct intel_ring_buffer *ring, |
1362 | struct drm_file *file, | 1380 | struct drm_file *file, |
1363 | struct drm_i915_gem_request *request); | 1381 | struct drm_i915_gem_request *request); |
1364 | int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, | 1382 | int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, |
1365 | uint32_t seqno); | 1383 | uint32_t seqno); |
1366 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 1384 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
@@ -1429,7 +1447,9 @@ void i915_gem_init_global_gtt(struct drm_device *dev, | |||
1429 | 1447 | ||
1430 | /* i915_gem_evict.c */ | 1448 | /* i915_gem_evict.c */ |
1431 | int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, | 1449 | int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, |
1432 | unsigned alignment, bool mappable); | 1450 | unsigned alignment, |
1451 | unsigned cache_level, | ||
1452 | bool mappable); | ||
1433 | int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); | 1453 | int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); |
1434 | 1454 | ||
1435 | /* i915_gem_stolen.c */ | 1455 | /* i915_gem_stolen.c */ |
@@ -1529,6 +1549,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); | |||
1529 | extern int intel_enable_rc6(const struct drm_device *dev); | 1549 | extern int intel_enable_rc6(const struct drm_device *dev); |
1530 | 1550 | ||
1531 | extern bool i915_semaphore_is_enabled(struct drm_device *dev); | 1551 | extern bool i915_semaphore_is_enabled(struct drm_device *dev); |
1552 | int i915_reg_read_ioctl(struct drm_device *dev, void *data, | ||
1553 | struct drm_file *file); | ||
1532 | 1554 | ||
1533 | /* overlay */ | 1555 | /* overlay */ |
1534 | #ifdef CONFIG_DEBUG_FS | 1556 | #ifdef CONFIG_DEBUG_FS |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 489e2b162b27..31054fa44c47 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
38 | #include <linux/dma-buf.h> | 38 | #include <linux/dma-buf.h> |
39 | 39 | ||
40 | static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); | ||
41 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); | 40 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); |
42 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); | 41 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); |
43 | static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | 42 | static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
@@ -1441,7 +1440,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |||
1441 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); | 1440 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); |
1442 | list_move_tail(&obj->ring_list, &ring->active_list); | 1441 | list_move_tail(&obj->ring_list, &ring->active_list); |
1443 | 1442 | ||
1444 | obj->last_rendering_seqno = seqno; | 1443 | obj->last_read_seqno = seqno; |
1445 | 1444 | ||
1446 | if (obj->fenced_gpu_access) { | 1445 | if (obj->fenced_gpu_access) { |
1447 | obj->last_fenced_seqno = seqno; | 1446 | obj->last_fenced_seqno = seqno; |
@@ -1458,42 +1457,30 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |||
1458 | } | 1457 | } |
1459 | 1458 | ||
1460 | static void | 1459 | static void |
1461 | i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) | 1460 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) |
1462 | { | ||
1463 | list_del_init(&obj->ring_list); | ||
1464 | obj->last_rendering_seqno = 0; | ||
1465 | obj->last_fenced_seqno = 0; | ||
1466 | } | ||
1467 | |||
1468 | static void | ||
1469 | i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) | ||
1470 | { | 1461 | { |
1471 | struct drm_device *dev = obj->base.dev; | 1462 | struct drm_device *dev = obj->base.dev; |
1472 | drm_i915_private_t *dev_priv = dev->dev_private; | 1463 | struct drm_i915_private *dev_priv = dev->dev_private; |
1473 | 1464 | ||
1465 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); | ||
1474 | BUG_ON(!obj->active); | 1466 | BUG_ON(!obj->active); |
1475 | list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); | ||
1476 | 1467 | ||
1477 | i915_gem_object_move_off_active(obj); | 1468 | if (obj->pin_count) /* are we a framebuffer? */ |
1478 | } | 1469 | intel_mark_fb_idle(obj); |
1479 | |||
1480 | static void | ||
1481 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | ||
1482 | { | ||
1483 | struct drm_device *dev = obj->base.dev; | ||
1484 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1485 | 1470 | ||
1486 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | 1471 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1487 | 1472 | ||
1488 | BUG_ON(!list_empty(&obj->gpu_write_list)); | 1473 | list_del_init(&obj->ring_list); |
1489 | BUG_ON(!obj->active); | ||
1490 | obj->ring = NULL; | 1474 | obj->ring = NULL; |
1491 | 1475 | ||
1492 | i915_gem_object_move_off_active(obj); | 1476 | obj->last_read_seqno = 0; |
1477 | obj->last_write_seqno = 0; | ||
1478 | obj->base.write_domain = 0; | ||
1479 | |||
1480 | obj->last_fenced_seqno = 0; | ||
1493 | obj->fenced_gpu_access = false; | 1481 | obj->fenced_gpu_access = false; |
1494 | 1482 | ||
1495 | obj->active = 0; | 1483 | obj->active = 0; |
1496 | obj->pending_gpu_write = false; | ||
1497 | drm_gem_object_unreference(&obj->base); | 1484 | drm_gem_object_unreference(&obj->base); |
1498 | 1485 | ||
1499 | WARN_ON(i915_verify_lists(dev)); | 1486 | WARN_ON(i915_verify_lists(dev)); |
@@ -1525,30 +1512,6 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) | |||
1525 | return obj->madv == I915_MADV_DONTNEED; | 1512 | return obj->madv == I915_MADV_DONTNEED; |
1526 | } | 1513 | } |
1527 | 1514 | ||
1528 | static void | ||
1529 | i915_gem_process_flushing_list(struct intel_ring_buffer *ring, | ||
1530 | uint32_t flush_domains) | ||
1531 | { | ||
1532 | struct drm_i915_gem_object *obj, *next; | ||
1533 | |||
1534 | list_for_each_entry_safe(obj, next, | ||
1535 | &ring->gpu_write_list, | ||
1536 | gpu_write_list) { | ||
1537 | if (obj->base.write_domain & flush_domains) { | ||
1538 | uint32_t old_write_domain = obj->base.write_domain; | ||
1539 | |||
1540 | obj->base.write_domain = 0; | ||
1541 | list_del_init(&obj->gpu_write_list); | ||
1542 | i915_gem_object_move_to_active(obj, ring, | ||
1543 | i915_gem_next_request_seqno(ring)); | ||
1544 | |||
1545 | trace_i915_gem_object_change_domain(obj, | ||
1546 | obj->base.read_domains, | ||
1547 | old_write_domain); | ||
1548 | } | ||
1549 | } | ||
1550 | } | ||
1551 | |||
1552 | static u32 | 1515 | static u32 |
1553 | i915_gem_get_seqno(struct drm_device *dev) | 1516 | i915_gem_get_seqno(struct drm_device *dev) |
1554 | { | 1517 | { |
@@ -1589,15 +1552,16 @@ i915_add_request(struct intel_ring_buffer *ring, | |||
1589 | * is that the flush _must_ happen before the next request, no matter | 1552 | * is that the flush _must_ happen before the next request, no matter |
1590 | * what. | 1553 | * what. |
1591 | */ | 1554 | */ |
1592 | if (ring->gpu_caches_dirty) { | 1555 | ret = intel_ring_flush_all_caches(ring); |
1593 | ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS); | 1556 | if (ret) |
1594 | if (ret) | 1557 | return ret; |
1595 | return ret; | ||
1596 | 1558 | ||
1597 | ring->gpu_caches_dirty = false; | 1559 | if (request == NULL) { |
1560 | request = kmalloc(sizeof(*request), GFP_KERNEL); | ||
1561 | if (request == NULL) | ||
1562 | return -ENOMEM; | ||
1598 | } | 1563 | } |
1599 | 1564 | ||
1600 | BUG_ON(request == NULL); | ||
1601 | seqno = i915_gem_next_request_seqno(ring); | 1565 | seqno = i915_gem_next_request_seqno(ring); |
1602 | 1566 | ||
1603 | /* Record the position of the start of the request so that | 1567 | /* Record the position of the start of the request so that |
@@ -1608,8 +1572,10 @@ i915_add_request(struct intel_ring_buffer *ring, | |||
1608 | request_ring_position = intel_ring_get_tail(ring); | 1572 | request_ring_position = intel_ring_get_tail(ring); |
1609 | 1573 | ||
1610 | ret = ring->add_request(ring, &seqno); | 1574 | ret = ring->add_request(ring, &seqno); |
1611 | if (ret) | 1575 | if (ret) { |
1612 | return ret; | 1576 | kfree(request); |
1577 | return ret; | ||
1578 | } | ||
1613 | 1579 | ||
1614 | trace_i915_gem_request_add(ring, seqno); | 1580 | trace_i915_gem_request_add(ring, seqno); |
1615 | 1581 | ||
@@ -1619,6 +1585,7 @@ i915_add_request(struct intel_ring_buffer *ring, | |||
1619 | request->emitted_jiffies = jiffies; | 1585 | request->emitted_jiffies = jiffies; |
1620 | was_empty = list_empty(&ring->request_list); | 1586 | was_empty = list_empty(&ring->request_list); |
1621 | list_add_tail(&request->list, &ring->request_list); | 1587 | list_add_tail(&request->list, &ring->request_list); |
1588 | request->file_priv = NULL; | ||
1622 | 1589 | ||
1623 | if (file) { | 1590 | if (file) { |
1624 | struct drm_i915_file_private *file_priv = file->driver_priv; | 1591 | struct drm_i915_file_private *file_priv = file->driver_priv; |
@@ -1638,13 +1605,13 @@ i915_add_request(struct intel_ring_buffer *ring, | |||
1638 | jiffies + | 1605 | jiffies + |
1639 | msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | 1606 | msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); |
1640 | } | 1607 | } |
1641 | if (was_empty) | 1608 | if (was_empty) { |
1642 | queue_delayed_work(dev_priv->wq, | 1609 | queue_delayed_work(dev_priv->wq, |
1643 | &dev_priv->mm.retire_work, HZ); | 1610 | &dev_priv->mm.retire_work, HZ); |
1611 | intel_mark_busy(dev_priv->dev); | ||
1612 | } | ||
1644 | } | 1613 | } |
1645 | 1614 | ||
1646 | WARN_ON(!list_empty(&ring->gpu_write_list)); | ||
1647 | |||
1648 | return 0; | 1615 | return 0; |
1649 | } | 1616 | } |
1650 | 1617 | ||
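With the request argument now optional, callers that have no use for the bookkeeping structure can pass NULL and let i915_add_request() allocate it; on any failure the function frees whatever it was handed, so ownership is unconditional. A hedged caller sketch:

	/* Fire-and-forget flush: the request is allocated internally and,
	 * on success, owned by the ring's request list. */
	if (ring->gpu_caches_dirty)
		i915_add_request(ring, NULL, NULL);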
@@ -1686,8 +1653,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | |||
1686 | struct drm_i915_gem_object, | 1653 | struct drm_i915_gem_object, |
1687 | ring_list); | 1654 | ring_list); |
1688 | 1655 | ||
1689 | obj->base.write_domain = 0; | ||
1690 | list_del_init(&obj->gpu_write_list); | ||
1691 | i915_gem_object_move_to_inactive(obj); | 1656 | i915_gem_object_move_to_inactive(obj); |
1692 | } | 1657 | } |
1693 | } | 1658 | } |
@@ -1723,20 +1688,6 @@ void i915_gem_reset(struct drm_device *dev) | |||
1723 | for_each_ring(ring, dev_priv, i) | 1688 | for_each_ring(ring, dev_priv, i) |
1724 | i915_gem_reset_ring_lists(dev_priv, ring); | 1689 | i915_gem_reset_ring_lists(dev_priv, ring); |
1725 | 1690 | ||
1726 | /* Remove anything from the flushing lists. The GPU cache is likely | ||
1727 | * to be lost on reset along with the data, so simply move the | ||
1728 | * lost bo to the inactive list. | ||
1729 | */ | ||
1730 | while (!list_empty(&dev_priv->mm.flushing_list)) { | ||
1731 | obj = list_first_entry(&dev_priv->mm.flushing_list, | ||
1732 | struct drm_i915_gem_object, | ||
1733 | mm_list); | ||
1734 | |||
1735 | obj->base.write_domain = 0; | ||
1736 | list_del_init(&obj->gpu_write_list); | ||
1737 | i915_gem_object_move_to_inactive(obj); | ||
1738 | } | ||
1739 | |||
1740 | /* Move everything out of the GPU domains to ensure we do any | 1691 | /* Move everything out of the GPU domains to ensure we do any |
1741 | * necessary invalidation upon reuse. | 1692 | * necessary invalidation upon reuse. |
1742 | */ | 1693 | */ |
@@ -1765,7 +1716,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) | |||
1765 | 1716 | ||
1766 | WARN_ON(i915_verify_lists(ring->dev)); | 1717 | WARN_ON(i915_verify_lists(ring->dev)); |
1767 | 1718 | ||
1768 | seqno = ring->get_seqno(ring); | 1719 | seqno = ring->get_seqno(ring, true); |
1769 | 1720 | ||
1770 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) | 1721 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) |
1771 | if (seqno >= ring->sync_seqno[i]) | 1722 | if (seqno >= ring->sync_seqno[i]) |
@@ -1804,13 +1755,10 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) | |||
1804 | struct drm_i915_gem_object, | 1755 | struct drm_i915_gem_object, |
1805 | ring_list); | 1756 | ring_list); |
1806 | 1757 | ||
1807 | if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) | 1758 | if (!i915_seqno_passed(seqno, obj->last_read_seqno)) |
1808 | break; | 1759 | break; |
1809 | 1760 | ||
1810 | if (obj->base.write_domain != 0) | 1761 | i915_gem_object_move_to_inactive(obj); |
1811 | i915_gem_object_move_to_flushing(obj); | ||
1812 | else | ||
1813 | i915_gem_object_move_to_inactive(obj); | ||
1814 | } | 1762 | } |
1815 | 1763 | ||
1816 | if (unlikely(ring->trace_irq_seqno && | 1764 | if (unlikely(ring->trace_irq_seqno && |
@@ -1859,20 +1807,16 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1859 | */ | 1807 | */ |
1860 | idle = true; | 1808 | idle = true; |
1861 | for_each_ring(ring, dev_priv, i) { | 1809 | for_each_ring(ring, dev_priv, i) { |
1862 | if (ring->gpu_caches_dirty) { | 1810 | if (ring->gpu_caches_dirty) |
1863 | struct drm_i915_gem_request *request; | 1811 | i915_add_request(ring, NULL, NULL); |
1864 | |||
1865 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
1866 | if (request == NULL || | ||
1867 | i915_add_request(ring, NULL, request)) | ||
1868 | kfree(request); | ||
1869 | } | ||
1870 | 1812 | ||
1871 | idle &= list_empty(&ring->request_list); | 1813 | idle &= list_empty(&ring->request_list); |
1872 | } | 1814 | } |
1873 | 1815 | ||
1874 | if (!dev_priv->mm.suspended && !idle) | 1816 | if (!dev_priv->mm.suspended && !idle) |
1875 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 1817 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); |
1818 | if (idle) | ||
1819 | intel_mark_idle(dev); | ||
1876 | 1820 | ||
1877 | mutex_unlock(&dev->struct_mutex); | 1821 | mutex_unlock(&dev->struct_mutex); |
1878 | } | 1822 | } |
@@ -1913,25 +1857,13 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv, | |||
1913 | static int | 1857 | static int |
1914 | i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) | 1858 | i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) |
1915 | { | 1859 | { |
1916 | int ret = 0; | 1860 | int ret; |
1917 | 1861 | ||
1918 | BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); | 1862 | BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); |
1919 | 1863 | ||
1920 | if (seqno == ring->outstanding_lazy_request) { | 1864 | ret = 0; |
1921 | struct drm_i915_gem_request *request; | 1865 | if (seqno == ring->outstanding_lazy_request) |
1922 | 1866 | ret = i915_add_request(ring, NULL, NULL); | |
1923 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
1924 | if (request == NULL) | ||
1925 | return -ENOMEM; | ||
1926 | |||
1927 | ret = i915_add_request(ring, NULL, request); | ||
1928 | if (ret) { | ||
1929 | kfree(request); | ||
1930 | return ret; | ||
1931 | } | ||
1932 | |||
1933 | BUG_ON(seqno != request->seqno); | ||
1934 | } | ||
1935 | 1867 | ||
1936 | return ret; | 1868 | return ret; |
1937 | } | 1869 | } |
@@ -1956,7 +1888,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
1956 | bool wait_forever = true; | 1888 | bool wait_forever = true; |
1957 | int ret; | 1889 | int ret; |
1958 | 1890 | ||
1959 | if (i915_seqno_passed(ring->get_seqno(ring), seqno)) | 1891 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) |
1960 | return 0; | 1892 | return 0; |
1961 | 1893 | ||
1962 | trace_i915_gem_request_wait_begin(ring, seqno); | 1894 | trace_i915_gem_request_wait_begin(ring, seqno); |
@@ -1975,7 +1907,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
1975 | getrawmonotonic(&before); | 1907 | getrawmonotonic(&before); |
1976 | 1908 | ||
1977 | #define EXIT_COND \ | 1909 | #define EXIT_COND \ |
1978 | (i915_seqno_passed(ring->get_seqno(ring), seqno) || \ | 1910 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ |
1979 | atomic_read(&dev_priv->mm.wedged)) | 1911 | atomic_read(&dev_priv->mm.wedged)) |
1980 | do { | 1912 | do { |
1981 | if (interruptible) | 1913 | if (interruptible) |
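ring->get_seqno() grows a second argument in this hunk. Judging by the call sites (the parameter name here is an assumption), true allows a cheap, possibly stale sample of the status page for opportunistic checks, while false forces a coherent read before the value is trusted. A plausible implementation sketch:

	static u32 example_get_seqno(struct intel_ring_buffer *ring,
				     bool lazy_coherency)
	{
		/* A coherent read first issues a dummy register read to
		 * flush any outstanding seqno write (sketch only). */
		if (!lazy_coherency)
			intel_ring_get_active_head(ring);
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
	}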
@@ -2046,26 +1978,37 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) | |||
2046 | * Ensures that all rendering to the object has completed and the object is | 1978 | * Ensures that all rendering to the object has completed and the object is |
2047 | * safe to unbind from the GTT or access from the CPU. | 1979 | * safe to unbind from the GTT or access from the CPU. |
2048 | */ | 1980 | */ |
2049 | int | 1981 | static __must_check int |
2050 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) | 1982 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
1983 | bool readonly) | ||
2051 | { | 1984 | { |
1985 | u32 seqno; | ||
2052 | int ret; | 1986 | int ret; |
2053 | 1987 | ||
2054 | /* This function only exists to support waiting for existing rendering, | ||
2055 | * not for emitting required flushes. | ||
2056 | */ | ||
2057 | BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); | ||
2058 | |||
2059 | /* If there is rendering queued on the buffer being evicted, wait for | 1988 | /* If there is rendering queued on the buffer being evicted, wait for |
2060 | * it. | 1989 | * it. |
2061 | */ | 1990 | */ |
2062 | if (obj->active) { | 1991 | if (readonly) |
2063 | ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno); | 1992 | seqno = obj->last_write_seqno; |
2064 | if (ret) | 1993 | else |
2065 | return ret; | 1994 | seqno = obj->last_read_seqno; |
2066 | i915_gem_retire_requests_ring(obj->ring); | 1995 | if (seqno == 0) |
1996 | return 0; | ||
1997 | |||
1998 | ret = i915_wait_seqno(obj->ring, seqno); | ||
1999 | if (ret) | ||
2000 | return ret; | ||
2001 | |||
2002 | /* Manually manage the write flush as we may have not yet retired | ||
2003 | * the buffer. | ||
2004 | */ | ||
2005 | if (obj->last_write_seqno && | ||
2006 | i915_seqno_passed(seqno, obj->last_write_seqno)) { | ||
2007 | obj->last_write_seqno = 0; | ||
2008 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
2067 | } | 2009 | } |
2068 | 2010 | ||
2011 | i915_gem_retire_requests_ring(obj->ring); | ||
2069 | return 0; | 2012 | return 0; |
2070 | } | 2013 | } |
2071 | 2014 | ||
@@ -2080,14 +2023,10 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) | |||
2080 | int ret; | 2023 | int ret; |
2081 | 2024 | ||
2082 | if (obj->active) { | 2025 | if (obj->active) { |
2083 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 2026 | ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno); |
2084 | if (ret) | 2027 | if (ret) |
2085 | return ret; | 2028 | return ret; |
2086 | 2029 | ||
2087 | ret = i915_gem_check_olr(obj->ring, | ||
2088 | obj->last_rendering_seqno); | ||
2089 | if (ret) | ||
2090 | return ret; | ||
2091 | i915_gem_retire_requests_ring(obj->ring); | 2030 | i915_gem_retire_requests_ring(obj->ring); |
2092 | } | 2031 | } |
2093 | 2032 | ||
@@ -2147,7 +2086,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2147 | goto out; | 2086 | goto out; |
2148 | 2087 | ||
2149 | if (obj->active) { | 2088 | if (obj->active) { |
2150 | seqno = obj->last_rendering_seqno; | 2089 | seqno = obj->last_read_seqno; |
2151 | ring = obj->ring; | 2090 | ring = obj->ring; |
2152 | } | 2091 | } |
2153 | 2092 | ||
@@ -2202,11 +2141,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, | |||
2202 | return 0; | 2141 | return 0; |
2203 | 2142 | ||
2204 | if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) | 2143 | if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) |
2205 | return i915_gem_object_wait_rendering(obj); | 2144 | return i915_gem_object_wait_rendering(obj, false); |
2206 | 2145 | ||
2207 | idx = intel_ring_sync_index(from, to); | 2146 | idx = intel_ring_sync_index(from, to); |
2208 | 2147 | ||
2209 | seqno = obj->last_rendering_seqno; | 2148 | seqno = obj->last_read_seqno; |
2210 | if (seqno <= from->sync_seqno[idx]) | 2149 | if (seqno <= from->sync_seqno[idx]) |
2211 | return 0; | 2150 | return 0; |
2212 | 2151 | ||
@@ -2318,42 +2257,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2318 | return ret; | 2257 | return ret; |
2319 | } | 2258 | } |
2320 | 2259 | ||
2321 | int | ||
2322 | i915_gem_flush_ring(struct intel_ring_buffer *ring, | ||
2323 | uint32_t invalidate_domains, | ||
2324 | uint32_t flush_domains) | ||
2325 | { | ||
2326 | int ret; | ||
2327 | |||
2328 | if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0) | ||
2329 | return 0; | ||
2330 | |||
2331 | trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); | ||
2332 | |||
2333 | ret = ring->flush(ring, invalidate_domains, flush_domains); | ||
2334 | if (ret) | ||
2335 | return ret; | ||
2336 | |||
2337 | if (flush_domains & I915_GEM_GPU_DOMAINS) | ||
2338 | i915_gem_process_flushing_list(ring, flush_domains); | ||
2339 | |||
2340 | return 0; | ||
2341 | } | ||
2342 | |||
2343 | static int i915_ring_idle(struct intel_ring_buffer *ring) | 2260 | static int i915_ring_idle(struct intel_ring_buffer *ring) |
2344 | { | 2261 | { |
2345 | int ret; | 2262 | if (list_empty(&ring->active_list)) |
2346 | |||
2347 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) | ||
2348 | return 0; | 2263 | return 0; |
2349 | 2264 | ||
2350 | if (!list_empty(&ring->gpu_write_list)) { | ||
2351 | ret = i915_gem_flush_ring(ring, | ||
2352 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | ||
2353 | if (ret) | ||
2354 | return ret; | ||
2355 | } | ||
2356 | |||
2357 | return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring)); | 2265 | return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring)); |
2358 | } | 2266 | } |
2359 | 2267 | ||
@@ -2372,10 +2280,6 @@ int i915_gpu_idle(struct drm_device *dev) | |||
2372 | ret = i915_ring_idle(ring); | 2280 | ret = i915_ring_idle(ring); |
2373 | if (ret) | 2281 | if (ret) |
2374 | return ret; | 2282 | return ret; |
2375 | |||
2376 | /* Is the device fubar? */ | ||
2377 | if (WARN_ON(!list_empty(&ring->gpu_write_list))) | ||
2378 | return -EBUSY; | ||
2379 | } | 2283 | } |
2380 | 2284 | ||
2381 | return 0; | 2285 | return 0; |
@@ -2548,21 +2452,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, | |||
2548 | static int | 2452 | static int |
2549 | i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) | 2453 | i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) |
2550 | { | 2454 | { |
2551 | int ret; | ||
2552 | |||
2553 | if (obj->fenced_gpu_access) { | ||
2554 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | ||
2555 | ret = i915_gem_flush_ring(obj->ring, | ||
2556 | 0, obj->base.write_domain); | ||
2557 | if (ret) | ||
2558 | return ret; | ||
2559 | } | ||
2560 | |||
2561 | obj->fenced_gpu_access = false; | ||
2562 | } | ||
2563 | |||
2564 | if (obj->last_fenced_seqno) { | 2455 | if (obj->last_fenced_seqno) { |
2565 | ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); | 2456 | int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); |
2566 | if (ret) | 2457 | if (ret) |
2567 | return ret; | 2458 | return ret; |
2568 | 2459 | ||
@@ -2575,6 +2466,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) | |||
2575 | if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) | 2466 | if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) |
2576 | mb(); | 2467 | mb(); |
2577 | 2468 | ||
2469 | obj->fenced_gpu_access = false; | ||
2578 | return 0; | 2470 | return 0; |
2579 | } | 2471 | } |
2580 | 2472 | ||
@@ -2694,6 +2586,76 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) | |||
2694 | return 0; | 2586 | return 0; |
2695 | } | 2587 | } |
2696 | 2588 | ||
2589 | static bool i915_gem_valid_gtt_space(struct drm_device *dev, | ||
2590 | struct drm_mm_node *gtt_space, | ||
2591 | unsigned long cache_level) | ||
2592 | { | ||
2593 | struct drm_mm_node *other; | ||
2594 | |||
2595 | /* On non-LLC machines we have to be careful when putting differing | ||
2596 | * types of snoopable memory together to avoid the prefetcher | ||
2597 | * crossing memory domains and dying. | ||
2598 | */ | ||
2599 | if (HAS_LLC(dev)) | ||
2600 | return true; | ||
2601 | |||
2602 | if (gtt_space == NULL) | ||
2603 | return true; | ||
2604 | |||
2605 | if (list_empty(&gtt_space->node_list)) | ||
2606 | return true; | ||
2607 | |||
2608 | other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); | ||
2609 | if (other->allocated && !other->hole_follows && other->color != cache_level) | ||
2610 | return false; | ||
2611 | |||
2612 | other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); | ||
2613 | if (other->allocated && !gtt_space->hole_follows && other->color != cache_level) | ||
2614 | return false; | ||
2615 | |||
2616 | return true; | ||
2617 | } | ||
2618 | |||
2619 | static void i915_gem_verify_gtt(struct drm_device *dev) | ||
2620 | { | ||
2621 | #if WATCH_GTT | ||
2622 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2623 | struct drm_i915_gem_object *obj; | ||
2624 | int err = 0; | ||
2625 | |||
2626 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | ||
2627 | if (obj->gtt_space == NULL) { | ||
2628 | printk(KERN_ERR "object found on GTT list with no space reserved\n"); | ||
2629 | err++; | ||
2630 | continue; | ||
2631 | } | ||
2632 | |||
2633 | if (obj->cache_level != obj->gtt_space->color) { | ||
2634 | printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", | ||
2635 | obj->gtt_space->start, | ||
2636 | obj->gtt_space->start + obj->gtt_space->size, | ||
2637 | obj->cache_level, | ||
2638 | obj->gtt_space->color); | ||
2639 | err++; | ||
2640 | continue; | ||
2641 | } | ||
2642 | |||
2643 | if (!i915_gem_valid_gtt_space(dev, | ||
2644 | obj->gtt_space, | ||
2645 | obj->cache_level)) { | ||
2646 | printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", | ||
2647 | obj->gtt_space->start, | ||
2648 | obj->gtt_space->start + obj->gtt_space->size, | ||
2649 | obj->cache_level); | ||
2650 | err++; | ||
2651 | continue; | ||
2652 | } | ||
2653 | } | ||
2654 | |||
2655 | WARN_ON(err); | ||
2656 | #endif | ||
2657 | } | ||
2658 | |||
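The cache level now doubles as the drm_mm node colour, and the rule i915_gem_valid_gtt_space() enforces on non-LLC machines can be stated per neighbouring pair: differing colours must be separated by a hole. A sketch of that invariant, using the drm_mm_node fields referenced above:

	/* a immediately precedes b in the GTT; the placement is valid when
	 * both share a cache level or a hole keeps the prefetcher from
	 * running straight from one into the other. */
	static bool example_neighbours_ok(const struct drm_mm_node *a,
					  const struct drm_mm_node *b)
	{
		return a->color == b->color || a->hole_follows;
	}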
2697 | /** | 2659 | /** |
2698 | * Finds free space in the GTT aperture and binds the object there. | 2660 | * Finds free space in the GTT aperture and binds the object there. |
2699 | */ | 2661 | */ |
@@ -2748,36 +2710,47 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2748 | search_free: | 2710 | search_free: |
2749 | if (map_and_fenceable) | 2711 | if (map_and_fenceable) |
2750 | free_space = | 2712 | free_space = |
2751 | drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, | 2713 | drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space, |
2752 | size, alignment, | 2714 | size, alignment, obj->cache_level, |
2753 | 0, dev_priv->mm.gtt_mappable_end, | 2715 | 0, dev_priv->mm.gtt_mappable_end, |
2754 | 0); | 2716 | false); |
2755 | else | 2717 | else |
2756 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | 2718 | free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space, |
2757 | size, alignment, 0); | 2719 | size, alignment, obj->cache_level, |
2720 | false); | ||
2758 | 2721 | ||
2759 | if (free_space != NULL) { | 2722 | if (free_space != NULL) { |
2760 | if (map_and_fenceable) | 2723 | if (map_and_fenceable) |
2761 | obj->gtt_space = | 2724 | obj->gtt_space = |
2762 | drm_mm_get_block_range_generic(free_space, | 2725 | drm_mm_get_block_range_generic(free_space, |
2763 | size, alignment, 0, | 2726 | size, alignment, obj->cache_level, |
2764 | 0, dev_priv->mm.gtt_mappable_end, | 2727 | 0, dev_priv->mm.gtt_mappable_end, |
2765 | 0); | 2728 | false); |
2766 | else | 2729 | else |
2767 | obj->gtt_space = | 2730 | obj->gtt_space = |
2768 | drm_mm_get_block(free_space, size, alignment); | 2731 | drm_mm_get_block_generic(free_space, |
2732 | size, alignment, obj->cache_level, | ||
2733 | false); | ||
2769 | } | 2734 | } |
2770 | if (obj->gtt_space == NULL) { | 2735 | if (obj->gtt_space == NULL) { |
2771 | /* If the gtt is empty and we're still having trouble | 2736 | /* If the gtt is empty and we're still having trouble |
2772 | * fitting our object in, we're out of memory. | 2737 | * fitting our object in, we're out of memory. |
2773 | */ | 2738 | */ |
2774 | ret = i915_gem_evict_something(dev, size, alignment, | 2739 | ret = i915_gem_evict_something(dev, size, alignment, |
2740 | obj->cache_level, | ||
2775 | map_and_fenceable); | 2741 | map_and_fenceable); |
2776 | if (ret) | 2742 | if (ret) |
2777 | return ret; | 2743 | return ret; |
2778 | 2744 | ||
2779 | goto search_free; | 2745 | goto search_free; |
2780 | } | 2746 | } |
2747 | if (WARN_ON(!i915_gem_valid_gtt_space(dev, | ||
2748 | obj->gtt_space, | ||
2749 | obj->cache_level))) { | ||
2750 | drm_mm_put_block(obj->gtt_space); | ||
2751 | obj->gtt_space = NULL; | ||
2752 | return -EINVAL; | ||
2753 | } | ||
2781 | 2754 | ||
2782 | ret = i915_gem_object_get_pages_gtt(obj, gfpmask); | 2755 | ret = i915_gem_object_get_pages_gtt(obj, gfpmask); |
2783 | if (ret) { | 2756 | if (ret) { |
@@ -2840,6 +2813,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2840 | obj->map_and_fenceable = mappable && fenceable; | 2813 | obj->map_and_fenceable = mappable && fenceable; |
2841 | 2814 | ||
2842 | trace_i915_gem_object_bind(obj, map_and_fenceable); | 2815 | trace_i915_gem_object_bind(obj, map_and_fenceable); |
2816 | i915_gem_verify_gtt(dev); | ||
2843 | return 0; | 2817 | return 0; |
2844 | } | 2818 | } |
2845 | 2819 | ||
@@ -2869,17 +2843,6 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj) | |||
2869 | drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); | 2843 | drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); |
2870 | } | 2844 | } |
2871 | 2845 | ||
2872 | /** Flushes any GPU write domain for the object if it's dirty. */ | ||
2873 | static int | ||
2874 | i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) | ||
2875 | { | ||
2876 | if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) | ||
2877 | return 0; | ||
2878 | |||
2879 | /* Queue the GPU write cache flushing we need. */ | ||
2880 | return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); | ||
2881 | } | ||
2882 | |||
2883 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2846 | /** Flushes the GTT write domain for the object if it's dirty. */ |
2884 | static void | 2847 | static void |
2885 | i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) | 2848 | i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) |
@@ -2946,16 +2909,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
2946 | if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) | 2909 | if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) |
2947 | return 0; | 2910 | return 0; |
2948 | 2911 | ||
2949 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 2912 | ret = i915_gem_object_wait_rendering(obj, !write); |
2950 | if (ret) | 2913 | if (ret) |
2951 | return ret; | 2914 | return ret; |
2952 | 2915 | ||
2953 | if (obj->pending_gpu_write || write) { | ||
2954 | ret = i915_gem_object_wait_rendering(obj); | ||
2955 | if (ret) | ||
2956 | return ret; | ||
2957 | } | ||
2958 | |||
2959 | i915_gem_object_flush_cpu_write_domain(obj); | 2916 | i915_gem_object_flush_cpu_write_domain(obj); |
2960 | 2917 | ||
2961 | old_write_domain = obj->base.write_domain; | 2918 | old_write_domain = obj->base.write_domain; |
@@ -2998,6 +2955,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
2998 | return -EBUSY; | 2955 | return -EBUSY; |
2999 | } | 2956 | } |
3000 | 2957 | ||
2958 | if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) { | ||
2959 | ret = i915_gem_object_unbind(obj); | ||
2960 | if (ret) | ||
2961 | return ret; | ||
2962 | } | ||
2963 | |||
3001 | if (obj->gtt_space) { | 2964 | if (obj->gtt_space) { |
3002 | ret = i915_gem_object_finish_gpu(obj); | 2965 | ret = i915_gem_object_finish_gpu(obj); |
3003 | if (ret) | 2966 | if (ret) |
@@ -3009,7 +2972,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
3009 | * registers with snooped memory, so relinquish any fences | 2972 | * registers with snooped memory, so relinquish any fences |
3010 | * currently pointing to our region in the aperture. | 2973 | * currently pointing to our region in the aperture. |
3011 | */ | 2974 | */ |
3012 | if (INTEL_INFO(obj->base.dev)->gen < 6) { | 2975 | if (INTEL_INFO(dev)->gen < 6) { |
3013 | ret = i915_gem_object_put_fence(obj); | 2976 | ret = i915_gem_object_put_fence(obj); |
3014 | if (ret) | 2977 | if (ret) |
3015 | return ret; | 2978 | return ret; |
@@ -3020,6 +2983,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
3020 | if (obj->has_aliasing_ppgtt_mapping) | 2983 | if (obj->has_aliasing_ppgtt_mapping) |
3021 | i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, | 2984 | i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, |
3022 | obj, cache_level); | 2985 | obj, cache_level); |
2986 | |||
2987 | obj->gtt_space->color = cache_level; | ||
3023 | } | 2988 | } |
3024 | 2989 | ||
3025 | if (cache_level == I915_CACHE_NONE) { | 2990 | if (cache_level == I915_CACHE_NONE) { |
@@ -3046,9 +3011,72 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
3046 | } | 3011 | } |
3047 | 3012 | ||
3048 | obj->cache_level = cache_level; | 3013 | obj->cache_level = cache_level; |
3014 | i915_gem_verify_gtt(dev); | ||
3049 | return 0; | 3015 | return 0; |
3050 | } | 3016 | } |
3051 | 3017 | ||
3018 | int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data, | ||
3019 | struct drm_file *file) | ||
3020 | { | ||
3021 | struct drm_i915_gem_cacheing *args = data; | ||
3022 | struct drm_i915_gem_object *obj; | ||
3023 | int ret; | ||
3024 | |||
3025 | ret = i915_mutex_lock_interruptible(dev); | ||
3026 | if (ret) | ||
3027 | return ret; | ||
3028 | |||
3029 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | ||
3030 | if (&obj->base == NULL) { | ||
3031 | ret = -ENOENT; | ||
3032 | goto unlock; | ||
3033 | } | ||
3034 | |||
3035 | args->cacheing = obj->cache_level != I915_CACHE_NONE; | ||
3036 | |||
3037 | drm_gem_object_unreference(&obj->base); | ||
3038 | unlock: | ||
3039 | mutex_unlock(&dev->struct_mutex); | ||
3040 | return ret; | ||
3041 | } | ||
3042 | |||
3043 | int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data, | ||
3044 | struct drm_file *file) | ||
3045 | { | ||
3046 | struct drm_i915_gem_cacheing *args = data; | ||
3047 | struct drm_i915_gem_object *obj; | ||
3048 | enum i915_cache_level level; | ||
3049 | int ret; | ||
3050 | |||
3051 | switch (args->cacheing) { | ||
3052 | case I915_CACHEING_NONE: | ||
3053 | level = I915_CACHE_NONE; | ||
3054 | break; | ||
3055 | case I915_CACHEING_CACHED: | ||
3056 | level = I915_CACHE_LLC; | ||
3057 | break; | ||
3058 | default: | ||
3059 | return -EINVAL; | ||
3060 | } | ||
3061 | |||
3062 | ret = i915_mutex_lock_interruptible(dev); | ||
3063 | if (ret) | ||
3064 | return ret; | ||
3065 | |||
3066 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | ||
3067 | if (&obj->base == NULL) { | ||
3068 | ret = -ENOENT; | ||
3069 | goto unlock; | ||
3070 | } | ||
3071 | |||
3072 | ret = i915_gem_object_set_cache_level(obj, level); | ||
3073 | |||
3074 | drm_gem_object_unreference(&obj->base); | ||
3075 | unlock: | ||
3076 | mutex_unlock(&dev->struct_mutex); | ||
3077 | return ret; | ||
3078 | } | ||
3079 | |||
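From userspace the new pair of ioctls reduces cache management to a handle plus a cacheing mode. A hypothetical libdrm-style caller (the DRM_IOCTL_I915_GEM_SET_CACHEING request number is assumed to be defined next to the struct in i915_drm.h; it does not appear in this diff, and bo_handle/fd stand in for the caller's state):

	struct drm_i915_gem_cacheing arg = {
		.handle = bo_handle,			/* GEM handle to mark */
		.cacheing = I915_CACHEING_CACHED,	/* request LLC cacheing */
	};
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHEING, &arg))
		perror("set_cacheing");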
3052 | /* | 3080 | /* |
3053 | * Prepare buffer for display plane (scanout, cursors, etc). | 3081 | * Prepare buffer for display plane (scanout, cursors, etc). |
3054 | * Can be called from an uninterruptible phase (modesetting) and allows | 3082 | * Can be called from an uninterruptible phase (modesetting) and allows |
@@ -3062,10 +3090,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | |||
3062 | u32 old_read_domains, old_write_domain; | 3090 | u32 old_read_domains, old_write_domain; |
3063 | int ret; | 3091 | int ret; |
3064 | 3092 | ||
3065 | ret = i915_gem_object_flush_gpu_write_domain(obj); | ||
3066 | if (ret) | ||
3067 | return ret; | ||
3068 | |||
3069 | if (pipelined != obj->ring) { | 3093 | if (pipelined != obj->ring) { |
3070 | ret = i915_gem_object_sync(obj, pipelined); | 3094 | ret = i915_gem_object_sync(obj, pipelined); |
3071 | if (ret) | 3095 | if (ret) |
@@ -3101,7 +3125,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | |||
3101 | /* It should now be out of any other write domains, and we can update | 3125 | /* It should now be out of any other write domains, and we can update |
3102 | * the domain values for our changes. | 3126 | * the domain values for our changes. |
3103 | */ | 3127 | */ |
3104 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 3128 | obj->base.write_domain = 0; |
3105 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; | 3129 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
3106 | 3130 | ||
3107 | trace_i915_gem_object_change_domain(obj, | 3131 | trace_i915_gem_object_change_domain(obj, |
@@ -3119,13 +3143,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) | |||
3119 | if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) | 3143 | if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) |
3120 | return 0; | 3144 | return 0; |
3121 | 3145 | ||
3122 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | 3146 | ret = i915_gem_object_wait_rendering(obj, false); |
3123 | ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); | ||
3124 | if (ret) | ||
3125 | return ret; | ||
3126 | } | ||
3127 | |||
3128 | ret = i915_gem_object_wait_rendering(obj); | ||
3129 | if (ret) | 3147 | if (ret) |
3130 | return ret; | 3148 | return ret; |
3131 | 3149 | ||
@@ -3149,16 +3167,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) | |||
3149 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) | 3167 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) |
3150 | return 0; | 3168 | return 0; |
3151 | 3169 | ||
3152 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 3170 | ret = i915_gem_object_wait_rendering(obj, !write); |
3153 | if (ret) | 3171 | if (ret) |
3154 | return ret; | 3172 | return ret; |
3155 | 3173 | ||
3156 | if (write || obj->pending_gpu_write) { | ||
3157 | ret = i915_gem_object_wait_rendering(obj); | ||
3158 | if (ret) | ||
3159 | return ret; | ||
3160 | } | ||
3161 | |||
3162 | i915_gem_object_flush_gtt_write_domain(obj); | 3174 | i915_gem_object_flush_gtt_write_domain(obj); |
3163 | 3175 | ||
3164 | old_write_domain = obj->base.write_domain; | 3176 | old_write_domain = obj->base.write_domain; |
@@ -3400,6 +3412,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
3400 | ret = i915_gem_object_flush_active(obj); | 3412 | ret = i915_gem_object_flush_active(obj); |
3401 | 3413 | ||
3402 | args->busy = obj->active; | 3414 | args->busy = obj->active; |
3415 | if (obj->ring) { | ||
3416 | BUILD_BUG_ON(I915_NUM_RINGS > 16); | ||
3417 | args->busy |= intel_ring_flag(obj->ring) << 16; | ||
3418 | } | ||
3403 | 3419 | ||
3404 | drm_gem_object_unreference(&obj->base); | 3420 | drm_gem_object_unreference(&obj->base); |
3405 | unlock: | 3421 | unlock: |
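The busy ioctl now reports which rings an object is busy on in the top half of the result word. A hypothetical userspace decode (the ring-bit assignments are whatever intel_ring_flag() yields in the kernel):

	uint32_t busy = args.busy;	/* from DRM_IOCTL_I915_GEM_BUSY */
	bool active = busy & 0xffff;	/* low half: object still busy */
	uint16_t rings = busy >> 16;	/* high half: one bit per ring */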
@@ -3517,7 +3533,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | |||
3517 | INIT_LIST_HEAD(&obj->gtt_list); | 3533 | INIT_LIST_HEAD(&obj->gtt_list); |
3518 | INIT_LIST_HEAD(&obj->ring_list); | 3534 | INIT_LIST_HEAD(&obj->ring_list); |
3519 | INIT_LIST_HEAD(&obj->exec_list); | 3535 | INIT_LIST_HEAD(&obj->exec_list); |
3520 | INIT_LIST_HEAD(&obj->gpu_write_list); | ||
3521 | obj->madv = I915_MADV_WILLNEED; | 3536 | obj->madv = I915_MADV_WILLNEED; |
3522 | /* Avoid an unnecessary call to unbind on the first bind. */ | 3537 | /* Avoid an unnecessary call to unbind on the first bind. */ |
3523 | obj->map_and_fenceable = true; | 3538 | obj->map_and_fenceable = true; |
@@ -3891,7 +3906,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
3891 | } | 3906 | } |
3892 | 3907 | ||
3893 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); | 3908 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); |
3894 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
3895 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | 3909 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); |
3896 | mutex_unlock(&dev->struct_mutex); | 3910 | mutex_unlock(&dev->struct_mutex); |
3897 | 3911 | ||
@@ -3939,7 +3953,6 @@ init_ring_lists(struct intel_ring_buffer *ring) | |||
3939 | { | 3953 | { |
3940 | INIT_LIST_HEAD(&ring->active_list); | 3954 | INIT_LIST_HEAD(&ring->active_list); |
3941 | INIT_LIST_HEAD(&ring->request_list); | 3955 | INIT_LIST_HEAD(&ring->request_list); |
3942 | INIT_LIST_HEAD(&ring->gpu_write_list); | ||
3943 | } | 3956 | } |
3944 | 3957 | ||
3945 | void | 3958 | void |
@@ -3949,7 +3962,6 @@ i915_gem_load(struct drm_device *dev) | |||
3949 | drm_i915_private_t *dev_priv = dev->dev_private; | 3962 | drm_i915_private_t *dev_priv = dev->dev_private; |
3950 | 3963 | ||
3951 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | 3964 | INIT_LIST_HEAD(&dev_priv->mm.active_list); |
3952 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | ||
3953 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 3965 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
3954 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 3966 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
3955 | INIT_LIST_HEAD(&dev_priv->mm.gtt_list); | 3967 | INIT_LIST_HEAD(&dev_priv->mm.gtt_list); |
@@ -4200,12 +4212,7 @@ static int | |||
4200 | i915_gpu_is_active(struct drm_device *dev) | 4212 | i915_gpu_is_active(struct drm_device *dev) |
4201 | { | 4213 | { |
4202 | drm_i915_private_t *dev_priv = dev->dev_private; | 4214 | drm_i915_private_t *dev_priv = dev->dev_private; |
4203 | int lists_empty; | 4215 | return !list_empty(&dev_priv->mm.active_list); |
4204 | |||
4205 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | ||
4206 | list_empty(&dev_priv->mm.active_list); | ||
4207 | |||
4208 | return !lists_empty; | ||
4209 | } | 4216 | } |
4210 | 4217 | ||
4211 | static int | 4218 | static int |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index a9d58d72bb4d..5c2d354cebbd 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -97,8 +97,7 @@ | |||
97 | 97 | ||
98 | static struct i915_hw_context * | 98 | static struct i915_hw_context * |
99 | i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); | 99 | i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); |
100 | static int do_switch(struct drm_i915_gem_object *from_obj, | 100 | static int do_switch(struct i915_hw_context *to); |
101 | struct i915_hw_context *to, u32 seqno); | ||
102 | 101 | ||
103 | static int get_context_size(struct drm_device *dev) | 102 | static int get_context_size(struct drm_device *dev) |
104 | { | 103 | { |
@@ -113,7 +112,10 @@ static int get_context_size(struct drm_device *dev) | |||
113 | break; | 112 | break; |
114 | case 7: | 113 | case 7: |
115 | reg = I915_READ(GEN7_CXT_SIZE); | 114 | reg = I915_READ(GEN7_CXT_SIZE); |
116 | ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; | 115 | if (IS_HASWELL(dev)) |
116 | ret = HSW_CXT_TOTAL_SIZE(reg) * 64; | ||
117 | else | ||
118 | ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; | ||
117 | break; | 119 | break; |
118 | default: | 120 | default: |
119 | BUG(); | 121 | BUG(); |
@@ -220,19 +222,20 @@ static int create_default_context(struct drm_i915_private *dev_priv) | |||
220 | */ | 222 | */ |
221 | dev_priv->ring[RCS].default_context = ctx; | 223 | dev_priv->ring[RCS].default_context = ctx; |
222 | ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); | 224 | ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); |
223 | if (ret) { | 225 | if (ret) |
224 | do_destroy(ctx); | 226 | goto err_destroy; |
225 | return ret; | ||
226 | } | ||
227 | 227 | ||
228 | ret = do_switch(NULL, ctx, 0); | 228 | ret = do_switch(ctx); |
229 | if (ret) { | 229 | if (ret) |
230 | i915_gem_object_unpin(ctx->obj); | 230 | goto err_unpin; |
231 | do_destroy(ctx); | ||
232 | } else { | ||
233 | DRM_DEBUG_DRIVER("Default HW context loaded\n"); | ||
234 | } | ||
235 | 231 | ||
232 | DRM_DEBUG_DRIVER("Default HW context loaded\n"); | ||
233 | return 0; | ||
234 | |||
235 | err_unpin: | ||
236 | i915_gem_object_unpin(ctx->obj); | ||
237 | err_destroy: | ||
238 | do_destroy(ctx); | ||
236 | return ret; | 239 | return ret; |
237 | } | 240 | } |
238 | 241 | ||
@@ -359,17 +362,18 @@ mi_set_context(struct intel_ring_buffer *ring, | |||
359 | return ret; | 362 | return ret; |
360 | } | 363 | } |
361 | 364 | ||
362 | static int do_switch(struct drm_i915_gem_object *from_obj, | 365 | static int do_switch(struct i915_hw_context *to) |
363 | struct i915_hw_context *to, | ||
364 | u32 seqno) | ||
365 | { | 366 | { |
366 | struct intel_ring_buffer *ring = NULL; | 367 | struct intel_ring_buffer *ring = to->ring; |
368 | struct drm_i915_gem_object *from_obj = ring->last_context_obj; | ||
367 | u32 hw_flags = 0; | 369 | u32 hw_flags = 0; |
368 | int ret; | 370 | int ret; |
369 | 371 | ||
370 | BUG_ON(to == NULL); | ||
371 | BUG_ON(from_obj != NULL && from_obj->pin_count == 0); | 372 | BUG_ON(from_obj != NULL && from_obj->pin_count == 0); |
372 | 373 | ||
374 | if (from_obj == to->obj) | ||
375 | return 0; | ||
376 | |||
373 | ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); | 377 | ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); |
374 | if (ret) | 378 | if (ret) |
375 | return ret; | 379 | return ret; |
@@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj, | |||
393 | else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */ | 397 | else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */ |
394 | hw_flags |= MI_FORCE_RESTORE; | 398 | hw_flags |= MI_FORCE_RESTORE; |
395 | 399 | ||
396 | ring = to->ring; | ||
397 | ret = mi_set_context(ring, to, hw_flags); | 400 | ret = mi_set_context(ring, to, hw_flags); |
398 | if (ret) { | 401 | if (ret) { |
399 | i915_gem_object_unpin(to->obj); | 402 | i915_gem_object_unpin(to->obj); |
@@ -407,6 +410,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj, | |||
407 | * MI_SET_CONTEXT instead of when the next seqno has completed. | 410 | * MI_SET_CONTEXT instead of when the next seqno has completed. |
408 | */ | 411 | */ |
409 | if (from_obj != NULL) { | 412 | if (from_obj != NULL) { |
413 | u32 seqno = i915_gem_next_request_seqno(ring); | ||
410 | from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; | 414 | from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; |
411 | i915_gem_object_move_to_active(from_obj, ring, seqno); | 415 | i915_gem_object_move_to_active(from_obj, ring, seqno); |
412 | /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the | 416 | /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the |
@@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj, | |||
417 | * swapped, but there is no way to do that yet. | 421 | * swapped, but there is no way to do that yet. |
418 | */ | 422 | */ |
419 | from_obj->dirty = 1; | 423 | from_obj->dirty = 1; |
420 | BUG_ON(from_obj->ring != to->ring); | 424 | BUG_ON(from_obj->ring != ring); |
421 | i915_gem_object_unpin(from_obj); | 425 | i915_gem_object_unpin(from_obj); |
422 | 426 | ||
423 | drm_gem_object_unreference(&from_obj->base); | 427 | drm_gem_object_unreference(&from_obj->base); |
@@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring, | |||
448 | int to_id) | 452 | int to_id) |
449 | { | 453 | { |
450 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 454 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
451 | struct drm_i915_file_private *file_priv = NULL; | ||
452 | struct i915_hw_context *to; | 455 | struct i915_hw_context *to; |
453 | struct drm_i915_gem_object *from_obj = ring->last_context_obj; | ||
454 | 456 | ||
455 | if (dev_priv->hw_contexts_disabled) | 457 | if (dev_priv->hw_contexts_disabled) |
456 | return 0; | 458 | return 0; |
@@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring, | |||
458 | if (ring != &dev_priv->ring[RCS]) | 460 | if (ring != &dev_priv->ring[RCS]) |
459 | return 0; | 461 | return 0; |
460 | 462 | ||
461 | if (file) | ||
462 | file_priv = file->driver_priv; | ||
463 | |||
464 | if (to_id == DEFAULT_CONTEXT_ID) { | 463 | if (to_id == DEFAULT_CONTEXT_ID) { |
465 | to = ring->default_context; | 464 | to = ring->default_context; |
466 | } else { | 465 | } else { |
467 | to = i915_gem_context_get(file_priv, to_id); | 466 | if (file == NULL) |
467 | return -EINVAL; | ||
468 | |||
469 | to = i915_gem_context_get(file->driver_priv, to_id); | ||
468 | if (to == NULL) | 470 | if (to == NULL) |
469 | return -ENOENT; | 471 | return -ENOENT; |
470 | } | 472 | } |
471 | 473 | ||
472 | if (from_obj == to->obj) | 474 | return do_switch(to); |
473 | return 0; | ||
474 | |||
475 | return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring)); | ||
476 | } | 475 | } |
477 | 476 | ||
478 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | 477 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index eba0308f10e3..7279c31d4a9a 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -44,7 +44,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) | |||
44 | 44 | ||
45 | int | 45 | int |
46 | i915_gem_evict_something(struct drm_device *dev, int min_size, | 46 | i915_gem_evict_something(struct drm_device *dev, int min_size, |
47 | unsigned alignment, bool mappable) | 47 | unsigned alignment, unsigned cache_level, |
48 | bool mappable) | ||
48 | { | 49 | { |
49 | drm_i915_private_t *dev_priv = dev->dev_private; | 50 | drm_i915_private_t *dev_priv = dev->dev_private; |
50 | struct list_head eviction_list, unwind_list; | 51 | struct list_head eviction_list, unwind_list; |
@@ -79,11 +80,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
79 | INIT_LIST_HEAD(&unwind_list); | 80 | INIT_LIST_HEAD(&unwind_list); |
80 | if (mappable) | 81 | if (mappable) |
81 | drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, | 82 | drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, |
82 | min_size, alignment, 0, | 83 | min_size, alignment, cache_level, |
83 | 0, dev_priv->mm.gtt_mappable_end); | 84 | 0, dev_priv->mm.gtt_mappable_end); |
84 | else | 85 | else |
85 | drm_mm_init_scan(&dev_priv->mm.gtt_space, | 86 | drm_mm_init_scan(&dev_priv->mm.gtt_space, |
86 | min_size, alignment, 0); | 87 | min_size, alignment, cache_level); |
87 | 88 | ||
88 | /* First see if there is a large enough contiguous idle region... */ | 89 | /* First see if there is a large enough contiguous idle region... */ |
89 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { | 90 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { |
@@ -93,23 +94,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
93 | 94 | ||
94 | /* Now merge in the soon-to-be-expired objects... */ | 95 | /* Now merge in the soon-to-be-expired objects... */ |
95 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | 96 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
96 | /* Does the object require an outstanding flush? */ | ||
97 | if (obj->base.write_domain) | ||
98 | continue; | ||
99 | |||
100 | if (mark_free(obj, &unwind_list)) | ||
101 | goto found; | ||
102 | } | ||
103 | |||
104 | /* Finally add anything with a pending flush (in order of retirement) */ | ||
105 | list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { | ||
106 | if (mark_free(obj, &unwind_list)) | ||
107 | goto found; | ||
108 | } | ||
109 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||
110 | if (!obj->base.write_domain) | ||
111 | continue; | ||
112 | |||
113 | if (mark_free(obj, &unwind_list)) | 97 | if (mark_free(obj, &unwind_list)) |
114 | goto found; | 98 | goto found; |
115 | } | 99 | } |
@@ -172,7 +156,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) | |||
172 | int ret; | 156 | int ret; |
173 | 157 | ||
174 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 158 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
175 | list_empty(&dev_priv->mm.flushing_list) && | ||
176 | list_empty(&dev_priv->mm.active_list)); | 159 | list_empty(&dev_priv->mm.active_list)); |
177 | if (lists_empty) | 160 | if (lists_empty) |
178 | return -ENOSPC; | 161 | return -ENOSPC; |
@@ -189,8 +172,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) | |||
189 | 172 | ||
190 | i915_gem_retire_requests(dev); | 173 | i915_gem_retire_requests(dev); |
191 | 174 | ||
192 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
193 | |||
194 | /* Having flushed everything, unbind() should never raise an error */ | 175 | /* Having flushed everything, unbind() should never raise an error */ |
195 | list_for_each_entry_safe(obj, next, | 176 | list_for_each_entry_safe(obj, next, |
196 | &dev_priv->mm.inactive_list, mm_list) { | 177 | &dev_priv->mm.inactive_list, mm_list) { |
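With the flushing list gone, the eviction scan above collapses to two passes, and the object's cache level rides along as the drm_mm scan color so that differently-cached neighbours keep their guard pages. A compact model of the pass ordering (mark_free() and the list names mirror the code above; treat the helper as given):

#include <linux/errno.h>
#include <linux/list.h>

/* Sketch of the simplified scan order, not the driver's exact code. */
static int evict_scan_order(struct drm_i915_private *dev_priv,
			    struct list_head *unwind)
{
	struct drm_i915_gem_object *obj;

	/* Pass 1: idle objects are the cheapest victims */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
		if (mark_free(obj, unwind))
			return 0;

	/* Pass 2: active objects in retirement order; there is no
	 * third pass because the flushing list no longer exists */
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		if (mark_free(obj, unwind))
			return 0;

	return -ENOSPC;		/* caller falls back to evict-everything */
}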
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index ff2819ea0813..afb312ee050c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -34,180 +34,6 @@ | |||
34 | #include "intel_drv.h" | 34 | #include "intel_drv.h" |
35 | #include <linux/dma_remapping.h> | 35 | #include <linux/dma_remapping.h> |
36 | 36 | ||
37 | struct change_domains { | ||
38 | uint32_t invalidate_domains; | ||
39 | uint32_t flush_domains; | ||
40 | uint32_t flush_rings; | ||
41 | uint32_t flips; | ||
42 | }; | ||
43 | |||
44 | /* | ||
45 | * Set the next domain for the specified object. This | ||
46 | * may not actually perform the necessary flushing/invaliding though, | ||
47 | * as that may want to be batched with other set_domain operations | ||
48 | * | ||
49 | * This is (we hope) the only really tricky part of gem. The goal | ||
50 | * is fairly simple -- track which caches hold bits of the object | ||
51 | * and make sure they remain coherent. A few concrete examples may | ||
52 | * help to explain how it works. For shorthand, we use the notation | ||
53 | * (read_domains, write_domain), e.g. (CPU, CPU) to indicate | ||
54 | * a pair of read and write domain masks. | ||
55 | * | ||
56 | * Case 1: the batch buffer | ||
57 | * | ||
58 | * 1. Allocated | ||
59 | * 2. Written by CPU | ||
60 | * 3. Mapped to GTT | ||
61 | * 4. Read by GPU | ||
62 | * 5. Unmapped from GTT | ||
63 | * 6. Freed | ||
64 | * | ||
65 | * Let's take these a step at a time | ||
66 | * | ||
67 | * 1. Allocated | ||
68 | * Pages allocated from the kernel may still have | ||
69 | * cache contents, so we set them to (CPU, CPU) always. | ||
70 | * 2. Written by CPU (using pwrite) | ||
71 | * The pwrite function calls set_domain (CPU, CPU) and | ||
72 | * this function does nothing (as nothing changes) | ||
73 | * 3. Mapped by GTT | ||
74 | * This function asserts that the object is not | ||
75 | * currently in any GPU-based read or write domains | ||
76 | * 4. Read by GPU | ||
77 | * i915_gem_execbuffer calls set_domain (COMMAND, 0). | ||
78 | * As write_domain is zero, this function adds in the | ||
79 | * current read domains (CPU+COMMAND, 0). | ||
80 | * flush_domains is set to CPU. | ||
81 | * invalidate_domains is set to COMMAND | ||
82 | * clflush is run to get data out of the CPU caches | ||
83 | * then i915_dev_set_domain calls i915_gem_flush to | ||
84 | * emit an MI_FLUSH and drm_agp_chipset_flush | ||
85 | * 5. Unmapped from GTT | ||
86 | * i915_gem_object_unbind calls set_domain (CPU, CPU) | ||
87 | * flush_domains and invalidate_domains end up both zero | ||
88 | * so no flushing/invalidating happens | ||
89 | * 6. Freed | ||
90 | * yay, done | ||
91 | * | ||
92 | * Case 2: The shared render buffer | ||
93 | * | ||
94 | * 1. Allocated | ||
95 | * 2. Mapped to GTT | ||
96 | * 3. Read/written by GPU | ||
97 | * 4. set_domain to (CPU,CPU) | ||
98 | * 5. Read/written by CPU | ||
99 | * 6. Read/written by GPU | ||
100 | * | ||
101 | * 1. Allocated | ||
102 | * Same as last example, (CPU, CPU) | ||
103 | * 2. Mapped to GTT | ||
104 | * Nothing changes (assertions find that it is not in the GPU) | ||
105 | * 3. Read/written by GPU | ||
106 | * execbuffer calls set_domain (RENDER, RENDER) | ||
107 | * flush_domains gets CPU | ||
108 | * invalidate_domains gets GPU | ||
109 | * clflush (obj) | ||
110 | * MI_FLUSH and drm_agp_chipset_flush | ||
111 | * 4. set_domain (CPU, CPU) | ||
112 | * flush_domains gets GPU | ||
113 | * invalidate_domains gets CPU | ||
114 | * wait_rendering (obj) to make sure all drawing is complete. | ||
115 | * This will include an MI_FLUSH to get the data from GPU | ||
116 | * to memory | ||
117 | * clflush (obj) to invalidate the CPU cache | ||
118 | * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) | ||
119 | * 5. Read/written by CPU | ||
120 | * cache lines are loaded and dirtied | ||
121 | * 6. Read/written by GPU | ||
122 | * Same as last GPU access | ||
123 | * | ||
124 | * Case 3: The constant buffer | ||
125 | * | ||
126 | * 1. Allocated | ||
127 | * 2. Written by CPU | ||
128 | * 3. Read by GPU | ||
129 | * 4. Updated (written) by CPU again | ||
130 | * 5. Read by GPU | ||
131 | * | ||
132 | * 1. Allocated | ||
133 | * (CPU, CPU) | ||
134 | * 2. Written by CPU | ||
135 | * (CPU, CPU) | ||
136 | * 3. Read by GPU | ||
137 | * (CPU+RENDER, 0) | ||
138 | * flush_domains = CPU | ||
139 | * invalidate_domains = RENDER | ||
140 | * clflush (obj) | ||
141 | * MI_FLUSH | ||
142 | * drm_agp_chipset_flush | ||
143 | * 4. Updated (written) by CPU again | ||
144 | * (CPU, CPU) | ||
145 | * flush_domains = 0 (no previous write domain) | ||
146 | * invalidate_domains = 0 (no new read domains) | ||
147 | * 5. Read by GPU | ||
148 | * (CPU+RENDER, 0) | ||
149 | * flush_domains = CPU | ||
150 | * invalidate_domains = RENDER | ||
151 | * clflush (obj) | ||
152 | * MI_FLUSH | ||
153 | * drm_agp_chipset_flush | ||
154 | */ | ||
155 | static void | ||
156 | i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, | ||
157 | struct intel_ring_buffer *ring, | ||
158 | struct change_domains *cd) | ||
159 | { | ||
160 | uint32_t invalidate_domains = 0, flush_domains = 0; | ||
161 | |||
162 | /* | ||
163 | * If the object isn't moving to a new write domain, | ||
164 | * let the object stay in multiple read domains | ||
165 | */ | ||
166 | if (obj->base.pending_write_domain == 0) | ||
167 | obj->base.pending_read_domains |= obj->base.read_domains; | ||
168 | |||
169 | /* | ||
170 | * Flush the current write domain if | ||
171 | * the new read domains don't match. Invalidate | ||
172 | * any read domains which differ from the old | ||
173 | * write domain | ||
174 | */ | ||
175 | if (obj->base.write_domain && | ||
176 | (((obj->base.write_domain != obj->base.pending_read_domains || | ||
177 | obj->ring != ring)) || | ||
178 | (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { | ||
179 | flush_domains |= obj->base.write_domain; | ||
180 | invalidate_domains |= | ||
181 | obj->base.pending_read_domains & ~obj->base.write_domain; | ||
182 | } | ||
183 | /* | ||
184 | * Invalidate any read caches which may have | ||
185 | * stale data. That is, any new read domains. | ||
186 | */ | ||
187 | invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; | ||
188 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) | ||
189 | i915_gem_clflush_object(obj); | ||
190 | |||
191 | if (obj->base.pending_write_domain) | ||
192 | cd->flips |= atomic_read(&obj->pending_flip); | ||
193 | |||
194 | /* The actual obj->write_domain will be updated with | ||
195 | * pending_write_domain after we emit the accumulated flush for all | ||
196 | * of our domain changes in execbuffers (which clears objects' | ||
197 | * write_domains). So if we have a current write domain that we | ||
198 | * aren't changing, set pending_write_domain to that. | ||
199 | */ | ||
200 | if (flush_domains == 0 && obj->base.pending_write_domain == 0) | ||
201 | obj->base.pending_write_domain = obj->base.write_domain; | ||
202 | |||
203 | cd->invalidate_domains |= invalidate_domains; | ||
204 | cd->flush_domains |= flush_domains; | ||
205 | if (flush_domains & I915_GEM_GPU_DOMAINS) | ||
206 | cd->flush_rings |= intel_ring_flag(obj->ring); | ||
207 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) | ||
208 | cd->flush_rings |= intel_ring_flag(ring); | ||
209 | } | ||
210 | |||
211 | struct eb_objects { | 37 | struct eb_objects { |
212 | int and; | 38 | int and; |
213 | struct hlist_head buckets[0]; | 39 | struct hlist_head buckets[0]; |
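The deleted walkthrough reduces to two mask operations per object: flush the old write domain when the new readers differ from it, and invalidate any read domains the object is about to gain. A stand-alone model of that arithmetic, checked against "Case 1, step 4" from the comment (domain bits are plain flags here, no driver state is involved):

#include <stdint.h>
#include <stdio.h>

#define DOM_CPU    (1u << 0)
#define DOM_RENDER (1u << 1)

static void domain_step(uint32_t read, uint32_t write,
			uint32_t pending_read,
			uint32_t *flush, uint32_t *invalidate)
{
	*flush = 0;
	*invalidate = 0;
	/* flush the old write domain if the new readers differ from it */
	if (write && write != pending_read)
		*flush |= write;
	/* invalidate every read domain the object is about to gain */
	*invalidate |= pending_read & ~read;
}

int main(void)
{
	uint32_t flush, invalidate;

	/* Case 1, step 4: a CPU-written batch is read by the GPU */
	domain_step(DOM_CPU, DOM_CPU, DOM_RENDER, &flush, &invalidate);
	printf("flush=%#x invalidate=%#x\n",
	       (unsigned)flush, (unsigned)invalidate); /* 0x1 0x2 */
	return 0;
}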
@@ -587,6 +413,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
587 | 413 | ||
588 | obj->base.pending_read_domains = 0; | 414 | obj->base.pending_read_domains = 0; |
589 | obj->base.pending_write_domain = 0; | 415 | obj->base.pending_write_domain = 0; |
416 | obj->pending_fenced_gpu_access = false; | ||
590 | } | 417 | } |
591 | list_splice(&ordered_objects, objects); | 418 | list_splice(&ordered_objects, objects); |
592 | 419 | ||
@@ -810,18 +637,6 @@ err: | |||
810 | return ret; | 637 | return ret; |
811 | } | 638 | } |
812 | 639 | ||
813 | static void | ||
814 | i915_gem_execbuffer_flush(struct drm_device *dev, | ||
815 | uint32_t invalidate_domains, | ||
816 | uint32_t flush_domains) | ||
817 | { | ||
818 | if (flush_domains & I915_GEM_DOMAIN_CPU) | ||
819 | intel_gtt_chipset_flush(); | ||
820 | |||
821 | if (flush_domains & I915_GEM_DOMAIN_GTT) | ||
822 | wmb(); | ||
823 | } | ||
824 | |||
825 | static int | 640 | static int |
826 | i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) | 641 | i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) |
827 | { | 642 | { |
@@ -854,48 +669,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) | |||
854 | return 0; | 669 | return 0; |
855 | } | 670 | } |
856 | 671 | ||
857 | |||
858 | static int | 672 | static int |
859 | i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | 673 | i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, |
860 | struct list_head *objects) | 674 | struct list_head *objects) |
861 | { | 675 | { |
862 | struct drm_i915_gem_object *obj; | 676 | struct drm_i915_gem_object *obj; |
863 | struct change_domains cd; | 677 | uint32_t flush_domains = 0; |
678 | uint32_t flips = 0; | ||
864 | int ret; | 679 | int ret; |
865 | 680 | ||
866 | memset(&cd, 0, sizeof(cd)); | 681 | list_for_each_entry(obj, objects, exec_list) { |
867 | list_for_each_entry(obj, objects, exec_list) | 682 | ret = i915_gem_object_sync(obj, ring); |
868 | i915_gem_object_set_to_gpu_domain(obj, ring, &cd); | ||
869 | |||
870 | if (cd.invalidate_domains | cd.flush_domains) { | ||
871 | i915_gem_execbuffer_flush(ring->dev, | ||
872 | cd.invalidate_domains, | ||
873 | cd.flush_domains); | ||
874 | } | ||
875 | |||
876 | if (cd.flips) { | ||
877 | ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips); | ||
878 | if (ret) | 683 | if (ret) |
879 | return ret; | 684 | return ret; |
685 | |||
686 | if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) | ||
687 | i915_gem_clflush_object(obj); | ||
688 | |||
689 | if (obj->base.pending_write_domain) | ||
690 | flips |= atomic_read(&obj->pending_flip); | ||
691 | |||
692 | flush_domains |= obj->base.write_domain; | ||
880 | } | 693 | } |
881 | 694 | ||
882 | list_for_each_entry(obj, objects, exec_list) { | 695 | if (flips) { |
883 | ret = i915_gem_object_sync(obj, ring); | 696 | ret = i915_gem_execbuffer_wait_for_flips(ring, flips); |
884 | if (ret) | 697 | if (ret) |
885 | return ret; | 698 | return ret; |
886 | } | 699 | } |
887 | 700 | ||
701 | if (flush_domains & I915_GEM_DOMAIN_CPU) | ||
702 | intel_gtt_chipset_flush(); | ||
703 | |||
704 | if (flush_domains & I915_GEM_DOMAIN_GTT) | ||
705 | wmb(); | ||
706 | |||
888 | /* Unconditionally invalidate gpu caches and ensure that we do flush | 707 | /* Unconditionally invalidate gpu caches and ensure that we do flush |
889 | * any residual writes from the previous batch. | 708 | * any residual writes from the previous batch. |
890 | */ | 709 | */ |
891 | ret = i915_gem_flush_ring(ring, | 710 | return intel_ring_invalidate_all_caches(ring); |
892 | I915_GEM_GPU_DOMAINS, | ||
893 | ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0); | ||
894 | if (ret) | ||
895 | return ret; | ||
896 | |||
897 | ring->gpu_caches_dirty = false; | ||
898 | return 0; | ||
899 | } | 711 | } |
900 | 712 | ||
901 | static bool | 713 | static bool |
@@ -943,9 +755,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, | |||
943 | struct drm_i915_gem_object *obj; | 755 | struct drm_i915_gem_object *obj; |
944 | 756 | ||
945 | list_for_each_entry(obj, objects, exec_list) { | 757 | list_for_each_entry(obj, objects, exec_list) { |
946 | u32 old_read = obj->base.read_domains; | 758 | u32 old_read = obj->base.read_domains; |
947 | u32 old_write = obj->base.write_domain; | 759 | u32 old_write = obj->base.write_domain; |
948 | |||
949 | 760 | ||
950 | obj->base.read_domains = obj->base.pending_read_domains; | 761 | obj->base.read_domains = obj->base.pending_read_domains; |
951 | obj->base.write_domain = obj->base.pending_write_domain; | 762 | obj->base.write_domain = obj->base.pending_write_domain; |
@@ -954,17 +765,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, | |||
954 | i915_gem_object_move_to_active(obj, ring, seqno); | 765 | i915_gem_object_move_to_active(obj, ring, seqno); |
955 | if (obj->base.write_domain) { | 766 | if (obj->base.write_domain) { |
956 | obj->dirty = 1; | 767 | obj->dirty = 1; |
957 | obj->pending_gpu_write = true; | 768 | obj->last_write_seqno = seqno; |
958 | list_move_tail(&obj->gpu_write_list, | ||
959 | &ring->gpu_write_list); | ||
960 | if (obj->pin_count) /* check for potential scanout */ | 769 | if (obj->pin_count) /* check for potential scanout */ |
961 | intel_mark_busy(ring->dev, obj); | 770 | intel_mark_fb_busy(obj); |
962 | } | 771 | } |
963 | 772 | ||
964 | trace_i915_gem_object_change_domain(obj, old_read, old_write); | 773 | trace_i915_gem_object_change_domain(obj, old_read, old_write); |
965 | } | 774 | } |
966 | |||
967 | intel_mark_busy(ring->dev, NULL); | ||
968 | } | 775 | } |
969 | 776 | ||
970 | static void | 777 | static void |
@@ -972,16 +779,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, | |||
972 | struct drm_file *file, | 779 | struct drm_file *file, |
973 | struct intel_ring_buffer *ring) | 780 | struct intel_ring_buffer *ring) |
974 | { | 781 | { |
975 | struct drm_i915_gem_request *request; | ||
976 | |||
977 | /* Unconditionally force add_request to emit a full flush. */ | 782 | /* Unconditionally force add_request to emit a full flush. */ |
978 | ring->gpu_caches_dirty = true; | 783 | ring->gpu_caches_dirty = true; |
979 | 784 | ||
980 | /* Add a breadcrumb for the completion of the batch buffer */ | 785 | /* Add a breadcrumb for the completion of the batch buffer */ |
981 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 786 | (void)i915_add_request(ring, file, NULL); |
982 | if (request == NULL || i915_add_request(ring, file, request)) { | ||
983 | kfree(request); | ||
984 | } | ||
985 | } | 787 | } |
986 | 788 | ||
987 | static int | 789 | static int |
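The retire path above can fire and forget because request allocation moved into the callee; passing NULL asks i915_add_request() to allocate on the caller's behalf. A sketch of that contract (an assumption inferred from the call site, not a quote of the function):

#include <linux/slab.h>

/* Assumed callee-side pattern: allocate when the caller passes NULL,
 * so fire-and-forget call sites need no kzalloc/kfree dance. */
static int add_request_sketch(struct intel_ring_buffer *ring,
			      struct drm_file *file,
			      struct drm_i915_gem_request *request)
{
	if (request == NULL) {
		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;
	}
	/* ... emit flush + seqno breadcrumb, then queue 'request' ... */
	return 0;
}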
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d9a5372ec56f..804d65345e2c 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -426,6 +426,23 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) | |||
426 | undo_idling(dev_priv, interruptible); | 426 | undo_idling(dev_priv, interruptible); |
427 | } | 427 | } |
428 | 428 | ||
429 | static void i915_gtt_color_adjust(struct drm_mm_node *node, | ||
430 | unsigned long color, | ||
431 | unsigned long *start, | ||
432 | unsigned long *end) | ||
433 | { | ||
434 | if (node->color != color) | ||
435 | *start += 4096; | ||
436 | |||
437 | if (!list_empty(&node->node_list)) { | ||
438 | node = list_entry(node->node_list.next, | ||
439 | struct drm_mm_node, | ||
440 | node_list); | ||
441 | if (node->allocated && node->color != color) | ||
442 | *end -= 4096; | ||
443 | } | ||
444 | } | ||
445 | |||
429 | void i915_gem_init_global_gtt(struct drm_device *dev, | 446 | void i915_gem_init_global_gtt(struct drm_device *dev, |
430 | unsigned long start, | 447 | unsigned long start, |
431 | unsigned long mappable_end, | 448 | unsigned long mappable_end, |
@@ -435,6 +452,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev, | |||
435 | 452 | ||
436 | /* Subtract the guard page ... */ | 453 | /* Subtract the guard page ... */ |
437 | drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); | 454 | drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); |
455 | if (!HAS_LLC(dev)) | ||
456 | dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; | ||
438 | 457 | ||
439 | dev_priv->mm.gtt_start = start; | 458 | dev_priv->mm.gtt_start = start; |
440 | dev_priv->mm.gtt_mappable_end = mappable_end; | 459 | dev_priv->mm.gtt_mappable_end = mappable_end; |
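On parts without an LLC, i915_gtt_color_adjust() above is how drm_mm keeps one unused page between GTT ranges of differing cache level. A stand-alone numeric model of the hole shrinking, with the 4096-byte page size from the code (neighbour colors stand in for the drm_mm_node bookkeeping):

#include <stdio.h>

static void color_adjust(unsigned long prev_color,
			 unsigned long next_color,
			 unsigned long color,
			 unsigned long *start, unsigned long *end)
{
	/* a page of slack against each neighbour of a different color */
	if (prev_color != color)
		*start += 4096;
	if (next_color != color)
		*end -= 4096;
}

int main(void)
{
	unsigned long start = 0x10000, end = 0x20000;

	/* both neighbours use another cache level: lose a page each side */
	color_adjust(1, 1, 0, &start, &end);
	printf("usable hole: %#lx-%#lx\n", start, end); /* 0x11000-0x1f000 */
	return 0;
}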
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8a3828528b9d..a61b41a8c607 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -296,11 +296,21 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
296 | drm_helper_hpd_irq_event(dev); | 296 | drm_helper_hpd_irq_event(dev); |
297 | } | 297 | } |
298 | 298 | ||
299 | static void i915_handle_rps_change(struct drm_device *dev) | 299 | /* defined in intel_pm.c */ |
300 | extern spinlock_t mchdev_lock; | ||
301 | |||
302 | static void ironlake_handle_rps_change(struct drm_device *dev) | ||
300 | { | 303 | { |
301 | drm_i915_private_t *dev_priv = dev->dev_private; | 304 | drm_i915_private_t *dev_priv = dev->dev_private; |
302 | u32 busy_up, busy_down, max_avg, min_avg; | 305 | u32 busy_up, busy_down, max_avg, min_avg; |
303 | u8 new_delay = dev_priv->cur_delay; | 306 | u8 new_delay; |
307 | unsigned long flags; | ||
308 | |||
309 | spin_lock_irqsave(&mchdev_lock, flags); | ||
310 | |||
311 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); | ||
312 | |||
313 | new_delay = dev_priv->cur_delay; | ||
304 | 314 | ||
305 | I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); | 315 | I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); |
306 | busy_up = I915_READ(RCPREVBSYTUPAVG); | 316 | busy_up = I915_READ(RCPREVBSYTUPAVG); |
@@ -324,6 +334,8 @@ static void i915_handle_rps_change(struct drm_device *dev) | |||
324 | if (ironlake_set_drps(dev, new_delay)) | 334 | if (ironlake_set_drps(dev, new_delay)) |
325 | dev_priv->cur_delay = new_delay; | 335 | dev_priv->cur_delay = new_delay; |
326 | 336 | ||
337 | spin_unlock_irqrestore(&mchdev_lock, flags); | ||
338 | |||
327 | return; | 339 | return; |
328 | } | 340 | } |
329 | 341 | ||
@@ -335,7 +347,7 @@ static void notify_ring(struct drm_device *dev, | |||
335 | if (ring->obj == NULL) | 347 | if (ring->obj == NULL) |
336 | return; | 348 | return; |
337 | 349 | ||
338 | trace_i915_gem_request_complete(ring, ring->get_seqno(ring)); | 350 | trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); |
339 | 351 | ||
340 | wake_up_all(&ring->irq_queue); | 352 | wake_up_all(&ring->irq_queue); |
341 | if (i915_enable_hangcheck) { | 353 | if (i915_enable_hangcheck) { |
@@ -349,16 +361,16 @@ static void notify_ring(struct drm_device *dev, | |||
349 | static void gen6_pm_rps_work(struct work_struct *work) | 361 | static void gen6_pm_rps_work(struct work_struct *work) |
350 | { | 362 | { |
351 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 363 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
352 | rps_work); | 364 | rps.work); |
353 | u32 pm_iir, pm_imr; | 365 | u32 pm_iir, pm_imr; |
354 | u8 new_delay; | 366 | u8 new_delay; |
355 | 367 | ||
356 | spin_lock_irq(&dev_priv->rps_lock); | 368 | spin_lock_irq(&dev_priv->rps.lock); |
357 | pm_iir = dev_priv->pm_iir; | 369 | pm_iir = dev_priv->rps.pm_iir; |
358 | dev_priv->pm_iir = 0; | 370 | dev_priv->rps.pm_iir = 0; |
359 | pm_imr = I915_READ(GEN6_PMIMR); | 371 | pm_imr = I915_READ(GEN6_PMIMR); |
360 | I915_WRITE(GEN6_PMIMR, 0); | 372 | I915_WRITE(GEN6_PMIMR, 0); |
361 | spin_unlock_irq(&dev_priv->rps_lock); | 373 | spin_unlock_irq(&dev_priv->rps.lock); |
362 | 374 | ||
363 | if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) | 375 | if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) |
364 | return; | 376 | return; |
@@ -366,9 +378,9 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
366 | mutex_lock(&dev_priv->dev->struct_mutex); | 378 | mutex_lock(&dev_priv->dev->struct_mutex); |
367 | 379 | ||
368 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) | 380 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) |
369 | new_delay = dev_priv->cur_delay + 1; | 381 | new_delay = dev_priv->rps.cur_delay + 1; |
370 | else | 382 | else |
371 | new_delay = dev_priv->cur_delay - 1; | 383 | new_delay = dev_priv->rps.cur_delay - 1; |
372 | 384 | ||
373 | gen6_set_rps(dev_priv->dev, new_delay); | 385 | gen6_set_rps(dev_priv->dev, new_delay); |
374 | 386 | ||
@@ -444,7 +456,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev) | |||
444 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 456 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
445 | unsigned long flags; | 457 | unsigned long flags; |
446 | 458 | ||
447 | if (!IS_IVYBRIDGE(dev)) | 459 | if (!HAS_L3_GPU_CACHE(dev)) |
448 | return; | 460 | return; |
449 | 461 | ||
450 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 462 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
@@ -488,19 +500,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, | |||
488 | * IIR bits should never already be set because IMR should | 500 | * IIR bits should never already be set because IMR should |
489 | * prevent an interrupt from being shown in IIR. The warning | 501 | * prevent an interrupt from being shown in IIR. The warning |
490 | * displays a case where we've unsafely cleared | 502 | * displays a case where we've unsafely cleared |
491 | * dev_priv->pm_iir. Although missing an interrupt of the same | 503 | * dev_priv->rps.pm_iir. Although missing an interrupt of the same |
492 | * type is not a problem, it indicates a problem in the logic. | 504 | * type is not a problem, it indicates a problem in the logic. |
493 | * | 505 | * |
494 | * The mask bit in IMR is cleared by rps_work. | 506 | * The mask bit in IMR is cleared by dev_priv->rps.work. |
495 | */ | 507 | */ |
496 | 508 | ||
497 | spin_lock_irqsave(&dev_priv->rps_lock, flags); | 509 | spin_lock_irqsave(&dev_priv->rps.lock, flags); |
498 | dev_priv->pm_iir |= pm_iir; | 510 | dev_priv->rps.pm_iir |= pm_iir; |
499 | I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); | 511 | I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); |
500 | POSTING_READ(GEN6_PMIMR); | 512 | POSTING_READ(GEN6_PMIMR); |
501 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); | 513 | spin_unlock_irqrestore(&dev_priv->rps.lock, flags); |
502 | 514 | ||
503 | queue_work(dev_priv->wq, &dev_priv->rps_work); | 515 | queue_work(dev_priv->wq, &dev_priv->rps.work); |
504 | } | 516 | } |
505 | 517 | ||
506 | static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) | 518 | static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) |
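gen6_queue_rps_work() above is the standard top-half handoff: latch the event bits under a spinlock, mask further interrupts of that type, then defer the real work to process context. A skeleton of the pattern (names are illustrative, not the driver's, and the IMR write is left abstract):

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct deferred_events {
	spinlock_t lock;		/* protects 'pending' against the IRQ */
	u32 pending;			/* latched IIR bits for the worker */
	struct work_struct work;	/* bottom half, process context */
};

static void queue_deferred(struct deferred_events *ev,
			   struct workqueue_struct *wq, u32 iir)
{
	unsigned long flags;

	spin_lock_irqsave(&ev->lock, flags);
	ev->pending |= iir;	/* accumulate; never clobber earlier bits */
	/* hardware IMR write plus a posting read would go here */
	spin_unlock_irqrestore(&ev->lock, flags);

	queue_work(wq, &ev->work);	/* worker clears mask and 'pending' */
}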
@@ -793,10 +805,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | |||
793 | ibx_irq_handler(dev, pch_iir); | 805 | ibx_irq_handler(dev, pch_iir); |
794 | } | 806 | } |
795 | 807 | ||
796 | if (de_iir & DE_PCU_EVENT) { | 808 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) |
797 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); | 809 | ironlake_handle_rps_change(dev); |
798 | i915_handle_rps_change(dev); | ||
799 | } | ||
800 | 810 | ||
801 | if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) | 811 | if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) |
802 | gen6_queue_rps_work(dev_priv, pm_iir); | 812 | gen6_queue_rps_work(dev_priv, pm_iir); |
@@ -949,7 +959,8 @@ static void capture_bo(struct drm_i915_error_buffer *err, | |||
949 | { | 959 | { |
950 | err->size = obj->base.size; | 960 | err->size = obj->base.size; |
951 | err->name = obj->base.name; | 961 | err->name = obj->base.name; |
952 | err->seqno = obj->last_rendering_seqno; | 962 | err->rseqno = obj->last_read_seqno; |
963 | err->wseqno = obj->last_write_seqno; | ||
953 | err->gtt_offset = obj->gtt_offset; | 964 | err->gtt_offset = obj->gtt_offset; |
954 | err->read_domains = obj->base.read_domains; | 965 | err->read_domains = obj->base.read_domains; |
955 | err->write_domain = obj->base.write_domain; | 966 | err->write_domain = obj->base.write_domain; |
@@ -1039,12 +1050,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | |||
1039 | if (!ring->get_seqno) | 1050 | if (!ring->get_seqno) |
1040 | return NULL; | 1051 | return NULL; |
1041 | 1052 | ||
1042 | seqno = ring->get_seqno(ring); | 1053 | seqno = ring->get_seqno(ring, false); |
1043 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | 1054 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
1044 | if (obj->ring != ring) | 1055 | if (obj->ring != ring) |
1045 | continue; | 1056 | continue; |
1046 | 1057 | ||
1047 | if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) | 1058 | if (i915_seqno_passed(seqno, obj->last_read_seqno)) |
1048 | continue; | 1059 | continue; |
1049 | 1060 | ||
1050 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) | 1061 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) |
@@ -1093,7 +1104,7 @@ static void i915_record_ring_state(struct drm_device *dev, | |||
1093 | 1104 | ||
1094 | error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); | 1105 | error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); |
1095 | error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); | 1106 | error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); |
1096 | error->seqno[ring->id] = ring->get_seqno(ring); | 1107 | error->seqno[ring->id] = ring->get_seqno(ring, false); |
1097 | error->acthd[ring->id] = intel_ring_get_active_head(ring); | 1108 | error->acthd[ring->id] = intel_ring_get_active_head(ring); |
1098 | error->head[ring->id] = I915_READ_HEAD(ring); | 1109 | error->head[ring->id] = I915_READ_HEAD(ring); |
1099 | error->tail[ring->id] = I915_READ_TAIL(ring); | 1110 | error->tail[ring->id] = I915_READ_TAIL(ring); |
@@ -1590,7 +1601,8 @@ ring_last_seqno(struct intel_ring_buffer *ring) | |||
1590 | static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) | 1601 | static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) |
1591 | { | 1602 | { |
1592 | if (list_empty(&ring->request_list) || | 1603 | if (list_empty(&ring->request_list) || |
1593 | i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { | 1604 | i915_seqno_passed(ring->get_seqno(ring, false), |
1605 | ring_last_seqno(ring))) { | ||
1594 | /* Issue a wake-up to catch stuck h/w. */ | 1606 | /* Issue a wake-up to catch stuck h/w. */ |
1595 | if (waitqueue_active(&ring->irq_queue)) { | 1607 | if (waitqueue_active(&ring->irq_queue)) { |
1596 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", | 1608 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", |
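The idle test above compares ring->get_seqno(ring, false) (the extra bool argument added in this series controls how the seqno read is kept coherent) against the last queued seqno via i915_seqno_passed(). That comparison is wraparound-safe because it is done as a signed 32-bit difference; a quick user-space check of the property:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* signed difference: correct even after the counter wraps */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(5, 3));          /* 1 */
	printf("%d\n", seqno_passed(2, 0xfffffffe)); /* 1: 2 is 4 past wrap */
	printf("%d\n", seqno_passed(0xfffffffe, 2)); /* 0 */
	return 0;
}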
@@ -2647,7 +2659,7 @@ void intel_irq_init(struct drm_device *dev) | |||
2647 | 2659 | ||
2648 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | 2660 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
2649 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); | 2661 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); |
2650 | INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); | 2662 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
2651 | INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); | 2663 | INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); |
2652 | 2664 | ||
2653 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | 2665 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 28725ce5b82c..ab8cffe193cd 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -450,6 +450,7 @@ | |||
450 | #define RING_ACTHD(base) ((base)+0x74) | 450 | #define RING_ACTHD(base) ((base)+0x74) |
451 | #define RING_NOPID(base) ((base)+0x94) | 451 | #define RING_NOPID(base) ((base)+0x94) |
452 | #define RING_IMR(base) ((base)+0xa8) | 452 | #define RING_IMR(base) ((base)+0xa8) |
453 | #define RING_TIMESTAMP(base) ((base)+0x358) | ||
453 | #define TAIL_ADDR 0x001FFFF8 | 454 | #define TAIL_ADDR 0x001FFFF8 |
454 | #define HEAD_WRAP_COUNT 0xFFE00000 | 455 | #define HEAD_WRAP_COUNT 0xFFE00000 |
455 | #define HEAD_WRAP_ONE 0x00200000 | 456 | #define HEAD_WRAP_ONE 0x00200000 |
@@ -529,6 +530,8 @@ | |||
529 | #define GFX_PSMI_GRANULARITY (1<<10) | 530 | #define GFX_PSMI_GRANULARITY (1<<10) |
530 | #define GFX_PPGTT_ENABLE (1<<9) | 531 | #define GFX_PPGTT_ENABLE (1<<9) |
531 | 532 | ||
533 | #define VLV_DISPLAY_BASE 0x180000 | ||
534 | |||
532 | #define SCPD0 0x0209c /* 915+ only */ | 535 | #define SCPD0 0x0209c /* 915+ only */ |
533 | #define IER 0x020a0 | 536 | #define IER 0x020a0 |
534 | #define IIR 0x020a4 | 537 | #define IIR 0x020a4 |
@@ -1496,6 +1499,14 @@ | |||
1496 | GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ | 1499 | GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ |
1497 | GEN7_CXT_GT1_SIZE(ctx_reg) + \ | 1500 | GEN7_CXT_GT1_SIZE(ctx_reg) + \ |
1498 | GEN7_CXT_VFSTATE_SIZE(ctx_reg)) | 1501 | GEN7_CXT_VFSTATE_SIZE(ctx_reg)) |
1502 | #define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f) | ||
1503 | #define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7) | ||
1504 | #define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff) | ||
1505 | #define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \ | ||
1506 | HSW_CXT_RING_SIZE(ctx_reg) + \ | ||
1507 | HSW_CXT_RENDER_SIZE(ctx_reg) + \ | ||
1508 | GEN7_CXT_VFSTATE_SIZE(ctx_reg)) | ||
1509 | |||
1499 | 1510 | ||
1500 | /* | 1511 | /* |
1501 | * Overlay regs | 1512 | * Overlay regs |
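The new HSW_CXT_* macros just slice fields out of one context-size register and sum them, with HSW_CXT_TOTAL_SIZE() reusing the GEN7 VF-state field as shown above. A quick stand-alone check of the field extraction (the register value is invented for illustration):

#include <stdio.h>

#define HSW_CXT_POWER_SIZE(r)	(((r) >> 26) & 0x3f)
#define HSW_CXT_RING_SIZE(r)	(((r) >> 23) & 0x7)
#define HSW_CXT_RENDER_SIZE(r)	(((r) >> 15) & 0xff)

int main(void)
{
	/* invented register value: power=10, ring=3, render=42 */
	unsigned int r = (10u << 26) | (3u << 23) | (42u << 15);

	printf("power=%u ring=%u render=%u\n",
	       HSW_CXT_POWER_SIZE(r), HSW_CXT_RING_SIZE(r),
	       HSW_CXT_RENDER_SIZE(r));
	return 0;
}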
@@ -1549,12 +1560,35 @@ | |||
1549 | 1560 | ||
1550 | /* VGA port control */ | 1561 | /* VGA port control */ |
1551 | #define ADPA 0x61100 | 1562 | #define ADPA 0x61100 |
1563 | #define PCH_ADPA 0xe1100 | ||
1564 | #define VLV_ADPA (VLV_DISPLAY_BASE + ADPA) | ||
1565 | |||
1552 | #define ADPA_DAC_ENABLE (1<<31) | 1566 | #define ADPA_DAC_ENABLE (1<<31) |
1553 | #define ADPA_DAC_DISABLE 0 | 1567 | #define ADPA_DAC_DISABLE 0 |
1554 | #define ADPA_PIPE_SELECT_MASK (1<<30) | 1568 | #define ADPA_PIPE_SELECT_MASK (1<<30) |
1555 | #define ADPA_PIPE_A_SELECT 0 | 1569 | #define ADPA_PIPE_A_SELECT 0 |
1556 | #define ADPA_PIPE_B_SELECT (1<<30) | 1570 | #define ADPA_PIPE_B_SELECT (1<<30) |
1557 | #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) | 1571 | #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) |
1572 | /* CPT uses bits 29:30 for pch transcoder select */ | ||
1573 | #define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */ | ||
1574 | #define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24) | ||
1575 | #define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24) | ||
1576 | #define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24) | ||
1577 | #define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24) | ||
1578 | #define ADPA_CRT_HOTPLUG_ENABLE (1<<23) | ||
1579 | #define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22) | ||
1580 | #define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22) | ||
1581 | #define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21) | ||
1582 | #define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21) | ||
1583 | #define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20) | ||
1584 | #define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20) | ||
1585 | #define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18) | ||
1586 | #define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18) | ||
1587 | #define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18) | ||
1588 | #define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18) | ||
1589 | #define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17) | ||
1590 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) | ||
1591 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) | ||
1558 | #define ADPA_USE_VGA_HVPOLARITY (1<<15) | 1592 | #define ADPA_USE_VGA_HVPOLARITY (1<<15) |
1559 | #define ADPA_SETS_HVPOLARITY 0 | 1593 | #define ADPA_SETS_HVPOLARITY 0 |
1560 | #define ADPA_VSYNC_CNTL_DISABLE (1<<11) | 1594 | #define ADPA_VSYNC_CNTL_DISABLE (1<<11) |
@@ -3889,31 +3923,6 @@ | |||
3889 | #define FDI_PLL_CTL_1 0xfe000 | 3923 | #define FDI_PLL_CTL_1 0xfe000 |
3890 | #define FDI_PLL_CTL_2 0xfe004 | 3924 | #define FDI_PLL_CTL_2 0xfe004 |
3891 | 3925 | ||
3892 | /* CRT */ | ||
3893 | #define PCH_ADPA 0xe1100 | ||
3894 | #define ADPA_TRANS_SELECT_MASK (1<<30) | ||
3895 | #define ADPA_TRANS_A_SELECT 0 | ||
3896 | #define ADPA_TRANS_B_SELECT (1<<30) | ||
3897 | #define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */ | ||
3898 | #define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24) | ||
3899 | #define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24) | ||
3900 | #define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24) | ||
3901 | #define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24) | ||
3902 | #define ADPA_CRT_HOTPLUG_ENABLE (1<<23) | ||
3903 | #define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22) | ||
3904 | #define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22) | ||
3905 | #define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21) | ||
3906 | #define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21) | ||
3907 | #define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20) | ||
3908 | #define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20) | ||
3909 | #define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18) | ||
3910 | #define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18) | ||
3911 | #define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18) | ||
3912 | #define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18) | ||
3913 | #define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17) | ||
3914 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) | ||
3915 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) | ||
3916 | |||
3917 | /* or SDVOB */ | 3926 | /* or SDVOB */ |
3918 | #define HDMIB 0xe1140 | 3927 | #define HDMIB 0xe1140 |
3919 | #define PORT_ENABLE (1 << 31) | 3928 | #define PORT_ENABLE (1 << 31) |
@@ -4270,194 +4279,184 @@ | |||
4270 | #define AUD_CONFIG_DISABLE_NCTS (1 << 3) | 4279 | #define AUD_CONFIG_DISABLE_NCTS (1 << 3) |
4271 | 4280 | ||
4272 | /* HSW Power Wells */ | 4281 | /* HSW Power Wells */ |
4273 | #define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ | 4282 | #define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ |
4274 | #define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ | 4283 | #define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ |
4275 | #define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ | 4284 | #define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ |
4276 | #define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ | 4285 | #define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ |
4277 | #define HSW_PWR_WELL_ENABLE (1<<31) | 4286 | #define HSW_PWR_WELL_ENABLE (1<<31) |
4278 | #define HSW_PWR_WELL_STATE (1<<30) | 4287 | #define HSW_PWR_WELL_STATE (1<<30) |
4279 | #define HSW_PWR_WELL_CTL5 0x45410 | 4288 | #define HSW_PWR_WELL_CTL5 0x45410 |
4280 | #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) | 4289 | #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) |
4281 | #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) | 4290 | #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) |
4282 | #define HSW_PWR_WELL_FORCE_ON (1<<19) | 4291 | #define HSW_PWR_WELL_FORCE_ON (1<<19) |
4283 | #define HSW_PWR_WELL_CTL6 0x45414 | 4292 | #define HSW_PWR_WELL_CTL6 0x45414 |
4284 | 4293 | ||
4285 | /* Per-pipe DDI Function Control */ | 4294 | /* Per-pipe DDI Function Control */ |
4286 | #define PIPE_DDI_FUNC_CTL_A 0x60400 | 4295 | #define PIPE_DDI_FUNC_CTL_A 0x60400 |
4287 | #define PIPE_DDI_FUNC_CTL_B 0x61400 | 4296 | #define PIPE_DDI_FUNC_CTL_B 0x61400 |
4288 | #define PIPE_DDI_FUNC_CTL_C 0x62400 | 4297 | #define PIPE_DDI_FUNC_CTL_C 0x62400 |
4289 | #define PIPE_DDI_FUNC_CTL_EDP 0x6F400 | 4298 | #define PIPE_DDI_FUNC_CTL_EDP 0x6F400 |
4290 | #define DDI_FUNC_CTL(pipe) _PIPE(pipe, \ | 4299 | #define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \ |
4291 | PIPE_DDI_FUNC_CTL_A, \ | 4300 | PIPE_DDI_FUNC_CTL_B) |
4292 | PIPE_DDI_FUNC_CTL_B) | ||
4293 | #define PIPE_DDI_FUNC_ENABLE (1<<31) | 4301 | #define PIPE_DDI_FUNC_ENABLE (1<<31) |
4294 | /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ | 4302 | /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ |
4295 | #define PIPE_DDI_PORT_MASK (7<<28) | 4303 | #define PIPE_DDI_PORT_MASK (7<<28) |
4296 | #define PIPE_DDI_SELECT_PORT(x) ((x)<<28) | 4304 | #define PIPE_DDI_SELECT_PORT(x) ((x)<<28) |
4297 | #define PIPE_DDI_MODE_SELECT_HDMI (0<<24) | 4305 | #define PIPE_DDI_MODE_SELECT_MASK (7<<24) |
4298 | #define PIPE_DDI_MODE_SELECT_DVI (1<<24) | 4306 | #define PIPE_DDI_MODE_SELECT_HDMI (0<<24) |
4307 | #define PIPE_DDI_MODE_SELECT_DVI (1<<24) | ||
4299 | #define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) | 4308 | #define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) |
4300 | #define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) | 4309 | #define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) |
4301 | #define PIPE_DDI_MODE_SELECT_FDI (4<<24) | 4310 | #define PIPE_DDI_MODE_SELECT_FDI (4<<24) |
4302 | #define PIPE_DDI_BPC_8 (0<<20) | 4311 | #define PIPE_DDI_BPC_MASK (7<<20) |
4303 | #define PIPE_DDI_BPC_10 (1<<20) | 4312 | #define PIPE_DDI_BPC_8 (0<<20) |
4304 | #define PIPE_DDI_BPC_6 (2<<20) | 4313 | #define PIPE_DDI_BPC_10 (1<<20) |
4305 | #define PIPE_DDI_BPC_12 (3<<20) | 4314 | #define PIPE_DDI_BPC_6 (2<<20) |
4306 | #define PIPE_DDI_BFI_ENABLE (1<<4) | 4315 | #define PIPE_DDI_BPC_12 (3<<20) |
4307 | #define PIPE_DDI_PORT_WIDTH_X1 (0<<1) | 4316 | #define PIPE_DDI_PVSYNC (1<<17) |
4308 | #define PIPE_DDI_PORT_WIDTH_X2 (1<<1) | 4317 | #define PIPE_DDI_PHSYNC (1<<16) |
4309 | #define PIPE_DDI_PORT_WIDTH_X4 (3<<1) | 4318 | #define PIPE_DDI_BFI_ENABLE (1<<4) |
4319 | #define PIPE_DDI_PORT_WIDTH_X1 (0<<1) | ||
4320 | #define PIPE_DDI_PORT_WIDTH_X2 (1<<1) | ||
4321 | #define PIPE_DDI_PORT_WIDTH_X4 (3<<1) | ||
4310 | 4322 | ||
4311 | /* DisplayPort Transport Control */ | 4323 | /* DisplayPort Transport Control */ |
4312 | #define DP_TP_CTL_A 0x64040 | 4324 | #define DP_TP_CTL_A 0x64040 |
4313 | #define DP_TP_CTL_B 0x64140 | 4325 | #define DP_TP_CTL_B 0x64140 |
4314 | #define DP_TP_CTL(port) _PORT(port, \ | 4326 | #define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B) |
4315 | DP_TP_CTL_A, \ | 4327 | #define DP_TP_CTL_ENABLE (1<<31) |
4316 | DP_TP_CTL_B) | 4328 | #define DP_TP_CTL_MODE_SST (0<<27) |
4317 | #define DP_TP_CTL_ENABLE (1<<31) | 4329 | #define DP_TP_CTL_MODE_MST (1<<27) |
4318 | #define DP_TP_CTL_MODE_SST (0<<27) | ||
4319 | #define DP_TP_CTL_MODE_MST (1<<27) | ||
4320 | #define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) | 4330 | #define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) |
4321 | #define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) | 4331 | #define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) |
4322 | #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) | 4332 | #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) |
4323 | #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) | 4333 | #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) |
4324 | #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) | 4334 | #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) |
4325 | #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) | 4335 | #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) |
4326 | 4336 | ||
4327 | /* DisplayPort Transport Status */ | 4337 | /* DisplayPort Transport Status */ |
4328 | #define DP_TP_STATUS_A 0x64044 | 4338 | #define DP_TP_STATUS_A 0x64044 |
4329 | #define DP_TP_STATUS_B 0x64144 | 4339 | #define DP_TP_STATUS_B 0x64144 |
4330 | #define DP_TP_STATUS(port) _PORT(port, \ | 4340 | #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) |
4331 | DP_TP_STATUS_A, \ | ||
4332 | DP_TP_STATUS_B) | ||
4333 | #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) | 4341 | #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) |
4334 | 4342 | ||
4335 | /* DDI Buffer Control */ | 4343 | /* DDI Buffer Control */ |
4336 | #define DDI_BUF_CTL_A 0x64000 | 4344 | #define DDI_BUF_CTL_A 0x64000 |
4337 | #define DDI_BUF_CTL_B 0x64100 | 4345 | #define DDI_BUF_CTL_B 0x64100 |
4338 | #define DDI_BUF_CTL(port) _PORT(port, \ | 4346 | #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) |
4339 | DDI_BUF_CTL_A, \ | 4347 | #define DDI_BUF_CTL_ENABLE (1<<31) |
4340 | DDI_BUF_CTL_B) | ||
4341 | #define DDI_BUF_CTL_ENABLE (1<<31) | ||
4342 | #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ | 4348 | #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ |
4343 | #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ | 4349 | #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ |
4344 | #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ | 4350 | #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ |
4345 | #define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ | 4351 | #define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ |
4346 | #define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */ | 4352 | #define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */ |
4347 | #define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ | 4353 | #define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ |
4348 | #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ | 4354 | #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ |
4349 | #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ | 4355 | #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ |
4350 | #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ | 4356 | #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ |
4351 | #define DDI_BUF_EMP_MASK (0xf<<24) | 4357 | #define DDI_BUF_EMP_MASK (0xf<<24) |
4352 | #define DDI_BUF_IS_IDLE (1<<7) | 4358 | #define DDI_BUF_IS_IDLE (1<<7) |
4353 | #define DDI_PORT_WIDTH_X1 (0<<1) | 4359 | #define DDI_PORT_WIDTH_X1 (0<<1) |
4354 | #define DDI_PORT_WIDTH_X2 (1<<1) | 4360 | #define DDI_PORT_WIDTH_X2 (1<<1) |
4355 | #define DDI_PORT_WIDTH_X4 (3<<1) | 4361 | #define DDI_PORT_WIDTH_X4 (3<<1) |
4356 | #define DDI_INIT_DISPLAY_DETECTED (1<<0) | 4362 | #define DDI_INIT_DISPLAY_DETECTED (1<<0) |
4357 | 4363 | ||
4358 | /* DDI Buffer Translations */ | 4364 | /* DDI Buffer Translations */ |
4359 | #define DDI_BUF_TRANS_A 0x64E00 | 4365 | #define DDI_BUF_TRANS_A 0x64E00 |
4360 | #define DDI_BUF_TRANS_B 0x64E60 | 4366 | #define DDI_BUF_TRANS_B 0x64E60 |
4361 | #define DDI_BUF_TRANS(port) _PORT(port, \ | 4367 | #define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) |
4362 | DDI_BUF_TRANS_A, \ | ||
4363 | DDI_BUF_TRANS_B) | ||
4364 | 4368 | ||
4365 | /* Sideband Interface (SBI) is programmed indirectly, via | 4369 | /* Sideband Interface (SBI) is programmed indirectly, via |
4366 | * SBI_ADDR, which contains the register offset; and SBI_DATA, | 4370 | * SBI_ADDR, which contains the register offset; and SBI_DATA, |
4367 | * which contains the payload */ | 4371 | * which contains the payload */ |
4368 | #define SBI_ADDR 0xC6000 | 4372 | #define SBI_ADDR 0xC6000 |
4369 | #define SBI_DATA 0xC6004 | 4373 | #define SBI_DATA 0xC6004 |
4370 | #define SBI_CTL_STAT 0xC6008 | 4374 | #define SBI_CTL_STAT 0xC6008 |
4371 | #define SBI_CTL_OP_CRRD (0x6<<8) | 4375 | #define SBI_CTL_OP_CRRD (0x6<<8) |
4372 | #define SBI_CTL_OP_CRWR (0x7<<8) | 4376 | #define SBI_CTL_OP_CRWR (0x7<<8) |
4373 | #define SBI_RESPONSE_FAIL (0x1<<1) | 4377 | #define SBI_RESPONSE_FAIL (0x1<<1) |
4374 | #define SBI_RESPONSE_SUCCESS (0x0<<1) | 4378 | #define SBI_RESPONSE_SUCCESS (0x0<<1) |
4375 | #define SBI_BUSY (0x1<<0) | 4379 | #define SBI_BUSY (0x1<<0) |
4376 | #define SBI_READY (0x0<<0) | 4380 | #define SBI_READY (0x0<<0) |
4377 | 4381 | ||
4378 | /* SBI offsets */ | 4382 | /* SBI offsets */ |
4379 | #define SBI_SSCDIVINTPHASE6 0x0600 | 4383 | #define SBI_SSCDIVINTPHASE6 0x0600 |
4380 | #define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) | 4384 | #define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) |
4381 | #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) | 4385 | #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) |
4382 | #define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) | 4386 | #define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) |
4383 | #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) | 4387 | #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) |
4384 | #define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) | 4388 | #define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) |
4385 | #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) | 4389 | #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) |
4386 | #define SBI_SSCCTL 0x020c | 4390 | #define SBI_SSCCTL 0x020c |
4387 | #define SBI_SSCCTL6 0x060C | 4391 | #define SBI_SSCCTL6 0x060C |
4388 | #define SBI_SSCCTL_DISABLE (1<<0) | 4392 | #define SBI_SSCCTL_DISABLE (1<<0) |
4389 | #define SBI_SSCAUXDIV6 0x0610 | 4393 | #define SBI_SSCAUXDIV6 0x0610 |
4390 | #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) | 4394 | #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) |
4391 | #define SBI_DBUFF0 0x2a00 | 4395 | #define SBI_DBUFF0 0x2a00 |
4392 | 4396 | ||
4393 | /* LPT PIXCLK_GATE */ | 4397 | /* LPT PIXCLK_GATE */ |
4394 | #define PIXCLK_GATE 0xC6020 | 4398 | #define PIXCLK_GATE 0xC6020 |
4395 | #define PIXCLK_GATE_UNGATE 1<<0 | 4399 | #define PIXCLK_GATE_UNGATE (1<<0) |
4396 | #define PIXCLK_GATE_GATE 0<<0 | 4400 | #define PIXCLK_GATE_GATE (0<<0) |
4397 | 4401 | ||
4398 | /* SPLL */ | 4402 | /* SPLL */ |
4399 | #define SPLL_CTL 0x46020 | 4403 | #define SPLL_CTL 0x46020 |
4400 | #define SPLL_PLL_ENABLE (1<<31) | 4404 | #define SPLL_PLL_ENABLE (1<<31) |
4401 | #define SPLL_PLL_SCC (1<<28) | 4405 | #define SPLL_PLL_SCC (1<<28) |
4402 | #define SPLL_PLL_NON_SCC (2<<28) | 4406 | #define SPLL_PLL_NON_SCC (2<<28) |
4403 | #define SPLL_PLL_FREQ_810MHz (0<<26) | 4407 | #define SPLL_PLL_FREQ_810MHz (0<<26) |
4404 | #define SPLL_PLL_FREQ_1350MHz (1<<26) | 4408 | #define SPLL_PLL_FREQ_1350MHz (1<<26) |
4405 | 4409 | ||
4406 | /* WRPLL */ | 4410 | /* WRPLL */ |
4407 | #define WRPLL_CTL1 0x46040 | 4411 | #define WRPLL_CTL1 0x46040 |
4408 | #define WRPLL_CTL2 0x46060 | 4412 | #define WRPLL_CTL2 0x46060 |
4409 | #define WRPLL_PLL_ENABLE (1<<31) | 4413 | #define WRPLL_PLL_ENABLE (1<<31) |
4410 | #define WRPLL_PLL_SELECT_SSC (0x01<<28) | 4414 | #define WRPLL_PLL_SELECT_SSC (0x01<<28) |
4411 | #define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) | 4415 | #define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) |
4412 | #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) | 4416 | #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) |
4413 | /* WRPLL divider programming */ | 4417 | /* WRPLL divider programming */ |
4414 | #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) | 4418 | #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) |
4415 | #define WRPLL_DIVIDER_POST(x) ((x)<<8) | 4419 | #define WRPLL_DIVIDER_POST(x) ((x)<<8) |
4416 | #define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) | 4420 | #define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) |
4417 | 4421 | ||
4418 | /* Port clock selection */ | 4422 | /* Port clock selection */ |
4419 | #define PORT_CLK_SEL_A 0x46100 | 4423 | #define PORT_CLK_SEL_A 0x46100 |
4420 | #define PORT_CLK_SEL_B 0x46104 | 4424 | #define PORT_CLK_SEL_B 0x46104 |
4421 | #define PORT_CLK_SEL(port) _PORT(port, \ | 4425 | #define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B) |
4422 | PORT_CLK_SEL_A, \ | ||
4423 | PORT_CLK_SEL_B) | ||
4424 | #define PORT_CLK_SEL_LCPLL_2700 (0<<29) | 4426 | #define PORT_CLK_SEL_LCPLL_2700 (0<<29) |
4425 | #define PORT_CLK_SEL_LCPLL_1350 (1<<29) | 4427 | #define PORT_CLK_SEL_LCPLL_1350 (1<<29) |
4426 | #define PORT_CLK_SEL_LCPLL_810 (2<<29) | 4428 | #define PORT_CLK_SEL_LCPLL_810 (2<<29) |
4427 | #define PORT_CLK_SEL_SPLL (3<<29) | 4429 | #define PORT_CLK_SEL_SPLL (3<<29) |
4428 | #define PORT_CLK_SEL_WRPLL1 (4<<29) | 4430 | #define PORT_CLK_SEL_WRPLL1 (4<<29) |
4429 | #define PORT_CLK_SEL_WRPLL2 (5<<29) | 4431 | #define PORT_CLK_SEL_WRPLL2 (5<<29) |
4430 | 4432 | ||
4431 | /* Pipe clock selection */ | 4433 | /* Pipe clock selection */ |
4432 | #define PIPE_CLK_SEL_A 0x46140 | 4434 | #define PIPE_CLK_SEL_A 0x46140 |
4433 | #define PIPE_CLK_SEL_B 0x46144 | 4435 | #define PIPE_CLK_SEL_B 0x46144 |
4434 | #define PIPE_CLK_SEL(pipe) _PIPE(pipe, \ | 4436 | #define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B) |
4435 | PIPE_CLK_SEL_A, \ | ||
4436 | PIPE_CLK_SEL_B) | ||
4437 | /* For each pipe, we need to select the corresponding port clock */ | 4437 | /* For each pipe, we need to select the corresponding port clock */ |
4438 | #define PIPE_CLK_SEL_DISABLED (0x0<<29) | 4438 | #define PIPE_CLK_SEL_DISABLED (0x0<<29) |
4439 | #define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) | 4439 | #define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) |
4440 | 4440 | ||
4441 | /* LCPLL Control */ | 4441 | /* LCPLL Control */ |
4442 | #define LCPLL_CTL 0x130040 | 4442 | #define LCPLL_CTL 0x130040 |
4443 | #define LCPLL_PLL_DISABLE (1<<31) | 4443 | #define LCPLL_PLL_DISABLE (1<<31) |
4444 | #define LCPLL_PLL_LOCK (1<<30) | 4444 | #define LCPLL_PLL_LOCK (1<<30) |
4445 | #define LCPLL_CD_CLOCK_DISABLE (1<<25) | 4445 | #define LCPLL_CD_CLOCK_DISABLE (1<<25) |
4446 | #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) | 4446 | #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) |
4447 | 4447 | ||
4448 | /* Pipe WM_LINETIME - watermark line time */ | 4448 | /* Pipe WM_LINETIME - watermark line time */ |
4449 | #define PIPE_WM_LINETIME_A 0x45270 | 4449 | #define PIPE_WM_LINETIME_A 0x45270 |
4450 | #define PIPE_WM_LINETIME_B 0x45274 | 4450 | #define PIPE_WM_LINETIME_B 0x45274 |
4451 | #define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \ | 4451 | #define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \ |
4452 | PIPE_WM_LINETIME_A, \ | 4452 | PIPE_WM_LINETIME_B) |
4453 | PIPE_WM_LINETIME_B) | 4453 | #define PIPE_WM_LINETIME_MASK (0x1ff) |
4454 | #define PIPE_WM_LINETIME_MASK (0x1ff) | 4454 | #define PIPE_WM_LINETIME_TIME(x) ((x)) |
4455 | #define PIPE_WM_LINETIME_TIME(x) ((x)) | ||
4456 | #define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) | 4455 | #define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) |
4457 | #define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) | 4456 | #define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) |
4458 | 4457 | ||
4459 | /* SFUSE_STRAP */ | 4458 | /* SFUSE_STRAP */ |
4460 | #define SFUSE_STRAP 0xc2014 | 4459 | #define SFUSE_STRAP 0xc2014 |
4461 | #define SFUSE_STRAP_DDIB_DETECTED (1<<2) | 4460 | #define SFUSE_STRAP_DDIB_DETECTED (1<<2) |
4462 | #define SFUSE_STRAP_DDIC_DETECTED (1<<1) | 4461 | #define SFUSE_STRAP_DDIC_DETECTED (1<<1) |
4463 | #define SFUSE_STRAP_DDID_DETECTED (1<<0) | 4462 | #define SFUSE_STRAP_DDID_DETECTED (1<<0) |
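Many of the definitions reflowed above lean on the _PIPE()/_PORT() helpers, which extrapolate a register address linearly from the first two instances; in this header they expand to (a) + (index)*((b) - (a)). A quick check with the watermark line-time registers:

#include <stdio.h>

/* linear extrapolation from instances A and B, as in i915_reg.h */
#define _PIPE(pipe, a, b)	((a) + (pipe)*((b)-(a)))

#define PIPE_WM_LINETIME_A	0x45270
#define PIPE_WM_LINETIME_B	0x45274
#define PIPE_WM_LINETIME(pipe)	_PIPE(pipe, PIPE_WM_LINETIME_A, \
				      PIPE_WM_LINETIME_B)

int main(void)
{
	/* stride is B - A = 4, so pipe C lands at 0x45278 */
	printf("%#x %#x %#x\n", PIPE_WM_LINETIME(0),
	       PIPE_WM_LINETIME(1), PIPE_WM_LINETIME(2));
	return 0;
}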
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 7631807a2788..c5ee7ee3b17e 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -213,7 +213,7 @@ void i915_setup_sysfs(struct drm_device *dev) | |||
213 | DRM_ERROR("RC6 residency sysfs setup failed\n"); | 213 | DRM_ERROR("RC6 residency sysfs setup failed\n"); |
214 | } | 214 | } |
215 | 215 | ||
216 | if (IS_IVYBRIDGE(dev)) { | 216 | if (HAS_L3_GPU_CACHE(dev)) { |
217 | ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); | 217 | ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); |
218 | if (ret) | 218 | if (ret) |
219 | DRM_ERROR("l3 parity sysfs setup failed\n"); | 219 | DRM_ERROR("l3 parity sysfs setup failed\n"); |
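Switching the sysfs gate from IS_IVYBRIDGE() to HAS_L3_GPU_CACHE() expresses the requirement (a GPU L3 with parity reporting) rather than the one platform that happens to meet it. A stand-alone illustration of the capability-macro pattern (the predicate body here is an assumption, not quoted from i915_drv.h):

#include <stdio.h>

struct device_info {
	int gen;
	int is_valleyview;
};

/* illustrative predicate; the real macro lives in i915_drv.h */
#define HAS_L3_GPU_CACHE(i)	((i)->gen == 7 && !(i)->is_valleyview)

int main(void)
{
	struct device_info ivb = { .gen = 7, .is_valleyview = 0 };
	struct device_info vlv = { .gen = 7, .is_valleyview = 1 };

	printf("ivb:%d vlv:%d\n",
	       HAS_L3_GPU_CACHE(&ivb), HAS_L3_GPU_CACHE(&vlv));
	return 0;
}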
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 23bdc8cd1458..80bf3112dc1f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -47,6 +47,7 @@ | |||
47 | struct intel_crt { | 47 | struct intel_crt { |
48 | struct intel_encoder base; | 48 | struct intel_encoder base; |
49 | bool force_hotplug_required; | 49 | bool force_hotplug_required; |
50 | u32 adpa_reg; | ||
50 | }; | 51 | }; |
51 | 52 | ||
52 | static struct intel_crt *intel_attached_crt(struct drm_connector *connector) | 53 | static struct intel_crt *intel_attached_crt(struct drm_connector *connector) |
@@ -55,6 +56,11 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector) | |||
55 | struct intel_crt, base); | 56 | struct intel_crt, base); |
56 | } | 57 | } |
57 | 58 | ||
59 | static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) | ||
60 | { | ||
61 | return container_of(encoder, struct intel_crt, base); | ||
62 | } | ||
63 | |||
58 | static void pch_crt_dpms(struct drm_encoder *encoder, int mode) | 64 | static void pch_crt_dpms(struct drm_encoder *encoder, int mode) |
59 | { | 65 | { |
60 | struct drm_device *dev = encoder->dev; | 66 | struct drm_device *dev = encoder->dev; |
@@ -145,19 +151,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
145 | 151 | ||
146 | struct drm_device *dev = encoder->dev; | 152 | struct drm_device *dev = encoder->dev; |
147 | struct drm_crtc *crtc = encoder->crtc; | 153 | struct drm_crtc *crtc = encoder->crtc; |
154 | struct intel_crt *crt = | ||
155 | intel_encoder_to_crt(to_intel_encoder(encoder)); | ||
148 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 156 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
149 | struct drm_i915_private *dev_priv = dev->dev_private; | 157 | struct drm_i915_private *dev_priv = dev->dev_private; |
150 | int dpll_md_reg; | 158 | int dpll_md_reg; |
151 | u32 adpa, dpll_md; | 159 | u32 adpa, dpll_md; |
152 | u32 adpa_reg; | ||
153 | 160 | ||
154 | dpll_md_reg = DPLL_MD(intel_crtc->pipe); | 161 | dpll_md_reg = DPLL_MD(intel_crtc->pipe); |
155 | 162 | ||
156 | if (HAS_PCH_SPLIT(dev)) | ||
157 | adpa_reg = PCH_ADPA; | ||
158 | else | ||
159 | adpa_reg = ADPA; | ||
160 | |||
161 | /* | 163 | /* |
162 | * Disable separate mode multiplier used when cloning SDVO to CRT | 164 | * Disable separate mode multiplier used when cloning SDVO to CRT |
163 | * XXX this needs to be adjusted when we really are cloning | 165 | * XXX this needs to be adjusted when we really are cloning |
@@ -185,7 +187,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
185 | if (!HAS_PCH_SPLIT(dev)) | 187 | if (!HAS_PCH_SPLIT(dev)) |
186 | I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); | 188 | I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); |
187 | 189 | ||
188 | I915_WRITE(adpa_reg, adpa); | 190 | I915_WRITE(crt->adpa_reg, adpa); |
189 | } | 191 | } |
190 | 192 | ||
191 | static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | 193 | static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) |
@@ -688,9 +690,7 @@ void intel_crt_init(struct drm_device *dev) | |||
688 | intel_connector_attach_encoder(intel_connector, &crt->base); | 690 | intel_connector_attach_encoder(intel_connector, &crt->base); |
689 | 691 | ||
690 | crt->base.type = INTEL_OUTPUT_ANALOG; | 692 | crt->base.type = INTEL_OUTPUT_ANALOG; |
691 | crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | | 693 | crt->base.cloneable = true; |
692 | 1 << INTEL_ANALOG_CLONE_BIT | | ||
693 | 1 << INTEL_SDVO_LVDS_CLONE_BIT); | ||
694 | if (IS_HASWELL(dev)) | 694 | if (IS_HASWELL(dev)) |
695 | crt->base.crtc_mask = (1 << 0); | 695 | crt->base.crtc_mask = (1 << 0); |
696 | else | 696 | else |
@@ -707,6 +707,13 @@ void intel_crt_init(struct drm_device *dev) | |||
707 | else | 707 | else |
708 | encoder_helper_funcs = &gmch_encoder_funcs; | 708 | encoder_helper_funcs = &gmch_encoder_funcs; |
709 | 709 | ||
710 | if (HAS_PCH_SPLIT(dev)) | ||
711 | crt->adpa_reg = PCH_ADPA; | ||
712 | else if (IS_VALLEYVIEW(dev)) | ||
713 | crt->adpa_reg = VLV_ADPA; | ||
714 | else | ||
715 | crt->adpa_reg = ADPA; | ||
716 | |||
710 | drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs); | 717 | drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs); |
711 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 718 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
712 | 719 | ||
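
The intel_crt.c change above hoists the ADPA register choice out of the per-modeset path: the offset is picked once at init (now also covering Valleyview) and cached in crt->adpa_reg. A minimal standalone sketch of the pattern, with illustrative register values rather than the driver's real i915_reg.h definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative offsets; the real ones come from i915_reg.h. */
#define ADPA     0x61100u
#define PCH_ADPA 0xe1100u
#define VLV_ADPA 0x1e1100u

struct crt {
        uint32_t adpa_reg; /* chosen once at init, used on every mode set */
};

static void crt_init(struct crt *crt, int has_pch_split, int is_valleyview)
{
        if (has_pch_split)
                crt->adpa_reg = PCH_ADPA;
        else if (is_valleyview)
                crt->adpa_reg = VLV_ADPA;
        else
                crt->adpa_reg = ADPA;
}

int main(void)
{
        struct crt crt;

        crt_init(&crt, 0, 1);
        printf("ADPA register: 0x%x\n", crt.adpa_reg);
        return 0;
}
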
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 933c74859172..958422606bc7 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
250 | case PORT_B: | 250 | case PORT_B: |
251 | case PORT_C: | 251 | case PORT_C: |
252 | case PORT_D: | 252 | case PORT_D: |
253 | intel_hdmi_init(dev, DDI_BUF_CTL(port)); | 253 | intel_hdmi_init(dev, DDI_BUF_CTL(port), port); |
254 | break; | 254 | break; |
255 | default: | 255 | default: |
256 | DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", | 256 | DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", |
@@ -267,7 +267,8 @@ struct wrpll_tmds_clock { | |||
267 | u16 r2; /* Reference divider */ | 267 | u16 r2; /* Reference divider */ |
268 | }; | 268 | }; |
269 | 269 | ||
270 | /* Table of matching values for WRPLL clocks programming for each frequency */ | 270 | /* Table of matching values for WRPLL clock programming for each frequency.
271 | * The code assumes this table is sorted. */ | ||
271 | static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { | 272 | static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { |
272 | {19750, 38, 25, 18}, | 273 | {19750, 38, 25, 18}, |
273 | {20000, 48, 32, 18}, | 274 | {20000, 48, 32, 18}, |
@@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { | |||
277 | {23000, 36, 23, 15}, | 278 | {23000, 36, 23, 15}, |
278 | {23500, 40, 40, 23}, | 279 | {23500, 40, 40, 23}, |
279 | {23750, 26, 16, 14}, | 280 | {23750, 26, 16, 14}, |
280 | {23750, 26, 16, 14}, | ||
281 | {24000, 36, 24, 15}, | 281 | {24000, 36, 24, 15}, |
282 | {25000, 36, 25, 15}, | 282 | {25000, 36, 25, 15}, |
283 | {25175, 26, 40, 33}, | 283 | {25175, 26, 40, 33}, |
@@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { | |||
437 | {108000, 8, 24, 15}, | 437 | {108000, 8, 24, 15}, |
438 | {108108, 8, 173, 108}, | 438 | {108108, 8, 173, 108}, |
439 | {109000, 6, 23, 19}, | 439 | {109000, 6, 23, 19}, |
440 | {109000, 6, 23, 19}, | ||
441 | {110000, 6, 22, 18}, | 440 | {110000, 6, 22, 18}, |
442 | {110013, 6, 22, 18}, | 441 | {110013, 6, 22, 18}, |
443 | {110250, 8, 49, 30}, | 442 | {110250, 8, 49, 30}, |
@@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { | |||
614 | {218250, 4, 42, 26}, | 613 | {218250, 4, 42, 26}, |
615 | {218750, 4, 34, 21}, | 614 | {218750, 4, 34, 21}, |
616 | {219000, 4, 47, 29}, | 615 | {219000, 4, 47, 29}, |
617 | {219000, 4, 47, 29}, | ||
618 | {220000, 4, 44, 27}, | 616 | {220000, 4, 44, 27}, |
619 | {220640, 4, 49, 30}, | 617 | {220640, 4, 49, 30}, |
620 | {220750, 4, 36, 22}, | 618 | {220750, 4, 36, 22}, |
@@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, | |||
658 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 656 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
659 | int port = intel_hdmi->ddi_port; | 657 | int port = intel_hdmi->ddi_port; |
660 | int pipe = intel_crtc->pipe; | 658 | int pipe = intel_crtc->pipe; |
661 | int p, n2, r2, valid=0; | 659 | int p, n2, r2; |
662 | u32 temp, i; | 660 | u32 temp, i; |
663 | 661 | ||
664 | /* On Haswell, we need to enable the clocks and prepare DDI function to | 662 | /* On Haswell, we need to enable the clocks and prepare DDI function to |
@@ -666,26 +664,23 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, | |||
666 | */ | 664 | */ |
667 | DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); | 665 | DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); |
668 | 666 | ||
669 | for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) { | 667 | for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) |
670 | if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) { | 668 | if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock) |
671 | p = wrpll_tmds_clock_table[i].p; | 669 | break; |
672 | n2 = wrpll_tmds_clock_table[i].n2; | ||
673 | r2 = wrpll_tmds_clock_table[i].r2; | ||
674 | 670 | ||
675 | DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n", | 671 | if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) |
676 | crtc->mode.clock, | 672 | i--; |
677 | p, n2, r2); | ||
678 | 673 | ||
679 | valid = 1; | 674 | p = wrpll_tmds_clock_table[i].p; |
680 | break; | 675 | n2 = wrpll_tmds_clock_table[i].n2; |
681 | } | 676 | r2 = wrpll_tmds_clock_table[i].r2; |
682 | } | ||
683 | 677 | ||
684 | if (!valid) { | 678 | if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock) |
685 | DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n", | 679 | DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n", |
686 | crtc->mode.clock); | 680 | wrpll_tmds_clock_table[i].clock, crtc->mode.clock); |
687 | return; | 681 | |
688 | } | 682 | DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", |
683 | crtc->mode.clock, p, n2, r2); | ||
689 | 684 | ||
690 | /* Enable LCPLL if disabled */ | 685 | /* Enable LCPLL if disabled */ |
691 | temp = I915_READ(LCPLL_CTL); | 686 | temp = I915_READ(LCPLL_CTL); |
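
The rewritten lookup above leans on the newly documented invariant that the table is sorted: instead of demanding an exact clock match (and bailing out of the mode set when none exists), it takes the first entry at or above the requested clock, clamping to the last row. A minimal standalone sketch of that selection, using an abbreviated copy of the table:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct wrpll_tmds_clock {
        uint32_t clock; /* in KHz */
        uint16_t p, n2, r2;
};

/* Abbreviated, sorted excerpt; the driver's table has hundreds of rows. */
static const struct wrpll_tmds_clock table[] = {
        {19750, 38, 25, 18},
        {20000, 48, 32, 18},
        {24000, 36, 24, 15},
        {25175, 26, 40, 33},
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const struct wrpll_tmds_clock *wrpll_lookup(uint32_t clock)
{
        size_t i;

        /* First entry at or above the requested clock... */
        for (i = 0; i < ARRAY_SIZE(table); i++)
                if (clock <= table[i].clock)
                        break;

        /* ...clamped to the last row when the clock exceeds the table. */
        if (i == ARRAY_SIZE(table))
                i--;

        return &table[i];
}

int main(void)
{
        const struct wrpll_tmds_clock *c = wrpll_lookup(23000);

        /* 23000 KHz is absent from this excerpt, so 24000 is chosen. */
        printf("clock=%u p=%u n2=%u r2=%u\n", c->clock, c->p, c->n2, c->r2);
        return 0;
}
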
@@ -723,15 +718,35 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, | |||
723 | } | 718 | } |
724 | 719 | ||
725 | /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ | 720 | /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ |
726 | temp = I915_READ(DDI_FUNC_CTL(pipe)); | 721 | temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port); |
727 | temp &= ~PIPE_DDI_PORT_MASK; | 722 | |
728 | temp &= ~PIPE_DDI_BPC_12; | 723 | switch (intel_crtc->bpp) { |
729 | temp |= PIPE_DDI_SELECT_PORT(port) | | 724 | case 18: |
730 | PIPE_DDI_MODE_SELECT_HDMI | | 725 | temp |= PIPE_DDI_BPC_6; |
731 | ((intel_crtc->bpp > 24) ? | 726 | break; |
732 | PIPE_DDI_BPC_12 : | 727 | case 24: |
733 | PIPE_DDI_BPC_8) | | 728 | temp |= PIPE_DDI_BPC_8; |
734 | PIPE_DDI_FUNC_ENABLE; | 729 | break; |
730 | case 30: | ||
731 | temp |= PIPE_DDI_BPC_10; | ||
732 | break; | ||
733 | case 36: | ||
734 | temp |= PIPE_DDI_BPC_12; | ||
735 | break; | ||
736 | default: | ||
737 | WARN(1, "%d bpp unsupported by pipe DDI function\n", | ||
738 | intel_crtc->bpp); | ||
739 | } | ||
740 | |||
741 | if (intel_hdmi->has_hdmi_sink) | ||
742 | temp |= PIPE_DDI_MODE_SELECT_HDMI; | ||
743 | else | ||
744 | temp |= PIPE_DDI_MODE_SELECT_DVI; | ||
745 | |||
746 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | ||
747 | temp |= PIPE_DDI_PVSYNC; | ||
748 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | ||
749 | temp |= PIPE_DDI_PHSYNC; | ||
735 | 750 | ||
736 | I915_WRITE(DDI_FUNC_CTL(pipe), temp); | 751 | I915_WRITE(DDI_FUNC_CTL(pipe), temp); |
737 | 752 | ||
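
Rather than read-modify-writing DDI_FUNC_CTL, the hunk above assembles the whole control word from scratch: pipe bpc derived from the crtc's bpp, HDMI vs. DVI mode from whether the sink is an HDMI monitor, and sync polarity from the adjusted mode flags. A standalone sketch of the word assembly; the bit positions below are illustrative stand-ins, not the documented register layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit encodings; the real layout lives in i915_reg.h. */
#define FUNC_ENABLE      (1u << 31)
#define SELECT_PORT(p)   ((uint32_t)(p) << 28)
#define BPC_8            (0u << 20)
#define BPC_10           (1u << 20)
#define BPC_6            (2u << 20)
#define BPC_12           (3u << 20)
#define MODE_SELECT_HDMI (0u << 24)
#define MODE_SELECT_DVI  (1u << 24)
#define PVSYNC           (1u << 17)
#define PHSYNC           (1u << 16)

static uint32_t build_ddi_func_ctl(int port, int bpp, int hdmi_sink,
                                   int pvsync, int phsync)
{
        uint32_t temp = FUNC_ENABLE | SELECT_PORT(port);

        switch (bpp) {
        case 18: temp |= BPC_6;  break;
        case 24: temp |= BPC_8;  break;
        case 30: temp |= BPC_10; break;
        case 36: temp |= BPC_12; break;
        default: fprintf(stderr, "%d bpp unsupported\n", bpp); break;
        }

        /* DVI sinks must not get HDMI encoding. */
        temp |= hdmi_sink ? MODE_SELECT_HDMI : MODE_SELECT_DVI;

        if (pvsync)
                temp |= PVSYNC;
        if (phsync)
                temp |= PHSYNC;
        return temp;
}

int main(void)
{
        printf("DDI_FUNC_CTL = 0x%08x\n", build_ddi_func_ctl(1, 24, 1, 1, 0));
        return 0;
}
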
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a69a3d0d3acf..a9ab1aff2c77 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1429,8 +1429,10 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1429 | * protect mechanism may be enabled. | 1429 | * protect mechanism may be enabled. |
1430 | * | 1430 | * |
1431 | * Note! This is for pre-ILK only. | 1431 | * Note! This is for pre-ILK only. |
1432 | * | ||
1433 | * Unfortunately needed by dvo_ns2501 since the dvo depends on it running. | ||
1432 | */ | 1434 | */ |
1433 | static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | 1435 | void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) |
1434 | { | 1436 | { |
1435 | int reg; | 1437 | int reg; |
1436 | u32 val; | 1438 | u32 val; |
@@ -2836,13 +2838,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | |||
2836 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) | 2838 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) |
2837 | { | 2839 | { |
2838 | struct drm_device *dev = crtc->dev; | 2840 | struct drm_device *dev = crtc->dev; |
2839 | struct intel_encoder *encoder; | 2841 | struct intel_encoder *intel_encoder; |
2840 | 2842 | ||
2841 | /* | 2843 | /* |
2842 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that | 2844 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that |
2843 | * must be driven by its own crtc; no sharing is possible. | 2845 | * must be driven by its own crtc; no sharing is possible. |
2844 | */ | 2846 | */ |
2845 | for_each_encoder_on_crtc(dev, crtc, encoder) { | 2847 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
2846 | 2848 | ||
2847 | /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell | 2849 | /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell |
2848 | * CPU handles all others */ | 2850 | * CPU handles all others */ |
@@ -2850,19 +2852,19 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc) | |||
2850 | /* It is still unclear how this will work on PPT, so throw up a warning */ | 2852 | /* It is still unclear how this will work on PPT, so throw up a warning */ |
2851 | WARN_ON(!HAS_PCH_LPT(dev)); | 2853 | WARN_ON(!HAS_PCH_LPT(dev)); |
2852 | 2854 | ||
2853 | if (encoder->type == DRM_MODE_ENCODER_DAC) { | 2855 | if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { |
2854 | DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); | 2856 | DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); |
2855 | return true; | 2857 | return true; |
2856 | } else { | 2858 | } else { |
2857 | DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", | 2859 | DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", |
2858 | encoder->type); | 2860 | intel_encoder->type); |
2859 | return false; | 2861 | return false; |
2860 | } | 2862 | } |
2861 | } | 2863 | } |
2862 | 2864 | ||
2863 | switch (encoder->type) { | 2865 | switch (intel_encoder->type) { |
2864 | case INTEL_OUTPUT_EDP: | 2866 | case INTEL_OUTPUT_EDP: |
2865 | if (!intel_encoder_is_pch_edp(&encoder->base)) | 2867 | if (!intel_encoder_is_pch_edp(&intel_encoder->base)) |
2866 | return false; | 2868 | return false; |
2867 | continue; | 2869 | continue; |
2868 | } | 2870 | } |
@@ -5848,46 +5850,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
5848 | return mode; | 5850 | return mode; |
5849 | } | 5851 | } |
5850 | 5852 | ||
5851 | #define GPU_IDLE_TIMEOUT 500 /* ms */ | ||
5852 | |||
5853 | /* When this timer fires, we've been idle for awhile */ | ||
5854 | static void intel_gpu_idle_timer(unsigned long arg) | ||
5855 | { | ||
5856 | struct drm_device *dev = (struct drm_device *)arg; | ||
5857 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
5858 | |||
5859 | if (!list_empty(&dev_priv->mm.active_list)) { | ||
5860 | /* Still processing requests, so just re-arm the timer. */ | ||
5861 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
5862 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
5863 | return; | ||
5864 | } | ||
5865 | |||
5866 | dev_priv->busy = false; | ||
5867 | queue_work(dev_priv->wq, &dev_priv->idle_work); | ||
5868 | } | ||
5869 | |||
5870 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ | ||
5871 | |||
5872 | static void intel_crtc_idle_timer(unsigned long arg) | ||
5873 | { | ||
5874 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; | ||
5875 | struct drm_crtc *crtc = &intel_crtc->base; | ||
5876 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; | ||
5877 | struct intel_framebuffer *intel_fb; | ||
5878 | |||
5879 | intel_fb = to_intel_framebuffer(crtc->fb); | ||
5880 | if (intel_fb && intel_fb->obj->active) { | ||
5881 | /* The framebuffer is still being accessed by the GPU. */ | ||
5882 | mod_timer(&intel_crtc->idle_timer, jiffies + | ||
5883 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | ||
5884 | return; | ||
5885 | } | ||
5886 | |||
5887 | intel_crtc->busy = false; | ||
5888 | queue_work(dev_priv->wq, &dev_priv->idle_work); | ||
5889 | } | ||
5890 | |||
5891 | static void intel_increase_pllclock(struct drm_crtc *crtc) | 5853 | static void intel_increase_pllclock(struct drm_crtc *crtc) |
5892 | { | 5854 | { |
5893 | struct drm_device *dev = crtc->dev; | 5855 | struct drm_device *dev = crtc->dev; |
@@ -5917,10 +5879,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) | |||
5917 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | 5879 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
5918 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); | 5880 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
5919 | } | 5881 | } |
5920 | |||
5921 | /* Schedule downclock */ | ||
5922 | mod_timer(&intel_crtc->idle_timer, jiffies + | ||
5923 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | ||
5924 | } | 5882 | } |
5925 | 5883 | ||
5926 | static void intel_decrease_pllclock(struct drm_crtc *crtc) | 5884 | static void intel_decrease_pllclock(struct drm_crtc *crtc) |
@@ -5959,89 +5917,46 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
5959 | 5917 | ||
5960 | } | 5918 | } |
5961 | 5919 | ||
5962 | /** | 5920 | void intel_mark_busy(struct drm_device *dev) |
5963 | * intel_idle_update - adjust clocks for idleness | 5921 | { |
5964 | * @work: work struct | 5922 | i915_update_gfx_val(dev->dev_private); |
5965 | * | 5923 | } |
5966 | * Either the GPU or display (or both) went idle. Check the busy status | 5924 | |
5967 | * here and adjust the CRTC and GPU clocks as necessary. | 5925 | void intel_mark_idle(struct drm_device *dev) |
5968 | */ | ||
5969 | static void intel_idle_update(struct work_struct *work) | ||
5970 | { | 5926 | { |
5971 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 5927 | } |
5972 | idle_work); | 5928 | |
5973 | struct drm_device *dev = dev_priv->dev; | 5929 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj) |
5930 | { | ||
5931 | struct drm_device *dev = obj->base.dev; | ||
5974 | struct drm_crtc *crtc; | 5932 | struct drm_crtc *crtc; |
5975 | struct intel_crtc *intel_crtc; | ||
5976 | 5933 | ||
5977 | if (!i915_powersave) | 5934 | if (!i915_powersave) |
5978 | return; | 5935 | return; |
5979 | 5936 | ||
5980 | mutex_lock(&dev->struct_mutex); | ||
5981 | |||
5982 | i915_update_gfx_val(dev_priv); | ||
5983 | |||
5984 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 5937 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
5985 | /* Skip inactive CRTCs */ | ||
5986 | if (!crtc->fb) | 5938 | if (!crtc->fb) |
5987 | continue; | 5939 | continue; |
5988 | 5940 | ||
5989 | intel_crtc = to_intel_crtc(crtc); | 5941 | if (to_intel_framebuffer(crtc->fb)->obj == obj) |
5990 | if (!intel_crtc->busy) | 5942 | intel_increase_pllclock(crtc); |
5991 | intel_decrease_pllclock(crtc); | ||
5992 | } | 5943 | } |
5993 | |||
5994 | |||
5995 | mutex_unlock(&dev->struct_mutex); | ||
5996 | } | 5944 | } |
5997 | 5945 | ||
5998 | /** | 5946 | void intel_mark_fb_idle(struct drm_i915_gem_object *obj) |
5999 | * intel_mark_busy - mark the GPU and possibly the display busy | ||
6000 | * @dev: drm device | ||
6001 | * @obj: object we're operating on | ||
6002 | * | ||
6003 | * Callers can use this function to indicate that the GPU is busy processing | ||
6004 | * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout | ||
6005 | * buffer), we'll also mark the display as busy, so we know to increase its | ||
6006 | * clock frequency. | ||
6007 | */ | ||
6008 | void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) | ||
6009 | { | 5947 | { |
6010 | drm_i915_private_t *dev_priv = dev->dev_private; | 5948 | struct drm_device *dev = obj->base.dev; |
6011 | struct drm_crtc *crtc = NULL; | 5949 | struct drm_crtc *crtc; |
6012 | struct intel_framebuffer *intel_fb; | ||
6013 | struct intel_crtc *intel_crtc; | ||
6014 | |||
6015 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
6016 | return; | ||
6017 | |||
6018 | if (!dev_priv->busy) { | ||
6019 | intel_sanitize_pm(dev); | ||
6020 | dev_priv->busy = true; | ||
6021 | } else | ||
6022 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
6023 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
6024 | 5950 | ||
6025 | if (obj == NULL) | 5951 | if (!i915_powersave) |
6026 | return; | 5952 | return; |
6027 | 5953 | ||
6028 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 5954 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
6029 | if (!crtc->fb) | 5955 | if (!crtc->fb) |
6030 | continue; | 5956 | continue; |
6031 | 5957 | ||
6032 | intel_crtc = to_intel_crtc(crtc); | 5958 | if (to_intel_framebuffer(crtc->fb)->obj == obj) |
6033 | intel_fb = to_intel_framebuffer(crtc->fb); | 5959 | intel_decrease_pllclock(crtc); |
6034 | if (intel_fb->obj == obj) { | ||
6035 | if (!intel_crtc->busy) { | ||
6036 | /* Non-busy -> busy, upclock */ | ||
6037 | intel_increase_pllclock(crtc); | ||
6038 | intel_crtc->busy = true; | ||
6039 | } else { | ||
6040 | /* Busy -> busy, put off timer */ | ||
6041 | mod_timer(&intel_crtc->idle_timer, jiffies + | ||
6042 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | ||
6043 | } | ||
6044 | } | ||
6045 | } | 5960 | } |
6046 | } | 5961 | } |
6047 | 5962 | ||
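
The hunks above rip out the timer-driven busy tracking (idle_timer, idle_work, per-crtc busy flags) in favor of explicit notifications: callers mark a scanout object busy or idle, and every pipe currently scanning out of that object is up- or downclocked on the spot. A minimal sketch of the new flow, with plain arrays standing in for the DRM mode-config lists:

#include <stddef.h>
#include <stdio.h>

struct gem_object { int id; };

struct crtc {
        struct gem_object *fb_obj; /* current scanout object, or NULL */
        int upclocked;
};

static int powersave = 1; /* stands in for the i915_powersave parameter */

static void mark_fb_busy(struct crtc *crtcs, size_t n, struct gem_object *obj)
{
        size_t i;

        if (!powersave)
                return;

        /* Upclock every pipe scanning out of this object. */
        for (i = 0; i < n; i++)
                if (crtcs[i].fb_obj == obj)
                        crtcs[i].upclocked = 1;
}

static void mark_fb_idle(struct crtc *crtcs, size_t n, struct gem_object *obj)
{
        size_t i;

        if (!powersave)
                return;

        /* Downclock immediately; no idle timer to wait for. */
        for (i = 0; i < n; i++)
                if (crtcs[i].fb_obj == obj)
                        crtcs[i].upclocked = 0;
}

int main(void)
{
        struct gem_object fb = { 1 };
        struct crtc crtcs[2] = { { &fb, 0 }, { NULL, 0 } };

        mark_fb_busy(crtcs, 2, &fb);
        printf("crtc0 upclocked: %d\n", crtcs[0].upclocked);
        mark_fb_idle(crtcs, 2, &fb);
        printf("crtc0 upclocked: %d\n", crtcs[0].upclocked);
        return 0;
}
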
@@ -6392,7 +6307,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
6392 | default: | 6307 | default: |
6393 | WARN_ONCE(1, "unknown plane in flip command\n"); | 6308 | WARN_ONCE(1, "unknown plane in flip command\n"); |
6394 | ret = -ENODEV; | 6309 | ret = -ENODEV; |
6395 | goto err; | 6310 | goto err_unpin; |
6396 | } | 6311 | } |
6397 | 6312 | ||
6398 | ret = intel_ring_begin(ring, 4); | 6313 | ret = intel_ring_begin(ring, 4); |
@@ -6500,7 +6415,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
6500 | goto cleanup_pending; | 6415 | goto cleanup_pending; |
6501 | 6416 | ||
6502 | intel_disable_fbc(dev); | 6417 | intel_disable_fbc(dev); |
6503 | intel_mark_busy(dev, obj); | 6418 | intel_mark_fb_busy(obj); |
6504 | mutex_unlock(&dev->struct_mutex); | 6419 | mutex_unlock(&dev->struct_mutex); |
6505 | 6420 | ||
6506 | trace_i915_flip_request(intel_crtc->plane, obj); | 6421 | trace_i915_flip_request(intel_crtc->plane, obj); |
@@ -6666,11 +6581,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
6666 | } | 6581 | } |
6667 | 6582 | ||
6668 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | 6583 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
6669 | |||
6670 | intel_crtc->busy = false; | ||
6671 | |||
6672 | setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, | ||
6673 | (unsigned long)intel_crtc); | ||
6674 | } | 6584 | } |
6675 | 6585 | ||
6676 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 6586 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
@@ -6697,15 +6607,23 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | |||
6697 | return 0; | 6607 | return 0; |
6698 | } | 6608 | } |
6699 | 6609 | ||
6700 | static int intel_encoder_clones(struct drm_device *dev, int type_mask) | 6610 | static int intel_encoder_clones(struct intel_encoder *encoder) |
6701 | { | 6611 | { |
6702 | struct intel_encoder *encoder; | 6612 | struct drm_device *dev = encoder->base.dev; |
6613 | struct intel_encoder *source_encoder; | ||
6703 | int index_mask = 0; | 6614 | int index_mask = 0; |
6704 | int entry = 0; | 6615 | int entry = 0; |
6705 | 6616 | ||
6706 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { | 6617 | list_for_each_entry(source_encoder, |
6707 | if (type_mask & encoder->clone_mask) | 6618 | &dev->mode_config.encoder_list, base.head) { |
6619 | |||
6620 | if (encoder == source_encoder) | ||
6708 | index_mask |= (1 << entry); | 6621 | index_mask |= (1 << entry); |
6622 | |||
6623 | /* Intel hw has only one MUX where encoders could be cloned. */ | ||
6624 | if (encoder->cloneable && source_encoder->cloneable) | ||
6625 | index_mask |= (1 << entry); | ||
6626 | |||
6709 | entry++; | 6627 | entry++; |
6710 | } | 6628 | } |
6711 | 6629 | ||
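
With the per-encoder clone bitmasks gone, possible_clones reduces to one boolean per encoder: an encoder is always compatible with itself, and two distinct encoders may be cloned only when both are flagged cloneable, since the hardware has a single MUX. A standalone sketch of the mask computation:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct encoder { bool cloneable; };

static unsigned int encoder_clones(const struct encoder *encoders, size_t n,
                                   size_t self)
{
        unsigned int index_mask = 0;
        size_t entry;

        for (entry = 0; entry < n; entry++) {
                /* An encoder can always be "cloned" with itself. */
                if (entry == self)
                        index_mask |= 1u << entry;

                /* One MUX: cloning needs consent from both encoders. */
                if (encoders[self].cloneable && encoders[entry].cloneable)
                        index_mask |= 1u << entry;
        }

        return index_mask;
}

int main(void)
{
        const struct encoder encoders[] = { { true }, { false }, { true } };

        /* Encoder 0 may clone itself and encoder 2, but not encoder 1. */
        printf("possible_clones for 0: 0x%x\n",
               encoder_clones(encoders, 3, 0));
        return 0;
}
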
@@ -6746,10 +6664,10 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
6746 | dpd_is_edp = intel_dpd_is_edp(dev); | 6664 | dpd_is_edp = intel_dpd_is_edp(dev); |
6747 | 6665 | ||
6748 | if (has_edp_a(dev)) | 6666 | if (has_edp_a(dev)) |
6749 | intel_dp_init(dev, DP_A); | 6667 | intel_dp_init(dev, DP_A, PORT_A); |
6750 | 6668 | ||
6751 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) | 6669 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
6752 | intel_dp_init(dev, PCH_DP_D); | 6670 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
6753 | } | 6671 | } |
6754 | 6672 | ||
6755 | intel_crt_init(dev); | 6673 | intel_crt_init(dev); |
@@ -6780,22 +6698,22 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
6780 | /* PCH SDVOB multiplex with HDMIB */ | 6698 | /* PCH SDVOB multiplex with HDMIB */ |
6781 | found = intel_sdvo_init(dev, PCH_SDVOB, true); | 6699 | found = intel_sdvo_init(dev, PCH_SDVOB, true); |
6782 | if (!found) | 6700 | if (!found) |
6783 | intel_hdmi_init(dev, HDMIB); | 6701 | intel_hdmi_init(dev, HDMIB, PORT_B); |
6784 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) | 6702 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) |
6785 | intel_dp_init(dev, PCH_DP_B); | 6703 | intel_dp_init(dev, PCH_DP_B, PORT_B); |
6786 | } | 6704 | } |
6787 | 6705 | ||
6788 | if (I915_READ(HDMIC) & PORT_DETECTED) | 6706 | if (I915_READ(HDMIC) & PORT_DETECTED) |
6789 | intel_hdmi_init(dev, HDMIC); | 6707 | intel_hdmi_init(dev, HDMIC, PORT_C); |
6790 | 6708 | ||
6791 | if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) | 6709 | if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) |
6792 | intel_hdmi_init(dev, HDMID); | 6710 | intel_hdmi_init(dev, HDMID, PORT_D); |
6793 | 6711 | ||
6794 | if (I915_READ(PCH_DP_C) & DP_DETECTED) | 6712 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
6795 | intel_dp_init(dev, PCH_DP_C); | 6713 | intel_dp_init(dev, PCH_DP_C, PORT_C); |
6796 | 6714 | ||
6797 | if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) | 6715 | if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
6798 | intel_dp_init(dev, PCH_DP_D); | 6716 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
6799 | } else if (IS_VALLEYVIEW(dev)) { | 6717 | } else if (IS_VALLEYVIEW(dev)) { |
6800 | int found; | 6718 | int found; |
6801 | 6719 | ||
@@ -6803,17 +6721,17 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
6803 | /* SDVOB multiplex with HDMIB */ | 6721 | /* SDVOB multiplex with HDMIB */ |
6804 | found = intel_sdvo_init(dev, SDVOB, true); | 6722 | found = intel_sdvo_init(dev, SDVOB, true); |
6805 | if (!found) | 6723 | if (!found) |
6806 | intel_hdmi_init(dev, SDVOB); | 6724 | intel_hdmi_init(dev, SDVOB, PORT_B); |
6807 | if (!found && (I915_READ(DP_B) & DP_DETECTED)) | 6725 | if (!found && (I915_READ(DP_B) & DP_DETECTED)) |
6808 | intel_dp_init(dev, DP_B); | 6726 | intel_dp_init(dev, DP_B, PORT_B); |
6809 | } | 6727 | } |
6810 | 6728 | ||
6811 | if (I915_READ(SDVOC) & PORT_DETECTED) | 6729 | if (I915_READ(SDVOC) & PORT_DETECTED) |
6812 | intel_hdmi_init(dev, SDVOC); | 6730 | intel_hdmi_init(dev, SDVOC, PORT_C); |
6813 | 6731 | ||
6814 | /* Shares lanes with HDMI on SDVOC */ | 6732 | /* Shares lanes with HDMI on SDVOC */ |
6815 | if (I915_READ(DP_C) & DP_DETECTED) | 6733 | if (I915_READ(DP_C) & DP_DETECTED) |
6816 | intel_dp_init(dev, DP_C); | 6734 | intel_dp_init(dev, DP_C, PORT_C); |
6817 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { | 6735 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
6818 | bool found = false; | 6736 | bool found = false; |
6819 | 6737 | ||
@@ -6822,12 +6740,12 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
6822 | found = intel_sdvo_init(dev, SDVOB, true); | 6740 | found = intel_sdvo_init(dev, SDVOB, true); |
6823 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { | 6741 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { |
6824 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); | 6742 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); |
6825 | intel_hdmi_init(dev, SDVOB); | 6743 | intel_hdmi_init(dev, SDVOB, PORT_B); |
6826 | } | 6744 | } |
6827 | 6745 | ||
6828 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { | 6746 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { |
6829 | DRM_DEBUG_KMS("probing DP_B\n"); | 6747 | DRM_DEBUG_KMS("probing DP_B\n"); |
6830 | intel_dp_init(dev, DP_B); | 6748 | intel_dp_init(dev, DP_B, PORT_B); |
6831 | } | 6749 | } |
6832 | } | 6750 | } |
6833 | 6751 | ||
@@ -6842,18 +6760,18 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
6842 | 6760 | ||
6843 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { | 6761 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { |
6844 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); | 6762 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); |
6845 | intel_hdmi_init(dev, SDVOC); | 6763 | intel_hdmi_init(dev, SDVOC, PORT_C); |
6846 | } | 6764 | } |
6847 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 6765 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
6848 | DRM_DEBUG_KMS("probing DP_C\n"); | 6766 | DRM_DEBUG_KMS("probing DP_C\n"); |
6849 | intel_dp_init(dev, DP_C); | 6767 | intel_dp_init(dev, DP_C, PORT_C); |
6850 | } | 6768 | } |
6851 | } | 6769 | } |
6852 | 6770 | ||
6853 | if (SUPPORTS_INTEGRATED_DP(dev) && | 6771 | if (SUPPORTS_INTEGRATED_DP(dev) && |
6854 | (I915_READ(DP_D) & DP_DETECTED)) { | 6772 | (I915_READ(DP_D) & DP_DETECTED)) { |
6855 | DRM_DEBUG_KMS("probing DP_D\n"); | 6773 | DRM_DEBUG_KMS("probing DP_D\n"); |
6856 | intel_dp_init(dev, DP_D); | 6774 | intel_dp_init(dev, DP_D, PORT_D); |
6857 | } | 6775 | } |
6858 | } else if (IS_GEN2(dev)) | 6776 | } else if (IS_GEN2(dev)) |
6859 | intel_dvo_init(dev); | 6777 | intel_dvo_init(dev); |
@@ -6864,7 +6782,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
6864 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { | 6782 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
6865 | encoder->base.possible_crtcs = encoder->crtc_mask; | 6783 | encoder->base.possible_crtcs = encoder->crtc_mask; |
6866 | encoder->base.possible_clones = | 6784 | encoder->base.possible_clones = |
6867 | intel_encoder_clones(dev, encoder->clone_mask); | 6785 | intel_encoder_clones(encoder); |
6868 | } | 6786 | } |
6869 | 6787 | ||
6870 | /* disable all the possible outputs/crtcs before entering KMS mode */ | 6788 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
@@ -7229,10 +7147,6 @@ void intel_modeset_init(struct drm_device *dev) | |||
7229 | /* Just disable it once at startup */ | 7147 | /* Just disable it once at startup */ |
7230 | i915_disable_vga(dev); | 7148 | i915_disable_vga(dev); |
7231 | intel_setup_outputs(dev); | 7149 | intel_setup_outputs(dev); |
7232 | |||
7233 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | ||
7234 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | ||
7235 | (unsigned long)dev); | ||
7236 | } | 7150 | } |
7237 | 7151 | ||
7238 | void intel_modeset_gem_init(struct drm_device *dev) | 7152 | void intel_modeset_gem_init(struct drm_device *dev) |
@@ -7278,19 +7192,11 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
7278 | * enqueue unpin/hotplug work. */ | 7192 | * enqueue unpin/hotplug work. */ |
7279 | drm_irq_uninstall(dev); | 7193 | drm_irq_uninstall(dev); |
7280 | cancel_work_sync(&dev_priv->hotplug_work); | 7194 | cancel_work_sync(&dev_priv->hotplug_work); |
7281 | cancel_work_sync(&dev_priv->rps_work); | 7195 | cancel_work_sync(&dev_priv->rps.work); |
7282 | 7196 | ||
7283 | /* flush any delayed tasks or pending work */ | 7197 | /* flush any delayed tasks or pending work */ |
7284 | flush_scheduled_work(); | 7198 | flush_scheduled_work(); |
7285 | 7199 | ||
7286 | /* Shut off idle work before the crtcs get freed. */ | ||
7287 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
7288 | intel_crtc = to_intel_crtc(crtc); | ||
7289 | del_timer_sync(&intel_crtc->idle_timer); | ||
7290 | } | ||
7291 | del_timer_sync(&dev_priv->idle_timer); | ||
7292 | cancel_work_sync(&dev_priv->idle_work); | ||
7293 | |||
7294 | drm_mode_config_cleanup(dev); | 7200 | drm_mode_config_cleanup(dev); |
7295 | } | 7201 | } |
7296 | 7202 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index a6c426afaa7a..d14b1e39244c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -36,42 +36,10 @@ | |||
36 | #include "intel_drv.h" | 36 | #include "intel_drv.h" |
37 | #include "i915_drm.h" | 37 | #include "i915_drm.h" |
38 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
39 | #include "drm_dp_helper.h" | ||
40 | 39 | ||
41 | #define DP_RECEIVER_CAP_SIZE 0xf | ||
42 | #define DP_LINK_STATUS_SIZE 6 | 40 | #define DP_LINK_STATUS_SIZE 6 |
43 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | 41 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
44 | 42 | ||
45 | #define DP_LINK_CONFIGURATION_SIZE 9 | ||
46 | |||
47 | struct intel_dp { | ||
48 | struct intel_encoder base; | ||
49 | uint32_t output_reg; | ||
50 | uint32_t DP; | ||
51 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | ||
52 | bool has_audio; | ||
53 | enum hdmi_force_audio force_audio; | ||
54 | uint32_t color_range; | ||
55 | int dpms_mode; | ||
56 | uint8_t link_bw; | ||
57 | uint8_t lane_count; | ||
58 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; | ||
59 | struct i2c_adapter adapter; | ||
60 | struct i2c_algo_dp_aux_data algo; | ||
61 | bool is_pch_edp; | ||
62 | uint8_t train_set[4]; | ||
63 | int panel_power_up_delay; | ||
64 | int panel_power_down_delay; | ||
65 | int panel_power_cycle_delay; | ||
66 | int backlight_on_delay; | ||
67 | int backlight_off_delay; | ||
68 | struct drm_display_mode *panel_fixed_mode; /* for eDP */ | ||
69 | struct delayed_work panel_vdd_work; | ||
70 | bool want_panel_vdd; | ||
71 | struct edid *edid; /* cached EDID for eDP */ | ||
72 | int edid_mode_count; | ||
73 | }; | ||
74 | |||
75 | /** | 43 | /** |
76 | * is_edp - is the given port attached to an eDP panel (either CPU or PCH) | 44 | * is_edp - is the given port attached to an eDP panel (either CPU or PCH) |
77 | * @intel_dp: DP struct | 45 | * @intel_dp: DP struct |
@@ -1668,6 +1636,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1668 | struct drm_i915_private *dev_priv = dev->dev_private; | 1636 | struct drm_i915_private *dev_priv = dev->dev_private; |
1669 | int ret; | 1637 | int ret; |
1670 | 1638 | ||
1639 | if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { | ||
1640 | dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; | ||
1641 | |||
1642 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { | ||
1643 | case DP_TRAINING_PATTERN_DISABLE: | ||
1644 | dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; | ||
1645 | break; | ||
1646 | case DP_TRAINING_PATTERN_1: | ||
1647 | dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; | ||
1648 | break; | ||
1649 | case DP_TRAINING_PATTERN_2: | ||
1650 | dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; | ||
1651 | break; | ||
1652 | case DP_TRAINING_PATTERN_3: | ||
1653 | DRM_ERROR("DP training pattern 3 not supported\n"); | ||
1654 | dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; | ||
1655 | break; | ||
1656 | } | ||
1657 | |||
1658 | } else { | ||
1659 | dp_reg_value &= ~DP_LINK_TRAIN_MASK; | ||
1660 | |||
1661 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { | ||
1662 | case DP_TRAINING_PATTERN_DISABLE: | ||
1663 | dp_reg_value |= DP_LINK_TRAIN_OFF; | ||
1664 | break; | ||
1665 | case DP_TRAINING_PATTERN_1: | ||
1666 | dp_reg_value |= DP_LINK_TRAIN_PAT_1; | ||
1667 | break; | ||
1668 | case DP_TRAINING_PATTERN_2: | ||
1669 | dp_reg_value |= DP_LINK_TRAIN_PAT_2; | ||
1670 | break; | ||
1671 | case DP_TRAINING_PATTERN_3: | ||
1672 | DRM_ERROR("DP training pattern 3 not supported\n"); | ||
1673 | dp_reg_value |= DP_LINK_TRAIN_PAT_2; | ||
1674 | break; | ||
1675 | } | ||
1676 | } | ||
1677 | |||
1671 | I915_WRITE(intel_dp->output_reg, dp_reg_value); | 1678 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1672 | POSTING_READ(intel_dp->output_reg); | 1679 | POSTING_READ(intel_dp->output_reg); |
1673 | 1680 | ||
@@ -1675,12 +1682,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1675 | DP_TRAINING_PATTERN_SET, | 1682 | DP_TRAINING_PATTERN_SET, |
1676 | dp_train_pat); | 1683 | dp_train_pat); |
1677 | 1684 | ||
1678 | ret = intel_dp_aux_native_write(intel_dp, | 1685 | if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != |
1679 | DP_TRAINING_LANE0_SET, | 1686 | DP_TRAINING_PATTERN_DISABLE) { |
1680 | intel_dp->train_set, | 1687 | ret = intel_dp_aux_native_write(intel_dp, |
1681 | intel_dp->lane_count); | 1688 | DP_TRAINING_LANE0_SET, |
1682 | if (ret != intel_dp->lane_count) | 1689 | intel_dp->train_set, |
1683 | return false; | 1690 | intel_dp->lane_count); |
1691 | if (ret != intel_dp->lane_count) | ||
1692 | return false; | ||
1693 | } | ||
1684 | 1694 | ||
1685 | return true; | 1695 | return true; |
1686 | } | 1696 | } |
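
The two hunks above centralize training-pattern programming in intel_dp_set_link_train: callers now pass only the DPCD pattern, the function translates it to the CPT or non-CPT register encoding, and the per-lane drive settings are skipped when training is being turned off. A standalone sketch of the register-bit translation; the bit values are illustrative stand-ins for the i915_reg.h definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative encodings for the two register layouts. */
#define TRAIN_MASK      (3u << 28)
#define TRAIN_OFF       (3u << 28)
#define TRAIN_PAT_1     (0u << 28)
#define TRAIN_PAT_2     (1u << 28)
#define TRAIN_MASK_CPT  (7u << 8)
#define TRAIN_OFF_CPT   (3u << 8)
#define TRAIN_PAT_1_CPT (1u << 8)
#define TRAIN_PAT_2_CPT (2u << 8)

enum dpcd_pattern { PAT_DISABLE, PAT_1, PAT_2 };

static uint32_t set_link_train_bits(uint32_t reg, enum dpcd_pattern pat,
                                    int is_cpt)
{
        if (is_cpt) {
                reg &= ~TRAIN_MASK_CPT;
                switch (pat) {
                case PAT_DISABLE: reg |= TRAIN_OFF_CPT;   break;
                case PAT_1:       reg |= TRAIN_PAT_1_CPT; break;
                case PAT_2:       reg |= TRAIN_PAT_2_CPT; break;
                }
        } else {
                reg &= ~TRAIN_MASK;
                switch (pat) {
                case PAT_DISABLE: reg |= TRAIN_OFF;   break;
                case PAT_1:       reg |= TRAIN_PAT_1; break;
                case PAT_2:       reg |= TRAIN_PAT_2; break;
                }
        }
        return reg;
}

int main(void)
{
        /* One DPCD pattern, two register encodings. */
        printf("CPT:     0x%08x\n", set_link_train_bits(0, PAT_1, 1));
        printf("non-CPT: 0x%08x\n", set_link_train_bits(0, PAT_1, 0));
        return 0;
}
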
@@ -1696,7 +1706,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1696 | uint8_t voltage; | 1706 | uint8_t voltage; |
1697 | bool clock_recovery = false; | 1707 | bool clock_recovery = false; |
1698 | int voltage_tries, loop_tries; | 1708 | int voltage_tries, loop_tries; |
1699 | u32 reg; | ||
1700 | uint32_t DP = intel_dp->DP; | 1709 | uint32_t DP = intel_dp->DP; |
1701 | 1710 | ||
1702 | /* | 1711 | /* |
@@ -1717,10 +1726,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1717 | 1726 | ||
1718 | DP |= DP_PORT_EN; | 1727 | DP |= DP_PORT_EN; |
1719 | 1728 | ||
1720 | if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) | ||
1721 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | ||
1722 | else | ||
1723 | DP &= ~DP_LINK_TRAIN_MASK; | ||
1724 | memset(intel_dp->train_set, 0, 4); | 1729 | memset(intel_dp->train_set, 0, 4); |
1725 | voltage = 0xff; | 1730 | voltage = 0xff; |
1726 | voltage_tries = 0; | 1731 | voltage_tries = 0; |
@@ -1744,12 +1749,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1744 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1749 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1745 | } | 1750 | } |
1746 | 1751 | ||
1747 | if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) | 1752 | if (!intel_dp_set_link_train(intel_dp, DP, |
1748 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; | ||
1749 | else | ||
1750 | reg = DP | DP_LINK_TRAIN_PAT_1; | ||
1751 | |||
1752 | if (!intel_dp_set_link_train(intel_dp, reg, | ||
1753 | DP_TRAINING_PATTERN_1 | | 1753 | DP_TRAINING_PATTERN_1 | |
1754 | DP_LINK_SCRAMBLING_DISABLE)) | 1754 | DP_LINK_SCRAMBLING_DISABLE)) |
1755 | break; | 1755 | break; |
@@ -1804,10 +1804,8 @@ static void | |||
1804 | intel_dp_complete_link_train(struct intel_dp *intel_dp) | 1804 | intel_dp_complete_link_train(struct intel_dp *intel_dp) |
1805 | { | 1805 | { |
1806 | struct drm_device *dev = intel_dp->base.base.dev; | 1806 | struct drm_device *dev = intel_dp->base.base.dev; |
1807 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1808 | bool channel_eq = false; | 1807 | bool channel_eq = false; |
1809 | int tries, cr_tries; | 1808 | int tries, cr_tries; |
1810 | u32 reg; | ||
1811 | uint32_t DP = intel_dp->DP; | 1809 | uint32_t DP = intel_dp->DP; |
1812 | 1810 | ||
1813 | /* channel equalization */ | 1811 | /* channel equalization */ |
@@ -1836,13 +1834,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1836 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1834 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1837 | } | 1835 | } |
1838 | 1836 | ||
1839 | if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) | ||
1840 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; | ||
1841 | else | ||
1842 | reg = DP | DP_LINK_TRAIN_PAT_2; | ||
1843 | |||
1844 | /* channel eq pattern */ | 1837 | /* channel eq pattern */ |
1845 | if (!intel_dp_set_link_train(intel_dp, reg, | 1838 | if (!intel_dp_set_link_train(intel_dp, DP, |
1846 | DP_TRAINING_PATTERN_2 | | 1839 | DP_TRAINING_PATTERN_2 | |
1847 | DP_LINK_SCRAMBLING_DISABLE)) | 1840 | DP_LINK_SCRAMBLING_DISABLE)) |
1848 | break; | 1841 | break; |
@@ -1877,15 +1870,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1877 | ++tries; | 1870 | ++tries; |
1878 | } | 1871 | } |
1879 | 1872 | ||
1880 | if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) | 1873 | intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); |
1881 | reg = DP | DP_LINK_TRAIN_OFF_CPT; | ||
1882 | else | ||
1883 | reg = DP | DP_LINK_TRAIN_OFF; | ||
1884 | |||
1885 | I915_WRITE(intel_dp->output_reg, reg); | ||
1886 | POSTING_READ(intel_dp->output_reg); | ||
1887 | intel_dp_aux_native_write_1(intel_dp, | ||
1888 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); | ||
1889 | } | 1874 | } |
1890 | 1875 | ||
1891 | static void | 1876 | static void |
@@ -2160,7 +2145,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada | |||
2160 | ret = drm_add_edid_modes(connector, intel_dp->edid); | 2145 | ret = drm_add_edid_modes(connector, intel_dp->edid); |
2161 | drm_edid_to_eld(connector, | 2146 | drm_edid_to_eld(connector, |
2162 | intel_dp->edid); | 2147 | intel_dp->edid); |
2163 | connector->display_info.raw_edid = NULL; | ||
2164 | return intel_dp->edid_mode_count; | 2148 | return intel_dp->edid_mode_count; |
2165 | } | 2149 | } |
2166 | 2150 | ||
@@ -2206,7 +2190,6 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
2206 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); | 2190 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
2207 | if (edid) { | 2191 | if (edid) { |
2208 | intel_dp->has_audio = drm_detect_monitor_audio(edid); | 2192 | intel_dp->has_audio = drm_detect_monitor_audio(edid); |
2209 | connector->display_info.raw_edid = NULL; | ||
2210 | kfree(edid); | 2193 | kfree(edid); |
2211 | } | 2194 | } |
2212 | } | 2195 | } |
@@ -2271,8 +2254,6 @@ intel_dp_detect_audio(struct drm_connector *connector) | |||
2271 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); | 2254 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
2272 | if (edid) { | 2255 | if (edid) { |
2273 | has_audio = drm_detect_monitor_audio(edid); | 2256 | has_audio = drm_detect_monitor_audio(edid); |
2274 | |||
2275 | connector->display_info.raw_edid = NULL; | ||
2276 | kfree(edid); | 2257 | kfree(edid); |
2277 | } | 2258 | } |
2278 | 2259 | ||
@@ -2441,7 +2422,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect | |||
2441 | } | 2422 | } |
2442 | 2423 | ||
2443 | void | 2424 | void |
2444 | intel_dp_init(struct drm_device *dev, int output_reg) | 2425 | intel_dp_init(struct drm_device *dev, int output_reg, enum port port) |
2445 | { | 2426 | { |
2446 | struct drm_i915_private *dev_priv = dev->dev_private; | 2427 | struct drm_i915_private *dev_priv = dev->dev_private; |
2447 | struct drm_connector *connector; | 2428 | struct drm_connector *connector; |
@@ -2456,6 +2437,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2456 | return; | 2437 | return; |
2457 | 2438 | ||
2458 | intel_dp->output_reg = output_reg; | 2439 | intel_dp->output_reg = output_reg; |
2440 | intel_dp->port = port; | ||
2459 | intel_dp->dpms_mode = -1; | 2441 | intel_dp->dpms_mode = -1; |
2460 | 2442 | ||
2461 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 2443 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
@@ -2483,18 +2465,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2483 | 2465 | ||
2484 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 2466 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
2485 | 2467 | ||
2486 | if (output_reg == DP_B || output_reg == PCH_DP_B) | 2468 | intel_encoder->cloneable = false; |
2487 | intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); | ||
2488 | else if (output_reg == DP_C || output_reg == PCH_DP_C) | ||
2489 | intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); | ||
2490 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | ||
2491 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | ||
2492 | 2469 | ||
2493 | if (is_edp(intel_dp)) { | 2470 | INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, |
2494 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 2471 | ironlake_panel_vdd_work); |
2495 | INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, | ||
2496 | ironlake_panel_vdd_work); | ||
2497 | } | ||
2498 | 2472 | ||
2499 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 2473 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
2500 | 2474 | ||
@@ -2509,28 +2483,25 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2509 | drm_sysfs_connector_add(connector); | 2483 | drm_sysfs_connector_add(connector); |
2510 | 2484 | ||
2511 | /* Set up the DDC bus. */ | 2485 | /* Set up the DDC bus. */ |
2512 | switch (output_reg) { | 2486 | switch (port) { |
2513 | case DP_A: | 2487 | case PORT_A: |
2514 | name = "DPDDC-A"; | 2488 | name = "DPDDC-A"; |
2515 | break; | 2489 | break; |
2516 | case DP_B: | 2490 | case PORT_B: |
2517 | case PCH_DP_B: | 2491 | dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; |
2518 | dev_priv->hotplug_supported_mask |= | 2492 | name = "DPDDC-B"; |
2519 | DPB_HOTPLUG_INT_STATUS; | 2493 | break; |
2520 | name = "DPDDC-B"; | 2494 | case PORT_C: |
2521 | break; | 2495 | dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; |
2522 | case DP_C: | 2496 | name = "DPDDC-C"; |
2523 | case PCH_DP_C: | 2497 | break; |
2524 | dev_priv->hotplug_supported_mask |= | 2498 | case PORT_D: |
2525 | DPC_HOTPLUG_INT_STATUS; | 2499 | dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; |
2526 | name = "DPDDC-C"; | 2500 | name = "DPDDC-D"; |
2527 | break; | 2501 | break; |
2528 | case DP_D: | 2502 | default: |
2529 | case PCH_DP_D: | 2503 | WARN(1, "Invalid port %c\n", port_name(port)); |
2530 | dev_priv->hotplug_supported_mask |= | 2504 | break; |
2531 | DPD_HOTPLUG_INT_STATUS; | ||
2532 | name = "DPDDC-D"; | ||
2533 | break; | ||
2534 | } | 2505 | } |
2535 | 2506 | ||
2536 | intel_dp_i2c_init(intel_dp, intel_connector, name); | 2507 | intel_dp_i2c_init(intel_dp, intel_connector, name); |
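
Switching on the logical port rather than the register offset collapses the old DP_B/PCH_DP_B style cases into one arm per port; the same pattern is applied to intel_hdmi_init further below. A small sketch of the port-keyed mapping, with illustrative hotplug bits:

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D };

/* Illustrative hotplug status bits, one per port B..D; port A has none. */
static const char *dp_ddc_name(enum port port, unsigned int *hotplug_mask)
{
        switch (port) {
        case PORT_A: return "DPDDC-A";
        case PORT_B: *hotplug_mask |= 1u << 1; return "DPDDC-B";
        case PORT_C: *hotplug_mask |= 1u << 2; return "DPDDC-C";
        case PORT_D: *hotplug_mask |= 1u << 3; return "DPDDC-D";
        }
        return NULL;
}

int main(void)
{
        unsigned int mask = 0;
        const char *name = dp_ddc_name(PORT_C, &mask);

        printf("%s, hotplug mask 0x%x\n", name, mask);
        return 0;
}
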
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd54cf88a28f..e86b3a20d70b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "drm_crtc.h" | 31 | #include "drm_crtc.h" |
32 | #include "drm_crtc_helper.h" | 32 | #include "drm_crtc_helper.h" |
33 | #include "drm_fb_helper.h" | 33 | #include "drm_fb_helper.h" |
34 | #include "drm_dp_helper.h" | ||
34 | 35 | ||
35 | #define _wait_for(COND, MS, W) ({ \ | 36 | #define _wait_for(COND, MS, W) ({ \ |
36 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ | 37 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ |
@@ -90,25 +91,6 @@ | |||
90 | #define INTEL_OUTPUT_DISPLAYPORT 7 | 91 | #define INTEL_OUTPUT_DISPLAYPORT 7 |
91 | #define INTEL_OUTPUT_EDP 8 | 92 | #define INTEL_OUTPUT_EDP 8 |
92 | 93 | ||
93 | /* Intel Pipe Clone Bit */ | ||
94 | #define INTEL_HDMIB_CLONE_BIT 1 | ||
95 | #define INTEL_HDMIC_CLONE_BIT 2 | ||
96 | #define INTEL_HDMID_CLONE_BIT 3 | ||
97 | #define INTEL_HDMIE_CLONE_BIT 4 | ||
98 | #define INTEL_HDMIF_CLONE_BIT 5 | ||
99 | #define INTEL_SDVO_NON_TV_CLONE_BIT 6 | ||
100 | #define INTEL_SDVO_TV_CLONE_BIT 7 | ||
101 | #define INTEL_SDVO_LVDS_CLONE_BIT 8 | ||
102 | #define INTEL_ANALOG_CLONE_BIT 9 | ||
103 | #define INTEL_TV_CLONE_BIT 10 | ||
104 | #define INTEL_DP_B_CLONE_BIT 11 | ||
105 | #define INTEL_DP_C_CLONE_BIT 12 | ||
106 | #define INTEL_DP_D_CLONE_BIT 13 | ||
107 | #define INTEL_LVDS_CLONE_BIT 14 | ||
108 | #define INTEL_DVO_TMDS_CLONE_BIT 15 | ||
109 | #define INTEL_DVO_LVDS_CLONE_BIT 16 | ||
110 | #define INTEL_EDP_CLONE_BIT 17 | ||
111 | |||
112 | #define INTEL_DVO_CHIP_NONE 0 | 94 | #define INTEL_DVO_CHIP_NONE 0 |
113 | #define INTEL_DVO_CHIP_LVDS 1 | 95 | #define INTEL_DVO_CHIP_LVDS 1 |
114 | #define INTEL_DVO_CHIP_TMDS 2 | 96 | #define INTEL_DVO_CHIP_TMDS 2 |
@@ -153,9 +135,13 @@ struct intel_encoder { | |||
153 | struct drm_encoder base; | 135 | struct drm_encoder base; |
154 | int type; | 136 | int type; |
155 | bool needs_tv_clock; | 137 | bool needs_tv_clock; |
138 | /* | ||
139 | * Intel hw has only one MUX where encoders could be clone, hence a | ||
140 | * simple flag is enough to compute the possible_clones mask. | ||
141 | */ | ||
142 | bool cloneable; | ||
156 | void (*hot_plug)(struct intel_encoder *); | 143 | void (*hot_plug)(struct intel_encoder *); |
157 | int crtc_mask; | 144 | int crtc_mask; |
158 | int clone_mask; | ||
159 | }; | 145 | }; |
160 | 146 | ||
161 | struct intel_connector { | 147 | struct intel_connector { |
@@ -171,8 +157,6 @@ struct intel_crtc { | |||
171 | int dpms_mode; | 157 | int dpms_mode; |
172 | bool active; /* is the crtc on? independent of the dpms mode */ | 158 | bool active; /* is the crtc on? independent of the dpms mode */ |
173 | bool primary_disabled; /* is the crtc obscured by a plane? */ | 159 | bool primary_disabled; /* is the crtc obscured by a plane? */ |
174 | bool busy; /* is scanout buffer being updated frequently? */ | ||
175 | struct timer_list idle_timer; | ||
176 | bool lowfreq_avail; | 160 | bool lowfreq_avail; |
177 | struct intel_overlay *overlay; | 161 | struct intel_overlay *overlay; |
178 | struct intel_unpin_work *unpin_work; | 162 | struct intel_unpin_work *unpin_work; |
@@ -311,6 +295,38 @@ struct intel_hdmi { | |||
311 | struct drm_display_mode *adjusted_mode); | 295 | struct drm_display_mode *adjusted_mode); |
312 | }; | 296 | }; |
313 | 297 | ||
298 | #define DP_RECEIVER_CAP_SIZE 0xf | ||
299 | #define DP_LINK_CONFIGURATION_SIZE 9 | ||
300 | |||
301 | struct intel_dp { | ||
302 | struct intel_encoder base; | ||
303 | uint32_t output_reg; | ||
304 | uint32_t DP; | ||
305 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | ||
306 | bool has_audio; | ||
307 | enum hdmi_force_audio force_audio; | ||
308 | enum port port; | ||
309 | uint32_t color_range; | ||
310 | int dpms_mode; | ||
311 | uint8_t link_bw; | ||
312 | uint8_t lane_count; | ||
313 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; | ||
314 | struct i2c_adapter adapter; | ||
315 | struct i2c_algo_dp_aux_data algo; | ||
316 | bool is_pch_edp; | ||
317 | uint8_t train_set[4]; | ||
318 | int panel_power_up_delay; | ||
319 | int panel_power_down_delay; | ||
320 | int panel_power_cycle_delay; | ||
321 | int backlight_on_delay; | ||
322 | int backlight_off_delay; | ||
323 | struct drm_display_mode *panel_fixed_mode; /* for eDP */ | ||
324 | struct delayed_work panel_vdd_work; | ||
325 | bool want_panel_vdd; | ||
326 | struct edid *edid; /* cached EDID for eDP */ | ||
327 | int edid_mode_count; | ||
328 | }; | ||
329 | |||
314 | static inline struct drm_crtc * | 330 | static inline struct drm_crtc * |
315 | intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) | 331 | intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) |
316 | { | 332 | { |
@@ -350,17 +366,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector); | |||
350 | extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); | 366 | extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); |
351 | 367 | ||
352 | extern void intel_crt_init(struct drm_device *dev); | 368 | extern void intel_crt_init(struct drm_device *dev); |
353 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); | 369 | extern void intel_hdmi_init(struct drm_device *dev, |
370 | int sdvox_reg, enum port port); | ||
354 | extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); | 371 | extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); |
355 | extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); | 372 | extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); |
356 | extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, | 373 | extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, |
357 | bool is_sdvob); | 374 | bool is_sdvob); |
358 | extern void intel_dvo_init(struct drm_device *dev); | 375 | extern void intel_dvo_init(struct drm_device *dev); |
359 | extern void intel_tv_init(struct drm_device *dev); | 376 | extern void intel_tv_init(struct drm_device *dev); |
360 | extern void intel_mark_busy(struct drm_device *dev, | 377 | extern void intel_mark_busy(struct drm_device *dev); |
361 | struct drm_i915_gem_object *obj); | 378 | extern void intel_mark_idle(struct drm_device *dev); |
379 | extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); | ||
380 | extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); | ||
362 | extern bool intel_lvds_init(struct drm_device *dev); | 381 | extern bool intel_lvds_init(struct drm_device *dev); |
363 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); | 382 | extern void intel_dp_init(struct drm_device *dev, int output_reg, |
383 | enum port port); | ||
364 | void | 384 | void |
365 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 385 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
366 | struct drm_display_mode *adjusted_mode); | 386 | struct drm_display_mode *adjusted_mode); |
@@ -373,8 +393,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe); | |||
373 | extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, | 393 | extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, |
374 | enum plane plane); | 394 | enum plane plane); |
375 | 395 | ||
376 | void intel_sanitize_pm(struct drm_device *dev); | ||
377 | |||
378 | /* intel_panel.c */ | 396 | /* intel_panel.c */ |
379 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 397 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
380 | struct drm_display_mode *adjusted_mode); | 398 | struct drm_display_mode *adjusted_mode); |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 36c542e5036b..03dfdff8e003 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #define SIL164_ADDR 0x38 | 37 | #define SIL164_ADDR 0x38 |
38 | #define CH7xxx_ADDR 0x76 | 38 | #define CH7xxx_ADDR 0x76 |
39 | #define TFP410_ADDR 0x38 | 39 | #define TFP410_ADDR 0x38 |
40 | #define NS2501_ADDR 0x38 | ||
40 | 41 | ||
41 | static const struct intel_dvo_device intel_dvo_devices[] = { | 42 | static const struct intel_dvo_device intel_dvo_devices[] = { |
42 | { | 43 | { |
@@ -74,7 +75,14 @@ static const struct intel_dvo_device intel_dvo_devices[] = { | |||
74 | .slave_addr = 0x75, | 75 | .slave_addr = 0x75, |
75 | .gpio = GMBUS_PORT_DPB, | 76 | .gpio = GMBUS_PORT_DPB, |
76 | .dev_ops = &ch7017_ops, | 77 | .dev_ops = &ch7017_ops, |
77 | } | 78 | }, |
79 | { | ||
80 | .type = INTEL_DVO_CHIP_TMDS, | ||
81 | .name = "ns2501", | ||
82 | .dvo_reg = DVOC, | ||
83 | .slave_addr = NS2501_ADDR, | ||
84 | .dev_ops = &ns2501_ops, | ||
85 | } | ||
78 | }; | 86 | }; |
79 | 87 | ||
80 | struct intel_dvo { | 88 | struct intel_dvo { |
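
The new ns2501 entry slots into the driver's table-driven probe: each row names a transmitter chip, its DVO register, I2C slave address, and ops vtable, and intel_dvo_init walks the table trying each chip until one responds. A simplified standalone sketch of that probe loop (the probe functions here are stand-ins, not the real detection code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dvo_ops { bool (*init)(void); };

static bool ch7017_probe(void) { return false; } /* stand-in: not present */
static bool ns2501_probe(void) { return true; }  /* stand-in: responds */

static const struct dvo_ops ch7017_ops = { ch7017_probe };
static const struct dvo_ops ns2501_ops = { ns2501_probe };

struct dvo_device {
        const char *name;
        unsigned int slave_addr;
        const struct dvo_ops *ops;
};

static const struct dvo_device dvo_devices[] = {
        { "ch7017", 0x75, &ch7017_ops },
        { "ns2501", 0x38, &ns2501_ops }, /* the newly added entry */
};

int main(void)
{
        size_t i;

        /* Try each known transmitter until one answers on the bus. */
        for (i = 0; i < sizeof(dvo_devices) / sizeof(dvo_devices[0]); i++) {
                if (dvo_devices[i].ops->init()) {
                        printf("found %s at 0x%02x\n", dvo_devices[i].name,
                               dvo_devices[i].slave_addr);
                        return 0;
                }
        }
        printf("no DVO transmitter found\n");
        return 1;
}
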
@@ -396,17 +404,14 @@ void intel_dvo_init(struct drm_device *dev) | |||
396 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 404 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
397 | switch (dvo->type) { | 405 | switch (dvo->type) { |
398 | case INTEL_DVO_CHIP_TMDS: | 406 | case INTEL_DVO_CHIP_TMDS: |
399 | intel_encoder->clone_mask = | 407 | intel_encoder->cloneable = true; |
400 | (1 << INTEL_DVO_TMDS_CLONE_BIT) | | ||
401 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
402 | drm_connector_init(dev, connector, | 408 | drm_connector_init(dev, connector, |
403 | &intel_dvo_connector_funcs, | 409 | &intel_dvo_connector_funcs, |
404 | DRM_MODE_CONNECTOR_DVII); | 410 | DRM_MODE_CONNECTOR_DVII); |
405 | encoder_type = DRM_MODE_ENCODER_TMDS; | 411 | encoder_type = DRM_MODE_ENCODER_TMDS; |
406 | break; | 412 | break; |
407 | case INTEL_DVO_CHIP_LVDS: | 413 | case INTEL_DVO_CHIP_LVDS: |
408 | intel_encoder->clone_mask = | 414 | intel_encoder->cloneable = false; |
409 | (1 << INTEL_DVO_LVDS_CLONE_BIT); | ||
410 | drm_connector_init(dev, connector, | 415 | drm_connector_init(dev, connector, |
411 | &intel_dvo_connector_funcs, | 416 | &intel_dvo_connector_funcs, |
412 | DRM_MODE_CONNECTOR_LVDS); | 417 | DRM_MODE_CONNECTOR_LVDS); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 98f602427eb8..35a6ee7a8cca 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -737,7 +737,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
737 | drm_detect_hdmi_monitor(edid); | 737 | drm_detect_hdmi_monitor(edid); |
738 | intel_hdmi->has_audio = drm_detect_monitor_audio(edid); | 738 | intel_hdmi->has_audio = drm_detect_monitor_audio(edid); |
739 | } | 739 | } |
740 | connector->display_info.raw_edid = NULL; | ||
741 | kfree(edid); | 740 | kfree(edid); |
742 | } | 741 | } |
743 | 742 | ||
@@ -778,8 +777,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector) | |||
778 | if (edid) { | 777 | if (edid) { |
779 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | 778 | if (edid->input & DRM_EDID_INPUT_DIGITAL) |
780 | has_audio = drm_detect_monitor_audio(edid); | 779 | has_audio = drm_detect_monitor_audio(edid); |
781 | |||
782 | connector->display_info.raw_edid = NULL; | ||
783 | kfree(edid); | 780 | kfree(edid); |
784 | } | 781 | } |
785 | 782 | ||
@@ -889,7 +886,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c | |||
889 | intel_attach_broadcast_rgb_property(connector); | 886 | intel_attach_broadcast_rgb_property(connector); |
890 | } | 887 | } |
891 | 888 | ||
892 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | 889 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) |
893 | { | 890 | { |
894 | struct drm_i915_private *dev_priv = dev->dev_private; | 891 | struct drm_i915_private *dev_priv = dev->dev_private; |
895 | struct drm_connector *connector; | 892 | struct drm_connector *connector; |
@@ -923,48 +920,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
923 | connector->doublescan_allowed = 0; | 920 | connector->doublescan_allowed = 0; |
924 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 921 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
925 | 922 | ||
926 | /* Set up the DDC bus. */ | 923 | intel_encoder->cloneable = false; |
927 | if (sdvox_reg == SDVOB) { | 924 | |
928 | intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | 925 | intel_hdmi->ddi_port = port; |
929 | intel_hdmi->ddc_bus = GMBUS_PORT_DPB; | 926 | switch (port) { |
930 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | 927 | case PORT_B: |
931 | } else if (sdvox_reg == SDVOC) { | ||
932 | intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | ||
933 | intel_hdmi->ddc_bus = GMBUS_PORT_DPC; | ||
934 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
935 | } else if (sdvox_reg == HDMIB) { | ||
936 | intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | ||
937 | intel_hdmi->ddc_bus = GMBUS_PORT_DPB; | ||
938 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
939 | } else if (sdvox_reg == HDMIC) { | ||
940 | intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | ||
941 | intel_hdmi->ddc_bus = GMBUS_PORT_DPC; | ||
942 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
943 | } else if (sdvox_reg == HDMID) { | ||
944 | intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | ||
945 | intel_hdmi->ddc_bus = GMBUS_PORT_DPD; | ||
946 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | ||
947 | } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) { | ||
948 | DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n"); | ||
949 | intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | ||
950 | intel_hdmi->ddc_bus = GMBUS_PORT_DPB; | 928 | intel_hdmi->ddc_bus = GMBUS_PORT_DPB; |
951 | intel_hdmi->ddi_port = PORT_B; | ||
952 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | 929 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; |
953 | } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) { | 930 | break; |
954 | DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n"); | 931 | case PORT_C: |
955 | intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | ||
956 | intel_hdmi->ddc_bus = GMBUS_PORT_DPC; | 932 | intel_hdmi->ddc_bus = GMBUS_PORT_DPC; |
957 | intel_hdmi->ddi_port = PORT_C; | ||
958 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | 933 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; |
959 | } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) { | 934 | break; |
960 | DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n"); | 935 | case PORT_D: |
961 | intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | ||
962 | intel_hdmi->ddc_bus = GMBUS_PORT_DPD; | 936 | intel_hdmi->ddc_bus = GMBUS_PORT_DPD; |
963 | intel_hdmi->ddi_port = PORT_D; | ||
964 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | 937 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; |
965 | } else { | 938 | break; |
966 | /* If we got an unknown sdvox_reg, things are pretty much broken | 939 | case PORT_A: |
967 | * in a way that we should let the kernel know about it */ | 940 | /* Internal port only for eDP. */ |
941 | default: | ||
968 | BUG(); | 942 | BUG(); |
969 | } | 943 | } |
970 | 944 | ||
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index e05c0d3e3440..d789fdad5d37 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -967,7 +967,7 @@ bool intel_lvds_init(struct drm_device *dev) | |||
967 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 967 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
968 | intel_encoder->type = INTEL_OUTPUT_LVDS; | 968 | intel_encoder->type = INTEL_OUTPUT_LVDS; |
969 | 969 | ||
970 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 970 | intel_encoder->cloneable = false; |
971 | if (HAS_PCH_SPLIT(dev)) | 971 | if (HAS_PCH_SPLIT(dev)) |
972 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 972 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
973 | else if (IS_GEN4(dev)) | 973 | else if (IS_GEN4(dev)) |
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 29b72593fbb2..4bc1c0fc342a 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector, | |||
45 | drm_mode_connector_update_edid_property(connector, edid); | 45 | drm_mode_connector_update_edid_property(connector, edid); |
46 | ret = drm_add_edid_modes(connector, edid); | 46 | ret = drm_add_edid_modes(connector, edid); |
47 | drm_edid_to_eld(connector, edid); | 47 | drm_edid_to_eld(connector, edid); |
48 | connector->display_info.raw_edid = NULL; | ||
49 | kfree(edid); | 48 | kfree(edid); |
50 | 49 | ||
51 | return ret; | 50 | return ret; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1881c8c83f0e..9b05f7832dc2 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -2160,11 +2160,28 @@ err_unref: | |||
2160 | return NULL; | 2160 | return NULL; |
2161 | } | 2161 | } |
2162 | 2162 | ||
2163 | /** | ||
2164 | * Lock protecting IPS related data structures | ||
2165 | * - i915_mch_dev | ||
2166 | * - dev_priv->max_delay | ||
2167 | * - dev_priv->min_delay | ||
2168 | * - dev_priv->fmax | ||
2169 | * - dev_priv->gpu_busy | ||
2170 | * - dev_priv->gfx_power | ||
2171 | */ | ||
2172 | DEFINE_SPINLOCK(mchdev_lock); | ||
2173 | |||
2174 | /* Global for IPS driver to get at the current i915 device. Protected by | ||
2175 | * mchdev_lock. */ | ||
2176 | static struct drm_i915_private *i915_mch_dev; | ||
2177 | |||
2163 | bool ironlake_set_drps(struct drm_device *dev, u8 val) | 2178 | bool ironlake_set_drps(struct drm_device *dev, u8 val) |
2164 | { | 2179 | { |
2165 | struct drm_i915_private *dev_priv = dev->dev_private; | 2180 | struct drm_i915_private *dev_priv = dev->dev_private; |
2166 | u16 rgvswctl; | 2181 | u16 rgvswctl; |
2167 | 2182 | ||
2183 | assert_spin_locked(&mchdev_lock); | ||
2184 | |||
2168 | rgvswctl = I915_READ16(MEMSWCTL); | 2185 | rgvswctl = I915_READ16(MEMSWCTL); |
2169 | if (rgvswctl & MEMCTL_CMD_STS) { | 2186 | if (rgvswctl & MEMCTL_CMD_STS) { |
2170 | DRM_DEBUG("gpu busy, RCS change rejected\n"); | 2187 | DRM_DEBUG("gpu busy, RCS change rejected\n"); |
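Moving mchdev_lock above ironlake_set_drps() lets the leaf function assert the lock instead of taking it, with the entry points now responsible for acquisition. A minimal pthread-based model of that discipline (userspace C; the `mchdev_locked` flag stands in for what lockdep tracks in the kernel):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mchdev_lock = PTHREAD_MUTEX_INITIALIZER;
static bool mchdev_locked;          /* models lockdep's held-state */
static int fstart;                  /* example datum guarded by the lock */

static void set_drps(int val)       /* leaf: asserts, never locks */
{
	assert(mchdev_locked);
	fstart = val;               /* ... would program MEMSWCTL here ... */
}

static void enable_drps(void)       /* entry point: owns the locking */
{
	pthread_mutex_lock(&mchdev_lock);
	mchdev_locked = true;

	set_drps(7);

	mchdev_locked = false;
	pthread_mutex_unlock(&mchdev_lock);
}

The payoff is that a mis-nested caller trips the assertion immediately instead of silently racing, which is exactly what the assert_spin_locked() added above buys.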
@@ -2188,6 +2205,8 @@ static void ironlake_enable_drps(struct drm_device *dev) | |||
2188 | u32 rgvmodectl = I915_READ(MEMMODECTL); | 2205 | u32 rgvmodectl = I915_READ(MEMMODECTL); |
2189 | u8 fmax, fmin, fstart, vstart; | 2206 | u8 fmax, fmin, fstart, vstart; |
2190 | 2207 | ||
2208 | spin_lock_irq(&mchdev_lock); | ||
2209 | |||
2191 | /* Enable temp reporting */ | 2210 | /* Enable temp reporting */ |
2192 | I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); | 2211 | I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); |
2193 | I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); | 2212 | I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); |
@@ -2233,9 +2252,9 @@ static void ironlake_enable_drps(struct drm_device *dev) | |||
2233 | rgvmodectl |= MEMMODE_SWMODE_EN; | 2252 | rgvmodectl |= MEMMODE_SWMODE_EN; |
2234 | I915_WRITE(MEMMODECTL, rgvmodectl); | 2253 | I915_WRITE(MEMMODECTL, rgvmodectl); |
2235 | 2254 | ||
2236 | if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) | 2255 | if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) |
2237 | DRM_ERROR("stuck trying to change perf mode\n"); | 2256 | DRM_ERROR("stuck trying to change perf mode\n"); |
2238 | msleep(1); | 2257 | mdelay(1); |
2239 | 2258 | ||
2240 | ironlake_set_drps(dev, fstart); | 2259 | ironlake_set_drps(dev, fstart); |
2241 | 2260 | ||
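The msleep()/wait_for() to mdelay()/wait_for_atomic() swaps in this hunk are forced by the new spin_lock_irq() around the DRPS paths: with a spinlock held and interrupts off, sleeping is illegal, so the delays must busy-wait instead. A rough userspace model of a wait_for_atomic()-style poll (the condition callback and millisecond timebase are illustrative):

#include <stdbool.h>
#include <time.h>

/* Poll cond() until it holds or timeout_ms elapses, without sleeping. */
static bool busy_wait_for(bool (*cond)(void), long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!cond()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed = (now.tv_sec - start.tv_sec) * 1000 +
			       (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed > timeout_ms)
			return false;  /* timed out, like the 10ms case above */
		/* deliberately no sleep: this context cannot schedule */
	}
	return true;
}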
@@ -2244,12 +2263,18 @@ static void ironlake_enable_drps(struct drm_device *dev) | |||
2244 | dev_priv->last_time1 = jiffies_to_msecs(jiffies); | 2263 | dev_priv->last_time1 = jiffies_to_msecs(jiffies); |
2245 | dev_priv->last_count2 = I915_READ(0x112f4); | 2264 | dev_priv->last_count2 = I915_READ(0x112f4); |
2246 | getrawmonotonic(&dev_priv->last_time2); | 2265 | getrawmonotonic(&dev_priv->last_time2); |
2266 | |||
2267 | spin_unlock_irq(&mchdev_lock); | ||
2247 | } | 2268 | } |
2248 | 2269 | ||
2249 | static void ironlake_disable_drps(struct drm_device *dev) | 2270 | static void ironlake_disable_drps(struct drm_device *dev) |
2250 | { | 2271 | { |
2251 | struct drm_i915_private *dev_priv = dev->dev_private; | 2272 | struct drm_i915_private *dev_priv = dev->dev_private; |
2252 | u16 rgvswctl = I915_READ16(MEMSWCTL); | 2273 | u16 rgvswctl; |
2274 | |||
2275 | spin_lock_irq(&mchdev_lock); | ||
2276 | |||
2277 | rgvswctl = I915_READ16(MEMSWCTL); | ||
2253 | 2278 | ||
2254 | /* Ack interrupts, disable EFC interrupt */ | 2279 | /* Ack interrupts, disable EFC interrupt */ |
2255 | I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); | 2280 | I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); |
@@ -2260,30 +2285,51 @@ static void ironlake_disable_drps(struct drm_device *dev) | |||
2260 | 2285 | ||
2261 | /* Go back to the starting frequency */ | 2286 | /* Go back to the starting frequency */ |
2262 | ironlake_set_drps(dev, dev_priv->fstart); | 2287 | ironlake_set_drps(dev, dev_priv->fstart); |
2263 | msleep(1); | 2288 | mdelay(1); |
2264 | rgvswctl |= MEMCTL_CMD_STS; | 2289 | rgvswctl |= MEMCTL_CMD_STS; |
2265 | I915_WRITE(MEMSWCTL, rgvswctl); | 2290 | I915_WRITE(MEMSWCTL, rgvswctl); |
2266 | msleep(1); | 2291 | mdelay(1); |
2267 | 2292 | ||
2293 | spin_unlock_irq(&mchdev_lock); | ||
2268 | } | 2294 | } |
2269 | 2295 | ||
2270 | void gen6_set_rps(struct drm_device *dev, u8 val) | 2296 | /* There's a funny hw issue where the hw returns all 0 when reading from |
2297 | * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value | ||
2298 | * ourselves, instead of doing a rmw cycle (which might result in us clearing | ||
2299 | * all limits and the gpu stuck at whatever frequency it is at atm). | ||
2300 | */ | ||
2301 | static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val) | ||
2271 | { | 2302 | { |
2272 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2273 | u32 limits; | 2303 | u32 limits; |
2274 | 2304 | ||
2275 | limits = 0; | 2305 | limits = 0; |
2276 | if (val >= dev_priv->max_delay) | ||
2277 | val = dev_priv->max_delay; | ||
2278 | else | ||
2279 | limits |= dev_priv->max_delay << 24; | ||
2280 | 2306 | ||
2281 | if (val <= dev_priv->min_delay) | 2307 | if (*val >= dev_priv->rps.max_delay) |
2282 | val = dev_priv->min_delay; | 2308 | *val = dev_priv->rps.max_delay; |
2283 | else | 2309 | limits |= dev_priv->rps.max_delay << 24; |
2284 | limits |= dev_priv->min_delay << 16; | 2310 | |
2311 | /* Only set the down limit when we've reached the lowest level to avoid | ||
2312 | * getting more interrupts, otherwise leave this clear. This prevents a | ||
2313 | * race in the hw when coming out of rc6: There's a tiny window where | ||
2314 | * the hw runs at the minimal clock before selecting the desired | ||
2315 | * frequency, if the down threshold expires in that window we will not | ||
2316 | * receive a down interrupt. */ | ||
2317 | if (*val <= dev_priv->rps.min_delay) { | ||
2318 | *val = dev_priv->rps.min_delay; | ||
2319 | limits |= dev_priv->rps.min_delay << 16; | ||
2320 | } | ||
2321 | |||
2322 | return limits; | ||
2323 | } | ||
2285 | 2324 | ||
2286 | if (val == dev_priv->cur_delay) | 2325 | void gen6_set_rps(struct drm_device *dev, u8 val) |
2326 | { | ||
2327 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2328 | u32 limits = gen6_rps_limits(dev_priv, &val); | ||
2329 | |||
2330 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
2331 | |||
2332 | if (val == dev_priv->rps.cur_delay) | ||
2287 | return; | 2333 | return; |
2288 | 2334 | ||
2289 | I915_WRITE(GEN6_RPNSWREQ, | 2335 | I915_WRITE(GEN6_RPNSWREQ, |
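The gen6_rps_limits() helper factored out in the hunk above exists because GEN6_RP_INTERRUPT_LIMITS can read back as all zeroes, so the limits word (max delay in bits 31:24, min delay in bits 23:16, per the shifts in the code) is rebuilt from scratch on every write rather than read-modify-written. A pure-function sketch of that computation, compilable as-is:

#include <stdint.h>
#include <stdio.h>

static uint32_t rps_limits(uint8_t min_delay, uint8_t max_delay, uint8_t *val)
{
	uint32_t limits = 0;

	if (*val >= max_delay)
		*val = max_delay;
	limits |= (uint32_t)max_delay << 24;  /* up limit always armed */

	/* Down limit only at the floor, to avoid the rc6-exit race the
	 * comment above describes (a missed down interrupt). */
	if (*val <= min_delay) {
		*val = min_delay;
		limits |= (uint32_t)min_delay << 16;
	}
	return limits;
}

int main(void)
{
	uint8_t val = 20;
	uint32_t limits = rps_limits(3, 12, &val);  /* val clamps to 12 */
	printf("val=%u limits=0x%08x\n", val, limits);
	return 0;
}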
@@ -2296,7 +2342,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | |||
2296 | */ | 2342 | */ |
2297 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); | 2343 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); |
2298 | 2344 | ||
2299 | dev_priv->cur_delay = val; | 2345 | dev_priv->rps.cur_delay = val; |
2300 | } | 2346 | } |
2301 | 2347 | ||
2302 | static void gen6_disable_rps(struct drm_device *dev) | 2348 | static void gen6_disable_rps(struct drm_device *dev) |
@@ -2312,40 +2358,35 @@ static void gen6_disable_rps(struct drm_device *dev) | |||
2312 | * register (PMIMR) to mask PM interrupts. The only risk is in leaving | 2358 | * register (PMIMR) to mask PM interrupts. The only risk is in leaving |
2313 | * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ | 2359 | * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ |
2314 | 2360 | ||
2315 | spin_lock_irq(&dev_priv->rps_lock); | 2361 | spin_lock_irq(&dev_priv->rps.lock); |
2316 | dev_priv->pm_iir = 0; | 2362 | dev_priv->rps.pm_iir = 0; |
2317 | spin_unlock_irq(&dev_priv->rps_lock); | 2363 | spin_unlock_irq(&dev_priv->rps.lock); |
2318 | 2364 | ||
2319 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); | 2365 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); |
2320 | } | 2366 | } |
2321 | 2367 | ||
2322 | int intel_enable_rc6(const struct drm_device *dev) | 2368 | int intel_enable_rc6(const struct drm_device *dev) |
2323 | { | 2369 | { |
2324 | /* | 2370 | /* Respect the kernel parameter if it is set */ |
2325 | * Respect the kernel parameter if it is set | ||
2326 | */ | ||
2327 | if (i915_enable_rc6 >= 0) | 2371 | if (i915_enable_rc6 >= 0) |
2328 | return i915_enable_rc6; | 2372 | return i915_enable_rc6; |
2329 | 2373 | ||
2330 | /* | 2374 | if (INTEL_INFO(dev)->gen == 5) { |
2331 | * Disable RC6 on Ironlake | 2375 | DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n"); |
2332 | */ | 2376 | return INTEL_RC6_ENABLE; |
2333 | if (INTEL_INFO(dev)->gen == 5) | 2377 | } |
2334 | return 0; | ||
2335 | 2378 | ||
2336 | /* On Haswell, only RC6 is available. So let's enable it by default to | 2379 | if (IS_HASWELL(dev)) { |
2337 | * provide better testing and coverage since the beginning. | 2380 | DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); |
2338 | */ | ||
2339 | if (IS_HASWELL(dev)) | ||
2340 | return INTEL_RC6_ENABLE; | 2381 | return INTEL_RC6_ENABLE; |
2382 | } | ||
2341 | 2383 | ||
2342 | /* | 2384 | /* snb/ivb have more than one rc6 state. */ |
2343 | * Disable rc6 on Sandybridge | ||
2344 | */ | ||
2345 | if (INTEL_INFO(dev)->gen == 6) { | 2385 | if (INTEL_INFO(dev)->gen == 6) { |
2346 | DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); | 2386 | DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); |
2347 | return INTEL_RC6_ENABLE; | 2387 | return INTEL_RC6_ENABLE; |
2348 | } | 2388 | } |
2389 | |||
2349 | DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); | 2390 | DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); |
2350 | return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); | 2391 | return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); |
2351 | } | 2392 | } |
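intel_enable_rc6() now returns a bitmask rather than a bare on/off, with each bit enabling one rc6 state and the module parameter overriding everything. A sketch of the decision table as a pure function (the bit values model INTEL_RC6_ENABLE and INTEL_RC6p_ENABLE and are assumptions here; the gen checks are the ones in the diff):

#define RC6_ENABLE   (1 << 0)   /* plain rc6, assumed bit layout */
#define RC6P_ENABLE  (1 << 1)   /* deep rc6, assumed bit layout */

static int rc6_mask(int module_param, int gen, int is_haswell)
{
	if (module_param >= 0)
		return module_param;        /* explicit request always wins */

	if (gen == 5 || gen == 6 || is_haswell)
		return RC6_ENABLE;          /* only plain rc6 on ilk/snb/hsw */

	return RC6_ENABLE | RC6P_ENABLE;    /* e.g. Ivybridge: rc6 + deep rc6 */
}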
@@ -2383,9 +2424,9 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2383 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | 2424 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
2384 | 2425 | ||
2385 | /* In units of 100MHz */ | 2426 | /* In units of 100MHz */ |
2386 | dev_priv->max_delay = rp_state_cap & 0xff; | 2427 | dev_priv->rps.max_delay = rp_state_cap & 0xff; |
2387 | dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16; | 2428 | dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; |
2388 | dev_priv->cur_delay = 0; | 2429 | dev_priv->rps.cur_delay = 0; |
2389 | 2430 | ||
2390 | /* disable the counters and set deterministic thresholds */ | 2431 | /* disable the counters and set deterministic thresholds */ |
2391 | I915_WRITE(GEN6_RC_CONTROL, 0); | 2432 | I915_WRITE(GEN6_RC_CONTROL, 0); |
@@ -2438,8 +2479,8 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2438 | 2479 | ||
2439 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); | 2480 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); |
2440 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | 2481 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, |
2441 | dev_priv->max_delay << 24 | | 2482 | dev_priv->rps.max_delay << 24 | |
2442 | dev_priv->min_delay << 16); | 2483 | dev_priv->rps.min_delay << 16); |
2443 | 2484 | ||
2444 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); | 2485 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); |
2445 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); | 2486 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); |
@@ -2477,7 +2518,7 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2477 | 500)) | 2518 | 500)) |
2478 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | 2519 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); |
2479 | if (pcu_mbox & (1<<31)) { /* OC supported */ | 2520 | if (pcu_mbox & (1<<31)) { /* OC supported */ |
2480 | dev_priv->max_delay = pcu_mbox & 0xff; | 2521 | dev_priv->rps.max_delay = pcu_mbox & 0xff; |
2481 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); | 2522 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); |
2482 | } | 2523 | } |
2483 | 2524 | ||
@@ -2485,10 +2526,10 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2485 | 2526 | ||
2486 | /* requires MSI enabled */ | 2527 | /* requires MSI enabled */ |
2487 | I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); | 2528 | I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); |
2488 | spin_lock_irq(&dev_priv->rps_lock); | 2529 | spin_lock_irq(&dev_priv->rps.lock); |
2489 | WARN_ON(dev_priv->pm_iir != 0); | 2530 | WARN_ON(dev_priv->rps.pm_iir != 0); |
2490 | I915_WRITE(GEN6_PMIMR, 0); | 2531 | I915_WRITE(GEN6_PMIMR, 0); |
2491 | spin_unlock_irq(&dev_priv->rps_lock); | 2532 | spin_unlock_irq(&dev_priv->rps.lock); |
2492 | /* enable all PM interrupts */ | 2533 | /* enable all PM interrupts */ |
2493 | I915_WRITE(GEN6_PMINTRMSK, 0); | 2534 | I915_WRITE(GEN6_PMINTRMSK, 0); |
2494 | 2535 | ||
@@ -2520,9 +2561,9 @@ static void gen6_update_ring_freq(struct drm_device *dev) | |||
2520 | * to use for memory access. We do this by specifying the IA frequency | 2561 | * to use for memory access. We do this by specifying the IA frequency |
2521 | * the PCU should use as a reference to determine the ring frequency. | 2562 | * the PCU should use as a reference to determine the ring frequency. |
2522 | */ | 2563 | */ |
2523 | for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; | 2564 | for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; |
2524 | gpu_freq--) { | 2565 | gpu_freq--) { |
2525 | int diff = dev_priv->max_delay - gpu_freq; | 2566 | int diff = dev_priv->rps.max_delay - gpu_freq; |
2526 | 2567 | ||
2527 | /* | 2568 | /* |
2528 | * For GPU frequencies less than 750MHz, just use the lowest | 2569 | * For GPU frequencies less than 750MHz, just use the lowest |
@@ -2693,6 +2734,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) | |||
2693 | unsigned long now = jiffies_to_msecs(jiffies), diff1; | 2734 | unsigned long now = jiffies_to_msecs(jiffies), diff1; |
2694 | int i; | 2735 | int i; |
2695 | 2736 | ||
2737 | assert_spin_locked(&mchdev_lock); | ||
2738 | |||
2696 | diff1 = now - dev_priv->last_time1; | 2739 | diff1 = now - dev_priv->last_time1; |
2697 | 2740 | ||
2698 | /* Prevent division-by-zero if we are asking too fast. | 2741 | /* Prevent division-by-zero if we are asking too fast. |
@@ -2894,15 +2937,14 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) | |||
2894 | return v_table[pxvid].vd; | 2937 | return v_table[pxvid].vd; |
2895 | } | 2938 | } |
2896 | 2939 | ||
2897 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) | 2940 | static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) |
2898 | { | 2941 | { |
2899 | struct timespec now, diff1; | 2942 | struct timespec now, diff1; |
2900 | u64 diff; | 2943 | u64 diff; |
2901 | unsigned long diffms; | 2944 | unsigned long diffms; |
2902 | u32 count; | 2945 | u32 count; |
2903 | 2946 | ||
2904 | if (dev_priv->info->gen != 5) | 2947 | assert_spin_locked(&mchdev_lock); |
2905 | return; | ||
2906 | 2948 | ||
2907 | getrawmonotonic(&now); | 2949 | getrawmonotonic(&now); |
2908 | diff1 = timespec_sub(now, dev_priv->last_time2); | 2950 | diff1 = timespec_sub(now, dev_priv->last_time2); |
@@ -2930,12 +2972,26 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv) | |||
2930 | dev_priv->gfx_power = diff; | 2972 | dev_priv->gfx_power = diff; |
2931 | } | 2973 | } |
2932 | 2974 | ||
2975 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) | ||
2976 | { | ||
2977 | if (dev_priv->info->gen != 5) | ||
2978 | return; | ||
2979 | |||
2980 | spin_lock_irq(&mchdev_lock); | ||
2981 | |||
2982 | __i915_update_gfx_val(dev_priv); | ||
2983 | |||
2984 | spin_unlock_irq(&mchdev_lock); | ||
2985 | } | ||
2986 | |||
2933 | unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) | 2987 | unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) |
2934 | { | 2988 | { |
2935 | unsigned long t, corr, state1, corr2, state2; | 2989 | unsigned long t, corr, state1, corr2, state2; |
2936 | u32 pxvid, ext_v; | 2990 | u32 pxvid, ext_v; |
2937 | 2991 | ||
2938 | pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); | 2992 | assert_spin_locked(&mchdev_lock); |
2993 | |||
2994 | pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4)); | ||
2939 | pxvid = (pxvid >> 24) & 0x7f; | 2995 | pxvid = (pxvid >> 24) & 0x7f; |
2940 | ext_v = pvid_to_extvid(dev_priv, pxvid); | 2996 | ext_v = pvid_to_extvid(dev_priv, pxvid); |
2941 | 2997 | ||
@@ -2960,23 +3016,11 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) | |||
2960 | state2 = (corr2 * state1) / 10000; | 3016 | state2 = (corr2 * state1) / 10000; |
2961 | state2 /= 100; /* convert to mW */ | 3017 | state2 /= 100; /* convert to mW */ |
2962 | 3018 | ||
2963 | i915_update_gfx_val(dev_priv); | 3019 | __i915_update_gfx_val(dev_priv); |
2964 | 3020 | ||
2965 | return dev_priv->gfx_power + state2; | 3021 | return dev_priv->gfx_power + state2; |
2966 | } | 3022 | } |
2967 | 3023 | ||
2968 | /* Global for IPS driver to get at the current i915 device */ | ||
2969 | static struct drm_i915_private *i915_mch_dev; | ||
2970 | /* | ||
2971 | * Lock protecting IPS related data structures | ||
2972 | * - i915_mch_dev | ||
2973 | * - dev_priv->max_delay | ||
2974 | * - dev_priv->min_delay | ||
2975 | * - dev_priv->fmax | ||
2976 | * - dev_priv->gpu_busy | ||
2977 | */ | ||
2978 | static DEFINE_SPINLOCK(mchdev_lock); | ||
2979 | |||
2980 | /** | 3024 | /** |
2981 | * i915_read_mch_val - return value for IPS use | 3025 | * i915_read_mch_val - return value for IPS use |
2982 | * | 3026 | * |
@@ -2988,7 +3032,7 @@ unsigned long i915_read_mch_val(void) | |||
2988 | struct drm_i915_private *dev_priv; | 3032 | struct drm_i915_private *dev_priv; |
2989 | unsigned long chipset_val, graphics_val, ret = 0; | 3033 | unsigned long chipset_val, graphics_val, ret = 0; |
2990 | 3034 | ||
2991 | spin_lock(&mchdev_lock); | 3035 | spin_lock_irq(&mchdev_lock); |
2992 | if (!i915_mch_dev) | 3036 | if (!i915_mch_dev) |
2993 | goto out_unlock; | 3037 | goto out_unlock; |
2994 | dev_priv = i915_mch_dev; | 3038 | dev_priv = i915_mch_dev; |
@@ -2999,7 +3043,7 @@ unsigned long i915_read_mch_val(void) | |||
2999 | ret = chipset_val + graphics_val; | 3043 | ret = chipset_val + graphics_val; |
3000 | 3044 | ||
3001 | out_unlock: | 3045 | out_unlock: |
3002 | spin_unlock(&mchdev_lock); | 3046 | spin_unlock_irq(&mchdev_lock); |
3003 | 3047 | ||
3004 | return ret; | 3048 | return ret; |
3005 | } | 3049 | } |
@@ -3015,7 +3059,7 @@ bool i915_gpu_raise(void) | |||
3015 | struct drm_i915_private *dev_priv; | 3059 | struct drm_i915_private *dev_priv; |
3016 | bool ret = true; | 3060 | bool ret = true; |
3017 | 3061 | ||
3018 | spin_lock(&mchdev_lock); | 3062 | spin_lock_irq(&mchdev_lock); |
3019 | if (!i915_mch_dev) { | 3063 | if (!i915_mch_dev) { |
3020 | ret = false; | 3064 | ret = false; |
3021 | goto out_unlock; | 3065 | goto out_unlock; |
@@ -3026,7 +3070,7 @@ bool i915_gpu_raise(void) | |||
3026 | dev_priv->max_delay--; | 3070 | dev_priv->max_delay--; |
3027 | 3071 | ||
3028 | out_unlock: | 3072 | out_unlock: |
3029 | spin_unlock(&mchdev_lock); | 3073 | spin_unlock_irq(&mchdev_lock); |
3030 | 3074 | ||
3031 | return ret; | 3075 | return ret; |
3032 | } | 3076 | } |
@@ -3043,7 +3087,7 @@ bool i915_gpu_lower(void) | |||
3043 | struct drm_i915_private *dev_priv; | 3087 | struct drm_i915_private *dev_priv; |
3044 | bool ret = true; | 3088 | bool ret = true; |
3045 | 3089 | ||
3046 | spin_lock(&mchdev_lock); | 3090 | spin_lock_irq(&mchdev_lock); |
3047 | if (!i915_mch_dev) { | 3091 | if (!i915_mch_dev) { |
3048 | ret = false; | 3092 | ret = false; |
3049 | goto out_unlock; | 3093 | goto out_unlock; |
@@ -3054,7 +3098,7 @@ bool i915_gpu_lower(void) | |||
3054 | dev_priv->max_delay++; | 3098 | dev_priv->max_delay++; |
3055 | 3099 | ||
3056 | out_unlock: | 3100 | out_unlock: |
3057 | spin_unlock(&mchdev_lock); | 3101 | spin_unlock_irq(&mchdev_lock); |
3058 | 3102 | ||
3059 | return ret; | 3103 | return ret; |
3060 | } | 3104 | } |
@@ -3068,17 +3112,20 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower); | |||
3068 | bool i915_gpu_busy(void) | 3112 | bool i915_gpu_busy(void) |
3069 | { | 3113 | { |
3070 | struct drm_i915_private *dev_priv; | 3114 | struct drm_i915_private *dev_priv; |
3115 | struct intel_ring_buffer *ring; | ||
3071 | bool ret = false; | 3116 | bool ret = false; |
3117 | int i; | ||
3072 | 3118 | ||
3073 | spin_lock(&mchdev_lock); | 3119 | spin_lock_irq(&mchdev_lock); |
3074 | if (!i915_mch_dev) | 3120 | if (!i915_mch_dev) |
3075 | goto out_unlock; | 3121 | goto out_unlock; |
3076 | dev_priv = i915_mch_dev; | 3122 | dev_priv = i915_mch_dev; |
3077 | 3123 | ||
3078 | ret = dev_priv->busy; | 3124 | for_each_ring(ring, dev_priv, i) |
3125 | ret |= !list_empty(&ring->request_list); | ||
3079 | 3126 | ||
3080 | out_unlock: | 3127 | out_unlock: |
3081 | spin_unlock(&mchdev_lock); | 3128 | spin_unlock_irq(&mchdev_lock); |
3082 | 3129 | ||
3083 | return ret; | 3130 | return ret; |
3084 | } | 3131 | } |
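i915_gpu_busy() switches from the cached dev_priv->busy flag to deriving busyness from the rings' request lists, so the answer is computed from ground truth at query time. A sketch of that reduction (the ring type here is an illustrative stand-in for for_each_ring() over intel_ring_buffer):

#include <stdbool.h>
#include <stddef.h>

struct request;                          /* opaque */
struct ring { struct request *requests; /* NULL when the list is empty */ };

static bool gpu_busy(const struct ring *rings, size_t nrings)
{
	bool busy = false;

	for (size_t i = 0; i < nrings; i++)
		busy |= rings[i].requests != NULL;  /* !list_empty() */
	return busy;
}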
@@ -3095,7 +3142,7 @@ bool i915_gpu_turbo_disable(void) | |||
3095 | struct drm_i915_private *dev_priv; | 3142 | struct drm_i915_private *dev_priv; |
3096 | bool ret = true; | 3143 | bool ret = true; |
3097 | 3144 | ||
3098 | spin_lock(&mchdev_lock); | 3145 | spin_lock_irq(&mchdev_lock); |
3099 | if (!i915_mch_dev) { | 3146 | if (!i915_mch_dev) { |
3100 | ret = false; | 3147 | ret = false; |
3101 | goto out_unlock; | 3148 | goto out_unlock; |
@@ -3108,7 +3155,7 @@ bool i915_gpu_turbo_disable(void) | |||
3108 | ret = false; | 3155 | ret = false; |
3109 | 3156 | ||
3110 | out_unlock: | 3157 | out_unlock: |
3111 | spin_unlock(&mchdev_lock); | 3158 | spin_unlock_irq(&mchdev_lock); |
3112 | 3159 | ||
3113 | return ret; | 3160 | return ret; |
3114 | } | 3161 | } |
@@ -3136,19 +3183,20 @@ ips_ping_for_i915_load(void) | |||
3136 | 3183 | ||
3137 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv) | 3184 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv) |
3138 | { | 3185 | { |
3139 | spin_lock(&mchdev_lock); | 3186 | /* We only register the i915 ips part with intel-ips once everything is |
3187 | * set up, to avoid intel-ips sneaking in and reading bogus values. */ | ||
3188 | spin_lock_irq(&mchdev_lock); | ||
3140 | i915_mch_dev = dev_priv; | 3189 | i915_mch_dev = dev_priv; |
3141 | dev_priv->mchdev_lock = &mchdev_lock; | 3190 | spin_unlock_irq(&mchdev_lock); |
3142 | spin_unlock(&mchdev_lock); | ||
3143 | 3191 | ||
3144 | ips_ping_for_i915_load(); | 3192 | ips_ping_for_i915_load(); |
3145 | } | 3193 | } |
3146 | 3194 | ||
3147 | void intel_gpu_ips_teardown(void) | 3195 | void intel_gpu_ips_teardown(void) |
3148 | { | 3196 | { |
3149 | spin_lock(&mchdev_lock); | 3197 | spin_lock_irq(&mchdev_lock); |
3150 | i915_mch_dev = NULL; | 3198 | i915_mch_dev = NULL; |
3151 | spin_unlock(&mchdev_lock); | 3199 | spin_unlock_irq(&mchdev_lock); |
3152 | } | 3200 | } |
3153 | static void intel_init_emon(struct drm_device *dev) | 3201 | static void intel_init_emon(struct drm_device *dev) |
3154 | { | 3202 | { |
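All the exported IPS hooks in these hunks move from spin_lock() to spin_lock_irq() so the lock is taken consistently with the irq-disabled acquisitions added in the DRPS code, and registration is deferred until the device is fully set up. The publish/unpublish shape, sketched in userspace C with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stddef.h>

struct device { int ready; };

static pthread_mutex_t mchdev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct device *mch_dev;      /* NULL until fully initialised */

static void ips_init(struct device *dev)
{
	/* publish only after *dev is complete, so a concurrent reader
	 * can never observe a half-built device */
	pthread_mutex_lock(&mchdev_lock);
	mch_dev = dev;
	pthread_mutex_unlock(&mchdev_lock);
}

static void ips_teardown(void)
{
	pthread_mutex_lock(&mchdev_lock);
	mch_dev = NULL;             /* readers must re-check for NULL */
	pthread_mutex_unlock(&mchdev_lock);
}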
@@ -3728,42 +3776,6 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
3728 | dev_priv->display.init_pch_clock_gating(dev); | 3776 | dev_priv->display.init_pch_clock_gating(dev); |
3729 | } | 3777 | } |
3730 | 3778 | ||
3731 | static void gen6_sanitize_pm(struct drm_device *dev) | ||
3732 | { | ||
3733 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3734 | u32 limits, delay, old; | ||
3735 | |||
3736 | gen6_gt_force_wake_get(dev_priv); | ||
3737 | |||
3738 | old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS); | ||
3739 | /* Make sure we continue to get interrupts | ||
3740 | * until we hit the minimum or maximum frequencies. | ||
3741 | */ | ||
3742 | limits &= ~(0x3f << 16 | 0x3f << 24); | ||
3743 | delay = dev_priv->cur_delay; | ||
3744 | if (delay < dev_priv->max_delay) | ||
3745 | limits |= (dev_priv->max_delay & 0x3f) << 24; | ||
3746 | if (delay > dev_priv->min_delay) | ||
3747 | limits |= (dev_priv->min_delay & 0x3f) << 16; | ||
3748 | |||
3749 | if (old != limits) { | ||
3750 | /* Note that the known failure case is to read back 0. */ | ||
3751 | DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS " | ||
3752 | "expected %08x, was %08x\n", limits, old); | ||
3753 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); | ||
3754 | } | ||
3755 | |||
3756 | gen6_gt_force_wake_put(dev_priv); | ||
3757 | } | ||
3758 | |||
3759 | void intel_sanitize_pm(struct drm_device *dev) | ||
3760 | { | ||
3761 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3762 | |||
3763 | if (dev_priv->display.sanitize_pm) | ||
3764 | dev_priv->display.sanitize_pm(dev); | ||
3765 | } | ||
3766 | |||
3767 | /* Starting with Haswell, we have different power wells for | 3779 | /* Starting with Haswell, we have different power wells for |
3768 | * different parts of the GPU. This attempts to enable them all. | 3780 | * different parts of the GPU. This attempts to enable them all. |
3769 | */ | 3781 | */ |
@@ -3849,7 +3861,6 @@ void intel_init_pm(struct drm_device *dev) | |||
3849 | dev_priv->display.update_wm = NULL; | 3861 | dev_priv->display.update_wm = NULL; |
3850 | } | 3862 | } |
3851 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; | 3863 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; |
3852 | dev_priv->display.sanitize_pm = gen6_sanitize_pm; | ||
3853 | } else if (IS_IVYBRIDGE(dev)) { | 3864 | } else if (IS_IVYBRIDGE(dev)) { |
3854 | /* FIXME: detect B0+ stepping and use auto training */ | 3865 | /* FIXME: detect B0+ stepping and use auto training */ |
3855 | if (SNB_READ_WM0_LATENCY()) { | 3866 | if (SNB_READ_WM0_LATENCY()) { |
@@ -3861,7 +3872,6 @@ void intel_init_pm(struct drm_device *dev) | |||
3861 | dev_priv->display.update_wm = NULL; | 3872 | dev_priv->display.update_wm = NULL; |
3862 | } | 3873 | } |
3863 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; | 3874 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; |
3864 | dev_priv->display.sanitize_pm = gen6_sanitize_pm; | ||
3865 | } else if (IS_HASWELL(dev)) { | 3875 | } else if (IS_HASWELL(dev)) { |
3866 | if (SNB_READ_WM0_LATENCY()) { | 3876 | if (SNB_READ_WM0_LATENCY()) { |
3867 | dev_priv->display.update_wm = sandybridge_update_wm; | 3877 | dev_priv->display.update_wm = sandybridge_update_wm; |
@@ -3873,7 +3883,6 @@ void intel_init_pm(struct drm_device *dev) | |||
3873 | dev_priv->display.update_wm = NULL; | 3883 | dev_priv->display.update_wm = NULL; |
3874 | } | 3884 | } |
3875 | dev_priv->display.init_clock_gating = haswell_init_clock_gating; | 3885 | dev_priv->display.init_clock_gating = haswell_init_clock_gating; |
3876 | dev_priv->display.sanitize_pm = gen6_sanitize_pm; | ||
3877 | } else | 3886 | } else |
3878 | dev_priv->display.update_wm = NULL; | 3887 | dev_priv->display.update_wm = NULL; |
3879 | } else if (IS_VALLEYVIEW(dev)) { | 3888 | } else if (IS_VALLEYVIEW(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e2a73b38abe9..c828169c73ae 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -218,11 +218,6 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, | |||
218 | u32 scratch_addr = pc->gtt_offset + 128; | 218 | u32 scratch_addr = pc->gtt_offset + 128; |
219 | int ret; | 219 | int ret; |
220 | 220 | ||
221 | /* Force SNB workarounds for PIPE_CONTROL flushes */ | ||
222 | ret = intel_emit_post_sync_nonzero_flush(ring); | ||
223 | if (ret) | ||
224 | return ret; | ||
225 | |||
226 | /* Just flush everything. Experiments have shown that reducing the | 221 | /* Just flush everything. Experiments have shown that reducing the |
227 | * number of bits based on the write domains has little performance | 222 | * number of bits based on the write domains has little performance |
228 | * impact. | 223 | * impact. |
@@ -262,6 +257,20 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, | |||
262 | return 0; | 257 | return 0; |
263 | } | 258 | } |
264 | 259 | ||
260 | static int | ||
261 | gen6_render_ring_flush__wa(struct intel_ring_buffer *ring, | ||
262 | u32 invalidate_domains, u32 flush_domains) | ||
263 | { | ||
264 | int ret; | ||
265 | |||
266 | /* Force SNB workarounds for PIPE_CONTROL flushes */ | ||
267 | ret = intel_emit_post_sync_nonzero_flush(ring); | ||
268 | if (ret) | ||
269 | return ret; | ||
270 | |||
271 | return gen6_render_ring_flush(ring, invalidate_domains, flush_domains); | ||
272 | } | ||
273 | |||
265 | static void ring_write_tail(struct intel_ring_buffer *ring, | 274 | static void ring_write_tail(struct intel_ring_buffer *ring, |
266 | u32 value) | 275 | u32 value) |
267 | { | 276 | { |
@@ -462,7 +471,7 @@ static int init_render_ring(struct intel_ring_buffer *ring) | |||
462 | if (INTEL_INFO(dev)->gen >= 6) | 471 | if (INTEL_INFO(dev)->gen >= 6) |
463 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); | 472 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); |
464 | 473 | ||
465 | if (IS_IVYBRIDGE(dev)) | 474 | if (HAS_L3_GPU_CACHE(dev)) |
466 | I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); | 475 | I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); |
467 | 476 | ||
468 | return ret; | 477 | return ret; |
@@ -628,26 +637,24 @@ pc_render_add_request(struct intel_ring_buffer *ring, | |||
628 | } | 637 | } |
629 | 638 | ||
630 | static u32 | 639 | static u32 |
631 | gen6_ring_get_seqno(struct intel_ring_buffer *ring) | 640 | gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) |
632 | { | 641 | { |
633 | struct drm_device *dev = ring->dev; | ||
634 | |||
635 | /* Workaround to force correct ordering between irq and seqno writes on | 642 | /* Workaround to force correct ordering between irq and seqno writes on |
636 | * ivb (and maybe also on snb) by reading from a CS register (like | 643 | * ivb (and maybe also on snb) by reading from a CS register (like |
637 | * ACTHD) before reading the status page. */ | 644 | * ACTHD) before reading the status page. */ |
638 | if (IS_GEN6(dev) || IS_GEN7(dev)) | 645 | if (!lazy_coherency) |
639 | intel_ring_get_active_head(ring); | 646 | intel_ring_get_active_head(ring); |
640 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 647 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
641 | } | 648 | } |
642 | 649 | ||
643 | static u32 | 650 | static u32 |
644 | ring_get_seqno(struct intel_ring_buffer *ring) | 651 | ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) |
645 | { | 652 | { |
646 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 653 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
647 | } | 654 | } |
648 | 655 | ||
649 | static u32 | 656 | static u32 |
650 | pc_render_get_seqno(struct intel_ring_buffer *ring) | 657 | pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) |
651 | { | 658 | { |
652 | struct pipe_control *pc = ring->private; | 659 | struct pipe_control *pc = ring->private; |
653 | return pc->cpu_page[0]; | 660 | return pc->cpu_page[0]; |
@@ -852,7 +859,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) | |||
852 | 859 | ||
853 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 860 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
854 | if (ring->irq_refcount++ == 0) { | 861 | if (ring->irq_refcount++ == 0) { |
855 | if (IS_IVYBRIDGE(dev) && ring->id == RCS) | 862 | if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) |
856 | I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | | 863 | I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | |
857 | GEN6_RENDER_L3_PARITY_ERROR)); | 864 | GEN6_RENDER_L3_PARITY_ERROR)); |
858 | else | 865 | else |
@@ -875,7 +882,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) | |||
875 | 882 | ||
876 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 883 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
877 | if (--ring->irq_refcount == 0) { | 884 | if (--ring->irq_refcount == 0) { |
878 | if (IS_IVYBRIDGE(dev) && ring->id == RCS) | 885 | if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) |
879 | I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); | 886 | I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); |
880 | else | 887 | else |
881 | I915_WRITE_IMR(ring, ~0); | 888 | I915_WRITE_IMR(ring, ~0); |
@@ -1010,7 +1017,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
1010 | ring->dev = dev; | 1017 | ring->dev = dev; |
1011 | INIT_LIST_HEAD(&ring->active_list); | 1018 | INIT_LIST_HEAD(&ring->active_list); |
1012 | INIT_LIST_HEAD(&ring->request_list); | 1019 | INIT_LIST_HEAD(&ring->request_list); |
1013 | INIT_LIST_HEAD(&ring->gpu_write_list); | ||
1014 | ring->size = 32 * PAGE_SIZE; | 1020 | ring->size = 32 * PAGE_SIZE; |
1015 | 1021 | ||
1016 | init_waitqueue_head(&ring->irq_queue); | 1022 | init_waitqueue_head(&ring->irq_queue); |
@@ -1380,6 +1386,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
1380 | if (INTEL_INFO(dev)->gen >= 6) { | 1386 | if (INTEL_INFO(dev)->gen >= 6) { |
1381 | ring->add_request = gen6_add_request; | 1387 | ring->add_request = gen6_add_request; |
1382 | ring->flush = gen6_render_ring_flush; | 1388 | ring->flush = gen6_render_ring_flush; |
1389 | if (INTEL_INFO(dev)->gen == 6) | ||
1390 | ring->flush = gen6_render_ring_flush__wa; | ||
1383 | ring->irq_get = gen6_ring_get_irq; | 1391 | ring->irq_get = gen6_ring_get_irq; |
1384 | ring->irq_put = gen6_ring_put_irq; | 1392 | ring->irq_put = gen6_ring_put_irq; |
1385 | ring->irq_enable_mask = GT_USER_INTERRUPT; | 1393 | ring->irq_enable_mask = GT_USER_INTERRUPT; |
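The SNB post-sync workaround was pulled out of gen6_render_ring_flush() into the __wa wrapper earlier in this file; the hunk above wires it up by first installing the common flush and then overriding the function pointer for gen == 6 only. The pattern, as a compilable sketch:

struct ring;
typedef int (*flush_fn)(struct ring *r, unsigned inval, unsigned flush);
struct ring { flush_fn flush; };

static int flush_common(struct ring *r, unsigned inval, unsigned flush)
{
	(void)r; (void)inval; (void)flush;
	return 0;                        /* would emit the PIPE_CONTROL here */
}

static int flush_snb_wa(struct ring *r, unsigned inval, unsigned flush)
{
	/* emit the post-sync non-zero workaround first ... */
	return flush_common(r, inval, flush);    /* ... then delegate */
}

static void init_render_ring(struct ring *r, int gen)
{
	r->flush = flush_common;         /* gen6+ default */
	if (gen == 6)
		r->flush = flush_snb_wa; /* only SNB pays the workaround cost */
}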
@@ -1481,7 +1489,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | |||
1481 | ring->dev = dev; | 1489 | ring->dev = dev; |
1482 | INIT_LIST_HEAD(&ring->active_list); | 1490 | INIT_LIST_HEAD(&ring->active_list); |
1483 | INIT_LIST_HEAD(&ring->request_list); | 1491 | INIT_LIST_HEAD(&ring->request_list); |
1484 | INIT_LIST_HEAD(&ring->gpu_write_list); | ||
1485 | 1492 | ||
1486 | ring->size = size; | 1493 | ring->size = size; |
1487 | ring->effective_size = ring->size; | 1494 | ring->effective_size = ring->size; |
@@ -1574,3 +1581,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) | |||
1574 | 1581 | ||
1575 | return intel_init_ring_buffer(dev, ring); | 1582 | return intel_init_ring_buffer(dev, ring); |
1576 | } | 1583 | } |
1584 | |||
1585 | int | ||
1586 | intel_ring_flush_all_caches(struct intel_ring_buffer *ring) | ||
1587 | { | ||
1588 | int ret; | ||
1589 | |||
1590 | if (!ring->gpu_caches_dirty) | ||
1591 | return 0; | ||
1592 | |||
1593 | ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); | ||
1594 | if (ret) | ||
1595 | return ret; | ||
1596 | |||
1597 | trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); | ||
1598 | |||
1599 | ring->gpu_caches_dirty = false; | ||
1600 | return 0; | ||
1601 | } | ||
1602 | |||
1603 | int | ||
1604 | intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) | ||
1605 | { | ||
1606 | uint32_t flush_domains; | ||
1607 | int ret; | ||
1608 | |||
1609 | flush_domains = 0; | ||
1610 | if (ring->gpu_caches_dirty) | ||
1611 | flush_domains = I915_GEM_GPU_DOMAINS; | ||
1612 | |||
1613 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | ||
1614 | if (ret) | ||
1615 | return ret; | ||
1616 | |||
1617 | trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | ||
1618 | |||
1619 | ring->gpu_caches_dirty = false; | ||
1620 | return 0; | ||
1621 | } | ||
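The two helpers added above centralise flush bookkeeping around a gpu_caches_dirty flag: a flush is skipped entirely while the caches are known clean, and invalidation only adds flush domains when something was actually written. A userspace model of the flag's life cycle (emit() stands in for ring->flush(), and the domain constant is an illustrative stand-in):

#include <stdbool.h>

#define GPU_DOMAINS 0x3f            /* stand-in for I915_GEM_GPU_DOMAINS */

struct ring { bool gpu_caches_dirty; };

static int emit(struct ring *r, unsigned invalidate, unsigned flush)
{
	(void)r; (void)invalidate; (void)flush;
	return 0;                   /* would emit the flush command here */
}

static int flush_all_caches(struct ring *r)
{
	if (!r->gpu_caches_dirty)
		return 0;           /* nothing written since the last flush */

	int ret = emit(r, 0, GPU_DOMAINS);
	if (ret)
		return ret;
	r->gpu_caches_dirty = false;
	return 0;
}

static int invalidate_all_caches(struct ring *r)
{
	/* always invalidate before reading; flush only if dirty */
	unsigned flush = r->gpu_caches_dirty ? GPU_DOMAINS : 0;

	int ret = emit(r, GPU_DOMAINS, flush);
	if (ret)
		return ret;
	r->gpu_caches_dirty = false;
	return 0;
}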
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 1d3c81fdad92..2ea7a311a1f0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -72,7 +72,14 @@ struct intel_ring_buffer { | |||
72 | u32 flush_domains); | 72 | u32 flush_domains); |
73 | int (*add_request)(struct intel_ring_buffer *ring, | 73 | int (*add_request)(struct intel_ring_buffer *ring, |
74 | u32 *seqno); | 74 | u32 *seqno); |
75 | u32 (*get_seqno)(struct intel_ring_buffer *ring); | 75 | /* Some chipsets are not quite as coherent as advertised and need |
76 | * an expensive kick to force a true read of the up-to-date seqno. | ||
77 | * However, the up-to-date seqno is not always required and the last | ||
78 | * seen value is good enough. Note that the seqno will always be | ||
79 | * monotonic, even if not coherent. | ||
80 | */ | ||
81 | u32 (*get_seqno)(struct intel_ring_buffer *ring, | ||
82 | bool lazy_coherency); | ||
76 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, | 83 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, |
77 | u32 offset, u32 length); | 84 | u32 offset, u32 length); |
78 | void (*cleanup)(struct intel_ring_buffer *ring); | 85 | void (*cleanup)(struct intel_ring_buffer *ring); |
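The new lazy_coherency flag in the hunk above encodes the trade-off spelled out in the comment: callers that can tolerate a stale (but monotonic) seqno skip the expensive register-read kick. A caller-side sketch of how one might pick the flag (the names and the wrap-safe comparison are illustrative, not the driver's exact helpers):

#include <stdbool.h>
#include <stdint.h>

struct ring;
struct ring_ops {
	uint32_t (*get_seqno)(struct ring *r, bool lazy_coherency);
};

static bool seqno_passed(uint32_t seqno, uint32_t target)
{
	return (int32_t)(seqno - target) >= 0;  /* wrap-safe comparison */
}

/* In a polling wait loop a stale-but-monotonic read only delays the
 * wakeup by one iteration, so lazy_coherency=true is safe there; a
 * one-shot completion check must pass false and pay for the kick. */
static bool request_complete(struct ring *r, const struct ring_ops *ops,
			     uint32_t target, bool polling)
{
	return seqno_passed(ops->get_seqno(r, polling), target);
}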
@@ -101,15 +108,6 @@ struct intel_ring_buffer { | |||
101 | struct list_head request_list; | 108 | struct list_head request_list; |
102 | 109 | ||
103 | /** | 110 | /** |
104 | * List of objects currently pending a GPU write flush. | ||
105 | * | ||
106 | * All elements on this list will belong to either the | ||
107 | * active_list or flushing_list, last_rendering_seqno can | ||
108 | * be used to differentiate between the two elements. | ||
109 | */ | ||
110 | struct list_head gpu_write_list; | ||
111 | |||
112 | /** | ||
113 | * Do we have some not yet emitted requests outstanding? | 111 | * Do we have some not yet emitted requests outstanding? |
114 | */ | 112 | */ |
115 | u32 outstanding_lazy_request; | 113 | u32 outstanding_lazy_request; |
@@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring, | |||
204 | void intel_ring_advance(struct intel_ring_buffer *ring); | 202 | void intel_ring_advance(struct intel_ring_buffer *ring); |
205 | 203 | ||
206 | u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); | 204 | u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); |
205 | int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); | ||
206 | int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); | ||
207 | 207 | ||
208 | int intel_init_render_ring_buffer(struct drm_device *dev); | 208 | int intel_init_render_ring_buffer(struct drm_device *dev); |
209 | int intel_init_bsd_ring_buffer(struct drm_device *dev); | 209 | int intel_init_bsd_ring_buffer(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index d81bb0bf2885..434b1d1d3c84 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1345,7 +1345,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector) | |||
1345 | } | 1345 | } |
1346 | } else | 1346 | } else |
1347 | status = connector_status_disconnected; | 1347 | status = connector_status_disconnected; |
1348 | connector->display_info.raw_edid = NULL; | ||
1349 | kfree(edid); | 1348 | kfree(edid); |
1350 | } | 1349 | } |
1351 | 1350 | ||
@@ -1419,7 +1418,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1419 | else | 1418 | else |
1420 | ret = connector_status_disconnected; | 1419 | ret = connector_status_disconnected; |
1421 | 1420 | ||
1422 | connector->display_info.raw_edid = NULL; | ||
1423 | kfree(edid); | 1421 | kfree(edid); |
1424 | } else | 1422 | } else |
1425 | ret = connector_status_connected; | 1423 | ret = connector_status_connected; |
@@ -1465,7 +1463,6 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1465 | drm_add_edid_modes(connector, edid); | 1463 | drm_add_edid_modes(connector, edid); |
1466 | } | 1464 | } |
1467 | 1465 | ||
1468 | connector->display_info.raw_edid = NULL; | ||
1469 | kfree(edid); | 1466 | kfree(edid); |
1470 | } | 1467 | } |
1471 | } | 1468 | } |
@@ -2082,8 +2079,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2082 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | 2079 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; |
2083 | intel_sdvo->is_hdmi = true; | 2080 | intel_sdvo->is_hdmi = true; |
2084 | } | 2081 | } |
2085 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2082 | intel_sdvo->base.cloneable = true; |
2086 | (1 << INTEL_ANALOG_CLONE_BIT)); | ||
2087 | 2083 | ||
2088 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); | 2084 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
2089 | if (intel_sdvo->is_hdmi) | 2085 | if (intel_sdvo->is_hdmi) |
@@ -2114,7 +2110,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) | |||
2114 | 2110 | ||
2115 | intel_sdvo->is_tv = true; | 2111 | intel_sdvo->is_tv = true; |
2116 | intel_sdvo->base.needs_tv_clock = true; | 2112 | intel_sdvo->base.needs_tv_clock = true; |
2117 | intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | 2113 | intel_sdvo->base.cloneable = false; |
2118 | 2114 | ||
2119 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); | 2115 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
2120 | 2116 | ||
@@ -2157,8 +2153,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) | |||
2157 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; | 2153 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; |
2158 | } | 2154 | } |
2159 | 2155 | ||
2160 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2156 | intel_sdvo->base.cloneable = true; |
2161 | (1 << INTEL_ANALOG_CLONE_BIT)); | ||
2162 | 2157 | ||
2163 | intel_sdvo_connector_init(intel_sdvo_connector, | 2158 | intel_sdvo_connector_init(intel_sdvo_connector, |
2164 | intel_sdvo); | 2159 | intel_sdvo); |
@@ -2190,8 +2185,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) | |||
2190 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; | 2185 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; |
2191 | } | 2186 | } |
2192 | 2187 | ||
2193 | intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | | 2188 | /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling, |
2194 | (1 << INTEL_SDVO_LVDS_CLONE_BIT)); | 2189 | * as opposed to native LVDS, where we upscale with the panel-fitter |
2190 | * (and hence only the native LVDS resolution could be cloned). */ | ||
2191 | intel_sdvo->base.cloneable = true; | ||
2195 | 2192 | ||
2196 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); | 2193 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
2197 | if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) | 2194 | if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index befce6c49704..1a0bab07699e 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1622,7 +1622,7 @@ intel_tv_init(struct drm_device *dev) | |||
1622 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 1622 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
1623 | intel_encoder->type = INTEL_OUTPUT_TVOUT; | 1623 | intel_encoder->type = INTEL_OUTPUT_TVOUT; |
1624 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 1624 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
1625 | intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); | 1625 | intel_encoder->cloneable = false; |
1626 | intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); | 1626 | intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); |
1627 | intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); | 1627 | intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); |
1628 | intel_tv->type = DRM_MODE_CONNECTOR_Unknown; | 1628 | intel_tv->type = DRM_MODE_CONNECTOR_Unknown; |