|  |  |  |
|---|---|---|
| author | Dave Airlie <airlied@redhat.com> | 2012-09-02 22:05:01 -0400 |
| committer | Dave Airlie <airlied@redhat.com> | 2012-09-02 22:05:01 -0400 |
| commit | 65983bd605a12edd16a4f39f513aad65f1cad062 (patch) | |
| tree | bf8c84598565c26e251efb9cb591977446e7e76b | |
| parent | 93bb70e0c00f1be4cc857e4d8375c44058cce71e (diff) | |
| parent | d7c3b937bdf45f0b844400b7bf6fd3ed50bac604 (diff) | |
Merge branch 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:
"New stuff for -next. Highlights:
- prep patches for the modeset rework. Note that one of those patches
touches the fb helper in the common drm code.
- hsw hdmi audio support (Wang Xingchao)
- improved instdone dumping for gen7 (Ben)
- unbound tracking and a few follow-up patches from Chris
- dma_buf->begin/end_cpu_access plus fix for drm/udl (Dave)
- improve mmio error reporting for hsw
- prep patch for WQ_NON_REENTRANT removal (Tejun Heo)
"
* 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel: (41 commits)
drm/i915: Remove __GFP_NO_KSWAPD
drm/i915: disable rc6 on ilk when vt-d is enabled
drm/i915: Avoid unbinding due to an interrupted pin_and_fence during execbuffer
drm/i915: Use new INSTDONE registers (Gen7+)
drm/i915: Add new INSTDONE registers
drm/i915: Extract reading INSTDONE
drm/i915: Use a non-blocking wait for set-to-domain ioctl
drm/i915: Juggle code order to ease flow of the next patch
drm/i915: Use cpu relocations if the object is in the GTT but not mappable
drm/i915: Extract general object init routine
drm/i915: Protect private gem objects from truncate (such as imported dmabuf)
drm/i915: Only pwrite through the GTT if there is space in the aperture
i915: use alloc_ordered_workqueue() instead of explicit UNBOUND w/ max_active = 1
drm/i915: Find unclaimed MMIO writes.
drm/i915: Add ERR_INT to gen7 error state
drm/i915: Cantiga+ cannot handle a hsync front porch of 0
drm/i915: fix reassignment of variable "intel_dp->DP"
drm/i915: Try harder to allocate an mmap_offset
drm/i915: Show pin count in debugfs
drm/i915: Show (count, size) of purgeable objects in i915_gem_objects
...
34 files changed, 1052 insertions, 807 deletions
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 061d26dd0751..eb79515797d9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1230,7 +1230,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) | |||
1230 | struct drm_device *dev = fb_helper->dev; | 1230 | struct drm_device *dev = fb_helper->dev; |
1231 | struct drm_fb_helper_crtc **crtcs; | 1231 | struct drm_fb_helper_crtc **crtcs; |
1232 | struct drm_display_mode **modes; | 1232 | struct drm_display_mode **modes; |
1233 | struct drm_encoder *encoder; | ||
1234 | struct drm_mode_set *modeset; | 1233 | struct drm_mode_set *modeset; |
1235 | bool *enabled; | 1234 | bool *enabled; |
1236 | int width, height; | 1235 | int width, height; |
@@ -1241,11 +1240,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) | |||
1241 | width = dev->mode_config.max_width; | 1240 | width = dev->mode_config.max_width; |
1242 | height = dev->mode_config.max_height; | 1241 | height = dev->mode_config.max_height; |
1243 | 1242 | ||
1244 | /* clean out all the encoder/crtc combos */ | ||
1245 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
1246 | encoder->crtc = NULL; | ||
1247 | } | ||
1248 | |||
1249 | crtcs = kcalloc(dev->mode_config.num_connector, | 1243 | crtcs = kcalloc(dev->mode_config.num_connector, |
1250 | sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); | 1244 | sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); |
1251 | modes = kcalloc(dev->mode_config.num_connector, | 1245 | modes = kcalloc(dev->mode_config.num_connector, |
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 0c8ac4d92deb..0fa839e439b3 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -58,13 +58,12 @@ struct intel_dvo_dev_ops { | |||
58 | void (*create_resources)(struct intel_dvo_device *dvo); | 58 | void (*create_resources)(struct intel_dvo_device *dvo); |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * Turn on/off output or set intermediate power levels if available. | 61 | * Turn on/off output. |
62 | * | 62 | * |
63 | * Unsupported intermediate modes drop to the lower power setting. | 63 | * Because none of our dvo drivers support an intermediate power levels, |
64 | * If the mode is DPMSModeOff, the output must be disabled, | 64 | * we don't expose this in the interface. |
65 | * as the DPLL may be disabled afterwards. | ||
66 | */ | 65 | */ |
67 | void (*dpms)(struct intel_dvo_device *dvo, int mode); | 66 | void (*dpms)(struct intel_dvo_device *dvo, bool enable); |
68 | 67 | ||
69 | /* | 68 | /* |
70 | * Callback for testing a video mode for a given output. | 69 | * Callback for testing a video mode for a given output. |
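The dvo.h hunk above narrows the dpms hook from a DPMS mode integer down to a simple on/off flag, and the per-chip DVO drivers that follow are converted to match. As a rough sketch of the resulting pattern (hypothetical foo_* names standing in for a chip driver such as ch7xxx; register details elided):

```c
/* Sketch only: shape of a DVO chip driver's dpms hook after the
 * "int mode" -> "bool enable" conversion. foo_dpms, foo_writeb, FOO_PM
 * and FOO_PM_ENABLE are made-up placeholders, not real i915 symbols.
 */
static void foo_dpms(struct intel_dvo_device *dvo, bool enable)
{
	if (enable)
		foo_writeb(dvo, FOO_PM, FOO_PM_ENABLE);	/* power the output up */
	else
		foo_writeb(dvo, FOO_PM, 0);		/* power the output down */
}
```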
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1ca799a1e1fc..71e7650a2994 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -163,7 +163,7 @@ struct ch7017_priv { | |||
163 | }; | 163 | }; |
164 | 164 | ||
165 | static void ch7017_dump_regs(struct intel_dvo_device *dvo); | 165 | static void ch7017_dump_regs(struct intel_dvo_device *dvo); |
166 | static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); | 166 | static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable); |
167 | 167 | ||
168 | static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) | 168 | static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) |
169 | { | 169 | { |
@@ -309,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo, | |||
309 | lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | | 309 | lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | |
310 | (mode->hdisplay & 0x0700) >> 8; | 310 | (mode->hdisplay & 0x0700) >> 8; |
311 | 311 | ||
312 | ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); | 312 | ch7017_dpms(dvo, false); |
313 | ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, | 313 | ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, |
314 | horizontal_active_pixel_input); | 314 | horizontal_active_pixel_input); |
315 | ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, | 315 | ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, |
@@ -331,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo, | |||
331 | } | 331 | } |
332 | 332 | ||
333 | /* set the CH7017 power state */ | 333 | /* set the CH7017 power state */ |
334 | static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) | 334 | static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) |
335 | { | 335 | { |
336 | uint8_t val; | 336 | uint8_t val; |
337 | 337 | ||
@@ -345,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) | |||
345 | CH7017_DAC3_POWER_DOWN | | 345 | CH7017_DAC3_POWER_DOWN | |
346 | CH7017_TV_POWER_DOWN_EN); | 346 | CH7017_TV_POWER_DOWN_EN); |
347 | 347 | ||
348 | if (mode == DRM_MODE_DPMS_ON) { | 348 | if (enable) { |
349 | /* Turn on the LVDS */ | 349 | /* Turn on the LVDS */ |
350 | ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, | 350 | ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, |
351 | val & ~CH7017_LVDS_POWER_DOWN_EN); | 351 | val & ~CH7017_LVDS_POWER_DOWN_EN); |
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 4a036600e806..c1dea5b11f91 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -289,9 +289,9 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo, | |||
289 | } | 289 | } |
290 | 290 | ||
291 | /* set the CH7xxx power state */ | 291 | /* set the CH7xxx power state */ |
292 | static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) | 292 | static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable) |
293 | { | 293 | { |
294 | if (mode == DRM_MODE_DPMS_ON) | 294 | if (enable) |
295 | ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); | 295 | ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); |
296 | else | 296 | else |
297 | ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); | 297 | ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); |
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 04f2893d5e3c..fa8ff6b050fa 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -288,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo, | |||
288 | } | 288 | } |
289 | 289 | ||
290 | /** Sets the power state of the panel connected to the ivch */ | 290 | /** Sets the power state of the panel connected to the ivch */ |
291 | static void ivch_dpms(struct intel_dvo_device *dvo, int mode) | 291 | static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) |
292 | { | 292 | { |
293 | int i; | 293 | int i; |
294 | uint16_t vr01, vr30, backlight; | 294 | uint16_t vr01, vr30, backlight; |
@@ -297,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode) | |||
297 | if (!ivch_read(dvo, VR01, &vr01)) | 297 | if (!ivch_read(dvo, VR01, &vr01)) |
298 | return; | 298 | return; |
299 | 299 | ||
300 | if (mode == DRM_MODE_DPMS_ON) | 300 | if (enable) |
301 | backlight = 1; | 301 | backlight = 1; |
302 | else | 302 | else |
303 | backlight = 0; | 303 | backlight = 0; |
304 | ivch_write(dvo, VR80, backlight); | 304 | ivch_write(dvo, VR80, backlight); |
305 | 305 | ||
306 | if (mode == DRM_MODE_DPMS_ON) | 306 | if (enable) |
307 | vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; | 307 | vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; |
308 | else | 308 | else |
309 | vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); | 309 | vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); |
@@ -315,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode) | |||
315 | if (!ivch_read(dvo, VR30, &vr30)) | 315 | if (!ivch_read(dvo, VR30, &vr30)) |
316 | break; | 316 | break; |
317 | 317 | ||
318 | if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON)) | 318 | if (((vr30 & VR30_PANEL_ON) != 0) == enable) |
319 | break; | 319 | break; |
320 | udelay(1000); | 320 | udelay(1000); |
321 | } | 321 | } |
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 1a0bad9a5fab..c4d9f2f395e6 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -75,11 +75,6 @@ struct ns2501_priv { | |||
75 | #define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) | 75 | #define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * Include the PLL launcher prototype | ||
79 | */ | ||
80 | extern void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe); | ||
81 | |||
82 | /* | ||
83 | * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens | 78 | * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens |
84 | * laptops does not react on the i2c bus unless | 79 | * laptops does not react on the i2c bus unless |
85 | * both the PLL is running and the display is configured in its native | 80 | * both the PLL is running and the display is configured in its native |
@@ -113,8 +108,6 @@ static void enable_dvo(struct intel_dvo_device *dvo) | |||
113 | I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768 | 108 | I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768 |
114 | I915_WRITE(FW_BLC, 0x1080304); | 109 | I915_WRITE(FW_BLC, 0x1080304); |
115 | 110 | ||
116 | intel_enable_pll(dev_priv, 0); | ||
117 | |||
118 | I915_WRITE(DVOC, 0x90004084); | 111 | I915_WRITE(DVOC, 0x90004084); |
119 | } | 112 | } |
120 | 113 | ||
@@ -500,19 +493,19 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo, | |||
500 | } | 493 | } |
501 | 494 | ||
502 | /* set the NS2501 power state */ | 495 | /* set the NS2501 power state */ |
503 | static void ns2501_dpms(struct intel_dvo_device *dvo, int mode) | 496 | static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) |
504 | { | 497 | { |
505 | bool ok; | 498 | bool ok; |
506 | bool restore = false; | 499 | bool restore = false; |
507 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); | 500 | struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); |
508 | unsigned char ch; | 501 | unsigned char ch; |
509 | 502 | ||
510 | DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %d\n", | 503 | DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n", |
511 | __FUNCTION__, mode); | 504 | __FUNCTION__, enable); |
512 | 505 | ||
513 | ch = ns->reg_8_shadow; | 506 | ch = ns->reg_8_shadow; |
514 | 507 | ||
515 | if (mode == DRM_MODE_DPMS_ON) | 508 | if (enable) |
516 | ch |= NS2501_8_PD; | 509 | ch |= NS2501_8_PD; |
517 | else | 510 | else |
518 | ch &= ~NS2501_8_PD; | 511 | ch &= ~NS2501_8_PD; |
@@ -526,12 +519,10 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, int mode) | |||
526 | ok &= ns2501_writeb(dvo, NS2501_REG8, ch); | 519 | ok &= ns2501_writeb(dvo, NS2501_REG8, ch); |
527 | ok &= | 520 | ok &= |
528 | ns2501_writeb(dvo, 0x34, | 521 | ns2501_writeb(dvo, 0x34, |
529 | (mode == | 522 | enable ? 0x03 : 0x00); |
530 | DRM_MODE_DPMS_ON) ? (0x03) : (0x00)); | ||
531 | ok &= | 523 | ok &= |
532 | ns2501_writeb(dvo, 0x35, | 524 | ns2501_writeb(dvo, 0x35, |
533 | (mode == | 525 | enable ? 0xff : 0x00); |
534 | DRM_MODE_DPMS_ON) ? (0xff) : (0x00)); | ||
535 | if (!ok) { | 526 | if (!ok) { |
536 | if (restore) | 527 | if (restore) |
537 | restore_dvo(dvo); | 528 | restore_dvo(dvo); |
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index a0b13a6f619d..cc24c1cabecd 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -208,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo, | |||
208 | } | 208 | } |
209 | 209 | ||
210 | /* set the SIL164 power state */ | 210 | /* set the SIL164 power state */ |
211 | static void sil164_dpms(struct intel_dvo_device *dvo, int mode) | 211 | static void sil164_dpms(struct intel_dvo_device *dvo, bool enable) |
212 | { | 212 | { |
213 | int ret; | 213 | int ret; |
214 | unsigned char ch; | 214 | unsigned char ch; |
@@ -217,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode) | |||
217 | if (ret == false) | 217 | if (ret == false) |
218 | return; | 218 | return; |
219 | 219 | ||
220 | if (mode == DRM_MODE_DPMS_ON) | 220 | if (enable) |
221 | ch |= SIL164_8_PD; | 221 | ch |= SIL164_8_PD; |
222 | else | 222 | else |
223 | ch &= ~SIL164_8_PD; | 223 | ch &= ~SIL164_8_PD; |
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index aa2cd3ec54aa..097b3e82b00f 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -234,14 +234,14 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo, | |||
234 | } | 234 | } |
235 | 235 | ||
236 | /* set the tfp410 power state */ | 236 | /* set the tfp410 power state */ |
237 | static void tfp410_dpms(struct intel_dvo_device *dvo, int mode) | 237 | static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable) |
238 | { | 238 | { |
239 | uint8_t ctl1; | 239 | uint8_t ctl1; |
240 | 240 | ||
241 | if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) | 241 | if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) |
242 | return; | 242 | return; |
243 | 243 | ||
244 | if (mode == DRM_MODE_DPMS_ON) | 244 | if (enable) |
245 | ctl1 |= TFP410_CTL_1_PD; | 245 | ctl1 |= TFP410_CTL_1_PD; |
246 | else | 246 | else |
247 | ctl1 &= ~TFP410_CTL_1_PD; | 247 | ctl1 &= ~TFP410_CTL_1_PD; |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a18e93687b8b..3d886af2aae6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -118,6 +118,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | |||
118 | obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); | 118 | obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); |
119 | if (obj->base.name) | 119 | if (obj->base.name) |
120 | seq_printf(m, " (name: %d)", obj->base.name); | 120 | seq_printf(m, " (name: %d)", obj->base.name); |
121 | if (obj->pin_count) | ||
122 | seq_printf(m, " (pinned x %d)", obj->pin_count); | ||
121 | if (obj->fence_reg != I915_FENCE_REG_NONE) | 123 | if (obj->fence_reg != I915_FENCE_REG_NONE) |
122 | seq_printf(m, " (fence: %d)", obj->fence_reg); | 124 | seq_printf(m, " (fence: %d)", obj->fence_reg); |
123 | if (obj->gtt_space != NULL) | 125 | if (obj->gtt_space != NULL) |
@@ -197,8 +199,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
197 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 199 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
198 | struct drm_device *dev = node->minor->dev; | 200 | struct drm_device *dev = node->minor->dev; |
199 | struct drm_i915_private *dev_priv = dev->dev_private; | 201 | struct drm_i915_private *dev_priv = dev->dev_private; |
200 | u32 count, mappable_count; | 202 | u32 count, mappable_count, purgeable_count; |
201 | size_t size, mappable_size; | 203 | size_t size, mappable_size, purgeable_size; |
202 | struct drm_i915_gem_object *obj; | 204 | struct drm_i915_gem_object *obj; |
203 | int ret; | 205 | int ret; |
204 | 206 | ||
@@ -211,7 +213,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
211 | dev_priv->mm.object_memory); | 213 | dev_priv->mm.object_memory); |
212 | 214 | ||
213 | size = count = mappable_size = mappable_count = 0; | 215 | size = count = mappable_size = mappable_count = 0; |
214 | count_objects(&dev_priv->mm.gtt_list, gtt_list); | 216 | count_objects(&dev_priv->mm.bound_list, gtt_list); |
215 | seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", | 217 | seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", |
216 | count, mappable_count, size, mappable_size); | 218 | count, mappable_count, size, mappable_size); |
217 | 219 | ||
@@ -225,8 +227,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
225 | seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", | 227 | seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", |
226 | count, mappable_count, size, mappable_size); | 228 | count, mappable_count, size, mappable_size); |
227 | 229 | ||
230 | size = count = purgeable_size = purgeable_count = 0; | ||
231 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { | ||
232 | size += obj->base.size, ++count; | ||
233 | if (obj->madv == I915_MADV_DONTNEED) | ||
234 | purgeable_size += obj->base.size, ++purgeable_count; | ||
235 | } | ||
236 | seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); | ||
237 | |||
228 | size = count = mappable_size = mappable_count = 0; | 238 | size = count = mappable_size = mappable_count = 0; |
229 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | 239 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { |
230 | if (obj->fault_mappable) { | 240 | if (obj->fault_mappable) { |
231 | size += obj->gtt_space->size; | 241 | size += obj->gtt_space->size; |
232 | ++count; | 242 | ++count; |
@@ -235,7 +245,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
235 | mappable_size += obj->gtt_space->size; | 245 | mappable_size += obj->gtt_space->size; |
236 | ++mappable_count; | 246 | ++mappable_count; |
237 | } | 247 | } |
248 | if (obj->madv == I915_MADV_DONTNEED) { | ||
249 | purgeable_size += obj->base.size; | ||
250 | ++purgeable_count; | ||
251 | } | ||
238 | } | 252 | } |
253 | seq_printf(m, "%u purgeable objects, %zu bytes\n", | ||
254 | purgeable_count, purgeable_size); | ||
239 | seq_printf(m, "%u pinned mappable objects, %zu bytes\n", | 255 | seq_printf(m, "%u pinned mappable objects, %zu bytes\n", |
240 | mappable_count, mappable_size); | 256 | mappable_count, mappable_size); |
241 | seq_printf(m, "%u fault mappable objects, %zu bytes\n", | 257 | seq_printf(m, "%u fault mappable objects, %zu bytes\n", |
@@ -264,7 +280,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data) | |||
264 | return ret; | 280 | return ret; |
265 | 281 | ||
266 | total_obj_size = total_gtt_size = count = 0; | 282 | total_obj_size = total_gtt_size = count = 0; |
267 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | 283 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { |
268 | if (list == PINNED_LIST && obj->pin_count == 0) | 284 | if (list == PINNED_LIST && obj->pin_count == 0) |
269 | continue; | 285 | continue; |
270 | 286 | ||
@@ -526,7 +542,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) | |||
526 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 542 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
527 | struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; | 543 | struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; |
528 | 544 | ||
529 | seq_printf(m, "Fenced object[%2d] = ", i); | 545 | seq_printf(m, "Fence %d, pin count = %d, object = ", |
546 | i, dev_priv->fence_regs[i].pin_count); | ||
530 | if (obj == NULL) | 547 | if (obj == NULL) |
531 | seq_printf(m, "unused"); | 548 | seq_printf(m, "unused"); |
532 | else | 549 | else |
@@ -645,10 +662,9 @@ static void i915_ring_error_state(struct seq_file *m, | |||
645 | seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); | 662 | seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); |
646 | seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); | 663 | seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); |
647 | seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); | 664 | seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); |
648 | if (ring == RCS && INTEL_INFO(dev)->gen >= 4) { | 665 | if (ring == RCS && INTEL_INFO(dev)->gen >= 4) |
649 | seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); | ||
650 | seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); | 666 | seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); |
651 | } | 667 | |
652 | if (INTEL_INFO(dev)->gen >= 4) | 668 | if (INTEL_INFO(dev)->gen >= 4) |
653 | seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); | 669 | seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); |
654 | seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); | 670 | seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); |
@@ -697,11 +713,17 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
697 | for (i = 0; i < dev_priv->num_fence_regs; i++) | 713 | for (i = 0; i < dev_priv->num_fence_regs; i++) |
698 | seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); | 714 | seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); |
699 | 715 | ||
716 | for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) | ||
717 | seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); | ||
718 | |||
700 | if (INTEL_INFO(dev)->gen >= 6) { | 719 | if (INTEL_INFO(dev)->gen >= 6) { |
701 | seq_printf(m, "ERROR: 0x%08x\n", error->error); | 720 | seq_printf(m, "ERROR: 0x%08x\n", error->error); |
702 | seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); | 721 | seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); |
703 | } | 722 | } |
704 | 723 | ||
724 | if (INTEL_INFO(dev)->gen == 7) | ||
725 | seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); | ||
726 | |||
705 | for_each_ring(ring, dev_priv, i) | 727 | for_each_ring(ring, dev_priv, i) |
706 | i915_ring_error_state(m, dev, error, i); | 728 | i915_ring_error_state(m, dev, error, i); |
707 | 729 | ||
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0a1b64f8d442..2c09900e3267 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -235,10 +235,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
235 | } | 235 | } |
236 | } | 236 | } |
237 | 237 | ||
238 | dev_priv->cpp = init->cpp; | 238 | dev_priv->dri1.cpp = init->cpp; |
239 | dev_priv->back_offset = init->back_offset; | 239 | dev_priv->dri1.back_offset = init->back_offset; |
240 | dev_priv->front_offset = init->front_offset; | 240 | dev_priv->dri1.front_offset = init->front_offset; |
241 | dev_priv->current_page = 0; | 241 | dev_priv->dri1.current_page = 0; |
242 | if (master_priv->sarea_priv) | 242 | if (master_priv->sarea_priv) |
243 | master_priv->sarea_priv->pf_current_page = 0; | 243 | master_priv->sarea_priv->pf_current_page = 0; |
244 | 244 | ||
@@ -575,7 +575,7 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
575 | 575 | ||
576 | DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", | 576 | DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", |
577 | __func__, | 577 | __func__, |
578 | dev_priv->current_page, | 578 | dev_priv->dri1.current_page, |
579 | master_priv->sarea_priv->pf_current_page); | 579 | master_priv->sarea_priv->pf_current_page); |
580 | 580 | ||
581 | i915_kernel_lost_context(dev); | 581 | i915_kernel_lost_context(dev); |
@@ -589,12 +589,12 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
589 | 589 | ||
590 | OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); | 590 | OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); |
591 | OUT_RING(0); | 591 | OUT_RING(0); |
592 | if (dev_priv->current_page == 0) { | 592 | if (dev_priv->dri1.current_page == 0) { |
593 | OUT_RING(dev_priv->back_offset); | 593 | OUT_RING(dev_priv->dri1.back_offset); |
594 | dev_priv->current_page = 1; | 594 | dev_priv->dri1.current_page = 1; |
595 | } else { | 595 | } else { |
596 | OUT_RING(dev_priv->front_offset); | 596 | OUT_RING(dev_priv->dri1.front_offset); |
597 | dev_priv->current_page = 0; | 597 | dev_priv->dri1.current_page = 0; |
598 | } | 598 | } |
599 | OUT_RING(0); | 599 | OUT_RING(0); |
600 | 600 | ||
@@ -613,7 +613,7 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
613 | ADVANCE_LP_RING(); | 613 | ADVANCE_LP_RING(); |
614 | } | 614 | } |
615 | 615 | ||
616 | master_priv->sarea_priv->pf_current_page = dev_priv->current_page; | 616 | master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page; |
617 | return 0; | 617 | return 0; |
618 | } | 618 | } |
619 | 619 | ||
@@ -1012,6 +1012,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
1012 | case I915_PARAM_HAS_SEMAPHORES: | 1012 | case I915_PARAM_HAS_SEMAPHORES: |
1013 | value = i915_semaphore_is_enabled(dev); | 1013 | value = i915_semaphore_is_enabled(dev); |
1014 | break; | 1014 | break; |
1015 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: | ||
1016 | value = 1; | ||
1017 | break; | ||
1015 | default: | 1018 | default: |
1016 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 1019 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
1017 | param->param); | 1020 | param->param); |
@@ -1555,11 +1558,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1555 | * | 1558 | * |
1556 | * All tasks on the workqueue are expected to acquire the dev mutex | 1559 | * All tasks on the workqueue are expected to acquire the dev mutex |
1557 | * so there is no point in running more than one instance of the | 1560 | * so there is no point in running more than one instance of the |
1558 | * workqueue at any time: max_active = 1 and NON_REENTRANT. | 1561 | * workqueue at any time. Use an ordered one. |
1559 | */ | 1562 | */ |
1560 | dev_priv->wq = alloc_workqueue("i915", | 1563 | dev_priv->wq = alloc_ordered_workqueue("i915", 0); |
1561 | WQ_UNBOUND | WQ_NON_REENTRANT, | ||
1562 | 1); | ||
1563 | if (dev_priv->wq == NULL) { | 1564 | if (dev_priv->wq == NULL) { |
1564 | DRM_ERROR("Failed to create our workqueue.\n"); | 1565 | DRM_ERROR("Failed to create our workqueue.\n"); |
1565 | ret = -ENOMEM; | 1566 | ret = -ENOMEM; |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7ebb13b65133..7f3863fb138c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1174,6 +1174,10 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | |||
1174 | if (unlikely(__fifo_ret)) { \ | 1174 | if (unlikely(__fifo_ret)) { \ |
1175 | gen6_gt_check_fifodbg(dev_priv); \ | 1175 | gen6_gt_check_fifodbg(dev_priv); \ |
1176 | } \ | 1176 | } \ |
1177 | if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ | ||
1178 | DRM_ERROR("Unclaimed write to %x\n", reg); \ | ||
1179 | writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \ | ||
1180 | } \ | ||
1177 | } | 1181 | } |
1178 | __i915_write(8, b) | 1182 | __i915_write(8, b) |
1179 | __i915_write(16, w) | 1183 | __i915_write(16, w) |
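The __i915_write hunk above is the improved mmio error reporting for hsw: Haswell latches a sticky bit in ERR_INT whenever a write lands on an unclaimed MMIO address, so checking it right after every register write pinpoints the offending register. Stripped of the macro line continuations, the added check is roughly:

```c
/* Check-after-write sketch, as added to the __i915_write() macro body:
 * if the hardware flagged the last MMIO write as unclaimed, report the
 * register offset and clear the sticky bit so the next hit is caught too.
 */
if (IS_HASWELL(dev_priv->dev) &&
    (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) {
	DRM_ERROR("Unclaimed write to %x\n", reg);
	writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT);
}
```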
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 261fe2175afb..58b43db0a134 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -196,9 +196,10 @@ struct drm_i915_error_state { | |||
196 | u32 cpu_ring_head[I915_NUM_RINGS]; | 196 | u32 cpu_ring_head[I915_NUM_RINGS]; |
197 | u32 cpu_ring_tail[I915_NUM_RINGS]; | 197 | u32 cpu_ring_tail[I915_NUM_RINGS]; |
198 | u32 error; /* gen6+ */ | 198 | u32 error; /* gen6+ */ |
199 | u32 err_int; /* gen7 */ | ||
199 | u32 instpm[I915_NUM_RINGS]; | 200 | u32 instpm[I915_NUM_RINGS]; |
200 | u32 instps[I915_NUM_RINGS]; | 201 | u32 instps[I915_NUM_RINGS]; |
201 | u32 instdone1; | 202 | u32 extra_instdone[I915_NUM_INSTDONE_REG]; |
202 | u32 seqno[I915_NUM_RINGS]; | 203 | u32 seqno[I915_NUM_RINGS]; |
203 | u64 bbaddr; | 204 | u64 bbaddr; |
204 | u32 fault_reg[I915_NUM_RINGS]; | 205 | u32 fault_reg[I915_NUM_RINGS]; |
@@ -428,12 +429,6 @@ typedef struct drm_i915_private { | |||
428 | 429 | ||
429 | struct resource mch_res; | 430 | struct resource mch_res; |
430 | 431 | ||
431 | unsigned int cpp; | ||
432 | int back_offset; | ||
433 | int front_offset; | ||
434 | int current_page; | ||
435 | int page_flipping; | ||
436 | |||
437 | atomic_t irq_received; | 432 | atomic_t irq_received; |
438 | 433 | ||
439 | /* protects the irq masks */ | 434 | /* protects the irq masks */ |
@@ -451,7 +446,6 @@ typedef struct drm_i915_private { | |||
451 | u32 hotplug_supported_mask; | 446 | u32 hotplug_supported_mask; |
452 | struct work_struct hotplug_work; | 447 | struct work_struct hotplug_work; |
453 | 448 | ||
454 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; | ||
455 | int num_pipe; | 449 | int num_pipe; |
456 | int num_pch_pll; | 450 | int num_pch_pll; |
457 | 451 | ||
@@ -460,8 +454,7 @@ typedef struct drm_i915_private { | |||
460 | struct timer_list hangcheck_timer; | 454 | struct timer_list hangcheck_timer; |
461 | int hangcheck_count; | 455 | int hangcheck_count; |
462 | uint32_t last_acthd[I915_NUM_RINGS]; | 456 | uint32_t last_acthd[I915_NUM_RINGS]; |
463 | uint32_t last_instdone; | 457 | uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; |
464 | uint32_t last_instdone1; | ||
465 | 458 | ||
466 | unsigned int stop_rings; | 459 | unsigned int stop_rings; |
467 | 460 | ||
@@ -692,7 +685,13 @@ typedef struct drm_i915_private { | |||
692 | struct drm_mm gtt_space; | 685 | struct drm_mm gtt_space; |
693 | /** List of all objects in gtt_space. Used to restore gtt | 686 | /** List of all objects in gtt_space. Used to restore gtt |
694 | * mappings on resume */ | 687 | * mappings on resume */ |
695 | struct list_head gtt_list; | 688 | struct list_head bound_list; |
689 | /** | ||
690 | * List of objects which are not bound to the GTT (thus | ||
691 | * are idle and not used by the GPU) but still have | ||
692 | * (presumably uncached) pages still attached. | ||
693 | */ | ||
694 | struct list_head unbound_list; | ||
696 | 695 | ||
697 | /** Usable portion of the GTT for GEM */ | 696 | /** Usable portion of the GTT for GEM */ |
698 | unsigned long gtt_start; | 697 | unsigned long gtt_start; |
@@ -790,6 +789,12 @@ typedef struct drm_i915_private { | |||
790 | struct { | 789 | struct { |
791 | unsigned allow_batchbuffer : 1; | 790 | unsigned allow_batchbuffer : 1; |
792 | u32 __iomem *gfx_hws_cpu_addr; | 791 | u32 __iomem *gfx_hws_cpu_addr; |
792 | |||
793 | unsigned int cpp; | ||
794 | int back_offset; | ||
795 | int front_offset; | ||
796 | int current_page; | ||
797 | int page_flipping; | ||
793 | } dri1; | 798 | } dri1; |
794 | 799 | ||
795 | /* Kernel Modesetting */ | 800 | /* Kernel Modesetting */ |
@@ -1296,19 +1301,20 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, | |||
1296 | struct drm_file *file_priv); | 1301 | struct drm_file *file_priv); |
1297 | void i915_gem_load(struct drm_device *dev); | 1302 | void i915_gem_load(struct drm_device *dev); |
1298 | int i915_gem_init_object(struct drm_gem_object *obj); | 1303 | int i915_gem_init_object(struct drm_gem_object *obj); |
1304 | void i915_gem_object_init(struct drm_i915_gem_object *obj); | ||
1299 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | 1305 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
1300 | size_t size); | 1306 | size_t size); |
1301 | void i915_gem_free_object(struct drm_gem_object *obj); | 1307 | void i915_gem_free_object(struct drm_gem_object *obj); |
1302 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, | 1308 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
1303 | uint32_t alignment, | 1309 | uint32_t alignment, |
1304 | bool map_and_fenceable); | 1310 | bool map_and_fenceable, |
1311 | bool nonblocking); | ||
1305 | void i915_gem_object_unpin(struct drm_i915_gem_object *obj); | 1312 | void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
1306 | int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); | 1313 | int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); |
1307 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); | 1314 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
1308 | void i915_gem_lastclose(struct drm_device *dev); | 1315 | void i915_gem_lastclose(struct drm_device *dev); |
1309 | 1316 | ||
1310 | int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, | 1317 | int __must_check i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj); |
1311 | gfp_t gfpmask); | ||
1312 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); | 1318 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
1313 | int i915_gem_object_sync(struct drm_i915_gem_object *obj, | 1319 | int i915_gem_object_sync(struct drm_i915_gem_object *obj, |
1314 | struct intel_ring_buffer *to); | 1320 | struct intel_ring_buffer *to); |
@@ -1449,8 +1455,9 @@ void i915_gem_init_global_gtt(struct drm_device *dev, | |||
1449 | int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, | 1455 | int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, |
1450 | unsigned alignment, | 1456 | unsigned alignment, |
1451 | unsigned cache_level, | 1457 | unsigned cache_level, |
1452 | bool mappable); | 1458 | bool mappable, |
1453 | int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); | 1459 | bool nonblock); |
1460 | int i915_gem_evict_everything(struct drm_device *dev); | ||
1454 | 1461 | ||
1455 | /* i915_gem_stolen.c */ | 1462 | /* i915_gem_stolen.c */ |
1456 | int i915_gem_init_stolen(struct drm_device *dev); | 1463 | int i915_gem_init_stolen(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 31054fa44c47..87a64e5f28fb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,7 +41,8 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o | |||
41 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); | 41 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); |
42 | static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | 42 | static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
43 | unsigned alignment, | 43 | unsigned alignment, |
44 | bool map_and_fenceable); | 44 | bool map_and_fenceable, |
45 | bool nonblocking); | ||
45 | static int i915_gem_phys_pwrite(struct drm_device *dev, | 46 | static int i915_gem_phys_pwrite(struct drm_device *dev, |
46 | struct drm_i915_gem_object *obj, | 47 | struct drm_i915_gem_object *obj, |
47 | struct drm_i915_gem_pwrite *args, | 48 | struct drm_i915_gem_pwrite *args, |
@@ -55,6 +56,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, | |||
55 | 56 | ||
56 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, | 57 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, |
57 | struct shrink_control *sc); | 58 | struct shrink_control *sc); |
59 | static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); | ||
60 | static void i915_gem_shrink_all(struct drm_i915_private *dev_priv); | ||
58 | static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); | 61 | static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); |
59 | 62 | ||
60 | static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) | 63 | static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) |
@@ -140,7 +143,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) | |||
140 | static inline bool | 143 | static inline bool |
141 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) | 144 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) |
142 | { | 145 | { |
143 | return !obj->active; | 146 | return obj->gtt_space && !obj->active; |
144 | } | 147 | } |
145 | 148 | ||
146 | int | 149 | int |
@@ -179,7 +182,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
179 | 182 | ||
180 | pinned = 0; | 183 | pinned = 0; |
181 | mutex_lock(&dev->struct_mutex); | 184 | mutex_lock(&dev->struct_mutex); |
182 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) | 185 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) |
183 | if (obj->pin_count) | 186 | if (obj->pin_count) |
184 | pinned += obj->gtt_space->size; | 187 | pinned += obj->gtt_space->size; |
185 | mutex_unlock(&dev->struct_mutex); | 188 | mutex_unlock(&dev->struct_mutex); |
@@ -423,9 +426,11 @@ i915_gem_shmem_pread(struct drm_device *dev, | |||
423 | * anyway again before the next pread happens. */ | 426 | * anyway again before the next pread happens. */ |
424 | if (obj->cache_level == I915_CACHE_NONE) | 427 | if (obj->cache_level == I915_CACHE_NONE) |
425 | needs_clflush = 1; | 428 | needs_clflush = 1; |
426 | ret = i915_gem_object_set_to_gtt_domain(obj, false); | 429 | if (obj->gtt_space) { |
427 | if (ret) | 430 | ret = i915_gem_object_set_to_gtt_domain(obj, false); |
428 | return ret; | 431 | if (ret) |
432 | return ret; | ||
433 | } | ||
429 | } | 434 | } |
430 | 435 | ||
431 | offset = args->offset; | 436 | offset = args->offset; |
@@ -605,7 +610,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, | |||
605 | char __user *user_data; | 610 | char __user *user_data; |
606 | int page_offset, page_length, ret; | 611 | int page_offset, page_length, ret; |
607 | 612 | ||
608 | ret = i915_gem_object_pin(obj, 0, true); | 613 | ret = i915_gem_object_pin(obj, 0, true, true); |
609 | if (ret) | 614 | if (ret) |
610 | goto out; | 615 | goto out; |
611 | 616 | ||
@@ -751,9 +756,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev, | |||
751 | * right away and we therefore have to clflush anyway. */ | 756 | * right away and we therefore have to clflush anyway. */ |
752 | if (obj->cache_level == I915_CACHE_NONE) | 757 | if (obj->cache_level == I915_CACHE_NONE) |
753 | needs_clflush_after = 1; | 758 | needs_clflush_after = 1; |
754 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | 759 | if (obj->gtt_space) { |
755 | if (ret) | 760 | ret = i915_gem_object_set_to_gtt_domain(obj, true); |
756 | return ret; | 761 | if (ret) |
762 | return ret; | ||
763 | } | ||
757 | } | 764 | } |
758 | /* Same trick applies for invalidate partially written cachelines before | 765 | /* Same trick applies for invalidate partially written cachelines before |
759 | * writing. */ | 766 | * writing. */ |
@@ -919,10 +926,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
919 | goto out; | 926 | goto out; |
920 | } | 927 | } |
921 | 928 | ||
922 | if (obj->gtt_space && | 929 | if (obj->cache_level == I915_CACHE_NONE && |
923 | obj->cache_level == I915_CACHE_NONE && | ||
924 | obj->tiling_mode == I915_TILING_NONE && | 930 | obj->tiling_mode == I915_TILING_NONE && |
925 | obj->map_and_fenceable && | ||
926 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | 931 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
927 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); | 932 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); |
928 | /* Note that the gtt paths might fail with non-page-backed user | 933 | /* Note that the gtt paths might fail with non-page-backed user |
@@ -930,7 +935,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
930 | * textures). Fallback to the shmem path in that case. */ | 935 | * textures). Fallback to the shmem path in that case. */ |
931 | } | 936 | } |
932 | 937 | ||
933 | if (ret == -EFAULT) | 938 | if (ret == -EFAULT || ret == -ENOSPC) |
934 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); | 939 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); |
935 | 940 | ||
936 | out: | 941 | out: |
@@ -940,6 +945,240 @@ unlock: | |||
940 | return ret; | 945 | return ret; |
941 | } | 946 | } |
942 | 947 | ||
948 | int | ||
949 | i915_gem_check_wedge(struct drm_i915_private *dev_priv, | ||
950 | bool interruptible) | ||
951 | { | ||
952 | if (atomic_read(&dev_priv->mm.wedged)) { | ||
953 | struct completion *x = &dev_priv->error_completion; | ||
954 | bool recovery_complete; | ||
955 | unsigned long flags; | ||
956 | |||
957 | /* Give the error handler a chance to run. */ | ||
958 | spin_lock_irqsave(&x->wait.lock, flags); | ||
959 | recovery_complete = x->done > 0; | ||
960 | spin_unlock_irqrestore(&x->wait.lock, flags); | ||
961 | |||
962 | /* Non-interruptible callers can't handle -EAGAIN, hence return | ||
963 | * -EIO unconditionally for these. */ | ||
964 | if (!interruptible) | ||
965 | return -EIO; | ||
966 | |||
967 | /* Recovery complete, but still wedged means reset failure. */ | ||
968 | if (recovery_complete) | ||
969 | return -EIO; | ||
970 | |||
971 | return -EAGAIN; | ||
972 | } | ||
973 | |||
974 | return 0; | ||
975 | } | ||
976 | |||
977 | /* | ||
978 | * Compare seqno against outstanding lazy request. Emit a request if they are | ||
979 | * equal. | ||
980 | */ | ||
981 | static int | ||
982 | i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) | ||
983 | { | ||
984 | int ret; | ||
985 | |||
986 | BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); | ||
987 | |||
988 | ret = 0; | ||
989 | if (seqno == ring->outstanding_lazy_request) | ||
990 | ret = i915_add_request(ring, NULL, NULL); | ||
991 | |||
992 | return ret; | ||
993 | } | ||
994 | |||
995 | /** | ||
996 | * __wait_seqno - wait until execution of seqno has finished | ||
997 | * @ring: the ring expected to report seqno | ||
998 | * @seqno: duh! | ||
999 | * @interruptible: do an interruptible wait (normally yes) | ||
1000 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining | ||
1001 | * | ||
1002 | * Returns 0 if the seqno was found within the alloted time. Else returns the | ||
1003 | * errno with remaining time filled in timeout argument. | ||
1004 | */ | ||
1005 | static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | ||
1006 | bool interruptible, struct timespec *timeout) | ||
1007 | { | ||
1008 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||
1009 | struct timespec before, now, wait_time={1,0}; | ||
1010 | unsigned long timeout_jiffies; | ||
1011 | long end; | ||
1012 | bool wait_forever = true; | ||
1013 | int ret; | ||
1014 | |||
1015 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) | ||
1016 | return 0; | ||
1017 | |||
1018 | trace_i915_gem_request_wait_begin(ring, seqno); | ||
1019 | |||
1020 | if (timeout != NULL) { | ||
1021 | wait_time = *timeout; | ||
1022 | wait_forever = false; | ||
1023 | } | ||
1024 | |||
1025 | timeout_jiffies = timespec_to_jiffies(&wait_time); | ||
1026 | |||
1027 | if (WARN_ON(!ring->irq_get(ring))) | ||
1028 | return -ENODEV; | ||
1029 | |||
1030 | /* Record current time in case interrupted by signal, or wedged * */ | ||
1031 | getrawmonotonic(&before); | ||
1032 | |||
1033 | #define EXIT_COND \ | ||
1034 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ | ||
1035 | atomic_read(&dev_priv->mm.wedged)) | ||
1036 | do { | ||
1037 | if (interruptible) | ||
1038 | end = wait_event_interruptible_timeout(ring->irq_queue, | ||
1039 | EXIT_COND, | ||
1040 | timeout_jiffies); | ||
1041 | else | ||
1042 | end = wait_event_timeout(ring->irq_queue, EXIT_COND, | ||
1043 | timeout_jiffies); | ||
1044 | |||
1045 | ret = i915_gem_check_wedge(dev_priv, interruptible); | ||
1046 | if (ret) | ||
1047 | end = ret; | ||
1048 | } while (end == 0 && wait_forever); | ||
1049 | |||
1050 | getrawmonotonic(&now); | ||
1051 | |||
1052 | ring->irq_put(ring); | ||
1053 | trace_i915_gem_request_wait_end(ring, seqno); | ||
1054 | #undef EXIT_COND | ||
1055 | |||
1056 | if (timeout) { | ||
1057 | struct timespec sleep_time = timespec_sub(now, before); | ||
1058 | *timeout = timespec_sub(*timeout, sleep_time); | ||
1059 | } | ||
1060 | |||
1061 | switch (end) { | ||
1062 | case -EIO: | ||
1063 | case -EAGAIN: /* Wedged */ | ||
1064 | case -ERESTARTSYS: /* Signal */ | ||
1065 | return (int)end; | ||
1066 | case 0: /* Timeout */ | ||
1067 | if (timeout) | ||
1068 | set_normalized_timespec(timeout, 0, 0); | ||
1069 | return -ETIME; | ||
1070 | default: /* Completed */ | ||
1071 | WARN_ON(end < 0); /* We're not aware of other errors */ | ||
1072 | return 0; | ||
1073 | } | ||
1074 | } | ||
1075 | |||
1076 | /** | ||
1077 | * Waits for a sequence number to be signaled, and cleans up the | ||
1078 | * request and object lists appropriately for that event. | ||
1079 | */ | ||
1080 | int | ||
1081 | i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) | ||
1082 | { | ||
1083 | struct drm_device *dev = ring->dev; | ||
1084 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1085 | bool interruptible = dev_priv->mm.interruptible; | ||
1086 | int ret; | ||
1087 | |||
1088 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
1089 | BUG_ON(seqno == 0); | ||
1090 | |||
1091 | ret = i915_gem_check_wedge(dev_priv, interruptible); | ||
1092 | if (ret) | ||
1093 | return ret; | ||
1094 | |||
1095 | ret = i915_gem_check_olr(ring, seqno); | ||
1096 | if (ret) | ||
1097 | return ret; | ||
1098 | |||
1099 | return __wait_seqno(ring, seqno, interruptible, NULL); | ||
1100 | } | ||
1101 | |||
1102 | /** | ||
1103 | * Ensures that all rendering to the object has completed and the object is | ||
1104 | * safe to unbind from the GTT or access from the CPU. | ||
1105 | */ | ||
1106 | static __must_check int | ||
1107 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | ||
1108 | bool readonly) | ||
1109 | { | ||
1110 | struct intel_ring_buffer *ring = obj->ring; | ||
1111 | u32 seqno; | ||
1112 | int ret; | ||
1113 | |||
1114 | seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; | ||
1115 | if (seqno == 0) | ||
1116 | return 0; | ||
1117 | |||
1118 | ret = i915_wait_seqno(ring, seqno); | ||
1119 | if (ret) | ||
1120 | return ret; | ||
1121 | |||
1122 | i915_gem_retire_requests_ring(ring); | ||
1123 | |||
1124 | /* Manually manage the write flush as we may have not yet | ||
1125 | * retired the buffer. | ||
1126 | */ | ||
1127 | if (obj->last_write_seqno && | ||
1128 | i915_seqno_passed(seqno, obj->last_write_seqno)) { | ||
1129 | obj->last_write_seqno = 0; | ||
1130 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
1131 | } | ||
1132 | |||
1133 | return 0; | ||
1134 | } | ||
1135 | |||
1136 | /* A nonblocking variant of the above wait. This is a highly dangerous routine | ||
1137 | * as the object state may change during this call. | ||
1138 | */ | ||
1139 | static __must_check int | ||
1140 | i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | ||
1141 | bool readonly) | ||
1142 | { | ||
1143 | struct drm_device *dev = obj->base.dev; | ||
1144 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1145 | struct intel_ring_buffer *ring = obj->ring; | ||
1146 | u32 seqno; | ||
1147 | int ret; | ||
1148 | |||
1149 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
1150 | BUG_ON(!dev_priv->mm.interruptible); | ||
1151 | |||
1152 | seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; | ||
1153 | if (seqno == 0) | ||
1154 | return 0; | ||
1155 | |||
1156 | ret = i915_gem_check_wedge(dev_priv, true); | ||
1157 | if (ret) | ||
1158 | return ret; | ||
1159 | |||
1160 | ret = i915_gem_check_olr(ring, seqno); | ||
1161 | if (ret) | ||
1162 | return ret; | ||
1163 | |||
1164 | mutex_unlock(&dev->struct_mutex); | ||
1165 | ret = __wait_seqno(ring, seqno, true, NULL); | ||
1166 | mutex_lock(&dev->struct_mutex); | ||
1167 | |||
1168 | i915_gem_retire_requests_ring(ring); | ||
1169 | |||
1170 | /* Manually manage the write flush as we may have not yet | ||
1171 | * retired the buffer. | ||
1172 | */ | ||
1173 | if (obj->last_write_seqno && | ||
1174 | i915_seqno_passed(seqno, obj->last_write_seqno)) { | ||
1175 | obj->last_write_seqno = 0; | ||
1176 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
1177 | } | ||
1178 | |||
1179 | return ret; | ||
1180 | } | ||
1181 | |||
943 | /** | 1182 | /** |
944 | * Called when user space prepares to use an object with the CPU, either | 1183 | * Called when user space prepares to use an object with the CPU, either |
945 | * through the mmap ioctl's mapping or a GTT mapping. | 1184 | * through the mmap ioctl's mapping or a GTT mapping. |
@@ -977,6 +1216,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
977 | goto unlock; | 1216 | goto unlock; |
978 | } | 1217 | } |
979 | 1218 | ||
1219 | /* Try to flush the object off the GPU without holding the lock. | ||
1220 | * We will repeat the flush holding the lock in the normal manner | ||
1221 | * to catch cases where we are gazumped. | ||
1222 | */ | ||
1223 | ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain); | ||
1224 | if (ret) | ||
1225 | goto unref; | ||
1226 | |||
980 | if (read_domains & I915_GEM_DOMAIN_GTT) { | 1227 | if (read_domains & I915_GEM_DOMAIN_GTT) { |
981 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); | 1228 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); |
982 | 1229 | ||
@@ -990,6 +1237,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
990 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | 1237 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); |
991 | } | 1238 | } |
992 | 1239 | ||
1240 | unref: | ||
993 | drm_gem_object_unreference(&obj->base); | 1241 | drm_gem_object_unreference(&obj->base); |
994 | unlock: | 1242 | unlock: |
995 | mutex_unlock(&dev->struct_mutex); | 1243 | mutex_unlock(&dev->struct_mutex); |
@@ -1109,7 +1357,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1109 | goto unlock; | 1357 | goto unlock; |
1110 | } | 1358 | } |
1111 | if (!obj->gtt_space) { | 1359 | if (!obj->gtt_space) { |
1112 | ret = i915_gem_object_bind_to_gtt(obj, 0, true); | 1360 | ret = i915_gem_object_bind_to_gtt(obj, 0, true, false); |
1113 | if (ret) | 1361 | if (ret) |
1114 | goto unlock; | 1362 | goto unlock; |
1115 | 1363 | ||
@@ -1270,6 +1518,42 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, | |||
1270 | return i915_gem_get_gtt_size(dev, size, tiling_mode); | 1518 | return i915_gem_get_gtt_size(dev, size, tiling_mode); |
1271 | } | 1519 | } |
1272 | 1520 | ||
1521 | static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) | ||
1522 | { | ||
1523 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | ||
1524 | int ret; | ||
1525 | |||
1526 | if (obj->base.map_list.map) | ||
1527 | return 0; | ||
1528 | |||
1529 | ret = drm_gem_create_mmap_offset(&obj->base); | ||
1530 | if (ret != -ENOSPC) | ||
1531 | return ret; | ||
1532 | |||
1533 | /* Badly fragmented mmap space? The only way we can recover | ||
1534 | * space is by destroying unwanted objects. We can't randomly release | ||
1535 | * mmap_offsets as userspace expects them to be persistent for the | ||
1536 | * lifetime of the objects. The closest we can is to release the | ||
1537 | * offsets on purgeable objects by truncating it and marking it purged, | ||
1538 | * which prevents userspace from ever using that object again. | ||
1539 | */ | ||
1540 | i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); | ||
1541 | ret = drm_gem_create_mmap_offset(&obj->base); | ||
1542 | if (ret != -ENOSPC) | ||
1543 | return ret; | ||
1544 | |||
1545 | i915_gem_shrink_all(dev_priv); | ||
1546 | return drm_gem_create_mmap_offset(&obj->base); | ||
1547 | } | ||
1548 | |||
1549 | static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) | ||
1550 | { | ||
1551 | if (!obj->base.map_list.map) | ||
1552 | return; | ||
1553 | |||
1554 | drm_gem_free_mmap_offset(&obj->base); | ||
1555 | } | ||
1556 | |||
1273 | int | 1557 | int |
1274 | i915_gem_mmap_gtt(struct drm_file *file, | 1558 | i915_gem_mmap_gtt(struct drm_file *file, |
1275 | struct drm_device *dev, | 1559 | struct drm_device *dev, |
@@ -1301,11 +1585,9 @@ i915_gem_mmap_gtt(struct drm_file *file, | |||
1301 | goto out; | 1585 | goto out; |
1302 | } | 1586 | } |
1303 | 1587 | ||
1304 | if (!obj->base.map_list.map) { | 1588 | ret = i915_gem_object_create_mmap_offset(obj); |
1305 | ret = drm_gem_create_mmap_offset(&obj->base); | 1589 | if (ret) |
1306 | if (ret) | 1590 | goto out; |
1307 | goto out; | ||
1308 | } | ||
1309 | 1591 | ||
1310 | *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; | 1592 | *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; |
1311 | 1593 | ||
@@ -1340,64 +1622,58 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1340 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); | 1622 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); |
1341 | } | 1623 | } |
1342 | 1624 | ||
1343 | int | 1625 | /* Immediately discard the backing storage */ |
1344 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, | 1626 | static void |
1345 | gfp_t gfpmask) | 1627 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) |
1346 | { | 1628 | { |
1347 | int page_count, i; | ||
1348 | struct address_space *mapping; | ||
1349 | struct inode *inode; | 1629 | struct inode *inode; |
1350 | struct page *page; | ||
1351 | 1630 | ||
1352 | if (obj->pages || obj->sg_table) | 1631 | i915_gem_object_free_mmap_offset(obj); |
1353 | return 0; | ||
1354 | 1632 | ||
1355 | /* Get the list of pages out of our struct file. They'll be pinned | 1633 | if (obj->base.filp == NULL) |
1356 | * at this point until we release them. | 1634 | return; |
1357 | */ | ||
1358 | page_count = obj->base.size / PAGE_SIZE; | ||
1359 | BUG_ON(obj->pages != NULL); | ||
1360 | obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); | ||
1361 | if (obj->pages == NULL) | ||
1362 | return -ENOMEM; | ||
1363 | 1635 | ||
1636 | /* Our goal here is to return as much of the memory as | ||
1637 | * is possible back to the system as we are called from OOM. | ||
1638 | * To do this we must instruct the shmfs to drop all of its | ||
1639 | * backing pages, *now*. | ||
1640 | */ | ||
1364 | inode = obj->base.filp->f_path.dentry->d_inode; | 1641 | inode = obj->base.filp->f_path.dentry->d_inode; |
1365 | mapping = inode->i_mapping; | 1642 | shmem_truncate_range(inode, 0, (loff_t)-1); |
1366 | gfpmask |= mapping_gfp_mask(mapping); | ||
1367 | |||
1368 | for (i = 0; i < page_count; i++) { | ||
1369 | page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); | ||
1370 | if (IS_ERR(page)) | ||
1371 | goto err_pages; | ||
1372 | |||
1373 | obj->pages[i] = page; | ||
1374 | } | ||
1375 | |||
1376 | if (i915_gem_object_needs_bit17_swizzle(obj)) | ||
1377 | i915_gem_object_do_bit_17_swizzle(obj); | ||
1378 | |||
1379 | return 0; | ||
1380 | 1643 | ||
1381 | err_pages: | 1644 | obj->madv = __I915_MADV_PURGED; |
1382 | while (i--) | 1645 | } |
1383 | page_cache_release(obj->pages[i]); | ||
1384 | 1646 | ||
1385 | drm_free_large(obj->pages); | 1647 | static inline int |
1386 | obj->pages = NULL; | 1648 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) |
1387 | return PTR_ERR(page); | 1649 | { |
1650 | return obj->madv == I915_MADV_DONTNEED; | ||
1388 | } | 1651 | } |
1389 | 1652 | ||
1390 | static void | 1653 | static int |
1391 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) | 1654 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) |
1392 | { | 1655 | { |
1393 | int page_count = obj->base.size / PAGE_SIZE; | 1656 | int page_count = obj->base.size / PAGE_SIZE; |
1394 | int i; | 1657 | int ret, i; |
1395 | 1658 | ||
1396 | if (!obj->pages) | 1659 | BUG_ON(obj->gtt_space); |
1397 | return; | 1660 | |
1661 | if (obj->pages == NULL) | ||
1662 | return 0; | ||
1398 | 1663 | ||
1664 | BUG_ON(obj->gtt_space); | ||
1399 | BUG_ON(obj->madv == __I915_MADV_PURGED); | 1665 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
1400 | 1666 | ||
1667 | ret = i915_gem_object_set_to_cpu_domain(obj, true); | ||
1668 | if (ret) { | ||
1669 | /* In the event of a disaster, abandon all caches and | ||
1670 | * hope for the best. | ||
1671 | */ | ||
1672 | WARN_ON(ret != -EIO); | ||
1673 | i915_gem_clflush_object(obj); | ||
1674 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; | ||
1675 | } | ||
1676 | |||
1401 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 1677 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
1402 | i915_gem_object_save_bit_17_swizzle(obj); | 1678 | i915_gem_object_save_bit_17_swizzle(obj); |
1403 | 1679 | ||
@@ -1417,6 +1693,129 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) | |||
1417 | 1693 | ||
1418 | drm_free_large(obj->pages); | 1694 | drm_free_large(obj->pages); |
1419 | obj->pages = NULL; | 1695 | obj->pages = NULL; |
1696 | |||
1697 | list_del(&obj->gtt_list); | ||
1698 | |||
1699 | if (i915_gem_object_is_purgeable(obj)) | ||
1700 | i915_gem_object_truncate(obj); | ||
1701 | |||
1702 | return 0; | ||
1703 | } | ||
1704 | |||
1705 | static long | ||
1706 | i915_gem_purge(struct drm_i915_private *dev_priv, long target) | ||
1707 | { | ||
1708 | struct drm_i915_gem_object *obj, *next; | ||
1709 | long count = 0; | ||
1710 | |||
1711 | list_for_each_entry_safe(obj, next, | ||
1712 | &dev_priv->mm.unbound_list, | ||
1713 | gtt_list) { | ||
1714 | if (i915_gem_object_is_purgeable(obj) && | ||
1715 | i915_gem_object_put_pages_gtt(obj) == 0) { | ||
1716 | count += obj->base.size >> PAGE_SHIFT; | ||
1717 | if (count >= target) | ||
1718 | return count; | ||
1719 | } | ||
1720 | } | ||
1721 | |||
1722 | list_for_each_entry_safe(obj, next, | ||
1723 | &dev_priv->mm.inactive_list, | ||
1724 | mm_list) { | ||
1725 | if (i915_gem_object_is_purgeable(obj) && | ||
1726 | i915_gem_object_unbind(obj) == 0 && | ||
1727 | i915_gem_object_put_pages_gtt(obj) == 0) { | ||
1728 | count += obj->base.size >> PAGE_SHIFT; | ||
1729 | if (count >= target) | ||
1730 | return count; | ||
1731 | } | ||
1732 | } | ||
1733 | |||
1734 | return count; | ||
1735 | } | ||
1736 | |||
1737 | static void | ||
1738 | i915_gem_shrink_all(struct drm_i915_private *dev_priv) | ||
1739 | { | ||
1740 | struct drm_i915_gem_object *obj, *next; | ||
1741 | |||
1742 | i915_gem_evict_everything(dev_priv->dev); | ||
1743 | |||
1744 | list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) | ||
1745 | i915_gem_object_put_pages_gtt(obj); | ||
1746 | } | ||
1747 | |||
1748 | int | ||
1749 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | ||
1750 | { | ||
1751 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | ||
1752 | int page_count, i; | ||
1753 | struct address_space *mapping; | ||
1754 | struct page *page; | ||
1755 | gfp_t gfp; | ||
1756 | |||
1757 | if (obj->pages || obj->sg_table) | ||
1758 | return 0; | ||
1759 | |||
1760 | /* Assert that the object is not currently in any GPU domain. As it | ||
1761 | * wasn't in the GTT, there shouldn't be any way it could have been in | ||
1762 | * a GPU cache | ||
1763 | */ | ||
1764 | BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); | ||
1765 | BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); | ||
1766 | |||
1767 | /* Get the list of pages out of our struct file. They'll be pinned | ||
1768 | * at this point until we release them. | ||
1769 | */ | ||
1770 | page_count = obj->base.size / PAGE_SIZE; | ||
1771 | obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); | ||
1772 | if (obj->pages == NULL) | ||
1773 | return -ENOMEM; | ||
1774 | |||
1775 | /* Fail silently without starting the shrinker */ | ||
1776 | mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | ||
1777 | gfp = mapping_gfp_mask(mapping); | ||
1778 | gfp |= __GFP_NORETRY | __GFP_NOWARN; | ||
1779 | gfp &= ~(__GFP_IO | __GFP_WAIT); | ||
1780 | for (i = 0; i < page_count; i++) { | ||
1781 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | ||
1782 | if (IS_ERR(page)) { | ||
1783 | i915_gem_purge(dev_priv, page_count); | ||
1784 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | ||
1785 | } | ||
1786 | if (IS_ERR(page)) { | ||
1787 | /* We've tried hard to allocate the memory by reaping | ||
1788 | * our own buffer, now let the real VM do its job and | ||
1789 | * go down in flames if truly OOM. | ||
1790 | */ | ||
1791 | gfp &= ~(__GFP_NORETRY | __GFP_NOWARN); | ||
1792 | gfp |= __GFP_IO | __GFP_WAIT; | ||
1793 | |||
1794 | i915_gem_shrink_all(dev_priv); | ||
1795 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | ||
1796 | if (IS_ERR(page)) | ||
1797 | goto err_pages; | ||
1798 | |||
1799 | gfp |= __GFP_NORETRY | __GFP_NOWARN; | ||
1800 | gfp &= ~(__GFP_IO | __GFP_WAIT); | ||
1801 | } | ||
1802 | |||
1803 | obj->pages[i] = page; | ||
1804 | } | ||
1805 | |||
1806 | if (i915_gem_object_needs_bit17_swizzle(obj)) | ||
1807 | i915_gem_object_do_bit_17_swizzle(obj); | ||
1808 | |||
1809 | list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); | ||
1810 | return 0; | ||
1811 | |||
1812 | err_pages: | ||
1813 | while (i--) | ||
1814 | page_cache_release(obj->pages[i]); | ||
1815 | |||
1816 | drm_free_large(obj->pages); | ||
1817 | obj->pages = NULL; | ||
1818 | return PTR_ERR(page); | ||
1420 | } | 1819 | } |
1421 | 1820 | ||
1422 | void | 1821 | void |
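Annotation: this is the core of the unbound-tracking rework. i915_gem_object_put_pages_gtt() now returns an error code, performs the set-to-CPU-domain flush that used to live in unbind, and truncates purgeable objects once their pages are gone; i915_gem_purge() reaps purgeable objects from the unbound and inactive lists; and i915_gem_object_get_pages_gtt() tries each shmem page with a cheap non-blocking gfp mask first, purges the driver's own caches on failure, and only then falls back to a fully blocking allocation after i915_gem_shrink_all(). The following is a self-contained user-space sketch of that three-step allocation ladder; alloc_cheap, alloc_blocking and reclaim_own_caches are toy stand-ins, not kernel APIs.

    /* Illustrative sketch of the allocation ladder in the new
     * i915_gem_object_get_pages_gtt(): cheap attempt, reclaim our own
     * purgeable pages and retry, then let the VM try hard and block. */
    #include <stdio.h>
    #include <stdlib.h>

    static int cheap_budget = 0;          /* pretend the cheap pool is empty  */

    static void *alloc_cheap(void)        /* ~ shmem read with __GFP_NORETRY  */
    {
        return cheap_budget-- > 0 ? malloc(4096) : NULL;
    }

    static void *alloc_blocking(void)     /* ~ shmem read with __GFP_WAIT set */
    {
        return malloc(4096);
    }

    static void reclaim_own_caches(void)  /* ~ i915_gem_purge()               */
    {
        cheap_budget += 1;                /* dropping our caches refills pool */
    }

    static void *get_one_page(void)
    {
        void *page = alloc_cheap();
        if (page)
            return page;

        reclaim_own_caches();             /* step 2: eat our own caches first */
        page = alloc_cheap();
        if (page)
            return page;

        return alloc_blocking();          /* step 3: last resort, may sleep   */
    }

    int main(void)
    {
        void *p = get_one_page();
        printf("got page at %p\n", p);
        free(p);
        return 0;
    }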
@@ -1486,32 +1885,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | |||
1486 | WARN_ON(i915_verify_lists(dev)); | 1885 | WARN_ON(i915_verify_lists(dev)); |
1487 | } | 1886 | } |
1488 | 1887 | ||
1489 | /* Immediately discard the backing storage */ | ||
1490 | static void | ||
1491 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) | ||
1492 | { | ||
1493 | struct inode *inode; | ||
1494 | |||
1495 | /* Our goal here is to return as much of the memory as | ||
1496 | * is possible back to the system as we are called from OOM. | ||
1497 | * To do this we must instruct the shmfs to drop all of its | ||
1498 | * backing pages, *now*. | ||
1499 | */ | ||
1500 | inode = obj->base.filp->f_path.dentry->d_inode; | ||
1501 | shmem_truncate_range(inode, 0, (loff_t)-1); | ||
1502 | |||
1503 | if (obj->base.map_list.map) | ||
1504 | drm_gem_free_mmap_offset(&obj->base); | ||
1505 | |||
1506 | obj->madv = __I915_MADV_PURGED; | ||
1507 | } | ||
1508 | |||
1509 | static inline int | ||
1510 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) | ||
1511 | { | ||
1512 | return obj->madv == I915_MADV_DONTNEED; | ||
1513 | } | ||
1514 | |||
1515 | static u32 | 1888 | static u32 |
1516 | i915_gem_get_seqno(struct drm_device *dev) | 1889 | i915_gem_get_seqno(struct drm_device *dev) |
1517 | { | 1890 | { |
@@ -1698,6 +2071,7 @@ void i915_gem_reset(struct drm_device *dev) | |||
1698 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; | 2071 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
1699 | } | 2072 | } |
1700 | 2073 | ||
2074 | |||
1701 | /* The fence registers are invalidated so clear them out */ | 2075 | /* The fence registers are invalidated so clear them out */ |
1702 | i915_gem_reset_fences(dev); | 2076 | i915_gem_reset_fences(dev); |
1703 | } | 2077 | } |
@@ -1821,197 +2195,6 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1821 | mutex_unlock(&dev->struct_mutex); | 2195 | mutex_unlock(&dev->struct_mutex); |
1822 | } | 2196 | } |
1823 | 2197 | ||
1824 | int | ||
1825 | i915_gem_check_wedge(struct drm_i915_private *dev_priv, | ||
1826 | bool interruptible) | ||
1827 | { | ||
1828 | if (atomic_read(&dev_priv->mm.wedged)) { | ||
1829 | struct completion *x = &dev_priv->error_completion; | ||
1830 | bool recovery_complete; | ||
1831 | unsigned long flags; | ||
1832 | |||
1833 | /* Give the error handler a chance to run. */ | ||
1834 | spin_lock_irqsave(&x->wait.lock, flags); | ||
1835 | recovery_complete = x->done > 0; | ||
1836 | spin_unlock_irqrestore(&x->wait.lock, flags); | ||
1837 | |||
1838 | /* Non-interruptible callers can't handle -EAGAIN, hence return | ||
1839 | * -EIO unconditionally for these. */ | ||
1840 | if (!interruptible) | ||
1841 | return -EIO; | ||
1842 | |||
1843 | /* Recovery complete, but still wedged means reset failure. */ | ||
1844 | if (recovery_complete) | ||
1845 | return -EIO; | ||
1846 | |||
1847 | return -EAGAIN; | ||
1848 | } | ||
1849 | |||
1850 | return 0; | ||
1851 | } | ||
1852 | |||
1853 | /* | ||
1854 | * Compare seqno against outstanding lazy request. Emit a request if they are | ||
1855 | * equal. | ||
1856 | */ | ||
1857 | static int | ||
1858 | i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) | ||
1859 | { | ||
1860 | int ret; | ||
1861 | |||
1862 | BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); | ||
1863 | |||
1864 | ret = 0; | ||
1865 | if (seqno == ring->outstanding_lazy_request) | ||
1866 | ret = i915_add_request(ring, NULL, NULL); | ||
1867 | |||
1868 | return ret; | ||
1869 | } | ||
1870 | |||
1871 | /** | ||
1872 | * __wait_seqno - wait until execution of seqno has finished | ||
1873 | * @ring: the ring expected to report seqno | ||
1874 | * @seqno: duh! | ||
1875 | * @interruptible: do an interruptible wait (normally yes) | ||
1876 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining | ||
1877 | * | ||
1878 | * Returns 0 if the seqno was found within the alloted time. Else returns the | ||
1879 | * errno with remaining time filled in timeout argument. | ||
1880 | */ | ||
1881 | static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | ||
1882 | bool interruptible, struct timespec *timeout) | ||
1883 | { | ||
1884 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||
1885 | struct timespec before, now, wait_time={1,0}; | ||
1886 | unsigned long timeout_jiffies; | ||
1887 | long end; | ||
1888 | bool wait_forever = true; | ||
1889 | int ret; | ||
1890 | |||
1891 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) | ||
1892 | return 0; | ||
1893 | |||
1894 | trace_i915_gem_request_wait_begin(ring, seqno); | ||
1895 | |||
1896 | if (timeout != NULL) { | ||
1897 | wait_time = *timeout; | ||
1898 | wait_forever = false; | ||
1899 | } | ||
1900 | |||
1901 | timeout_jiffies = timespec_to_jiffies(&wait_time); | ||
1902 | |||
1903 | if (WARN_ON(!ring->irq_get(ring))) | ||
1904 | return -ENODEV; | ||
1905 | |||
1906 | /* Record current time in case interrupted by signal, or wedged * */ | ||
1907 | getrawmonotonic(&before); | ||
1908 | |||
1909 | #define EXIT_COND \ | ||
1910 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ | ||
1911 | atomic_read(&dev_priv->mm.wedged)) | ||
1912 | do { | ||
1913 | if (interruptible) | ||
1914 | end = wait_event_interruptible_timeout(ring->irq_queue, | ||
1915 | EXIT_COND, | ||
1916 | timeout_jiffies); | ||
1917 | else | ||
1918 | end = wait_event_timeout(ring->irq_queue, EXIT_COND, | ||
1919 | timeout_jiffies); | ||
1920 | |||
1921 | ret = i915_gem_check_wedge(dev_priv, interruptible); | ||
1922 | if (ret) | ||
1923 | end = ret; | ||
1924 | } while (end == 0 && wait_forever); | ||
1925 | |||
1926 | getrawmonotonic(&now); | ||
1927 | |||
1928 | ring->irq_put(ring); | ||
1929 | trace_i915_gem_request_wait_end(ring, seqno); | ||
1930 | #undef EXIT_COND | ||
1931 | |||
1932 | if (timeout) { | ||
1933 | struct timespec sleep_time = timespec_sub(now, before); | ||
1934 | *timeout = timespec_sub(*timeout, sleep_time); | ||
1935 | } | ||
1936 | |||
1937 | switch (end) { | ||
1938 | case -EIO: | ||
1939 | case -EAGAIN: /* Wedged */ | ||
1940 | case -ERESTARTSYS: /* Signal */ | ||
1941 | return (int)end; | ||
1942 | case 0: /* Timeout */ | ||
1943 | if (timeout) | ||
1944 | set_normalized_timespec(timeout, 0, 0); | ||
1945 | return -ETIME; | ||
1946 | default: /* Completed */ | ||
1947 | WARN_ON(end < 0); /* We're not aware of other errors */ | ||
1948 | return 0; | ||
1949 | } | ||
1950 | } | ||
1951 | |||
1952 | /** | ||
1953 | * Waits for a sequence number to be signaled, and cleans up the | ||
1954 | * request and object lists appropriately for that event. | ||
1955 | */ | ||
1956 | int | ||
1957 | i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) | ||
1958 | { | ||
1959 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||
1960 | int ret = 0; | ||
1961 | |||
1962 | BUG_ON(seqno == 0); | ||
1963 | |||
1964 | ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); | ||
1965 | if (ret) | ||
1966 | return ret; | ||
1967 | |||
1968 | ret = i915_gem_check_olr(ring, seqno); | ||
1969 | if (ret) | ||
1970 | return ret; | ||
1971 | |||
1972 | ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL); | ||
1973 | |||
1974 | return ret; | ||
1975 | } | ||
1976 | |||
1977 | /** | ||
1978 | * Ensures that all rendering to the object has completed and the object is | ||
1979 | * safe to unbind from the GTT or access from the CPU. | ||
1980 | */ | ||
1981 | static __must_check int | ||
1982 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | ||
1983 | bool readonly) | ||
1984 | { | ||
1985 | u32 seqno; | ||
1986 | int ret; | ||
1987 | |||
1988 | /* If there is rendering queued on the buffer being evicted, wait for | ||
1989 | * it. | ||
1990 | */ | ||
1991 | if (readonly) | ||
1992 | seqno = obj->last_write_seqno; | ||
1993 | else | ||
1994 | seqno = obj->last_read_seqno; | ||
1995 | if (seqno == 0) | ||
1996 | return 0; | ||
1997 | |||
1998 | ret = i915_wait_seqno(obj->ring, seqno); | ||
1999 | if (ret) | ||
2000 | return ret; | ||
2001 | |||
2002 | /* Manually manage the write flush as we may have not yet retired | ||
2003 | * the buffer. | ||
2004 | */ | ||
2005 | if (obj->last_write_seqno && | ||
2006 | i915_seqno_passed(seqno, obj->last_write_seqno)) { | ||
2007 | obj->last_write_seqno = 0; | ||
2008 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
2009 | } | ||
2010 | |||
2011 | i915_gem_retire_requests_ring(obj->ring); | ||
2012 | return 0; | ||
2013 | } | ||
2014 | |||
2015 | /** | 2198 | /** |
2016 | * Ensures that an object will eventually get non-busy by flushing any required | 2199 | * Ensures that an object will eventually get non-busy by flushing any required |
2017 | * write domains, emitting any outstanding lazy request and retiring and | 2200 | * write domains, emitting any outstanding lazy request and retiring and |
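Annotation: only the removal side of i915_gem_check_wedge(), i915_gem_check_olr(), __wait_seqno(), i915_wait_seqno() and i915_gem_object_wait_rendering() is visible in this hunk; given the "Juggle code order to ease flow of the next patch" change in this series, these appear to be straight moves earlier in the file rather than deletions. One detail worth noting in __wait_seqno() is the timeout bookkeeping: the elapsed sleep is measured with getrawmonotonic() and subtracted from the caller's timespec so the remaining budget can be reported back. A standalone sketch of that pattern, using CLOCK_MONOTONIC as a user-space stand-in:

    /* Sketch of the "report time remaining" bookkeeping __wait_seqno() does:
     * measure the time spent around the wait and subtract it from the
     * caller-supplied timeout. */
    #include <stdio.h>
    #include <time.h>

    static struct timespec timespec_sub(struct timespec a, struct timespec b)
    {
        struct timespec r = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
        if (r.tv_nsec < 0) {
            r.tv_sec -= 1;
            r.tv_nsec += 1000000000L;
        }
        return r;
    }

    int main(void)
    {
        struct timespec before, now, timeout = { 1, 0 };     /* 1s budget */

        clock_gettime(CLOCK_MONOTONIC, &before);
        nanosleep(&(struct timespec){ 0, 250000000L }, NULL); /* the "wait" */
        clock_gettime(CLOCK_MONOTONIC, &now);

        timeout = timespec_sub(timeout, timespec_sub(now, before));
        printf("remaining: %ld.%09lds\n", (long)timeout.tv_sec, timeout.tv_nsec);
        return 0;
    }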
@@ -2199,6 +2382,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2199 | if (obj->pin_count) | 2382 | if (obj->pin_count) |
2200 | return -EBUSY; | 2383 | return -EBUSY; |
2201 | 2384 | ||
2385 | BUG_ON(obj->pages == NULL); | ||
2386 | |||
2202 | ret = i915_gem_object_finish_gpu(obj); | 2387 | ret = i915_gem_object_finish_gpu(obj); |
2203 | if (ret) | 2388 | if (ret) |
2204 | return ret; | 2389 | return ret; |
@@ -2209,22 +2394,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2209 | 2394 | ||
2210 | i915_gem_object_finish_gtt(obj); | 2395 | i915_gem_object_finish_gtt(obj); |
2211 | 2396 | ||
2212 | /* Move the object to the CPU domain to ensure that | ||
2213 | * any possible CPU writes while it's not in the GTT | ||
2214 | * are flushed when we go to remap it. | ||
2215 | */ | ||
2216 | if (ret == 0) | ||
2217 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | ||
2218 | if (ret == -ERESTARTSYS) | ||
2219 | return ret; | ||
2220 | if (ret) { | ||
2221 | /* In the event of a disaster, abandon all caches and | ||
2222 | * hope for the best. | ||
2223 | */ | ||
2224 | i915_gem_clflush_object(obj); | ||
2225 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; | ||
2226 | } | ||
2227 | |||
2228 | /* release the fence reg _after_ flushing */ | 2397 | /* release the fence reg _after_ flushing */ |
2229 | ret = i915_gem_object_put_fence(obj); | 2398 | ret = i915_gem_object_put_fence(obj); |
2230 | if (ret) | 2399 | if (ret) |
@@ -2240,10 +2409,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2240 | } | 2409 | } |
2241 | i915_gem_gtt_finish_object(obj); | 2410 | i915_gem_gtt_finish_object(obj); |
2242 | 2411 | ||
2243 | i915_gem_object_put_pages_gtt(obj); | 2412 | list_del(&obj->mm_list); |
2244 | 2413 | list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); | |
2245 | list_del_init(&obj->gtt_list); | ||
2246 | list_del_init(&obj->mm_list); | ||
2247 | /* Avoid an unnecessary call to unbind on rebind. */ | 2414 | /* Avoid an unnecessary call to unbind on rebind. */ |
2248 | obj->map_and_fenceable = true; | 2415 | obj->map_and_fenceable = true; |
2249 | 2416 | ||
@@ -2251,10 +2418,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2251 | obj->gtt_space = NULL; | 2418 | obj->gtt_space = NULL; |
2252 | obj->gtt_offset = 0; | 2419 | obj->gtt_offset = 0; |
2253 | 2420 | ||
2254 | if (i915_gem_object_is_purgeable(obj)) | 2421 | return 0; |
2255 | i915_gem_object_truncate(obj); | ||
2256 | |||
2257 | return ret; | ||
2258 | } | 2422 | } |
2259 | 2423 | ||
2260 | static int i915_ring_idle(struct intel_ring_buffer *ring) | 2424 | static int i915_ring_idle(struct intel_ring_buffer *ring) |
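Annotation: with pages tracked independently of the GTT binding, i915_gem_object_unbind() no longer flushes the object to the CPU domain or drops its backing storage; that work moved into i915_gem_object_put_pages_gtt() earlier in this diff. Unbinding now just moves the object from the bound list to the unbound list, so a later rebind finds the pages still in place. A toy user-space sketch of that list bookkeeping (simplified singly linked lists, not the kernel's list_head API):

    /* Minimal sketch of the bound/unbound split: unbinding moves the object
     * between lists instead of freeing its pages. */
    #include <stdio.h>

    struct obj { int has_pages; struct obj *next; };

    static void list_push(struct obj **head, struct obj *o)
    {
        o->next = *head;
        *head = o;
    }

    static struct obj *list_pop(struct obj **head)
    {
        struct obj *o = *head;
        if (o)
            *head = o->next;
        return o;
    }

    int main(void)
    {
        struct obj a = { .has_pages = 1 };
        struct obj *bound = NULL, *unbound = NULL;

        list_push(&bound, &a);                   /* bind into the GTT       */
        list_push(&unbound, list_pop(&bound));   /* unbind: pages are kept  */
        printf("unbound object still has pages: %d\n", unbound->has_pages);
        return 0;
    }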
@@ -2662,12 +2826,12 @@ static void i915_gem_verify_gtt(struct drm_device *dev) | |||
2662 | static int | 2826 | static int |
2663 | i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | 2827 | i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
2664 | unsigned alignment, | 2828 | unsigned alignment, |
2665 | bool map_and_fenceable) | 2829 | bool map_and_fenceable, |
2830 | bool nonblocking) | ||
2666 | { | 2831 | { |
2667 | struct drm_device *dev = obj->base.dev; | 2832 | struct drm_device *dev = obj->base.dev; |
2668 | drm_i915_private_t *dev_priv = dev->dev_private; | 2833 | drm_i915_private_t *dev_priv = dev->dev_private; |
2669 | struct drm_mm_node *free_space; | 2834 | struct drm_mm_node *free_space; |
2670 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | ||
2671 | u32 size, fence_size, fence_alignment, unfenced_alignment; | 2835 | u32 size, fence_size, fence_alignment, unfenced_alignment; |
2672 | bool mappable, fenceable; | 2836 | bool mappable, fenceable; |
2673 | int ret; | 2837 | int ret; |
@@ -2707,6 +2871,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2707 | return -E2BIG; | 2871 | return -E2BIG; |
2708 | } | 2872 | } |
2709 | 2873 | ||
2874 | ret = i915_gem_object_get_pages_gtt(obj); | ||
2875 | if (ret) | ||
2876 | return ret; | ||
2877 | |||
2710 | search_free: | 2878 | search_free: |
2711 | if (map_and_fenceable) | 2879 | if (map_and_fenceable) |
2712 | free_space = | 2880 | free_space = |
@@ -2733,12 +2901,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2733 | false); | 2901 | false); |
2734 | } | 2902 | } |
2735 | if (obj->gtt_space == NULL) { | 2903 | if (obj->gtt_space == NULL) { |
2736 | /* If the gtt is empty and we're still having trouble | ||
2737 | * fitting our object in, we're out of memory. | ||
2738 | */ | ||
2739 | ret = i915_gem_evict_something(dev, size, alignment, | 2904 | ret = i915_gem_evict_something(dev, size, alignment, |
2740 | obj->cache_level, | 2905 | obj->cache_level, |
2741 | map_and_fenceable); | 2906 | map_and_fenceable, |
2907 | nonblocking); | ||
2742 | if (ret) | 2908 | if (ret) |
2743 | return ret; | 2909 | return ret; |
2744 | 2910 | ||
@@ -2752,55 +2918,20 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2752 | return -EINVAL; | 2918 | return -EINVAL; |
2753 | } | 2919 | } |
2754 | 2920 | ||
2755 | ret = i915_gem_object_get_pages_gtt(obj, gfpmask); | ||
2756 | if (ret) { | ||
2757 | drm_mm_put_block(obj->gtt_space); | ||
2758 | obj->gtt_space = NULL; | ||
2759 | |||
2760 | if (ret == -ENOMEM) { | ||
2761 | /* first try to reclaim some memory by clearing the GTT */ | ||
2762 | ret = i915_gem_evict_everything(dev, false); | ||
2763 | if (ret) { | ||
2764 | /* now try to shrink everyone else */ | ||
2765 | if (gfpmask) { | ||
2766 | gfpmask = 0; | ||
2767 | goto search_free; | ||
2768 | } | ||
2769 | |||
2770 | return -ENOMEM; | ||
2771 | } | ||
2772 | |||
2773 | goto search_free; | ||
2774 | } | ||
2775 | |||
2776 | return ret; | ||
2777 | } | ||
2778 | 2921 | ||
2779 | ret = i915_gem_gtt_prepare_object(obj); | 2922 | ret = i915_gem_gtt_prepare_object(obj); |
2780 | if (ret) { | 2923 | if (ret) { |
2781 | i915_gem_object_put_pages_gtt(obj); | ||
2782 | drm_mm_put_block(obj->gtt_space); | 2924 | drm_mm_put_block(obj->gtt_space); |
2783 | obj->gtt_space = NULL; | 2925 | obj->gtt_space = NULL; |
2784 | 2926 | return ret; | |
2785 | if (i915_gem_evict_everything(dev, false)) | ||
2786 | return ret; | ||
2787 | |||
2788 | goto search_free; | ||
2789 | } | 2927 | } |
2790 | 2928 | ||
2791 | if (!dev_priv->mm.aliasing_ppgtt) | 2929 | if (!dev_priv->mm.aliasing_ppgtt) |
2792 | i915_gem_gtt_bind_object(obj, obj->cache_level); | 2930 | i915_gem_gtt_bind_object(obj, obj->cache_level); |
2793 | 2931 | ||
2794 | list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); | 2932 | list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); |
2795 | list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | 2933 | list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
2796 | 2934 | ||
2797 | /* Assert that the object is not currently in any GPU domain. As it | ||
2798 | * wasn't in the GTT, there shouldn't be any way it could have been in | ||
2799 | * a GPU cache | ||
2800 | */ | ||
2801 | BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); | ||
2802 | BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); | ||
2803 | |||
2804 | obj->gtt_offset = obj->gtt_space->start; | 2935 | obj->gtt_offset = obj->gtt_space->start; |
2805 | 2936 | ||
2806 | fenceable = | 2937 | fenceable = |
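Annotation: i915_gem_object_bind_to_gtt() now acquires the backing pages up front, before carving out a drm_mm node, so the old recovery path for a page-allocation failure (drop the node, evict everything, goto search_free with a different gfpmask) disappears; only GTT space exhaustion still triggers i915_gem_evict_something() and a retry, and the GPU-domain assertions move into get_pages. The loop that remains is the classic reserve-or-evict-and-retry shape, sketched below with a toy fixed-slot "GTT" (reserve_slot and evict_one are illustrative only):

    /* Rough sketch of the simplified bind loop: reserve space, and if that
     * fails evict something and try again. */
    #include <stdio.h>

    #define SLOTS 2
    static int slot_used[SLOTS] = { 1, 1 };      /* start fully occupied */

    static int reserve_slot(void)
    {
        for (int i = 0; i < SLOTS; i++)
            if (!slot_used[i]) {
                slot_used[i] = 1;
                return i;
            }
        return -1;
    }

    static int evict_one(void)
    {
        for (int i = 0; i < SLOTS; i++)
            if (slot_used[i]) {
                slot_used[i] = 0;
                return 0;
            }
        return -1;                               /* nothing left to evict */
    }

    int main(void)
    {
        int slot;
        while ((slot = reserve_slot()) < 0)      /* search_free ... evict ... retry */
            if (evict_one() < 0)
                return 1;
        printf("bound into slot %d\n", slot);
        return 0;
    }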
@@ -3113,7 +3244,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | |||
3113 | * (e.g. libkms for the bootup splash), we have to ensure that we | 3244 | * (e.g. libkms for the bootup splash), we have to ensure that we |
3114 | * always use map_and_fenceable for all scanout buffers. | 3245 | * always use map_and_fenceable for all scanout buffers. |
3115 | */ | 3246 | */ |
3116 | ret = i915_gem_object_pin(obj, alignment, true); | 3247 | ret = i915_gem_object_pin(obj, alignment, true, false); |
3117 | if (ret) | 3248 | if (ret) |
3118 | return ret; | 3249 | return ret; |
3119 | 3250 | ||
@@ -3250,7 +3381,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
3250 | int | 3381 | int |
3251 | i915_gem_object_pin(struct drm_i915_gem_object *obj, | 3382 | i915_gem_object_pin(struct drm_i915_gem_object *obj, |
3252 | uint32_t alignment, | 3383 | uint32_t alignment, |
3253 | bool map_and_fenceable) | 3384 | bool map_and_fenceable, |
3385 | bool nonblocking) | ||
3254 | { | 3386 | { |
3255 | int ret; | 3387 | int ret; |
3256 | 3388 | ||
@@ -3274,7 +3406,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, | |||
3274 | 3406 | ||
3275 | if (obj->gtt_space == NULL) { | 3407 | if (obj->gtt_space == NULL) { |
3276 | ret = i915_gem_object_bind_to_gtt(obj, alignment, | 3408 | ret = i915_gem_object_bind_to_gtt(obj, alignment, |
3277 | map_and_fenceable); | 3409 | map_and_fenceable, |
3410 | nonblocking); | ||
3278 | if (ret) | 3411 | if (ret) |
3279 | return ret; | 3412 | return ret; |
3280 | } | 3413 | } |
@@ -3332,7 +3465,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
3332 | obj->user_pin_count++; | 3465 | obj->user_pin_count++; |
3333 | obj->pin_filp = file; | 3466 | obj->pin_filp = file; |
3334 | if (obj->user_pin_count == 1) { | 3467 | if (obj->user_pin_count == 1) { |
3335 | ret = i915_gem_object_pin(obj, args->alignment, true); | 3468 | ret = i915_gem_object_pin(obj, args->alignment, true, false); |
3336 | if (ret) | 3469 | if (ret) |
3337 | goto out; | 3470 | goto out; |
3338 | } | 3471 | } |
@@ -3464,9 +3597,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
3464 | if (obj->madv != __I915_MADV_PURGED) | 3597 | if (obj->madv != __I915_MADV_PURGED) |
3465 | obj->madv = args->madv; | 3598 | obj->madv = args->madv; |
3466 | 3599 | ||
3467 | /* if the object is no longer bound, discard its backing storage */ | 3600 | /* if the object is no longer attached, discard its backing storage */ |
3468 | if (i915_gem_object_is_purgeable(obj) && | 3601 | if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL) |
3469 | obj->gtt_space == NULL) | ||
3470 | i915_gem_object_truncate(obj); | 3602 | i915_gem_object_truncate(obj); |
3471 | 3603 | ||
3472 | args->retained = obj->madv != __I915_MADV_PURGED; | 3604 | args->retained = obj->madv != __I915_MADV_PURGED; |
@@ -3478,10 +3610,26 @@ unlock: | |||
3478 | return ret; | 3610 | return ret; |
3479 | } | 3611 | } |
3480 | 3612 | ||
3613 | void i915_gem_object_init(struct drm_i915_gem_object *obj) | ||
3614 | { | ||
3615 | obj->base.driver_private = NULL; | ||
3616 | |||
3617 | INIT_LIST_HEAD(&obj->mm_list); | ||
3618 | INIT_LIST_HEAD(&obj->gtt_list); | ||
3619 | INIT_LIST_HEAD(&obj->ring_list); | ||
3620 | INIT_LIST_HEAD(&obj->exec_list); | ||
3621 | |||
3622 | obj->fence_reg = I915_FENCE_REG_NONE; | ||
3623 | obj->madv = I915_MADV_WILLNEED; | ||
3624 | /* Avoid an unnecessary call to unbind on the first bind. */ | ||
3625 | obj->map_and_fenceable = true; | ||
3626 | |||
3627 | i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); | ||
3628 | } | ||
3629 | |||
3481 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | 3630 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
3482 | size_t size) | 3631 | size_t size) |
3483 | { | 3632 | { |
3484 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3485 | struct drm_i915_gem_object *obj; | 3633 | struct drm_i915_gem_object *obj; |
3486 | struct address_space *mapping; | 3634 | struct address_space *mapping; |
3487 | u32 mask; | 3635 | u32 mask; |
@@ -3505,7 +3653,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | |||
3505 | mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | 3653 | mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
3506 | mapping_set_gfp_mask(mapping, mask); | 3654 | mapping_set_gfp_mask(mapping, mask); |
3507 | 3655 | ||
3508 | i915_gem_info_add_obj(dev_priv, size); | 3656 | i915_gem_object_init(obj); |
3509 | 3657 | ||
3510 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 3658 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
3511 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; | 3659 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; |
@@ -3527,16 +3675,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | |||
3527 | } else | 3675 | } else |
3528 | obj->cache_level = I915_CACHE_NONE; | 3676 | obj->cache_level = I915_CACHE_NONE; |
3529 | 3677 | ||
3530 | obj->base.driver_private = NULL; | ||
3531 | obj->fence_reg = I915_FENCE_REG_NONE; | ||
3532 | INIT_LIST_HEAD(&obj->mm_list); | ||
3533 | INIT_LIST_HEAD(&obj->gtt_list); | ||
3534 | INIT_LIST_HEAD(&obj->ring_list); | ||
3535 | INIT_LIST_HEAD(&obj->exec_list); | ||
3536 | obj->madv = I915_MADV_WILLNEED; | ||
3537 | /* Avoid an unnecessary call to unbind on the first bind. */ | ||
3538 | obj->map_and_fenceable = true; | ||
3539 | |||
3540 | return obj; | 3678 | return obj; |
3541 | } | 3679 | } |
3542 | 3680 | ||
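Annotation: the field setup that used to sit inline in i915_gem_alloc_object() is pulled out into i915_gem_object_init(), so other allocation paths (the dmabuf import path, per the "Extract general object init routine" patch in this series) can share one constructor and new fields only need to be initialised in one place. A schematic of that shared-constructor pattern, with made-up field values standing in for the driver's constants:

    /* Sketch of the shared-constructor pattern: every allocation path funnels
     * through one init helper. */
    #include <stdlib.h>
    #include <string.h>

    struct gem_object {
        int fence_reg;
        int madv;
        int map_and_fenceable;
    };

    static void gem_object_init(struct gem_object *obj)
    {
        memset(obj, 0, sizeof(*obj));
        obj->fence_reg = -1;            /* ~ I915_FENCE_REG_NONE      */
        obj->madv = 0;                  /* ~ I915_MADV_WILLNEED       */
        obj->map_and_fenceable = 1;     /* avoid unbind on first bind */
    }

    static struct gem_object *gem_alloc_object(void)   /* shmem-backed path */
    {
        struct gem_object *obj = malloc(sizeof(*obj));
        if (obj)
            gem_object_init(obj);
        return obj;
    }

    int main(void)
    {
        struct gem_object *obj = gem_alloc_object();
        free(obj);
        return 0;
    }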
@@ -3573,8 +3711,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
3573 | dev_priv->mm.interruptible = was_interruptible; | 3711 | dev_priv->mm.interruptible = was_interruptible; |
3574 | } | 3712 | } |
3575 | 3713 | ||
3576 | if (obj->base.map_list.map) | 3714 | i915_gem_object_put_pages_gtt(obj); |
3577 | drm_gem_free_mmap_offset(&obj->base); | 3715 | i915_gem_object_free_mmap_offset(obj); |
3578 | 3716 | ||
3579 | drm_gem_object_release(&obj->base); | 3717 | drm_gem_object_release(&obj->base); |
3580 | i915_gem_info_remove_obj(dev_priv, obj->base.size); | 3718 | i915_gem_info_remove_obj(dev_priv, obj->base.size); |
@@ -3605,7 +3743,7 @@ i915_gem_idle(struct drm_device *dev) | |||
3605 | 3743 | ||
3606 | /* Under UMS, be paranoid and evict. */ | 3744 | /* Under UMS, be paranoid and evict. */ |
3607 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 3745 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
3608 | i915_gem_evict_everything(dev, false); | 3746 | i915_gem_evict_everything(dev); |
3609 | 3747 | ||
3610 | i915_gem_reset_fences(dev); | 3748 | i915_gem_reset_fences(dev); |
3611 | 3749 | ||
@@ -3963,8 +4101,9 @@ i915_gem_load(struct drm_device *dev) | |||
3963 | 4101 | ||
3964 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | 4102 | INIT_LIST_HEAD(&dev_priv->mm.active_list); |
3965 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4103 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
4104 | INIT_LIST_HEAD(&dev_priv->mm.unbound_list); | ||
4105 | INIT_LIST_HEAD(&dev_priv->mm.bound_list); | ||
3966 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 4106 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
3967 | INIT_LIST_HEAD(&dev_priv->mm.gtt_list); | ||
3968 | for (i = 0; i < I915_NUM_RINGS; i++) | 4107 | for (i = 0; i < I915_NUM_RINGS; i++) |
3969 | init_ring_lists(&dev_priv->ring[i]); | 4108 | init_ring_lists(&dev_priv->ring[i]); |
3970 | for (i = 0; i < I915_MAX_NUM_FENCES; i++) | 4109 | for (i = 0; i < I915_MAX_NUM_FENCES; i++) |
@@ -4209,13 +4348,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) | |||
4209 | } | 4348 | } |
4210 | 4349 | ||
4211 | static int | 4350 | static int |
4212 | i915_gpu_is_active(struct drm_device *dev) | ||
4213 | { | ||
4214 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4215 | return !list_empty(&dev_priv->mm.active_list); | ||
4216 | } | ||
4217 | |||
4218 | static int | ||
4219 | i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) | 4351 | i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) |
4220 | { | 4352 | { |
4221 | struct drm_i915_private *dev_priv = | 4353 | struct drm_i915_private *dev_priv = |
@@ -4223,60 +4355,26 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) | |||
4223 | struct drm_i915_private, | 4355 | struct drm_i915_private, |
4224 | mm.inactive_shrinker); | 4356 | mm.inactive_shrinker); |
4225 | struct drm_device *dev = dev_priv->dev; | 4357 | struct drm_device *dev = dev_priv->dev; |
4226 | struct drm_i915_gem_object *obj, *next; | 4358 | struct drm_i915_gem_object *obj; |
4227 | int nr_to_scan = sc->nr_to_scan; | 4359 | int nr_to_scan = sc->nr_to_scan; |
4228 | int cnt; | 4360 | int cnt; |
4229 | 4361 | ||
4230 | if (!mutex_trylock(&dev->struct_mutex)) | 4362 | if (!mutex_trylock(&dev->struct_mutex)) |
4231 | return 0; | 4363 | return 0; |
4232 | 4364 | ||
4233 | /* "fast-path" to count number of available objects */ | 4365 | if (nr_to_scan) { |
4234 | if (nr_to_scan == 0) { | 4366 | nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); |
4235 | cnt = 0; | 4367 | if (nr_to_scan > 0) |
4236 | list_for_each_entry(obj, | 4368 | i915_gem_shrink_all(dev_priv); |
4237 | &dev_priv->mm.inactive_list, | ||
4238 | mm_list) | ||
4239 | cnt++; | ||
4240 | mutex_unlock(&dev->struct_mutex); | ||
4241 | return cnt / 100 * sysctl_vfs_cache_pressure; | ||
4242 | } | 4369 | } |
4243 | 4370 | ||
4244 | rescan: | ||
4245 | /* first scan for clean buffers */ | ||
4246 | i915_gem_retire_requests(dev); | ||
4247 | |||
4248 | list_for_each_entry_safe(obj, next, | ||
4249 | &dev_priv->mm.inactive_list, | ||
4250 | mm_list) { | ||
4251 | if (i915_gem_object_is_purgeable(obj)) { | ||
4252 | if (i915_gem_object_unbind(obj) == 0 && | ||
4253 | --nr_to_scan == 0) | ||
4254 | break; | ||
4255 | } | ||
4256 | } | ||
4257 | |||
4258 | /* second pass, evict/count anything still on the inactive list */ | ||
4259 | cnt = 0; | 4371 | cnt = 0; |
4260 | list_for_each_entry_safe(obj, next, | 4372 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) |
4261 | &dev_priv->mm.inactive_list, | 4373 | cnt += obj->base.size >> PAGE_SHIFT; |
4262 | mm_list) { | 4374 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) |
4263 | if (nr_to_scan && | 4375 | if (obj->pin_count == 0) |
4264 | i915_gem_object_unbind(obj) == 0) | 4376 | cnt += obj->base.size >> PAGE_SHIFT; |
4265 | nr_to_scan--; | ||
4266 | else | ||
4267 | cnt++; | ||
4268 | } | ||
4269 | 4377 | ||
4270 | if (nr_to_scan && i915_gpu_is_active(dev)) { | ||
4271 | /* | ||
4272 | * We are desperate for pages, so as a last resort, wait | ||
4273 | * for the GPU to finish and discard whatever we can. | ||
4274 | * This has a dramatic impact to reduce the number of | ||
4275 | * OOM-killer events whilst running the GPU aggressively. | ||
4276 | */ | ||
4277 | if (i915_gpu_idle(dev) == 0) | ||
4278 | goto rescan; | ||
4279 | } | ||
4280 | mutex_unlock(&dev->struct_mutex); | 4378 | mutex_unlock(&dev->struct_mutex); |
4281 | return cnt / 100 * sysctl_vfs_cache_pressure; | 4379 | return cnt; |
4282 | } | 4380 | } |
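Annotation: the rewritten shrinker callback drops the rescan/GPU-idle dance entirely. When asked to scan it first purges DONTNEED objects via i915_gem_purge() and, if that did not cover the target, drops every droppable page with i915_gem_shrink_all(); the value returned is now a page count summed over the unbound list plus unpinned bound objects, without the old sysctl_vfs_cache_pressure scaling. A toy model of that scan-then-count structure (plain arrays, not the kernel shrinker API):

    /* Toy model of the new shrinker: optional scan phase that frees
     * purgeable, unpinned objects, then a count of reclaimable pages. */
    #include <stdio.h>

    struct bo { long pages; int purgeable; int pinned; };

    static long shrink(struct bo *objs, int n, long nr_to_scan)
    {
        long cnt = 0;

        if (nr_to_scan) {                         /* scan phase: free things */
            for (int i = 0; i < n && nr_to_scan > 0; i++)
                if (objs[i].purgeable && !objs[i].pinned) {
                    nr_to_scan -= objs[i].pages;
                    objs[i].pages = 0;            /* "truncate" the object   */
                }
        }

        for (int i = 0; i < n; i++)               /* count phase: what's left */
            if (!objs[i].pinned)
                cnt += objs[i].pages;
        return cnt;
    }

    int main(void)
    {
        struct bo objs[] = { { 16, 1, 0 }, { 8, 0, 0 }, { 4, 0, 1 } };
        printf("reclaimable pages left: %ld\n", shrink(objs, 3, 16));
        return 0;
    }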
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 5c2d354cebbd..4aa7ecf77ede 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -221,7 +221,7 @@ static int create_default_context(struct drm_i915_private *dev_priv) | |||
221 | * default context. | 221 | * default context. |
222 | */ | 222 | */ |
223 | dev_priv->ring[RCS].default_context = ctx; | 223 | dev_priv->ring[RCS].default_context = ctx; |
224 | ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); | 224 | ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false); |
225 | if (ret) | 225 | if (ret) |
226 | goto err_destroy; | 226 | goto err_destroy; |
227 | 227 | ||
@@ -374,7 +374,7 @@ static int do_switch(struct i915_hw_context *to) | |||
374 | if (from_obj == to->obj) | 374 | if (from_obj == to->obj) |
375 | return 0; | 375 | return 0; |
376 | 376 | ||
377 | ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); | 377 | ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false); |
378 | if (ret) | 378 | if (ret) |
379 | return ret; | 379 | return ret; |
380 | 380 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index aa308e1337db..43c95307f99e 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c | |||
@@ -33,7 +33,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme | |||
33 | struct drm_i915_gem_object *obj = attachment->dmabuf->priv; | 33 | struct drm_i915_gem_object *obj = attachment->dmabuf->priv; |
34 | struct drm_device *dev = obj->base.dev; | 34 | struct drm_device *dev = obj->base.dev; |
35 | int npages = obj->base.size / PAGE_SIZE; | 35 | int npages = obj->base.size / PAGE_SIZE; |
36 | struct sg_table *sg = NULL; | 36 | struct sg_table *sg; |
37 | int ret; | 37 | int ret; |
38 | int nents; | 38 | int nents; |
39 | 39 | ||
@@ -41,10 +41,10 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme | |||
41 | if (ret) | 41 | if (ret) |
42 | return ERR_PTR(ret); | 42 | return ERR_PTR(ret); |
43 | 43 | ||
44 | if (!obj->pages) { | 44 | ret = i915_gem_object_get_pages_gtt(obj); |
45 | ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN); | 45 | if (ret) { |
46 | if (ret) | 46 | sg = ERR_PTR(ret); |
47 | goto out; | 47 | goto out; |
48 | } | 48 | } |
49 | 49 | ||
50 | /* link the pages into an SG then map the sg */ | 50 | /* link the pages into an SG then map the sg */ |
@@ -89,12 +89,10 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) | |||
89 | goto out_unlock; | 89 | goto out_unlock; |
90 | } | 90 | } |
91 | 91 | ||
92 | if (!obj->pages) { | 92 | ret = i915_gem_object_get_pages_gtt(obj); |
93 | ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN); | 93 | if (ret) { |
94 | if (ret) { | 94 | mutex_unlock(&dev->struct_mutex); |
95 | mutex_unlock(&dev->struct_mutex); | 95 | return ERR_PTR(ret); |
96 | return ERR_PTR(ret); | ||
97 | } | ||
98 | } | 96 | } |
99 | 97 | ||
100 | obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); | 98 | obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); |
@@ -151,6 +149,22 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * | |||
151 | return -EINVAL; | 149 | return -EINVAL; |
152 | } | 150 | } |
153 | 151 | ||
152 | static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction) | ||
153 | { | ||
154 | struct drm_i915_gem_object *obj = dma_buf->priv; | ||
155 | struct drm_device *dev = obj->base.dev; | ||
156 | int ret; | ||
157 | bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); | ||
158 | |||
159 | ret = i915_mutex_lock_interruptible(dev); | ||
160 | if (ret) | ||
161 | return ret; | ||
162 | |||
163 | ret = i915_gem_object_set_to_cpu_domain(obj, write); | ||
164 | mutex_unlock(&dev->struct_mutex); | ||
165 | return ret; | ||
166 | } | ||
167 | |||
154 | static const struct dma_buf_ops i915_dmabuf_ops = { | 168 | static const struct dma_buf_ops i915_dmabuf_ops = { |
155 | .map_dma_buf = i915_gem_map_dma_buf, | 169 | .map_dma_buf = i915_gem_map_dma_buf, |
156 | .unmap_dma_buf = i915_gem_unmap_dma_buf, | 170 | .unmap_dma_buf = i915_gem_unmap_dma_buf, |
@@ -162,6 +176,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = { | |||
162 | .mmap = i915_gem_dmabuf_mmap, | 176 | .mmap = i915_gem_dmabuf_mmap, |
163 | .vmap = i915_gem_dmabuf_vmap, | 177 | .vmap = i915_gem_dmabuf_vmap, |
164 | .vunmap = i915_gem_dmabuf_vunmap, | 178 | .vunmap = i915_gem_dmabuf_vunmap, |
179 | .begin_cpu_access = i915_gem_begin_cpu_access, | ||
165 | }; | 180 | }; |
166 | 181 | ||
167 | struct dma_buf *i915_gem_prime_export(struct drm_device *dev, | 182 | struct dma_buf *i915_gem_prime_export(struct drm_device *dev, |
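Annotation: the new .begin_cpu_access hook moves the exported object into the CPU domain (with write access when the direction implies the CPU will modify the pages) before an importer touches them; note that no matching .end_cpu_access is wired up in this patch. For context, an importer is expected to bracket direct CPU access with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(). The snippet below is only a schematic of that calling pattern, with signatures as of this kernel generation and the actual page access elided; check dma-buf.h rather than trusting it verbatim.

    /* Hedged sketch (not from this patch) of the importer-side bracket that
     * ends up invoking the i915 begin hook added above. */
    #include <linux/dma-buf.h>

    static int touch_buffer_on_cpu(struct dma_buf *buf, void *vaddr, size_t len)
    {
        int ret;

        ret = dma_buf_begin_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
        if (ret)
            return ret;

        (void)vaddr;   /* ... CPU reads/writes would go through this mapping ... */

        dma_buf_end_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
        return 0;
    }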
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 7279c31d4a9a..a2d8acde8550 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -45,7 +45,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) | |||
45 | int | 45 | int |
46 | i915_gem_evict_something(struct drm_device *dev, int min_size, | 46 | i915_gem_evict_something(struct drm_device *dev, int min_size, |
47 | unsigned alignment, unsigned cache_level, | 47 | unsigned alignment, unsigned cache_level, |
48 | bool mappable) | 48 | bool mappable, bool nonblocking) |
49 | { | 49 | { |
50 | drm_i915_private_t *dev_priv = dev->dev_private; | 50 | drm_i915_private_t *dev_priv = dev->dev_private; |
51 | struct list_head eviction_list, unwind_list; | 51 | struct list_head eviction_list, unwind_list; |
@@ -92,12 +92,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
92 | goto found; | 92 | goto found; |
93 | } | 93 | } |
94 | 94 | ||
95 | if (nonblocking) | ||
96 | goto none; | ||
97 | |||
95 | /* Now merge in the soon-to-be-expired objects... */ | 98 | /* Now merge in the soon-to-be-expired objects... */ |
96 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | 99 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
97 | if (mark_free(obj, &unwind_list)) | 100 | if (mark_free(obj, &unwind_list)) |
98 | goto found; | 101 | goto found; |
99 | } | 102 | } |
100 | 103 | ||
104 | none: | ||
101 | /* Nothing found, clean up and bail out! */ | 105 | /* Nothing found, clean up and bail out! */ |
102 | while (!list_empty(&unwind_list)) { | 106 | while (!list_empty(&unwind_list)) { |
103 | obj = list_first_entry(&unwind_list, | 107 | obj = list_first_entry(&unwind_list, |
@@ -148,7 +152,7 @@ found: | |||
148 | } | 152 | } |
149 | 153 | ||
150 | int | 154 | int |
151 | i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) | 155 | i915_gem_evict_everything(struct drm_device *dev) |
152 | { | 156 | { |
153 | drm_i915_private_t *dev_priv = dev->dev_private; | 157 | drm_i915_private_t *dev_priv = dev->dev_private; |
154 | struct drm_i915_gem_object *obj, *next; | 158 | struct drm_i915_gem_object *obj, *next; |
@@ -160,7 +164,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) | |||
160 | if (lists_empty) | 164 | if (lists_empty) |
161 | return -ENOSPC; | 165 | return -ENOSPC; |
162 | 166 | ||
163 | trace_i915_gem_evict_everything(dev, purgeable_only); | 167 | trace_i915_gem_evict_everything(dev); |
164 | 168 | ||
165 | /* The gpu_idle will flush everything in the write domain to the | 169 | /* The gpu_idle will flush everything in the write domain to the |
166 | * active list. Then we must move everything off the active list | 170 | * active list. Then we must move everything off the active list |
@@ -174,12 +178,9 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) | |||
174 | 178 | ||
175 | /* Having flushed everything, unbind() should never raise an error */ | 179 | /* Having flushed everything, unbind() should never raise an error */ |
176 | list_for_each_entry_safe(obj, next, | 180 | list_for_each_entry_safe(obj, next, |
177 | &dev_priv->mm.inactive_list, mm_list) { | 181 | &dev_priv->mm.inactive_list, mm_list) |
178 | if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { | 182 | if (obj->pin_count == 0) |
179 | if (obj->pin_count == 0) | 183 | WARN_ON(i915_gem_object_unbind(obj)); |
180 | WARN_ON(i915_gem_object_unbind(obj)); | ||
181 | } | ||
182 | } | ||
183 | 184 | ||
184 | return 0; | 185 | return 0; |
185 | } | 186 | } |
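Annotation: i915_gem_evict_something() gains a nonblocking flag: when set, the search stops after the inactive list and never merges in active (still-rendering) objects, so callers such as the new nonblocking pin path cannot end up stalling on the GPU. i915_gem_evict_everything() loses its purgeable_only mode, since purging is now handled separately by i915_gem_purge(). A tiny sketch of that early cut-off (find_idle_victim/find_active_victim are illustrative stand-ins):

    /* Sketch of the nonblocking cut-off: phase one looks at idle candidates,
     * phase two (which may imply waiting on the GPU) is skipped when the
     * caller asked for a nonblocking attempt. */
    #include <stdio.h>
    #include <stdbool.h>

    static int find_idle_victim(void)   { return -1; }  /* pretend none fit  */
    static int find_active_victim(void) { return  7; }  /* would need a wait */

    static int pick_victim(bool nonblocking)
    {
        int v = find_idle_victim();
        if (v >= 0)
            return v;
        if (nonblocking)
            return -1;               /* give up rather than stall on the GPU */
        return find_active_victim();
    }

    int main(void)
    {
        printf("nonblocking: %d, blocking: %d\n",
               pick_victim(true), pick_victim(false));
        return 0;
    }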
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index afb312ee050c..e6b2205ecf6d 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -95,6 +95,7 @@ eb_destroy(struct eb_objects *eb) | |||
95 | static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) | 95 | static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) |
96 | { | 96 | { |
97 | return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || | 97 | return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || |
98 | !obj->map_and_fenceable || | ||
98 | obj->cache_level != I915_CACHE_NONE); | 99 | obj->cache_level != I915_CACHE_NONE); |
99 | } | 100 | } |
100 | 101 | ||
@@ -330,7 +331,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, | |||
330 | return ret; | 331 | return ret; |
331 | } | 332 | } |
332 | 333 | ||
333 | #define __EXEC_OBJECT_HAS_FENCE (1<<31) | 334 | #define __EXEC_OBJECT_HAS_PIN (1<<31) |
335 | #define __EXEC_OBJECT_HAS_FENCE (1<<30) | ||
334 | 336 | ||
335 | static int | 337 | static int |
336 | need_reloc_mappable(struct drm_i915_gem_object *obj) | 338 | need_reloc_mappable(struct drm_i915_gem_object *obj) |
@@ -340,9 +342,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj) | |||
340 | } | 342 | } |
341 | 343 | ||
342 | static int | 344 | static int |
343 | pin_and_fence_object(struct drm_i915_gem_object *obj, | 345 | i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, |
344 | struct intel_ring_buffer *ring) | 346 | struct intel_ring_buffer *ring) |
345 | { | 347 | { |
348 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | ||
346 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | 349 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; |
347 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | 350 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; |
348 | bool need_fence, need_mappable; | 351 | bool need_fence, need_mappable; |
@@ -354,15 +357,17 @@ pin_and_fence_object(struct drm_i915_gem_object *obj, | |||
354 | obj->tiling_mode != I915_TILING_NONE; | 357 | obj->tiling_mode != I915_TILING_NONE; |
355 | need_mappable = need_fence || need_reloc_mappable(obj); | 358 | need_mappable = need_fence || need_reloc_mappable(obj); |
356 | 359 | ||
357 | ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); | 360 | ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false); |
358 | if (ret) | 361 | if (ret) |
359 | return ret; | 362 | return ret; |
360 | 363 | ||
364 | entry->flags |= __EXEC_OBJECT_HAS_PIN; | ||
365 | |||
361 | if (has_fenced_gpu_access) { | 366 | if (has_fenced_gpu_access) { |
362 | if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { | 367 | if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { |
363 | ret = i915_gem_object_get_fence(obj); | 368 | ret = i915_gem_object_get_fence(obj); |
364 | if (ret) | 369 | if (ret) |
365 | goto err_unpin; | 370 | return ret; |
366 | 371 | ||
367 | if (i915_gem_object_pin_fence(obj)) | 372 | if (i915_gem_object_pin_fence(obj)) |
368 | entry->flags |= __EXEC_OBJECT_HAS_FENCE; | 373 | entry->flags |= __EXEC_OBJECT_HAS_FENCE; |
@@ -371,12 +376,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj, | |||
371 | } | 376 | } |
372 | } | 377 | } |
373 | 378 | ||
379 | /* Ensure ppgtt mapping exists if needed */ | ||
380 | if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { | ||
381 | i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, | ||
382 | obj, obj->cache_level); | ||
383 | |||
384 | obj->has_aliasing_ppgtt_mapping = 1; | ||
385 | } | ||
386 | |||
374 | entry->offset = obj->gtt_offset; | 387 | entry->offset = obj->gtt_offset; |
375 | return 0; | 388 | return 0; |
389 | } | ||
376 | 390 | ||
377 | err_unpin: | 391 | static void |
378 | i915_gem_object_unpin(obj); | 392 | i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) |
379 | return ret; | 393 | { |
394 | struct drm_i915_gem_exec_object2 *entry; | ||
395 | |||
396 | if (!obj->gtt_space) | ||
397 | return; | ||
398 | |||
399 | entry = obj->exec_entry; | ||
400 | |||
401 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) | ||
402 | i915_gem_object_unpin_fence(obj); | ||
403 | |||
404 | if (entry->flags & __EXEC_OBJECT_HAS_PIN) | ||
405 | i915_gem_object_unpin(obj); | ||
406 | |||
407 | entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); | ||
380 | } | 408 | } |
381 | 409 | ||
382 | static int | 410 | static int |
@@ -384,11 +412,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
384 | struct drm_file *file, | 412 | struct drm_file *file, |
385 | struct list_head *objects) | 413 | struct list_head *objects) |
386 | { | 414 | { |
387 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||
388 | struct drm_i915_gem_object *obj; | 415 | struct drm_i915_gem_object *obj; |
389 | int ret, retry; | ||
390 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | ||
391 | struct list_head ordered_objects; | 416 | struct list_head ordered_objects; |
417 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | ||
418 | int retry; | ||
392 | 419 | ||
393 | INIT_LIST_HEAD(&ordered_objects); | 420 | INIT_LIST_HEAD(&ordered_objects); |
394 | while (!list_empty(objects)) { | 421 | while (!list_empty(objects)) { |
@@ -426,12 +453,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
426 | * 2. Bind new objects. | 453 | * 2. Bind new objects. |
427 | * 3. Decrement pin count. | 454 | * 3. Decrement pin count. |
428 | * | 455 | * |
429 | * This avoid unnecessary unbinding of later objects in order to makr | 456 | * This avoid unnecessary unbinding of later objects in order to make |
430 | * room for the earlier objects *unless* we need to defragment. | 457 | * room for the earlier objects *unless* we need to defragment. |
431 | */ | 458 | */ |
432 | retry = 0; | 459 | retry = 0; |
433 | do { | 460 | do { |
434 | ret = 0; | 461 | int ret = 0; |
435 | 462 | ||
436 | /* Unbind any ill-fitting objects or pin. */ | 463 | /* Unbind any ill-fitting objects or pin. */ |
437 | list_for_each_entry(obj, objects, exec_list) { | 464 | list_for_each_entry(obj, objects, exec_list) { |
@@ -451,7 +478,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
451 | (need_mappable && !obj->map_and_fenceable)) | 478 | (need_mappable && !obj->map_and_fenceable)) |
452 | ret = i915_gem_object_unbind(obj); | 479 | ret = i915_gem_object_unbind(obj); |
453 | else | 480 | else |
454 | ret = pin_and_fence_object(obj, ring); | 481 | ret = i915_gem_execbuffer_reserve_object(obj, ring); |
455 | if (ret) | 482 | if (ret) |
456 | goto err; | 483 | goto err; |
457 | } | 484 | } |
@@ -461,77 +488,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
461 | if (obj->gtt_space) | 488 | if (obj->gtt_space) |
462 | continue; | 489 | continue; |
463 | 490 | ||
464 | ret = pin_and_fence_object(obj, ring); | 491 | ret = i915_gem_execbuffer_reserve_object(obj, ring); |
465 | if (ret) { | 492 | if (ret) |
466 | int ret_ignore; | 493 | goto err; |
467 | |||
468 | /* This can potentially raise a harmless | ||
469 | * -EINVAL if we failed to bind in the above | ||
470 | * call. It cannot raise -EINTR since we know | ||
471 | * that the bo is freshly bound and so will | ||
472 | * not need to be flushed or waited upon. | ||
473 | */ | ||
474 | ret_ignore = i915_gem_object_unbind(obj); | ||
475 | (void)ret_ignore; | ||
476 | WARN_ON(obj->gtt_space); | ||
477 | break; | ||
478 | } | ||
479 | } | 494 | } |
480 | 495 | ||
481 | /* Decrement pin count for bound objects */ | 496 | err: /* Decrement pin count for bound objects */ |
482 | list_for_each_entry(obj, objects, exec_list) { | 497 | list_for_each_entry(obj, objects, exec_list) |
483 | struct drm_i915_gem_exec_object2 *entry; | 498 | i915_gem_execbuffer_unreserve_object(obj); |
484 | 499 | ||
485 | if (!obj->gtt_space) | 500 | if (ret != -ENOSPC || retry++) |
486 | continue; | ||
487 | |||
488 | entry = obj->exec_entry; | ||
489 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { | ||
490 | i915_gem_object_unpin_fence(obj); | ||
491 | entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; | ||
492 | } | ||
493 | |||
494 | i915_gem_object_unpin(obj); | ||
495 | |||
496 | /* ... and ensure ppgtt mapping exist if needed. */ | ||
497 | if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { | ||
498 | i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, | ||
499 | obj, obj->cache_level); | ||
500 | |||
501 | obj->has_aliasing_ppgtt_mapping = 1; | ||
502 | } | ||
503 | } | ||
504 | |||
505 | if (ret != -ENOSPC || retry > 1) | ||
506 | return ret; | 501 | return ret; |
507 | 502 | ||
508 | /* First attempt, just clear anything that is purgeable. | 503 | ret = i915_gem_evict_everything(ring->dev); |
509 | * Second attempt, clear the entire GTT. | ||
510 | */ | ||
511 | ret = i915_gem_evict_everything(ring->dev, retry == 0); | ||
512 | if (ret) | 504 | if (ret) |
513 | return ret; | 505 | return ret; |
514 | |||
515 | retry++; | ||
516 | } while (1); | 506 | } while (1); |
517 | |||
518 | err: | ||
519 | list_for_each_entry_continue_reverse(obj, objects, exec_list) { | ||
520 | struct drm_i915_gem_exec_object2 *entry; | ||
521 | |||
522 | if (!obj->gtt_space) | ||
523 | continue; | ||
524 | |||
525 | entry = obj->exec_entry; | ||
526 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { | ||
527 | i915_gem_object_unpin_fence(obj); | ||
528 | entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; | ||
529 | } | ||
530 | |||
531 | i915_gem_object_unpin(obj); | ||
532 | } | ||
533 | |||
534 | return ret; | ||
535 | } | 507 | } |
536 | 508 | ||
537 | static int | 509 | static int |
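Annotation: pin_and_fence_object() becomes i915_gem_execbuffer_reserve_object() with a matching unreserve helper, the aliasing-ppgtt binding moves into the reserve path, and the fiddly list_for_each_entry_continue_reverse unwind disappears. Instead, each acquired resource is recorded in the entry's flags (__EXEC_OBJECT_HAS_PIN, __EXEC_OBJECT_HAS_FENCE) and the common err: path simply walks the whole list, releasing only what was flagged. A standalone sketch of that flag-and-rollback pattern (names and bit values are illustrative):

    /* Sketch of flag-and-rollback: mark each resource as it is acquired and,
     * on any failure, release exactly the marked ones in one cleanup pass. */
    #include <stdio.h>

    #define HAS_PIN   (1u << 31)
    #define HAS_FENCE (1u << 30)

    struct entry { unsigned flags; int want_fence; };

    static int acquire(struct entry *e, int fail)
    {
        e->flags |= HAS_PIN;           /* pin succeeded */
        if (fail)
            return -1;                 /* later step failed: caller rolls back */
        if (e->want_fence)
            e->flags |= HAS_FENCE;
        return 0;
    }

    static void release(struct entry *e)
    {
        if (e->flags & HAS_FENCE) { /* drop fence */ }
        if (e->flags & HAS_PIN)   { /* drop pin   */ }
        e->flags &= ~(HAS_FENCE | HAS_PIN);
    }

    int main(void)
    {
        struct entry list[3] = { { 0, 1 }, { 0, 0 }, { 0, 1 } };
        int ret = 0;

        for (int i = 0; i < 3 && ret == 0; i++)
            ret = acquire(&list[i], i == 2);   /* third entry fails */

        for (int i = 0; i < 3; i++)            /* err: path, always safe */
            release(&list[i]);

        printf("rolled back, ret=%d\n", ret);
        return 0;
    }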
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 804d65345e2c..18477314d85d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -351,7 +351,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) | |||
351 | intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, | 351 | intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, |
352 | (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); | 352 | (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); |
353 | 353 | ||
354 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | 354 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { |
355 | i915_gem_clflush_object(obj); | 355 | i915_gem_clflush_object(obj); |
356 | i915_gem_gtt_bind_object(obj, obj->cache_level); | 356 | i915_gem_gtt_bind_object(obj, obj->cache_level); |
357 | } | 357 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a61b41a8c607..34dc7158f03c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -1070,6 +1070,36 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | |||
1070 | return NULL; | 1070 | return NULL; |
1071 | } | 1071 | } |
1072 | 1072 | ||
1073 | /* NB: please notice the memset */ | ||
1074 | static void i915_get_extra_instdone(struct drm_device *dev, | ||
1075 | uint32_t *instdone) | ||
1076 | { | ||
1077 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1078 | memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); | ||
1079 | |||
1080 | switch(INTEL_INFO(dev)->gen) { | ||
1081 | case 2: | ||
1082 | case 3: | ||
1083 | instdone[0] = I915_READ(INSTDONE); | ||
1084 | break; | ||
1085 | case 4: | ||
1086 | case 5: | ||
1087 | case 6: | ||
1088 | instdone[0] = I915_READ(INSTDONE_I965); | ||
1089 | instdone[1] = I915_READ(INSTDONE1); | ||
1090 | break; | ||
1091 | default: | ||
1092 | WARN_ONCE(1, "Unsupported platform\n"); | ||
1093 | case 7: | ||
1094 | instdone[0] = I915_READ(GEN7_INSTDONE_1); | ||
1095 | instdone[1] = I915_READ(GEN7_SC_INSTDONE); | ||
1096 | instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); | ||
1097 | instdone[3] = I915_READ(GEN7_ROW_INSTDONE); | ||
1098 | break; | ||
1099 | } | ||
1100 | } | ||
1101 | |||
1102 | |||
1073 | static void i915_record_ring_state(struct drm_device *dev, | 1103 | static void i915_record_ring_state(struct drm_device *dev, |
1074 | struct drm_i915_error_state *error, | 1104 | struct drm_i915_error_state *error, |
1075 | struct intel_ring_buffer *ring) | 1105 | struct intel_ring_buffer *ring) |
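Annotation: i915_get_extra_instdone() zeroes the output array and then fills in as many INSTDONE words as the hardware generation provides; the deliberate fall-through from the default (unknown generation, WARN_ONCE) case into the gen-7 branch means future parts at least get the gen-7 registers dumped. A small standalone illustration of that "warn, then fall through to the newest known case" switch shape, with made-up register values:

    /* Illustration of the per-generation switch with intentional fall-through
     * from the unknown-generation default into the newest known case. */
    #include <stdio.h>
    #include <string.h>

    static void read_regs(int gen, unsigned int out[4])
    {
        memset(out, 0, 4 * sizeof(*out));
        switch (gen) {
        case 2:
        case 3:
            out[0] = 0x23;                          /* one legacy word   */
            break;
        case 4:
        case 5:
        case 6:
            out[0] = 0x46;
            out[1] = 0x461;                         /* two words         */
            break;
        default:
            fprintf(stderr, "unknown gen %d, assuming gen7 layout\n", gen);
            /* fall through */
        case 7:
            out[0] = 0x70;
            out[1] = 0x71;
            out[2] = 0x72;
            out[3] = 0x73;                          /* four words        */
            break;
        }
    }

    int main(void)
    {
        unsigned int r[4];
        read_regs(8, r);
        printf("%#x %#x %#x %#x\n", r[0], r[1], r[2], r[3]);
        return 0;
    }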
@@ -1091,10 +1121,8 @@ static void i915_record_ring_state(struct drm_device *dev, | |||
1091 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); | 1121 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); |
1092 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); | 1122 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); |
1093 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); | 1123 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); |
1094 | if (ring->id == RCS) { | 1124 | if (ring->id == RCS) |
1095 | error->instdone1 = I915_READ(INSTDONE1); | ||
1096 | error->bbaddr = I915_READ64(BB_ADDR); | 1125 | error->bbaddr = I915_READ64(BB_ADDR); |
1097 | } | ||
1098 | } else { | 1126 | } else { |
1099 | error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); | 1127 | error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); |
1100 | error->ipeir[ring->id] = I915_READ(IPEIR); | 1128 | error->ipeir[ring->id] = I915_READ(IPEIR); |
@@ -1210,6 +1238,11 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
1210 | error->done_reg = I915_READ(DONE_REG); | 1238 | error->done_reg = I915_READ(DONE_REG); |
1211 | } | 1239 | } |
1212 | 1240 | ||
1241 | if (INTEL_INFO(dev)->gen == 7) | ||
1242 | error->err_int = I915_READ(GEN7_ERR_INT); | ||
1243 | |||
1244 | i915_get_extra_instdone(dev, error->extra_instdone); | ||
1245 | |||
1213 | i915_gem_record_fences(dev, error); | 1246 | i915_gem_record_fences(dev, error); |
1214 | i915_gem_record_rings(dev, error); | 1247 | i915_gem_record_rings(dev, error); |
1215 | 1248 | ||
@@ -1221,7 +1254,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
1221 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) | 1254 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) |
1222 | i++; | 1255 | i++; |
1223 | error->active_bo_count = i; | 1256 | error->active_bo_count = i; |
1224 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) | 1257 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) |
1225 | if (obj->pin_count) | 1258 | if (obj->pin_count) |
1226 | i++; | 1259 | i++; |
1227 | error->pinned_bo_count = i - error->active_bo_count; | 1260 | error->pinned_bo_count = i - error->active_bo_count; |
@@ -1246,7 +1279,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
1246 | error->pinned_bo_count = | 1279 | error->pinned_bo_count = |
1247 | capture_pinned_bo(error->pinned_bo, | 1280 | capture_pinned_bo(error->pinned_bo, |
1248 | error->pinned_bo_count, | 1281 | error->pinned_bo_count, |
1249 | &dev_priv->mm.gtt_list); | 1282 | &dev_priv->mm.bound_list); |
1250 | 1283 | ||
1251 | do_gettimeofday(&error->time); | 1284 | do_gettimeofday(&error->time); |
1252 | 1285 | ||
@@ -1285,24 +1318,26 @@ void i915_destroy_error_state(struct drm_device *dev) | |||
1285 | static void i915_report_and_clear_eir(struct drm_device *dev) | 1318 | static void i915_report_and_clear_eir(struct drm_device *dev) |
1286 | { | 1319 | { |
1287 | struct drm_i915_private *dev_priv = dev->dev_private; | 1320 | struct drm_i915_private *dev_priv = dev->dev_private; |
1321 | uint32_t instdone[I915_NUM_INSTDONE_REG]; | ||
1288 | u32 eir = I915_READ(EIR); | 1322 | u32 eir = I915_READ(EIR); |
1289 | int pipe; | 1323 | int pipe, i; |
1290 | 1324 | ||
1291 | if (!eir) | 1325 | if (!eir) |
1292 | return; | 1326 | return; |
1293 | 1327 | ||
1294 | pr_err("render error detected, EIR: 0x%08x\n", eir); | 1328 | pr_err("render error detected, EIR: 0x%08x\n", eir); |
1295 | 1329 | ||
1330 | i915_get_extra_instdone(dev, instdone); | ||
1331 | |||
1296 | if (IS_G4X(dev)) { | 1332 | if (IS_G4X(dev)) { |
1297 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | 1333 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { |
1298 | u32 ipeir = I915_READ(IPEIR_I965); | 1334 | u32 ipeir = I915_READ(IPEIR_I965); |
1299 | 1335 | ||
1300 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); | 1336 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
1301 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | 1337 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); |
1302 | pr_err(" INSTDONE: 0x%08x\n", | 1338 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
1303 | I915_READ(INSTDONE_I965)); | 1339 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); |
1304 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); | 1340 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
1305 | pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); | ||
1306 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); | 1341 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
1307 | I915_WRITE(IPEIR_I965, ipeir); | 1342 | I915_WRITE(IPEIR_I965, ipeir); |
1308 | POSTING_READ(IPEIR_I965); | 1343 | POSTING_READ(IPEIR_I965); |
@@ -1336,12 +1371,13 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
1336 | if (eir & I915_ERROR_INSTRUCTION) { | 1371 | if (eir & I915_ERROR_INSTRUCTION) { |
1337 | pr_err("instruction error\n"); | 1372 | pr_err("instruction error\n"); |
1338 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); | 1373 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); |
1374 | for (i = 0; i < ARRAY_SIZE(instdone); i++) | ||
1375 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | ||
1339 | if (INTEL_INFO(dev)->gen < 4) { | 1376 | if (INTEL_INFO(dev)->gen < 4) { |
1340 | u32 ipeir = I915_READ(IPEIR); | 1377 | u32 ipeir = I915_READ(IPEIR); |
1341 | 1378 | ||
1342 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); | 1379 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); |
1343 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); | 1380 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); |
1344 | pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE)); | ||
1345 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); | 1381 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); |
1346 | I915_WRITE(IPEIR, ipeir); | 1382 | I915_WRITE(IPEIR, ipeir); |
1347 | POSTING_READ(IPEIR); | 1383 | POSTING_READ(IPEIR); |
@@ -1350,10 +1386,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
1350 | 1386 | ||
1351 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); | 1387 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
1352 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | 1388 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); |
1353 | pr_err(" INSTDONE: 0x%08x\n", | ||
1354 | I915_READ(INSTDONE_I965)); | ||
1355 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); | 1389 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
1356 | pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); | ||
1357 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); | 1390 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
1358 | I915_WRITE(IPEIR_I965, ipeir); | 1391 | I915_WRITE(IPEIR_I965, ipeir); |
1359 | POSTING_READ(IPEIR_I965); | 1392 | POSTING_READ(IPEIR_I965); |
@@ -1668,7 +1701,7 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1668 | { | 1701 | { |
1669 | struct drm_device *dev = (struct drm_device *)data; | 1702 | struct drm_device *dev = (struct drm_device *)data; |
1670 | drm_i915_private_t *dev_priv = dev->dev_private; | 1703 | drm_i915_private_t *dev_priv = dev->dev_private; |
1671 | uint32_t acthd[I915_NUM_RINGS], instdone, instdone1; | 1704 | uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; |
1672 | struct intel_ring_buffer *ring; | 1705 | struct intel_ring_buffer *ring; |
1673 | bool err = false, idle; | 1706 | bool err = false, idle; |
1674 | int i; | 1707 | int i; |
@@ -1696,25 +1729,16 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1696 | return; | 1729 | return; |
1697 | } | 1730 | } |
1698 | 1731 | ||
1699 | if (INTEL_INFO(dev)->gen < 4) { | 1732 | i915_get_extra_instdone(dev, instdone); |
1700 | instdone = I915_READ(INSTDONE); | ||
1701 | instdone1 = 0; | ||
1702 | } else { | ||
1703 | instdone = I915_READ(INSTDONE_I965); | ||
1704 | instdone1 = I915_READ(INSTDONE1); | ||
1705 | } | ||
1706 | |||
1707 | if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && | 1733 | if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && |
1708 | dev_priv->last_instdone == instdone && | 1734 | memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { |
1709 | dev_priv->last_instdone1 == instdone1) { | ||
1710 | if (i915_hangcheck_hung(dev)) | 1735 | if (i915_hangcheck_hung(dev)) |
1711 | return; | 1736 | return; |
1712 | } else { | 1737 | } else { |
1713 | dev_priv->hangcheck_count = 0; | 1738 | dev_priv->hangcheck_count = 0; |
1714 | 1739 | ||
1715 | memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); | 1740 | memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); |
1716 | dev_priv->last_instdone = instdone; | 1741 | memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); |
1717 | dev_priv->last_instdone1 = instdone1; | ||
1718 | } | 1742 | } |
1719 | 1743 | ||
1720 | repeat: | 1744 | repeat: |
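
One way to read the hangcheck change in this hunk: instead of remembering two scalar INSTDONE values, the driver now keeps whole snapshot arrays and compares them with memcmp(). A hedged, self-contained sketch of that comparison follows; the field names mirror the diff, the array sizes are invented for the demo:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_RINGS        3
#define NUM_INSTDONE_REG 4

struct hangcheck_state {
    uint32_t last_acthd[NUM_RINGS];
    uint32_t prev_instdone[NUM_INSTDONE_REG];
    int hangcheck_count;
};

/* True when neither ACTHD nor any INSTDONE register moved since the last
 * sample, i.e. the GPU made no visible progress between timer ticks. */
static bool no_progress(struct hangcheck_state *s,
                        const uint32_t acthd[NUM_RINGS],
                        const uint32_t instdone[NUM_INSTDONE_REG])
{
    if (memcmp(s->last_acthd, acthd, sizeof(s->last_acthd)) == 0 &&
        memcmp(s->prev_instdone, instdone, sizeof(s->prev_instdone)) == 0)
        return true;

    s->hangcheck_count = 0;
    memcpy(s->last_acthd, acthd, sizeof(s->last_acthd));
    memcpy(s->prev_instdone, instdone, sizeof(s->prev_instdone));
    return false;
}

int main(void)
{
    struct hangcheck_state s = { { 0 }, { 0 }, 0 };
    uint32_t acthd[NUM_RINGS] = { 1, 2, 3 };
    uint32_t instdone[NUM_INSTDONE_REG] = { 0 };

    printf("first sample: %s\n", no_progress(&s, acthd, instdone) ? "stuck" : "moving");
    printf("same sample again: %s\n", no_progress(&s, acthd, instdone) ? "stuck" : "moving");
    return 0;
}
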
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ab8cffe193cd..75dcfa4ec5ce 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -479,6 +479,11 @@ | |||
479 | #define IPEIR_I965 0x02064 | 479 | #define IPEIR_I965 0x02064 |
480 | #define IPEHR_I965 0x02068 | 480 | #define IPEHR_I965 0x02068 |
481 | #define INSTDONE_I965 0x0206c | 481 | #define INSTDONE_I965 0x0206c |
482 | #define GEN7_INSTDONE_1 0x0206c | ||
483 | #define GEN7_SC_INSTDONE 0x07100 | ||
484 | #define GEN7_SAMPLER_INSTDONE 0x0e160 | ||
485 | #define GEN7_ROW_INSTDONE 0x0e164 | ||
486 | #define I915_NUM_INSTDONE_REG 4 | ||
482 | #define RING_IPEIR(base) ((base)+0x64) | 487 | #define RING_IPEIR(base) ((base)+0x64) |
483 | #define RING_IPEHR(base) ((base)+0x68) | 488 | #define RING_IPEHR(base) ((base)+0x68) |
484 | #define RING_INSTDONE(base) ((base)+0x6c) | 489 | #define RING_INSTDONE(base) ((base)+0x6c) |
@@ -501,6 +506,8 @@ | |||
501 | #define DMA_FADD_I8XX 0x020d0 | 506 | #define DMA_FADD_I8XX 0x020d0 |
502 | 507 | ||
503 | #define ERROR_GEN6 0x040a0 | 508 | #define ERROR_GEN6 0x040a0 |
509 | #define GEN7_ERR_INT 0x44040 | ||
510 | #define ERR_INT_MMIO_UNCLAIMED (1<<13) | ||
504 | 511 | ||
505 | /* GM45+ chicken bits -- debug workaround bits that may be required | 512 | /* GM45+ chicken bits -- debug workaround bits that may be required |
506 | * for various sorts of correct behavior. The top 16 bits of each are | 513 | * for various sorts of correct behavior. The top 16 bits of each are |
@@ -4248,7 +4255,15 @@ | |||
4248 | #define G4X_HDMIW_HDMIEDID 0x6210C | 4255 | #define G4X_HDMIW_HDMIEDID 0x6210C |
4249 | 4256 | ||
4250 | #define IBX_HDMIW_HDMIEDID_A 0xE2050 | 4257 | #define IBX_HDMIW_HDMIEDID_A 0xE2050 |
4258 | #define IBX_HDMIW_HDMIEDID_B 0xE2150 | ||
4259 | #define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ | ||
4260 | IBX_HDMIW_HDMIEDID_A, \ | ||
4261 | IBX_HDMIW_HDMIEDID_B) | ||
4251 | #define IBX_AUD_CNTL_ST_A 0xE20B4 | 4262 | #define IBX_AUD_CNTL_ST_A 0xE20B4 |
4263 | #define IBX_AUD_CNTL_ST_B 0xE21B4 | ||
4264 | #define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \ | ||
4265 | IBX_AUD_CNTL_ST_A, \ | ||
4266 | IBX_AUD_CNTL_ST_B) | ||
4252 | #define IBX_ELD_BUFFER_SIZE (0x1f << 10) | 4267 | #define IBX_ELD_BUFFER_SIZE (0x1f << 10) |
4253 | #define IBX_ELD_ADDRESS (0x1f << 5) | 4268 | #define IBX_ELD_ADDRESS (0x1f << 5) |
4254 | #define IBX_ELD_ACK (1 << 4) | 4269 | #define IBX_ELD_ACK (1 << 4) |
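
The _A/_B pairs plus the IBX_HDMIW_HDMIEDID()/IBX_AUD_CNTL_ST() wrappers added here lean on the driver's _PIPE() helper, which roughly computes the per-pipe register as a + pipe * (b - a). A small sketch of that arithmetic, reusing offsets from this hunk; the macro body below is a paraphrase of the i915_reg.h helper, not a verbatim copy:

#include <stdio.h>
#include <stdint.h>

/* Approximation of the driver's _PIPE() helper. */
#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

#define IBX_HDMIW_HDMIEDID_A 0xE2050
#define IBX_HDMIW_HDMIEDID_B 0xE2150
#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, IBX_HDMIW_HDMIEDID_A, \
                                             IBX_HDMIW_HDMIEDID_B)

int main(void)
{
    int pipe;

    for (pipe = 0; pipe < 2; pipe++)
        printf("pipe %d -> HDMIEDID register 0x%05X\n",
               pipe, (unsigned int)IBX_HDMIW_HDMIEDID(pipe));
    /* prints 0xE2050 for pipe A and 0xE2150 for pipe B */
    return 0;
}
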
@@ -4257,7 +4272,15 @@ | |||
4257 | #define IBX_CP_READYB (1 << 1) | 4272 | #define IBX_CP_READYB (1 << 1) |
4258 | 4273 | ||
4259 | #define CPT_HDMIW_HDMIEDID_A 0xE5050 | 4274 | #define CPT_HDMIW_HDMIEDID_A 0xE5050 |
4275 | #define CPT_HDMIW_HDMIEDID_B 0xE5150 | ||
4276 | #define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ | ||
4277 | CPT_HDMIW_HDMIEDID_A, \ | ||
4278 | CPT_HDMIW_HDMIEDID_B) | ||
4260 | #define CPT_AUD_CNTL_ST_A 0xE50B4 | 4279 | #define CPT_AUD_CNTL_ST_A 0xE50B4 |
4280 | #define CPT_AUD_CNTL_ST_B 0xE51B4 | ||
4281 | #define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \ | ||
4282 | CPT_AUD_CNTL_ST_A, \ | ||
4283 | CPT_AUD_CNTL_ST_B) | ||
4261 | #define CPT_AUD_CNTRL_ST2 0xE50C0 | 4284 | #define CPT_AUD_CNTRL_ST2 0xE50C0 |
4262 | 4285 | ||
4263 | /* These are the 4 32-bit write offset registers for each stream | 4286 | /* These are the 4 32-bit write offset registers for each stream |
@@ -4267,7 +4290,15 @@ | |||
4267 | #define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) | 4290 | #define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) |
4268 | 4291 | ||
4269 | #define IBX_AUD_CONFIG_A 0xe2000 | 4292 | #define IBX_AUD_CONFIG_A 0xe2000 |
4293 | #define IBX_AUD_CONFIG_B 0xe2100 | ||
4294 | #define IBX_AUD_CFG(pipe) _PIPE(pipe, \ | ||
4295 | IBX_AUD_CONFIG_A, \ | ||
4296 | IBX_AUD_CONFIG_B) | ||
4270 | #define CPT_AUD_CONFIG_A 0xe5000 | 4297 | #define CPT_AUD_CONFIG_A 0xe5000 |
4298 | #define CPT_AUD_CONFIG_B 0xe5100 | ||
4299 | #define CPT_AUD_CFG(pipe) _PIPE(pipe, \ | ||
4300 | CPT_AUD_CONFIG_A, \ | ||
4301 | CPT_AUD_CONFIG_B) | ||
4271 | #define AUD_CONFIG_N_VALUE_INDEX (1 << 29) | 4302 | #define AUD_CONFIG_N_VALUE_INDEX (1 << 29) |
4272 | #define AUD_CONFIG_N_PROG_ENABLE (1 << 28) | 4303 | #define AUD_CONFIG_N_PROG_ENABLE (1 << 28) |
4273 | #define AUD_CONFIG_UPPER_N_SHIFT 20 | 4304 | #define AUD_CONFIG_UPPER_N_SHIFT 20 |
@@ -4278,6 +4309,54 @@ | |||
4278 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) | 4309 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) |
4279 | #define AUD_CONFIG_DISABLE_NCTS (1 << 3) | 4310 | #define AUD_CONFIG_DISABLE_NCTS (1 << 3) |
4280 | 4311 | ||
4312 | /* HSW Audio */ | ||
4313 | #define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */ | ||
4314 | #define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */ | ||
4315 | #define HSW_AUD_CFG(pipe) _PIPE(pipe, \ | ||
4316 | HSW_AUD_CONFIG_A, \ | ||
4317 | HSW_AUD_CONFIG_B) | ||
4318 | |||
4319 | #define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */ | ||
4320 | #define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */ | ||
4321 | #define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \ | ||
4322 | HSW_AUD_MISC_CTRL_A, \ | ||
4323 | HSW_AUD_MISC_CTRL_B) | ||
4324 | |||
4325 | #define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */ | ||
4326 | #define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */ | ||
4327 | #define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \ | ||
4328 | HSW_AUD_DIP_ELD_CTRL_ST_A, \ | ||
4329 | HSW_AUD_DIP_ELD_CTRL_ST_B) | ||
4330 | |||
4331 | /* Audio Digital Converter */ | ||
4332 | #define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */ | ||
4333 | #define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 2 */ | ||
4334 | #define AUD_DIG_CNVT(pipe) _PIPE(pipe, \ | ||
4335 | HSW_AUD_DIG_CNVT_1, \ | ||
4336 | HSW_AUD_DIG_CNVT_2) | ||
4337 | #define DIP_PORT_SEL_MASK 0x3 | ||
4338 | |||
4339 | #define HSW_AUD_EDID_DATA_A 0x65050 | ||
4340 | #define HSW_AUD_EDID_DATA_B 0x65150 | ||
4341 | #define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \ | ||
4342 | HSW_AUD_EDID_DATA_A, \ | ||
4343 | HSW_AUD_EDID_DATA_B) | ||
4344 | |||
4345 | #define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */ | ||
4346 | #define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */ | ||
4347 | #define AUDIO_INACTIVE_C (1<<11) | ||
4348 | #define AUDIO_INACTIVE_B (1<<7) | ||
4349 | #define AUDIO_INACTIVE_A (1<<3) | ||
4350 | #define AUDIO_OUTPUT_ENABLE_A (1<<2) | ||
4351 | #define AUDIO_OUTPUT_ENABLE_B (1<<6) | ||
4352 | #define AUDIO_OUTPUT_ENABLE_C (1<<10) | ||
4353 | #define AUDIO_ELD_VALID_A (1<<0) | ||
4354 | #define AUDIO_ELD_VALID_B (1<<4) | ||
4355 | #define AUDIO_ELD_VALID_C (1<<8) | ||
4356 | #define AUDIO_CP_READY_A (1<<1) | ||
4357 | #define AUDIO_CP_READY_B (1<<5) | ||
4358 | #define AUDIO_CP_READY_C (1<<9) | ||
4359 | |||
4281 | /* HSW Power Wells */ | 4360 | /* HSW Power Wells */ |
4282 | #define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ | 4361 | #define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ |
4283 | #define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ | 4362 | #define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ |
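
The HSW_AUD_PIN_ELD_CP_VLD bits defined just above pack four flags per transcoder (ELD valid, CP ready, output enable, inactive), so the pipe B and C variants are simply the pipe A bits shifted left by four per pipe. A hedged sketch of the "<< (pipe * 4)" idiom that haswell_write_eld() uses later in this series; the bit values come from the hunk, the helper itself is invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define AUDIO_ELD_VALID_A      (1 << 0)
#define AUDIO_CP_READY_A       (1 << 1)
#define AUDIO_OUTPUT_ENABLE_A  (1 << 2)
#define AUDIO_INACTIVE_A       (1 << 3)

/* Set the output-enable and ELD-valid flags for one transcoder. */
static uint32_t enable_audio_output(uint32_t reg, int pipe)
{
    reg |= AUDIO_OUTPUT_ENABLE_A << (pipe * 4);
    reg |= AUDIO_ELD_VALID_A << (pipe * 4);
    return reg;
}

int main(void)
{
    uint32_t reg = enable_audio_output(0, 2);    /* transcoder C */

    printf("reg = 0x%03x\n", reg);               /* 0x500: bits 10 and 8 */
    return 0;
}
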
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index fe90b3a84a6d..3c4093d91f60 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -214,22 +214,18 @@ TRACE_EVENT(i915_gem_evict, | |||
214 | ); | 214 | ); |
215 | 215 | ||
216 | TRACE_EVENT(i915_gem_evict_everything, | 216 | TRACE_EVENT(i915_gem_evict_everything, |
217 | TP_PROTO(struct drm_device *dev, bool purgeable), | 217 | TP_PROTO(struct drm_device *dev), |
218 | TP_ARGS(dev, purgeable), | 218 | TP_ARGS(dev), |
219 | 219 | ||
220 | TP_STRUCT__entry( | 220 | TP_STRUCT__entry( |
221 | __field(u32, dev) | 221 | __field(u32, dev) |
222 | __field(bool, purgeable) | ||
223 | ), | 222 | ), |
224 | 223 | ||
225 | TP_fast_assign( | 224 | TP_fast_assign( |
226 | __entry->dev = dev->primary->index; | 225 | __entry->dev = dev->primary->index; |
227 | __entry->purgeable = purgeable; | ||
228 | ), | 226 | ), |
229 | 227 | ||
230 | TP_printk("dev=%d%s", | 228 | TP_printk("dev=%d", __entry->dev) |
231 | __entry->dev, | ||
232 | __entry->purgeable ? ", purgeable only" : "") | ||
233 | ); | 229 | ); |
234 | 230 | ||
235 | TRACE_EVENT(i915_gem_ring_dispatch, | 231 | TRACE_EVENT(i915_gem_ring_dispatch, |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 80bf3112dc1f..236191377b0f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -547,14 +547,12 @@ intel_crt_detect(struct drm_connector *connector, bool force) | |||
547 | return connector->status; | 547 | return connector->status; |
548 | 548 | ||
549 | /* for pre-945g platforms use load detect */ | 549 | /* for pre-945g platforms use load detect */ |
550 | if (intel_get_load_detect_pipe(&crt->base, connector, NULL, | 550 | if (intel_get_load_detect_pipe(connector, NULL, &tmp)) { |
551 | &tmp)) { | ||
552 | if (intel_crt_detect_ddc(connector)) | 551 | if (intel_crt_detect_ddc(connector)) |
553 | status = connector_status_connected; | 552 | status = connector_status_connected; |
554 | else | 553 | else |
555 | status = intel_crt_load_detect(crt); | 554 | status = intel_crt_load_detect(crt); |
556 | intel_release_load_detect_pipe(&crt->base, connector, | 555 | intel_release_load_detect_pipe(connector, &tmp); |
557 | &tmp); | ||
558 | } else | 556 | } else |
559 | status = connector_status_unknown; | 557 | status = connector_status_unknown; |
560 | 558 | ||
@@ -694,7 +692,7 @@ void intel_crt_init(struct drm_device *dev) | |||
694 | if (IS_HASWELL(dev)) | 692 | if (IS_HASWELL(dev)) |
695 | crt->base.crtc_mask = (1 << 0); | 693 | crt->base.crtc_mask = (1 << 0); |
696 | else | 694 | else |
697 | crt->base.crtc_mask = (1 << 0) | (1 << 1); | 695 | crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
698 | 696 | ||
699 | if (IS_GEN2(dev)) | 697 | if (IS_GEN2(dev)) |
700 | connector->interlace_allowed = 0; | 698 | connector->interlace_allowed = 0; |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 958422606bc7..170e3861aa4e 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -713,8 +713,12 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, | |||
713 | /* Proper support for digital audio needs a new logic and a new set | 713 | /* Proper support for digital audio needs a new logic and a new set |
714 | * of registers, so we leave it for future patch bombing. | 714 | * of registers, so we leave it for future patch bombing. |
715 | */ | 715 | */ |
716 | DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n", | 716 | DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", |
717 | pipe_name(intel_crtc->pipe)); | 717 | pipe_name(intel_crtc->pipe)); |
718 | |||
719 | /* write eld */ | ||
720 | DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); | ||
721 | intel_write_eld(encoder, adjusted_mode); | ||
718 | } | 722 | } |
719 | 723 | ||
720 | /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ | 724 | /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a9ab1aff2c77..778cbb88bddc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1384,7 +1384,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | |||
1384 | enum pipe pipe, int reg) | 1384 | enum pipe pipe, int reg) |
1385 | { | 1385 | { |
1386 | u32 val = I915_READ(reg); | 1386 | u32 val = I915_READ(reg); |
1387 | WARN(hdmi_pipe_enabled(dev_priv, val, pipe), | 1387 | WARN(hdmi_pipe_enabled(dev_priv, pipe, val), |
1388 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", | 1388 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", |
1389 | reg, pipe_name(pipe)); | 1389 | reg, pipe_name(pipe)); |
1390 | 1390 | ||
@@ -1404,13 +1404,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1404 | 1404 | ||
1405 | reg = PCH_ADPA; | 1405 | reg = PCH_ADPA; |
1406 | val = I915_READ(reg); | 1406 | val = I915_READ(reg); |
1407 | WARN(adpa_pipe_enabled(dev_priv, val, pipe), | 1407 | WARN(adpa_pipe_enabled(dev_priv, pipe, val), |
1408 | "PCH VGA enabled on transcoder %c, should be disabled\n", | 1408 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1409 | pipe_name(pipe)); | 1409 | pipe_name(pipe)); |
1410 | 1410 | ||
1411 | reg = PCH_LVDS; | 1411 | reg = PCH_LVDS; |
1412 | val = I915_READ(reg); | 1412 | val = I915_READ(reg); |
1413 | WARN(lvds_pipe_enabled(dev_priv, val, pipe), | 1413 | WARN(lvds_pipe_enabled(dev_priv, pipe, val), |
1414 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | 1414 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1415 | pipe_name(pipe)); | 1415 | pipe_name(pipe)); |
1416 | 1416 | ||
@@ -1432,7 +1432,7 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1432 | * | 1432 | * |
1433 | * Unfortunately needed by dvo_ns2501 since the dvo depends on it running. | 1433 | * Unfortunately needed by dvo_ns2501 since the dvo depends on it running. |
1434 | */ | 1434 | */ |
1435 | void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | 1435 | static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) |
1436 | { | 1436 | { |
1437 | int reg; | 1437 | int reg; |
1438 | u32 val; | 1438 | u32 val; |
@@ -1874,7 +1874,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv, | |||
1874 | enum pipe pipe, int reg) | 1874 | enum pipe pipe, int reg) |
1875 | { | 1875 | { |
1876 | u32 val = I915_READ(reg); | 1876 | u32 val = I915_READ(reg); |
1877 | if (hdmi_pipe_enabled(dev_priv, val, pipe)) { | 1877 | if (hdmi_pipe_enabled(dev_priv, pipe, val)) { |
1878 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", | 1878 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", |
1879 | reg, pipe); | 1879 | reg, pipe); |
1880 | I915_WRITE(reg, val & ~PORT_ENABLE); | 1880 | I915_WRITE(reg, val & ~PORT_ENABLE); |
@@ -1896,12 +1896,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | |||
1896 | 1896 | ||
1897 | reg = PCH_ADPA; | 1897 | reg = PCH_ADPA; |
1898 | val = I915_READ(reg); | 1898 | val = I915_READ(reg); |
1899 | if (adpa_pipe_enabled(dev_priv, val, pipe)) | 1899 | if (adpa_pipe_enabled(dev_priv, pipe, val)) |
1900 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); | 1900 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); |
1901 | 1901 | ||
1902 | reg = PCH_LVDS; | 1902 | reg = PCH_LVDS; |
1903 | val = I915_READ(reg); | 1903 | val = I915_READ(reg); |
1904 | if (lvds_pipe_enabled(dev_priv, val, pipe)) { | 1904 | if (lvds_pipe_enabled(dev_priv, pipe, val)) { |
1905 | DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); | 1905 | DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); |
1906 | I915_WRITE(reg, val & ~LVDS_PORT_EN); | 1906 | I915_WRITE(reg, val & ~LVDS_PORT_EN); |
1907 | POSTING_READ(reg); | 1907 | POSTING_READ(reg); |
@@ -2709,11 +2709,10 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2709 | DRM_DEBUG_KMS("FDI train done.\n"); | 2709 | DRM_DEBUG_KMS("FDI train done.\n"); |
2710 | } | 2710 | } |
2711 | 2711 | ||
2712 | static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) | 2712 | static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) |
2713 | { | 2713 | { |
2714 | struct drm_device *dev = crtc->dev; | 2714 | struct drm_device *dev = intel_crtc->base.dev; |
2715 | struct drm_i915_private *dev_priv = dev->dev_private; | 2715 | struct drm_i915_private *dev_priv = dev->dev_private; |
2716 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2717 | int pipe = intel_crtc->pipe; | 2716 | int pipe = intel_crtc->pipe; |
2718 | u32 reg, temp; | 2717 | u32 reg, temp; |
2719 | 2718 | ||
@@ -2754,6 +2753,35 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) | |||
2754 | } | 2753 | } |
2755 | } | 2754 | } |
2756 | 2755 | ||
2756 | static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) | ||
2757 | { | ||
2758 | struct drm_device *dev = intel_crtc->base.dev; | ||
2759 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2760 | int pipe = intel_crtc->pipe; | ||
2761 | u32 reg, temp; | ||
2762 | |||
2763 | /* Switch from PCDclk to Rawclk */ | ||
2764 | reg = FDI_RX_CTL(pipe); | ||
2765 | temp = I915_READ(reg); | ||
2766 | I915_WRITE(reg, temp & ~FDI_PCDCLK); | ||
2767 | |||
2768 | /* Disable CPU FDI TX PLL */ | ||
2769 | reg = FDI_TX_CTL(pipe); | ||
2770 | temp = I915_READ(reg); | ||
2771 | I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); | ||
2772 | |||
2773 | POSTING_READ(reg); | ||
2774 | udelay(100); | ||
2775 | |||
2776 | reg = FDI_RX_CTL(pipe); | ||
2777 | temp = I915_READ(reg); | ||
2778 | I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); | ||
2779 | |||
2780 | /* Wait for the clocks to turn off. */ | ||
2781 | POSTING_READ(reg); | ||
2782 | udelay(100); | ||
2783 | } | ||
2784 | |||
2757 | static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) | 2785 | static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) |
2758 | { | 2786 | { |
2759 | struct drm_i915_private *dev_priv = dev->dev_private; | 2787 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -3201,7 +3229,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
3201 | is_pch_port = intel_crtc_driving_pch(crtc); | 3229 | is_pch_port = intel_crtc_driving_pch(crtc); |
3202 | 3230 | ||
3203 | if (is_pch_port) | 3231 | if (is_pch_port) |
3204 | ironlake_fdi_pll_enable(crtc); | 3232 | ironlake_fdi_pll_enable(intel_crtc); |
3205 | else | 3233 | else |
3206 | ironlake_fdi_disable(crtc); | 3234 | ironlake_fdi_disable(crtc); |
3207 | 3235 | ||
@@ -3304,26 +3332,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
3304 | /* disable PCH DPLL */ | 3332 | /* disable PCH DPLL */ |
3305 | intel_disable_pch_pll(intel_crtc); | 3333 | intel_disable_pch_pll(intel_crtc); |
3306 | 3334 | ||
3307 | /* Switch from PCDclk to Rawclk */ | 3335 | ironlake_fdi_pll_disable(intel_crtc); |
3308 | reg = FDI_RX_CTL(pipe); | ||
3309 | temp = I915_READ(reg); | ||
3310 | I915_WRITE(reg, temp & ~FDI_PCDCLK); | ||
3311 | |||
3312 | /* Disable CPU FDI TX PLL */ | ||
3313 | reg = FDI_TX_CTL(pipe); | ||
3314 | temp = I915_READ(reg); | ||
3315 | I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); | ||
3316 | |||
3317 | POSTING_READ(reg); | ||
3318 | udelay(100); | ||
3319 | |||
3320 | reg = FDI_RX_CTL(pipe); | ||
3321 | temp = I915_READ(reg); | ||
3322 | I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); | ||
3323 | |||
3324 | /* Wait for the clocks to turn off. */ | ||
3325 | POSTING_READ(reg); | ||
3326 | udelay(100); | ||
3327 | 3336 | ||
3328 | intel_crtc->active = false; | 3337 | intel_crtc->active = false; |
3329 | intel_update_watermarks(dev); | 3338 | intel_update_watermarks(dev); |
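
The ironlake_fdi_pll_disable() extraction above keeps the usual MMIO idiom intact: read the register, clear the enable bit, do a posting read to flush the write, then wait for the hardware to settle. A self-contained sketch of that pattern with stubbed register accessors; the bit position and register offset below are illustrative, not taken from i915_reg.h:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;                         /* pretend MMIO register */

static uint32_t mmio_read(uint32_t reg)          { (void)reg; return fake_reg; }
static void mmio_write(uint32_t reg, uint32_t v) { (void)reg; fake_reg = v; }
static void wait_us(unsigned int us)             { (void)us; }  /* stand-in for udelay() */

#define PLL_ENABLE_BIT (1u << 14)   /* illustrative bit position */

static void fdi_tx_pll_disable(uint32_t fdi_tx_ctl_reg)
{
    uint32_t temp = mmio_read(fdi_tx_ctl_reg);

    mmio_write(fdi_tx_ctl_reg, temp & ~PLL_ENABLE_BIT);
    mmio_read(fdi_tx_ctl_reg);   /* posting read: flush the write before waiting */
    wait_us(100);                /* give the PLL time to spin down */
}

int main(void)
{
    fake_reg = PLL_ENABLE_BIT;
    fdi_tx_pll_disable(0x60100);                 /* illustrative register offset */
    printf("reg after disable: 0x%08x\n", fake_reg);
    return 0;
}
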
@@ -3593,6 +3602,13 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |||
3593 | if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET)) | 3602 | if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET)) |
3594 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 3603 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
3595 | 3604 | ||
3605 | /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes | ||
3606 | * with a hsync front porch of 0. | ||
3607 | */ | ||
3608 | if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && | ||
3609 | adjusted_mode->hsync_start == adjusted_mode->hdisplay) | ||
3610 | return false; | ||
3611 | |||
3596 | return true; | 3612 | return true; |
3597 | } | 3613 | } |
3598 | 3614 | ||
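
The mode_fixup check added above encodes a simple geometric fact: the horizontal front porch is hsync_start - hdisplay, so a mode whose hsync_start equals hdisplay has a zero front porch, which Cantiga (G4X) and newer cannot drive. A minimal sketch of that rejection logic, with a hypothetical struct standing in for drm_display_mode:

#include <stdbool.h>

struct mode { int hdisplay, hsync_start, hsync_end, htotal; };

static bool zero_hsync_front_porch(const struct mode *m)
{
    return m->hsync_start == m->hdisplay;   /* front porch == 0 */
}

static bool mode_ok(const struct mode *m, int gen, bool is_g4x)
{
    if ((gen > 4 || is_g4x) && zero_hsync_front_porch(m))
        return false;                        /* prune the mode */
    return true;
}

int main(void)
{
    struct mode m = { 1024, 1024, 1088, 1184 };

    return mode_ok(&m, 5, false) ? 0 : 1;    /* rejected on gen5: exits 1 */
}
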
@@ -5057,6 +5073,91 @@ static void g4x_write_eld(struct drm_connector *connector, | |||
5057 | I915_WRITE(G4X_AUD_CNTL_ST, i); | 5073 | I915_WRITE(G4X_AUD_CNTL_ST, i); |
5058 | } | 5074 | } |
5059 | 5075 | ||
5076 | static void haswell_write_eld(struct drm_connector *connector, | ||
5077 | struct drm_crtc *crtc) | ||
5078 | { | ||
5079 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
5080 | uint8_t *eld = connector->eld; | ||
5081 | struct drm_device *dev = crtc->dev; | ||
5082 | uint32_t eldv; | ||
5083 | uint32_t i; | ||
5084 | int len; | ||
5085 | int pipe = to_intel_crtc(crtc)->pipe; | ||
5086 | int tmp; | ||
5087 | |||
5088 | int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe); | ||
5089 | int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe); | ||
5090 | int aud_config = HSW_AUD_CFG(pipe); | ||
5091 | int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD; | ||
5092 | |||
5093 | |||
5094 | DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n"); | ||
5095 | |||
5096 | /* Audio output enable */ | ||
5097 | DRM_DEBUG_DRIVER("HDMI audio: enable codec\n"); | ||
5098 | tmp = I915_READ(aud_cntrl_st2); | ||
5099 | tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4)); | ||
5100 | I915_WRITE(aud_cntrl_st2, tmp); | ||
5101 | |||
5102 | /* Wait for 1 vertical blank */ | ||
5103 | intel_wait_for_vblank(dev, pipe); | ||
5104 | |||
5105 | /* Set ELD valid state */ | ||
5106 | tmp = I915_READ(aud_cntrl_st2); | ||
5107 | DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp); | ||
5108 | tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); | ||
5109 | I915_WRITE(aud_cntrl_st2, tmp); | ||
5110 | tmp = I915_READ(aud_cntrl_st2); | ||
5111 | DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp); | ||
5112 | |||
5113 | /* Enable HDMI mode */ | ||
5114 | tmp = I915_READ(aud_config); | ||
5115 | DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp); | ||
5116 | /* clear N_programing_enable and N_value_index */ | ||
5117 | tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); | ||
5118 | I915_WRITE(aud_config, tmp); | ||
5119 | |||
5120 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); | ||
5121 | |||
5122 | eldv = AUDIO_ELD_VALID_A << (pipe * 4); | ||
5123 | |||
5124 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | ||
5125 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); | ||
5126 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ | ||
5127 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ | ||
5128 | } else | ||
5129 | I915_WRITE(aud_config, 0); | ||
5130 | |||
5131 | if (intel_eld_uptodate(connector, | ||
5132 | aud_cntrl_st2, eldv, | ||
5133 | aud_cntl_st, IBX_ELD_ADDRESS, | ||
5134 | hdmiw_hdmiedid)) | ||
5135 | return; | ||
5136 | |||
5137 | i = I915_READ(aud_cntrl_st2); | ||
5138 | i &= ~eldv; | ||
5139 | I915_WRITE(aud_cntrl_st2, i); | ||
5140 | |||
5141 | if (!eld[0]) | ||
5142 | return; | ||
5143 | |||
5144 | i = I915_READ(aud_cntl_st); | ||
5145 | i &= ~IBX_ELD_ADDRESS; | ||
5146 | I915_WRITE(aud_cntl_st, i); | ||
5147 | i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ | ||
5148 | DRM_DEBUG_DRIVER("port num:%d\n", i); | ||
5149 | |||
5150 | len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ | ||
5151 | DRM_DEBUG_DRIVER("ELD size %d\n", len); | ||
5152 | for (i = 0; i < len; i++) | ||
5153 | I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); | ||
5154 | |||
5155 | i = I915_READ(aud_cntrl_st2); | ||
5156 | i |= eldv; | ||
5157 | I915_WRITE(aud_cntrl_st2, i); | ||
5158 | |||
5159 | } | ||
5160 | |||
5060 | static void ironlake_write_eld(struct drm_connector *connector, | 5161 | static void ironlake_write_eld(struct drm_connector *connector, |
5061 | struct drm_crtc *crtc) | 5162 | struct drm_crtc *crtc) |
5062 | { | 5163 | { |
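
The ELD upload loop inside haswell_write_eld() above follows the same recipe as the other write_eld helpers: byte 2 of the cached ELD is used as its length in dwords, the hardware buffer holds 84 bytes (21 dwords), and the payload is streamed dword by dword into a single data register. A hedged user-space sketch of just that loop; write_dword() is a stand-in for I915_WRITE, and the register offset is only an example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void write_dword(uint32_t reg, uint32_t val)   /* stand-in for I915_WRITE */
{
    printf("write 0x%05x <- 0x%08x\n", reg, val);
}

static void upload_eld(uint32_t data_reg, const uint8_t *eld)
{
    uint32_t len = eld[2];       /* ELD size in 4-byte dwords */
    uint32_t i, dword;

    if (len > 21)                /* clamp to the 84-byte hardware buffer */
        len = 21;

    for (i = 0; i < len; i++) {
        memcpy(&dword, eld + i * 4, sizeof(dword));
        write_dword(data_reg, dword);
    }
}

int main(void)
{
    uint8_t eld[84] = { 0 };

    eld[2] = 2;                  /* pretend the ELD is 2 dwords long */
    upload_eld(0x65050, eld);    /* HSW_AUD_EDID_DATA_A from the hunk above */
    return 0;
}
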
@@ -5069,28 +5170,24 @@ static void ironlake_write_eld(struct drm_connector *connector, | |||
5069 | int aud_config; | 5170 | int aud_config; |
5070 | int aud_cntl_st; | 5171 | int aud_cntl_st; |
5071 | int aud_cntrl_st2; | 5172 | int aud_cntrl_st2; |
5173 | int pipe = to_intel_crtc(crtc)->pipe; | ||
5072 | 5174 | ||
5073 | if (HAS_PCH_IBX(connector->dev)) { | 5175 | if (HAS_PCH_IBX(connector->dev)) { |
5074 | hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A; | 5176 | hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe); |
5075 | aud_config = IBX_AUD_CONFIG_A; | 5177 | aud_config = IBX_AUD_CFG(pipe); |
5076 | aud_cntl_st = IBX_AUD_CNTL_ST_A; | 5178 | aud_cntl_st = IBX_AUD_CNTL_ST(pipe); |
5077 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; | 5179 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; |
5078 | } else { | 5180 | } else { |
5079 | hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A; | 5181 | hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); |
5080 | aud_config = CPT_AUD_CONFIG_A; | 5182 | aud_config = CPT_AUD_CFG(pipe); |
5081 | aud_cntl_st = CPT_AUD_CNTL_ST_A; | 5183 | aud_cntl_st = CPT_AUD_CNTL_ST(pipe); |
5082 | aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; | 5184 | aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; |
5083 | } | 5185 | } |
5084 | 5186 | ||
5085 | i = to_intel_crtc(crtc)->pipe; | 5187 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); |
5086 | hdmiw_hdmiedid += i * 0x100; | ||
5087 | aud_cntl_st += i * 0x100; | ||
5088 | aud_config += i * 0x100; | ||
5089 | |||
5090 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i)); | ||
5091 | 5188 | ||
5092 | i = I915_READ(aud_cntl_st); | 5189 | i = I915_READ(aud_cntl_st); |
5093 | i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */ | 5190 | i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ |
5094 | if (!i) { | 5191 | if (!i) { |
5095 | DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); | 5192 | DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); |
5096 | /* operate blindly on all ports */ | 5193 | /* operate blindly on all ports */ |
@@ -5584,12 +5681,13 @@ mode_fits_in_fbdev(struct drm_device *dev, | |||
5584 | return fb; | 5681 | return fb; |
5585 | } | 5682 | } |
5586 | 5683 | ||
5587 | bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 5684 | bool intel_get_load_detect_pipe(struct drm_connector *connector, |
5588 | struct drm_connector *connector, | ||
5589 | struct drm_display_mode *mode, | 5685 | struct drm_display_mode *mode, |
5590 | struct intel_load_detect_pipe *old) | 5686 | struct intel_load_detect_pipe *old) |
5591 | { | 5687 | { |
5592 | struct intel_crtc *intel_crtc; | 5688 | struct intel_crtc *intel_crtc; |
5689 | struct intel_encoder *intel_encoder = | ||
5690 | intel_attached_encoder(connector); | ||
5593 | struct drm_crtc *possible_crtc; | 5691 | struct drm_crtc *possible_crtc; |
5594 | struct drm_encoder *encoder = &intel_encoder->base; | 5692 | struct drm_encoder *encoder = &intel_encoder->base; |
5595 | struct drm_crtc *crtc = NULL; | 5693 | struct drm_crtc *crtc = NULL; |
@@ -5615,21 +5713,12 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
5615 | if (encoder->crtc) { | 5713 | if (encoder->crtc) { |
5616 | crtc = encoder->crtc; | 5714 | crtc = encoder->crtc; |
5617 | 5715 | ||
5618 | intel_crtc = to_intel_crtc(crtc); | 5716 | old->dpms_mode = connector->dpms; |
5619 | old->dpms_mode = intel_crtc->dpms_mode; | ||
5620 | old->load_detect_temp = false; | 5717 | old->load_detect_temp = false; |
5621 | 5718 | ||
5622 | /* Make sure the crtc and connector are running */ | 5719 | /* Make sure the crtc and connector are running */ |
5623 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { | 5720 | if (connector->dpms != DRM_MODE_DPMS_ON) |
5624 | struct drm_encoder_helper_funcs *encoder_funcs; | 5721 | connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); |
5625 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
5626 | |||
5627 | crtc_funcs = crtc->helper_private; | ||
5628 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | ||
5629 | |||
5630 | encoder_funcs = encoder->helper_private; | ||
5631 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | ||
5632 | } | ||
5633 | 5722 | ||
5634 | return true; | 5723 | return true; |
5635 | } | 5724 | } |
@@ -5657,7 +5746,7 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
5657 | connector->encoder = encoder; | 5746 | connector->encoder = encoder; |
5658 | 5747 | ||
5659 | intel_crtc = to_intel_crtc(crtc); | 5748 | intel_crtc = to_intel_crtc(crtc); |
5660 | old->dpms_mode = intel_crtc->dpms_mode; | 5749 | old->dpms_mode = connector->dpms; |
5661 | old->load_detect_temp = true; | 5750 | old->load_detect_temp = true; |
5662 | old->release_fb = NULL; | 5751 | old->release_fb = NULL; |
5663 | 5752 | ||
@@ -5682,33 +5771,34 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
5682 | DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); | 5771 | DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); |
5683 | if (IS_ERR(crtc->fb)) { | 5772 | if (IS_ERR(crtc->fb)) { |
5684 | DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); | 5773 | DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); |
5685 | crtc->fb = old_fb; | 5774 | goto fail; |
5686 | return false; | ||
5687 | } | 5775 | } |
5688 | 5776 | ||
5689 | if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { | 5777 | if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { |
5690 | DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); | 5778 | DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); |
5691 | if (old->release_fb) | 5779 | if (old->release_fb) |
5692 | old->release_fb->funcs->destroy(old->release_fb); | 5780 | old->release_fb->funcs->destroy(old->release_fb); |
5693 | crtc->fb = old_fb; | 5781 | goto fail; |
5694 | return false; | ||
5695 | } | 5782 | } |
5696 | 5783 | ||
5697 | /* let the connector get through one full cycle before testing */ | 5784 | /* let the connector get through one full cycle before testing */ |
5698 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 5785 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
5699 | 5786 | ||
5700 | return true; | 5787 | return true; |
5788 | fail: | ||
5789 | connector->encoder = NULL; | ||
5790 | encoder->crtc = NULL; | ||
5791 | crtc->fb = old_fb; | ||
5792 | return false; | ||
5701 | } | 5793 | } |
5702 | 5794 | ||
5703 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | 5795 | void intel_release_load_detect_pipe(struct drm_connector *connector, |
5704 | struct drm_connector *connector, | ||
5705 | struct intel_load_detect_pipe *old) | 5796 | struct intel_load_detect_pipe *old) |
5706 | { | 5797 | { |
5798 | struct intel_encoder *intel_encoder = | ||
5799 | intel_attached_encoder(connector); | ||
5707 | struct drm_encoder *encoder = &intel_encoder->base; | 5800 | struct drm_encoder *encoder = &intel_encoder->base; |
5708 | struct drm_device *dev = encoder->dev; | 5801 | struct drm_device *dev = encoder->dev; |
5709 | struct drm_crtc *crtc = encoder->crtc; | ||
5710 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | ||
5711 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
5712 | 5802 | ||
5713 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", | 5803 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", |
5714 | connector->base.id, drm_get_connector_name(connector), | 5804 | connector->base.id, drm_get_connector_name(connector), |
@@ -5716,6 +5806,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
5716 | 5806 | ||
5717 | if (old->load_detect_temp) { | 5807 | if (old->load_detect_temp) { |
5718 | connector->encoder = NULL; | 5808 | connector->encoder = NULL; |
5809 | encoder->crtc = NULL; | ||
5719 | drm_helper_disable_unused_functions(dev); | 5810 | drm_helper_disable_unused_functions(dev); |
5720 | 5811 | ||
5721 | if (old->release_fb) | 5812 | if (old->release_fb) |
@@ -5725,10 +5816,8 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
5725 | } | 5816 | } |
5726 | 5817 | ||
5727 | /* Switch crtc and encoder back off if necessary */ | 5818 | /* Switch crtc and encoder back off if necessary */ |
5728 | if (old->dpms_mode != DRM_MODE_DPMS_ON) { | 5819 | if (old->dpms_mode != DRM_MODE_DPMS_ON) |
5729 | encoder_funcs->dpms(encoder, old->dpms_mode); | 5820 | connector->funcs->dpms(connector, old->dpms_mode); |
5730 | crtc_funcs->dpms(crtc, old->dpms_mode); | ||
5731 | } | ||
5732 | } | 5821 | } |
5733 | 5822 | ||
5734 | /* Returns the clock of the currently programmed mode of the given pipe. */ | 5823 | /* Returns the clock of the currently programmed mode of the given pipe. */ |
@@ -6939,7 +7028,7 @@ static void intel_init_display(struct drm_device *dev) | |||
6939 | dev_priv->display.write_eld = ironlake_write_eld; | 7028 | dev_priv->display.write_eld = ironlake_write_eld; |
6940 | } else if (IS_HASWELL(dev)) { | 7029 | } else if (IS_HASWELL(dev)) { |
6941 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; | 7030 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; |
6942 | dev_priv->display.write_eld = ironlake_write_eld; | 7031 | dev_priv->display.write_eld = haswell_write_eld; |
6943 | } else | 7032 | } else |
6944 | dev_priv->display.update_wm = NULL; | 7033 | dev_priv->display.update_wm = NULL; |
6945 | } else if (IS_G4X(dev)) { | 7034 | } else if (IS_G4X(dev)) { |
@@ -7017,21 +7106,16 @@ static struct intel_quirk intel_quirks[] = { | |||
7017 | /* HP Mini needs pipe A force quirk (LP: #322104) */ | 7106 | /* HP Mini needs pipe A force quirk (LP: #322104) */ |
7018 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, | 7107 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, |
7019 | 7108 | ||
7020 | /* Thinkpad R31 needs pipe A force quirk */ | ||
7021 | { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, | ||
7022 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ | 7109 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ |
7023 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, | 7110 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, |
7024 | 7111 | ||
7025 | /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ | ||
7026 | { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, | ||
7027 | /* ThinkPad X40 needs pipe A force quirk */ | ||
7028 | |||
7029 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ | 7112 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ |
7030 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, | 7113 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, |
7031 | 7114 | ||
7032 | /* 855 & before need to leave pipe A & dpll A up */ | 7115 | /* 855 & before need to leave pipe A & dpll A up */ |
7033 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | 7116 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
7034 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | 7117 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
7118 | { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | ||
7035 | 7119 | ||
7036 | /* Lenovo U160 cannot use SSC on LVDS */ | 7120 | /* Lenovo U160 cannot use SSC on LVDS */ |
7037 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, | 7121 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, |
@@ -7242,7 +7326,7 @@ struct intel_display_error_state { | |||
7242 | u32 position; | 7326 | u32 position; |
7243 | u32 base; | 7327 | u32 base; |
7244 | u32 size; | 7328 | u32 size; |
7245 | } cursor[2]; | 7329 | } cursor[I915_MAX_PIPES]; |
7246 | 7330 | ||
7247 | struct intel_pipe_error_state { | 7331 | struct intel_pipe_error_state { |
7248 | u32 conf; | 7332 | u32 conf; |
@@ -7254,7 +7338,7 @@ struct intel_display_error_state { | |||
7254 | u32 vtotal; | 7338 | u32 vtotal; |
7255 | u32 vblank; | 7339 | u32 vblank; |
7256 | u32 vsync; | 7340 | u32 vsync; |
7257 | } pipe[2]; | 7341 | } pipe[I915_MAX_PIPES]; |
7258 | 7342 | ||
7259 | struct intel_plane_error_state { | 7343 | struct intel_plane_error_state { |
7260 | u32 control; | 7344 | u32 control; |
@@ -7264,7 +7348,7 @@ struct intel_display_error_state { | |||
7264 | u32 addr; | 7348 | u32 addr; |
7265 | u32 surface; | 7349 | u32 surface; |
7266 | u32 tile_offset; | 7350 | u32 tile_offset; |
7267 | } plane[2]; | 7351 | } plane[I915_MAX_PIPES]; |
7268 | }; | 7352 | }; |
7269 | 7353 | ||
7270 | struct intel_display_error_state * | 7354 | struct intel_display_error_state * |
@@ -7278,7 +7362,7 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
7278 | if (error == NULL) | 7362 | if (error == NULL) |
7279 | return NULL; | 7363 | return NULL; |
7280 | 7364 | ||
7281 | for (i = 0; i < 2; i++) { | 7365 | for_each_pipe(i) { |
7282 | error->cursor[i].control = I915_READ(CURCNTR(i)); | 7366 | error->cursor[i].control = I915_READ(CURCNTR(i)); |
7283 | error->cursor[i].position = I915_READ(CURPOS(i)); | 7367 | error->cursor[i].position = I915_READ(CURPOS(i)); |
7284 | error->cursor[i].base = I915_READ(CURBASE(i)); | 7368 | error->cursor[i].base = I915_READ(CURBASE(i)); |
@@ -7311,9 +7395,11 @@ intel_display_print_error_state(struct seq_file *m, | |||
7311 | struct drm_device *dev, | 7395 | struct drm_device *dev, |
7312 | struct intel_display_error_state *error) | 7396 | struct intel_display_error_state *error) |
7313 | { | 7397 | { |
7398 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
7314 | int i; | 7399 | int i; |
7315 | 7400 | ||
7316 | for (i = 0; i < 2; i++) { | 7401 | seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe); |
7402 | for_each_pipe(i) { | ||
7317 | seq_printf(m, "Pipe [%d]:\n", i); | 7403 | seq_printf(m, "Pipe [%d]:\n", i); |
7318 | seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); | 7404 | seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); |
7319 | seq_printf(m, " SRC: %08x\n", error->pipe[i].source); | 7405 | seq_printf(m, " SRC: %08x\n", error->pipe[i].source); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d14b1e39244c..143d19c26752 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -850,10 +850,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
850 | * supposed to be read-only. | 850 | * supposed to be read-only. |
851 | */ | 851 | */ |
852 | intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; | 852 | intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; |
853 | intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; | ||
854 | 853 | ||
855 | /* Handle DP bits in common between all three register formats */ | 854 | /* Handle DP bits in common between all three register formats */ |
856 | |||
857 | intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; | 855 | intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; |
858 | 856 | ||
859 | switch (intel_dp->lane_count) { | 857 | switch (intel_dp->lane_count) { |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index e86b3a20d70b..ee0beb4833fb 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -435,12 +435,10 @@ struct intel_load_detect_pipe { | |||
435 | bool load_detect_temp; | 435 | bool load_detect_temp; |
436 | int dpms_mode; | 436 | int dpms_mode; |
437 | }; | 437 | }; |
438 | extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 438 | extern bool intel_get_load_detect_pipe(struct drm_connector *connector, |
439 | struct drm_connector *connector, | ||
440 | struct drm_display_mode *mode, | 439 | struct drm_display_mode *mode, |
441 | struct intel_load_detect_pipe *old); | 440 | struct intel_load_detect_pipe *old); |
442 | extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | 441 | extern void intel_release_load_detect_pipe(struct drm_connector *connector, |
443 | struct drm_connector *connector, | ||
444 | struct intel_load_detect_pipe *old); | 442 | struct intel_load_detect_pipe *old); |
445 | 443 | ||
446 | extern void intelfb_restore(void); | 444 | extern void intelfb_restore(void); |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 03dfdff8e003..227551f12d25 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -115,9 +115,9 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | |||
115 | if (mode == DRM_MODE_DPMS_ON) { | 115 | if (mode == DRM_MODE_DPMS_ON) { |
116 | I915_WRITE(dvo_reg, temp | DVO_ENABLE); | 116 | I915_WRITE(dvo_reg, temp | DVO_ENABLE); |
117 | I915_READ(dvo_reg); | 117 | I915_READ(dvo_reg); |
118 | intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); | 118 | intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); |
119 | } else { | 119 | } else { |
120 | intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); | 120 | intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); |
121 | I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); | 121 | I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); |
122 | I915_READ(dvo_reg); | 122 | I915_READ(dvo_reg); |
123 | } | 123 | } |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 830d0dd610e1..afd0f30ab882 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -235,54 +235,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | |||
235 | return 0; | 235 | return 0; |
236 | } | 236 | } |
237 | 237 | ||
238 | /* Workaround for i830 bug where pipe a must be enable to change control regs */ | ||
239 | static int | ||
240 | i830_activate_pipe_a(struct drm_device *dev) | ||
241 | { | ||
242 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
243 | struct intel_crtc *crtc; | ||
244 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
245 | struct drm_display_mode vesa_640x480 = { | ||
246 | DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, | ||
247 | 752, 800, 0, 480, 489, 492, 525, 0, | ||
248 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) | ||
249 | }, *mode; | ||
250 | |||
251 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]); | ||
252 | if (crtc->dpms_mode == DRM_MODE_DPMS_ON) | ||
253 | return 0; | ||
254 | |||
255 | /* most i8xx have pipe a forced on, so don't trust dpms mode */ | ||
256 | if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE) | ||
257 | return 0; | ||
258 | |||
259 | crtc_funcs = crtc->base.helper_private; | ||
260 | if (crtc_funcs->dpms == NULL) | ||
261 | return 0; | ||
262 | |||
263 | DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); | ||
264 | |||
265 | mode = drm_mode_duplicate(dev, &vesa_640x480); | ||
266 | |||
267 | if (!drm_crtc_helper_set_mode(&crtc->base, mode, | ||
268 | crtc->base.x, crtc->base.y, | ||
269 | crtc->base.fb)) | ||
270 | return 0; | ||
271 | |||
272 | crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON); | ||
273 | return 1; | ||
274 | } | ||
275 | |||
276 | static void | ||
277 | i830_deactivate_pipe_a(struct drm_device *dev) | ||
278 | { | ||
279 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
280 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0]; | ||
281 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
282 | |||
283 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | ||
284 | } | ||
285 | |||
286 | /* overlay needs to be disable in OCMD reg */ | 238 | /* overlay needs to be disable in OCMD reg */ |
287 | static int intel_overlay_on(struct intel_overlay *overlay) | 239 | static int intel_overlay_on(struct intel_overlay *overlay) |
288 | { | 240 | { |
@@ -290,17 +242,12 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
290 | struct drm_i915_private *dev_priv = dev->dev_private; | 242 | struct drm_i915_private *dev_priv = dev->dev_private; |
291 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | 243 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; |
292 | struct drm_i915_gem_request *request; | 244 | struct drm_i915_gem_request *request; |
293 | int pipe_a_quirk = 0; | ||
294 | int ret; | 245 | int ret; |
295 | 246 | ||
296 | BUG_ON(overlay->active); | 247 | BUG_ON(overlay->active); |
297 | overlay->active = 1; | 248 | overlay->active = 1; |
298 | 249 | ||
299 | if (IS_I830(dev)) { | 250 | WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); |
300 | pipe_a_quirk = i830_activate_pipe_a(dev); | ||
301 | if (pipe_a_quirk < 0) | ||
302 | return pipe_a_quirk; | ||
303 | } | ||
304 | 251 | ||
305 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 252 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
306 | if (request == NULL) { | 253 | if (request == NULL) { |
@@ -322,9 +269,6 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
322 | 269 | ||
323 | ret = intel_overlay_do_wait_request(overlay, request, NULL); | 270 | ret = intel_overlay_do_wait_request(overlay, request, NULL); |
324 | out: | 271 | out: |
325 | if (pipe_a_quirk) | ||
326 | i830_deactivate_pipe_a(dev); | ||
327 | |||
328 | return ret; | 272 | return ret; |
329 | } | 273 | } |
330 | 274 | ||
@@ -1439,7 +1383,7 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1439 | } | 1383 | } |
1440 | overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; | 1384 | overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; |
1441 | } else { | 1385 | } else { |
1442 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); | 1386 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); |
1443 | if (ret) { | 1387 | if (ret) { |
1444 | DRM_ERROR("failed to pin overlay register bo\n"); | 1388 | DRM_ERROR("failed to pin overlay register bo\n"); |
1445 | goto out_free_bo; | 1389 | goto out_free_bo; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9b05f7832dc2..a465debdfcf7 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -2138,7 +2138,7 @@ intel_alloc_context_page(struct drm_device *dev) | |||
2138 | return NULL; | 2138 | return NULL; |
2139 | } | 2139 | } |
2140 | 2140 | ||
2141 | ret = i915_gem_object_pin(ctx, 4096, true); | 2141 | ret = i915_gem_object_pin(ctx, 4096, true, false); |
2142 | if (ret) { | 2142 | if (ret) { |
2143 | DRM_ERROR("failed to pin power context: %d\n", ret); | 2143 | DRM_ERROR("failed to pin power context: %d\n", ret); |
2144 | goto err_unref; | 2144 | goto err_unref; |
@@ -2372,6 +2372,11 @@ int intel_enable_rc6(const struct drm_device *dev) | |||
2372 | return i915_enable_rc6; | 2372 | return i915_enable_rc6; |
2373 | 2373 | ||
2374 | if (INTEL_INFO(dev)->gen == 5) { | 2374 | if (INTEL_INFO(dev)->gen == 5) { |
2375 | #ifdef CONFIG_INTEL_IOMMU | ||
2376 | /* Disable rc6 on ilk if VT-d is on. */ | ||
2377 | if (intel_iommu_gfx_mapped) | ||
2378 | return false; | ||
2379 | #endif | ||
2375 | DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n"); | 2380 | DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n"); |
2376 | return INTEL_RC6_ENABLE; | 2381 | return INTEL_RC6_ENABLE; |
2377 | } | 2382 | } |
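
The hunk above refuses RC6 on Ironlake when graphics mappings go through VT-d, since that combination is problematic on gen5. A hedged sketch of the guard shape; the Kconfig symbol is forced on and intel_iommu_gfx_mapped is replaced by a local flag purely for the demo:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_INTEL_IOMMU 1        /* pretend the option is enabled for the demo */

static bool iommu_gfx_mapped = true;   /* stand-in for intel_iommu_gfx_mapped */

static bool rc6_allowed_on_ilk(void)
{
#ifdef CONFIG_INTEL_IOMMU
    if (iommu_gfx_mapped)
        return false;               /* RC6 + VT-d is not allowed on Ironlake */
#endif
    return true;
}

int main(void)
{
    printf("rc6 on ilk: %s\n", rc6_allowed_on_ilk() ? "allowed" : "disabled");
    return 0;
}
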
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c828169c73ae..ac93643731aa 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -391,7 +391,7 @@ init_pipe_control(struct intel_ring_buffer *ring) | |||
391 | 391 | ||
392 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); | 392 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
393 | 393 | ||
394 | ret = i915_gem_object_pin(obj, 4096, true); | 394 | ret = i915_gem_object_pin(obj, 4096, true, false); |
395 | if (ret) | 395 | if (ret) |
396 | goto err_unref; | 396 | goto err_unref; |
397 | 397 | ||
@@ -979,7 +979,7 @@ static int init_status_page(struct intel_ring_buffer *ring) | |||
979 | 979 | ||
980 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); | 980 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
981 | 981 | ||
982 | ret = i915_gem_object_pin(obj, 4096, true); | 982 | ret = i915_gem_object_pin(obj, 4096, true, false); |
983 | if (ret != 0) { | 983 | if (ret != 0) { |
984 | goto err_unref; | 984 | goto err_unref; |
985 | } | 985 | } |
@@ -1036,7 +1036,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
1036 | 1036 | ||
1037 | ring->obj = obj; | 1037 | ring->obj = obj; |
1038 | 1038 | ||
1039 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); | 1039 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); |
1040 | if (ret) | 1040 | if (ret) |
1041 | goto err_unref; | 1041 | goto err_unref; |
1042 | 1042 | ||
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 1a0bab07699e..95653a508987 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1303,12 +1303,9 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
1303 | if (force) { | 1303 | if (force) { |
1304 | struct intel_load_detect_pipe tmp; | 1304 | struct intel_load_detect_pipe tmp; |
1305 | 1305 | ||
1306 | if (intel_get_load_detect_pipe(&intel_tv->base, connector, | 1306 | if (intel_get_load_detect_pipe(connector, &mode, &tmp)) { |
1307 | &mode, &tmp)) { | ||
1308 | type = intel_tv_detect_type(intel_tv, connector); | 1307 | type = intel_tv_detect_type(intel_tv, connector); |
1309 | intel_release_load_detect_pipe(&intel_tv->base, | 1308 | intel_release_load_detect_pipe(connector, &tmp); |
1310 | connector, | ||
1311 | &tmp); | ||
1312 | } else | 1309 | } else |
1313 | return connector_status_unknown; | 1310 | return connector_status_unknown; |
1314 | } else | 1311 | } else |
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index ce9a61179925..b8c00ed33051 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/fb.h> | 15 | #include <linux/fb.h> |
16 | #include <linux/dma-buf.h> | ||
16 | 17 | ||
17 | #include "drmP.h" | 18 | #include "drmP.h" |
18 | #include "drm.h" | 19 | #include "drm.h" |
@@ -377,16 +378,33 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, | |||
377 | { | 378 | { |
378 | struct udl_framebuffer *ufb = to_udl_fb(fb); | 379 | struct udl_framebuffer *ufb = to_udl_fb(fb); |
379 | int i; | 380 | int i; |
381 | int ret = 0; | ||
380 | 382 | ||
381 | if (!ufb->active_16) | 383 | if (!ufb->active_16) |
382 | return 0; | 384 | return 0; |
383 | 385 | ||
386 | if (ufb->obj->base.import_attach) { | ||
387 | ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf, | ||
388 | 0, ufb->obj->base.size, | ||
389 | DMA_FROM_DEVICE); | ||
390 | if (ret) | ||
391 | return ret; | ||
392 | } | ||
393 | |||
384 | for (i = 0; i < num_clips; i++) { | 394 | for (i = 0; i < num_clips; i++) { |
385 | udl_handle_damage(ufb, clips[i].x1, clips[i].y1, | 395 | ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1, |
386 | clips[i].x2 - clips[i].x1, | 396 | clips[i].x2 - clips[i].x1, |
387 | clips[i].y2 - clips[i].y1); | 397 | clips[i].y2 - clips[i].y1); |
398 | if (ret) | ||
399 | break; | ||
388 | } | 400 | } |
389 | return 0; | 401 | |
402 | if (ufb->obj->base.import_attach) { | ||
403 | dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, | ||
404 | 0, ufb->obj->base.size, | ||
405 | DMA_FROM_DEVICE); | ||
406 | } | ||
407 | return ret; | ||
390 | } | 408 | } |
391 | 409 | ||
392 | static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb) | 410 | static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb) |
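
The udl change above brackets the CPU reads of an imported dma-buf between begin/end_cpu_access so the exporter can synchronize caches, and it takes care to run the end call even when a damage update fails mid-loop. A hedged sketch of that bracketing pattern with every helper stubbed; none of these names are the real dma-buf API:

#include <stdio.h>

struct clip { int x1, y1, x2, y2; };

/* Stand-ins for dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() and the
 * damage handler; hypothetical, just enough to show the control flow. */
static int  begin_access(void *buf) { (void)buf; return 0; }
static void end_access(void *buf)   { (void)buf; }
static int  handle_damage(void *fb, struct clip *c)
{
    (void)fb;
    printf("damage %d,%d-%d,%d\n", c->x1, c->y1, c->x2, c->y2);
    return 0;
}

static int flush_damage(void *fb, void *imported_buf,
                        struct clip *clips, int num_clips)
{
    int i, ret = 0;

    if (imported_buf) {
        ret = begin_access(imported_buf);   /* let the exporter sync for CPU */
        if (ret)
            return ret;
    }

    for (i = 0; i < num_clips; i++) {
        ret = handle_damage(fb, &clips[i]);
        if (ret)
            break;                          /* stop early, but still end access */
    }

    if (imported_buf)
        end_access(imported_buf);           /* always paired with the begin call */

    return ret;
}

int main(void)
{
    struct clip c = { 0, 0, 640, 480 };
    int token = 1;                          /* pretend imported buffer handle */

    return flush_damage(NULL, &token, &c, 1);
}
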
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 291ecc145585..47b256375831 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c | |||
@@ -181,11 +181,6 @@ int udl_gem_vmap(struct udl_gem_object *obj) | |||
181 | int ret; | 181 | int ret; |
182 | 182 | ||
183 | if (obj->base.import_attach) { | 183 | if (obj->base.import_attach) { |
184 | ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf, | ||
185 | 0, obj->base.size, DMA_BIDIRECTIONAL); | ||
186 | if (ret) | ||
187 | return -EINVAL; | ||
188 | |||
189 | obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf); | 184 | obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf); |
190 | if (!obj->vmapping) | 185 | if (!obj->vmapping) |
191 | return -ENOMEM; | 186 | return -ENOMEM; |
@@ -206,8 +201,6 @@ void udl_gem_vunmap(struct udl_gem_object *obj) | |||
206 | { | 201 | { |
207 | if (obj->base.import_attach) { | 202 | if (obj->base.import_attach) { |
208 | dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping); | 203 | dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping); |
209 | dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0, | ||
210 | obj->base.size, DMA_BIDIRECTIONAL); | ||
211 | return; | 204 | return; |
212 | } | 205 | } |
213 | 206 | ||
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index d8a79bf59ae7..05e24d31f329 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h | |||
@@ -312,6 +312,7 @@ typedef struct drm_i915_irq_wait { | |||
312 | #define I915_PARAM_HAS_ALIASING_PPGTT 18 | 312 | #define I915_PARAM_HAS_ALIASING_PPGTT 18 |
313 | #define I915_PARAM_HAS_WAIT_TIMEOUT 19 | 313 | #define I915_PARAM_HAS_WAIT_TIMEOUT 19 |
314 | #define I915_PARAM_HAS_SEMAPHORES 20 | 314 | #define I915_PARAM_HAS_SEMAPHORES 20 |
315 | #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 | ||
315 | 316 | ||
316 | typedef struct drm_i915_getparam { | 317 | typedef struct drm_i915_getparam { |
317 | int param; | 318 | int param; |