author     Dave Airlie <airlied@redhat.com>    2010-08-03 19:51:27 -0400
committer  Dave Airlie <airlied@redhat.com>    2010-08-03 19:51:27 -0400
commit     fa0a6024da61d96a12fab18991b9897292b43253 (patch)
tree       35ba7b067863f649dc37c4b67a3ed740c0d9736d /drivers/gpu
parent     4c70b2eae371ebe83019ac47de6088b78124ab36 (diff)
parent     7b824ec2e5d7d086264ecae51e30e3c5e00cdecc (diff)
Merge remote branch 'intel/drm-intel-next' of /ssd/git/drm-next into drm-core-next
* 'intel/drm-intel-next' of /ssd/git/drm-next: (230 commits)
drm/i915: Clear the Ironlake dithering flags when the pipe doesn't want it.
drm/agp/i915: trim stolen space to 32M
drm/i915: Unset cursor if out-of-bounds upon mode change (v4)
drm/i915: Unreference object not handle on creation
drm/i915: Attempt to uncouple object after catastrophic failure in unbind
drm/i915: Repeat unbinding during free if interrupted (v6)
drm/i915: Refactor i915_gem_retire_requests()
drm/i915: Warn if we run out of FIFO space for a mode
drm/i915: Round up the watermark entries (v3)
drm/i915: Typo in (unused) register mask for overlay.
drm/i915: Check overlay stride errata for i830 and i845
drm/i915: Validate the mode for eDP by using fixed panel size
drm/i915: Always use the fixed panel timing for eDP
drm/i915: Enable panel fitting for eDP
drm/i915: Add fixed panel mode parsed from EDID for eDP without fixed mode in VBT
drm/i915/sdvo: Set sync polarity based on actual mode
drm/i915/hdmi: Set sync polarity based on actual mode
drm/i915/pch: Set transcoder sync polarity for DP based on actual mode
drm/i915: Initialize LVDS and eDP outputs before anything else
drm/i915/dp: Correctly report eDP in the core connector type
...
Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/drm_edid.c             |   4
 drivers/gpu/drm/i915/i915_debugfs.c    |   3
 drivers/gpu/drm/i915/i915_dma.c        |  19
 drivers/gpu/drm/i915/i915_drv.c        |  64
 drivers/gpu/drm/i915/i915_drv.h        |  34
 drivers/gpu/drm/i915/i915_gem.c        | 162
 drivers/gpu/drm/i915/i915_gem_tiling.c |   2
 drivers/gpu/drm/i915/i915_irq.c        |  58
 drivers/gpu/drm/i915/i915_reg.h        |  57
 drivers/gpu/drm/i915/i915_suspend.c    |   9
 drivers/gpu/drm/i915/intel_display.c   | 688
 drivers/gpu/drm/i915/intel_dp.c        | 192
 drivers/gpu/drm/i915/intel_drv.h       |  13
 drivers/gpu/drm/i915/intel_fb.c        |   4
 drivers/gpu/drm/i915/intel_hdmi.c      |   9
 drivers/gpu/drm/i915/intel_lvds.c      | 354
 drivers/gpu/drm/i915/intel_overlay.c   |   9
 drivers/gpu/drm/i915/intel_sdvo.c      |   8
 drivers/gpu/drm/i915/intel_tv.c        |  12
 drivers/gpu/drm/radeon/radeon_pm.c     |   1
 20 files changed, 1122 insertions(+), 580 deletions(-)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 83d8072066cb..ea1d57291b0e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -864,8 +864,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
                 mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
                                     false);
                 mode->hdisplay = 1366;
-                mode->vsync_start = mode->vsync_start - 1;
-                mode->vsync_end = mode->vsync_end - 1;
+                mode->hsync_start = mode->hsync_start - 1;
+                mode->hsync_end = mode->hsync_end - 1;
                 return mode;
         }
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index aee83fa178f6..9214119c0154 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -605,6 +605,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
         case FBC_NOT_TILED:
                 seq_printf(m, "scanout buffer not tiled");
                 break;
+        case FBC_MULTIPLE_PIPES:
+                seq_printf(m, "multiple pipes are enabled");
+                break;
         default:
                 seq_printf(m, "unknown reason");
         }
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 92898035845d..f19ffe87af3c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,6 +41,8 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
+extern int intel_max_stolen; /* from AGP driver */
+
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -1257,7 +1259,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
                 drm_mm_put_block(compressed_fb);
         }
 
-        if (!IS_GM45(dev)) {
+        if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
                 compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
                                                     4096, 0);
                 if (!compressed_llb) {
@@ -1283,8 +1285,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
         intel_disable_fbc(dev);
         dev_priv->compressed_fb = compressed_fb;
-
-        if (IS_GM45(dev)) {
+        if (IS_IRONLAKE_M(dev))
+                I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+        else if (IS_GM45(dev)) {
                 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
         } else {
                 I915_WRITE(FBC_CFB_BASE, cfb_base);
@@ -1292,7 +1295,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
                 dev_priv->compressed_llb = compressed_llb;
         }
 
-        DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+        DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
                   ll_base, size >> 20);
 }
 
@@ -1301,7 +1304,7 @@ static void i915_cleanup_compression(struct drm_device *dev)
         struct drm_i915_private *dev_priv = dev->dev_private;
 
         drm_mm_put_block(dev_priv->compressed_fb);
-        if (!IS_GM45(dev))
+        if (dev_priv->compressed_llb)
                 drm_mm_put_block(dev_priv->compressed_llb);
 }
 
@@ -2105,6 +2108,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         if (ret)
                 goto out_iomapfree;
 
+        if (prealloc_size > intel_max_stolen) {
+                DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
+                         prealloc_size >> 20, intel_max_stolen >> 20);
+                prealloc_size = intel_max_stolen;
+        }
+
         dev_priv->wq = create_singlethread_workqueue("i915");
         if (dev_priv->wq == NULL) {
                 DRM_ERROR("Failed to create our workqueue.\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 65d3f3e8475b..5044f653e8ea 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -93,11 +93,11 @@ static const struct intel_device_info intel_i945gm_info = {
 };
 
 static const struct intel_device_info intel_i965g_info = {
-        .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+        .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-        .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+        .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
         .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
         .has_hotplug = 1,
 };
@@ -114,7 +114,7 @@ static const struct intel_device_info intel_g45_info = {
 };
 
 static const struct intel_device_info intel_gm45_info = {
-        .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+        .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
         .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
         .has_pipe_cxsr = 1,
         .has_hotplug = 1,
@@ -134,7 +134,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 
 static const struct intel_device_info intel_ironlake_m_info = {
         .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
-        .need_gfx_hws = 1, .has_rc6 = 1,
+        .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
         .has_hotplug = 1,
 };
 
@@ -148,33 +148,33 @@ static const struct intel_device_info intel_sandybridge_m_info = {
         .has_hotplug = 1, .is_gen6 = 1,
 };
 
-static const struct pci_device_id pciidlist[] = {
-        INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
-        INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
-        INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+static const struct pci_device_id pciidlist[] = {               /* aka */
+        INTEL_VGA_DEVICE(0x3577, &intel_i830_info),             /* I830_M */
+        INTEL_VGA_DEVICE(0x2562, &intel_845g_info),             /* 845_G */
+        INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),             /* I855_GM */
         INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
-        INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
-        INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
-        INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
-        INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
-        INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
-        INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
-        INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
-        INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
-        INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
-        INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
-        INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
-        INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
-        INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
-        INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
-        INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
-        INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
-        INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
-        INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
-        INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
-        INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
-        INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
-        INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+        INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),            /* I865_G */
+        INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),            /* I915_G */
+        INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),            /* E7221_G */
+        INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),           /* I915_GM */
+        INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),            /* I945_G */
+        INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),           /* I945_GM */
+        INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),           /* I945_GME */
+        INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),            /* I946_GZ */
+        INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),            /* G35_G */
+        INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),            /* I965_Q */
+        INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),            /* I965_G */
+        INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),              /* Q35_G */
+        INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),              /* G33_G */
+        INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),              /* Q33_G */
+        INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),           /* I965_GM */
+        INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),           /* I965_GME */
+        INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),             /* GM45_G */
+        INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),              /* IGD_E_G */
+        INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),              /* Q45_G */
+        INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),              /* G45_G */
+        INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),              /* G41_G */
+        INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),              /* B43_G */
         INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
         INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
         INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
         /*
          * Clear request list
          */
-        i915_gem_retire_requests(dev, &dev_priv->render_ring);
+        i915_gem_retire_requests(dev);
 
         if (need_display)
                 i915_save_display(dev);
@@ -482,7 +482,7 @@ static int i915_pm_poweroff(struct device *dev)
         return i915_drm_freeze(drm_dev);
 }
 
-const struct dev_pm_ops i915_pm_ops = {
+static const struct dev_pm_ops i915_pm_ops = {
         .suspend = i915_pm_suspend,
         .resume = i915_pm_resume,
         .freeze = i915_pm_freeze,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d147ab2f5bfc..906663b9929e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -176,7 +176,8 @@ struct drm_i915_display_funcs {
         int (*get_display_clock_speed)(struct drm_device *dev);
         int (*get_fifo_size)(struct drm_device *dev, int plane);
         void (*update_wm)(struct drm_device *dev, int planea_clock,
-                          int planeb_clock, int sr_hdisplay, int pixel_size);
+                          int planeb_clock, int sr_hdisplay, int sr_htotal,
+                          int pixel_size);
         /* clock updates for mode set */
         /* cursor updates */
         /* render clock increase/decrease */
@@ -200,6 +201,8 @@ struct intel_device_info {
         u8 need_gfx_hws : 1;
         u8 is_g4x : 1;
         u8 is_pineview : 1;
+        u8 is_broadwater : 1;
+        u8 is_crestline : 1;
         u8 is_ironlake : 1;
         u8 is_gen6 : 1;
         u8 has_fbc : 1;
@@ -215,6 +218,7 @@ enum no_fbc_reason {
         FBC_MODE_TOO_LARGE, /* mode too large for compression */
         FBC_BAD_PLANE, /* fbc not supported on plane */
         FBC_NOT_TILED, /* buffer not tiled */
+        FBC_MULTIPLE_PIPES, /* more than one pipe active */
 };
 
 enum intel_pch {
@@ -222,6 +226,8 @@ enum intel_pch {
         PCH_CPT, /* Cougarpoint PCH */
 };
 
+#define QUIRK_PIPEA_FORCE (1<<0)
+
 struct intel_fbdev;
 
 typedef struct drm_i915_private {
@@ -285,6 +291,8 @@ typedef struct drm_i915_private {
         struct timer_list hangcheck_timer;
         int hangcheck_count;
         uint32_t last_acthd;
+        uint32_t last_instdone;
+        uint32_t last_instdone1;
 
         struct drm_mm vram;
 
@@ -337,6 +345,8 @@ typedef struct drm_i915_private {
         /* PCH chipset type */
         enum intel_pch pch_type;
 
+        unsigned long quirks;
+
         /* Register state */
         bool modeset_on_lid;
         u8 saveLBB;
@@ -542,6 +552,14 @@ typedef struct drm_i915_private {
                 struct list_head fence_list;
 
                 /**
+                 * List of objects currently pending being freed.
+                 *
+                 * These objects are no longer in use, but due to a signal
+                 * we were prevented from freeing them at the appointed time.
+                 */
+                struct list_head deferred_free_list;
+
+                /**
                  * We leave the user IRQ off as much as possible,
                  * but this means that requests will finish and never
                  * be retired once the system goes idle. Set a timer to
@@ -672,7 +690,7 @@ struct drm_i915_gem_object {
          *
          * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
          */
-        int fence_reg : 5;
+        signed int fence_reg : 5;
 
         /**
          * Used for checking the object doesn't appear more than once
@@ -708,7 +726,7 @@ struct drm_i915_gem_object {
          *
          * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
          * bits with absolutely no headroom. So use 4 bits. */
-        int pin_count : 4;
+        unsigned int pin_count : 4;
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
         /** AGP memory structure for our GTT binding. */
@@ -738,7 +756,7 @@ struct drm_i915_gem_object {
         uint32_t stride;
 
         /** Record of address bit 17 of each page at last unbind. */
-        long *bit_17;
+        unsigned long *bit_17;
 
         /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
         uint32_t agp_type;
@@ -950,8 +968,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev,
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev,
-                struct intel_ring_buffer *ring);
+void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -981,7 +998,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
@@ -1041,6 +1058,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void i8xx_disable_fbc(struct drm_device *dev);
 extern void g4x_disable_fbc(struct drm_device *dev);
+extern void ironlake_disable_fbc(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
@@ -1130,6 +1148,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
 #define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
 #define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
 #define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
 #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 15d2d93aaca9..4efd4fd3b340 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,7 +35,7 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -53,6 +53,7 @@ static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                 struct drm_i915_gem_pwrite *args,
                                 struct drm_file *file_priv);
+static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
@@ -127,8 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
                 return -ENOMEM;
 
         ret = drm_gem_handle_create(file_priv, obj, &handle);
-        drm_gem_object_handle_unreference_unlocked(obj);
-
+        drm_gem_object_unreference_unlocked(obj);
         if (ret)
                 return ret;
 
@@ -1709,9 +1709,9 @@ i915_get_gem_seqno(struct drm_device *dev,
 /**
  * This function clears the request list as sequence numbers are passed.
  */
-void
-i915_gem_retire_requests(struct drm_device *dev,
+static void
+i915_gem_retire_requests_ring(struct drm_device *dev,
                          struct intel_ring_buffer *ring)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         uint32_t seqno;
@@ -1751,6 +1751,30 @@ i915_gem_retire_requests(struct drm_device *dev,
 }
 
 void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+
+        if (!list_empty(&dev_priv->mm.deferred_free_list)) {
+                struct drm_i915_gem_object *obj_priv, *tmp;
+
+                /* We must be careful that during unbind() we do not
+                 * accidentally infinitely recurse into retire requests.
+                 * Currently:
+                 *   retire -> free -> unbind -> wait -> retire_ring
+                 */
+                list_for_each_entry_safe(obj_priv, tmp,
+                                         &dev_priv->mm.deferred_free_list,
+                                         list)
+                        i915_gem_free_object_tail(&obj_priv->base);
+        }
+
+        i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
+        if (HAS_BSD(dev))
+                i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+}
+
+void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
         drm_i915_private_t *dev_priv;
@@ -1761,10 +1785,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
         dev = dev_priv->dev;
 
         mutex_lock(&dev->struct_mutex);
-        i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-        if (HAS_BSD(dev))
-                i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+        i915_gem_retire_requests(dev);
 
         if (!dev_priv->mm.suspended &&
             (!list_empty(&dev_priv->render_ring.request_list) ||
@@ -1832,7 +1853,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
          * a separate wait queue to handle that.
          */
         if (ret == 0)
-                i915_gem_retire_requests(dev, ring);
+                i915_gem_retire_requests_ring(dev, ring);
 
         return ret;
 }
@@ -1945,11 +1966,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
          * before we unbind.
          */
         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-        if (ret) {
-                if (ret != -ERESTARTSYS)
-                        DRM_ERROR("set_domain failed: %d\n", ret);
+        if (ret == -ERESTARTSYS)
                 return ret;
-        }
+        /* Continue on if we fail due to EIO, the GPU is hung so we
+         * should be safe and we need to cleanup or else we might
+         * cause memory corruption through use-after-free.
+         */
 
         BUG_ON(obj_priv->active);
 
@@ -1985,7 +2007,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
         trace_i915_gem_object_unbind(obj);
 
-        return 0;
+        return ret;
 }
 
 static struct drm_gem_object *
@@ -2107,10 +2129,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
         struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
         struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
         for (;;) {
-                i915_gem_retire_requests(dev, render_ring);
-
-                if (HAS_BSD(dev))
-                        i915_gem_retire_requests(dev, bsd_ring);
+                i915_gem_retire_requests(dev);
 
                 /* If there's an inactive buffer available now, grab it
                  * and be done.
@@ -2583,7 +2602,10 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
         if (!IS_I965G(dev)) {
                 int ret;
 
-                i915_gem_object_flush_gpu_write_domain(obj);
+                ret = i915_gem_object_flush_gpu_write_domain(obj);
+                if (ret != 0)
+                        return ret;
+
                 ret = i915_gem_object_wait_rendering(obj);
                 if (ret != 0)
                         return ret;
@@ -2731,7 +2753,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
@@ -2739,17 +2761,18 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
         if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-                return;
+                return 0;
 
         /* Queue the GPU write cache flushing we need. */
         old_write_domain = obj->write_domain;
         i915_gem_flush(dev, 0, obj->write_domain);
-        (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
-        BUG_ON(obj->write_domain);
+        if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
+                return -ENOMEM;
 
         trace_i915_gem_object_change_domain(obj,
                                             obj->read_domains,
                                             old_write_domain);
+        return 0;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2793,9 +2816,11 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
                                             old_write_domain);
 }
 
-void
+int
 i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
 {
+        int ret = 0;
+
         switch (obj->write_domain) {
         case I915_GEM_DOMAIN_GTT:
                 i915_gem_object_flush_gtt_write_domain(obj);
@@ -2804,9 +2829,11 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
                 i915_gem_object_flush_cpu_write_domain(obj);
                 break;
         default:
-                i915_gem_object_flush_gpu_write_domain(obj);
+                ret = i915_gem_object_flush_gpu_write_domain(obj);
                 break;
         }
+
+        return ret;
 }
 
 /**
@@ -2826,7 +2853,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
         if (obj_priv->gtt_space == NULL)
                 return -EINVAL;
 
-        i915_gem_object_flush_gpu_write_domain(obj);
+        ret = i915_gem_object_flush_gpu_write_domain(obj);
+        if (ret != 0)
+                return ret;
+
         /* Wait on any GPU rendering and flushing to occur. */
         ret = i915_gem_object_wait_rendering(obj);
         if (ret != 0)
@@ -2876,7 +2906,9 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
         if (obj_priv->gtt_space == NULL)
                 return -EINVAL;
 
-        i915_gem_object_flush_gpu_write_domain(obj);
+        ret = i915_gem_object_flush_gpu_write_domain(obj);
+        if (ret)
+                return ret;
 
         /* Wait on any GPU rendering and flushing to occur. */
         if (obj_priv->active) {
@@ -2924,7 +2956,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
         uint32_t old_write_domain, old_read_domains;
         int ret;
 
-        i915_gem_object_flush_gpu_write_domain(obj);
+        ret = i915_gem_object_flush_gpu_write_domain(obj);
+        if (ret)
+                return ret;
+
         /* Wait on any GPU rendering and flushing to occur. */
         ret = i915_gem_object_wait_rendering(obj);
         if (ret != 0)
@@ -3214,7 +3249,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
         if (offset == 0 && size == obj->size)
                 return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-        i915_gem_object_flush_gpu_write_domain(obj);
+        ret = i915_gem_object_flush_gpu_write_domain(obj);
+        if (ret)
+                return ret;
+
         /* Wait on any GPU rendering and flushing to occur. */
         ret = i915_gem_object_wait_rendering(obj);
         if (ret != 0)
@@ -3645,6 +3683,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
         return ret;
 }
 
+
 int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        struct drm_file *file_priv,
@@ -3792,7 +3831,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 unsigned long long total_size = 0;
                 int num_fences = 0;
                 for (i = 0; i < args->buffer_count; i++) {
-                        obj_priv = object_list[i]->driver_private;
+                        obj_priv = to_intel_bo(object_list[i]);
 
                         total_size += object_list[i]->size;
                         num_fences +=
@@ -4310,7 +4349,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
         struct drm_i915_gem_busy *args = data;
         struct drm_gem_object *obj;
         struct drm_i915_gem_object *obj_priv;
-        drm_i915_private_t *dev_priv = dev->dev_private;
 
         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
         if (obj == NULL) {
@@ -4325,10 +4363,7 @@
          * actually unmasked, and our working set ends up being larger than
          * required.
          */
-        i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-        if (HAS_BSD(dev))
-                i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+        i915_gem_retire_requests(dev);
 
         obj_priv = to_intel_bo(obj);
         /* Don't count being on the flushing list against the object being
@@ -4438,20 +4473,19 @@ int i915_gem_init_object(struct drm_gem_object *obj)
         return 0;
 }
 
-void i915_gem_free_object(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+        int ret;
 
-        trace_i915_gem_object_destroy(obj);
-
-        while (obj_priv->pin_count > 0)
-                i915_gem_object_unpin(obj);
-
-        if (obj_priv->phys_obj)
-                i915_gem_detach_phys_object(dev, obj);
-
-        i915_gem_object_unbind(obj);
+        ret = i915_gem_object_unbind(obj);
+        if (ret == -ERESTARTSYS) {
+                list_move(&obj_priv->list,
+                          &dev_priv->mm.deferred_free_list);
+                return;
+        }
 
         if (obj_priv->mmap_offset)
                 i915_gem_free_mmap_offset(obj);
@@ -4463,6 +4497,22 @@ void i915_gem_free_object(struct drm_gem_object *obj)
         kfree(obj_priv);
 }
 
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+        trace_i915_gem_object_destroy(obj);
+
+        while (obj_priv->pin_count > 0)
+                i915_gem_object_unpin(obj);
+
+        if (obj_priv->phys_obj)
+                i915_gem_detach_phys_object(dev, obj);
+
+        i915_gem_free_object_tail(obj);
+}
+
 /** Unbinds all inactive objects. */
 static int
 i915_gem_evict_from_inactive_list(struct drm_device *dev)
@@ -4686,9 +4736,19 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
         BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
         mutex_unlock(&dev->struct_mutex);
 
-        drm_irq_install(dev);
+        ret = drm_irq_install(dev);
+        if (ret)
+                goto cleanup_ringbuffer;
 
         return 0;
+
+cleanup_ringbuffer:
+        mutex_lock(&dev->struct_mutex);
+        i915_gem_cleanup_ringbuffer(dev);
+        dev_priv->mm.suspended = 1;
+        mutex_unlock(&dev->struct_mutex);
+
+        return ret;
 }
 
 int
@@ -4726,6 +4786,7 @@ i915_gem_load(struct drm_device *dev)
         INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+        INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
         INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
         INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
         if (HAS_BSD(dev)) {
@@ -5024,10 +5085,7 @@ rescan:
                         continue;
 
                 spin_unlock(&shrink_list_lock);
-                i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-                if (HAS_BSD(dev))
-                        i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+                i915_gem_retire_requests(dev);
 
                 list_for_each_entry_safe(obj_priv, next_obj,
                                          &dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4b7c49d4257d..155719e4d16f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -333,8 +333,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 i915_gem_release_mmap(obj);
 
                 if (ret != 0) {
-                        WARN(ret != -ERESTARTSYS,
-                             "failed to reset object for tiling switch");
                         args->tiling_mode = obj_priv->tiling_mode;
                         args->stride = obj_priv->stride;
                         goto err;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dba53d4b9fb3..85785a8844ed 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -171,10 +171,10 @@ void intel_enable_asle (struct drm_device *dev)
                 ironlake_enable_display_irq(dev_priv, DE_GSE);
         else {
                 i915_enable_pipestat(dev_priv, 1,
-                                     I915_LEGACY_BLC_EVENT_ENABLE);
+                                     PIPE_LEGACY_BLC_EVENT_ENABLE);
                 if (IS_I965G(dev))
                         i915_enable_pipestat(dev_priv, 0,
-                                             I915_LEGACY_BLC_EVENT_ENABLE);
+                                             PIPE_LEGACY_BLC_EVENT_ENABLE);
         }
 }
 
@@ -842,7 +842,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
         u32 iir, new_iir;
         u32 pipea_stats, pipeb_stats;
         u32 vblank_status;
-        u32 vblank_enable;
         int vblank = 0;
         unsigned long irqflags;
         int irq_received;
@@ -856,13 +855,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
         iir = I915_READ(IIR);
 
-        if (IS_I965G(dev)) {
-                vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
-                vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
-        } else {
-                vblank_status = I915_VBLANK_INTERRUPT_STATUS;
-                vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
-        }
+        if (IS_I965G(dev))
+                vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
+        else
+                vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
 
         for (;;) {
                 irq_received = iir != 0;
@@ -966,8 +962,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                         intel_finish_page_flip(dev, 1);
                 }
 
-                if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
-                    (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+                if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
+                    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
                     (iir & I915_ASLE_INTERRUPT))
                         opregion_asle_intr(dev);
 
@@ -1233,16 +1229,21 @@ void i915_hangcheck_elapsed(unsigned long data)
 {
         struct drm_device *dev = (struct drm_device *)data;
         drm_i915_private_t *dev_priv = dev->dev_private;
-        uint32_t acthd;
+        uint32_t acthd, instdone, instdone1;
 
         /* No reset support on this chip yet. */
         if (IS_GEN6(dev))
                 return;
 
-        if (!IS_I965G(dev))
+        if (!IS_I965G(dev)) {
                 acthd = I915_READ(ACTHD);
-        else
+                instdone = I915_READ(INSTDONE);
+                instdone1 = 0;
+        } else {
                 acthd = I915_READ(ACTHD_I965);
+                instdone = I915_READ(INSTDONE_I965);
+                instdone1 = I915_READ(INSTDONE1);
+        }
 
         /* If all work is done then ACTHD clearly hasn't advanced. */
         if (list_empty(&dev_priv->render_ring.request_list) ||
@@ -1253,21 +1254,24 @@ void i915_hangcheck_elapsed(unsigned long data)
                 return;
         }
 
-        if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
-                DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
-                i915_handle_error(dev, true);
-                return;
-        }
+        if (dev_priv->last_acthd == acthd &&
+            dev_priv->last_instdone == instdone &&
+            dev_priv->last_instdone1 == instdone1) {
+                if (dev_priv->hangcheck_count++ > 1) {
+                        DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+                        i915_handle_error(dev, true);
+                        return;
+                }
+        } else {
+                dev_priv->hangcheck_count = 0;
+
+                dev_priv->last_acthd = acthd;
+                dev_priv->last_instdone = instdone;
+                dev_priv->last_instdone1 = instdone1;
+        }
 
         /* Reset timer case chip hangs without another request being added */
         mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-
-        if (acthd != dev_priv->last_acthd)
-                dev_priv->hangcheck_count = 0;
-        else
-                dev_priv->hangcheck_count++;
-
-        dev_priv->last_acthd = acthd;
 }
 
 /* drm_dma.h hooks
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6d9b0288272a..281db6e5403a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -442,7 +442,7 @@
 #define GEN6_RENDER_IMR 0x20a8
 #define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
 #define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
-#define GEN6_RENDER TIMEOUT_COUNTER_EXPIRED (1 << 6)
+#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
 #define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
 #define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
 #define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
@@ -530,6 +530,21 @@
 #define DPFC_CHICKEN 0x3224
 #define DPFC_HT_MODIFY (1<<31)
 
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE 0x43200
+#define ILK_DPFC_CONTROL 0x43208
+/* The bit 28-8 is reserved */
+#define DPFC_RESERVED (0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL 0x4320c
+#define ILK_DPFC_STATUS 0x43210
+#define ILK_DPFC_FENCE_YOFF 0x43218
+#define ILK_DPFC_CHICKEN 0x43224
+#define ILK_FBC_RT_BASE 0x2128
+#define ILK_FBC_RT_VALID (1<<0)
+
+#define ILK_DISPLAY_CHICKEN1 0x42000
+#define ILK_FBCQ_DIS (1<<22)
+
 /*
  * GPIO regs
  */
@@ -595,32 +610,6 @@
 #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
 #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
 
-#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define I915_CRC_ERROR_ENABLE (1UL<<29)
-#define I915_CRC_DONE_ENABLE (1UL<<28)
-#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
-#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-#define I915_DPST_EVENT_ENABLE (1UL<<23)
-#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
-#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-#define I915_DPST_EVENT_STATUS (1UL<<7)
-#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
-#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
-
 #define SRX_INDEX 0x3c4
 #define SRX_DATA 0x3c5
 #define SR01 1
@@ -2166,7 +2155,8 @@ | |||
2166 | #define I830_FIFO_LINE_SIZE 32 | 2155 | #define I830_FIFO_LINE_SIZE 32 |
2167 | 2156 | ||
2168 | #define G4X_FIFO_SIZE 127 | 2157 | #define G4X_FIFO_SIZE 127 |
2169 | #define I945_FIFO_SIZE 127 /* 945 & 965 */ | 2158 | #define I965_FIFO_SIZE 512 |
2159 | #define I945_FIFO_SIZE 127 | ||
2170 | #define I915_FIFO_SIZE 95 | 2160 | #define I915_FIFO_SIZE 95 |
2171 | #define I855GM_FIFO_SIZE 127 /* In cachelines */ | 2161 | #define I855GM_FIFO_SIZE 127 /* In cachelines */ |
2172 | #define I830_FIFO_SIZE 95 | 2162 | #define I830_FIFO_SIZE 95 |
@@ -2185,6 +2175,9 @@ | |||
2185 | #define PINEVIEW_CURSOR_DFT_WM 0 | 2175 | #define PINEVIEW_CURSOR_DFT_WM 0 |
2186 | #define PINEVIEW_CURSOR_GUARD_WM 5 | 2176 | #define PINEVIEW_CURSOR_GUARD_WM 5 |
2187 | 2177 | ||
2178 | #define I965_CURSOR_FIFO 64 | ||
2179 | #define I965_CURSOR_MAX_WM 32 | ||
2180 | #define I965_CURSOR_DFT_WM 8 | ||
2188 | 2181 | ||
2189 | /* define the Watermark register on Ironlake */ | 2182 | /* define the Watermark register on Ironlake */ |
2190 | #define WM0_PIPEA_ILK 0x45100 | 2183 | #define WM0_PIPEA_ILK 0x45100 |
@@ -2212,6 +2205,9 @@ | |||
2212 | #define ILK_DISPLAY_FIFO 128 | 2205 | #define ILK_DISPLAY_FIFO 128 |
2213 | #define ILK_DISPLAY_MAXWM 64 | 2206 | #define ILK_DISPLAY_MAXWM 64 |
2214 | #define ILK_DISPLAY_DFTWM 8 | 2207 | #define ILK_DISPLAY_DFTWM 8 |
2208 | #define ILK_CURSOR_FIFO 32 | ||
2209 | #define ILK_CURSOR_MAXWM 16 | ||
2210 | #define ILK_CURSOR_DFTWM 8 | ||
2215 | 2211 | ||
2216 | #define ILK_DISPLAY_SR_FIFO 512 | 2212 | #define ILK_DISPLAY_SR_FIFO 512 |
2217 | #define ILK_DISPLAY_MAX_SRWM 0x1ff | 2213 | #define ILK_DISPLAY_MAX_SRWM 0x1ff |
@@ -2510,6 +2506,10 @@ | |||
2510 | #define ILK_VSDPFD_FULL (1<<21) | 2506 | #define ILK_VSDPFD_FULL (1<<21) |
2511 | #define ILK_DSPCLK_GATE 0x42020 | 2507 | #define ILK_DSPCLK_GATE 0x42020 |
2512 | #define ILK_DPARB_CLK_GATE (1<<5) | 2508 | #define ILK_DPARB_CLK_GATE (1<<5) |
2509 | /* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */ | ||
2510 | #define ILK_CLK_FBC (1<<7) | ||
2511 | #define ILK_DPFC_DIS1 (1<<8) | ||
2512 | #define ILK_DPFC_DIS2 (1<<9) | ||
2513 | 2513 | ||
2514 | #define DISP_ARB_CTL 0x45000 | 2514 | #define DISP_ARB_CTL 0x45000 |
2515 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) | 2515 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) |
@@ -2869,6 +2869,7 @@ | |||
2869 | 2869 | ||
2870 | #define PCH_PP_STATUS 0xc7200 | 2870 | #define PCH_PP_STATUS 0xc7200 |
2871 | #define PCH_PP_CONTROL 0xc7204 | 2871 | #define PCH_PP_CONTROL 0xc7204 |
2872 | #define PANEL_UNLOCK_REGS (0xabcd << 16) | ||
2872 | #define EDP_FORCE_VDD (1 << 3) | 2873 | #define EDP_FORCE_VDD (1 << 3) |
2873 | #define EDP_BLC_ENABLE (1 << 2) | 2874 | #define EDP_BLC_ENABLE (1 << 2) |
2874 | #define PANEL_POWER_RESET (1 << 1) | 2875 | #define PANEL_POWER_RESET (1 << 1) |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 60a5800fba6e..6e2025274db5 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -602,7 +602,9 @@ void i915_save_display(struct drm_device *dev) | |||
602 | 602 | ||
603 | /* Only save FBC state on the platform that supports FBC */ | 603 | /* Only save FBC state on the platform that supports FBC */ |
604 | if (I915_HAS_FBC(dev)) { | 604 | if (I915_HAS_FBC(dev)) { |
605 | if (IS_GM45(dev)) { | 605 | if (IS_IRONLAKE_M(dev)) { |
606 | dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); | ||
607 | } else if (IS_GM45(dev)) { | ||
606 | dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); | 608 | dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); |
607 | } else { | 609 | } else { |
608 | dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); | 610 | dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); |
@@ -706,7 +708,10 @@ void i915_restore_display(struct drm_device *dev) | |||
706 | 708 | ||
707 | /* only restore FBC info on the platform that supports FBC */ | 709 | /* only restore FBC info on the platform that supports FBC */ |
708 | if (I915_HAS_FBC(dev)) { | 710 | if (I915_HAS_FBC(dev)) { |
709 | if (IS_GM45(dev)) { | 711 | if (IS_IRONLAKE_M(dev)) { |
712 | ironlake_disable_fbc(dev); | ||
713 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); | ||
714 | } else if (IS_GM45(dev)) { | ||
710 | g4x_disable_fbc(dev); | 715 | g4x_disable_fbc(dev); |
711 | I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); | 716 | I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); |
712 | } else { | 717 | } else { |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f879589bead1..ae1718549eec 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -43,6 +43,7 @@ | |||
43 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); | 43 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); |
44 | static void intel_update_watermarks(struct drm_device *dev); | 44 | static void intel_update_watermarks(struct drm_device *dev); |
45 | static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); | 45 | static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); |
46 | static void intel_crtc_update_cursor(struct drm_crtc *crtc); | ||
46 | 47 | ||
47 | typedef struct { | 48 | typedef struct { |
48 | /* given values */ | 49 | /* given values */ |
@@ -323,6 +324,9 @@ struct intel_limit { | |||
323 | #define IRONLAKE_DP_P1_MIN 1 | 324 | #define IRONLAKE_DP_P1_MIN 1 |
324 | #define IRONLAKE_DP_P1_MAX 2 | 325 | #define IRONLAKE_DP_P1_MAX 2 |
325 | 326 | ||
327 | /* FDI */ | ||
328 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ | ||
329 | |||
326 | static bool | 330 | static bool |
327 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 331 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
328 | int target, int refclk, intel_clock_t *best_clock); | 332 | int target, int refclk, intel_clock_t *best_clock); |
@@ -863,8 +867,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
863 | intel_clock_t clock; | 867 | intel_clock_t clock; |
864 | int max_n; | 868 | int max_n; |
865 | bool found; | 869 | bool found; |
866 | /* approximately equals target * 0.00488 */ | 870 | /* approximately equals target * 0.00585 */ |
867 | int err_most = (target >> 8) + (target >> 10); | 871 | int err_most = (target >> 8) + (target >> 9); |
868 | found = false; | 872 | found = false; |
869 | 873 | ||
870 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 874 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
@@ -1123,6 +1127,67 @@ static bool g4x_fbc_enabled(struct drm_device *dev) | |||
1123 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | 1127 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
1124 | } | 1128 | } |
1125 | 1129 | ||
1130 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | ||
1131 | { | ||
1132 | struct drm_device *dev = crtc->dev; | ||
1133 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1134 | struct drm_framebuffer *fb = crtc->fb; | ||
1135 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
1136 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | ||
1137 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1138 | int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA : | ||
1139 | DPFC_CTL_PLANEB; | ||
1140 | unsigned long stall_watermark = 200; | ||
1141 | u32 dpfc_ctl; | ||
1142 | |||
1143 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | ||
1144 | dev_priv->cfb_fence = obj_priv->fence_reg; | ||
1145 | dev_priv->cfb_plane = intel_crtc->plane; | ||
1146 | |||
1147 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | ||
1148 | dpfc_ctl &= DPFC_RESERVED; | ||
1149 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); | ||
1150 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | ||
1151 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); | ||
1152 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); | ||
1153 | } else { | ||
1154 | I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); | ||
1155 | } | ||
1156 | |||
1157 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | ||
1158 | I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | ||
1159 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | ||
1160 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | ||
1161 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | ||
1162 | I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); | ||
1163 | /* enable it... */ | ||
1164 | I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) | | ||
1165 | DPFC_CTL_EN); | ||
1166 | |||
1167 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); | ||
1168 | } | ||
1169 | |||
1170 | void ironlake_disable_fbc(struct drm_device *dev) | ||
1171 | { | ||
1172 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1173 | u32 dpfc_ctl; | ||
1174 | |||
1175 | /* Disable compression */ | ||
1176 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | ||
1177 | dpfc_ctl &= ~DPFC_CTL_EN; | ||
1178 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | ||
1179 | intel_wait_for_vblank(dev); | ||
1180 | |||
1181 | DRM_DEBUG_KMS("disabled FBC\n"); | ||
1182 | } | ||
1183 | |||
1184 | static bool ironlake_fbc_enabled(struct drm_device *dev) | ||
1185 | { | ||
1186 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1187 | |||
1188 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; | ||
1189 | } | ||
1190 | |||
1126 | bool intel_fbc_enabled(struct drm_device *dev) | 1191 | bool intel_fbc_enabled(struct drm_device *dev) |
1127 | { | 1192 | { |
1128 | struct drm_i915_private *dev_priv = dev->dev_private; | 1193 | struct drm_i915_private *dev_priv = dev->dev_private; |
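As a rough standalone illustration of the register packing done by ironlake_enable_fbc() above — the compressed-buffer pitch is programmed in 64-byte units minus one, and the plane select, compression limit and (for tiled buffers) the fence number are OR'd on top of the preserved reserved bits — here is a minimal sketch. The DPFC_* bit positions below are made-up stand-ins, not the real i915_reg.h values.

    #include <stdio.h>
    #include <stdint.h>

    /* Made-up stand-ins for the DPFC control fields (not the real definitions). */
    #define DPFC_RESERVED      0x1fffff00u
    #define DPFC_CTL_PLANEA    (0u << 30)
    #define DPFC_CTL_LIMIT_1X  (0u << 28)
    #define DPFC_CTL_FENCE_EN  (1u << 29)

    int main(void)
    {
        uint32_t dpfc_ctl = 0xdeadbeefu;  /* pretend: value read back from ILK_DPFC_CONTROL */
        unsigned int pitch_bytes = 8192;  /* pitch of the compressed framebuffer, assumed */
        unsigned int fence_reg = 2;       /* fence backing a tiled scanout buffer, assumed */

        /* pitch field: 64-byte units, minus one (8192 -> 127) */
        unsigned int cfb_pitch = pitch_bytes / 64 - 1;

        dpfc_ctl &= DPFC_RESERVED;                        /* keep only the reserved bits */
        dpfc_ctl |= DPFC_CTL_PLANEA | DPFC_CTL_LIMIT_1X;  /* plane select + 1x compression limit */
        dpfc_ctl |= DPFC_CTL_FENCE_EN | fence_reg;        /* tiled case: enable the fence */

        printf("cfb_pitch=%u dpfc_ctl=0x%08x\n", cfb_pitch, dpfc_ctl);
        return 0;
    }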
@@ -1181,8 +1246,12 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1181 | struct drm_framebuffer *fb = crtc->fb; | 1246 | struct drm_framebuffer *fb = crtc->fb; |
1182 | struct intel_framebuffer *intel_fb; | 1247 | struct intel_framebuffer *intel_fb; |
1183 | struct drm_i915_gem_object *obj_priv; | 1248 | struct drm_i915_gem_object *obj_priv; |
1249 | struct drm_crtc *tmp_crtc; | ||
1184 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1250 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1185 | int plane = intel_crtc->plane; | 1251 | int plane = intel_crtc->plane; |
1252 | int crtcs_enabled = 0; | ||
1253 | |||
1254 | DRM_DEBUG_KMS("\n"); | ||
1186 | 1255 | ||
1187 | if (!i915_powersave) | 1256 | if (!i915_powersave) |
1188 | return; | 1257 | return; |
@@ -1200,10 +1269,21 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1200 | * If FBC is already on, we just have to verify that we can | 1269 | * If FBC is already on, we just have to verify that we can |
1201 | * keep it that way... | 1270 | * keep it that way... |
1202 | * Need to disable if: | 1271 | * Need to disable if: |
1272 | * - more than one pipe is active | ||
1203 | * - changing FBC params (stride, fence, mode) | 1273 | * - changing FBC params (stride, fence, mode) |
1204 | * - new fb is too large to fit in compressed buffer | 1274 | * - new fb is too large to fit in compressed buffer |
1205 | * - going to an unsupported config (interlace, pixel multiply, etc.) | 1275 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
1206 | */ | 1276 | */ |
1277 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { | ||
1278 | if (tmp_crtc->enabled) | ||
1279 | crtcs_enabled++; | ||
1280 | } | ||
1281 | DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled); | ||
1282 | if (crtcs_enabled > 1) { | ||
1283 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | ||
1284 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | ||
1285 | goto out_disable; | ||
1286 | } | ||
1207 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1287 | if (intel_fb->obj->size > dev_priv->cfb_size) { |
1208 | DRM_DEBUG_KMS("framebuffer too large, disabling " | 1288 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1209 | "compression\n"); | 1289 | "compression\n"); |
@@ -1256,7 +1336,7 @@ out_disable: | |||
1256 | } | 1336 | } |
1257 | } | 1337 | } |
1258 | 1338 | ||
1259 | static int | 1339 | int |
1260 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | 1340 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) |
1261 | { | 1341 | { |
1262 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1342 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
@@ -1265,7 +1345,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | |||
1265 | 1345 | ||
1266 | switch (obj_priv->tiling_mode) { | 1346 | switch (obj_priv->tiling_mode) { |
1267 | case I915_TILING_NONE: | 1347 | case I915_TILING_NONE: |
1268 | alignment = 64 * 1024; | 1348 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1349 | alignment = 128 * 1024; | ||
1350 | else if (IS_I965G(dev)) | ||
1351 | alignment = 4 * 1024; | ||
1352 | else | ||
1353 | alignment = 64 * 1024; | ||
1269 | break; | 1354 | break; |
1270 | case I915_TILING_X: | 1355 | case I915_TILING_X: |
1271 | /* pin() will align the object as required by fence */ | 1356 | /* pin() will align the object as required by fence */ |
@@ -1540,6 +1625,15 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1540 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | 1625 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; |
1541 | u32 temp, tries = 0; | 1626 | u32 temp, tries = 0; |
1542 | 1627 | ||
1628 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit | ||
1629 | for train result */ | ||
1630 | temp = I915_READ(fdi_rx_imr_reg); | ||
1631 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1632 | temp &= ~FDI_RX_BIT_LOCK; | ||
1633 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1634 | I915_READ(fdi_rx_imr_reg); | ||
1635 | udelay(150); | ||
1636 | |||
1543 | /* enable CPU FDI TX and PCH FDI RX */ | 1637 | /* enable CPU FDI TX and PCH FDI RX */ |
1544 | temp = I915_READ(fdi_tx_reg); | 1638 | temp = I915_READ(fdi_tx_reg); |
1545 | temp |= FDI_TX_ENABLE; | 1639 | temp |= FDI_TX_ENABLE; |
@@ -1557,16 +1651,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1557 | I915_READ(fdi_rx_reg); | 1651 | I915_READ(fdi_rx_reg); |
1558 | udelay(150); | 1652 | udelay(150); |
1559 | 1653 | ||
1560 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit | 1654 | for (tries = 0; tries < 5; tries++) { |
1561 | for train result */ | ||
1562 | temp = I915_READ(fdi_rx_imr_reg); | ||
1563 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1564 | temp &= ~FDI_RX_BIT_LOCK; | ||
1565 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1566 | I915_READ(fdi_rx_imr_reg); | ||
1567 | udelay(150); | ||
1568 | |||
1569 | for (;;) { | ||
1570 | temp = I915_READ(fdi_rx_iir_reg); | 1655 | temp = I915_READ(fdi_rx_iir_reg); |
1571 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 1656 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1572 | 1657 | ||
@@ -1576,14 +1661,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1576 | temp | FDI_RX_BIT_LOCK); | 1661 | temp | FDI_RX_BIT_LOCK); |
1577 | break; | 1662 | break; |
1578 | } | 1663 | } |
1579 | |||
1580 | tries++; | ||
1581 | |||
1582 | if (tries > 5) { | ||
1583 | DRM_DEBUG_KMS("FDI train 1 fail!\n"); | ||
1584 | break; | ||
1585 | } | ||
1586 | } | 1664 | } |
1665 | if (tries == 5) | ||
1666 | DRM_DEBUG_KMS("FDI train 1 fail!\n"); | ||
1587 | 1667 | ||
1588 | /* Train 2 */ | 1668 | /* Train 2 */ |
1589 | temp = I915_READ(fdi_tx_reg); | 1669 | temp = I915_READ(fdi_tx_reg); |
@@ -1599,7 +1679,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1599 | 1679 | ||
1600 | tries = 0; | 1680 | tries = 0; |
1601 | 1681 | ||
1602 | for (;;) { | 1682 | for (tries = 0; tries < 5; tries++) { |
1603 | temp = I915_READ(fdi_rx_iir_reg); | 1683 | temp = I915_READ(fdi_rx_iir_reg); |
1604 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 1684 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1605 | 1685 | ||
@@ -1609,14 +1689,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1609 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | 1689 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
1610 | break; | 1690 | break; |
1611 | } | 1691 | } |
1612 | |||
1613 | tries++; | ||
1614 | |||
1615 | if (tries > 5) { | ||
1616 | DRM_DEBUG_KMS("FDI train 2 fail!\n"); | ||
1617 | break; | ||
1618 | } | ||
1619 | } | 1692 | } |
1693 | if (tries == 5) | ||
1694 | DRM_DEBUG_KMS("FDI train 2 fail!\n"); | ||
1620 | 1695 | ||
1621 | DRM_DEBUG_KMS("FDI train done\n"); | 1696 | DRM_DEBUG_KMS("FDI train done\n"); |
1622 | } | 1697 | } |
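The hunks above replace the open-coded for (;;) loops, which incremented and tested a counter inside the body, with a bounded loop whose failure case is checked once after it exits. A minimal self-contained sketch of the same pattern; rx_bit_locked() is just a stub standing in for the FDI_RX_IIR bit-lock test.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub standing in for reading FDI_RX_IIR and testing FDI_RX_BIT_LOCK. */
    static bool rx_bit_locked(int attempt)
    {
        return attempt == 3;    /* pretend the link locks on the fourth poll */
    }

    int main(void)
    {
        int tries;

        for (tries = 0; tries < 5; tries++) {
            if (rx_bit_locked(tries)) {
                printf("FDI train 1 done after %d polls\n", tries + 1);
                break;
            }
        }
        if (tries == 5)
            printf("FDI train 1 fail!\n");
        return 0;
    }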
@@ -1641,6 +1716,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1641 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | 1716 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; |
1642 | u32 temp, i; | 1717 | u32 temp, i; |
1643 | 1718 | ||
1719 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit | ||
1720 | for train result */ | ||
1721 | temp = I915_READ(fdi_rx_imr_reg); | ||
1722 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1723 | temp &= ~FDI_RX_BIT_LOCK; | ||
1724 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1725 | I915_READ(fdi_rx_imr_reg); | ||
1726 | udelay(150); | ||
1727 | |||
1644 | /* enable CPU FDI TX and PCH FDI RX */ | 1728 | /* enable CPU FDI TX and PCH FDI RX */ |
1645 | temp = I915_READ(fdi_tx_reg); | 1729 | temp = I915_READ(fdi_tx_reg); |
1646 | temp |= FDI_TX_ENABLE; | 1730 | temp |= FDI_TX_ENABLE; |
@@ -1666,15 +1750,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1666 | I915_READ(fdi_rx_reg); | 1750 | I915_READ(fdi_rx_reg); |
1667 | udelay(150); | 1751 | udelay(150); |
1668 | 1752 | ||
1669 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit | ||
1670 | for train result */ | ||
1671 | temp = I915_READ(fdi_rx_imr_reg); | ||
1672 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1673 | temp &= ~FDI_RX_BIT_LOCK; | ||
1674 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1675 | I915_READ(fdi_rx_imr_reg); | ||
1676 | udelay(150); | ||
1677 | |||
1678 | for (i = 0; i < 4; i++ ) { | 1753 | for (i = 0; i < 4; i++ ) { |
1679 | temp = I915_READ(fdi_tx_reg); | 1754 | temp = I915_READ(fdi_tx_reg); |
1680 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 1755 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
@@ -1829,7 +1904,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1829 | } | 1904 | } |
1830 | 1905 | ||
1831 | /* Enable panel fitting for LVDS */ | 1906 | /* Enable panel fitting for LVDS */ |
1832 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 1907 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) |
1908 | || HAS_eDP || intel_pch_has_edp(crtc)) { | ||
1833 | temp = I915_READ(pf_ctl_reg); | 1909 | temp = I915_READ(pf_ctl_reg); |
1834 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); | 1910 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); |
1835 | 1911 | ||
@@ -1924,9 +2000,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1924 | reg = I915_READ(trans_dp_ctl); | 2000 | reg = I915_READ(trans_dp_ctl); |
1925 | reg &= ~TRANS_DP_PORT_SEL_MASK; | 2001 | reg &= ~TRANS_DP_PORT_SEL_MASK; |
1926 | reg = TRANS_DP_OUTPUT_ENABLE | | 2002 | reg = TRANS_DP_OUTPUT_ENABLE | |
1927 | TRANS_DP_ENH_FRAMING | | 2003 | TRANS_DP_ENH_FRAMING; |
1928 | TRANS_DP_VSYNC_ACTIVE_HIGH | | 2004 | |
1929 | TRANS_DP_HSYNC_ACTIVE_HIGH; | 2005 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) |
2006 | reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; | ||
2007 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) | ||
2008 | reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; | ||
1930 | 2009 | ||
1931 | switch (intel_trans_dp_port_sel(crtc)) { | 2010 | switch (intel_trans_dp_port_sel(crtc)) { |
1932 | case PCH_DP_B: | 2011 | case PCH_DP_B: |
@@ -1966,6 +2045,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1966 | 2045 | ||
1967 | intel_crtc_load_lut(crtc); | 2046 | intel_crtc_load_lut(crtc); |
1968 | 2047 | ||
2048 | intel_update_fbc(crtc, &crtc->mode); | ||
2049 | |||
1969 | break; | 2050 | break; |
1970 | case DRM_MODE_DPMS_OFF: | 2051 | case DRM_MODE_DPMS_OFF: |
1971 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); | 2052 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); |
@@ -1980,6 +2061,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1980 | I915_READ(dspbase_reg); | 2061 | I915_READ(dspbase_reg); |
1981 | } | 2062 | } |
1982 | 2063 | ||
2064 | if (dev_priv->cfb_plane == plane && | ||
2065 | dev_priv->display.disable_fbc) | ||
2066 | dev_priv->display.disable_fbc(dev); | ||
2067 | |||
1983 | i915_disable_vga(dev); | 2068 | i915_disable_vga(dev); |
1984 | 2069 | ||
1985 | /* disable cpu pipe, disable after all planes disabled */ | 2070 | /* disable cpu pipe, disable after all planes disabled */ |
@@ -2256,6 +2341,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2256 | intel_wait_for_vblank(dev); | 2341 | intel_wait_for_vblank(dev); |
2257 | } | 2342 | } |
2258 | 2343 | ||
2344 | /* Don't disable pipe A or its PLLs if the pipe A force-enable quirk is set */ | ||
2345 | if (pipeconf_reg == PIPEACONF && | ||
2346 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
2347 | goto skip_pipe_off; | ||
2348 | |||
2259 | /* Next, disable display pipes */ | 2349 | /* Next, disable display pipes */ |
2260 | temp = I915_READ(pipeconf_reg); | 2350 | temp = I915_READ(pipeconf_reg); |
2261 | if ((temp & PIPEACONF_ENABLE) != 0) { | 2351 | if ((temp & PIPEACONF_ENABLE) != 0) { |
@@ -2271,7 +2361,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2271 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); | 2361 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); |
2272 | I915_READ(dpll_reg); | 2362 | I915_READ(dpll_reg); |
2273 | } | 2363 | } |
2274 | 2364 | skip_pipe_off: | |
2275 | /* Wait for the clocks to turn off. */ | 2365 | /* Wait for the clocks to turn off. */ |
2276 | udelay(150); | 2366 | udelay(150); |
2277 | break; | 2367 | break; |
@@ -2354,11 +2444,9 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |||
2354 | struct drm_device *dev = crtc->dev; | 2444 | struct drm_device *dev = crtc->dev; |
2355 | if (HAS_PCH_SPLIT(dev)) { | 2445 | if (HAS_PCH_SPLIT(dev)) { |
2356 | /* FDI link clock is fixed at 2.7G */ | 2446 | /* FDI link clock is fixed at 2.7G */ |
2357 | if (mode->clock * 3 > 27000 * 4) | 2447 | if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) |
2358 | return MODE_CLOCK_HIGH; | 2448 | return false; |
2359 | } | 2449 | } |
2360 | |||
2361 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
2362 | return true; | 2450 | return true; |
2363 | } | 2451 | } |
2364 | 2452 | ||
@@ -2539,6 +2627,20 @@ static struct intel_watermark_params g4x_wm_info = { | |||
2539 | 2, | 2627 | 2, |
2540 | G4X_FIFO_LINE_SIZE, | 2628 | G4X_FIFO_LINE_SIZE, |
2541 | }; | 2629 | }; |
2630 | static struct intel_watermark_params g4x_cursor_wm_info = { | ||
2631 | I965_CURSOR_FIFO, | ||
2632 | I965_CURSOR_MAX_WM, | ||
2633 | I965_CURSOR_DFT_WM, | ||
2634 | 2, | ||
2635 | G4X_FIFO_LINE_SIZE, | ||
2636 | }; | ||
2637 | static struct intel_watermark_params i965_cursor_wm_info = { | ||
2638 | I965_CURSOR_FIFO, | ||
2639 | I965_CURSOR_MAX_WM, | ||
2640 | I965_CURSOR_DFT_WM, | ||
2641 | 2, | ||
2642 | I915_FIFO_LINE_SIZE, | ||
2643 | }; | ||
2542 | static struct intel_watermark_params i945_wm_info = { | 2644 | static struct intel_watermark_params i945_wm_info = { |
2543 | I945_FIFO_SIZE, | 2645 | I945_FIFO_SIZE, |
2544 | I915_MAX_WM, | 2646 | I915_MAX_WM, |
@@ -2576,6 +2678,14 @@ static struct intel_watermark_params ironlake_display_wm_info = { | |||
2576 | ILK_FIFO_LINE_SIZE | 2678 | ILK_FIFO_LINE_SIZE |
2577 | }; | 2679 | }; |
2578 | 2680 | ||
2681 | static struct intel_watermark_params ironlake_cursor_wm_info = { | ||
2682 | ILK_CURSOR_FIFO, | ||
2683 | ILK_CURSOR_MAXWM, | ||
2684 | ILK_CURSOR_DFTWM, | ||
2685 | 2, | ||
2686 | ILK_FIFO_LINE_SIZE | ||
2687 | }; | ||
2688 | |||
2579 | static struct intel_watermark_params ironlake_display_srwm_info = { | 2689 | static struct intel_watermark_params ironlake_display_srwm_info = { |
2580 | ILK_DISPLAY_SR_FIFO, | 2690 | ILK_DISPLAY_SR_FIFO, |
2581 | ILK_DISPLAY_MAX_SRWM, | 2691 | ILK_DISPLAY_MAX_SRWM, |
@@ -2625,7 +2735,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
2625 | */ | 2735 | */ |
2626 | entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / | 2736 | entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / |
2627 | 1000; | 2737 | 1000; |
2628 | entries_required /= wm->cacheline_size; | 2738 | entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); |
2629 | 2739 | ||
2630 | DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); | 2740 | DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); |
2631 | 2741 | ||
@@ -2636,8 +2746,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
2636 | /* Don't promote wm_size to unsigned... */ | 2746 | /* Don't promote wm_size to unsigned... */ |
2637 | if (wm_size > (long)wm->max_wm) | 2747 | if (wm_size > (long)wm->max_wm) |
2638 | wm_size = wm->max_wm; | 2748 | wm_size = wm->max_wm; |
2639 | if (wm_size <= 0) | 2749 | if (wm_size <= 0) { |
2640 | wm_size = wm->default_wm; | 2750 | wm_size = wm->default_wm; |
2751 | DRM_ERROR("Insufficient FIFO for plane, expect flickering:" | ||
2752 | " entries required = %ld, available = %lu.\n", | ||
2753 | entries_required + wm->guard_size, | ||
2754 | wm->fifo_size); | ||
2755 | } | ||
2756 | |||
2641 | return wm_size; | 2757 | return wm_size; |
2642 | } | 2758 | } |
2643 | 2759 | ||
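For a concrete sense of the rounding change above, here is a worked instance of the same calculation as a standalone program. The mode, latency and FIFO numbers (148500 kHz dotclock, 4 bytes per pixel, 200 ns latency, a 95-entry FIFO with a guard of 2, 64-byte lines) are illustrative assumptions, not values taken from a specific platform.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long clock_in_khz = 148500;   /* assumed dotclock */
        unsigned long pixel_size = 4;          /* bytes per pixel */
        unsigned long latency_ns = 200;        /* assumed memory latency */
        unsigned long cacheline_size = 64;     /* FIFO line size, assumed */
        unsigned long fifo_size = 95, guard_size = 2;   /* assumed FIFO parameters */
        unsigned long entries;
        long wm_size;

        /* bytes fetched during the latency window, rounded up to whole FIFO lines */
        entries = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000;
        entries = DIV_ROUND_UP(entries, cacheline_size);

        wm_size = (long)fifo_size - (long)(entries + guard_size);
        /* 148 * 4 * 200 / 1000 = 118 bytes -> 2 lines; wm = 95 - (2 + 2) = 91 */
        printf("entries=%lu wm=%ld\n", entries, wm_size);
        return 0;
    }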
@@ -2746,11 +2862,9 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | |||
2746 | uint32_t dsparb = I915_READ(DSPARB); | 2862 | uint32_t dsparb = I915_READ(DSPARB); |
2747 | int size; | 2863 | int size; |
2748 | 2864 | ||
2749 | if (plane == 0) | 2865 | size = dsparb & 0x7f; |
2750 | size = dsparb & 0x7f; | 2866 | if (plane) |
2751 | else | 2867 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; |
2752 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - | ||
2753 | (dsparb & 0x7f); | ||
2754 | 2868 | ||
2755 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 2869 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2756 | plane ? "B" : "A", size); | 2870 | plane ? "B" : "A", size); |
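The simplified i9xx_get_fifo_size() above reads one DSPARB split point: the low bits give plane A's share of the FIFO, and plane B's share is the B-start field minus that. A small standalone decode under an assumed register value; the 7-bit fields and the shift value mirror the code above and are illustrative rather than quoted from i915_reg.h.

    #include <stdio.h>
    #include <stdint.h>

    #define DSPARB_CSTART_SHIFT 7   /* illustrative; the real shift lives in i915_reg.h */

    static int i9xx_fifo_size(uint32_t dsparb, int plane)
    {
        int size = dsparb & 0x7f;               /* plane A entries */
        if (plane)
            size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
        return size;
    }

    int main(void)
    {
        uint32_t dsparb = (96u << DSPARB_CSTART_SHIFT) | 64u;   /* assumed split */
        printf("A: %d entries, B: %d entries\n",
               i9xx_fifo_size(dsparb, 0), i9xx_fifo_size(dsparb, 1));
        /* prints "A: 64 entries, B: 32 entries" */
        return 0;
    }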
@@ -2764,11 +2878,9 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane) | |||
2764 | uint32_t dsparb = I915_READ(DSPARB); | 2878 | uint32_t dsparb = I915_READ(DSPARB); |
2765 | int size; | 2879 | int size; |
2766 | 2880 | ||
2767 | if (plane == 0) | 2881 | size = dsparb & 0x1ff; |
2768 | size = dsparb & 0x1ff; | 2882 | if (plane) |
2769 | else | 2883 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; |
2770 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - | ||
2771 | (dsparb & 0x1ff); | ||
2772 | size >>= 1; /* Convert to cachelines */ | 2884 | size >>= 1; /* Convert to cachelines */ |
2773 | 2885 | ||
2774 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 2886 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
@@ -2809,7 +2921,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) | |||
2809 | } | 2921 | } |
2810 | 2922 | ||
2811 | static void pineview_update_wm(struct drm_device *dev, int planea_clock, | 2923 | static void pineview_update_wm(struct drm_device *dev, int planea_clock, |
2812 | int planeb_clock, int sr_hdisplay, int pixel_size) | 2924 | int planeb_clock, int sr_hdisplay, int unused, |
2925 | int pixel_size) | ||
2813 | { | 2926 | { |
2814 | struct drm_i915_private *dev_priv = dev->dev_private; | 2927 | struct drm_i915_private *dev_priv = dev->dev_private; |
2815 | u32 reg; | 2928 | u32 reg; |
@@ -2874,7 +2987,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
2874 | } | 2987 | } |
2875 | 2988 | ||
2876 | static void g4x_update_wm(struct drm_device *dev, int planea_clock, | 2989 | static void g4x_update_wm(struct drm_device *dev, int planea_clock, |
2877 | int planeb_clock, int sr_hdisplay, int pixel_size) | 2990 | int planeb_clock, int sr_hdisplay, int sr_htotal, |
2991 | int pixel_size) | ||
2878 | { | 2992 | { |
2879 | struct drm_i915_private *dev_priv = dev->dev_private; | 2993 | struct drm_i915_private *dev_priv = dev->dev_private; |
2880 | int total_size, cacheline_size; | 2994 | int total_size, cacheline_size; |
@@ -2898,12 +3012,12 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2898 | */ | 3012 | */ |
2899 | entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / | 3013 | entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / |
2900 | 1000; | 3014 | 1000; |
2901 | entries_required /= G4X_FIFO_LINE_SIZE; | 3015 | entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); |
2902 | planea_wm = entries_required + planea_params.guard_size; | 3016 | planea_wm = entries_required + planea_params.guard_size; |
2903 | 3017 | ||
2904 | entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / | 3018 | entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / |
2905 | 1000; | 3019 | 1000; |
2906 | entries_required /= G4X_FIFO_LINE_SIZE; | 3020 | entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); |
2907 | planeb_wm = entries_required + planeb_params.guard_size; | 3021 | planeb_wm = entries_required + planeb_params.guard_size; |
2908 | 3022 | ||
2909 | cursora_wm = cursorb_wm = 16; | 3023 | cursora_wm = cursorb_wm = 16; |
@@ -2917,13 +3031,24 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2917 | static const int sr_latency_ns = 12000; | 3031 | static const int sr_latency_ns = 12000; |
2918 | 3032 | ||
2919 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3033 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2920 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 3034 | line_time_us = ((sr_htotal * 1000) / sr_clock); |
2921 | 3035 | ||
2922 | /* Use ns/us then divide to preserve precision */ | 3036 | /* Use ns/us then divide to preserve precision */ |
2923 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * | 3037 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
2924 | pixel_size * sr_hdisplay) / 1000; | 3038 | pixel_size * sr_hdisplay; |
2925 | sr_entries = roundup(sr_entries / cacheline_size, 1); | 3039 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); |
2926 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 3040 | |
3041 | entries_required = (((sr_latency_ns / line_time_us) + | ||
3042 | 1000) / 1000) * pixel_size * 64; | ||
3043 | entries_required = DIV_ROUND_UP(entries_required, | ||
3044 | g4x_cursor_wm_info.cacheline_size); | ||
3045 | cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; | ||
3046 | |||
3047 | if (cursor_sr > g4x_cursor_wm_info.max_wm) | ||
3048 | cursor_sr = g4x_cursor_wm_info.max_wm; | ||
3049 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | ||
3050 | "cursor %d\n", sr_entries, cursor_sr); | ||
3051 | |||
2927 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 3052 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2928 | } else { | 3053 | } else { |
2929 | /* Turn off self refresh if both pipes are enabled */ | 3054 | /* Turn off self refresh if both pipes are enabled */ |
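The reason sr_htotal replaces sr_hdisplay in the line-time term above is that the scanline period is set by the full line, blanking included, while the bytes fetched per line still scale with hdisplay. A standalone numeric check under an assumed 1920x1080 mode (htotal 2200, dotclock 148500 kHz, 4 bytes per pixel, 64-byte FIFO lines — all assumptions for illustration):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int sr_htotal = 2200, sr_hdisplay = 1920;  /* assumed 1920x1080 mode */
        int sr_clock = 148500;                     /* dotclock in kHz */
        int pixel_size = 4, cacheline = 64;        /* cacheline size assumed */
        const int sr_latency_ns = 12000;

        /* the full scanline (blanking included) sets the line time */
        int line_time_us = (sr_htotal * 1000) / sr_clock;
        /* lines consumed while exiting self-refresh, then bytes, then FIFO lines */
        int sr_entries = ((sr_latency_ns / line_time_us) + 1000) / 1000 *
                         pixel_size * sr_hdisplay;
        sr_entries = DIV_ROUND_UP(sr_entries, cacheline);

        printf("line_time=%dus sr_entries=%d cachelines\n", line_time_us, sr_entries);
        return 0;
    }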
@@ -2948,11 +3073,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2948 | } | 3073 | } |
2949 | 3074 | ||
2950 | static void i965_update_wm(struct drm_device *dev, int planea_clock, | 3075 | static void i965_update_wm(struct drm_device *dev, int planea_clock, |
2951 | int planeb_clock, int sr_hdisplay, int pixel_size) | 3076 | int planeb_clock, int sr_hdisplay, int sr_htotal, |
3077 | int pixel_size) | ||
2952 | { | 3078 | { |
2953 | struct drm_i915_private *dev_priv = dev->dev_private; | 3079 | struct drm_i915_private *dev_priv = dev->dev_private; |
2954 | unsigned long line_time_us; | 3080 | unsigned long line_time_us; |
2955 | int sr_clock, sr_entries, srwm = 1; | 3081 | int sr_clock, sr_entries, srwm = 1; |
3082 | int cursor_sr = 16; | ||
2956 | 3083 | ||
2957 | /* Calc sr entries for one plane configs */ | 3084 | /* Calc sr entries for one plane configs */ |
2958 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 3085 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
@@ -2960,17 +3087,31 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2960 | static const int sr_latency_ns = 12000; | 3087 | static const int sr_latency_ns = 12000; |
2961 | 3088 | ||
2962 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3089 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2963 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 3090 | line_time_us = ((sr_htotal * 1000) / sr_clock); |
2964 | 3091 | ||
2965 | /* Use ns/us then divide to preserve precision */ | 3092 | /* Use ns/us then divide to preserve precision */ |
2966 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * | 3093 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
2967 | pixel_size * sr_hdisplay) / 1000; | 3094 | pixel_size * sr_hdisplay; |
2968 | sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1); | 3095 | sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); |
2969 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 3096 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); |
2970 | srwm = I945_FIFO_SIZE - sr_entries; | 3097 | srwm = I965_FIFO_SIZE - sr_entries; |
2971 | if (srwm < 0) | 3098 | if (srwm < 0) |
2972 | srwm = 1; | 3099 | srwm = 1; |
2973 | srwm &= 0x3f; | 3100 | srwm &= 0x1ff; |
3101 | |||
3102 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | ||
3103 | pixel_size * 64; | ||
3104 | sr_entries = DIV_ROUND_UP(sr_entries, | ||
3105 | i965_cursor_wm_info.cacheline_size); | ||
3106 | cursor_sr = i965_cursor_wm_info.fifo_size - | ||
3107 | (sr_entries + i965_cursor_wm_info.guard_size); | ||
3108 | |||
3109 | if (cursor_sr > i965_cursor_wm_info.max_wm) | ||
3110 | cursor_sr = i965_cursor_wm_info.max_wm; | ||
3111 | |||
3112 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | ||
3113 | "cursor %d\n", srwm, cursor_sr); | ||
3114 | |||
2974 | if (IS_I965GM(dev)) | 3115 | if (IS_I965GM(dev)) |
2975 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 3116 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2976 | } else { | 3117 | } else { |
@@ -2987,10 +3128,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2987 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | | 3128 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | |
2988 | (8 << 0)); | 3129 | (8 << 0)); |
2989 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); | 3130 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); |
3131 | /* update cursor SR watermark */ | ||
3132 | I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | ||
2990 | } | 3133 | } |
2991 | 3134 | ||
2992 | static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | 3135 | static void i9xx_update_wm(struct drm_device *dev, int planea_clock, |
2993 | int planeb_clock, int sr_hdisplay, int pixel_size) | 3136 | int planeb_clock, int sr_hdisplay, int sr_htotal, |
3137 | int pixel_size) | ||
2994 | { | 3138 | { |
2995 | struct drm_i915_private *dev_priv = dev->dev_private; | 3139 | struct drm_i915_private *dev_priv = dev->dev_private; |
2996 | uint32_t fwater_lo; | 3140 | uint32_t fwater_lo; |
@@ -3035,12 +3179,12 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
3035 | static const int sr_latency_ns = 6000; | 3179 | static const int sr_latency_ns = 6000; |
3036 | 3180 | ||
3037 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3181 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
3038 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 3182 | line_time_us = ((sr_htotal * 1000) / sr_clock); |
3039 | 3183 | ||
3040 | /* Use ns/us then divide to preserve precision */ | 3184 | /* Use ns/us then divide to preserve precision */ |
3041 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * | 3185 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3042 | pixel_size * sr_hdisplay) / 1000; | 3186 | pixel_size * sr_hdisplay; |
3043 | sr_entries = roundup(sr_entries / cacheline_size, 1); | 3187 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); |
3044 | DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); | 3188 | DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); |
3045 | srwm = total_size - sr_entries; | 3189 | srwm = total_size - sr_entries; |
3046 | if (srwm < 0) | 3190 | if (srwm < 0) |
@@ -3078,7 +3222,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
3078 | } | 3222 | } |
3079 | 3223 | ||
3080 | static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | 3224 | static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, |
3081 | int unused2, int pixel_size) | 3225 | int unused2, int unused3, int pixel_size) |
3082 | { | 3226 | { |
3083 | struct drm_i915_private *dev_priv = dev->dev_private; | 3227 | struct drm_i915_private *dev_priv = dev->dev_private; |
3084 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; | 3228 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
@@ -3096,9 +3240,11 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | |||
3096 | } | 3240 | } |
3097 | 3241 | ||
3098 | #define ILK_LP0_PLANE_LATENCY 700 | 3242 | #define ILK_LP0_PLANE_LATENCY 700 |
3243 | #define ILK_LP0_CURSOR_LATENCY 1300 | ||
3099 | 3244 | ||
3100 | static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | 3245 | static void ironlake_update_wm(struct drm_device *dev, int planea_clock, |
3101 | int planeb_clock, int sr_hdisplay, int pixel_size) | 3246 | int planeb_clock, int sr_hdisplay, int sr_htotal, |
3247 | int pixel_size) | ||
3102 | { | 3248 | { |
3103 | struct drm_i915_private *dev_priv = dev->dev_private; | 3249 | struct drm_i915_private *dev_priv = dev->dev_private; |
3104 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | 3250 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; |
@@ -3106,20 +3252,48 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3106 | unsigned long line_time_us; | 3252 | unsigned long line_time_us; |
3107 | int sr_clock, entries_required; | 3253 | int sr_clock, entries_required; |
3108 | u32 reg_value; | 3254 | u32 reg_value; |
3255 | int line_count; | ||
3256 | int planea_htotal = 0, planeb_htotal = 0; | ||
3257 | struct drm_crtc *crtc; | ||
3258 | struct intel_crtc *intel_crtc; | ||
3259 | |||
3260 | /* Need htotal for all active display plane */ | ||
3261 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
3262 | intel_crtc = to_intel_crtc(crtc); | ||
3263 | if (crtc->enabled) { | ||
3264 | if (intel_crtc->plane == 0) | ||
3265 | planea_htotal = crtc->mode.htotal; | ||
3266 | else | ||
3267 | planeb_htotal = crtc->mode.htotal; | ||
3268 | } | ||
3269 | } | ||
3109 | 3270 | ||
3110 | /* Calculate and update the watermark for plane A */ | 3271 | /* Calculate and update the watermark for plane A */ |
3111 | if (planea_clock) { | 3272 | if (planea_clock) { |
3112 | entries_required = ((planea_clock / 1000) * pixel_size * | 3273 | entries_required = ((planea_clock / 1000) * pixel_size * |
3113 | ILK_LP0_PLANE_LATENCY) / 1000; | 3274 | ILK_LP0_PLANE_LATENCY) / 1000; |
3114 | entries_required = DIV_ROUND_UP(entries_required, | 3275 | entries_required = DIV_ROUND_UP(entries_required, |
3115 | ironlake_display_wm_info.cacheline_size); | 3276 | ironlake_display_wm_info.cacheline_size); |
3116 | planea_wm = entries_required + | 3277 | planea_wm = entries_required + |
3117 | ironlake_display_wm_info.guard_size; | 3278 | ironlake_display_wm_info.guard_size; |
3118 | 3279 | ||
3119 | if (planea_wm > (int)ironlake_display_wm_info.max_wm) | 3280 | if (planea_wm > (int)ironlake_display_wm_info.max_wm) |
3120 | planea_wm = ironlake_display_wm_info.max_wm; | 3281 | planea_wm = ironlake_display_wm_info.max_wm; |
3121 | 3282 | ||
3122 | cursora_wm = 16; | 3283 | /* Use the large buffer method to calculate cursor watermark */ |
3284 | line_time_us = (planea_htotal * 1000) / planea_clock; | ||
3285 | |||
3286 | /* Use ns/us then divide to preserve precision */ | ||
3287 | line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; | ||
3288 | |||
3289 | /* calculate the cursor watermark for cursor A */ | ||
3290 | entries_required = line_count * 64 * pixel_size; | ||
3291 | entries_required = DIV_ROUND_UP(entries_required, | ||
3292 | ironlake_cursor_wm_info.cacheline_size); | ||
3293 | cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size; | ||
3294 | if (cursora_wm > ironlake_cursor_wm_info.max_wm) | ||
3295 | cursora_wm = ironlake_cursor_wm_info.max_wm; | ||
3296 | |||
3123 | reg_value = I915_READ(WM0_PIPEA_ILK); | 3297 | reg_value = I915_READ(WM0_PIPEA_ILK); |
3124 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | 3298 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
3125 | reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | | 3299 | reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | |
@@ -3133,14 +3307,27 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3133 | entries_required = ((planeb_clock / 1000) * pixel_size * | 3307 | entries_required = ((planeb_clock / 1000) * pixel_size * |
3134 | ILK_LP0_PLANE_LATENCY) / 1000; | 3308 | ILK_LP0_PLANE_LATENCY) / 1000; |
3135 | entries_required = DIV_ROUND_UP(entries_required, | 3309 | entries_required = DIV_ROUND_UP(entries_required, |
3136 | ironlake_display_wm_info.cacheline_size); | 3310 | ironlake_display_wm_info.cacheline_size); |
3137 | planeb_wm = entries_required + | 3311 | planeb_wm = entries_required + |
3138 | ironlake_display_wm_info.guard_size; | 3312 | ironlake_display_wm_info.guard_size; |
3139 | 3313 | ||
3140 | if (planeb_wm > (int)ironlake_display_wm_info.max_wm) | 3314 | if (planeb_wm > (int)ironlake_display_wm_info.max_wm) |
3141 | planeb_wm = ironlake_display_wm_info.max_wm; | 3315 | planeb_wm = ironlake_display_wm_info.max_wm; |
3142 | 3316 | ||
3143 | cursorb_wm = 16; | 3317 | /* Use the large buffer method to calculate cursor watermark */ |
3318 | line_time_us = (planeb_htotal * 1000) / planeb_clock; | ||
3319 | |||
3320 | /* Use ns/us then divide to preserve precision */ | ||
3321 | line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; | ||
3322 | |||
3323 | /* calculate the cursor watermark for cursor B */ | ||
3324 | entries_required = line_count * 64 * pixel_size; | ||
3325 | entries_required = DIV_ROUND_UP(entries_required, | ||
3326 | ironlake_cursor_wm_info.cacheline_size); | ||
3327 | cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size; | ||
3328 | if (cursorb_wm > ironlake_cursor_wm_info.max_wm) | ||
3329 | cursorb_wm = ironlake_cursor_wm_info.max_wm; | ||
3330 | |||
3144 | reg_value = I915_READ(WM0_PIPEB_ILK); | 3331 | reg_value = I915_READ(WM0_PIPEB_ILK); |
3145 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | 3332 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
3146 | reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | | 3333 | reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | |
@@ -3155,12 +3342,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3155 | * display plane is used. | 3342 | * display plane is used. |
3156 | */ | 3343 | */ |
3157 | if (!planea_clock || !planeb_clock) { | 3344 | if (!planea_clock || !planeb_clock) { |
3158 | int line_count; | 3345 | |
3159 | /* Read the self-refresh latency. The unit is 0.5us */ | 3346 | /* Read the self-refresh latency. The unit is 0.5us */ |
3160 | int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; | 3347 | int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; |
3161 | 3348 | ||
3162 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3349 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
3163 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 3350 | line_time_us = ((sr_htotal * 1000) / sr_clock); |
3164 | 3351 | ||
3165 | /* Use ns/us then divide to preserve precision */ | 3352 | /* Use ns/us then divide to preserve precision */ |
3166 | line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) | 3353 | line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) |
@@ -3169,14 +3356,14 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3169 | /* calculate the self-refresh watermark for display plane */ | 3356 | /* calculate the self-refresh watermark for display plane */ |
3170 | entries_required = line_count * sr_hdisplay * pixel_size; | 3357 | entries_required = line_count * sr_hdisplay * pixel_size; |
3171 | entries_required = DIV_ROUND_UP(entries_required, | 3358 | entries_required = DIV_ROUND_UP(entries_required, |
3172 | ironlake_display_srwm_info.cacheline_size); | 3359 | ironlake_display_srwm_info.cacheline_size); |
3173 | sr_wm = entries_required + | 3360 | sr_wm = entries_required + |
3174 | ironlake_display_srwm_info.guard_size; | 3361 | ironlake_display_srwm_info.guard_size; |
3175 | 3362 | ||
3176 | /* calculate the self-refresh watermark for display cursor */ | 3363 | /* calculate the self-refresh watermark for display cursor */ |
3177 | entries_required = line_count * pixel_size * 64; | 3364 | entries_required = line_count * pixel_size * 64; |
3178 | entries_required = DIV_ROUND_UP(entries_required, | 3365 | entries_required = DIV_ROUND_UP(entries_required, |
3179 | ironlake_cursor_srwm_info.cacheline_size); | 3366 | ironlake_cursor_srwm_info.cacheline_size); |
3180 | cursor_wm = entries_required + | 3367 | cursor_wm = entries_required + |
3181 | ironlake_cursor_srwm_info.guard_size; | 3368 | ironlake_cursor_srwm_info.guard_size; |
3182 | 3369 | ||
@@ -3220,6 +3407,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3220 | * bytes per pixel | 3407 | * bytes per pixel |
3221 | * where | 3408 | * where |
3222 | * line time = htotal / dotclock | 3409 | * line time = htotal / dotclock |
3410 | * surface width = hdisplay for normal plane and 64 for cursor | ||
3223 | * and latency is assumed to be high, as above. | 3411 | * and latency is assumed to be high, as above. |
3224 | * | 3412 | * |
3225 | * The final value programmed to the register should always be rounded up, | 3413 | * The final value programmed to the register should always be rounded up, |
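Applying the formula spelled out in the comment above to the cursor case (surface width 64), with the 1300 ns LP0 cursor latency added by this patch: the mode (htotal 2200 at 148500 kHz, 4 bytes per pixel) and the 64-byte cacheline are assumptions for illustration, while the guard and maximum come from ironlake_cursor_wm_info above.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
    #define ILK_LP0_CURSOR_LATENCY  1300    /* ns, as introduced by this patch */

    int main(void)
    {
        int htotal = 2200, clock = 148500;  /* assumed mode; clock in kHz */
        int pixel_size = 4;                 /* bytes per pixel */
        int cacheline = 64, guard = 2, max_wm = 16;  /* cacheline assumed; guard/max per ironlake_cursor_wm_info */
        int line_time_us, line_count, entries, cursor_wm;

        line_time_us = (htotal * 1000) / clock;                       /* ~14 us */
        line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
        entries = line_count * 64 * pixel_size;                       /* 64-wide cursor */
        entries = DIV_ROUND_UP(entries, cacheline);

        cursor_wm = entries + guard;
        if (cursor_wm > max_wm)
            cursor_wm = max_wm;

        /* 1300/14 -> 1 line; 1 * 64 * 4 = 256 bytes -> 4 cachelines; wm = 4 + 2 = 6 */
        printf("line_count=%d cursor_wm=%d\n", line_count, cursor_wm);
        return 0;
    }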
@@ -3236,6 +3424,7 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
3236 | int sr_hdisplay = 0; | 3424 | int sr_hdisplay = 0; |
3237 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; | 3425 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; |
3238 | int enabled = 0, pixel_size = 0; | 3426 | int enabled = 0, pixel_size = 0; |
3427 | int sr_htotal = 0; | ||
3239 | 3428 | ||
3240 | if (!dev_priv->display.update_wm) | 3429 | if (!dev_priv->display.update_wm) |
3241 | return; | 3430 | return; |
@@ -3256,6 +3445,7 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
3256 | } | 3445 | } |
3257 | sr_hdisplay = crtc->mode.hdisplay; | 3446 | sr_hdisplay = crtc->mode.hdisplay; |
3258 | sr_clock = crtc->mode.clock; | 3447 | sr_clock = crtc->mode.clock; |
3448 | sr_htotal = crtc->mode.htotal; | ||
3259 | if (crtc->fb) | 3449 | if (crtc->fb) |
3260 | pixel_size = crtc->fb->bits_per_pixel / 8; | 3450 | pixel_size = crtc->fb->bits_per_pixel / 8; |
3261 | else | 3451 | else |
@@ -3267,7 +3457,7 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
3267 | return; | 3457 | return; |
3268 | 3458 | ||
3269 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, | 3459 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, |
3270 | sr_hdisplay, pixel_size); | 3460 | sr_hdisplay, sr_htotal, pixel_size); |
3271 | } | 3461 | } |
3272 | 3462 | ||
3273 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | 3463 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
@@ -3386,6 +3576,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3386 | return -EINVAL; | 3576 | return -EINVAL; |
3387 | } | 3577 | } |
3388 | 3578 | ||
3579 | /* Ensure that the cursor is valid for the new mode before changing... */ | ||
3580 | intel_crtc_update_cursor(crtc); | ||
3581 | |||
3389 | if (is_lvds && dev_priv->lvds_downclock_avail) { | 3582 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
3390 | has_reduced_clock = limit->find_pll(limit, crtc, | 3583 | has_reduced_clock = limit->find_pll(limit, crtc, |
3391 | dev_priv->lvds_downclock, | 3584 | dev_priv->lvds_downclock, |
@@ -3452,7 +3645,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3452 | temp |= PIPE_8BPC; | 3645 | temp |= PIPE_8BPC; |
3453 | else | 3646 | else |
3454 | temp |= PIPE_6BPC; | 3647 | temp |= PIPE_6BPC; |
3455 | } else if (is_edp) { | 3648 | } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) { |
3456 | switch (dev_priv->edp_bpp/3) { | 3649 | switch (dev_priv->edp_bpp/3) { |
3457 | case 8: | 3650 | case 8: |
3458 | temp |= PIPE_8BPC; | 3651 | temp |= PIPE_8BPC; |
@@ -3695,6 +3888,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3695 | udelay(150); | 3888 | udelay(150); |
3696 | } | 3889 | } |
3697 | 3890 | ||
3891 | if (HAS_PCH_SPLIT(dev)) { | ||
3892 | pipeconf &= ~PIPE_ENABLE_DITHER; | ||
3893 | pipeconf &= ~PIPE_DITHER_TYPE_MASK; | ||
3894 | } | ||
3895 | |||
3698 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 3896 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
3699 | * This is an exception to the general rule that mode_set doesn't turn | 3897 | * This is an exception to the general rule that mode_set doesn't turn |
3700 | * things on. | 3898 | * things on. |
@@ -3741,11 +3939,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3741 | } else | 3939 | } else |
3742 | lvds |= LVDS_ENABLE_DITHER; | 3940 | lvds |= LVDS_ENABLE_DITHER; |
3743 | } else { | 3941 | } else { |
3744 | if (HAS_PCH_SPLIT(dev)) { | 3942 | if (!HAS_PCH_SPLIT(dev)) { |
3745 | pipeconf &= ~PIPE_ENABLE_DITHER; | ||
3746 | pipeconf &= ~PIPE_DITHER_TYPE_MASK; | ||
3747 | } else | ||
3748 | lvds &= ~LVDS_ENABLE_DITHER; | 3943 | lvds &= ~LVDS_ENABLE_DITHER; |
3944 | } | ||
3749 | } | 3945 | } |
3750 | } | 3946 | } |
3751 | I915_WRITE(lvds_reg, lvds); | 3947 | I915_WRITE(lvds_reg, lvds); |
@@ -3921,6 +4117,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
3921 | } | 4117 | } |
3922 | } | 4118 | } |
3923 | 4119 | ||
4120 | /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ | ||
4121 | static void intel_crtc_update_cursor(struct drm_crtc *crtc) | ||
4122 | { | ||
4123 | struct drm_device *dev = crtc->dev; | ||
4124 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4125 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
4126 | int pipe = intel_crtc->pipe; | ||
4127 | int x = intel_crtc->cursor_x; | ||
4128 | int y = intel_crtc->cursor_y; | ||
4129 | uint32_t base, pos; | ||
4130 | bool visible; | ||
4131 | |||
4132 | pos = 0; | ||
4133 | |||
4134 | if (crtc->fb) { | ||
4135 | base = intel_crtc->cursor_addr; | ||
4136 | if (x > (int) crtc->fb->width) | ||
4137 | base = 0; | ||
4138 | |||
4139 | if (y > (int) crtc->fb->height) | ||
4140 | base = 0; | ||
4141 | } else | ||
4142 | base = 0; | ||
4143 | |||
4144 | if (x < 0) { | ||
4145 | if (x + intel_crtc->cursor_width < 0) | ||
4146 | base = 0; | ||
4147 | |||
4148 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; | ||
4149 | x = -x; | ||
4150 | } | ||
4151 | pos |= x << CURSOR_X_SHIFT; | ||
4152 | |||
4153 | if (y < 0) { | ||
4154 | if (y + intel_crtc->cursor_height < 0) | ||
4155 | base = 0; | ||
4156 | |||
4157 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; | ||
4158 | y = -y; | ||
4159 | } | ||
4160 | pos |= y << CURSOR_Y_SHIFT; | ||
4161 | |||
4162 | visible = base != 0; | ||
4163 | if (!visible && !intel_crtc->cursor_visble) | ||
4164 | return; | ||
4165 | |||
4166 | I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); | ||
4167 | if (intel_crtc->cursor_visble != visible) { | ||
4168 | uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); | ||
4169 | if (base) { | ||
4170 | /* Hooray for CUR*CNTR differences */ | ||
4171 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | ||
4172 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | ||
4173 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | ||
4174 | cntl |= pipe << 28; /* Connect to correct pipe */ | ||
4175 | } else { | ||
4176 | cntl &= ~(CURSOR_FORMAT_MASK); | ||
4177 | cntl |= CURSOR_ENABLE; | ||
4178 | cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; | ||
4179 | } | ||
4180 | } else { | ||
4181 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | ||
4182 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | ||
4183 | cntl |= CURSOR_MODE_DISABLE; | ||
4184 | } else { | ||
4185 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | ||
4186 | } | ||
4187 | } | ||
4188 | I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); | ||
4189 | |||
4190 | intel_crtc->cursor_visble = visible; | ||
4191 | } | ||
4192 | /* and commit changes on next vblank */ | ||
4193 | I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); | ||
4194 | |||
4195 | if (visible) | ||
4196 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); | ||
4197 | } | ||
4198 | |||
3924 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | 4199 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
3925 | struct drm_file *file_priv, | 4200 | struct drm_file *file_priv, |
3926 | uint32_t handle, | 4201 | uint32_t handle, |
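The CURPOS packing in intel_crtc_update_cursor() above is sign/magnitude rather than two's complement: a negative coordinate sets a sign bit in its field and stores the absolute value. A standalone sketch of that packing; the CURSOR_POS_SIGN and shift values here are illustrative stand-ins, not the real register definitions.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the CURSOR_* position fields. */
    #define CURSOR_POS_SIGN 0x8000
    #define CURSOR_X_SHIFT  0
    #define CURSOR_Y_SHIFT  16

    static uint32_t pack_cursor_pos(int x, int y)
    {
        uint32_t pos = 0;

        if (x < 0) {                    /* sign/magnitude, not two's complement */
            pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
            x = -x;
        }
        pos |= (uint32_t)x << CURSOR_X_SHIFT;

        if (y < 0) {
            pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
            y = -y;
        }
        pos |= (uint32_t)y << CURSOR_Y_SHIFT;
        return pos;
    }

    int main(void)
    {
        /* cursor partly off the top-left corner: x = -10, y = -4 */
        printf("CURPOS = 0x%08x\n", pack_cursor_pos(-10, -4));
        return 0;
    }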
@@ -3931,11 +4206,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3931 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4206 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3932 | struct drm_gem_object *bo; | 4207 | struct drm_gem_object *bo; |
3933 | struct drm_i915_gem_object *obj_priv; | 4208 | struct drm_i915_gem_object *obj_priv; |
3934 | int pipe = intel_crtc->pipe; | 4209 | uint32_t addr; |
3935 | uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; | ||
3936 | uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; | ||
3937 | uint32_t temp = I915_READ(control); | ||
3938 | size_t addr; | ||
3939 | int ret; | 4210 | int ret; |
3940 | 4211 | ||
3941 | DRM_DEBUG_KMS("\n"); | 4212 | DRM_DEBUG_KMS("\n"); |
@@ -3943,12 +4214,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3943 | /* if we want to turn off the cursor ignore width and height */ | 4214 | /* if we want to turn off the cursor ignore width and height */ |
3944 | if (!handle) { | 4215 | if (!handle) { |
3945 | DRM_DEBUG_KMS("cursor off\n"); | 4216 | DRM_DEBUG_KMS("cursor off\n"); |
3946 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | ||
3947 | temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | ||
3948 | temp |= CURSOR_MODE_DISABLE; | ||
3949 | } else { | ||
3950 | temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | ||
3951 | } | ||
3952 | addr = 0; | 4217 | addr = 0; |
3953 | bo = NULL; | 4218 | bo = NULL; |
3954 | mutex_lock(&dev->struct_mutex); | 4219 | mutex_lock(&dev->struct_mutex); |
@@ -3990,7 +4255,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3990 | 4255 | ||
3991 | addr = obj_priv->gtt_offset; | 4256 | addr = obj_priv->gtt_offset; |
3992 | } else { | 4257 | } else { |
3993 | ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); | 4258 | ret = i915_gem_attach_phys_object(dev, bo, |
4259 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); | ||
3994 | if (ret) { | 4260 | if (ret) { |
3995 | DRM_ERROR("failed to attach phys object\n"); | 4261 | DRM_ERROR("failed to attach phys object\n"); |
3996 | goto fail_locked; | 4262 | goto fail_locked; |
@@ -4001,21 +4267,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4001 | if (!IS_I9XX(dev)) | 4267 | if (!IS_I9XX(dev)) |
4002 | I915_WRITE(CURSIZE, (height << 12) | width); | 4268 | I915_WRITE(CURSIZE, (height << 12) | width); |
4003 | 4269 | ||
4004 | /* Hooray for CUR*CNTR differences */ | ||
4005 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | ||
4006 | temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | ||
4007 | temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | ||
4008 | temp |= (pipe << 28); /* Connect to correct pipe */ | ||
4009 | } else { | ||
4010 | temp &= ~(CURSOR_FORMAT_MASK); | ||
4011 | temp |= CURSOR_ENABLE; | ||
4012 | temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; | ||
4013 | } | ||
4014 | |||
4015 | finish: | 4270 | finish: |
4016 | I915_WRITE(control, temp); | ||
4017 | I915_WRITE(base, addr); | ||
4018 | |||
4019 | if (intel_crtc->cursor_bo) { | 4271 | if (intel_crtc->cursor_bo) { |
4020 | if (dev_priv->info->cursor_needs_physical) { | 4272 | if (dev_priv->info->cursor_needs_physical) { |
4021 | if (intel_crtc->cursor_bo != bo) | 4273 | if (intel_crtc->cursor_bo != bo) |
@@ -4029,6 +4281,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4029 | 4281 | ||
4030 | intel_crtc->cursor_addr = addr; | 4282 | intel_crtc->cursor_addr = addr; |
4031 | intel_crtc->cursor_bo = bo; | 4283 | intel_crtc->cursor_bo = bo; |
4284 | intel_crtc->cursor_width = width; | ||
4285 | intel_crtc->cursor_height = height; | ||
4286 | |||
4287 | intel_crtc_update_cursor(crtc); | ||
4032 | 4288 | ||
4033 | return 0; | 4289 | return 0; |
4034 | fail_unpin: | 4290 | fail_unpin: |
@@ -4042,34 +4298,12 @@ fail: | |||
4042 | 4298 | ||
4043 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | 4299 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) |
4044 | { | 4300 | { |
4045 | struct drm_device *dev = crtc->dev; | ||
4046 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4047 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4301 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4048 | struct intel_framebuffer *intel_fb; | ||
4049 | int pipe = intel_crtc->pipe; | ||
4050 | uint32_t temp = 0; | ||
4051 | uint32_t adder; | ||
4052 | |||
4053 | if (crtc->fb) { | ||
4054 | intel_fb = to_intel_framebuffer(crtc->fb); | ||
4055 | intel_mark_busy(dev, intel_fb->obj); | ||
4056 | } | ||
4057 | |||
4058 | if (x < 0) { | ||
4059 | temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; | ||
4060 | x = -x; | ||
4061 | } | ||
4062 | if (y < 0) { | ||
4063 | temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; | ||
4064 | y = -y; | ||
4065 | } | ||
4066 | 4302 | ||
4067 | temp |= x << CURSOR_X_SHIFT; | 4303 | intel_crtc->cursor_x = x; |
4068 | temp |= y << CURSOR_Y_SHIFT; | 4304 | intel_crtc->cursor_y = y; |
4069 | 4305 | ||
4070 | adder = intel_crtc->cursor_addr; | 4306 | intel_crtc_update_cursor(crtc); |
4071 | I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); | ||
4072 | I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder); | ||
4073 | 4307 | ||
4074 | return 0; | 4308 | return 0; |
4075 | } | 4309 | } |
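For reference, the register programming dropped from these two paths now lives in the new intel_crtc_update_cursor() helper (not shown in this hunk), which works from the position and size cached on intel_crtc. The sign-magnitude packing the old move path performed for CURxPOS can be sketched as a small standalone program; the CURSOR_* constants below are assumed to match the definitions in i915_reg.h rather than taken from this diff:

    #include <stdio.h>

    /* Assumed field layout of the CURxPOS register (see i915_reg.h). */
    #define CURSOR_POS_SIGN  0x8000
    #define CURSOR_X_SHIFT   0
    #define CURSOR_Y_SHIFT   16

    /* Pack a signed cursor position into the sign-magnitude CURxPOS format,
     * mirroring what the removed intel_crtc_cursor_move() code did. */
    static unsigned int pack_cursor_pos(int x, int y)
    {
            unsigned int pos = 0;

            if (x < 0) {
                    pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
                    x = -x;
            }
            if (y < 0) {
                    pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
                    y = -y;
            }
            pos |= (unsigned int)x << CURSOR_X_SHIFT;
            pos |= (unsigned int)y << CURSOR_Y_SHIFT;
            return pos;
    }

    int main(void)
    {
            /* cursor hanging 12 pixels off the left edge, 30 down */
            printf("CURxPOS = 0x%08x\n", pack_cursor_pos(-12, 30));
            /* prints CURxPOS = 0x001e800c */
            return 0;
    }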
@@ -4413,7 +4647,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
4413 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | 4647 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
4414 | 4648 | ||
4415 | /* Unlock panel regs */ | 4649 | /* Unlock panel regs */ |
4416 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); | 4650 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | |
4651 | PANEL_UNLOCK_REGS); | ||
4417 | 4652 | ||
4418 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 4653 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
4419 | I915_WRITE(dpll_reg, dpll); | 4654 | I915_WRITE(dpll_reg, dpll); |
@@ -4456,7 +4691,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
4456 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); | 4691 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); |
4457 | 4692 | ||
4458 | /* Unlock panel regs */ | 4693 | /* Unlock panel regs */ |
4459 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); | 4694 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | |
4695 | PANEL_UNLOCK_REGS); | ||
4460 | 4696 | ||
4461 | dpll |= DISPLAY_RATE_SELECT_FPA1; | 4697 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
4462 | I915_WRITE(dpll_reg, dpll); | 4698 | I915_WRITE(dpll_reg, dpll); |
@@ -4698,7 +4934,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4698 | struct drm_gem_object *obj; | 4934 | struct drm_gem_object *obj; |
4699 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4935 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4700 | struct intel_unpin_work *work; | 4936 | struct intel_unpin_work *work; |
4701 | unsigned long flags; | 4937 | unsigned long flags, offset; |
4702 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; | 4938 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; |
4703 | int ret, pipesrc; | 4939 | int ret, pipesrc; |
4704 | u32 flip_mask; | 4940 | u32 flip_mask; |
@@ -4730,27 +4966,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4730 | 4966 | ||
4731 | mutex_lock(&dev->struct_mutex); | 4967 | mutex_lock(&dev->struct_mutex); |
4732 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 4968 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
4733 | if (ret != 0) { | 4969 | if (ret) |
4734 | mutex_unlock(&dev->struct_mutex); | 4970 | goto cleanup_work; |
4735 | |||
4736 | spin_lock_irqsave(&dev->event_lock, flags); | ||
4737 | intel_crtc->unpin_work = NULL; | ||
4738 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
4739 | |||
4740 | kfree(work); | ||
4741 | |||
4742 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | ||
4743 | to_intel_bo(obj)); | ||
4744 | return ret; | ||
4745 | } | ||
4746 | 4971 | ||
4747 | /* Reference the objects for the scheduled work. */ | 4972 | /* Reference the objects for the scheduled work. */ |
4748 | drm_gem_object_reference(work->old_fb_obj); | 4973 | drm_gem_object_reference(work->old_fb_obj); |
4749 | drm_gem_object_reference(obj); | 4974 | drm_gem_object_reference(obj); |
4750 | 4975 | ||
4751 | crtc->fb = fb; | 4976 | crtc->fb = fb; |
4752 | i915_gem_object_flush_write_domain(obj); | 4977 | ret = i915_gem_object_flush_write_domain(obj); |
4753 | drm_vblank_get(dev, intel_crtc->pipe); | 4978 | if (ret) |
4979 | goto cleanup_objs; | ||
4980 | |||
4981 | ret = drm_vblank_get(dev, intel_crtc->pipe); | ||
4982 | if (ret) | ||
4983 | goto cleanup_objs; | ||
4984 | |||
4754 | obj_priv = to_intel_bo(obj); | 4985 | obj_priv = to_intel_bo(obj); |
4755 | atomic_inc(&obj_priv->pending_flip); | 4986 | atomic_inc(&obj_priv->pending_flip); |
4756 | work->pending_flip_obj = obj; | 4987 | work->pending_flip_obj = obj; |
@@ -4765,19 +4996,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4765 | while (I915_READ(ISR) & flip_mask) | 4996 | while (I915_READ(ISR) & flip_mask) |
4766 | ; | 4997 | ; |
4767 | 4998 | ||
4999 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | ||
5000 | offset = obj_priv->gtt_offset; | ||
5001 | offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8); | ||
5002 | |||
4768 | BEGIN_LP_RING(4); | 5003 | BEGIN_LP_RING(4); |
4769 | if (IS_I965G(dev)) { | 5004 | if (IS_I965G(dev)) { |
4770 | OUT_RING(MI_DISPLAY_FLIP | | 5005 | OUT_RING(MI_DISPLAY_FLIP | |
4771 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5006 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
4772 | OUT_RING(fb->pitch); | 5007 | OUT_RING(fb->pitch); |
4773 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 5008 | OUT_RING(offset | obj_priv->tiling_mode); |
4774 | pipesrc = I915_READ(pipesrc_reg); | 5009 | pipesrc = I915_READ(pipesrc_reg); |
4775 | OUT_RING(pipesrc & 0x0fff0fff); | 5010 | OUT_RING(pipesrc & 0x0fff0fff); |
4776 | } else { | 5011 | } else { |
4777 | OUT_RING(MI_DISPLAY_FLIP_I915 | | 5012 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
4778 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5013 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
4779 | OUT_RING(fb->pitch); | 5014 | OUT_RING(fb->pitch); |
4780 | OUT_RING(obj_priv->gtt_offset); | 5015 | OUT_RING(offset); |
4781 | OUT_RING(MI_NOOP); | 5016 | OUT_RING(MI_NOOP); |
4782 | } | 5017 | } |
4783 | ADVANCE_LP_RING(); | 5018 | ADVANCE_LP_RING(); |
@@ -4787,6 +5022,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4787 | trace_i915_flip_request(intel_crtc->plane, obj); | 5022 | trace_i915_flip_request(intel_crtc->plane, obj); |
4788 | 5023 | ||
4789 | return 0; | 5024 | return 0; |
5025 | |||
5026 | cleanup_objs: | ||
5027 | drm_gem_object_unreference(work->old_fb_obj); | ||
5028 | drm_gem_object_unreference(obj); | ||
5029 | cleanup_work: | ||
5030 | mutex_unlock(&dev->struct_mutex); | ||
5031 | |||
5032 | spin_lock_irqsave(&dev->event_lock, flags); | ||
5033 | intel_crtc->unpin_work = NULL; | ||
5034 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
5035 | |||
5036 | kfree(work); | ||
5037 | |||
5038 | return ret; | ||
4790 | } | 5039 | } |
4791 | 5040 | ||
4792 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { | 5041 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { |
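The offset computation added above is what lets two CRTCs flip within a single shared framebuffer: the ring command now takes the object's GTT offset advanced by the CRTC's x/y origin instead of the raw GTT offset. A minimal standalone sketch of that arithmetic, with made-up geometry:

    #include <stdio.h>

    /* Scanout base for a CRTC inside a shared framebuffer, mirroring the
     * offset calculation added to intel_crtc_page_flip(). */
    static unsigned long flip_offset(unsigned long gtt_offset, int x, int y,
                                     unsigned int pitch,          /* bytes per row */
                                     unsigned int bits_per_pixel)
    {
            return gtt_offset + (unsigned long)y * pitch + x * (bits_per_pixel / 8);
    }

    int main(void)
    {
            /* right-hand CRTC of a 3840x1200 spanning fb at 32 bpp */
            unsigned long base = flip_offset(0x100000, 1920, 0, 3840 * 4, 32);

            printf("flip base = 0x%lx\n", base);  /* prints 0x101e00 */
            return 0;
    }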
@@ -4912,19 +5161,26 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4912 | { | 5161 | { |
4913 | struct drm_i915_private *dev_priv = dev->dev_private; | 5162 | struct drm_i915_private *dev_priv = dev->dev_private; |
4914 | struct drm_encoder *encoder; | 5163 | struct drm_encoder *encoder; |
5164 | bool dpd_is_edp = false; | ||
4915 | 5165 | ||
4916 | intel_crt_init(dev); | ||
4917 | |||
4918 | /* Set up integrated LVDS */ | ||
4919 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 5166 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
4920 | intel_lvds_init(dev); | 5167 | intel_lvds_init(dev); |
4921 | 5168 | ||
4922 | if (HAS_PCH_SPLIT(dev)) { | 5169 | if (HAS_PCH_SPLIT(dev)) { |
4923 | int found; | 5170 | dpd_is_edp = intel_dpd_is_edp(dev); |
4924 | 5171 | ||
4925 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) | 5172 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) |
4926 | intel_dp_init(dev, DP_A); | 5173 | intel_dp_init(dev, DP_A); |
4927 | 5174 | ||
5175 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) | ||
5176 | intel_dp_init(dev, PCH_DP_D); | ||
5177 | } | ||
5178 | |||
5179 | intel_crt_init(dev); | ||
5180 | |||
5181 | if (HAS_PCH_SPLIT(dev)) { | ||
5182 | int found; | ||
5183 | |||
4928 | if (I915_READ(HDMIB) & PORT_DETECTED) { | 5184 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
4929 | /* PCH SDVOB multiplex with HDMIB */ | 5185 | /* PCH SDVOB multiplex with HDMIB */ |
4930 | found = intel_sdvo_init(dev, PCH_SDVOB); | 5186 | found = intel_sdvo_init(dev, PCH_SDVOB); |
@@ -4943,7 +5199,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4943 | if (I915_READ(PCH_DP_C) & DP_DETECTED) | 5199 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
4944 | intel_dp_init(dev, PCH_DP_C); | 5200 | intel_dp_init(dev, PCH_DP_C); |
4945 | 5201 | ||
4946 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | 5202 | if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
4947 | intel_dp_init(dev, PCH_DP_D); | 5203 | intel_dp_init(dev, PCH_DP_D); |
4948 | 5204 | ||
4949 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { | 5205 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
@@ -5352,6 +5608,26 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5352 | (I915_READ(DISP_ARB_CTL) | | 5608 | (I915_READ(DISP_ARB_CTL) | |
5353 | DISP_FBC_WM_DIS)); | 5609 | DISP_FBC_WM_DIS)); |
5354 | } | 5610 | } |
5611 | /* | ||
5612 | * According to the hardware documentation, the following bits | ||
5613 | * should be set unconditionally in order to enable FBC: | ||
5614 | * bit 22 of 0x42000, | ||
5615 | * bit 22 of 0x42004, | ||
5616 | * bits 7, 8 and 9 of 0x42020. | ||
5617 | */ | ||
5618 | if (IS_IRONLAKE_M(dev)) { | ||
5619 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | ||
5620 | I915_READ(ILK_DISPLAY_CHICKEN1) | | ||
5621 | ILK_FBCQ_DIS); | ||
5622 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
5623 | I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
5624 | ILK_DPARB_GATE); | ||
5625 | I915_WRITE(ILK_DSPCLK_GATE, | ||
5626 | I915_READ(ILK_DSPCLK_GATE) | | ||
5627 | ILK_DPFC_DIS1 | | ||
5628 | ILK_DPFC_DIS2 | | ||
5629 | ILK_CLK_FBC); | ||
5630 | } | ||
5355 | return; | 5631 | return; |
5356 | } else if (IS_G4X(dev)) { | 5632 | } else if (IS_G4X(dev)) { |
5357 | uint32_t dspclk_gate; | 5633 | uint32_t dspclk_gate; |
@@ -5430,7 +5706,11 @@ static void intel_init_display(struct drm_device *dev) | |||
5430 | dev_priv->display.dpms = i9xx_crtc_dpms; | 5706 | dev_priv->display.dpms = i9xx_crtc_dpms; |
5431 | 5707 | ||
5432 | if (I915_HAS_FBC(dev)) { | 5708 | if (I915_HAS_FBC(dev)) { |
5433 | if (IS_GM45(dev)) { | 5709 | if (IS_IRONLAKE_M(dev)) { |
5710 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; | ||
5711 | dev_priv->display.enable_fbc = ironlake_enable_fbc; | ||
5712 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | ||
5713 | } else if (IS_GM45(dev)) { | ||
5434 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | 5714 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
5435 | dev_priv->display.enable_fbc = g4x_enable_fbc; | 5715 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
5436 | dev_priv->display.disable_fbc = g4x_disable_fbc; | 5716 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
@@ -5511,6 +5791,66 @@ static void intel_init_display(struct drm_device *dev) | |||
5511 | } | 5791 | } |
5512 | } | 5792 | } |
5513 | 5793 | ||
5794 | /* | ||
5795 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, | ||
5796 | * resume, or other times. This quirk makes sure that's the case for | ||
5797 | * affected systems. | ||
5798 | */ | ||
5799 | static void quirk_pipea_force (struct drm_device *dev) | ||
5800 | { | ||
5801 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5802 | |||
5803 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; | ||
5804 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); | ||
5805 | } | ||
5806 | |||
5807 | struct intel_quirk { | ||
5808 | int device; | ||
5809 | int subsystem_vendor; | ||
5810 | int subsystem_device; | ||
5811 | void (*hook)(struct drm_device *dev); | ||
5812 | }; | ||
5813 | |||
5814 | struct intel_quirk intel_quirks[] = { | ||
5815 | /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ | ||
5816 | { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, | ||
5817 | /* HP Mini needs pipe A force quirk (LP: #322104) */ | ||
5818 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, | ||
5819 | |||
5821 | /* ThinkPad R31 needs pipe A force quirk */ | ||
5821 | { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, | ||
5822 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ | ||
5823 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, | ||
5824 | |||
5825 | /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ | ||
5826 | { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, | ||
5827 | /* ThinkPad X40 needs pipe A force quirk */ | ||
5828 | |||
5829 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ | ||
5830 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, | ||
5831 | |||
5832 | /* 855 & before need to leave pipe A & dpll A up */ | ||
5833 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | ||
5834 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | ||
5835 | }; | ||
5836 | |||
5837 | static void intel_init_quirks(struct drm_device *dev) | ||
5838 | { | ||
5839 | struct pci_dev *d = dev->pdev; | ||
5840 | int i; | ||
5841 | |||
5842 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { | ||
5843 | struct intel_quirk *q = &intel_quirks[i]; | ||
5844 | |||
5845 | if (d->device == q->device && | ||
5846 | (d->subsystem_vendor == q->subsystem_vendor || | ||
5847 | q->subsystem_vendor == PCI_ANY_ID) && | ||
5848 | (d->subsystem_device == q->subsystem_device || | ||
5849 | q->subsystem_device == PCI_ANY_ID)) | ||
5850 | q->hook(dev); | ||
5851 | } | ||
5852 | } | ||
5853 | |||
5514 | void intel_modeset_init(struct drm_device *dev) | 5854 | void intel_modeset_init(struct drm_device *dev) |
5515 | { | 5855 | { |
5516 | struct drm_i915_private *dev_priv = dev->dev_private; | 5856 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -5523,6 +5863,8 @@ void intel_modeset_init(struct drm_device *dev) | |||
5523 | 5863 | ||
5524 | dev->mode_config.funcs = (void *)&intel_mode_funcs; | 5864 | dev->mode_config.funcs = (void *)&intel_mode_funcs; |
5525 | 5865 | ||
5866 | intel_init_quirks(dev); | ||
5867 | |||
5526 | intel_init_display(dev); | 5868 | intel_init_display(dev); |
5527 | 5869 | ||
5528 | if (IS_I965G(dev)) { | 5870 | if (IS_I965G(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1aac59e83bff..40be1fa65be1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #define DP_LINK_CONFIGURATION_SIZE 9 | 43 | #define DP_LINK_CONFIGURATION_SIZE 9 |
44 | 44 | ||
45 | #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) | 45 | #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) |
46 | #define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp) | ||
46 | 47 | ||
47 | struct intel_dp_priv { | 48 | struct intel_dp_priv { |
48 | uint32_t output_reg; | 49 | uint32_t output_reg; |
@@ -56,6 +57,7 @@ struct intel_dp_priv { | |||
56 | struct intel_encoder *intel_encoder; | 57 | struct intel_encoder *intel_encoder; |
57 | struct i2c_adapter adapter; | 58 | struct i2c_adapter adapter; |
58 | struct i2c_algo_dp_aux_data algo; | 59 | struct i2c_algo_dp_aux_data algo; |
60 | bool is_pch_edp; | ||
59 | }; | 61 | }; |
60 | 62 | ||
61 | static void | 63 | static void |
@@ -128,8 +130,9 @@ intel_dp_link_required(struct drm_device *dev, | |||
128 | struct intel_encoder *intel_encoder, int pixel_clock) | 130 | struct intel_encoder *intel_encoder, int pixel_clock) |
129 | { | 131 | { |
130 | struct drm_i915_private *dev_priv = dev->dev_private; | 132 | struct drm_i915_private *dev_priv = dev->dev_private; |
133 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
131 | 134 | ||
132 | if (IS_eDP(intel_encoder)) | 135 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) |
133 | return (pixel_clock * dev_priv->edp_bpp) / 8; | 136 | return (pixel_clock * dev_priv->edp_bpp) / 8; |
134 | else | 137 | else |
135 | return pixel_clock * 3; | 138 | return pixel_clock * 3; |
@@ -147,9 +150,21 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
147 | { | 150 | { |
148 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 151 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
149 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 152 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
153 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
154 | struct drm_device *dev = connector->dev; | ||
155 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
150 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); | 156 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); |
151 | int max_lanes = intel_dp_max_lane_count(intel_encoder); | 157 | int max_lanes = intel_dp_max_lane_count(intel_encoder); |
152 | 158 | ||
159 | if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && | ||
160 | dev_priv->panel_fixed_mode) { | ||
161 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) | ||
162 | return MODE_PANEL; | ||
163 | |||
164 | if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay) | ||
165 | return MODE_PANEL; | ||
166 | } | ||
167 | |||
153 | /* only refuse the mode on non-eDP since we have seen some weird eDP panels | 168 |
154 | which are outside spec tolerances but somehow work by magic */ | 169 | which are outside spec tolerances but somehow work by magic */ |
155 | if (!IS_eDP(intel_encoder) && | 170 | if (!IS_eDP(intel_encoder) && |
@@ -508,11 +523,37 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
508 | { | 523 | { |
509 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 524 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
510 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 525 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
526 | struct drm_device *dev = encoder->dev; | ||
527 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
511 | int lane_count, clock; | 528 | int lane_count, clock; |
512 | int max_lane_count = intel_dp_max_lane_count(intel_encoder); | 529 | int max_lane_count = intel_dp_max_lane_count(intel_encoder); |
513 | int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; | 530 | int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; |
514 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 531 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
515 | 532 | ||
533 | if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && | ||
534 | dev_priv->panel_fixed_mode) { | ||
535 | struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; | ||
536 | |||
537 | adjusted_mode->hdisplay = fixed_mode->hdisplay; | ||
538 | adjusted_mode->hsync_start = fixed_mode->hsync_start; | ||
539 | adjusted_mode->hsync_end = fixed_mode->hsync_end; | ||
540 | adjusted_mode->htotal = fixed_mode->htotal; | ||
541 | |||
542 | adjusted_mode->vdisplay = fixed_mode->vdisplay; | ||
543 | adjusted_mode->vsync_start = fixed_mode->vsync_start; | ||
544 | adjusted_mode->vsync_end = fixed_mode->vsync_end; | ||
545 | adjusted_mode->vtotal = fixed_mode->vtotal; | ||
546 | |||
547 | adjusted_mode->clock = fixed_mode->clock; | ||
548 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
549 | |||
550 | /* | ||
551 | * The mode->clock is used to calculate the Data & Link M/N | ||
552 | * of the pipe. For eDP the fixed panel clock should be used. | ||
553 | */ | ||
554 | mode->clock = dev_priv->panel_fixed_mode->clock; | ||
555 | } | ||
556 | |||
516 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 557 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
517 | for (clock = 0; clock <= max_clock; clock++) { | 558 | for (clock = 0; clock <= max_clock; clock++) { |
518 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); | 559 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
@@ -531,7 +572,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
531 | } | 572 | } |
532 | } | 573 | } |
533 | 574 | ||
534 | if (IS_eDP(intel_encoder)) { | 575 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { |
535 | /* okay we failed just pick the highest */ | 576 | /* okay we failed just pick the highest */ |
536 | dp_priv->lane_count = max_lane_count; | 577 | dp_priv->lane_count = max_lane_count; |
537 | dp_priv->link_bw = bws[max_clock]; | 578 | dp_priv->link_bw = bws[max_clock]; |
@@ -563,14 +604,14 @@ intel_reduce_ratio(uint32_t *num, uint32_t *den) | |||
563 | } | 604 | } |
564 | 605 | ||
565 | static void | 606 | static void |
566 | intel_dp_compute_m_n(int bytes_per_pixel, | 607 | intel_dp_compute_m_n(int bpp, |
567 | int nlanes, | 608 | int nlanes, |
568 | int pixel_clock, | 609 | int pixel_clock, |
569 | int link_clock, | 610 | int link_clock, |
570 | struct intel_dp_m_n *m_n) | 611 | struct intel_dp_m_n *m_n) |
571 | { | 612 | { |
572 | m_n->tu = 64; | 613 | m_n->tu = 64; |
573 | m_n->gmch_m = pixel_clock * bytes_per_pixel; | 614 | m_n->gmch_m = (pixel_clock * bpp) >> 3; |
574 | m_n->gmch_n = link_clock * nlanes; | 615 | m_n->gmch_n = link_clock * nlanes; |
575 | intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | 616 | intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
576 | m_n->link_m = pixel_clock; | 617 | m_n->link_m = pixel_clock; |
@@ -578,6 +619,28 @@ intel_dp_compute_m_n(int bytes_per_pixel, | |||
578 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); | 619 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); |
579 | } | 620 | } |
580 | 621 | ||
622 | bool intel_pch_has_edp(struct drm_crtc *crtc) | ||
623 | { | ||
624 | struct drm_device *dev = crtc->dev; | ||
625 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
626 | struct drm_encoder *encoder; | ||
627 | |||
628 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | ||
629 | struct intel_encoder *intel_encoder; | ||
630 | struct intel_dp_priv *dp_priv; | ||
631 | |||
632 | if (!encoder || encoder->crtc != crtc) | ||
633 | continue; | ||
634 | |||
635 | intel_encoder = enc_to_intel_encoder(encoder); | ||
636 | dp_priv = intel_encoder->dev_priv; | ||
637 | |||
638 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) | ||
639 | return dp_priv->is_pch_edp; | ||
640 | } | ||
641 | return false; | ||
642 | } | ||
643 | |||
581 | void | 644 | void |
582 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 645 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
583 | struct drm_display_mode *adjusted_mode) | 646 | struct drm_display_mode *adjusted_mode) |
@@ -587,7 +650,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
587 | struct drm_encoder *encoder; | 650 | struct drm_encoder *encoder; |
588 | struct drm_i915_private *dev_priv = dev->dev_private; | 651 | struct drm_i915_private *dev_priv = dev->dev_private; |
589 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 652 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
590 | int lane_count = 4; | 653 | int lane_count = 4, bpp = 24; |
591 | struct intel_dp_m_n m_n; | 654 | struct intel_dp_m_n m_n; |
592 | 655 | ||
593 | /* | 656 | /* |
@@ -605,6 +668,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
605 | 668 | ||
606 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { | 669 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { |
607 | lane_count = dp_priv->lane_count; | 670 | lane_count = dp_priv->lane_count; |
671 | if (IS_PCH_eDP(dp_priv)) | ||
672 | bpp = dev_priv->edp_bpp; | ||
608 | break; | 673 | break; |
609 | } | 674 | } |
610 | } | 675 | } |
@@ -614,7 +679,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
614 | * the number of bytes_per_pixel post-LUT, which we always | 679 | * the number of bytes_per_pixel post-LUT, which we always |
615 | * set up for 8-bits of R/G/B, or 3 bytes total. | 680 | * set up for 8-bits of R/G/B, or 3 bytes total. |
616 | */ | 681 | */ |
617 | intel_dp_compute_m_n(3, lane_count, | 682 | intel_dp_compute_m_n(bpp, lane_count, |
618 | mode->clock, adjusted_mode->clock, &m_n); | 683 | mode->clock, adjusted_mode->clock, &m_n); |
619 | 684 | ||
620 | if (HAS_PCH_SPLIT(dev)) { | 685 | if (HAS_PCH_SPLIT(dev)) { |
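intel_dp_compute_m_n() now takes bits per pixel rather than bytes (so the PCH eDP path can feed in dev_priv->edp_bpp) and does the divide-by-eight itself. The standalone sketch below reproduces the data M/N arithmetic with illustrative numbers; the GCD is used here only to show the ratio in lowest terms, the real code relies on intel_reduce_ratio() instead:

    #include <stdio.h>

    static unsigned int gcd32(unsigned int a, unsigned int b)
    {
            while (b) {
                    unsigned int t = a % b;
                    a = b;
                    b = t;
            }
            return a;
    }

    int main(void)
    {
            /* Assumed example: 1080p60 over four lanes at DP_LINK_BW_2_7
             * (270 MHz link clock), 24 bpp.  Clocks in kHz as in the driver. */
            unsigned int pixel_clock = 148500;
            unsigned int link_clock = 270000;
            unsigned int bpp = 24, nlanes = 4;

            unsigned int gmch_m = (pixel_clock * bpp) >> 3;  /* pixel data rate, bytes */
            unsigned int gmch_n = link_clock * nlanes;       /* link data rate, bytes */
            unsigned int g = gcd32(gmch_m, gmch_n);

            printf("data M/N = %u/%u (= %u/%u)\n",
                   gmch_m, gmch_n, gmch_m / g, gmch_n / g);
            /* prints data M/N = 445500/1080000 (= 33/80) */
            return 0;
    }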
@@ -717,6 +782,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
717 | } | 782 | } |
718 | } | 783 | } |
719 | 784 | ||
785 | static void ironlake_edp_panel_on (struct drm_device *dev) | ||
786 | { | ||
787 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
788 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | ||
789 | u32 pp, pp_status; | ||
790 | |||
791 | pp_status = I915_READ(PCH_PP_STATUS); | ||
792 | if (pp_status & PP_ON) | ||
793 | return; | ||
794 | |||
795 | pp = I915_READ(PCH_PP_CONTROL); | ||
796 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; | ||
797 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
798 | do { | ||
799 | pp_status = I915_READ(PCH_PP_STATUS); | ||
800 | } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout)); | ||
801 | |||
802 | if (time_after(jiffies, timeout)) | ||
803 | DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status); | ||
804 | |||
805 | pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); | ||
806 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
807 | } | ||
808 | |||
809 | static void ironlake_edp_panel_off (struct drm_device *dev) | ||
810 | { | ||
811 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
812 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | ||
813 | u32 pp, pp_status; | ||
814 | |||
815 | pp = I915_READ(PCH_PP_CONTROL); | ||
816 | pp &= ~POWER_TARGET_ON; | ||
817 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
818 | do { | ||
819 | pp_status = I915_READ(PCH_PP_STATUS); | ||
820 | } while ((pp_status & PP_ON) && !time_after(jiffies, timeout)); | ||
821 | |||
822 | if (time_after(jiffies, timeout)) | ||
823 | DRM_DEBUG_KMS("panel off wait timed out\n"); | ||
824 | |||
825 | /* Make sure VDD is enabled so DP AUX will work */ | ||
826 | pp |= EDP_FORCE_VDD; | ||
827 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
828 | } | ||
829 | |||
720 | static void ironlake_edp_backlight_on (struct drm_device *dev) | 830 | static void ironlake_edp_backlight_on (struct drm_device *dev) |
721 | { | 831 | { |
722 | struct drm_i915_private *dev_priv = dev->dev_private; | 832 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -751,14 +861,18 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
751 | if (mode != DRM_MODE_DPMS_ON) { | 861 | if (mode != DRM_MODE_DPMS_ON) { |
752 | if (dp_reg & DP_PORT_EN) { | 862 | if (dp_reg & DP_PORT_EN) { |
753 | intel_dp_link_down(intel_encoder, dp_priv->DP); | 863 | intel_dp_link_down(intel_encoder, dp_priv->DP); |
754 | if (IS_eDP(intel_encoder)) | 864 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { |
755 | ironlake_edp_backlight_off(dev); | 865 | ironlake_edp_backlight_off(dev); |
866 | ironlake_edp_panel_off(dev); | ||
867 | } | ||
756 | } | 868 | } |
757 | } else { | 869 | } else { |
758 | if (!(dp_reg & DP_PORT_EN)) { | 870 | if (!(dp_reg & DP_PORT_EN)) { |
759 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); | 871 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); |
760 | if (IS_eDP(intel_encoder)) | 872 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { |
873 | ironlake_edp_panel_on(dev); | ||
761 | ironlake_edp_backlight_on(dev); | 874 | ironlake_edp_backlight_on(dev); |
875 | } | ||
762 | } | 876 | } |
763 | } | 877 | } |
764 | dp_priv->dpms_mode = mode; | 878 | dp_priv->dpms_mode = mode; |
@@ -1291,17 +1405,32 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1291 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1405 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1292 | struct drm_device *dev = intel_encoder->enc.dev; | 1406 | struct drm_device *dev = intel_encoder->enc.dev; |
1293 | struct drm_i915_private *dev_priv = dev->dev_private; | 1407 | struct drm_i915_private *dev_priv = dev->dev_private; |
1408 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
1294 | int ret; | 1409 | int ret; |
1295 | 1410 | ||
1296 | /* We should parse the EDID data and find out if it has an audio sink | 1411 | /* We should parse the EDID data and find out if it has an audio sink |
1297 | */ | 1412 | */ |
1298 | 1413 | ||
1299 | ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); | 1414 | ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); |
1300 | if (ret) | 1415 | if (ret) { |
1416 | if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && | ||
1417 | !dev_priv->panel_fixed_mode) { | ||
1418 | struct drm_display_mode *newmode; | ||
1419 | list_for_each_entry(newmode, &connector->probed_modes, | ||
1420 | head) { | ||
1421 | if (newmode->type & DRM_MODE_TYPE_PREFERRED) { | ||
1422 | dev_priv->panel_fixed_mode = | ||
1423 | drm_mode_duplicate(dev, newmode); | ||
1424 | break; | ||
1425 | } | ||
1426 | } | ||
1427 | } | ||
1428 | |||
1301 | return ret; | 1429 | return ret; |
1430 | } | ||
1302 | 1431 | ||
1303 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 1432 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
1304 | if (IS_eDP(intel_encoder)) { | 1433 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { |
1305 | if (dev_priv->panel_fixed_mode != NULL) { | 1434 | if (dev_priv->panel_fixed_mode != NULL) { |
1306 | struct drm_display_mode *mode; | 1435 | struct drm_display_mode *mode; |
1307 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 1436 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); |
@@ -1386,6 +1515,26 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc) | |||
1386 | return -1; | 1515 | return -1; |
1387 | } | 1516 | } |
1388 | 1517 | ||
1518 | /* check the VBT to see whether the eDP is on DP-D port */ | ||
1519 | bool intel_dpd_is_edp(struct drm_device *dev) | ||
1520 | { | ||
1521 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1522 | struct child_device_config *p_child; | ||
1523 | int i; | ||
1524 | |||
1525 | if (!dev_priv->child_dev_num) | ||
1526 | return false; | ||
1527 | |||
1528 | for (i = 0; i < dev_priv->child_dev_num; i++) { | ||
1529 | p_child = dev_priv->child_dev + i; | ||
1530 | |||
1531 | if (p_child->dvo_port == PORT_IDPD && | ||
1532 | p_child->device_type == DEVICE_TYPE_eDP) | ||
1533 | return true; | ||
1534 | } | ||
1535 | return false; | ||
1536 | } | ||
1537 | |||
1389 | void | 1538 | void |
1390 | intel_dp_init(struct drm_device *dev, int output_reg) | 1539 | intel_dp_init(struct drm_device *dev, int output_reg) |
1391 | { | 1540 | { |
@@ -1395,6 +1544,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1395 | struct intel_connector *intel_connector; | 1544 | struct intel_connector *intel_connector; |
1396 | struct intel_dp_priv *dp_priv; | 1545 | struct intel_dp_priv *dp_priv; |
1397 | const char *name = NULL; | 1546 | const char *name = NULL; |
1547 | int type; | ||
1398 | 1548 | ||
1399 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + | 1549 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + |
1400 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | 1550 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); |
@@ -1409,18 +1559,24 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1409 | 1559 | ||
1410 | dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); | 1560 | dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); |
1411 | 1561 | ||
1562 | if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D)) | ||
1563 | if (intel_dpd_is_edp(dev)) | ||
1564 | dp_priv->is_pch_edp = true; | ||
1565 | |||
1566 | if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { | ||
1567 | type = DRM_MODE_CONNECTOR_eDP; | ||
1568 | intel_encoder->type = INTEL_OUTPUT_EDP; | ||
1569 | } else { | ||
1570 | type = DRM_MODE_CONNECTOR_DisplayPort; | ||
1571 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | ||
1572 | } | ||
1573 | |||
1412 | connector = &intel_connector->base; | 1574 | connector = &intel_connector->base; |
1413 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, | 1575 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); |
1414 | DRM_MODE_CONNECTOR_DisplayPort); | ||
1415 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 1576 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
1416 | 1577 | ||
1417 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 1578 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
1418 | 1579 | ||
1419 | if (output_reg == DP_A) | ||
1420 | intel_encoder->type = INTEL_OUTPUT_EDP; | ||
1421 | else | ||
1422 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | ||
1423 | |||
1424 | if (output_reg == DP_B || output_reg == PCH_DP_B) | 1580 | if (output_reg == DP_B || output_reg == PCH_DP_B) |
1425 | intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); | 1581 | intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); |
1426 | else if (output_reg == DP_C || output_reg == PCH_DP_C) | 1582 | else if (output_reg == DP_C || output_reg == PCH_DP_C) |
@@ -1479,7 +1635,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1479 | intel_encoder->ddc_bus = &dp_priv->adapter; | 1635 | intel_encoder->ddc_bus = &dp_priv->adapter; |
1480 | intel_encoder->hot_plug = intel_dp_hot_plug; | 1636 | intel_encoder->hot_plug = intel_dp_hot_plug; |
1481 | 1637 | ||
1482 | if (output_reg == DP_A) { | 1638 | if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { |
1483 | /* initialize panel mode from VBT if available for eDP */ | 1639 | /* initialize panel mode from VBT if available for eDP */ |
1484 | if (dev_priv->lfp_lvds_vbt_mode) { | 1640 | if (dev_priv->lfp_lvds_vbt_mode) { |
1485 | dev_priv->panel_fixed_mode = | 1641 | dev_priv->panel_fixed_mode = |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 72206f37c4fb..b2190148703a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -143,8 +143,6 @@ struct intel_crtc { | |||
143 | struct drm_crtc base; | 143 | struct drm_crtc base; |
144 | enum pipe pipe; | 144 | enum pipe pipe; |
145 | enum plane plane; | 145 | enum plane plane; |
146 | struct drm_gem_object *cursor_bo; | ||
147 | uint32_t cursor_addr; | ||
148 | u8 lut_r[256], lut_g[256], lut_b[256]; | 146 | u8 lut_r[256], lut_g[256], lut_b[256]; |
149 | int dpms_mode; | 147 | int dpms_mode; |
150 | bool busy; /* is scanout buffer being updated frequently? */ | 148 | bool busy; /* is scanout buffer being updated frequently? */ |
@@ -153,6 +151,12 @@ struct intel_crtc { | |||
153 | struct intel_overlay *overlay; | 151 | struct intel_overlay *overlay; |
154 | struct intel_unpin_work *unpin_work; | 152 | struct intel_unpin_work *unpin_work; |
155 | int fdi_lanes; | 153 | int fdi_lanes; |
154 | |||
155 | struct drm_gem_object *cursor_bo; | ||
156 | uint32_t cursor_addr; | ||
157 | int16_t cursor_x, cursor_y; | ||
158 | int16_t cursor_width, cursor_height; | ||
159 | bool cursor_visble; | ||
156 | }; | 160 | }; |
157 | 161 | ||
158 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 162 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
@@ -179,6 +183,8 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); | |||
179 | void | 183 | void |
180 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 184 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
181 | struct drm_display_mode *adjusted_mode); | 185 | struct drm_display_mode *adjusted_mode); |
186 | extern bool intel_pch_has_edp(struct drm_crtc *crtc); | ||
187 | extern bool intel_dpd_is_edp(struct drm_device *dev); | ||
182 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); | 188 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); |
183 | 189 | ||
184 | 190 | ||
@@ -215,6 +221,9 @@ extern void intel_init_clock_gating(struct drm_device *dev); | |||
215 | extern void ironlake_enable_drps(struct drm_device *dev); | 221 | extern void ironlake_enable_drps(struct drm_device *dev); |
216 | extern void ironlake_disable_drps(struct drm_device *dev); | 222 | extern void ironlake_disable_drps(struct drm_device *dev); |
217 | 223 | ||
224 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | ||
225 | struct drm_gem_object *obj); | ||
226 | |||
218 | extern int intel_framebuffer_init(struct drm_device *dev, | 227 | extern int intel_framebuffer_init(struct drm_device *dev, |
219 | struct intel_framebuffer *ifb, | 228 | struct intel_framebuffer *ifb, |
220 | struct drm_mode_fb_cmd *mode_cmd, | 229 | struct drm_mode_fb_cmd *mode_cmd, |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index c3c505244e07..3e18c9e7729b 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -98,7 +98,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
98 | 98 | ||
99 | mutex_lock(&dev->struct_mutex); | 99 | mutex_lock(&dev->struct_mutex); |
100 | 100 | ||
101 | ret = i915_gem_object_pin(fbo, 64*1024); | 101 | ret = intel_pin_and_fence_fb_obj(dev, fbo); |
102 | if (ret) { | 102 | if (ret) { |
103 | DRM_ERROR("failed to pin fb: %d\n", ret); | 103 | DRM_ERROR("failed to pin fb: %d\n", ret); |
104 | goto out_unref; | 104 | goto out_unref; |
@@ -236,7 +236,7 @@ int intel_fbdev_destroy(struct drm_device *dev, | |||
236 | 236 | ||
237 | drm_framebuffer_cleanup(&ifb->base); | 237 | drm_framebuffer_cleanup(&ifb->base); |
238 | if (ifb->obj) | 238 | if (ifb->obj) |
239 | drm_gem_object_unreference_unlocked(ifb->obj); | 239 | drm_gem_object_unreference(ifb->obj); |
240 | 240 | ||
241 | return 0; | 241 | return 0; |
242 | } | 242 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 83bd764b000e..197887ed1823 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -54,10 +54,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
54 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; | 54 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
55 | u32 sdvox; | 55 | u32 sdvox; |
56 | 56 | ||
57 | sdvox = SDVO_ENCODING_HDMI | | 57 | sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; |
58 | SDVO_BORDER_ENABLE | | 58 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
59 | SDVO_VSYNC_ACTIVE_HIGH | | 59 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; |
60 | SDVO_HSYNC_ACTIVE_HIGH; | 60 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
61 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | ||
61 | 62 | ||
62 | if (hdmi_priv->has_hdmi_sink) { | 63 | if (hdmi_priv->has_hdmi_sink) { |
63 | sdvox |= SDVO_AUDIO_ENABLE; | 64 | sdvox |= SDVO_AUDIO_ENABLE; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 31df55f0a0a7..0a2e60059fb3 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -156,31 +156,73 @@ static int intel_lvds_mode_valid(struct drm_connector *connector, | |||
156 | return MODE_OK; | 156 | return MODE_OK; |
157 | } | 157 | } |
158 | 158 | ||
159 | static void | ||
160 | centre_horizontally(struct drm_display_mode *mode, | ||
161 | int width) | ||
162 | { | ||
163 | u32 border, sync_pos, blank_width, sync_width; | ||
164 | |||
165 | /* keep the hsync and hblank widths constant */ | ||
166 | sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start; | ||
167 | blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start; | ||
168 | sync_pos = (blank_width - sync_width + 1) / 2; | ||
169 | |||
170 | border = (mode->hdisplay - width + 1) / 2; | ||
171 | border += border & 1; /* make the border even */ | ||
172 | |||
173 | mode->crtc_hdisplay = width; | ||
174 | mode->crtc_hblank_start = width + border; | ||
175 | mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width; | ||
176 | |||
177 | mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; | ||
178 | mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; | ||
179 | } | ||
180 | |||
181 | static void | ||
182 | centre_vertically(struct drm_display_mode *mode, | ||
183 | int height) | ||
184 | { | ||
185 | u32 border, sync_pos, blank_width, sync_width; | ||
186 | |||
187 | /* keep the vsync and vblank widths constant */ | ||
188 | sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
189 | blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start; | ||
190 | sync_pos = (blank_width - sync_width + 1) / 2; | ||
191 | |||
192 | border = (mode->vdisplay - height + 1) / 2; | ||
193 | |||
194 | mode->crtc_vdisplay = height; | ||
195 | mode->crtc_vblank_start = height + border; | ||
196 | mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width; | ||
197 | |||
198 | mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; | ||
199 | mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; | ||
200 | } | ||
201 | |||
202 | static inline u32 panel_fitter_scaling(u32 source, u32 target) | ||
203 | { | ||
204 | /* | ||
205 | * Floating point operations are not supported, so FACTOR is | ||
206 | * defined as a fixed-point scale to avoid them when | ||
207 | * calculating the panel scaling ratio. | ||
208 | */ | ||
209 | #define ACCURACY 12 | ||
210 | #define FACTOR (1 << ACCURACY) | ||
211 | u32 ratio = source * FACTOR / target; | ||
212 | return (FACTOR * ratio + FACTOR/2) / FACTOR; | ||
213 | } | ||
214 | |||
159 | static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | 215 | static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, |
160 | struct drm_display_mode *mode, | 216 | struct drm_display_mode *mode, |
161 | struct drm_display_mode *adjusted_mode) | 217 | struct drm_display_mode *adjusted_mode) |
162 | { | 218 | { |
163 | /* | ||
164 | * float point operation is not supported . So the PANEL_RATIO_FACTOR | ||
165 | * is defined, which can avoid the float point computation when | ||
166 | * calculating the panel ratio. | ||
167 | */ | ||
168 | #define PANEL_RATIO_FACTOR 8192 | ||
169 | struct drm_device *dev = encoder->dev; | 219 | struct drm_device *dev = encoder->dev; |
170 | struct drm_i915_private *dev_priv = dev->dev_private; | 220 | struct drm_i915_private *dev_priv = dev->dev_private; |
171 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 221 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
172 | struct drm_encoder *tmp_encoder; | 222 | struct drm_encoder *tmp_encoder; |
173 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 223 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
174 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; | 224 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
175 | u32 pfit_control = 0, pfit_pgm_ratios = 0; | 225 | u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; |
176 | int left_border = 0, right_border = 0, top_border = 0; | ||
177 | int bottom_border = 0; | ||
178 | bool border = 0; | ||
179 | int panel_ratio, desired_ratio, vert_scale, horiz_scale; | ||
180 | int horiz_ratio, vert_ratio; | ||
181 | u32 hsync_width, vsync_width; | ||
182 | u32 hblank_width, vblank_width; | ||
183 | u32 hsync_pos, vsync_pos; | ||
184 | 226 | ||
185 | /* Should never happen!! */ | 227 | /* Should never happen!! */ |
186 | if (!IS_I965G(dev) && intel_crtc->pipe == 0) { | 228 | if (!IS_I965G(dev) && intel_crtc->pipe == 0) { |
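The helpers added above centre a smaller mode inside the panel's native timings while keeping the sync and blank widths constant, and panel_fitter_scaling() produces the 12-bit fixed-point ratio that the aspect-preserving path further down programs into PFIT_PGM_RATIOS on pre-965 hardware. A standalone sketch of that arithmetic, assuming an illustrative 1024x768 mode on a 1680x1050 panel:

    #include <stdio.h>

    /* Same fixed-point scheme as panel_fitter_scaling() in the hunk above. */
    #define ACCURACY 12
    #define FACTOR   (1u << ACCURACY)

    static unsigned int panel_fitter_scaling(unsigned int source, unsigned int target)
    {
            unsigned int ratio = source * FACTOR / target;
            return (FACTOR * ratio + FACTOR / 2) / FACTOR;
    }

    int main(void)
    {
            unsigned int panel_w = 1680, panel_h = 1050;   /* native panel mode */
            unsigned int mode_w = 1024, mode_h = 768;      /* requested mode */

            /* Cross-multiplied aspect comparison, as in the SCALE_ASPECT branch. */
            unsigned int scaled_width = panel_w * mode_h;   /* 1290240 */
            unsigned int scaled_height = mode_w * panel_h;  /* 1075200 */

            if (scaled_width > scaled_height) {
                    /* pillarbox: fill the panel height, centre horizontally */
                    unsigned int new_width = scaled_height / mode_h;
                    unsigned int bits = panel_fitter_scaling(mode_h, panel_h);

                    printf("pillarbox to %ux%u, PFIT ratio bits = %u\n",
                           new_width, panel_h, bits);
                    /* prints pillarbox to 1400x1050, PFIT ratio bits = 2995 */
            }
            return 0;
    }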
@@ -200,27 +242,25 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
200 | if (dev_priv->panel_fixed_mode == NULL) | 242 | if (dev_priv->panel_fixed_mode == NULL) |
201 | return true; | 243 | return true; |
202 | /* | 244 | /* |
203 | * If we have timings from the BIOS for the panel, put them in | 245 | * We have timings from the BIOS for the panel, put them in |
204 | * to the adjusted mode. The CRTC will be set up for this mode, | 246 | * to the adjusted mode. The CRTC will be set up for this mode, |
205 | * with the panel scaling set up to source from the H/VDisplay | 247 | * with the panel scaling set up to source from the H/VDisplay |
206 | * of the original mode. | 248 | * of the original mode. |
207 | */ | 249 | */ |
208 | if (dev_priv->panel_fixed_mode != NULL) { | 250 | adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; |
209 | adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; | 251 | adjusted_mode->hsync_start = |
210 | adjusted_mode->hsync_start = | 252 | dev_priv->panel_fixed_mode->hsync_start; |
211 | dev_priv->panel_fixed_mode->hsync_start; | 253 | adjusted_mode->hsync_end = |
212 | adjusted_mode->hsync_end = | 254 | dev_priv->panel_fixed_mode->hsync_end; |
213 | dev_priv->panel_fixed_mode->hsync_end; | 255 | adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; |
214 | adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; | 256 | adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; |
215 | adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; | 257 | adjusted_mode->vsync_start = |
216 | adjusted_mode->vsync_start = | 258 | dev_priv->panel_fixed_mode->vsync_start; |
217 | dev_priv->panel_fixed_mode->vsync_start; | 259 | adjusted_mode->vsync_end = |
218 | adjusted_mode->vsync_end = | 260 | dev_priv->panel_fixed_mode->vsync_end; |
219 | dev_priv->panel_fixed_mode->vsync_end; | 261 | adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; |
220 | adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; | 262 | adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; |
221 | adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; | 263 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); |
222 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
223 | } | ||
224 | 264 | ||
225 | /* Make sure pre-965s set dither correctly */ | 265 | /* Make sure pre-965s set dither correctly */ |
226 | if (!IS_I965G(dev)) { | 266 | if (!IS_I965G(dev)) { |
@@ -230,11 +270,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
230 | 270 | ||
231 | /* Native modes don't need fitting */ | 271 | /* Native modes don't need fitting */ |
232 | if (adjusted_mode->hdisplay == mode->hdisplay && | 272 | if (adjusted_mode->hdisplay == mode->hdisplay && |
233 | adjusted_mode->vdisplay == mode->vdisplay) { | 273 | adjusted_mode->vdisplay == mode->vdisplay) |
234 | pfit_pgm_ratios = 0; | ||
235 | border = 0; | ||
236 | goto out; | 274 | goto out; |
237 | } | ||
238 | 275 | ||
239 | /* full screen scale for now */ | 276 | /* full screen scale for now */ |
240 | if (HAS_PCH_SPLIT(dev)) | 277 | if (HAS_PCH_SPLIT(dev)) |
@@ -242,25 +279,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
242 | 279 | ||
243 | /* 965+ wants fuzzy fitting */ | 280 | /* 965+ wants fuzzy fitting */ |
244 | if (IS_I965G(dev)) | 281 | if (IS_I965G(dev)) |
245 | pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | | 282 | pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | |
246 | PFIT_FILTER_FUZZY; | 283 | PFIT_FILTER_FUZZY); |
247 | 284 | ||
248 | hsync_width = adjusted_mode->crtc_hsync_end - | ||
249 | adjusted_mode->crtc_hsync_start; | ||
250 | vsync_width = adjusted_mode->crtc_vsync_end - | ||
251 | adjusted_mode->crtc_vsync_start; | ||
252 | hblank_width = adjusted_mode->crtc_hblank_end - | ||
253 | adjusted_mode->crtc_hblank_start; | ||
254 | vblank_width = adjusted_mode->crtc_vblank_end - | ||
255 | adjusted_mode->crtc_vblank_start; | ||
256 | /* | ||
257 | * Deal with panel fitting options. Figure out how to stretch the | ||
258 | * image based on its aspect ratio & the current panel fitting mode. | ||
259 | */ | ||
260 | panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR / | ||
261 | adjusted_mode->vdisplay; | ||
262 | desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR / | ||
263 | mode->vdisplay; | ||
264 | /* | 285 | /* |
265 | * Enable automatic panel scaling for non-native modes so that they fill | 286 | * Enable automatic panel scaling for non-native modes so that they fill |
266 | * the screen. Should be enabled before the pipe is enabled, according | 287 | * the screen. Should be enabled before the pipe is enabled, according |
@@ -278,170 +299,63 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
278 | * For centered modes, we have to calculate border widths & | 299 | * For centered modes, we have to calculate border widths & |
279 | * heights and modify the values programmed into the CRTC. | 300 | * heights and modify the values programmed into the CRTC. |
280 | */ | 301 | */ |
281 | left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2; | 302 | centre_horizontally(adjusted_mode, mode->hdisplay); |
282 | right_border = left_border; | 303 | centre_vertically(adjusted_mode, mode->vdisplay); |
283 | if (mode->hdisplay & 1) | 304 | border = LVDS_BORDER_ENABLE; |
284 | right_border++; | ||
285 | top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2; | ||
286 | bottom_border = top_border; | ||
287 | if (mode->vdisplay & 1) | ||
288 | bottom_border++; | ||
289 | /* Set active & border values */ | ||
290 | adjusted_mode->crtc_hdisplay = mode->hdisplay; | ||
291 | /* Keep the boder be even */ | ||
292 | if (right_border & 1) | ||
293 | right_border++; | ||
294 | /* use the border directly instead of border minuse one */ | ||
295 | adjusted_mode->crtc_hblank_start = mode->hdisplay + | ||
296 | right_border; | ||
297 | /* keep the blank width constant */ | ||
298 | adjusted_mode->crtc_hblank_end = | ||
299 | adjusted_mode->crtc_hblank_start + hblank_width; | ||
300 | /* get the hsync pos relative to hblank start */ | ||
301 | hsync_pos = (hblank_width - hsync_width) / 2; | ||
302 | /* keep the hsync pos be even */ | ||
303 | if (hsync_pos & 1) | ||
304 | hsync_pos++; | ||
305 | adjusted_mode->crtc_hsync_start = | ||
306 | adjusted_mode->crtc_hblank_start + hsync_pos; | ||
307 | /* keep the hsync width constant */ | ||
308 | adjusted_mode->crtc_hsync_end = | ||
309 | adjusted_mode->crtc_hsync_start + hsync_width; | ||
310 | adjusted_mode->crtc_vdisplay = mode->vdisplay; | ||
311 | /* use the border instead of border minus one */ | ||
312 | adjusted_mode->crtc_vblank_start = mode->vdisplay + | ||
313 | bottom_border; | ||
314 | /* keep the vblank width constant */ | ||
315 | adjusted_mode->crtc_vblank_end = | ||
316 | adjusted_mode->crtc_vblank_start + vblank_width; | ||
317 | /* get the vsync start postion relative to vblank start */ | ||
318 | vsync_pos = (vblank_width - vsync_width) / 2; | ||
319 | adjusted_mode->crtc_vsync_start = | ||
320 | adjusted_mode->crtc_vblank_start + vsync_pos; | ||
321 | /* keep the vsync width constant */ | ||
322 | adjusted_mode->crtc_vsync_end = | ||
323 | adjusted_mode->crtc_vsync_start + vsync_width; | ||
324 | border = 1; | ||
325 | break; | 305 | break; |
306 | |||
326 | case DRM_MODE_SCALE_ASPECT: | 307 | case DRM_MODE_SCALE_ASPECT: |
327 | /* Scale but preserve the spect ratio */ | 308 | /* Scale but preserve the aspect ratio */ |
328 | pfit_control |= PFIT_ENABLE; | ||
329 | if (IS_I965G(dev)) { | 309 | if (IS_I965G(dev)) { |
310 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; | ||
311 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | ||
312 | |||
313 | pfit_control |= PFIT_ENABLE; | ||
330 | /* 965+ is easy, it does everything in hw */ | 314 | /* 965+ is easy, it does everything in hw */ |
331 | if (panel_ratio > desired_ratio) | 315 | if (scaled_width > scaled_height) |
332 | pfit_control |= PFIT_SCALING_PILLAR; | 316 | pfit_control |= PFIT_SCALING_PILLAR; |
333 | else if (panel_ratio < desired_ratio) | 317 | else if (scaled_width < scaled_height) |
334 | pfit_control |= PFIT_SCALING_LETTER; | 318 | pfit_control |= PFIT_SCALING_LETTER; |
335 | else | 319 | else |
336 | pfit_control |= PFIT_SCALING_AUTO; | 320 | pfit_control |= PFIT_SCALING_AUTO; |
337 | } else { | 321 | } else { |
322 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; | ||
323 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | ||
338 | /* | 324 | /* |
339 | * For earlier chips we have to calculate the scaling | 325 | * For earlier chips we have to calculate the scaling |
340 | * ratio by hand and program it into the | 326 | * ratio by hand and program it into the |
341 | * PFIT_PGM_RATIO register | 327 | * PFIT_PGM_RATIO register |
342 | */ | 328 | */ |
343 | u32 horiz_bits, vert_bits, bits = 12; | 329 | if (scaled_width > scaled_height) { /* pillar */ |
344 | horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/ | 330 | centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay); |
345 | adjusted_mode->hdisplay; | 331 | |
346 | vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/ | 332 | border = LVDS_BORDER_ENABLE; |
347 | adjusted_mode->vdisplay; | 333 | if (mode->vdisplay != adjusted_mode->vdisplay) { |
348 | horiz_scale = adjusted_mode->hdisplay * | 334 | u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay); |
349 | PANEL_RATIO_FACTOR / mode->hdisplay; | 335 | pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | |
350 | vert_scale = adjusted_mode->vdisplay * | 336 | bits << PFIT_VERT_SCALE_SHIFT); |
351 | PANEL_RATIO_FACTOR / mode->vdisplay; | 337 | pfit_control |= (PFIT_ENABLE | |
352 | 338 | VERT_INTERP_BILINEAR | | |
353 | /* retain aspect ratio */ | 339 | HORIZ_INTERP_BILINEAR); |
354 | if (panel_ratio > desired_ratio) { /* Pillar */ | 340 | } |
355 | u32 scaled_width; | 341 | } else if (scaled_width < scaled_height) { /* letter */ |
356 | scaled_width = mode->hdisplay * vert_scale / | 342 | centre_vertically(adjusted_mode, scaled_width / mode->hdisplay); |
357 | PANEL_RATIO_FACTOR; | 343 | |
358 | horiz_ratio = vert_ratio; | 344 | border = LVDS_BORDER_ENABLE; |
359 | pfit_control |= (VERT_AUTO_SCALE | | 345 | if (mode->hdisplay != adjusted_mode->hdisplay) { |
360 | VERT_INTERP_BILINEAR | | 346 | u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay); |
361 | HORIZ_INTERP_BILINEAR); | 347 | pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | |
362 | /* Pillar will have left/right borders */ | 348 | bits << PFIT_VERT_SCALE_SHIFT); |
363 | left_border = (adjusted_mode->hdisplay - | 349 | pfit_control |= (PFIT_ENABLE | |
364 | scaled_width) / 2; | 350 | VERT_INTERP_BILINEAR | |
365 | right_border = left_border; | 351 | HORIZ_INTERP_BILINEAR); |
366 | if (mode->hdisplay & 1) /* odd resolutions */ | 352 | } |
367 | right_border++; | 353 | } else |
368 | /* keep the border be even */ | 354 | /* Aspects match, Let hw scale both directions */ |
369 | if (right_border & 1) | 355 | pfit_control |= (PFIT_ENABLE | |
370 | right_border++; | 356 | VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | |
371 | adjusted_mode->crtc_hdisplay = scaled_width; | ||
372 | /* use border instead of border minus one */ | ||
373 | adjusted_mode->crtc_hblank_start = | ||
374 | scaled_width + right_border; | ||
375 | /* keep the hblank width constant */ | ||
376 | adjusted_mode->crtc_hblank_end = | ||
377 | adjusted_mode->crtc_hblank_start + | ||
378 | hblank_width; | ||
379 | /* | ||
380 | * get the hsync start pos relative to | ||
381 | * hblank start | ||
382 | */ | ||
383 | hsync_pos = (hblank_width - hsync_width) / 2; | ||
384 | /* keep the hsync_pos be even */ | ||
385 | if (hsync_pos & 1) | ||
386 | hsync_pos++; | ||
387 | adjusted_mode->crtc_hsync_start = | ||
388 | adjusted_mode->crtc_hblank_start + | ||
389 | hsync_pos; | ||
390 | /* keept hsync width constant */ | ||
391 | adjusted_mode->crtc_hsync_end = | ||
392 | adjusted_mode->crtc_hsync_start + | ||
393 | hsync_width; | ||
394 | border = 1; | ||
395 | } else if (panel_ratio < desired_ratio) { /* letter */ | ||
396 | u32 scaled_height = mode->vdisplay * | ||
397 | horiz_scale / PANEL_RATIO_FACTOR; | ||
398 | vert_ratio = horiz_ratio; | ||
399 | pfit_control |= (HORIZ_AUTO_SCALE | | ||
400 | VERT_INTERP_BILINEAR | | ||
401 | HORIZ_INTERP_BILINEAR); | ||
402 | /* Letterbox will have top/bottom border */ | ||
403 | top_border = (adjusted_mode->vdisplay - | ||
404 | scaled_height) / 2; | ||
405 | bottom_border = top_border; | ||
406 | if (mode->vdisplay & 1) | ||
407 | bottom_border++; | ||
408 | adjusted_mode->crtc_vdisplay = scaled_height; | ||
409 | /* use border instead of border minus one */ | ||
410 | adjusted_mode->crtc_vblank_start = | ||
411 | scaled_height + bottom_border; | ||
412 | /* keep the vblank width constant */ | ||
413 | adjusted_mode->crtc_vblank_end = | ||
414 | adjusted_mode->crtc_vblank_start + | ||
415 | vblank_width; | ||
416 | /* | ||
417 | * get the vsync start pos relative to | ||
418 | * vblank start | ||
419 | */ | ||
420 | vsync_pos = (vblank_width - vsync_width) / 2; | ||
421 | adjusted_mode->crtc_vsync_start = | ||
422 | adjusted_mode->crtc_vblank_start + | ||
423 | vsync_pos; | ||
424 | /* keep the vsync width constant */ | ||
425 | adjusted_mode->crtc_vsync_end = | ||
426 | adjusted_mode->crtc_vsync_start + | ||
427 | vsync_width; | ||
428 | border = 1; | ||
429 | } else { | ||
430 | /* Aspects match, Let hw scale both directions */ | ||
431 | pfit_control |= (VERT_AUTO_SCALE | | ||
432 | HORIZ_AUTO_SCALE | | ||
433 | VERT_INTERP_BILINEAR | | 357 | VERT_INTERP_BILINEAR | |
434 | HORIZ_INTERP_BILINEAR); | 358 | HORIZ_INTERP_BILINEAR); |
435 | } | ||
436 | horiz_bits = (1 << bits) * horiz_ratio / | ||
437 | PANEL_RATIO_FACTOR; | ||
438 | vert_bits = (1 << bits) * vert_ratio / | ||
439 | PANEL_RATIO_FACTOR; | ||
440 | pfit_pgm_ratios = | ||
441 | ((vert_bits << PFIT_VERT_SCALE_SHIFT) & | ||
442 | PFIT_VERT_SCALE_MASK) | | ||
443 | ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) & | ||
444 | PFIT_HORIZ_SCALE_MASK); | ||
445 | } | 359 | } |
446 | break; | 360 | break; |
447 | 361 | ||
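The hunk above replaces the open-coded pillarbox/letterbox timing math with centring helpers (centre_vertically() is visible above) plus a shared panel_fitter_scaling() helper whose body lies outside this hunk. As a rough user-space sketch of the idea only: the two aspect ratios are compared by cross-multiplication, so no division or floating point is needed, and the programmed scale factor is a fixed-point ratio. The names, the 12-bit accuracy and the example resolutions below are assumptions for illustration, not the kernel implementation.

/* Sketch of the aspect-preserving decision made by the reworked
 * DRM_MODE_SCALE_ASPECT path.  Names, precision and resolutions are
 * illustrative assumptions; only the comparison logic mirrors the hunk. */
#include <stdint.h>
#include <stdio.h>

#define PFIT_FACTOR (1u << 12)	/* assumed fixed-point accuracy */

/* Ratio of source to target, expressed in 1/PFIT_FACTOR units. */
static uint32_t panel_fitter_ratio(uint32_t source, uint32_t target)
{
	return source * PFIT_FACTOR / target;
}

int main(void)
{
	uint32_t mode_w = 800, mode_h = 600;	/* requested mode */
	uint32_t panel_w = 1366, panel_h = 768;	/* fixed panel timing */

	/* Cross-multiply to compare the two aspect ratios without
	 * dividing (and without floating point). */
	uint64_t scaled_width  = (uint64_t)panel_w * mode_h;
	uint64_t scaled_height = (uint64_t)mode_w * panel_h;

	if (scaled_width > scaled_height)
		printf("pillarbox: full height, borders left/right\n");
	else if (scaled_width < scaled_height)
		printf("letterbox: full width, borders top/bottom\n");
	else
		printf("aspects match: let the hardware scale both axes\n");

	printf("vertical ratio for the fitter: %u of %u\n",
	       (unsigned)panel_fitter_ratio(mode_h, panel_h), PFIT_FACTOR);
	return 0;
}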
@@ -458,6 +372,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
458 | VERT_INTERP_BILINEAR | | 372 | VERT_INTERP_BILINEAR | |
459 | HORIZ_INTERP_BILINEAR); | 373 | HORIZ_INTERP_BILINEAR); |
460 | break; | 374 | break; |
375 | |||
461 | default: | 376 | default: |
462 | break; | 377 | break; |
463 | } | 378 | } |
@@ -465,14 +380,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
465 | out: | 380 | out: |
466 | lvds_priv->pfit_control = pfit_control; | 381 | lvds_priv->pfit_control = pfit_control; |
467 | lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; | 382 | lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; |
468 | /* | 383 | dev_priv->lvds_border_bits = border; |
469 | * When a border is present, it means that LVDS_BORDER | 384 | |
470 | * should be enabled. | ||
471 | */ | ||
472 | if (border) | ||
473 | dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE; | ||
474 | else | ||
475 | dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE); | ||
476 | /* | 385 | /* |
477 | * XXX: It would be nice to support lower refresh rates on the | 386 | * XXX: It would be nice to support lower refresh rates on the |
478 | * panels to reduce power consumption, and perhaps match the | 387 | * panels to reduce power consumption, and perhaps match the |
@@ -599,6 +508,26 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
599 | return 0; | 508 | return 0; |
600 | } | 509 | } |
601 | 510 | ||
511 | static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) | ||
512 | { | ||
513 | DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident); | ||
514 | return 1; | ||
515 | } | ||
516 | |||
517 | /* The GPU hangs up on these systems if modeset is performed on LID open */ | ||
518 | static const struct dmi_system_id intel_no_modeset_on_lid[] = { | ||
519 | { | ||
520 | .callback = intel_no_modeset_on_lid_dmi_callback, | ||
521 | .ident = "Toshiba Tecra A11", | ||
522 | .matches = { | ||
523 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
524 | DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"), | ||
525 | }, | ||
526 | }, | ||
527 | |||
528 | { } /* terminating entry */ | ||
529 | }; | ||
530 | |||
602 | /* | 531 | /* |
603 | * Lid events. Note the use of 'modeset_on_lid': | 532 | * Lid events. Note the use of 'modeset_on_lid': |
604 | * - we set it on lid close, and reset it on open | 533 | * - we set it on lid close, and reset it on open |
@@ -622,6 +551,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
622 | */ | 551 | */ |
623 | if (connector) | 552 | if (connector) |
624 | connector->status = connector->funcs->detect(connector); | 553 | connector->status = connector->funcs->detect(connector); |
554 | /* Don't force modeset on machines where it causes a GPU lockup */ | ||
555 | if (dmi_check_system(intel_no_modeset_on_lid)) | ||
556 | return NOTIFY_OK; | ||
625 | if (!acpi_lid_open()) { | 557 | if (!acpi_lid_open()) { |
626 | dev_priv->modeset_on_lid = 1; | 558 | dev_priv->modeset_on_lid = 1; |
627 | return NOTIFY_OK; | 559 | return NOTIFY_OK; |
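The last two hunks of the intel_lvds.c diff add a DMI quirk: systems listed in intel_no_modeset_on_lid (currently only the Toshiba Tecra A11) make the lid notifier return early, because forcing a modeset on lid open hangs the GPU there. The user-space sketch below shows the same table-plus-match pattern with a mocked-up lookup; the real driver relies on the kernel's dmi_check_system() and DMI_MATCH() seen above.

/* Sketch of the DMI-quirk pattern used above: a table of known-bad
 * systems plus a helper that reports whether the running machine is in
 * it.  The lookup here is a mock; the kernel uses dmi_check_system(). */
#include <stdio.h>
#include <string.h>

struct quirk_entry {
	const char *vendor;
	const char *product;
	const char *ident;
};

static const struct quirk_entry no_modeset_on_lid[] = {
	{ "TOSHIBA", "TECRA A11", "Toshiba Tecra A11" },
	{ NULL, NULL, NULL }	/* terminating entry */
};

static int system_matches_quirk(const char *vendor, const char *product)
{
	const struct quirk_entry *q;

	for (q = no_modeset_on_lid; q->vendor; q++) {
		if (!strcmp(vendor, q->vendor) && !strcmp(product, q->product)) {
			printf("Skipping forced modeset for %s\n", q->ident);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	/* In the driver these strings come from the firmware's DMI tables. */
	if (system_matches_quirk("TOSHIBA", "TECRA A11"))
		printf("lid notifier returns early, no modeset\n");
	else
		printf("normal lid handling\n");
	return 0;
}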
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d7ad5139d17c..f26ec2f27d36 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -65,7 +65,7 @@ | |||
65 | #define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ | 65 | #define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ |
66 | #define OCMD_TVSYNCFLIP_PARITY (0x1<<9) | 66 | #define OCMD_TVSYNCFLIP_PARITY (0x1<<9) |
67 | #define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) | 67 | #define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) |
68 | #define OCMD_BUF_TYPE_MASK (Ox1<<5) | 68 | #define OCMD_BUF_TYPE_MASK (0x1<<5) |
69 | #define OCMD_BUF_TYPE_FRAME (0x0<<5) | 69 | #define OCMD_BUF_TYPE_FRAME (0x0<<5) |
70 | #define OCMD_BUF_TYPE_FIELD (0x1<<5) | 70 | #define OCMD_BUF_TYPE_FIELD (0x1<<5) |
71 | #define OCMD_TEST_MODE (0x1<<4) | 71 | #define OCMD_TEST_MODE (0x1<<4) |
@@ -958,7 +958,7 @@ static int check_overlay_src(struct drm_device *dev, | |||
958 | || rec->src_width < N_HORIZ_Y_TAPS*4) | 958 | || rec->src_width < N_HORIZ_Y_TAPS*4) |
959 | return -EINVAL; | 959 | return -EINVAL; |
960 | 960 | ||
961 | /* check alingment constrains */ | 961 | /* check alignment constraints */ |
962 | switch (rec->flags & I915_OVERLAY_TYPE_MASK) { | 962 | switch (rec->flags & I915_OVERLAY_TYPE_MASK) { |
963 | case I915_OVERLAY_RGB: | 963 | case I915_OVERLAY_RGB: |
964 | /* not implemented */ | 964 | /* not implemented */ |
@@ -990,7 +990,10 @@ static int check_overlay_src(struct drm_device *dev, | |||
990 | return -EINVAL; | 990 | return -EINVAL; |
991 | 991 | ||
992 | /* stride checking */ | 992 | /* stride checking */ |
993 | stride_mask = 63; | 993 | if (IS_I830(dev) || IS_845G(dev)) |
994 | stride_mask = 255; | ||
995 | else | ||
996 | stride_mask = 63; | ||
994 | 997 | ||
995 | if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) | 998 | if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) |
996 | return -EINVAL; | 999 | return -EINVAL; |
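The stride check above becomes chipset-dependent: the 830/845 overlay needs 256-byte aligned strides (mask 255) while later parts keep the 64-byte requirement (mask 63). A minimal sketch of that rule, with a plain boolean standing in for the kernel's IS_I830()/IS_845G() checks:

/* Sketch of the stride alignment rule introduced above: older 830/845
 * parts need 256-byte aligned overlay strides, the rest need 64 bytes.
 * The "is_830_or_845" flag stands in for IS_I830()/IS_845G(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool overlay_stride_ok(uint32_t stride, bool is_830_or_845)
{
	uint32_t stride_mask = is_830_or_845 ? 255 : 63;

	/* The stride is acceptable when none of the low mask bits are set. */
	return (stride & stride_mask) == 0;
}

int main(void)
{
	printf("stride 1024 on 845: %s\n",
	       overlay_stride_ok(1024, true) ? "ok" : "rejected");
	printf("stride 320 on 845:  %s\n",
	       overlay_stride_ok(320, true) ? "ok" : "rejected");
	printf("stride 320 on 965:  %s\n",
	       overlay_stride_ok(320, false) ? "ok" : "rejected");
	return 0;
}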
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 76993ac16cc1..8b2bfc005c59 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1237,9 +1237,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1237 | 1237 | ||
1238 | /* Set the SDVO control regs. */ | 1238 | /* Set the SDVO control regs. */ |
1239 | if (IS_I965G(dev)) { | 1239 | if (IS_I965G(dev)) { |
1240 | sdvox |= SDVO_BORDER_ENABLE | | 1240 | sdvox |= SDVO_BORDER_ENABLE; |
1241 | SDVO_VSYNC_ACTIVE_HIGH | | 1241 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
1242 | SDVO_HSYNC_ACTIVE_HIGH; | 1242 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; |
1243 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | ||
1244 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | ||
1243 | } else { | 1245 | } else { |
1244 | sdvox |= I915_READ(sdvo_priv->sdvo_reg); | 1246 | sdvox |= I915_READ(sdvo_priv->sdvo_reg); |
1245 | switch (sdvo_priv->sdvo_reg) { | 1247 | switch (sdvo_priv->sdvo_reg) { |
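On 965 the SDVO path no longer hard-codes both syncs active-high; it copies the polarity from adjusted_mode->flags, matching the HDMI and DP changes listed in the merge. The sketch below shows the flag-to-bit translation in isolation; the MODE_FLAG_*/CTL_* values are made-up placeholders, and only the conditional logic mirrors the hunk.

/* Sketch of deriving sync-polarity control bits from mode flags, as the
 * SDVO path now does.  Bit positions here are invented for the example. */
#include <stdint.h>
#include <stdio.h>

#define MODE_FLAG_PHSYNC	(1u << 0)	/* positive hsync requested */
#define MODE_FLAG_PVSYNC	(1u << 1)	/* positive vsync requested */

#define CTL_BORDER_ENABLE	(1u << 7)	/* illustrative bit */
#define CTL_HSYNC_ACTIVE_HIGH	(1u << 3)	/* illustrative bit */
#define CTL_VSYNC_ACTIVE_HIGH	(1u << 4)	/* illustrative bit */

static uint32_t build_ctl(uint32_t mode_flags)
{
	uint32_t ctl = CTL_BORDER_ENABLE;

	/* Raise the active-high bits only when the mode actually asks for
	 * positive sync, instead of forcing both as the old code did. */
	if (mode_flags & MODE_FLAG_PVSYNC)
		ctl |= CTL_VSYNC_ACTIVE_HIGH;
	if (mode_flags & MODE_FLAG_PHSYNC)
		ctl |= CTL_HSYNC_ACTIVE_HIGH;

	return ctl;
}

int main(void)
{
	printf("ctl for +hsync/-vsync mode: 0x%08x\n",
	       (unsigned)build_ctl(MODE_FLAG_PHSYNC));
	printf("ctl for +hsync/+vsync mode: 0x%08x\n",
	       (unsigned)build_ctl(MODE_FLAG_PHSYNC | MODE_FLAG_PVSYNC));
	return 0;
}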
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 6d553c29d106..d61ffbc381e5 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -476,7 +476,7 @@ static const struct tv_mode tv_modes[] = { | |||
476 | .vi_end_f1 = 20, .vi_end_f2 = 21, | 476 | .vi_end_f1 = 20, .vi_end_f2 = 21, |
477 | .nbr_end = 240, | 477 | .nbr_end = 240, |
478 | 478 | ||
479 | .burst_ena = 8, | 479 | .burst_ena = true, |
480 | .hburst_start = 72, .hburst_len = 34, | 480 | .hburst_start = 72, .hburst_len = 34, |
481 | .vburst_start_f1 = 9, .vburst_end_f1 = 240, | 481 | .vburst_start_f1 = 9, .vburst_end_f1 = 240, |
482 | .vburst_start_f2 = 10, .vburst_end_f2 = 240, | 482 | .vburst_start_f2 = 10, .vburst_end_f2 = 240, |
@@ -896,8 +896,6 @@ static const struct tv_mode tv_modes[] = { | |||
896 | }, | 896 | }, |
897 | }; | 897 | }; |
898 | 898 | ||
899 | #define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0]) | ||
900 | |||
901 | static void | 899 | static void |
902 | intel_tv_dpms(struct drm_encoder *encoder, int mode) | 900 | intel_tv_dpms(struct drm_encoder *encoder, int mode) |
903 | { | 901 | { |
@@ -1512,7 +1510,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop | |||
1512 | tv_priv->margin[TV_MARGIN_BOTTOM] = val; | 1510 | tv_priv->margin[TV_MARGIN_BOTTOM] = val; |
1513 | changed = true; | 1511 | changed = true; |
1514 | } else if (property == dev->mode_config.tv_mode_property) { | 1512 | } else if (property == dev->mode_config.tv_mode_property) { |
1515 | if (val >= NUM_TV_MODES) { | 1513 | if (val >= ARRAY_SIZE(tv_modes)) { |
1516 | ret = -EINVAL; | 1514 | ret = -EINVAL; |
1517 | goto out; | 1515 | goto out; |
1518 | } | 1516 | } |
@@ -1693,13 +1691,13 @@ intel_tv_init(struct drm_device *dev) | |||
1693 | connector->doublescan_allowed = false; | 1691 | connector->doublescan_allowed = false; |
1694 | 1692 | ||
1695 | /* Create TV properties then attach current values */ | 1693 | /* Create TV properties then attach current values */ |
1696 | tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES, | 1694 | tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes), |
1697 | GFP_KERNEL); | 1695 | GFP_KERNEL); |
1698 | if (!tv_format_names) | 1696 | if (!tv_format_names) |
1699 | goto out; | 1697 | goto out; |
1700 | for (i = 0; i < NUM_TV_MODES; i++) | 1698 | for (i = 0; i < ARRAY_SIZE(tv_modes); i++) |
1701 | tv_format_names[i] = tv_modes[i].name; | 1699 | tv_format_names[i] = tv_modes[i].name; |
1702 | drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names); | 1700 | drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names); |
1703 | 1701 | ||
1704 | drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, | 1702 | drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, |
1705 | initial_mode); | 1703 | initial_mode); |
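intel_tv.c drops the hand-rolled NUM_TV_MODES macro in favour of ARRAY_SIZE(tv_modes), so the element count always tracks the array definition and the unparenthesised sizeof expression being removed above can no longer cause a precedence surprise. A small self-contained demonstration (the table entries are invented for the example):

/* Sketch showing why ARRAY_SIZE(tv_modes) is safer than the removed
 * NUM_TV_MODES macro: the count follows the array automatically and the
 * expansion is fully parenthesised. */
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct tv_mode {
	const char *name;
	int clock;
};

static const struct tv_mode tv_modes[] = {
	{ "NTSC-M",  108000 },	/* entries invented for the example */
	{ "PAL",     108000 },
	{ "480p@60", 107520 },
};

int main(void)
{
	size_t i;

	printf("%zu TV modes\n", ARRAY_SIZE(tv_modes));
	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
		printf("  %s\n", tv_modes[i].name);
	return 0;
}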
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index a3d25f419853..95f8b3a3c43d 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -335,6 +335,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev, | |||
335 | return snprintf(buf, PAGE_SIZE, "%s\n", | 335 | return snprintf(buf, PAGE_SIZE, "%s\n", |
336 | (cp == PM_PROFILE_AUTO) ? "auto" : | 336 | (cp == PM_PROFILE_AUTO) ? "auto" : |
337 | (cp == PM_PROFILE_LOW) ? "low" : | 337 | (cp == PM_PROFILE_LOW) ? "low" : |
338 | (cp == PM_PROFILE_MID) ? "mid" : | ||
338 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); | 339 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); |
339 | } | 340 | } |
340 | 341 | ||
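The radeon hunk only adds the missing "mid" case to the sysfs profile-name ternary chain. As a side note, a designated-initialiser lookup table makes that kind of omission harder to reintroduce; the sketch below uses assumed enum values standing in for radeon's PM_PROFILE_* constants, not the driver's definitions.

/* Sketch: the same profile-to-name mapping as a lookup table, which
 * makes a missing entry like "mid" easy to spot.  Enum values are
 * assumptions standing in for radeon's PM_PROFILE_* constants. */
#include <stdio.h>

enum pm_profile { PM_DEFAULT, PM_AUTO, PM_LOW, PM_MID, PM_HIGH, PM_NR };

static const char *const pm_profile_names[PM_NR] = {
	[PM_DEFAULT] = "default",
	[PM_AUTO]    = "auto",
	[PM_LOW]     = "low",
	[PM_MID]     = "mid",
	[PM_HIGH]    = "high",
};

int main(void)
{
	enum pm_profile p;

	for (p = PM_DEFAULT; p < PM_NR; p++)
		printf("%d -> %s\n", (int)p, pm_profile_names[p]);
	return 0;
}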